Dataset schema (one row per source file):

| column | dtype | range | nulls |
|---|---|---|---|
| hexsha | string | length 40 to 40 | |
| size | int64 | 1 to 1.03M | |
| ext | string | 10 distinct values | |
| lang | string | 1 distinct value | |
| max_stars_repo_path | string | length 3 to 239 | |
| max_stars_repo_name | string | length 5 to 130 | |
| max_stars_repo_head_hexsha | string | length 40 to 78 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 to 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 to 24 | ⌀ |
| max_issues_repo_path | string | length 3 to 239 | |
| max_issues_repo_name | string | length 5 to 130 | |
| max_issues_repo_head_hexsha | string | length 40 to 78 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 to 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 to 24 | ⌀ |
| max_forks_repo_path | string | length 3 to 239 | |
| max_forks_repo_name | string | length 5 to 130 | |
| max_forks_repo_head_hexsha | string | length 40 to 78 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 to 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 to 24 | ⌀ |
| content | string | length 1 to 1.03M | |
| avg_line_length | float64 | 1 to 958k | |
| max_line_length | int64 | 1 to 1.03M | |
| alphanum_fraction | float64 | 0 to 1 | |
hexsha: 79504dafe55c1356ba5a6f6170bc95b88d7282c5 | size: 30,959 | ext: py | lang: Python
max_stars_repo: MLforHealth/HurtfulWords @ b59181585aa70152f0fbe79fa2611ded928bf9f1, path scripts/finetune_on_target.py, licenses ["Apache-2.0"], stars 12 (2019-12-24T05:13:47.000Z to 2021-05-27T05:25:38.000Z)
max_issues_repo: MLforHealth/HurtfulWords @ b59181585aa70152f0fbe79fa2611ded928bf9f1, path scripts/finetune_on_target.py, licenses ["Apache-2.0"], issues 1 (2021-03-26T00:27:19.000Z to 2021-03-26T00:27:19.000Z)
max_forks_repo: MLforHealth/HurtfulWords @ b59181585aa70152f0fbe79fa2611ded928bf9f1, path scripts/finetune_on_target.py, licenses ["Apache-2.0"], forks 6 (2020-08-27T21:01:40.000Z to 2021-12-03T00:41:04.000Z)
content:
#!/h/haoran/anaconda3/bin/python
import sys
import os
sys.path.append(os.getcwd())
import pandas as pd
import numpy as np
import argparse
import Constants
import torch
import torch.nn as nn
from torch.utils import data
import pickle
from pytorch_pretrained_bert import BertTokenizer, BertModel
from run_classifier_dataset_utils import InputExample, convert_examples_to_features
from pathlib import Path
from tqdm import tqdm
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from gradient_reversal import GradientReversal
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, log_loss, mean_squared_error, classification_report
import random
import json
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from utils import create_hdf_key, Classifier, get_emb_size, MIMICDataset, extract_embeddings, EarlyStopping, load_checkpoint
from sklearn.model_selection import ParameterGrid
parser = argparse.ArgumentParser('Fine-tunes a pre-trained BERT model on a certain target for one fold. Outputs fine-tuned BERT model and classifier, ' +
'as well as a pickled dictionary mapping id: predicted probability')
parser.add_argument("--df_path",help = 'must have the following columns: seqs, num_seqs, fold, with note_id as index', type=str)
parser.add_argument("--model_path", type=str)
parser.add_argument('--fold_id', help = 'what fold to use as the DEV fold. Dataframe must have a "fold" column',nargs = '+', type=str, dest = 'fold_id', default = [])
parser.add_argument('--target_col_name', help = 'name of target to train on. Must be a column in the dataframe', type=str)
parser.add_argument("--output_dir", help = 'folder to output model/results', type=str)
parser.add_argument('--use_adversary', help = "whether or not to use an adversary. If True, must not have --freeze_bert", action = 'store_true')
parser.add_argument('--lm', help = 'lambda value for the adversary', type = float, default = 1.0)
parser.add_argument('--protected_group', help = 'name of protected group, must be a column in the dataframe', type = str, default = 'insurance')
parser.add_argument('--adv_layers', help = 'number of layers in adversary', type = int, default = 2)
parser.add_argument('--freeze_bert', help = 'freeze all BERT layers and only use pre-trained representation', action = 'store_true')
parser.add_argument('--train_batch_size', help = 'batch size to use for training', type = int)
parser.add_argument('--max_num_epochs', help = 'maximum number of epochs to train for', type = int, default = 20)
parser.add_argument('--es_patience', help = 'patience for the early stopping', type = int, default = 3)
parser.add_argument('--other_fields', help = 'other fields to add, must be columns in df', nargs = '+', type = str, dest = 'other_fields', default = [])
parser.add_argument('--seed', type = int, default = 42, help = 'random seed for initialization')
parser.add_argument('--dropout', type = float, default = 0, help = 'dropout probability for classifier')
parser.add_argument('--lr', type = float, default = 5e-4, help = 'learning rate for BertAdam optimizer')
parser.add_argument('--predictor_layers', type = int, default = 2, help = 'number of layers for classifier, ignored if gridsearch_classifier')
parser.add_argument('--emb_method', default = 'last', const = 'last', nargs = '?', choices = ['last', 'sum4', 'cat4'], help = 'what embedding layer to take')
parser.add_argument('--fairness_def', default = 'demo', const = 'demo', nargs = '?', choices = ['demo', 'odds'], help = 'what fairness definition to use: demographic parity, equality of odds')
parser.add_argument('--task_type', default = 'binary', const = 'binary', nargs = '?', choices = ['binary', 'multiclass', 'regression'], help = 'what type of data the target_col_name is')
parser.add_argument('--save_embs', help = 'save computed embeddings at the end', action = 'store_true')
parser.add_argument('--output_train_stats', help = 'export training set predictions into the dataframe', action = 'store_true')
parser.add_argument('--gridsearch_classifier', help = 'whether to run a grid search over the classifier parameters, using AUPRC as metric', action = 'store_true')
parser.add_argument('--average', help = 'whether to aggregate sequences to a single prediction by simple average, or by using the NYU agg function', action = 'store_true')
parser.add_argument('--gridsearch_c', help = 'whether to run a grid search over the NYU agg c parameter, using AUPRC as metric, only valid if not --average, and --gridsearch_classifier', action = 'store_true')
parser.add_argument('--use_new_mapping', help = 'whether to use new mapping for adversarial training', action = 'store_true')
parser.add_argument('--pregen_emb_path', help = '''if embeddings have been precomputed, can provide a path here (as a pickled dictionary mapping note_id:numpy array).
Will only be used if freeze_bert. note_ids in this dictionary must be a superset of the note_ids in df_path''', type = str)
parser.add_argument('--overwrite', help = 'whether to overwrite existing model/predictions', action = 'store_true')
args = parser.parse_args()
if os.path.isfile(os.path.join(args.output_dir, 'preds.pkl')) and not args.overwrite:
print("File already exists; exiting.")
sys.exit()
print('Reading dataframe...', flush = True)
df = pd.read_pickle(args.df_path)
if 'note_id' in df.columns:
df = df.set_index('note_id')
tokenizer = BertTokenizer.from_pretrained(args.model_path)
model = BertModel.from_pretrained(args.model_path)
target = args.target_col_name
assert(target in df.columns)
# even if no adversary is used, a valid protected group column is required for the code to work
if args.use_adversary:
protected_group = args.protected_group
assert(protected_group in df.columns)
if args.use_new_mapping:
mapping = Constants.newmapping
for i in Constants.drop_groups[protected_group]:
df = df[df[protected_group] != i]
else:
mapping = Constants.mapping
other_fields_to_include = args.other_fields
if args.freeze_bert:
for param in model.parameters():
param.requires_grad = False
assert('fold' in df.columns)
for i in args.fold_id:
assert(i in df['fold'].unique())
assert('test' in df['fold'].unique())
fold_id = args.fold_id
if args.gridsearch_c:
assert(args.task_type == 'binary')
c_grid = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.5, 0.7, 1, 1.2, 1.5, 2, 3, 5, 10, 20, 50, 100, 1000]
else:
c_grid = [2]
Path(args.output_dir).mkdir(parents = True, exist_ok = True)
EMB_SIZE = get_emb_size(args.emb_method)
train_df = df[~df.fold.isin(['test', 'NA', *fold_id])]
val_df = df[df.fold.isin(fold_id)]
test_df = df[df.fold == 'test']
def convert_input_example(note_id, text, seqIdx, target, group, other_fields = []):
return InputExample(guid = '%s-%s'%(note_id,seqIdx), text_a = text, text_b = None, label = target, group = mapping[protected_group][group] if args.use_adversary else 0, other_fields = other_fields)
# in training generator, return all folds except this.
# in validation generator, return only this fold
print('Converting input examples to appropriate format...', flush = True)
examples_train = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in train_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_eval = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in val_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_test = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in test_df.iterrows()
for c, i in enumerate(row.seqs)]
def convert_examples_to_features_emb(examples, embs):
features = []
for i in examples:
note_id, seq_id = i.guid.split('-')
emb = embs[note_id][int(seq_id), :]
features.append(EmbFeature(emb, y = i.label, guid = i.guid, group = i.group, other_fields = i.other_fields))
return features
class EmbFeature():
def __init__(self, emb, y, guid, group, other_fields):
self.emb = emb
self.y = y
self.guid = guid
self.group = group
self.other_fields = other_fields
class Embdataset(data.Dataset):
def __init__(self, features, gen_type):
self.features = features #list of EmbFeatures
self.gen_type = gen_type
self.length = len(features)
def __len__(self):
return self.length
def __getitem__(self, index):
emb = torch.tensor(self.features[index].emb, dtype = torch.float32)
if args.task_type in ['binary', 'regression']:
y = torch.tensor(self.features[index].y, dtype = torch.float32)
else:
y = torch.tensor(self.features[index].y, dtype = torch.long)
other_fields = self.features[index].other_fields
guid = self.features[index].guid
return emb, y, guid, other_fields
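# Adversary head used for debiasing. A sketch of the intent, based on the imported
# GradientReversal layer (assumed here to be the standard gradient-reversal trick): the layer
# acts as the identity in the forward pass and multiplies gradients by -lambda in the backward
# pass, so minimizing the combined loss trains the discriminator to predict the protected
# group while pushing the BERT encoder toward embeddings from which that group is hard to
# recover ('demo' feeds the embedding alone; 'odds' also concatenates the label y).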
class Discriminator(nn.Module):
def __init__(self, input_dim, num_layers, num_categories, lm):
super(Discriminator, self).__init__()
self.num_layers = num_layers
assert(num_layers >= 1)
self.input_dim = input_dim
self.num_categories = num_categories
self.lm = lm
self.layers = [GradientReversal(lambda_ = lm)]
for c, i in enumerate(range(num_layers)):
if c != num_layers-1:
self.layers.append(nn.Linear(input_dim // (2**c), input_dim // (2**(c+1))))
self.layers.append(nn.ReLU())
else:
self.layers.append(nn.Linear(input_dim // (2**c), num_categories))
self.layers.append(nn.Softmax(dim = 0))
self.layers = nn.ModuleList(self.layers)
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
return x
if args.gridsearch_classifier:
assert(args.freeze_bert)
grid = list(ParameterGrid({
'num_layers': [2,3,4],
'dropout_prob': [0, 0.2],
'decay_rate': [2,4,6]
}))
grid.append({
'num_layers': 1,
'dropout_prob': 0,
'decay_rate': 2
})
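# With gridsearch_classifier, this explores 3 x 2 x 3 = 18 combinations plus the single
# 1-layer baseline appended above, i.e. 19 classifier configurations in total.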
for i in grid: # adds extra fields to input arguments
i['input_dim'] = EMB_SIZE + len(other_fields_to_include)
i['task_type'] = args.task_type
else:
grid = [{ # only one parameter combination
'input_dim': EMB_SIZE + len(other_fields_to_include),
'num_layers': args.predictor_layers,
'dropout_prob': args.dropout,
'task_type': args.task_type
}]
if args.task_type == 'multiclass':
for i in grid:
i['multiclass_nclasses'] = len(df[target].unique())
if args.use_adversary:
discriminator = Discriminator(EMB_SIZE + int(args.fairness_def == 'odds'), args.adv_layers, len(mapping[protected_group]), args.lm)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
model.to(device)
if args.use_adversary:
discriminator.to(device)
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
if args.task_type == 'binary':
criterion = nn.BCELoss()
elif args.task_type == 'multiclass':
criterion = nn.CrossEntropyLoss()
elif args.task_type == 'regression':
criterion = nn.MSELoss()
criterion_adv = nn.CrossEntropyLoss()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = torch.nn.DataParallel(criterion)
if args.use_adversary:
discriminator = torch.nn.DataParallel(discriminator)
criterion_adv = torch.nn.DataParallel(criterion_adv)
def get_embs(generator):
'''
Given a generator, runs all the data through one pass of the model to calculate embeddings.
Used when BERT weights are frozen; computing the embeddings once up front saves compute.
'''
features = []
model.eval()
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
emb = bert_out[c,:].detach().cpu().numpy()
features.append(EmbFeature(emb = emb, y = y[c], guid = i, group = group, other_fields= [i[c] for i in other_vars]))
return features
print('Featurizing examples...', flush = True)
if not args.pregen_emb_path:
features_train = convert_examples_to_features(examples_train,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_eval = convert_examples_to_features(examples_eval,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_test = convert_examples_to_features(examples_test,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
training_set = MIMICDataset(features_train, 'train' ,args.task_type)
training_generator = data.DataLoader(training_set, shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_set = MIMICDataset(features_eval, 'val', args.task_type)
val_generator = data.DataLoader(val_set, shuffle = False, batch_size = args.train_batch_size)
test_set = MIMICDataset(features_test, 'test', args.task_type)
test_generator = data.DataLoader(test_set, shuffle = False, batch_size = args.train_batch_size)
if args.freeze_bert: #only need to precalculate for training and val set
if args.pregen_emb_path:
pregen_embs = pickle.load(open(args.pregen_emb_path, 'rb'))
features_train_embs = convert_examples_to_features_emb(examples_train, pregen_embs)
features_val_embs = convert_examples_to_features_emb(examples_eval, pregen_embs)
features_test_embs = convert_examples_to_features_emb(examples_test, pregen_embs)
else:
features_train_embs = get_embs(training_generator)
features_val_embs = get_embs(val_generator)
features_test_embs = get_embs(test_generator)
training_generator = data.DataLoader(Embdataset(features_train_embs, 'train'), shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_generator = data.DataLoader(Embdataset(features_val_embs, 'val'), shuffle = False, batch_size = args.train_batch_size)
test_generator= data.DataLoader(Embdataset(features_test_embs, 'test'), shuffle = False, batch_size = args.train_batch_size)
num_train_epochs = args.max_num_epochs
learning_rate = args.lr
num_train_optimization_steps = len(training_generator) * num_train_epochs
warmup_proportion = 0.1
PREDICTOR_CHECKPOINT_PATH = os.path.join(args.output_dir, 'predictor.chkpt')
MODEL_CHECKPOINT_PATH = os.path.join(args.output_dir, 'model.chkpt')
grid_auprcs = []
es_models = []
optimal_cs = []
actual_val = val_df[target]
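# Sequence-to-note aggregation helpers. merge_probs implements the "NYU agg" pooling
# referenced in the --gridsearch_c help: a blend of the max and the mean of the
# per-sequence probabilities, where the weight on the mean grows with len(probs)/c.
# For example (hypothetical values), probs = [0.2, 0.8] with c = 2 gives
# (0.8 + 0.5 * 2/2) / (1 + 2/2) = 0.65; larger c moves the result toward the max.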
def merge_probs(probs, c):
return (np.max(probs) + np.mean(probs)*len(probs)/float(c))/(1+len(probs)/float(c))
def avg_probs(probs):
return np.mean(probs)
def avg_probs_multiclass(probs):
return np.argmax(np.mean(probs, axis = 0))
def merge_regression(preds):
return np.mean(preds)
def evaluate_on_set(generator, predictor, emb_gen = False, c_val=2):
'''
Inputs: a pytorch data loader; emb_gen, whether the generator yields precomputed embeddings or raw text features; c_val, the aggregation parameter
Outputs:
prediction_dict: a dictionary mapping note_id (str) to a list of predicted probabilities
merged_preds: a dictionary mapping note_id (str) to a single merged probability
embs: a dictionary mapping note_id (str) to a numpy 2d array (shape num_seq * 768)
'''
model.eval()
predictor.eval()
if generator.dataset.gen_type == 'val':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in val_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in val_df.iterrows()}
elif generator.dataset.gen_type == 'test':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in test_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in test_df.iterrows()}
elif generator.dataset.gen_type == 'train':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in train_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in train_df.iterrows()}
if emb_gen:
with torch.no_grad():
for embs, y, guid, other_vars in generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
else:
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
embs[note_id][int(seq_id), :] = bert_out[c,:EMB_SIZE].detach().cpu().numpy()
merged_preds = merge_preds(prediction_dict, c_val)
return (prediction_dict, merged_preds, embs)
def merge_preds(prediction_dict, c=2):
merged_preds = {}
for i in prediction_dict:
if args.task_type == 'binary':
if args.average:
merged_preds[i] = avg_probs(prediction_dict[i])
else:
merged_preds[i] = merge_probs(prediction_dict[i], c)
elif args.task_type == 'regression':
merged_preds[i] = merge_regression(prediction_dict[i])
elif args.task_type == 'multiclass':
merged_preds[i] = avg_probs_multiclass(np.array(prediction_dict[i]))
return merged_preds
for predictor_params in grid:
print(predictor_params, flush = True)
predictor = Classifier(**predictor_params).to(device)
if n_gpu > 1:
predictor = torch.nn.DataParallel(predictor)
if not(args.freeze_bert) and not(args.use_adversary):
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters())
elif args.freeze_bert and not(args.use_adversary):
param_optimizer = list(predictor.named_parameters())
elif args.freeze_bert and args.use_adversary:
raise Exception('No purpose in using an adversary if BERT layers are frozen')
else:
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters()) + list(discriminator.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
es = EarlyStopping(patience = args.es_patience)
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps)
warmup_linear = WarmupLinearSchedule(warmup=warmup_proportion,
t_total=num_train_optimization_steps)
for epoch in range(1, num_train_epochs+1):
# training
if not args.freeze_bert:
model.train()
else:
model.eval()
predictor.train()
if args.use_adversary:
discriminator.train()
running_loss = 0.0
num_steps = 0
with tqdm(total=len(training_generator), desc="Epoch %s"%epoch) as pbar:
if not args.freeze_bert:
for input_ids, input_mask, segment_ids, y, group, _, other_vars in training_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if args.use_adversary:
adv_input = bert_out[:, :-len(other_vars)]
if args.fairness_def == 'odds':
adv_input = torch.cat([adv_input, y.unsqueeze(dim = 1)], 1)
adv_pred = discriminator(adv_input)
adv_loss = criterion_adv(adv_pred, group)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
else: # if frozen, use precomputed embeddings to save time
for embs, y,_, other_vars in training_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
# evaluate here
model.eval()
predictor.eval()
val_loss = 0
with torch.no_grad():
if args.freeze_bert:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor}
for embs, y, guid, other_vars in val_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
val_loss += loss.item()
val_loss /= len(val_generator)
# early stopping uses val loss as metric
# model selection/c selection uses AUPRC as metric
else:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor,
MODEL_CHECKPOINT_PATH: model}
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in val_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
val_loss += loss.item()
val_loss /= len(val_generator)
print('Val loss: %s'%val_loss, flush = True)
es(val_loss, checkpoints)
if es.early_stop:
break
print('Trained for %s epochs' % epoch)
predictor.load_state_dict(load_checkpoint(PREDICTOR_CHECKPOINT_PATH))
os.remove(PREDICTOR_CHECKPOINT_PATH)
if not args.freeze_bert:
model.load_state_dict(load_checkpoint(MODEL_CHECKPOINT_PATH))
os.remove(MODEL_CHECKPOINT_PATH)
if args.gridsearch_classifier:
auprcs = [] #one value for each in c grid
prediction_dict, _, _ = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert)
for c_val in c_grid:
merged_preds_val = merge_preds(prediction_dict, c_val)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
auprcs.append(average_precision_score(actual_val.values.astype(int), merged_preds_val_list))
print(auprcs, flush = True)
print(c_grid, flush = True)
idx_max = np.argmax(auprcs)
grid_auprcs.append(auprcs[idx_max])
es_models.append(predictor.cpu())
optimal_cs.append(c_grid[idx_max])
print('val AUPRC:%.5f optimal c: %s' %(auprcs[idx_max], c_grid[idx_max] ))
# find best predictor here, move back to cpu
if args.gridsearch_classifier:
idx_max = np.argmax(grid_auprcs)
predictor = es_models[idx_max].to(device)
opt_c = optimal_cs[idx_max]
else:
opt_c = 2.0
# evaluate on val set
prediction_dict_val, merged_preds_val, embs_val = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
if args.task_type == 'binary':
acc = accuracy_score(actual_val.values.astype(int), np.array(merged_preds_val_list).round())
auprc = average_precision_score(actual_val.values.astype(int), merged_preds_val_list)
ll = log_loss(actual_val.values.astype(int), merged_preds_val_list)
roc = roc_auc_score(actual_val.values.astype(int), merged_preds_val_list)
print('Accuracy: %.5f' % acc)
print('AUPRC: %.5f' % auprc)
print('Log Loss: %.5f' % ll)
print('AUROC: %.5f' % roc)
elif args.task_type == 'regression':
mse = mean_squared_error(actual_val, merged_preds_val_list)
print('MSE: %.5f' % mse)
elif args.task_type == 'multiclass':
report = classification_report(actual_val.values.astype(int), np.array(merged_preds_val_list))
print(report)
prediction_dict_test, merged_preds_test, embs_test = evaluate_on_set(test_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
if args.output_train_stats:
prediction_dict_train, merged_preds_train, embs_train = evaluate_on_set(training_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
else:
merged_preds_train, embs_train = {}, {}
# save predictor
json.dump(predictor_params, open(os.path.join(args.output_dir, 'predictor_params.json'), 'w'))
torch.save(predictor.state_dict(), os.path.join(args.output_dir, 'predictor.pt'))
# save model
if not args.freeze_bert:
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# save args
json.dump(vars(args), open(os.path.join(args.output_dir, 'argparse_args.json'), 'w'))
#saves embeddings
if args.save_embs:
embs = {**embs_val, **embs_test, **embs_train}
pickle.dump(embs, open(os.path.join(args.output_dir, 'embs.pkl'), 'wb'))
rough_preds = {**merged_preds_val, **merged_preds_test, **merged_preds_train}
pickle.dump(rough_preds, open(os.path.join(args.output_dir, 'preds.pkl'), 'wb'))
# saves gridsearch info
pickle.dump({
'grid_auprcs':grid_auprcs,
'optimal_cs': optimal_cs,
'opt_c': opt_c
}, open(os.path.join(args.output_dir, 'gs_info.pkl'), 'wb'))
avg_line_length: 46.978756 | max_line_length: 209 | alphanum_fraction: 0.647405

hexsha: 79504e2726f5c2bbe17f27addc47fa5dde28d5ce | size: 3,249 | ext: py | lang: Python
max_stars_repo: lbreede/project-euler @ c225e3742f749fc681034aac98cc9c23f8cdb37e, path python/p011.py, licenses ["MIT"], stars null (event datetimes null)
max_issues_repo: lbreede/project-euler @ c225e3742f749fc681034aac98cc9c23f8cdb37e, path python/p011.py, licenses ["MIT"], issues null (event datetimes null)
max_forks_repo: lbreede/project-euler @ c225e3742f749fc681034aac98cc9c23f8cdb37e, path python/p011.py, licenses ["MIT"], forks null (event datetimes null)
content:
from math import prod
INPUT = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08\n"\
"49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00\n"\
"81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65\n"\
"52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91\n"\
"22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80\n"\
"24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50\n"\
"32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70\n"\
"67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21\n"\
"24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72\n"\
"21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95\n"\
"78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92\n"\
"16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57\n"\
"86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58\n"\
"19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40\n"\
"04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66\n"\
"88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69\n"\
"04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36\n"\
"20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16\n"\
"20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54\n"\
"01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"\
BRACKET_SIZE = 4
def main():
line_list = INPUT.split("\n")
grid = gen_grid(line_list)
value = 0
value = process_rows(grid, value)
value = process_cols(grid, value)
# Diagonals. Legend: Top (T), Left (L), Bottom (B), Right (R)
value = process_cols(grid, value, [0,1,2,3]) # TL-BR
value = process_cols(grid, value, [3,2,1,0]) # TR-BL
print(value)
def gen_grid(lines):
grid_list = []
for line in lines:
grid_list.append(
[int(val) for val in line.split()]
)
return grid_list
def process_rows(grid, value):
for row in grid:
row_len = len(row)
for x in range(row_len - BRACKET_SIZE + 1):
bracket = row[x:x + BRACKET_SIZE]
product = prod(bracket)
if product > value:
value = product
return value
def process_cols(grid, value, offset=[0,0,0,0]):
"""Processes four rows at a time and running over the columns of that sub
grid.
Args:
grid (list[int]):
A nested list of columns and rows.
value (int):
The parsed value gets processes by all function, yielding the final
result at the very end.
offset (list[int]):
A lookup offset, enabling this function to also process diagonals.
Recommended patterns are 0,0,0,0 (default), 0,1,2,3 and 3,2,1,0
Returns:
The integer value compared with itself on every iteration and increased
if needed. This will always yield the largest possible number per
function.
"""
col_len = len(grid)
for x in range(col_len - BRACKET_SIZE + 1):
sub_grid = grid[x:x + BRACKET_SIZE]
sample_row_len = len(sub_grid[0])
for y in range(sample_row_len - max(offset)):
product = prod([
sub_grid[0][y + offset[0]],
sub_grid[1][y + offset[1]],
sub_grid[2][y + offset[2]],
sub_grid[3][y + offset[3]]
])
if product > value:
value = product
return value
if __name__ == "__main__":
main()
avg_line_length: 34.2 | max_line_length: 79 | alphanum_fraction: 0.633118

hexsha: 79504ec79b52df4752b1fd981bf49f355f7f43d9 | size: 27,860 | ext: py | lang: Python
max_stars_repo: rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1, path sdk/network/azure-mgmt-network/tests/test_cli_mgmt_network_load_balancer.py, licenses ["MIT"], stars 2,728 (2015-01-09T10:19:32.000Z to 2022-03-31T14:50:33.000Z)
max_issues_repo: rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1, path sdk/network/azure-mgmt-network/tests/test_cli_mgmt_network_load_balancer.py, licenses ["MIT"], issues 17,773 (2015-01-05T15:57:17.000Z to 2022-03-31T23:50:25.000Z)
max_forks_repo: rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1, path sdk/network/azure-mgmt-network/tests/test_cli_mgmt_network_load_balancer.py, licenses ["MIT"], forks 1,916 (2015-01-19T05:05:41.000Z to 2022-03-31T19:36:44.000Z)
content:
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 21
# Methods Covered : 21
# Examples Total : 26
# Examples Tested : 26
# Coverage % : 100
# ----------------------
# load_balancers: 6/6
# inbound_nat_rules: 4/4
# load_balancer_frontend_ip_configurations: 2/2
# load_balancer_backend_address_pools: 2/2
# load_balancer_load_balancing_rules: 2/2
# load_balancer_outbound_rules: 2/2
# load_balancer_probes: 2/2
# load_balancer_network_interfaces: 1/1
import unittest
import azure.mgmt.network
from devtools_testutils import AzureMgmtTestCase, RandomNameResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtNetworkTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtNetworkTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
def create_public_ip_address(self, group_name, location, public_ip_address_name):
# Create public IP address defaults[put]
BODY = {
"public_ip_allocation_method": "Static",
"idle_timeout_in_minutes": 10,
"public_ip_address_version": "IPv4",
"location": location,
"sku": {
"name": "Standard"
}
}
result = self.mgmt_client.public_ip_addresses.begin_create_or_update(group_name, public_ip_address_name, BODY)
result = result.result()
def create_virtual_network(self, group_name, location, network_name, subnet_name):
result = self.mgmt_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = result.result()
async_subnet_creation = self.mgmt_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name,
{'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
return subnet_info
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
def test_network(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
VIRTUAL_NETWORK_NAME = "virtualnetwork"
SUBNET_NAME = "subnet"
PUBLIC_IP_ADDRESS_NAME = "public_ip_address_name"
LOAD_BALANCER_NAME = "myLoadBalancer"
INBOUND_NAT_RULE_NAME = "myInboundNatRule"
FRONTEND_IPCONFIGURATION_NAME = "myFrontendIpconfiguration"
BACKEND_ADDRESS_POOL_NAME = "myBackendAddressPool"
LOAD_BALANCING_RULE_NAME = "myLoadBalancingRule"
OUTBOUND_RULE_NAME = "myOutboundRule"
PROBE_NAME = "myProbe"
# self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
self.create_public_ip_address(RESOURCE_GROUP, AZURE_LOCATION, PUBLIC_IP_ADDRESS_NAME)
# Create load balancer
BODY = {
"location": "eastus",
"sku": {
"name": "Standard"
},
"frontendIPConfigurations": [
{
"name": FRONTEND_IPCONFIGURATION_NAME,
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME
# }
"public_ip_address": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/publicIPAddresses/" + PUBLIC_IP_ADDRESS_NAME
}
}
],
"backend_address_pools": [
{
"name": BACKEND_ADDRESS_POOL_NAME
}
],
"load_balancing_rules": [
{
"name": LOAD_BALANCING_RULE_NAME,
"frontend_ip_configuration": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/frontendIPConfigurations/" + FRONTEND_IPCONFIGURATION_NAME
},
"frontend_port": "80",
"backend_port": "80",
"enable_floating_ip": True,
"idle_timeout_in_minutes": "15",
"protocol": "Tcp",
"load_distribution": "Default",
"disable_outbound_snat": True,
"backend_address_pool": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/backendAddressPools/" + BACKEND_ADDRESS_POOL_NAME
},
"probe": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/probes/" + PROBE_NAME
}
}
],
"probes": [
{
"name": PROBE_NAME,
"protocol": "Http",
"port": "80",
"request_path": "healthcheck.aspx",
"interval_in_seconds": "15",
"number_of_probes": "2"
}
],
"outbound_rules": [
{
"name": OUTBOUND_RULE_NAME,
"backend_address_pool": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/backendAddressPools/" + BACKEND_ADDRESS_POOL_NAME
},
"frontend_ip_configurations": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/frontendIPConfigurations/" + FRONTEND_IPCONFIGURATION_NAME
}
],
"protocol": "All"
}
]
}
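# Note (descriptive, not from the original file): the load balancing rule and outbound rule
# above reference the frontend IP configuration, backend pool and probe by their full ARM
# resource IDs under the load balancer being created, which is why the IDs are assembled
# from SUBSCRIPTION_ID, RESOURCE_GROUP and LOAD_BALANCER_NAME; Azure resolves these
# child-resource references within the same PUT request.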
result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
result = result.result()
# # /LoadBalancers/put/Create load balancer with Standard SKU[put]
# BODY = {
# "location": "eastus",
# "sku": {
# "name": "Standard"
# },
# "frontend_ipconfigurations": [
# {
# "name": "fe-lb",
# "properties": {
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworksvnetlbsubnetssubnetlb"
# }
# }
# }
# ],
# "backend_address_pools": [
# {
# "name": "be-lb"
# }
# ],
# "load_balancing_rules": [
# {
# "name": "rulelb",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "80",
# "backend_port": "80",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp",
# "load_distribution": "Default",
# "backend_address_pool": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbbackendAddressPoolsbe-lb"
# },
# "probe": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbprobesprobe-lb"
# }
# }
# }
# ],
# "probes": [
# {
# "name": "probe-lb",
# "properties": {
# "protocol": "Http",
# "port": "80",
# "request_path": "healthcheck.aspx",
# "interval_in_seconds": "15",
# "number_of_probes": "2"
# }
# }
# ],
# "inbound_nat_rules": [
# {
# "name": "in-nat-rule",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "3389",
# "backend_port": "3389",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp"
# }
# }
# ],
# "inbound_nat_pools": [],
# "outbound_rules": []
# }
# result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# result = result.result()
# # /LoadBalancers/put/Create load balancer with Frontend IP in Zone 1[put]
# BODY = {
# "location": "eastus",
# "sku": {
# "name": "Standard"
# },
# "frontend_ipconfigurations": [
# {
# "name": "fe-lb",
# "properties": {
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworksvnetlbsubnetssubnetlb"
# }
# },
# "zones": [
# "1"
# ]
# }
# ],
# "backend_address_pools": [
# {
# "name": "be-lb"
# }
# ],
# "load_balancing_rules": [
# {
# "name": "rulelb",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "80",
# "backend_port": "80",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp",
# "load_distribution": "Default",
# "backend_address_pool": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbbackendAddressPoolsbe-lb"
# },
# "probe": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbprobesprobe-lb"
# }
# }
# }
# ],
# "probes": [
# {
# "name": "probe-lb",
# "properties": {
# "protocol": "Http",
# "port": "80",
# "request_path": "healthcheck.aspx",
# "interval_in_seconds": "15",
# "number_of_probes": "2"
# }
# }
# ],
# "inbound_nat_rules": [
# {
# "name": "in-nat-rule",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "3389",
# "backend_port": "3389",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp"
# }
# }
# ],
# "inbound_nat_pools": [],
# "outbound_rules": []
# }
# result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# result = result.result()
# # /LoadBalancers/put/Create load balancer with inbound nat pool[put]
# BODY = {
# "location": "eastus",
# "sku": {
# "name": "Standard"
# },
# "frontend_ipconfigurations": [
# {
# "properties": {
# "private_ipallocation_method": "Dynamic",
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworkslbvnetsubnetslbsubnet"
# }
# },
# "name": "test",
# "zones": [],
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationstest"
# }
# ],
# "backend_address_pools": [],
# "load_balancing_rules": [],
# "probes": [],
# "inbound_nat_rules": [],
# "outbound_rules": [],
# "inbound_nat_pools": [
# {
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationstest"
# },
# "protocol": "Tcp",
# "frontend_port_range_start": "8080",
# "frontend_port_range_end": "8085",
# "backend_port": "8888",
# "idle_timeout_in_minutes": "10",
# "enable_floating_ip": True,
# "enable_tcp_reset": True
# },
# "name": "test",
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbinboundNatPoolstest"
# }
# ]
# }
# result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# result = result.result()
# # /LoadBalancers/put/Create load balancer with outbound rules[put]
# BODY = {
# "location": "eastus",
# "sku": {
# "name": "Standard"
# },
# "frontend_ipconfigurations": [
# {
# "name": "fe-lb",
# "properties": {
# "public_ip_address": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/publicIPAddressespip"
# }
# }
# }
# ],
# "backend_address_pools": [
# {
# "name": "be-lb"
# }
# ],
# "load_balancing_rules": [
# {
# "name": "rulelb",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "backend_address_pool": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbbackendAddressPoolsbe-lb"
# },
# "probe": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbprobesprobe-lb"
# },
# "protocol": "Tcp",
# "load_distribution": "Default",
# "frontend_port": "80",
# "backend_port": "80",
# "idle_timeout_in_minutes": "15",
# "enable_floating_ip": True,
# "disable_outbound_snat": True
# }
# }
# ],
# "probes": [
# {
# "name": "probe-lb",
# "properties": {
# "protocol": "Http",
# "port": "80",
# "request_path": "healthcheck.aspx",
# "interval_in_seconds": "15",
# "number_of_probes": "2"
# }
# }
# ],
# "inbound_nat_rules": [
# {
# "name": "in-nat-rule",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "3389",
# "backend_port": "3389",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp"
# }
# }
# ],
# "inbound_nat_pools": [],
# "outbound_rules": [
# {
# "name": "rule1",
# "properties": {
# "backend_address_pool": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbbackendAddressPoolsbe-lb"
# },
# "frontend_ipconfigurations": [
# {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# }
# ],
# "protocol": "All"
# }
# }
# ]
# }
# result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# result = result.result()
# # /LoadBalancers/put/Create load balancer[put]
# BODY = {
# "location": "eastus",
# "frontend_ipconfigurations": [
# {
# "name": "fe-lb",
# "properties": {
# "subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworksvnetlbsubnetssubnetlb"
# }
# }
# }
# ],
# "backend_address_pools": [
# {
# "name": "be-lb"
# }
# ],
# "load_balancing_rules": [
# {
# "name": "rulelb",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "80",
# "backend_port": "80",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp",
# "enable_tcp_reset": False,
# "load_distribution": "Default",
# "backend_address_pool": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbbackendAddressPoolsbe-lb"
# },
# "probe": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbprobesprobe-lb"
# }
# }
# }
# ],
# "probes": [
# {
# "name": "probe-lb",
# "properties": {
# "protocol": "Http",
# "port": "80",
# "request_path": "healthcheck.aspx",
# "interval_in_seconds": "15",
# "number_of_probes": "2"
# }
# }
# ],
# "inbound_nat_rules": [
# {
# "name": "in-nat-rule",
# "properties": {
# "frontend_ipconfiguration": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancerslbfrontendIPConfigurationsfe-lb"
# },
# "frontend_port": "3389",
# "backend_port": "3389",
# "enable_floating_ip": True,
# "idle_timeout_in_minutes": "15",
# "protocol": "Tcp",
# "enable_tcp_reset": False
# }
# }
# ],
# "inbound_nat_pools": []
# }
# result = self.mgmt_client.load_balancers.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# result = result.result()
# /InboundNatRules/put/InboundNatRuleCreate[put]
BODY = {
"protocol": "Tcp",
"frontend_ip_configuration": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/frontendIPConfigurations/" + FRONTEND_IPCONFIGURATION_NAME
},
"frontend_port": "3390",
"backend_port": "3389",
"idle_timeout_in_minutes": "4",
"enable_tcp_reset": False,
"enable_floating_ip": False
}
result = self.mgmt_client.inbound_nat_rules.begin_create_or_update(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, inbound_nat_rule_name=INBOUND_NAT_RULE_NAME, inbound_nat_rule_parameters=BODY)
result = result.result()
# /LoadBalancerFrontendIPConfigurations/get/LoadBalancerFrontendIPConfigurationGet[get]
result = self.mgmt_client.load_balancer_frontend_ip_configurations.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, frontend_ip_configuration_name=FRONTEND_IPCONFIGURATION_NAME)
# /LoadBalancerBackendAddressPools/get/LoadBalancerBackendAddressPoolGet[get]
result = self.mgmt_client.load_balancer_backend_address_pools.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, backend_address_pool_name=BACKEND_ADDRESS_POOL_NAME)
# /LoadBalancerLoadBalancingRules/get/LoadBalancerLoadBalancingRuleGet[get]
result = self.mgmt_client.load_balancer_load_balancing_rules.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, load_balancing_rule_name=LOAD_BALANCING_RULE_NAME)
# /InboundNatRules/get/InboundNatRuleGet[get]
result = self.mgmt_client.inbound_nat_rules.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, inbound_nat_rule_name=INBOUND_NAT_RULE_NAME)
# /LoadBalancerOutboundRules/get/LoadBalancerOutboundRuleGet[get]
result = self.mgmt_client.load_balancer_outbound_rules.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, outbound_rule_name=OUTBOUND_RULE_NAME)
# /LoadBalancerFrontendIPConfigurations/get/LoadBalancerFrontendIPConfigurationList[get]
result = self.mgmt_client.load_balancer_frontend_ip_configurations.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerProbes/get/LoadBalancerProbeGet[get]
result = self.mgmt_client.load_balancer_probes.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, probe_name=PROBE_NAME)
# /LoadBalancerBackendAddressPools/get/LoadBalancerBackendAddressPoolList[get]
result = self.mgmt_client.load_balancer_backend_address_pools.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerLoadBalancingRules/get/LoadBalancerLoadBalancingRuleList[get]
result = self.mgmt_client.load_balancer_load_balancing_rules.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerNetworkInterfaces/get/LoadBalancerNetworkInterfaceListVmss[get]
result = self.mgmt_client.load_balancer_network_interfaces.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerNetworkInterfaces/get/LoadBalancerNetworkInterfaceListSimple[get]
result = self.mgmt_client.load_balancer_network_interfaces.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /InboundNatRules/get/InboundNatRuleList[get]
result = self.mgmt_client.inbound_nat_rules.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerOutboundRules/get/LoadBalancerOutboundRuleList[get]
result = self.mgmt_client.load_balancer_outbound_rules.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancerProbes/get/LoadBalancerProbeList[get]
result = self.mgmt_client.load_balancer_probes.list(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancers/get/Get load balancer[get]
result = self.mgmt_client.load_balancers.get(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
# /LoadBalancers/get/List load balancers in resource group[get]
result = self.mgmt_client.load_balancers.list(resource_group_name=RESOURCE_GROUP)
# /LoadBalancers/get/List all load balancers[get]
result = self.mgmt_client.load_balancers.list_all()
# /LoadBalancers/patch/Update load balancer tags[patch]
BODY = {
"tags": {
"tag1": "value1",
"tag2": "value2"
}
}
result = self.mgmt_client.load_balancers.update_tags(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, parameters=BODY)
# /InboundNatRules/delete/InboundNatRuleDelete[delete]
result = self.mgmt_client.inbound_nat_rules.begin_delete(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME, inbound_nat_rule_name=INBOUND_NAT_RULE_NAME)
result = result.result()
# /LoadBalancers/delete/Delete load balancer[delete]
result = self.mgmt_client.load_balancers.begin_delete(resource_group_name=RESOURCE_GROUP, load_balancer_name=LOAD_BALANCER_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
avg_line_length: 44.576 | max_line_length: 229 | alphanum_fraction: 0.556748

hexsha: 79504f0ee5fd6b8c735ce3eba663a95f13ae16a7 | size: 1,491 | ext: py | lang: Python
max_stars_repo: samhays/cfn-python-lint @ 191ec30640fd2d81159976bd077f4da4f2078309, path test/rules/functions/test_sub.py, licenses ["MIT-0"], stars 1 (2018-04-19T14:51:13.000Z to 2018-04-19T14:51:13.000Z)
max_issues_repo: samhays/cfn-python-lint @ 191ec30640fd2d81159976bd077f4da4f2078309, path test/rules/functions/test_sub.py, licenses ["MIT-0"], issues null (event datetimes null)
max_forks_repo: samhays/cfn-python-lint @ 191ec30640fd2d81159976bd077f4da4f2078309, path test/rules/functions/test_sub.py, licenses ["MIT-0"], forks null (event datetimes null)
content:
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.functions.Sub import Sub # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestRulesSub(BaseRuleTestCase):
"""Test Rules Get Att """
def setUp(self):
"""Setup"""
super(TestRulesSub, self).setUp()
self.collection.register(Sub())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('templates/bad/functions_sub.yaml', 4)
avg_line_length: 42.6 | max_line_length: 87 | alphanum_fraction: 0.733736

hexsha: 795050e5aa69e130c76b516a355b5c518e1f3407 | size: 5,698 | ext: py | lang: Python
max_stars_repo: MarcoGorelli/pymc @ 140dab0199dfb751951ba99175295c07feb00264, path pymc/sampling_jax.py, licenses ["Apache-2.0"], stars 1,554 (2015-01-03T05:50:28.000Z to 2022-03-31T03:32:29.000Z)
max_issues_repo: MarcoGorelli/pymc @ 140dab0199dfb751951ba99175295c07feb00264, path pymc/sampling_jax.py, licenses ["Apache-2.0"], issues 922 (2015-01-03T17:51:09.000Z to 2022-03-31T22:49:44.000Z)
max_forks_repo: MarcoGorelli/pymc @ 140dab0199dfb751951ba99175295c07feb00264, path pymc/sampling_jax.py, licenses ["Apache-2.0"], forks 484 (2015-01-12T16:44:47.000Z to 2022-03-31T13:40:01.000Z)
content:
# pylint: skip-file
import os
import re
import sys
import warnings
from typing import Callable, List
from aesara.graph import optimize_graph
from aesara.tensor import TensorVariable
xla_flags = os.getenv("XLA_FLAGS", "")
xla_flags = re.sub(r"--xla_force_host_platform_device_count=.+\s", "", xla_flags).split()
os.environ["XLA_FLAGS"] = " ".join([f"--xla_force_host_platform_device_count={100}"] + xla_flags)
import aesara.tensor as at
import arviz as az
import jax
import numpy as np
import pandas as pd
from aesara.assert_op import Assert
from aesara.compile import SharedVariable
from aesara.graph.basic import clone_replace, graph_inputs
from aesara.graph.fg import FunctionGraph
from aesara.link.jax.dispatch import jax_funcify
from pymc import Model, modelcontext
from pymc.aesaraf import compile_rv_inplace
warnings.warn("This module is experimental.")
@jax_funcify.register(Assert)
def jax_funcify_Assert(op, **kwargs):
# JAX does not allow asserts whose values aren't known during JIT compilation
# within its JIT-ed code. Hence we need to make a simple pass-through
# version of the Assert Op.
# https://github.com/google/jax/issues/2273#issuecomment-589098722
def assert_fn(value, *inps):
return value
return assert_fn
def replace_shared_variables(graph: List[TensorVariable]) -> List[TensorVariable]:
"""Replace shared variables in graph by their constant values
Raises
------
ValueError
If any shared variable contains default_updates
"""
shared_variables = [var for var in graph_inputs(graph) if isinstance(var, SharedVariable)]
if any(hasattr(var, "default_update") for var in shared_variables):
raise ValueError(
"Graph contains shared variables with default_update which cannot "
"be safely replaced."
)
replacements = {var: at.constant(var.get_value(borrow=True)) for var in shared_variables}
new_graph = clone_replace(graph, replace=replacements)
return new_graph
def get_jaxified_logp(model: Model) -> Callable:
"""Compile model.logpt into an optimized jax function"""
logpt = replace_shared_variables([model.logpt])[0]
logpt_fgraph = FunctionGraph(outputs=[logpt], clone=False)
optimize_graph(logpt_fgraph, include=["fast_run"], exclude=["cxx_only", "BlasOpt"])
# We now jaxify the optimized fgraph
logp_fn = jax_funcify(logpt_fgraph)
if isinstance(logp_fn, (list, tuple)):
# This handles the new JAX backend, which always returns a tuple
logp_fn = logp_fn[0]
def logp_fn_wrap(x):
res = logp_fn(*x)
if isinstance(res, (list, tuple)):
# This handles the new JAX backend, which always returns a tuple
res = res[0]
# Jax expects a potential with the opposite sign of model.logpt
return -res
return logp_fn_wrap
def sample_numpyro_nuts(
draws=1000,
tune=1000,
chains=4,
target_accept=0.8,
random_seed=10,
model=None,
progress_bar=True,
keep_untransformed=False,
):
from numpyro.infer import MCMC, NUTS
model = modelcontext(model)
tic1 = pd.Timestamp.now()
print("Compiling...", file=sys.stdout)
rv_names = [rv.name for rv in model.value_vars]
init_state = [model.initial_point[rv_name] for rv_name in rv_names]
init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)
logp_fn = get_jaxified_logp(model)
nuts_kernel = NUTS(
potential_fn=logp_fn,
target_accept_prob=target_accept,
adapt_step_size=True,
adapt_mass_matrix=True,
dense_mass=False,
)
pmap_numpyro = MCMC(
nuts_kernel,
num_warmup=tune,
num_samples=draws,
num_chains=chains,
postprocess_fn=None,
chain_method="parallel",
progress_bar=progress_bar,
)
tic2 = pd.Timestamp.now()
print("Compilation time = ", tic2 - tic1, file=sys.stdout)
print("Sampling...", file=sys.stdout)
seed = jax.random.PRNGKey(random_seed)
map_seed = jax.random.split(seed, chains)
pmap_numpyro.run(map_seed, init_params=init_state_batched, extra_fields=("num_steps",))
raw_mcmc_samples = pmap_numpyro.get_samples(group_by_chain=True)
tic3 = pd.Timestamp.now()
print("Sampling time = ", tic3 - tic2, file=sys.stdout)
print("Transforming variables...", file=sys.stdout)
mcmc_samples = []
for i, (value_var, raw_samples) in enumerate(zip(model.value_vars, raw_mcmc_samples)):
raw_samples = at.constant(np.asarray(raw_samples))
rv = model.values_to_rvs[value_var]
transform = getattr(value_var.tag, "transform", None)
if transform is not None:
# TODO: This will fail when the transformation depends on another variable
# such as in interval transform with RVs as edges
trans_samples = transform.backward(raw_samples, *rv.owner.inputs)
trans_samples.name = rv.name
mcmc_samples.append(trans_samples)
if keep_untransformed:
raw_samples.name = value_var.name
mcmc_samples.append(raw_samples)
else:
raw_samples.name = rv.name
mcmc_samples.append(raw_samples)
mcmc_varnames = [var.name for var in mcmc_samples]
mcmc_samples = compile_rv_inplace(
[],
mcmc_samples,
mode="JAX",
)()
tic4 = pd.Timestamp.now()
print("Transformation time = ", tic4 - tic3, file=sys.stdout)
posterior = {k: v for k, v in zip(mcmc_varnames, mcmc_samples)}
az_trace = az.from_dict(posterior=posterior)
return az_trace
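if __name__ == "__main__":
    # Hedged usage sketch only: assumes numpyro is installed and a pymc
    # v4-era model context; the model, variable names and sample counts
    # below are illustrative, not part of this module's API.
    import numpy as np
    import pymc as pm

    rng = np.random.default_rng(0)
    data = rng.normal(loc=1.0, scale=1.0, size=100)
    with pm.Model() as model:
        mu = pm.Normal("mu", mu=0.0, sigma=1.0)
        pm.Normal("obs", mu=mu, sigma=1.0, observed=data)
        idata = sample_numpyro_nuts(draws=500, tune=500, chains=2, model=model)
    print(float(idata.posterior["mu"].mean()))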
| 30.308511
| 100
| 0.686908
|
795050f2bc1a45cce9b4772507958da3b115697d
| 7,082
|
py
|
Python
|
hw5/sac/sac.py
|
tambetm/homework
|
2ed2c9cbecb3daf9b1d77023a45ad0b35da5b542
|
[
"MIT"
] | null | null | null |
hw5/sac/sac.py
|
tambetm/homework
|
2ed2c9cbecb3daf9b1d77023a45ad0b35da5b542
|
[
"MIT"
] | null | null | null |
hw5/sac/sac.py
|
tambetm/homework
|
2ed2c9cbecb3daf9b1d77023a45ad0b35da5b542
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import time
class SAC:
"""Soft Actor-Critic (SAC)
Original code from Tuomas Haarnoja, Soroush Nasiriany, and Aurick Zhou for CS294-112 Fall 2018
References
----------
[1] Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine, "Soft
Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning
with a Stochastic Actor," ICML 2018.
"""
def __init__(self,
alpha=1.0,
batch_size=256,
discount=0.99,
epoch_length=1000,
learning_rate=3e-3,
reparameterize=False,
tau=0.01,
**kwargs):
"""
Args:
"""
self._alpha = alpha
self._batch_size = batch_size
self._discount = discount
self._epoch_length = epoch_length
self._learning_rate = learning_rate
self._reparameterize = reparameterize
self._tau = tau
self._training_ops = []
def build(self, env, policy, q_function, q_function2, value_function,
target_value_function):
self._create_placeholders(env)
policy_loss = self._policy_loss_for(policy, q_function, q_function2, value_function)
value_function_loss = self._value_function_loss_for(
policy, q_function, q_function2, value_function)
q_function_loss = self._q_function_loss_for(q_function,
target_value_function)
if q_function2 is not None:
q_function2_loss = self._q_function_loss_for(q_function2,
target_value_function)
optimizer = tf.train.AdamOptimizer(
self._learning_rate, name='optimizer')
policy_training_op = optimizer.minimize(
loss=policy_loss, var_list=policy.trainable_variables)
value_training_op = optimizer.minimize(
loss=value_function_loss,
var_list=value_function.trainable_variables)
q_function_training_op = optimizer.minimize(
loss=q_function_loss, var_list=q_function.trainable_variables)
if q_function2 is not None:
q_function2_training_op = optimizer.minimize(
loss=q_function2_loss, var_list=q_function2.trainable_variables)
self._training_ops = [
policy_training_op, value_training_op, q_function_training_op
]
if q_function2 is not None:
self._training_ops += [q_function2_training_op]
self._target_update_ops = self._create_target_update(
source=value_function, target=target_value_function)
tf.get_default_session().run(tf.global_variables_initializer())
def _create_placeholders(self, env):
observation_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
self._observations_ph = tf.placeholder(
tf.float32,
shape=(None, observation_dim),
name='observation',
)
self._next_observations_ph = tf.placeholder(
tf.float32,
shape=(None, observation_dim),
name='next_observation',
)
self._actions_ph = tf.placeholder(
tf.float32,
shape=(None, action_dim),
name='actions',
)
self._rewards_ph = tf.placeholder(
tf.float32,
shape=(None, ),
name='rewards',
)
self._terminals_ph = tf.placeholder(
tf.float32,
shape=(None, ),
name='terminals',
)
def _policy_loss_for(self, policy, q_function, q_function2, value_function):
a, logp = policy(self._observations_ph)
q = q_function((self._observations_ph, a))
if q_function2 is not None:
q2 = q_function2((self._observations_ph, a))
q = tf.minimum(q, q2)
if not self._reparameterize:
### Problem 1.3.A
### YOUR CODE HERE
# TODO: try 0
b = value_function(self._observations_ph)
loss = tf.reduce_mean(logp * tf.stop_gradient(self._alpha * logp - q + b))
return loss
else:
### Problem 1.3.B
### YOUR CODE HERE
loss = tf.reduce_mean(self._alpha * logp - q)
return loss
def _value_function_loss_for(self, policy, q_function, q_function2, value_function):
### Problem 1.2.A
### YOUR CODE HERE
a, logp = policy(self._observations_ph)
v = value_function(self._observations_ph)
q = q_function((self._observations_ph, a))
if q_function2 is not None:
q2 = q_function2((self._observations_ph, a))
q = tf.minimum(q, q2)
loss = tf.reduce_mean((v - (q - self._alpha * logp))**2)
return loss
def _q_function_loss_for(self, q_function, target_value_function):
### Problem 1.1.A
### YOUR CODE HERE
q = q_function((self._observations_ph, self._actions_ph))
v = target_value_function(self._next_observations_ph)
loss = tf.reduce_mean((q - (self._rewards_ph + (1 - self._terminals_ph) * self._discount * v))**2)
return loss
def _create_target_update(self, source, target):
"""Create tensorflow operations for updating target value function."""
return [
tf.assign(target, (1 - self._tau) * target + self._tau * source)
for target, source in zip(target.trainable_variables, source.
trainable_variables)
]
def train(self, sampler, n_epochs=1000):
"""Return a generator that performs RL training.
Args:
env (`rllab.Env`): Environment used for training
policy (`Policy`): Policy used for training
initial_exploration_policy ('Policy'): Policy used for exploration
If None, then all exploration is done using policy
pool (`PoolBase`): Sample pool to add samples to
"""
self._start = time.time()
for epoch in range(n_epochs):
for t in range(self._epoch_length):
sampler.sample()
batch = sampler.random_batch(self._batch_size)
feed_dict = {
self._observations_ph: batch['observations'],
self._actions_ph: batch['actions'],
self._next_observations_ph: batch['next_observations'],
self._rewards_ph: batch['rewards'],
self._terminals_ph: batch['terminals'],
}
tf.get_default_session().run(self._training_ops, feed_dict)
tf.get_default_session().run(self._target_update_ops)
yield epoch
def get_statistics(self):
statistics = {
'Time': time.time() - self._start,
'TimestepsThisBatch': self._epoch_length,
}
return statistics
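if __name__ == "__main__":
    # Standalone sketch (NumPy, not TensorFlow) of the soft/Polyak target
    # update that _create_target_update builds:
    #     target <- (1 - tau) * target + tau * source
    # The values below are illustrative only.
    import numpy as np

    tau = 0.01
    source = np.array([1.0, 2.0, 3.0])
    target = np.zeros_like(source)
    for _ in range(500):
        target = (1 - tau) * target + tau * source
    print(target)  # gradually tracks `source` without large jumps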
| 37.470899
| 106
| 0.589946
|
795051883251d49686c772d4897f199bdbcf4566
| 3,572
|
py
|
Python
|
np cOMPLETENESS/plan_party/plan_party.py
|
lovroselic/Coursera
|
1598b4fe02eb3addbc847f4f3ec21fb5b6e0be08
|
[
"MIT"
] | null | null | null |
np cOMPLETENESS/plan_party/plan_party.py
|
lovroselic/Coursera
|
1598b4fe02eb3addbc847f4f3ec21fb5b6e0be08
|
[
"MIT"
] | null | null | null |
np cOMPLETENESS/plan_party/plan_party.py
|
lovroselic/Coursera
|
1598b4fe02eb3addbc847f4f3ec21fb5b6e0be08
|
[
"MIT"
] | null | null | null |
#uses python3
import sys
import threading
#DEBUG = False
DEBUG = True
#LOG = True
LOG = False
if DEBUG: test = open("tests\\03", "r")
global T
# This code is used to avoid stack overflow issues
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**26) # new thread will get stack of such size
class Vertex:
def __init__(self, weight):
self.weight = weight
self.children = []
self.fun = -1
def hasChildren(self, parent):
count = 0
for child in self.children:
if child != parent:
count += 1
if count > 0:
return True
else:
return False
def ReadTree():
if DEBUG:
size = int(test.readline())
else:
size = int(input())
if DEBUG:
tree = [Vertex(w) for w in map(int, test.readline().split())]
else:
tree = [Vertex(w) for w in map(int, input().split())]
for i in range(1, size):
if DEBUG:
a, b = list(map(int, test.readline().split()))
else:
a, b = list(map(int, input().split()))
tree[a - 1].children.append(b - 1)
tree[b - 1].children.append(a - 1)
return tree
def displayTree(tree, vertex, parent):
print("NODE", vertex + 1, "FUN", tree[vertex].fun)
for child in tree[vertex].children:
if child != parent:
displayTree(tree, child, vertex)
def dfs(tree, vertex, parent):
if LOG:
print("\n* vertex", vertex, "children", tree[vertex].children)
for child in tree[vertex].children:
if child != parent:
dfs(tree, child, vertex)
# This is a template function for processing a tree using depth-first search.
# Write your code here.
# You may need to add more parameters to this function for child processing.
#if LOG: print("** child", child)
#for leaf in tree[child].children:
#if LOG: print("*** leaf", leaf +1)
#vertex
if LOG:
print("*** vertex", vertex, "Parent:", parent)
print("\n**** NODE", vertex + 1, "W:",tree[vertex].weight, "fun:", tree[vertex].fun)
if tree[vertex].hasChildren(parent) != True:
if LOG: print("~~~~~~~~ no children")
tree[vertex].fun = tree[vertex].weight
else:
M1 = tree[vertex].weight
for u in tree[vertex].children:
if u != parent:
if LOG:
print("GC, child", u+1, tree[u].children)
for w in tree[u].children:
if w != vertex:
print("GC, gc", w+1, "fun", tree[w].fun)
M1 += tree[w].fun
if LOG: print("M1", M1)
M0 = 0
for u in tree[vertex].children:
if u != parent:
if LOG:
print("C, child", u+1, "fun", tree[u].fun)
M0 += tree[u].fun
if LOG: print("M0", M0)
tree[vertex].fun = max(M1, M0)
if LOG: print("FUN", tree[vertex].fun)
def MaxWeightIndependentTreeSubset(tree):
size = len(tree)
if size == 0:
return 0
dfs(tree, 0, -1)
# You must decide what to return.
if LOG:
print("\nlast fun", tree[0].fun)
print("\TREE ...........")
displayTree(tree, 0, -1)
return tree[0].fun
def main():
tree = ReadTree();
weight = MaxWeightIndependentTreeSubset(tree);
print(weight)
# This is to avoid stack overflow issues
threading.Thread(target=main).start()
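def _demo_small_tree():
    # Hedged sketch, not part of the assignment I/O: build a tiny 3-node star
    # directly (bypassing ReadTree/stdin) to illustrate the recurrence
    # fun(v) = max(weight(v) + sum of grandchildren fun, sum of children fun).
    tree = [Vertex(w) for w in (1, 5, 3)]   # weights of nodes 1, 2, 3
    for a, b in ((1, 2), (1, 3)):           # edges in 1-based labels
        tree[a - 1].children.append(b - 1)
        tree[b - 1].children.append(a - 1)
    dfs(tree, 0, -1)
    return tree[0].fun                      # 8: take the two leaves (5 + 3)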
| 27.267176
| 92
| 0.532195
|
795051bc305026300f903b806801e88b0121e674
| 2,806
|
py
|
Python
|
render/cam_render.py
|
liruilong940607/A-NeRF
|
19cb6c4fd389266214ac0d7215a44011cb1bebf5
|
[
"MIT"
] | 110
|
2021-12-07T13:30:47.000Z
|
2022-03-31T16:17:09.000Z
|
render/cam_render.py
|
liruilong940607/A-NeRF
|
19cb6c4fd389266214ac0d7215a44011cb1bebf5
|
[
"MIT"
] | 12
|
2021-12-20T10:04:13.000Z
|
2022-03-28T18:15:41.000Z
|
render/cam_render.py
|
liruilong940607/A-NeRF
|
19cb6c4fd389266214ac0d7215a44011cb1bebf5
|
[
"MIT"
] | 11
|
2021-12-07T13:59:16.000Z
|
2022-03-28T09:00:24.000Z
|
'''
MIT License
Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#from OpenGL.GLUT import *
from .render import Render
class CamRender(Render):
def __init__(self, width=1600, height=1200, name='Cam Renderer',
program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=True):
Render.__init__(self, width, height, name, program_files, color_size, ms_rate, egl=egl)
self.camera = None
#glutDisplayFunc(self.display)
#glutKeyboardFunc(self.keyboard)
def set_camera(self, camera):
self.camera = camera
self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix()
def set_matrices(self, projection, modelview):
self.projection_matrix = projection
self.model_view_matrix = modelview
def keyboard(self, key, x, y):
# up
eps = 1
# print(key)
if key == b'w':
self.camera.center += eps * self.camera.direction
elif key == b's':
self.camera.center -= eps * self.camera.direction
if key == b'a':
self.camera.center -= eps * self.camera.right
elif key == b'd':
self.camera.center += eps * self.camera.right
if key == b' ':
self.camera.center += eps * self.camera.up
elif key == b'x':
self.camera.center -= eps * self.camera.up
elif key == b'i':
self.camera.near += 0.1 * eps
self.camera.far += 0.1 * eps
elif key == b'o':
self.camera.near -= 0.1 * eps
self.camera.far -= 0.1 * eps
self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix()
def show(self):
if not self.egl:
glutMainLoop()
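if __name__ == "__main__":
    # Hedged, OpenGL-free sketch of what keyboard() does: 'w'/'s' translate the
    # camera centre along its view direction, 'a'/'d' along the right vector,
    # space/'x' along up. The stub camera below is illustrative only and simply
    # returns identity matrices from get_gl_matrix().
    import numpy as np

    class _StubCamera:
        def __init__(self):
            self.center = np.zeros(3)
            self.direction = np.array([0.0, 0.0, -1.0])
            self.right = np.array([1.0, 0.0, 0.0])
            self.up = np.array([0.0, 1.0, 0.0])
            self.near, self.far = 0.1, 100.0

        def get_gl_matrix(self):
            return np.eye(4), np.eye(4)

    class _StubRenderer:
        pass

    renderer = _StubRenderer()
    renderer.camera = _StubCamera()
    CamRender.keyboard(renderer, b'w', 0, 0)  # one step forward
    print(renderer.camera.center)             # -> [ 0.  0. -1.]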
| 37.918919
| 95
| 0.665716
|
795053dd095bcb1f06416179b6199b4f9eaadce2
| 1,125
|
py
|
Python
|
script/plot.py
|
Tricker-z/CSE5001-GA-mTSP
|
108916cafecbe325302dbce4ddd07c477a0c5f79
|
[
"Apache-2.0"
] | 3
|
2021-12-14T00:46:55.000Z
|
2021-12-19T08:41:21.000Z
|
script/plot.py
|
Tricker-z/CSE5001-GA-mTSP
|
108916cafecbe325302dbce4ddd07c477a0c5f79
|
[
"Apache-2.0"
] | null | null | null |
script/plot.py
|
Tricker-z/CSE5001-GA-mTSP
|
108916cafecbe325302dbce4ddd07c477a0c5f79
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
routes = [
[[37, 52], [31, 62], [37, 69], [43, 67], [49, 49], [52, 41], [38, 46], [45, 35], [52, 33], [56, 37], [37, 52]],
[[37, 52], [48, 28], [51, 21], [25, 55], [36, 16], [39, 10], [46, 10], [59, 15], [58, 27], [5, 64], [16, 57], [27, 68], [32, 39], [30, 48], [37, 52], [17, 63], [62, 42], [58, 48], [57, 58], [63, 69], [62, 63], [37, 52]],
[[37, 52], [52, 64], [42, 57], [42, 41], [40, 30], [30, 40], [21, 47], [37, 52]],
[[37, 52], [25, 32], [31, 32], [17, 33], [20, 26], [27, 23], [32, 22], [30, 15], [21, 10], [13, 13], [10, 17], [5, 6], [37, 52]],
[[37, 52], [5, 25], [7, 38], [12, 42], [8, 52], [61, 33], [37, 52]]
]
colors = ['#0085c3', '#6600cc', '#ff3333', '#ff8000', '#009999']
plt.figure(figsize = (75, 75))
plt.xlim(0, 75)
plt.ylim(0, 75)
for i, dots in enumerate(routes):
for idx in range(len(dots)):
plt.plot(dots[idx][0], dots[idx][1], 'o', color=colors[i])
for idx in range(len(dots) - 1):
start = (dots[idx][0], dots[idx+1][0])
end = (dots[idx][1], dots[idx+1][1])
plt.plot(start, end, color=colors[i])
plt.show()
| 40.178571
| 221
| 0.468444
|
7950546a606d414867746484240e0a7528c08c0f
| 111,532
|
py
|
Python
|
diofant/combinatorics/perm_groups.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-22T09:34:15.000Z
|
2021-08-22T09:34:15.000Z
|
diofant/combinatorics/perm_groups.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/combinatorics/perm_groups.py
|
project-kotinos/diofant___diofant
|
882549ac3a4dac238695aa620c02fce6ca33f9d3
|
[
"BSD-3-Clause"
] | null | null | null |
from itertools import islice
from math import log
from random import choice, randrange
from ..core import Basic
from ..functions import factorial
from ..ntheory import sieve
from ..utilities import has_variety
from ..utilities.iterables import is_sequence, uniq
from ..utilities.randtest import _randrange
from .permutations import (Cycle, Permutation, _af_commutes_with, _af_invert,
_af_pow, _af_rmul, _af_rmuln)
from .util import (_base_ordering, _check_cycles_alt_sym,
_distribute_gens_by_base, _handle_precomputed_bsgs,
_orbits_transversals_from_bsgs, _strip, _strip_af,
_strong_gens_from_distr)
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
"""The class defining a Permutation group.
PermutationGroup([p1, p2, ..., pn]) returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> Permutation.print_cyclic = True
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
diofant.combinatorics.polyhedron.Polyhedron,
diofant.combinatorics.permutations.Permutation
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
[2] Seress, A.
"Permutation Group Algorithms"
[3] https://en.wikipedia.org/wiki/Schreier_vector
[4] https://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
[6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
[8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] https://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] https://groupprops.subwiki.org/wiki/Derived_subgroup
[12] https://en.wikipedia.org/wiki/Nilpotent_group
[13] https://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
"""
is_group = True
def __new__(cls, *args, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is False.
"""
if not args:
args = [Permutation()]
else:
args = list(args[0] if is_sequence(args[0]) else args)
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if kwargs.pop('dups', True):
args = list(uniq([_af_new(list(a)) for a in args]))
obj = Basic.__new__(cls, *args, **kwargs)
obj._generators = args
obj._order = None
obj._center = []
obj._is_abelian = None
obj._is_transitive = None
obj._is_sym = None
obj._is_alt = None
obj._is_primitive = None
obj._is_nilpotent = None
obj._is_solvable = None
obj._is_trivial = None
obj._transitivity_degree = None
obj._max_div = None
obj._r = len(obj._generators)
obj._degree = obj._generators[0].size
# these attributes are assigned after running schreier_sims
obj._base = []
obj._strong_gens = []
obj._basic_orbits = []
obj._transversals = []
# these attributes are assigned after running _random_pr_init
obj._random_gens = []
return obj
def __getitem__(self, i):
return self._generators[i]
def __contains__(self, i):
"""Return True if `i` is contained in PermutationGroup.
Examples
========
>>> p = Permutation(1, 2, 3)
>>> Permutation(3) in PermutationGroup(p)
True
"""
if not isinstance(i, Permutation):
raise TypeError("A PermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return self.contains(i)
def __len__(self):
return len(self._generators)
def __eq__(self, other):
"""Return True if PermutationGroup generated by elements in the
group are same i.e they represent the same PermutationGroup.
Examples
========
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G == H
True
"""
if not isinstance(other, PermutationGroup):
return False
set_self_gens = set(self.generators)
set_other_gens = set(other.generators)
# before reaching the general case there are also certain
# optimisation and obvious cases requiring less or no actual
# computation.
if set_self_gens == set_other_gens:
return True
# in the most general case it will check that each generator of
# one group belongs to the other PermutationGroup and vice-versa
for gen1 in set_self_gens:
if not other.contains(gen1):
return False
for gen2 in set_other_gens:
if not self.contains(gen2):
return False
return True
def __hash__(self):
return super().__hash__()
def __mul__(self, other):
"""Return the direct product of two permutation groups as a permutation
group.
This implementation realizes the direct product by shifting
the index set for the generators of the second group: so if we have
G acting on n1 points and H acting on n2 points, G*H acts on n1 + n2
points.
Examples
========
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
Permutation(9)(0, 1, 2, 3, 4),
Permutation(5, 6, 7, 8, 9)])
>>> H.order()
25
"""
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group ``G`` with a set of generators
``S``. For the initialization ``_random_pr_init``, a list ``R`` of
``\max\{r, |S|\}`` group generators is created as the attribute
``G._random_gens``, repeating elements of ``S`` if necessary, and the
identity element of ``G`` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of ``G`` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from ``\{gh, g(~h), hg, (~h)g\}``. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across ``G`` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
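    # Illustrative aside (hypothetical standalone sketch, not used by the
    # class): one product-replacement step on a list R of permutations whose
    # last entry is the accumulator, mirroring what random_pr() does.
    #
    #     from random import choice, randrange
    #
    #     def _pr_step(R):
    #         g = randrange(len(R) - 1)
    #         h = choice([i for i in range(len(R) - 1) if i != g])
    #         R[g] = choice([R[g]*R[h], R[g]*~R[h], R[h]*R[g], ~R[h]*R[g]])
    #         R[-1] = R[-1]*R[g]
    #         return R[-1]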
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] https://web.archive.org/web/20170105021515/http://www.algorithmist.com:80/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
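    # Illustrative aside (hypothetical helper, not part of the class): the same
    # find-with-path-compression idea on a plain ``parents`` list, where a root
    # is an index that is its own parent.
    #
    #     def _find(parents, x):
    #         root = x
    #         while parents[root] != root:
    #             root = parents[root]
    #         while parents[x] != root:            # second pass compresses the path
    #             parents[x], x = root, parents[x]
    #         return root
    #
    #     parents = [0, 0, 1, 2]
    #     assert _find(parents, 3) == 0
    #     assert parents == [0, 0, 0, 0]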
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
For a permutation group ``G``, a base is a sequence of points
``B = (b_1, b_2, ..., b_k)`` such that no element of ``G`` apart
from the identity fixes all the points in ``B``. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of ``B`` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
If a base for a group ``G`` is given by ``(b_1, b_2, ..., b_k)``, this
function returns a base ``(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)``,
where ``i`` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[Permutation(0, 1, 2, 3), Permutation(3)(0, 1), Permutation(1, 3, 2),
Permutation(2, 3), Permutation(1, 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
``|\beta_{i+1}^{\left\langle T\right\rangle}|`` should be replaced by
``|\beta_{i}^{\left\langle T\right\rangle}|``, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
// len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out member of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
If ``(b_1, b_2, ..., b_k)`` is a base for a group ``G``, and
``G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}`` is the ``i``-th basic stabilizer
(so that ``G^{(1)} = G``), the ``i``-th basic orbit relative to this base
is the orbit of ``b_i`` under ``G^{(i)}``. See [1], pp. 87-89 for more
information.
Examples
========
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
"""
Return a chain of stabilizers relative to a base and strong generating
set.
The ``i``-th basic stabilizer ``G^{(i)}`` relative to a base
``(b_1, b_2, ..., b_k)`` is ``G_{b_1, b_2, ..., b_{i-1}}``. For more
information, see [1], pp. 87-89.
Examples
========
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
Permutation(3)(0, 1, 2),
Permutation(1, 2, 3)])
PermutationGroup([
Permutation(1, 2, 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: Permutation(3),
1: Permutation(3)(0, 1, 2),
2: Permutation(3)(0, 2, 1),
3: Permutation(0, 3, 1)},
{1: Permutation(3),
2: Permutation(1, 2, 3),
3: Permutation(1, 3, 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def center(self):
r"""
Return the center of a permutation group.
The center for a group ``G`` is defined as
``Z(G) = \{z\in G | \forall g\in G, zg = gz \}``,
the set of elements of ``G`` that commute with all elements of ``G``.
It is equal to the centralizer of ``G`` inside ``G``, and is naturally a
subgroup of ``G`` ([9]).
Examples
========
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
``C_G(S) = \{ g \in G | gs = sg \forall s \in S\}`` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
def trivial_test(x):
return True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups ``H, G`` is equal to the normal closure
of the commutators of all the generators, i.e. ``hgh^{-1}g^{-1}`` for ``h``
a generator of ``H`` and ``g`` a generator of ``G`` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
If ``g`` is an element of ``G`` then it can be written as the product
        of permutations drawn from the Schreier-Sims coset decomposition.
The permutations returned in ``f`` are those for which
the product gives ``g``: ``g = f[n]*...f[1]*f[0]`` where ``n = len(B)``
and ``B = G.base``. f[i] is one of the permutations in
``self._basic_orbits[i]``.
If factor_index==True,
returns a tuple ``[b[0],..,b[n]]``, where ``b[i]``
belongs to ``self._basic_orbits[i]``
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from u. See below that a factor from u1 and u2
and the Identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True); f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
see util._strip
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def coset_rank(self, g):
"""rank using Schreier-Sims representation
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
coset decomposition
The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
Permutation(7)(2, 4)(3, 5)
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""unrank using Schreier-Sims representation
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
"""
if rank < 0 or rank >= self.order():
return
base = self._base
transversals = self._transversals
basic_orbits = self._basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
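    # Worked aside: coset_unrank is plain mixed-radix decomposition. With
    # hypothetical transversal sizes (4, 3, 2), rank 16 yields digits
    # divmod(16, 4) -> (4, 0), divmod(4, 3) -> (1, 1), divmod(1, 2) -> (0, 1),
    # i.e. (0, 1, 1), and coset_rank recombines them as 0 + 4*(1 + 3*1) = 16.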
@property
def degree(self):
"""Returns the size of the permutations in the group.
        The number of generators used to define the group is given by
        len(group); the number of permutations that can be generated
by the group is given by group.order().
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
See Also
========
order
"""
return self._degree
@property
def elements(self):
"""Returns all the elements of the permutation group in
a list
"""
return set(list(islice(self.generate(), None)))
def derived_series(self):
r"""Return the derived series for the group.
The derived series for a group ``G`` is defined as
``G = G_0 > G_1 > G_2 > \ldots`` where ``G_i = [G_{i-1}, G_{i-1}]``,
i.e. ``G_i`` is the derived subgroup of ``G_{i-1}``, for
``i\in\mathbb{N}``. When we have ``G_k = G_{k-1}`` for some
``k\in\mathbb{N}``, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order ``G = G_0, G_1, G_2, \ldots``.
Examples
========
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while not current.is_subgroup(next):
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
r"""Compute the derived subgroup.
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g, h\in G`` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if ct not in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method="coset", af=False):
"""Return iterator to generate the elements of the group
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If af = True it yields the array form of the permutations
Examples
========
>>> Permutation.print_cyclic = True
The permutation group given in the tetrahedron object is also
        a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group
True
Also the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate())); J
PermutationGroup([
Permutation(0, 1)(2, 3),
Permutation(3),
Permutation(1, 2, 3),
Permutation(1, 3, 2),
Permutation(0, 3, 1),
Permutation(0, 2, 3),
Permutation(0, 3)(1, 2),
Permutation(0, 1, 3),
Permutation(3)(0, 2, 1),
Permutation(0, 3, 2),
Permutation(3)(0, 1, 2),
Permutation(0, 2)(1, 3)])
>>> _.is_group
True
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm
If af == True it yields the array form of the permutations
References
==========
[1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = {tuple(idn)}
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If af = True it yields the array form of the permutations
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[Permutation(1, 2), Permutation(2)(0, 1)]
"""
return self._generators
def contains(self, g, strict=True):
"""Test if permutation ``g`` belong to self, ``G``.
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not True, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, diofant.core.basic.Basic.has
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability eps.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately ``\log(2)/\log(n)``
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
diofant.combinatorics.util._check_cycles_alt_sym
"""
if _random_prec is None:
n = self.degree
if n < 8:
return False
if not self.is_transitive():
return False
if n < 17:
c_n = 0.34
else:
c_n = 0.57
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
for i in range(N_eps):
perm = self.random_pr()
if _check_cycles_alt_sym(perm):
return True
return False
else:
for i in range(_random_prec['N_eps']):
perm = _random_prec[i]
if _check_cycles_alt_sym(perm):
return True
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
A group ``G`` is nilpotent if it has a central series of finite length.
Alternatively, ``G`` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr, strict=True):
"""Test if G=self is a normal subgroup of gr.
G is normal in gr if
for each g2 in G, g1 in gr, g = g1*g2*g1**-1 belongs to G
        It is sufficient to check this for each g1 in gr.generators and
        g2 in G.generators.
Examples
========
>>> Permutation.print_cyclic = True
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
d_self = self.degree
d_gr = gr.degree
new_self = self.copy()
if not strict and d_self != d_gr:
if d_self < d_gr:
new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)])
else:
gr = PermGroup(gr.generators + [Permutation(d_self - 1)])
gens2 = [p._array_form for p in new_self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not new_self.coset_factor(p, True):
return False
return True
def is_primitive(self, randomized=True):
r"""Test if a group is primitive.
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form ``\{0, k\}`` for ``k``
ranging over representatives for the orbits of ``G_0``, the stabilizer of
``0``. This algorithm has complexity ``O(n^2)`` where ``n`` is the degree
of the group, and will perform badly if ``G_0`` is small.
There are two implementations offered: one finds ``G_0``
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of ``G_0`` using ``random_stab``,
hoping that they generate a subgroup of ``G_0`` with not too many more
orbits than G_0 (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
n = self.degree
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and self.minimal_block([0, x]) != [0]*n:
self._is_primitive = False
return False
self._is_primitive = True
return True
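# Illustrative cross-check (added; not in the original): both code paths must
# agree, since the deterministic branch simply replaces the random stabilizer
# generators with the full stabilizer of 0.  Doctest-style sketch:
#
# >>> DihedralGroup(10).is_primitive(randomized=False)   # False, as in the docstring
# >>> CyclicGroup(7).is_primitive()    # True: prime degree leaves no room for blocks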
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return True if all elements of self belong to G.
If ``strict`` is False then if ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to False:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if not isinstance(G, PermutationGroup):
return False
if self == G:
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
def is_transitive(self, strict=True):
"""Test if the group is transitive.
A group is transitive if it has a single orbit.
If ``strict`` is False the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group ``G`` is the series
``G = G_0 > G_1 > G_2 > \ldots`` where
``G_k = [G, G_{k-1}]``, i.e. every term after the first is equal to the
commutator of ``G`` and the previous term in the series ([1], p.29).
Returns
=======
A list of permutation groups in the order
``G = G_0, G_1, G_2, \ldots``
Examples
========
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
while not current.is_subgroup(next):
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Notes
=====
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``diofant.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
``O(|points||S|)``. ([1], pp. 83-87; [7]).
Examples
========
>>> D = DihedralGroup(10)
>>> D.minimal_block([0, 5])
[0, 6, 2, 8, 4, 0, 6, 2, 8, 4]
>>> D.minimal_block([0, 1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
temp = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(temp, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(temp), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
return parents
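# Interpretive note (added; not in the original docstring): the returned list
# maps every point to the representative of its block, so the first example
# above, [0, 6, 2, 8, 4, 0, 6, 2, 8, 4], encodes the block system
# {0, 5}, {1, 6}, {2, 7}, {3, 8}, {4, 9} of DihedralGroup(10), while the
# all-zero answer for [0, 1] means the only block containing both 0 and 1 is
# the whole set, i.e. the generated block system is trivial.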
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
is defined as the intersection of all normal subgroups of ``G`` that
contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
while _loop:
Z._random_pr_init(r=10, n=10)
for i in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h ^ g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h ^ g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
{0, 1, 2}
>>> G.orbit([0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
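# Illustrative sketch of the 'tuples' action (added; not in the original): with
# the generator used in the docstring above, the orbit of (0, 4) as an ordered
# tuple has lcm(3, 4) = 12 elements, e.g. (0, 4) -> (1, 5) -> (2, 6) -> (0, 3) -> ...
#
# >>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
# >>> G = PermutationGroup([a])
# >>> len(G.orbit([0, 4], 'tuples'))   # expected: 12
# >>> len(G.orbit([0, 4], 'sets'))     # also 12 here, since {0, 1, 2} and {3, ..., 6} never mix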
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> Permutation.print_cyclic = True
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
Permutation(0, 4, 1, 2, 3)
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of self, ordered according to lowest element
in each orbit.
Examples
========
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[{0, 2, 3, 4, 6}, {1, 5}]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
Note that len(group) gives the number of generators (not the order),
as the example below shows; the length of each permutation in the
group is given by group.size.
Examples
========
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[Permutation(2), Permutation(2)(0, 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
basic_transversals = self.basic_transversals
m = 1
for x in basic_transversals:
m *= len(x)
self._order = m
return m
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
For a permutation group ``G`` and a set of points
``\{p_1, p_2,\ldots, p_k\}``, the pointwise stabilizer of
``p_1, p_2, \ldots, p_k`` is defined as
``G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}`` ([1], p.20).
It is a subgroup of ``G``.
Examples
========
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When ``incremental == True``, rather than the obvious implementation
using successive calls to ``.stabilizer()``, this uses the incremental
Schreier-Sims algorithm to obtain a base whose starting segment is the
given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
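# Illustrative cross-check (added; not in the original): both branches return
# the same subgroup, so the incremental variant can be validated against the
# obvious implementation:
#
# >>> S = SymmetricGroup(7)
# >>> S.pointwise_stabilizer([2, 3, 5], incremental=False).is_subgroup(
# ...     S.pointwise_stabilizer([2, 3, 5]))
# True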
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order given. This is mainly used for testing purposes.
Examples
========
>>> Permutation.print_cyclic = True
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
Permutation(0, 1)(2, 3)
>>> G.make_perm(3, [0, 1, 0])
Permutation(0, 2, 3, 1)
>>> G.make_perm([0, 1, 0])
Permutation(0, 2, 3, 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for i in range(n):
p = self[randrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element."""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
For the details of the product replacement algorithm, see
``_random_pr_init`` In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
It computes the generators of the chain of stabilizers
G > G_{b_1} > .. > G_{b_1,..,b_r} > 1
in which G_{b_1,..,b_i} stabilizes b_1,..,b_i,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
h_1*..*h_s.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: Permutation(2)(0, 1), 1: Permutation(2), 2: Permutation(1, 2)},
{0: Permutation(2), 2: Permutation(0, 2)}]
"""
if self._transversals:
return
base, strong_gens = self.schreier_sims_incremental()
self._base = base
self._strong_gens = strong_gens
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
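# Illustrative sanity check (added; not in the original): after schreier_sims()
# the product of the basic transversal sizes equals the group order, which is
# exactly how order() computes it further below.
#
# >>> A = AlternatingGroup(7)
# >>> A.schreier_sims()
# >>> prod = 1
# >>> for t in A.basic_transversals: prod *= len(t)
# >>> prod == A.order()   # expected: True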
def schreier_sims_incremental(self, base=None, gens=None):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
base_len = len(_base)
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True))
orbs[i] = list(transversals[i])
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
for beta, u_beta in list(transversals[i].items()):
for gen in strong_gens_distr[i]:
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
h, j = _strip_af(schreier_gen, _base, orbs, transversals, i)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l] =\
dict(_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True))
orbs[l] = list(transversals[l])
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
# build the strong generating set
strong_gens = list(uniq(i for gens in strong_gens_distr for i in gens))
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most ``2^{-consec\_succ}``,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) # doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
form a BSGS with probability at least ``1 - 1/\text{consec\_succ}``.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i])
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l])
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element doesn't belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
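# Interpretive note (added; not in the original docstring): in the vector above,
# v[alpha] == -1 marks the starting point, v[beta] is the index of the generator
# used to reach beta, and None marks points outside the orbit; orbit_rep() walks
# these indices backwards to build an element g with g(alpha) == beta, e.g.
#
# >>> v = G.schreier_vector(0)                     # G as in the example above
# >>> G.orbit_rep(0, 3, schreier_vector=v)(0)      # expected: 3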
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
Permutation(5)(0, 4)(1, 3),
Permutation(5)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
r"""Return a strong generating set from the Schreier-Sims algorithm.
A generating set ``S = \{g_1, g_2, ..., g_t\}`` for a permutation group
``G`` is a strong generating set relative to the sequence of points
(referred to as a "base") ``(b_1, b_2, ..., b_k)`` if, for
``1 \leq i \leq k`` we have that the intersection of the pointwise
stabilizer ``G^{(i+1)} := G_{b_1, b_2, ..., b_i}`` with ``S`` generates
the pointwise stabilizer ``G^{(i+1)}``. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> D = DihedralGroup(4)
>>> D.strong_gens
[Permutation(0, 1, 2, 3), Permutation(0, 3)(1, 2), Permutation(1, 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
not to satisfy ``prop`` based on where ``g`` sends the first ``l + 1``
base points.
init_subgroup
if a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from diofant.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> prop_even = lambda x: x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice, and for
some computations, it's not terrible.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
stabilizer. The book mentions that this can be done by using
``.baseswap(...)``, however the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
return [min(orbit, key=lambda x: base_ordering[x])
for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
def trivial_test(x):
return True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
# lines 14 and 15: update variables used for minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until in the first branch not fully
# searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
# accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
r"""Compute the degree of transitivity of the group.
A permutation group ``G`` acting on ``\Omega = \{0, 1, ..., n-1\}`` is
``k``-fold transitive, if, for any k points
``(a_1, a_2, ..., a_k)\in\Omega`` and any k points
``(b_1, b_2, ..., b_k)\in\Omega`` there exists ``g\in G`` such that
``g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k``
The degree of transitivity of ``G`` is the maximum ``k`` such that
``G`` is ``k``-fold transitive. ([8])
Examples
========
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
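# Illustrative values (added; not in the original): the symmetric group S_n is
# n-fold transitive and the alternating group A_n is (n-2)-fold transitive,
# while the dihedral groups of degree at least 4 are only 1-fold transitive:
#
# >>> SymmetricGroup(4).transitivity_degree    # expected: 4
# >>> AlternatingGroup(4).transitivity_degree  # expected: 2
# >>> DihedralGroup(4).transitivity_degree     # expected: 1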
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
{0, 1, 2}
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = tuple(gen[x] for x in b)
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = frozenset(gen[x] for x in b)
if temp not in used:
orb.append(temp)
used.add(temp)
return {tuple(x) for x in orb}
def _orbits(degree, generators):
"""Compute the orbits of G.
If rep=False it returns a list of sets else it returns a list of
representatives of the orbits
Examples
========
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[{0, 1, 2}]
"""
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
``generators`` : generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
if af is True, the transversal elements are given in array form
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[Permutation(5),
Permutation(0, 1, 2, 3, 4, 5),
Permutation(0, 5)(1, 4)(2, 3),
Permutation(0, 2, 4)(1, 3, 5),
Permutation(5)(0, 4)(1, 3),
Permutation(0, 3)(1, 4)(2, 5)]
"""
tr = [(alpha, list(range(degree)))]
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
for gen in gens:
temp = gen[x]
if used[temp] is False:
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
return tr
if af:
return [y for _, y in tr]
return [_af_new(y) for _, y in tr]
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
``degree`` : degree of ``G``
``generators`` : generators of ``G``
Examples
========
>>> Permutation.print_cyclic = True
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[Permutation(5)(0, 4)(1, 3), Permutation(5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
PermGroup = PermutationGroup
| 33.982937 | 106 | 0.530493 |
7950548f401a478279dbcdad80347932d03f8a6e | 10,487 | py | Python | backends/izhikevich.py | rgerkin/jit_hub | d132225fae29286cf7ac6f08bd4099fd65dcab0f | ["CC0-1.0"] | null | null | null | backends/izhikevich.py | rgerkin/jit_hub | d132225fae29286cf7ac6f08bd4099fd65dcab0f | ["CC0-1.0"] | null | null | null | backends/izhikevich.py | rgerkin/jit_hub | d132225fae29286cf7ac6f08bd4099fd65dcab0f | ["CC0-1.0"] | null | null | null |
from sciunit.models.backends import Backend
from quantities import mV, ms, s, V
from neo import AnalogSignal
import numpy as np
import quantities as pq
import numpy
import copy
from numba import jit#, autojit
import cython
class JIT_IzhiBackend(Backend):
def init_backend(self):
super().init_backend()
self.attrs = self.model.attrs
def _backend_run(self):
"""Must return a neo.core.AnalogSignal"""
if self.vM is not None:
return self.vM
else:
everything = copy.copy(self.model.attrs)
if hasattr(self,'Iext'):
everything.update({'Iext':self.Iext})
if 'current_inj' in everything.keys():
everything.pop('current_inj',None)
everything = copy.copy(self.model.attrs)
self.model.attrs['celltype'] = round(self.model.attrs['celltype'])
if self.model.attrs['celltype'] <= 3:
everything.pop('celltype',None)
v = get_vm_matlab_one_two_three(**everything)
else:
if self.model.attrs['celltype'] == 4:
v = get_vm_matlab_four(**everything)
if self.model.attrs['celltype'] == 5:
v = get_vm_matlab_five(**everything)
if self.model.attrs['celltype'] == 6:
v = get_vm_matlab_six(**everything)
if self.model.attrs['celltype'] == 7:
#print('gets into multiple regimes',self.attrs['celltype'])
v = get_vm_matlab_seven(**everything)
return AnalogSignal(v, units=pq.mV,
sampling_period=0.125*pq.ms)
def inject_ramp_current(self, t_stop, gradient=0.000015, onset=30.0, baseline=0.0, t_start=0.0):
times, amps = self.ramp(gradient, onset, t_stop, baseline=0.0, t_start=0.0)
everything = copy.copy(self.attrs)
everything.update({'ramp':amps})
everything.update({'start':onset})
everything.update({'stop':t_stop})
if 'current_inj' in everything.keys():
everything.pop('current_inj',None)
self.attrs['celltype'] = round(self.attrs['celltype'])
if np.bool_(self.attrs['celltype'] <= 3):
everything.pop('celltype',None)
v = get_vm_matlab_one_two_three(**everything)
else:
if np.bool_(self.attrs['celltype'] == 4):
v = get_vm_matlab_four(**everything)
if np.bool_(self.attrs['celltype'] == 5):
v = get_vm_matlab_five(**everything)
if np.bool_(self.attrs['celltype'] == 6):
v = get_vm_matlab_six(**everything)
if np.bool_(self.attrs['celltype'] == 7):
v = get_vm_matlab_seven(**everything)
self.attrs
self.vM = AnalogSignal(v,
units=pq.mV,
sampling_period=0.125*pq.ms)
return self.vM
@cython.boundscheck(False)
@cython.wraparound(False)
def inject_square_current(self, amplitude=100*pq.pA, delay=10*pq.ms, duration=500*pq.ms):
"""
Inputs: current : a dictionary with exactly three items, whose keys are: 'amplitude', 'delay', 'duration'
Example: current = {'amplitude':float*pq.pA, 'delay':float*pq.ms, 'duration':float*pq.ms}}
where \'pq\' is a physical unit representation, implemented by casting float values to the quanitities \'type\'.
Description: A parameterized means of applying current injection into defined
Currently only single section neuronal models are supported, the neurite section is understood to be simply the soma.
"""
attrs = self.model.attrs
if attrs is None:
attrs = self.model.default_attrs
self.attrs = attrs
square = True
amplitude = float(amplitude.magnitude)
duration = float(duration)
delay = float(delay)
#print(amplitude,duration,delay)
tMax = delay + duration #+ 200.0#*pq.ms
#self.set_stop_time(tMax*pq.ms)
tMax = self.tstop = float(tMax)
N = int(tMax/0.125)
Iext = np.zeros(N)
delay_ind = int((delay/tMax)*N)
duration_ind = int((duration/tMax)*N)
Iext[0:delay_ind-1] = 0.0
Iext[delay_ind:delay_ind+duration_ind-1] = amplitude
Iext[delay_ind+duration_ind::] = 0.0
self.Iext = None
self.Iext = Iext
everything = copy.copy(self.attrs)
everything.update({'N':len(Iext)})
#everything.update({'Iext':Iext})
everything.update({'start':delay_ind})
everything.update({'stop':delay_ind+duration_ind})
everything.update({'amp':amplitude})
if 'current_inj' in everything.keys():
everything.pop('current_inj',None)
#import pdb; pdb.set_trace()
self.attrs['celltype'] = round(self.attrs['celltype'])
if np.bool_(self.attrs['celltype'] <= 3):
everything.pop('celltype',None)
v = get_vm_matlab_one_two_three(**everything)
else:
if np.bool_(self.attrs['celltype'] == 4):
v = get_vm_matlab_four(**everything)
if np.bool_(self.attrs['celltype'] == 5):
v = get_vm_matlab_five(**everything)
if np.bool_(self.attrs['celltype'] == 6):
v = get_vm_matlab_six(**everything)
if np.bool_(self.attrs['celltype'] == 7):
v = get_vm_matlab_seven(**everything)
self.attrs
self.vM = AnalogSignal(v,
units=pq.mV,
sampling_period=0.125*pq.ms)
#print(np.std(v))
return self.vM
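# Worked example of the index arithmetic above (added for illustration only):
# with delay = 10 ms, duration = 500 ms and dt = 0.125 ms we get tMax = 510 ms,
# N = int(510 / 0.125) = 4080 samples, delay_ind = int((10 / 510) * 4080) = 80
# and duration_ind = int((500 / 510) * 4080) = 4000, so Iext[80:4079] holds the
# pulse amplitude.  Note that (delay / tMax) * N == delay / dt, i.e. delay_ind
# is simply the delay expressed in samples.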
@jit(nopython=True)
def get_vm_matlab_four(C=89.7960714285714,
a=0.01, b=15, c=-60, d=10, k=1.6,
vPeak=(86.364525297619-65.2261863636364),
vr=-65.2261863636364, vt=-50,celltype=1, N=0,start=0,stop=0,amp=0,ramp=None):
tau = dt = 0.125
if ramp is not None:
N = len(ramp)
v = np.zeros(N)
u = np.zeros(N)
v[0] = vr
for i in range(N-1):
I = 0
if ramp is not None:
I = ramp[i]
elif start <= i <= stop:
I = amp
# forward Euler method
v[i+1] = v[i] + tau * (k * (v[i] - vr) * (v[i] - vt) - u[i] + I) / C
u[i+1] = u[i]+tau*a*(b*(v[i]-vr)-u[i]); # Calculate recovery variable
if v[i+1] > (vPeak - 0.1*u[i+1]):
v[i] = vPeak - 0.1*u[i+1]
v[i+1] = c + 0.04*u[i+1]; # Reset voltage
if (u[i]+d)<670:
u[i+1] = u[i+1]+d; # Reset recovery variable
else:
u[i+1] = 670;
return v
@jit(nopython=True)
def get_vm_matlab_five(C=89.7960714285714,
a=0.01, b=15, c=-60, d=10, k=1.6,
vPeak=(86.364525297619-65.2261863636364),
vr=-65.2261863636364, vt=-50,celltype=1, N=0,start=0,stop=0,amp=0,ramp=None):
tau= dt = 0.125; #dt
if ramp is not None:
N = len(ramp)
v = np.zeros(N)
u = np.zeros(N)
v[0] = vr
for i in range(N-1):
I = 0
if ramp is not None:
I = ramp[i]
elif start <= i <= stop:
I = amp
# forward Euler method
v[i+1] = v[i] + tau * (k * (v[i] - vr) * (v[i] - vt) - u[i] + I) / C
#u[i+1]=u[i]+tau*a*(b*(v[i]-vr)-u[i]); # Calculate recovery variable
if v[i+1] < d:
u[i+1] = u[i] + tau*a*(0-u[i])
else:
u[i+1] = u[i] + tau*a*((0.125*(v[i]-d)**3)-u[i])
if v[i+1]>=vPeak:
v[i]=vPeak;
v[i+1]=c;
return v
@jit(nopython=True)
def get_vm_matlab_seven(C=89.7960714285714,
a=0.01, b=15, c=-60, d=10, k=1.6,
vPeak=(86.364525297619-65.2261863636364),
vr=-65.2261863636364, vt=-50,celltype=1, N=0,start=0,stop=0,amp=0,ramp=None):
tau= dt = 0.125; #dt
if ramp is not None:
N = len(ramp)
v = np.zeros(N)
u = np.zeros(N)
v[0] = vr
for i in range(N-1):
I = 0
if ramp is not None:
I = ramp[i]
elif start <= i <= stop:
I = amp
# forward Euler method
v[i+1] = v[i] + tau * (k * (v[i] - vr) * (v[i] - vt) - u[i] + I) / C
if v[i+1] > -65:
b=2;
else:
b=10;
u[i+1]=u[i]+tau*a*(b*(v[i]-vr)-u[i]);
if v[i+1]>=vPeak:
v[i]=vPeak;
v[i+1]=c;
u[i+1]=u[i+1]+d; # reset u, except for FS cells
return v
@jit(nopython=True)
def get_vm_matlab_six(C=89.7960714285714,
a=0.01, b=15, c=-60, d=10, k=1.6,
vPeak=(86.364525297619-65.2261863636364),
vr=-65.2261863636364, vt=-50,celltype=1, N=0,start=0,stop=0,amp=0,ramp=None):
tau= dt = 0.125; #dt
if ramp is not None:
N = len(ramp)
v = np.zeros(N)
u = np.zeros(N)
v[0] = vr
for i in range(N-1):
I = 0
if ramp is not None:
I = ramp[i]
elif start <= i <= stop:
I = amp
# forward Euler method
v[i+1] = v[i] + tau * (k * (v[i] - vr) * (v[i] - vt) - u[i] + I) / C
u[i+1]=u[i]+tau*a*(b*(v[i]-vr)-u[i]);
if v[i+1] > -65:
b=0;
else:
b=15;
if v[i+1] > (vPeak + 0.1*u[i+1]):
v[i]= vPeak + 0.1*u[i+1];
v[i+1] = c-0.1*u[i+1]; # Reset voltage
u[i+1]=u[i+1]+d;
return v
@jit(nopython=True)
def get_vm_matlab_one_two_three(C=89.7960714285714,
a=0.01, b=15, c=-60, d=10, k=1.6,
vPeak=(86.364525297619-65.2261863636364),
vr=-65.2261863636364, vt=-50,
N=0,start=0,stop=0,amp=0,ramp=None):
tau= dt = 0.125; #dt
if ramp is not None:
N = len(ramp)
v = np.zeros(N)
u = np.zeros(N)
v[0] = vr
for i in range(N-1):
I = 0
if ramp is not None:
I = ramp[i]
elif start <= i <= stop:
I = amp
# forward Euler method
v[i+1] = v[i] + tau * (k * (v[i] - vr) * (v[i] - vt) - u[i] + I) / C
u[i+1]=u[i]+tau*a*(b*(v[i]-vr)-u[i]); # Calculate recovery variable
#u[i+1]=u[i]+tau*a*(b*(v[i]-vr)-u[i]); # Calculate recovery variable
if v[i+1]>=vPeak:
v[i]=vPeak
v[i+1]=c
u[i+1]=u[i+1]+d # reset u, except for FS cells
return v
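# Hedged usage sketch (added; not part of the original backend).  It rebuilds a
# square current pulse the same way inject_square_current does (dt = 0.125 ms)
# and runs the simplest cell regime; the parameter values are the function
# defaults plus an assumed amplitude of 100 (in the same arbitrary units as the
# `amp` argument above), not values from any fitted model.
if __name__ == "__main__":
    dt = 0.125                      # ms, matches the sampling_period used above
    delay, duration = 10.0, 500.0   # ms
    t_max = delay + duration
    N = int(t_max / dt)
    delay_ind = int((delay / t_max) * N)
    duration_ind = int((duration / t_max) * N)
    v = get_vm_matlab_one_two_three(N=N, start=delay_ind,
                                    stop=delay_ind + duration_ind,
                                    amp=100.0)
    vm = AnalogSignal(v, units=pq.mV, sampling_period=dt * pq.ms)
    print(vm.shape, float(vm.max()), float(vm.min()))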
| 31.587349 | 125 | 0.517307 |
795054cb0a72a2baeee6d0fd2234ce38c59a5dc4 | 23,116 | py | Python | chapter/two_exercises.py | sloscal1/ML-Prob-Perspective | 3474faf8559cc2229426ab773460000c6c40fbb3 | ["MIT"] | null | null | null | chapter/two_exercises.py | sloscal1/ML-Prob-Perspective | 3474faf8559cc2229426ab773460000c6c40fbb3 | ["MIT"] | null | null | null | chapter/two_exercises.py | sloscal1/ML-Prob-Perspective | 3474faf8559cc2229426ab773460000c6c40fbb3 | ["MIT"] | null | null | null |
import random
def question_1():
"""
p(gender=boy) = 0.5
p(gender=girl) = 0.5
Possible outcomes of 2 children:
boy, girl
boy, boy
girl, boy
girl, girl
a) If you know the neighbor has at least one boy, what is the probability the neighbor has a girl?
Sample space: (b,g), (b,b), (g,b). 2/3 events have a girl involved, and they all have equal probability so 2/3.
b) What is the probability that the other child is a girl if you see that one is a boy?
Sample space: (b,g), (b,b). 1/2. The children are independent of each other, so it's the same as the probability
of one child being a girl.
Returns:
None.
"""
return None
def question_2():
"""
There is a blood found at a crime scene that has no innocent explanation. The blood is of a type found in
1% of the population.
a) Prosecutor's fallacy: 1% chance that the defendant would have the crime scene blood type if he was innocent,
therefore there is a 99% chance that he is guilty.
This is not what the evidence states: 1% of the population could have committed the crime because only they have
the suspect blood type. The defendant has that blood type, so he is 1/K people who are in consideration for
committing the crime, not 99% likely to have committed the crime. 99% of the population is not in consideration
for the crime at all, but based on the blood evidence alone we cannot state the likelihood of this single
defendant having committed this crime, only that he is in the consideration set.
b) Defendant's fallacy: There are 800K people in the city, 8000 have the blood type in question. There is just
1 in 8000 chance that the defendant is guilty and so has no relevance.
While it is true that the defendant is just 1 of 8000 city dwellers that have the matching blood type, the blood
is relevant. The true culprit must have that blood type, and so it establishes that further evidence must be
produced to establish the innocence or guilt of the defendant. This is far from the situation that we can ignore
the blood type, the guilty part(ies) must have that match to be considered for the crime.
Returns:
None.
"""
return None
def question_3():
r"""
Variance of a sum.
.. math::
cov[X, Y] &= \mathbb{E}[[X - \mathbb{E}[X]][Y - \mathbb{E}[Y]]]\\
&= \mathbb{E}[XY - X\mathbb{E}[Y] - Y\mathbb{E}[X] + \mathbb{E}[X]\mathbb{E}[Y]]\\
&= \mathbb{E}[XY] - \mathbb{E}[X]\mathbb{E}[Y] - \mathbb{E}[X]\mathbb{E}[Y] + \mathbb{E}[X]\mathbb{E}[Y]\\
&= \mathbb{E}[XY] - \mathbb{E}[X]\mathbb{E}[Y]
.. math::
var[X + Y] &= \mathbb{E}[(X + Y - \mathbb{E}[X+Y])^2]\\
&= \mathbb{E}[X^2] + \mathbb{E}[XY] - \mathbb{E}[X\mathbb{E}[X+Y]] + \mathbb{E}[XY] + \mathbb{E}[Y^2] - \mathbb{E}[Y\mathbb{E}[X+Y]] - \mathbb{E}[X\mathbb{E}[X+Y]] - \mathbb{E}[Y\mathbb{E}[X+Y]] + \mathbb{E}[X+Y]^2\\
&= \mathbb{E}[X^2] - \mathbb{E}[X]^2 - \mathbb{E}[X]\mathbb{E}[Y] + \mathbb{E}[Y^2] - \mathbb{E}[Y]^2 - \mathbb{E}[X]\mathbb{E}[Y] +2\mathbb{E}[XY] - \mathbb{E}[X]^2 - 2\mathbb{E}[X]\mathbb{E}[Y] - \mathbb{E}[Y]^2 + \mathbb{E}[X+Y]^2\\
&= var(X) + var(Y) + 2\mathbb{E}[XY] - 4\mathbb{E}[X]\mathbb{E}[Y] - \mathbb{E}[X]^2 - \mathbb{E}[Y]^2 + \mathbb{E}[X+Y]^2\\
&= var(X) + var(Y) + 2cov(X, Y) - 2\mathbb{E}[X]\mathbb{E}[Y] - \mathbb{E}[X]^2 - \mathbb{E}[Y]^2 + \mathbb{E}[X]^2 + 2\mathbb{E}[X]\mathbb{E}[Y] + \mathbb{E}[Y]^2\\
&= var(X) + var(Y) + 2cov(X, Y)\\
Returns:
None.
"""
return None
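# Hedged numeric check (added for illustration): the identity derived above can
# be sanity-checked by simulation; the numpy import below is an assumption, as
# numpy is not used elsewhere in this file.
#
# >>> import numpy as np
# >>> rng = np.random.default_rng(0)
# >>> x = rng.normal(size=100_000); y = 0.5 * x + rng.normal(size=100_000)
# >>> lhs = np.var(x + y)
# >>> rhs = np.var(x) + np.var(y) + 2 * np.cov(x, y, bias=True)[0, 1]
# >>> bool(abs(lhs - rhs) < 1e-8)
# True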
def question_4():
r"""
Given:
.. math::
P(T=p|D=p) &= 0.99\\
P(T=n|D=n) &= 0.99\\
P(D=p) &= 1/10,000
This is an application of Bayes Theorem since we want to update the prior probability of having
the disease after knowing the test came back positive. So we have:
.. math::
P(D=p|T=p) &= \frac{P(T=p|D=p) \cdot P(D=p)}{P(T=p)}, &~\textrm{Bayes Thm.}\\
&= \frac{P(T=p|D=p) \cdot P(D=p)}{\Sigma_d P(T=p|D=d)\cdot P(D=d)}, &~\textrm{Law of Total Prob.}\\
&= \frac{P(T=p|D=p) \cdot P(D=p)}{P(T=p|D=p) \cdot P(D=p) + P(T=p|D=n) \cdot P(D=n)}, &~\textrm{Notation}\\
&= \frac{0.99 \cdot 0.0001}{0.99 \cdot 0.0001 + 0.01 \cdot 0.9999}, &~\textrm{Law of Total Prob.}\\
&\approx 0.0098.
    This means the good news is that the probability of having the disease is still a little less than 1/100. Also,
    the numerical substitution in the last step relies on two complement rules (each a two-outcome Law of Total
    Probability):
    .. math::
        1 &= P(D=p) + P(D=n)\\
        1 &= P(T=p|D=n) + P(T=n|D=n)
Returns:
None.
"""
print(0.99*0.0001/(0.99*0.0001+0.01*0.9999))
return None
def question_5(num_samples=1000000, seed=1337):
r""" The Monty Hall Problem using Bayes theorem.
We're interested in determining whether switching doors is better than sticking with the original.
Let :math:`C \sim Unif(3)` be the random variable representing where the car (prize) is,
:math:`F \sim Unif(3)` be the random variable
representing the first selection made by the contestant, and :math:`O` be the random variable representing
which door is opened after the first selection is made. This variable is deterministic when the first guess does
not equal the prize value but has a choice otherwise.
.. math::
P(F=P|O, P) &= \frac{P(O|F=P) \cdot P(F=P)}{P(O|P=F)},~&\textrm{Bayes Theorem}\\
&= \frac{1/2 \cdot 1/3}{1/2},~&\textrm{Counting}\\
&= 1/3.\\
P(F\neq P|O, P) &= \frac{P(O|F\neq P) \cdot P(F\neq P)}{P(O|P\neq F)},~&\textrm{Bayes Theorem}\\
&= \frac{1 \cdot 2/3}{1},~&\textrm{Counting}\\
&= 2/3.
    So from this we see that our first guess has a 2/3 chance of being wrong given the open door, so switching
    gives us a 2/3 chance of being correct. Equivalently, by the Law of Total Probability, the first guess is
    correct with probability 1/3, and the probability that switching wins is the complement of that.
Side-effect:
This code runs a simulation of the Monty Hall Problem to compute the probabilities and prints the
probability of being right when staying with the original choice or switching to the remaining door.
Args:
num_samples (int): the number of times to sample the distribution, must be positive.
seed (int): the random seed to ensure repeatability.
Returns:
None.
"""
    random.seed(seed)
stay = 0
switch = 0
for _ in range(num_samples):
prize = random.randint(0, 2)
first = random.randint(0, 2)
if prize != first:
# Trick: 3 - (0 + 1): 2; 3 - (0 + 2): 1; 3 - (1 + 2): 0.
open_door = 3 - (first + prize)
else:
# Trick: 1 + 0: 1, 2 + 0: 2; 1 + 1= 2, 1 + 2 = 0; 2 + 1 = 0, 2 + 2 = 1.
open_door = (random.randint(1, 2) + prize) % 3
if first == prize:
stay += 1
# Trick: 0 + 1 = 2, 0 + 2 = 1, 1 + 0 = 2, 1 + 2 = 0, 2 + 1 = 0 2 + 0 = 1
second = 3 - (open_door + first)
if prize == second:
switch += 1
print(f"Correct stay probability: {stay/num_samples*100:0.3f}%;"
f"\nCorrect switch probability: {switch/num_samples*100:0.3f}%")
def question_6():
r"""
Want to know if you can compute :math:`P(H|e_1,e_2)` with different givens.
Let's look at what this formula looks like after rearranging.
.. math::
P(H|e_1,e_2) &= \frac{P(e_1,e_2|H) \cdot P(H)}{P(e_1,e_2)},~&\textrm{Bayes Thm.}\\
&= \frac{P(e_1|H) \cdot P(e_2|H) \cdot P(H)}{P(e_1,e_2)},~&\textrm{Def. of Cond. Ind.}\\
&= \frac{P(e_1|H) \cdot P(e_2|H) \cdot P(H)}{\Sigma_h P(e_1,e_2|H) \cdot P(H)},~&\textrm{Total Probability}\\
&= \frac{P(e_1|H) \cdot P(e_2|H) \cdot P(H)}{\Sigma_h P(e_1|H)\cdot P(e_2|H) \cdot P(H)},~&\textrm{Def. of Cond. Ind.}
    i. :math:`P(e_1,e_2), P(H), P(e_1|H), P(e_2|H)`. This is sufficient from the second line above if we assume
    the :math:`E` variables are conditionally independent given :math:`H`.
    ii. :math:`P(e_1,e_2), P(H), P(e_1,e_2|H)`. This is sufficient from the first line above, a single
    application of Bayes Theorem.
iii. :math:`P(e_1|H), P(e_2|H), P(H)`. This is sufficient from the last line, after applying the Law of total
probability and Conditional Independence.
So (ii) is the answer to part a), when we don't know anything about the relationship between :math:`E_1` and
:math:`E_2`. All sets of givens are sufficient if we know the two variables are conditionally independent.
Returns:
None.
"""
# What is an example of conditional independence:
# https://en.wikipedia.org/wiki/Conditional_independence
# P(R|Y) = 4/12, P(B|Y) = 6/12, P(R|Y)*P(B|Y) = 6/36 = 2/12 = P(R,B|Y)
# P(!R|Y) = 8/12, P(!B|Y) = 6/12, P(!R|Y)*P(!B|Y) = 8/24 = 4/12 = P(!R,!B|Y)
# P(!R|Y) = 8/12, P(B|Y) = 6/12, P(!R|Y)*P(B|Y) = 8/24 = 4/12 = P(!R,B|Y)
# P(R|Y) = 4/12, P(!B|Y) = 6/12 P(R|Y)*P(!B|Y) = 6/36 = 2/12 = P(R,!B|Y)
# So R \ind B | Y.
return None
def question_7():
r""" Pairwise independence does not imply mutual independence.
Mutual independence means that :math:`P(X_i|X_S) = P(X_i) \forall S \subseteq \{1,\ldots,n\}\setminus\{i\}`
and so the joint distribution of :math:`P(X_{1:n}) = \prod_{i=1}^n P(X_i)`.
So it would be enough to show that for 3 variables that are all pairwise independent that they are
not mutually independent.
Consider a 5x5 grid where one variable :math:`(X_1)` is true only along the bottom 5 squares, another is true only
along the right side :math:`(X_2)`, and a third is true only along the main diagonal :math:`(X_3)`. The only overlap
any variable has with any other is in the lower right corner square.
.. math::
P(X_1=T) &= 5/25\\
P(X_1=F) &= 20/25\\
P(X_1=T,X_2=T) &= 1/25 = 5/25*5/25 = P(X_1=T)P(X_2=T)\\
P(X_1=T,X_2=F) &= 4/25 = 5/25*20/25 = P(X_1=T)P(X_2=F)\\
P(X_1=F,X_2=T) &= 4/25 = 20/25*5/25 = P(X_1=F)P(X_2=T)\\
P(X_1=F,X_2=F) &= 16/25 = 20/25*20/25 = P(X_1=F)P(X_2=F)\\
    In this way, we see that each pair of variables is independent. The question is whether they are mutually
    independent. If they were, then :math:`P(X_1,X_2,X_3) = P(X_1)P(X_2)P(X_3)`, but
    :math:`P(X_1=T,X_2=T,X_3=T) = 1/25` (the lower right corner) while :math:`P(X_1=T)P(X_2=T)P(X_3=T) = 1/125`, so
    pairwise independence does not imply mutual independence.
Returns:
None.
"""
return None
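# Hedged illustrative check (not part of the original exercises): enumerate the
# 5x5 grid from question_7 and confirm pairwise independence without mutual
# independence. The helper name `_check_question_7` is hypothetical.
def _check_question_7():
    cells = [(r, c) for r in range(5) for c in range(5)]  # uniform over 25 cells
    x1 = lambda rc: rc[0] == 4      # true along the bottom row
    x2 = lambda rc: rc[1] == 4      # true along the right column
    x3 = lambda rc: rc[0] == rc[1]  # true along the main diagonal
    def prob(pred):
        return sum(pred(rc) for rc in cells) / len(cells)
    for a, b in [(x1, x2), (x1, x3), (x2, x3)]:
        joint = prob(lambda rc: a(rc) and b(rc))
        assert abs(joint - prob(a) * prob(b)) < 1e-12  # pairwise independent
    triple = prob(lambda rc: x1(rc) and x2(rc) and x3(rc))
    print(triple, prob(x1) * prob(x2) * prob(x3))  # 1/25 vs 1/125: not mutually independent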
def question_8():
r""" Conditional independence iff joint factorizes.
Prove that :math:`p(x,y|z)=g(x,z)h(y,z)~\textrm{iff}~X \perp Y | Z.`
First, let :math:`g(x,z) = p(x|z), h(y,z) = p(y|z)` since conditional probabilities
are functions of random variables these are permissible definitions of :math:`g, h`.
:math:`\textrm{The forward direction:}~X \perp Y | Z \Rightarrow p(x,y|z)=g(x,z)h(y,z).`
.. math::
p(x,y|z) &= p(x|z)p(y|z),~&\textrm{Def. of Cond. Ind.}\\
&= g(x,z)h(y,z),~&\textrm{Defined above.}.
Lemma: :math:`p(x|y,z) = p(x|z)~\textrm{if}~X \perp Y | Z.`
Proof:
.. math::
p(x|y,z) &= \frac{p(x,y,z)}{p(y,z)},~&\textrm{Def. of Cond. Prob.}\\
&= \frac{p(x,y|z)p(z)}{p(y|z)p(z)}~&\textrm{Def. of Cond. Prob.}\\
&= \frac{p(x|z)p(y|z)p(z)}{p(y|z)p(z)}~&\textrm{Def. of Cond. Ind.}\\
&= p(x|z).
:math:`\textrm{The reverse direction:}~p(x,y|z)=g(x,z)h(y,z) \Rightarrow X \perp Y | Z.`
.. math::
p(x,y|z) &= \frac{p(x,y,z)}{p(z)},~&\textrm{Def. of Cond. Prob.}\\
&= \frac{p(z)p(y|z)p(x|y,z)}{p(z)},~&\textrm{Chain rule of prob.}\\
&= p(y|z)p(x|z),~&\textrm{By the above lemma, Def. Cond. Ind.}\\
&= g(x,z)h(y,z),~&\textrm{Defined above.}
Returns:
None.
"""
return None
def question_9():
r""" Conditional independence statements...
a) Does :math:`(X \perp W|Z,Y) \wedge (X \perp Y|Z) \Rightarrow (X \perp Y,W|Z)`? Yes.
.. math::
p(X,Y,W|Z) &= \frac{p(X,Y,W,Z)}{p(Z)},~&\textrm{Def. Cond. Prob.}\\
&= \frac{p(X,W|Z,Y)p(Z,Y)}{p(Z)},~&\textrm{Def. Cond. Prob.}\\
&= \frac{p(X|Z,Y)p(W|Z,Y)p(Z,Y)}{p(Z)},~&\textrm{First given; Def. Cond. Ind.}\\
&= \frac{p(X,Z,Y)p(W|Z,Y)p(Z,Y)}{p(Z,Y)p(Z)},~&\textrm{Def. Cond. Prob.}\\
&= \frac{p(X,Y|Z)p(Z)p(W|Z,Y)}{p(Z)},~&\textrm{Def. Cond. Prob.}\\
&= p(X|Z)p(Y|Z)p(W|Z,Y),~&\textrm{Second given; Def. Cond. Ind.}\\
&= \frac{p(X|Z)p(Y|Z)p(W,Z,Y)}{p(Z,Y)},~&\textrm{Def. Cond. Prob.}\\
&= \frac{p(X|Z)p(Y|Z)p(Y,W|Z)p(Z)}{p(Z,Y)},~&\textrm{Def. Cond. Prob.}\\
&= \frac{p(X|Z)p(Y,Z)p(Y,W|Z)p(Z)}{p(Z,Y)p(Z)},~&\textrm{Def. Cond. Prob.}\\
&= p(X|Z)p(Y,W|Z).
b) Does :math:`(X \perp Y|Z) \wedge (X \perp Y|W) \Rightarrow (X \perp Y|Z,W)?` No.
If W and Z are describing the same event, then this is a true statement, but in general,
it fails. If we construct another discrete example using a 4x4 grid where X is true along
the bottom, Y is true along the right side, Z is true along the main diagonal and W is true
in the bottom right corner, the top left corner, and along the minor diagonal in the middle two
rows (not where Z is true), then we'll have a contradiction. We get the first two statements as
being true, :math:`(X \perp Y |Z) \wedge (X \perp Y|W)`, but we'll find that :math:`p(X|W,Z) = p(Y|W,Z) = 1/2`
while :math:`p(X,Y|W,Z) = 1/2` not 1/4, giving us a contradiction and allowing us to say that the
result is not true.
Returns:
None.
"""
return None
def question_10():
r""" Derive the inverse gamma distribution.
If :math:`X \sim Ga(a, b)`, and :math:`Y = 1/X`, show that :math:`Y \sim IG(a, b)`.
.. math::
        p_y(y) &= p_x(1/y)\left|\frac{d}{dy}\frac{1}{y}\right|\\
        &= \frac{b^a}{\Gamma(a)}\left(\frac{1}{y}\right)^{a-1}e^{-b/y}y^{-2}\\
        &= \frac{b^a}{\Gamma(a)}y^{-(a-1)}e^{-b/y}y^{-2}\\
        &= \frac{b^a}{\Gamma(a)}y^{-(a+1)}e^{-b/y}\\
        &= IG(a, b).
Returns:
None.
"""
return None
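# Hedged illustrative check (not part of the original exercises): sample
# X ~ Ga(a, rate=b) and compare the sample mean of Y = 1/X with the known
# IG(a, b) mean b/(a-1), valid for a > 1. Note that random.gammavariate is
# parameterized by *scale*, so rate b corresponds to a scale of 1/b. The helper
# name `_check_question_10` is hypothetical.
def _check_question_10(a=3.0, b=2.0, n=100000, seed=1337):
    rng = random.Random(seed)
    ys = [1.0 / rng.gammavariate(a, 1.0 / b) for _ in range(n)]
    print(sum(ys) / n, b / (a - 1))  # Monte Carlo mean vs theoretical IG mean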
def question_11():
r""" Derive the 1D Gaussian normalization constant.
We're going to need to do a little u-substitution:
.. math::
u &= \frac{r^2}{2\sigma^2}\\
du &= \frac{2r}{2\sigma^2}dr\\
\frac{\sigma^2}{r}du &= dr.
.. math::
        Z^2 &= \int_0^{2\pi}\int_0^{\infty}r\,\exp\left(\frac{-r^2}{2\sigma^2}\right) dr\, d\theta\\
&= \int_0^{2\pi}\int_0^{\infty}re^{-u} du\frac{\sigma^2}{r}d\theta\\
&= \sigma^2\int_0^{2\pi}\int_0^{\infty}e^{-u} du d\theta\\
&= \sigma^2\int_0^{2\pi} \left.-e^{-u}\right|_0^{\infty} d\theta\\
&= \sigma^2\int_0^{2\pi} 1 d\theta\\
&= \sigma^2\left.\theta\right|_0^{2\pi}\\
&= \sigma^2 2\pi\\
Z &= \sqrt{\sigma^2 2\pi}\\
Returns:
None.
"""
return None
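# Hedged illustrative check (not part of the original exercises): numerically
# integrate exp(-x^2 / (2 sigma^2)) over the real line and compare with
# sqrt(2 pi sigma^2) from question_11. The helper name `_check_question_11`
# is hypothetical.
def _check_question_11(sigma=1.5, lim=40.0, steps=200000):
    import math
    dx = 2.0 * lim / steps
    midpoints = (-lim + (i + 0.5) * dx for i in range(steps))
    total = sum(math.exp(-(x ** 2) / (2.0 * sigma ** 2)) for x in midpoints)
    print(total * dx, math.sqrt(2.0 * math.pi * sigma ** 2))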
def question_12():
r""" Express I(X,Y) as entropy...
.. math::
I(X,Y) &= \Sigma_x\Sigma_y p(x,y) \log\frac{p(x,y)}{p(x)p(y)}\\
&= \Sigma_x\Sigma_y p(x|y)p(y) \log\frac{p(x|y)p(y)}{p(x)p(y)}\\
&= \Sigma_x\Sigma_y p(x|y)p(y) \left[\log p(x|y) - \log p(x)\right]\\
&= \Sigma_x\Sigma_y p(x|y)p(y)\log p(x|y) - \Sigma_x\Sigma_y p(x|y)p(y)\log p(x)\\
&= \Sigma_y p(y) \Sigma_x p(x|y)\log p(x|y) - \Sigma_x \log p(x) \Sigma_y p(x|y)p(y)\\
&= -H(X|Y) - \Sigma_x \log p(x) \Sigma_y p(x|y)p(y),~&\textrm{Def. of Cond. Entropy}\\
&= -H(X|Y) - \Sigma_x p(x)\log p(x),~&\textrm{Law of Total Prob.}\\
        &= -H(X|Y) + H(X),~&\textrm{Def. of Entropy}\\
&= H(X) - H(X|Y).
    To obtain :math:`H(Y) - H(Y|X)` instead, simply factor the joint the other way, :math:`p(x,y) = p(y|x)p(x)`, in the first step of the proof.
Returns:
None.
"""
return None
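# Hedged illustrative check (not part of the original exercises): verify
# I(X,Y) = H(X) - H(X|Y) from question_12 on a small joint distribution.
# The helper name `_check_question_12` is hypothetical.
def _check_question_12():
    import math
    joint = {("a", 0): 0.3, ("a", 1): 0.2, ("b", 0): 0.1, ("b", 1): 0.4}
    px, py = {}, {}
    for (x, y), p in joint.items():
        px[x] = px.get(x, 0.0) + p
        py[y] = py.get(y, 0.0) + p
    mutual_info = sum(p * math.log(p / (px[x] * py[y])) for (x, y), p in joint.items())
    h_x = -sum(p * math.log(p) for p in px.values())
    h_x_given_y = -sum(p * math.log(p / py[y]) for (x, y), p in joint.items())
    print(mutual_info, h_x - h_x_given_y)  # the two quantities agree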
def question_13():
r"""
Returns:
None.
"""
return None
def question_14():
r""" Show that normalized mutual information is a type of correlation.
:math:`r = 1-\frac{H(Y|X)}{H(X)}`.
a) Show :math:`r = \frac{I(X,Y)}{H(X)}`:
.. math::
r &= 1 - \frac{H(Y|X)}{H(X)}\\
&= \frac{H(X)}{H(X)} - \frac{H(Y|X)}{H(X)}\\
&= \frac{H(Y) - H(Y|X)}{H(X)},~&\textrm{X and Y are identically distributed.}\\
&= \frac{I(X,Y)}{H(X)},~&\textrm{From Q2.12}.
b) Show :math:`0 \leq r \leq 1`. We need to minimize and maximize the numerator. It is minimized when
the :math:`log\frac{p(x,y)}{p(x)p(y)}` is minimized, so:
.. math::
0 &= \log\frac{p(x,y)}{p(x)p(y)}\\
\log(p(x)p(y)) &= \log p(x,y)\\
\log(p(x)p(y)) &= \log(p(x)p(y)),~&X \perp Y.
If this term is 0 (and it can be if :math:`X \perp Y`), then the numerator is 0 and :math:`r=0`. The
numerator is maximized when :math:`X=Y`.
.. math::
        I(X,X) &= \Sigma_x \Sigma_{x'} p(x,x') \log\frac{p(x,x')}{p(x)p(x')}\\
        &= \Sigma_x p(x) \log\frac{p(x)}{p(x)p(x)},~&p(x,x') = p(x)\mathbb{I}(x'=x)\\
        &= \Sigma_x p(x) \log\frac{1}{p(x)}\\
        &= -\Sigma_x p(x) \log p(x)\\
        &= H(X).
So we end up with :math:`\frac{H(X)}{H(X)} = 1`. So we've seen the min and max and we have shown that
:math:`0 \leq r \leq 1`.
c) :math:`r = 0` when :math:`X \perp Y`.
d) :math:`r = 1` when :math:`X = Y`.
Returns:
None.
"""
return None
def question_16():
r""" Mean, median, and mode of the Beta distribution.
a) Mean:
.. math::
\mathbb{E}[X] &= \int_0^1 x B(a,b)dx\\
&= \int_0^1 x\frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}x^{a-1}(1-x)^{b-1}dx\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\int_0^1 x^a(1-x)^{b-1}dx\\
        &= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\int_0^1 x^{(a+1)-1}(1-x)^{b-1}dx\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}B(a+1,b),~&\textrm{Integral form of Beta}\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\frac{\Gamma(a+1)\Gamma(b)}{\Gamma(a+b+1)},~&\textrm{Def. of Beta}\\
&= \frac{\Gamma(a+b)}{\Gamma(a)}\frac{a\Gamma(a)}{(a+b)\Gamma(a+b)},~&\textrm{Def. of }\Gamma,\\
&= \frac{a}{(a+b)}.
b) Mode:
We're going to take the derivative, set to 0, and solve to see what we get...
.. math::
B(a, b) &= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}x^{a-1}(1-x)^{b-1}dx\\
0 &= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\left[(a-1)x^{a-2}(1-x)^{b-1}-x^{a-1}(b-1)(1-x)^{b-2}\right]\\
0 &= (a-1)(1-x)-x(b-1)\\
        0 &= a - ax - 1 + x - xb + x\\
ax+bx-2x &= a-1\\
x &= \frac{a-1}{a+b-2}.
c) Variance:
Just going to use the standard formula and hope for the best!
.. math::
Var(B(a,b)) &= \int_0^1\frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\left(x-\frac{a}{a+b}\right)^2 x^{a-1}(1-x)^{b-1}dx\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\int_0^1\left(x^2-\frac{2xa}{a+b}+\frac{a^2}{(a+b)^2}\right) x^{a-1}(1-x)^{b-1}dx\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\left[\int_0^1 x^{(a+2)-1}(1-x)^{b-1}dx -\frac{2a}{a+b}\int_0^1 x^{(a+1)-1}(1-x)^{b-1}dx+\frac{a^2}{(a+b)^2}\int_0^1 x^{a-1}(1-x)^{b-1}dx\right]\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\left[B(a+2,b) -\frac{2a}{a+b}B(a+1,b)+\frac{a^2}{(a+b)^2}B(a,b)\right]\\
&= \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}\left[\frac{\Gamma(a+2)\Gamma(b)}{\Gamma(a+b+2)} -\frac{2a}{a+b}\frac{\Gamma(a+1)\Gamma(b)}{\Gamma(a+b+1)}+\frac{a^2}{(a+b)^2}\frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}\right]\\
&= \frac{\Gamma(a+b)}{\Gamma(a)}\left[\frac{(a+1)a\Gamma(a)}{(a+b+1)(a+b)\Gamma(a+b)} -\frac{2a}{a+b}\frac{a\Gamma(a)}{(a+b)\Gamma(a+b)}+\frac{a^2}{(a+b)^2}\frac{\Gamma(a)}{\Gamma(a+b)}\right]\\
&= \frac{(a+1)a}{(a+b+1)(a+b)} -\frac{2a}{a+b}\frac{a}{(a+b)}+\frac{a^2}{(a+b)^2}\\
&= \frac{a^2+a}{(a+b+1)(a+b)} -\frac{2a^2}{(a+b)^2}+\frac{a^2}{(a+b)^2}\\
&= \frac{(a^2+a)(a+b) - (2a^2)(a+b+1) + a^2(a+b+1)}{(a+b+1)(a+b)^2}\\
&= \frac{a^3+a^2b+a^2+ab - 2a^3-2a^2b-2a^2 + a^3+a^2b+a^2}{(a+b+1)(a+b)^2}\\
&= \frac{ab}{(a+b+1)(a+b)^2}.
Returns:
None.
"""
return None
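# Hedged illustrative check (not part of the original exercises): compare the
# closed-form mean and variance from question_16 with Monte Carlo estimates
# using random.betavariate. The helper name `_check_question_16` is hypothetical.
def _check_question_16(a=2.0, b=5.0, n=100000, seed=1337):
    rng = random.Random(seed)
    xs = [rng.betavariate(a, b) for _ in range(n)]
    sample_mean = sum(xs) / n
    sample_var = sum((x - sample_mean) ** 2 for x in xs) / n
    print(sample_mean, a / (a + b))                          # mean: a/(a+b)
    print(sample_var, a * b / ((a + b) ** 2 * (a + b + 1)))  # variance: ab/((a+b)^2(a+b+1))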
def question_17(k=2, trials=1000, seed=1337):
r""" Expected value of the minimum of 2 uniformly distributed numbers...
The trick here is figuring out how to express the max function over two variables...
Assuming :math:`x_1,x_2 \sim Unif(0,1)`.
.. math::
\mathbb{E}[min(x_1,x_2)] &= \int_0^1\int_0^1 x_2\mathbb{I}(x_2 \leq x_1) + x_1\mathbb{I}(x_1 < x_2)dx_2 dx_1\\
&= \int_0^1\int_0^1 x_2\mathbb{I}(x_2 \leq x_1) dx_2 dx_1 + \int_0^1\int_0^1 x_1\mathbb{I}(x_1 < x_2)dx_2 dx_1\\
&= \int_0^1\int_0^{x_1} x_2 dx_2 dx_1 + \int_0^1\int_0^{x_2} x_1dx_1 dx_2\\
&= 2\int_0^1\int_0^{x_1} x_2 dx_2 dx_1\\
&= 2 \frac{1}{2\cdot 3}\\
&= \frac{1}{3}.
    In general, if you have :math:`n` draws from :math:`Unif(0,1)` the expected value of the min is
    :math:`n!\int_0^1\int_0^{x_1}\cdots\int_0^{x_{n-1}}x_n\, dx_n\cdots dx_1 = \frac{1}{n+1}`.
Also, as part of experimenting to get this solution, I also did a categorical case, which is very similar except
you need to worry about the situation where the two variables are equal (which has 0 probability in the continuous
case): :math:`\frac{2}{n^2}\sum_{i=1}^n\sum_{j=1}^i x_j - \sum_{i=1}^n x_i`, where :math:`n` is the number of elements
in the space and they are in ascending order. I believe going from 2 to :math:`k`
draws will be similar, replacing the numerator with :math:`k!` and the denominator with :math:`k`.
Args:
k (int): Number of draws from the distribution to compute the min over.
trials (int): Number of random min samples to select before computing the expected value.
seed (int): Random seed for reproducibility.
Returns:
float: the expected value of the min of ``k`` uniformly distributed variables.
"""
random.seed(a=seed)
min_samps = [min([random.random() for _ in range(k)]) for _ in range(trials)]
return sum(min_samps)/trials
if __name__ == "__main__":
question_5()
| 44.625483
| 247
| 0.547889
|
7950557dd1ea47834ab5027546b6c19544696291
| 2,974
|
py
|
Python
|
get_cluster_faults.py
|
schubb2003/element
|
14d79297b2dd6908e0c84ba533d507acd44fe054
|
[
"MIT"
] | null | null | null |
get_cluster_faults.py
|
schubb2003/element
|
14d79297b2dd6908e0c84ba533d507acd44fe054
|
[
"MIT"
] | null | null | null |
get_cluster_faults.py
|
schubb2003/element
|
14d79297b2dd6908e0c84ba533d507acd44fe054
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
# Author: Scott Chubb scott.chubb@netapp.com
# Written for Python 3.7 and above
# No warranty is offered, use at your own risk. While these scripts have been
# tested in lab situations, all use cases cannot be accounted for.
"""
import sys
import time
import os
import json
from datetime import datetime
from prettytable import PrettyTable
from modules.choose_inputs import get_inputs_default as get_inputs
from modules.build_auth import build_auth
from modules.connect_cluster import connect_cluster_rest as connect_cluster
def build_cluster_events():
fault_payload = json.dumps({"method": "ListClusterFaults",
"params": {"faultTypes": "current",
"bestPractices": False},
"id": 1})
return fault_payload
def parse_events(response_json):
"""
Build the events list
"""
fault_dict = {}
for res_out in response_json['result']['faults']:
if res_out['resolved'] is False:
flt_details = res_out['details']
flt_node = res_out['nodeID']
flt_drive = res_out['driveID']
flt_svc = res_out['serviceID']
flt_date = res_out['date']
flt_type = res_out['type']
flt_sev = res_out['severity']
flt_key = res_out['clusterFaultID']
fault_dict[flt_key] = [flt_node, flt_drive,
flt_svc, flt_date, flt_type,
flt_sev, flt_details]
if len(fault_dict) == 0:
print(f"No events found")
return fault_dict
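def _example_parse_events():
    """
    Hedged illustration (not part of the original script): parse_events only
    reads the keys shown below, so a minimal mocked ListClusterFaults response
    can look like this. All fault values here are made up for demonstration.
    """
    mocked_response = {"result": {"faults": [{
        "resolved": False,
        "details": "Example fault detail",
        "nodeID": 1,
        "driveID": 0,
        "serviceID": 12,
        "date": "2020-01-01T00:00:00Z",
        "type": "node",
        "severity": "warning",
        "clusterFaultID": 42}]}}
    return parse_events(mocked_response)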
def print_table(outfile_name, fault_dict):
flt_table = PrettyTable()
flt_table.field_names = ["Node ID", "Drive ID", "Service ID",
"Date", "Type", "Severity", "Details"]
# flt_table.max_width['Details'] = 60
for val in fault_dict.values():
flt_table.add_row([*val])
print(flt_table.get_string(sortby="Severity"))
flt_table_text = flt_table.get_string()
with open("./output_files/" + outfile_name, "a") as out_file:
out_file.write(flt_table_text + "\n")
def get_filename(mvip):
"""
Build the output filename
"""
now_date = datetime.now()
out_date = now_date.strftime("%Y-%m-%d_%H-%M")
outfile_name = mvip + "_cluster_faults_" + out_date + '.txt'
    if os.path.exists("./output_files/" + outfile_name):
        os.remove("./output_files/" + outfile_name)
print('Output file name is: {}'.format(outfile_name))
return outfile_name
def main():
"""
Do the work
"""
mvip, user, user_pass, mvip_node = get_inputs()
fault_payload = build_cluster_events()
headers, url = build_auth(mvip, user, user_pass, mvip_node)
response_json = connect_cluster(headers, url, fault_payload)
fault_dict = parse_events(response_json)
outfile_name = get_filename(mvip)
print_table(outfile_name, fault_dict)
if __name__ == "__main__":
main()
| 32.326087
| 78
| 0.628783
|
795055c3344b8001b3bab631594794720563bccb
| 570
|
py
|
Python
|
setup.py
|
heylouiz/flask-reactjs-bootstrap-sample-app
|
ee1d777501478cbd6af16d1c05d1889d4df5fdb6
|
[
"MIT"
] | null | null | null |
setup.py
|
heylouiz/flask-reactjs-bootstrap-sample-app
|
ee1d777501478cbd6af16d1c05d1889d4df5fdb6
|
[
"MIT"
] | null | null | null |
setup.py
|
heylouiz/flask-reactjs-bootstrap-sample-app
|
ee1d777501478cbd6af16d1c05d1889d4df5fdb6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Flask ReactJS Bootstrap setup file.
"""
import setuptools
import versioneer
setuptools.setup(name="sample_app",
version=versioneer.get_version(),
packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
#test_suite="tests",
scripts=["bin/sample_app"],
package_data={"sample_app": ["static/*", "templates/*"]},
install_requires=["flask>=0.10"],
zip_safe=False,
cmdclass=versioneer.get_cmdclass())
| 31.666667
| 81
| 0.554386
|
795055eca6fc9170d7b0c090bcd36aa9155f9314
| 35,968
|
py
|
Python
|
test/integration/component/test_mm_domain_limits.py
|
pavanaravapalli/cloudstack
|
1d59bd3fd9d153e3e8bbcd518ee49299a36f527c
|
[
"Apache-2.0"
] | 1
|
2022-03-06T08:43:27.000Z
|
2022-03-06T08:43:27.000Z
|
test/integration/component/test_mm_domain_limits.py
|
pavanaravapalli/cloudstack
|
1d59bd3fd9d153e3e8bbcd518ee49299a36f527c
|
[
"Apache-2.0"
] | 5
|
2018-03-15T20:07:30.000Z
|
2021-04-09T16:44:30.000Z
|
test/integration/component/test_mm_domain_limits.py
|
pavanaravapalli/cloudstack
|
1d59bd3fd9d153e3e8bbcd518ee49299a36f527c
|
[
"Apache-2.0"
] | 1
|
2021-05-11T19:22:59.000Z
|
2021-05-11T19:22:59.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for memory resource limits
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.lib.base import (
Account,
ServiceOffering,
VirtualMachine,
Resources,
Domain
)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
wait_for_cleanup,
findSuitableHostForMigration,
get_resource_type,
update_resource_count
)
from marvin.lib.utils import cleanup_resources
from marvin.codes import ERROR_NO_HOST_FOR_MIGRATION
class Services:
"""Test memory resource limit services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "resource",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 2048, # In MBs
},
"virtual_machine": {
"displayname": "TestVM",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'KVM',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"project": {
"name": "Project",
"displaytext": "Test project",
},
"domain": {
"name": "Domain",
},
"ostype": 'CentOS 5.3 (64-bit)',
"sleep": 60,
"timeout": 10,
"mode": 'advanced',
# Networking mode: Advanced, Basic
}
class TestDomainMemoryLimits(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDomainMemoryLimits, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [cls.service_offering, ]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def createInstance(self, service_off, networks=None, api_client=None):
"""Creates an instance in account"""
self.debug("Deploying an instance in account: %s" %
self.account.name)
if api_client is None:
api_client = self.apiclient
try:
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
networkids=networks,
serviceofferingid=service_off.id)
vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
self.assertIsInstance(vms,
list,
"List VMs should return a valid response")
self.assertEqual(vms[0].state, "Running",
"Vm state should be running after deployment")
return vm
except Exception as e:
self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self):
self.debug("Creating a sub-domain under: %s" % self.domain.name)
self.child_domain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.child_do_admin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_1.id
)
# Cleanup the resources created at end of test
self.cleanup.append(self.child_do_admin_1)
self.cleanup.append(self.child_domain_1)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=6144,
account=self.child_do_admin_1.name,
domainid=self.child_do_admin_1.domainid)
self.child_domain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.child_do_admin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_2.id)
# Cleanup the resources created at end of test
self.cleanup.append(self.child_do_admin_2)
self.cleanup.append(self.child_domain_2)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=6144,
account=self.child_do_admin_2.name,
domainid=self.child_do_admin_2.domainid)
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="true")
def test_01_change_service_offering(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Upgrade and downgrade service offering
# 4. Resource count should list properly for the domain
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
#Resetting memory count in service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
self.debug("Stopping instance: %s" % vm.name)
try:
vm.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_stop = account_list[0].memorytotal
self.assertEqual(resource_count_after_stop, expected_resource_count,
"Resource count should be same after stopping the instance")
self.debug("Creating service offering with 5 GB RAM")
self.services["service_offering"]["memory"] = 5120
self.service_offering_5gb = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering_5gb)
self.debug(
"Upgrade service offering of instance %s from %s to %s" %
(vm.name,
self.service_offering.name,
self.service_offering_5gb.name))
try:
vm.change_service_offering(self.apiclient,
serviceOfferingId=self.service_offering_5gb.id)
except Exception as e:
self.fail("Failed to change service offering of vm %s - %s" %
(vm.name, e))
update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_upgrade = account_list[0].memorytotal
self.debug(resource_count_after_upgrade)
self.assertTrue(resource_count_after_upgrade > resource_count_after_stop,
"Resource count should be more than before, after upgrading service offering")
self.debug(
"Down grade service offering of instance %s from %s to %s" %
(vm.name,
self.service_offering_5gb.name,
self.service_offering.name))
try:
vm.change_service_offering(self.apiclient,
serviceOfferingId=self.service_offering.id)
except Exception as e:
self.fail("Failed to change service offering of vm %s - %s" %
(vm.name, e))
update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_downgrade = account_list[0].memorytotal
self.debug(resource_count_after_downgrade)
self.assertTrue(resource_count_after_downgrade < resource_count_after_upgrade,
"Resource count should be less than before, after downgrading service offering")
self.debug("Starting instance: %s" % vm.name)
try:
vm.start(self.apiclient)
except Exception as e:
self.fail("Failed to start instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_start = account_list[0].memorytotal
self.assertTrue(resource_count_after_start == resource_count_after_downgrade,
"Resource count should be same after starting the instance")
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="true")
def test_02_migrate_vm(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Migrate vm to another host, resource count should list properly.
#Resetting memory count in service offering
self.hypervisor = self.testClient.getHypervisorInfo()
if self.hypervisor.lower() in ['lxc']:
self.skipTest("vm migrate is not supported in %s" % self.hypervisor)
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
host = findSuitableHostForMigration(self.apiclient, vm.id)
if host is None:
self.skipTest(ERROR_NO_HOST_FOR_MIGRATION)
self.debug("Migrating instance: %s to host: %s" %
(vm.name, host.name))
try:
vm.migrate(self.apiclient, host.id)
except Exception as e:
self.fail("Failed to migrate instance: %s" % e)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_migrate = account_list[0].memorytotal
self.assertTrue(resource_count_after_migrate == resource_count,
"Resource count should be same after migrating the instance")
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_03_delete_vm(self):
"""Test Deploy VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM & Deploy VM in the created domain
# 2. List Resource count for the root admin Memory usage
# 3. Delete vm, resource count should list as 0 after delete operation.
# Resetting the memory count of service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm = self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"])
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
self.debug("Destroying instance: %s" % vm.name)
try:
vm.delete(self.apiclient)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
# Wait for expunge interval to cleanup Memory
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_after_delete = account_list[0].memorytotal
self.assertEqual(resource_count_after_delete, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
return
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_04_deploy_multiple_vm(self):
"""Test Deploy multiple VM with specified RAM & verify the usage"""
# Validate the following
# 1. Create compute offering with specified RAM
# 2. Deploy multiple VMs with this service offering
# 3. List Resource count for the root admin Memory usage
# 4. Memory usage should list properly
# Resetting the memory count of service offering
self.services["service_offering"]["memory"] = 2048
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
users = { self.child_domain_1: self.child_do_admin_1,
self.child_domain_2: self.child_do_admin_2
}
for domain, admin in users.items():
self.account = admin
self.domain = domain
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
api_client = self.testClient.getUserApiClient(
UserName=self.account.name,
DomainName=self.account.domain)
vm_1 = self.createInstance(service_off=self.service_offering, api_client=api_client)
vm_2 = self.createInstance(service_off=self.service_offering, api_client=api_client)
vm_3 = self.createInstance(service_off=self.service_offering, api_client=api_client)
self.debug("Deploying instance - Memory capacity is fully utilized")
with self.assertRaises(Exception):
self.createInstance(service_off=self.service_offering, api_client=api_client)
account_list = Account.list(self.apiclient, id=self.account.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count = account_list[0].memorytotal
expected_resource_count = int(self.services["service_offering"]["memory"]) * 3 #Total 3 VMs
self.assertEqual(resource_count, expected_resource_count,
"Resource count should match with the expected resource count")
vm_2.delete(self.apiclient)
vm_3.delete(self.apiclient)
return
class TestMultipleChildDomainsMemory(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestMultipleChildDomainsMemory, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
# Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def createInstance(self, account, service_off, networks=None, api_client=None):
"""Creates an instance in account"""
self.debug("Deploying an instance in account: %s" %
account.name)
if api_client is None:
api_client = self.apiclient
try:
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=account.name,
domainid=account.domainid,
networkids=networks,
serviceofferingid=service_off.id)
vms = VirtualMachine.list(api_client, id=vm.id, listall=True)
self.assertIsInstance(vms,
list,
"List VMs should return a valid response")
self.assertEqual(vms[0].state, "Running",
"Vm state should be running after deployment")
return vm
except Exception as e:
self.fail("Failed to deploy an instance: %s" % e)
def setupAccounts(self):
self.debug("Creating a domain under: %s" % self.domain.name)
self.parent_domain = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.parentd_admin = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.domain.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=4096,
account=self.parentd_admin.name,
domainid=self.parentd_admin.domainid)
self.debug("Creating a sub-domain under: %s" % self.parent_domain.name)
self.cdomain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
self.debug("Creating a sub-domain under: %s" % self.parent_domain.name)
self.cdomain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
self.cadmin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_1.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.cdomain_1.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
domainid=self.cadmin_1.domainid)
self.debug("Updating the Memory resource count for account: %s" %
self.cadmin_1.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
account=self.cadmin_1.name,
domainid=self.cadmin_1.domainid)
self.cadmin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_2.id
)
self.debug("Updating the Memory resource count for domain: %s" %
self.cdomain_2.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
domainid=self.cadmin_2.domainid)
self.debug("Updating the Memory resource count for domain: %s" %
self.cadmin_2.name)
Resources.updateLimit(self.apiclient,
resourcetype=9,
max=2048,
account=self.cadmin_2.name,
domainid=self.cadmin_2.domainid)
# Cleanup the resources created at end of test
self.cleanup.append(self.cadmin_1)
self.cleanup.append(self.cadmin_2)
self.cleanup.append(self.cdomain_1)
self.cleanup.append(self.cdomain_2)
self.cleanup.append(self.parentd_admin)
self.cleanup.append(self.parent_domain)
users = {
self.parent_domain: self.parentd_admin,
self.cdomain_1: self.cadmin_1,
self.cdomain_2: self.cadmin_2
}
return users
@attr(tags=["advanced", "advancedns","simulator"], required_hardware="false")
def test_01_multiple_child_domains(self):
"""Test memory limits with multiple child domains"""
# Validate the following
# 1. Create Domain1 with 4 GB RAM and 2 child domains with 2 GB
# each.
# 2. Deploy VM's by Domain1 admin1/user1/ Domain2 user1/Admin1 account
# and verify the resource updates
# 3. Deploy VM by admin account after reaching max parent domain limit
# 4. Deploy VM with child account after reaching max child domain limit
# 5. Delete user account and verify the resource updates
# 6. Destroy user/admin account VM's and verify the child & Parent
# domain resource updates
self.debug("Creating service offering with 2 GB RAM")
self.services["service_offering"]["memory"] = 2048
self.service_offering = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
# Adding to cleanup list after execution
self.cleanup.append(self.service_offering)
self.debug("Setting up account and domain hierarchy")
self.setupAccounts()
api_client_cadmin_1 = self.testClient.getUserApiClient(
UserName=self.cadmin_1.name,
DomainName=self.cadmin_1.domain)
api_client_cadmin_2 = self.testClient.getUserApiClient(
UserName=self.cadmin_2.name,
DomainName=self.cadmin_2.domain)
self.debug("Creating an instance with service offering: %s" %
self.service_offering.name)
vm_1 = self.createInstance(account=self.cadmin_1,
service_off=self.service_offering, api_client=api_client_cadmin_1)
vm_2 = self.createInstance(account=self.cadmin_2,
service_off=self.service_offering, api_client=api_client_cadmin_2)
account_list = Account.list(self.apiclient, id=self.cadmin_1.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_1 = account_list[0].memorytotal
self.debug(resource_count_cadmin_1)
account_list = Account.list(self.apiclient, id=self.cadmin_2.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_2 = account_list[0].memorytotal
self.debug(resource_count_cadmin_2)
self.debug(
"Creating instance when Memory limit is fully used in parent domain")
with self.assertRaises(Exception):
self.createInstance(account=self.cadmin_1,
service_off=self.service_offering, api_client=api_client_cadmin_1)
self.debug(
"Creating instance when Memory limit is fully used in child domain")
with self.assertRaises(Exception):
self.createInstance(account=self.cadmin_2,
service_off=self.service_offering, api_client=api_client_cadmin_2)
self.debug("Destroying instances: %s, %s" % (vm_1.name, vm_2.name))
try:
vm_1.delete(self.apiclient)
vm_2.delete(self.apiclient)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
self.debug("Checking resource count for account: %s" % self.cadmin_1.name)
account_list = Account.list(self.apiclient, id=self.cadmin_1.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_1 = account_list[0].memorytotal
self.assertEqual(resource_count_cadmin_1, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
self.debug("Checking resource count for account: %s" % self.cadmin_2.name)
        account_list = Account.list(self.apiclient, id=self.cadmin_2.id)
self.assertIsInstance(account_list,
list,
"List Accounts should return a valid response"
)
resource_count_cadmin_2 = account_list[0].memorytotal
self.assertEqual(resource_count_cadmin_2, 0 , "Resource count for %s should be 0" % get_resource_type(resource_id=9))#RAM
return
| 45.586819
| 137
| 0.526079
|
795056be5a06cc47143fa9c4eabec046bb48da7f
| 87
|
py
|
Python
|
type_check_Cwhile.py
|
Mieschendahl/assignment-final-stub
|
19eea657fcc4f8a455c42028f34b918628514cc0
|
[
"MIT"
] | 3
|
2021-09-01T02:02:03.000Z
|
2022-03-07T05:39:34.000Z
|
type_check_Cwhile.py
|
Mieschendahl/assignment-final-stub
|
19eea657fcc4f8a455c42028f34b918628514cc0
|
[
"MIT"
] | 1
|
2022-03-20T11:08:45.000Z
|
2022-03-20T11:08:45.000Z
|
type_check_Cwhile.py
|
Mieschendahl/assignment-final-stub
|
19eea657fcc4f8a455c42028f34b918628514cc0
|
[
"MIT"
] | 6
|
2022-03-13T13:10:25.000Z
|
2022-03-28T22:18:12.000Z
|
from type_check_Cif import TypeCheckCif
class TypeCheckCwhile(TypeCheckCif):
pass
| 17.4
| 39
| 0.827586
|
795057279b57e507c1db697d38422267db884a07
| 26,658
|
py
|
Python
|
tools/third_party/hyperframe/hyperframe/frame.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
tools/third_party/hyperframe/hyperframe/frame.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 7,642
|
2018-05-28T09:38:03.000Z
|
2022-03-31T20:55:48.000Z
|
tools/third_party/hyperframe/hyperframe/frame.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# -*- coding: utf-8 -*-
"""
hyperframe/frame
~~~~~~~~~~~~~~~~
Defines framing logic for HTTP/2. Provides both classes to represent framed
data and logic for aiding the connection when it comes to reading from the
socket.
"""
import struct
import binascii
from .exceptions import (
UnknownFrameError, InvalidPaddingError, InvalidFrameError
)
from .flags import Flag, Flags
# The maximum initial length of a frame. Some frames have shorter maximum
# lengths.
FRAME_MAX_LEN = (2 ** 14)
# The maximum allowed length of a frame.
FRAME_MAX_ALLOWED_LEN = (2 ** 24) - 1
# Stream association enumerations.
_STREAM_ASSOC_HAS_STREAM = "has-stream"
_STREAM_ASSOC_NO_STREAM = "no-stream"
_STREAM_ASSOC_EITHER = "either"
# Structs for packing and unpacking
_STRUCT_HBBBL = struct.Struct(">HBBBL")
_STRUCT_LL = struct.Struct(">LL")
_STRUCT_HL = struct.Struct(">HL")
_STRUCT_LB = struct.Struct(">LB")
_STRUCT_L = struct.Struct(">L")
_STRUCT_H = struct.Struct(">H")
_STRUCT_B = struct.Struct(">B")
class Frame(object):
"""
The base class for all HTTP/2 frames.
"""
#: The flags defined on this type of frame.
defined_flags = []
#: The byte used to define the type of the frame.
type = None
# If 'has-stream', the frame's stream_id must be non-zero. If 'no-stream',
# it must be zero. If 'either', it's not checked.
stream_association = None
def __init__(self, stream_id, flags=()):
#: The stream identifier for the stream this frame was received on.
#: Set to 0 for frames sent on the connection (stream-id 0).
self.stream_id = stream_id
#: The flags set for this frame.
self.flags = Flags(self.defined_flags)
#: The frame length, excluding the nine-byte header.
self.body_len = 0
for flag in flags:
self.flags.add(flag)
if (not self.stream_id and
self.stream_association == _STREAM_ASSOC_HAS_STREAM):
raise ValueError('Stream ID must be non-zero')
if (self.stream_id and
self.stream_association == _STREAM_ASSOC_NO_STREAM):
raise ValueError('Stream ID must be zero')
def __repr__(self):
flags = ", ".join(self.flags) or "None"
body = binascii.hexlify(self.serialize_body()).decode('ascii')
if len(body) > 20:
body = body[:20] + "..."
return (
"{type}(Stream: {stream}; Flags: {flags}): {body}"
).format(
type=type(self).__name__,
stream=self.stream_id,
flags=flags,
body=body
)
@staticmethod
def parse_frame_header(header, strict=False):
"""
Takes a 9-byte frame header and returns a tuple of the appropriate
Frame object and the length that needs to be read from the socket.
This populates the flags field, and determines how long the body is.
:param strict: Whether to raise an exception when encountering a frame
not defined by spec and implemented by hyperframe.
:raises hyperframe.exceptions.UnknownFrameError: If a frame of unknown
type is received.
.. versionchanged:: 5.0.0
Added :param:`strict` to accommodate :class:`ExtensionFrame`
"""
try:
fields = _STRUCT_HBBBL.unpack(header)
except struct.error:
raise InvalidFrameError("Invalid frame header")
# First 24 bits are frame length.
length = (fields[0] << 8) + fields[1]
type = fields[2]
flags = fields[3]
stream_id = fields[4] & 0x7FFFFFFF
try:
frame = FRAMES[type](stream_id)
except KeyError:
if strict:
raise UnknownFrameError(type, length)
frame = ExtensionFrame(type=type, stream_id=stream_id)
frame.parse_flags(flags)
return (frame, length)
def parse_flags(self, flag_byte):
for flag, flag_bit in self.defined_flags:
if flag_byte & flag_bit:
self.flags.add(flag)
return self.flags
def serialize(self):
"""
Convert a frame into a bytestring, representing the serialized form of
the frame.
"""
body = self.serialize_body()
self.body_len = len(body)
# Build the common frame header.
# First, get the flags.
flags = 0
for flag, flag_bit in self.defined_flags:
if flag in self.flags:
flags |= flag_bit
header = _STRUCT_HBBBL.pack(
(self.body_len >> 8) & 0xFFFF, # Length spread over top 24 bits
self.body_len & 0xFF,
self.type,
flags,
self.stream_id & 0x7FFFFFFF # Stream ID is 32 bits.
)
return header + body
def serialize_body(self):
raise NotImplementedError()
def parse_body(self, data):
"""
Given the body of a frame, parses it into frame data. This populates
the non-header parts of the frame: that is, it does not populate the
stream ID or flags.
:param data: A memoryview object containing the body data of the frame.
Must not contain *more* data than the length returned by
:meth:`parse_frame_header
<hyperframe.frame.Frame.parse_frame_header>`.
"""
raise NotImplementedError()
class Padding(object):
"""
Mixin for frames that contain padding. Defines extra fields that can be
used and set by frames that can be padded.
"""
def __init__(self, stream_id, pad_length=0, **kwargs):
super(Padding, self).__init__(stream_id, **kwargs)
#: The length of the padding to use.
self.pad_length = pad_length
def serialize_padding_data(self):
if 'PADDED' in self.flags:
return _STRUCT_B.pack(self.pad_length)
return b''
def parse_padding_data(self, data):
if 'PADDED' in self.flags:
try:
self.pad_length = struct.unpack('!B', data[:1])[0]
except struct.error:
raise InvalidFrameError("Invalid Padding data")
return 1
return 0
@property
def total_padding(self):
return self.pad_length
class Priority(object):
"""
Mixin for frames that contain priority data. Defines extra fields that can
be used and set by frames that contain priority data.
"""
def __init__(self,
stream_id,
depends_on=0x0,
stream_weight=0x0,
exclusive=False,
**kwargs):
super(Priority, self).__init__(stream_id, **kwargs)
#: The stream ID of the stream on which this stream depends.
self.depends_on = depends_on
#: The weight of the stream. This is an integer between 0 and 256.
self.stream_weight = stream_weight
#: Whether the exclusive bit was set.
self.exclusive = exclusive
def serialize_priority_data(self):
return _STRUCT_LB.pack(
self.depends_on + (0x80000000 if self.exclusive else 0),
self.stream_weight
)
def parse_priority_data(self, data):
try:
self.depends_on, self.stream_weight = _STRUCT_LB.unpack(data[:5])
except struct.error:
raise InvalidFrameError("Invalid Priority data")
self.exclusive = True if self.depends_on >> 31 else False
self.depends_on &= 0x7FFFFFFF
return 5
class DataFrame(Padding, Frame):
"""
DATA frames convey arbitrary, variable-length sequences of octets
associated with a stream. One or more DATA frames are used, for instance,
to carry HTTP request or response payloads.
"""
#: The flags defined for DATA frames.
defined_flags = [
Flag('END_STREAM', 0x01),
Flag('PADDED', 0x08),
]
#: The type byte for data frames.
type = 0x0
stream_association = _STREAM_ASSOC_HAS_STREAM
def __init__(self, stream_id, data=b'', **kwargs):
super(DataFrame, self).__init__(stream_id, **kwargs)
#: The data contained on this frame.
self.data = data
def serialize_body(self):
padding_data = self.serialize_padding_data()
padding = b'\0' * self.total_padding
if isinstance(self.data, memoryview):
self.data = self.data.tobytes()
return b''.join([padding_data, self.data, padding])
def parse_body(self, data):
padding_data_length = self.parse_padding_data(data)
self.data = (
data[padding_data_length:len(data)-self.total_padding].tobytes()
)
self.body_len = len(data)
if self.total_padding and self.total_padding >= self.body_len:
raise InvalidPaddingError("Padding is too long.")
@property
def flow_controlled_length(self):
"""
The length of the frame that needs to be accounted for when considering
flow control.
"""
padding_len = 0
if 'PADDED' in self.flags:
# Account for extra 1-byte padding length field, which is still
# present if possibly zero-valued.
padding_len = self.total_padding + 1
return len(self.data) + padding_len
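def _example_data_frame_roundtrip():
    """
    Hedged illustration (not part of the original module): serialize a DATA
    frame and parse it back, showing the 9-byte header / body split described
    in ``parse_frame_header`` and ``parse_body``. The helper name is made up.
    """
    frame = DataFrame(1, data=b'hello', flags=('END_STREAM',))
    wire = frame.serialize()
    parsed, length = Frame.parse_frame_header(wire[:9])
    parsed.parse_body(memoryview(wire[9:9 + length]))
    assert parsed.data == b'hello' and 'END_STREAM' in parsed.flags
    return parsed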
class PriorityFrame(Priority, Frame):
"""
The PRIORITY frame specifies the sender-advised priority of a stream. It
can be sent at any time for an existing stream. This enables
reprioritisation of existing streams.
"""
#: The flags defined for PRIORITY frames.
defined_flags = []
#: The type byte defined for PRIORITY frames.
type = 0x02
stream_association = _STREAM_ASSOC_HAS_STREAM
def serialize_body(self):
return self.serialize_priority_data()
def parse_body(self, data):
self.parse_priority_data(data)
self.body_len = len(data)
class RstStreamFrame(Frame):
"""
The RST_STREAM frame allows for abnormal termination of a stream. When sent
by the initiator of a stream, it indicates that they wish to cancel the
stream or that an error condition has occurred. When sent by the receiver
of a stream, it indicates that either the receiver is rejecting the stream,
requesting that the stream be cancelled or that an error condition has
occurred.
"""
#: The flags defined for RST_STREAM frames.
defined_flags = []
#: The type byte defined for RST_STREAM frames.
type = 0x03
stream_association = _STREAM_ASSOC_HAS_STREAM
def __init__(self, stream_id, error_code=0, **kwargs):
super(RstStreamFrame, self).__init__(stream_id, **kwargs)
#: The error code used when resetting the stream.
self.error_code = error_code
def serialize_body(self):
return _STRUCT_L.pack(self.error_code)
def parse_body(self, data):
if len(data) != 4:
raise InvalidFrameError(
"RST_STREAM must have 4 byte body: actual length %s." %
len(data)
)
try:
self.error_code = _STRUCT_L.unpack(data)[0]
except struct.error: # pragma: no cover
raise InvalidFrameError("Invalid RST_STREAM body")
self.body_len = 4
class SettingsFrame(Frame):
"""
The SETTINGS frame conveys configuration parameters that affect how
endpoints communicate. The parameters are either constraints on peer
behavior or preferences.
Settings are not negotiated. Settings describe characteristics of the
sending peer, which are used by the receiving peer. Different values for
the same setting can be advertised by each peer. For example, a client
might set a high initial flow control window, whereas a server might set a
lower value to conserve resources.
"""
#: The flags defined for SETTINGS frames.
defined_flags = [Flag('ACK', 0x01)]
#: The type byte defined for SETTINGS frames.
type = 0x04
stream_association = _STREAM_ASSOC_NO_STREAM
# We need to define the known settings, they may as well be class
# attributes.
#: The byte that signals the SETTINGS_HEADER_TABLE_SIZE setting.
HEADER_TABLE_SIZE = 0x01
#: The byte that signals the SETTINGS_ENABLE_PUSH setting.
ENABLE_PUSH = 0x02
#: The byte that signals the SETTINGS_MAX_CONCURRENT_STREAMS setting.
MAX_CONCURRENT_STREAMS = 0x03
#: The byte that signals the SETTINGS_INITIAL_WINDOW_SIZE setting.
INITIAL_WINDOW_SIZE = 0x04
#: The byte that signals the SETTINGS_MAX_FRAME_SIZE setting.
MAX_FRAME_SIZE = 0x05
#: The byte that signals the SETTINGS_MAX_HEADER_LIST_SIZE setting.
MAX_HEADER_LIST_SIZE = 0x06
#: The byte that signals SETTINGS_ENABLE_CONNECT_PROTOCOL setting.
ENABLE_CONNECT_PROTOCOL = 0x08
def __init__(self, stream_id=0, settings=None, **kwargs):
super(SettingsFrame, self).__init__(stream_id, **kwargs)
if settings and "ACK" in kwargs.get("flags", ()):
raise ValueError("Settings must be empty if ACK flag is set.")
#: A dictionary of the setting type byte to the value of the setting.
self.settings = settings or {}
def serialize_body(self):
return b''.join([_STRUCT_HL.pack(setting & 0xFF, value)
for setting, value in self.settings.items()])
def parse_body(self, data):
body_len = 0
for i in range(0, len(data), 6):
try:
name, value = _STRUCT_HL.unpack(data[i:i+6])
except struct.error:
raise InvalidFrameError("Invalid SETTINGS body")
self.settings[name] = value
body_len += 6
self.body_len = body_len
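# --- Illustrative sketch (not part of hyperframe): round-tripping a SETTINGS
# frame through serialize_body()/parse_body(). The helper name is made up and
# the function is never called at import time.
def _demo_settings_roundtrip():
    original = SettingsFrame(settings={
        SettingsFrame.HEADER_TABLE_SIZE: 4096,
        SettingsFrame.INITIAL_WINDOW_SIZE: 65535,
    })
    body = original.serialize_body()   # 6 bytes per setting: 16-bit id + 32-bit value
    parsed = SettingsFrame()
    parsed.parse_body(memoryview(body))
    assert parsed.settings == original.settings
    return parsed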
class PushPromiseFrame(Padding, Frame):
"""
The PUSH_PROMISE frame is used to notify the peer endpoint in advance of
streams the sender intends to initiate.
"""
#: The flags defined for PUSH_PROMISE frames.
defined_flags = [
Flag('END_HEADERS', 0x04),
Flag('PADDED', 0x08)
]
#: The type byte defined for PUSH_PROMISE frames.
type = 0x05
stream_association = _STREAM_ASSOC_HAS_STREAM
def __init__(self, stream_id, promised_stream_id=0, data=b'', **kwargs):
super(PushPromiseFrame, self).__init__(stream_id, **kwargs)
#: The stream ID that is promised by this frame.
self.promised_stream_id = promised_stream_id
#: The HPACK-encoded header block for the simulated request on the new
#: stream.
self.data = data
def serialize_body(self):
padding_data = self.serialize_padding_data()
padding = b'\0' * self.total_padding
data = _STRUCT_L.pack(self.promised_stream_id)
return b''.join([padding_data, data, self.data, padding])
def parse_body(self, data):
padding_data_length = self.parse_padding_data(data)
try:
self.promised_stream_id = _STRUCT_L.unpack(
data[padding_data_length:padding_data_length + 4]
)[0]
except struct.error:
raise InvalidFrameError("Invalid PUSH_PROMISE body")
self.data = data[padding_data_length + 4:].tobytes()
self.body_len = len(data)
if self.total_padding and self.total_padding >= self.body_len:
raise InvalidPaddingError("Padding is too long.")
class PingFrame(Frame):
"""
The PING frame is a mechanism for measuring a minimal round-trip time from
the sender, as well as determining whether an idle connection is still
functional. PING frames can be sent from any endpoint.
"""
#: The flags defined for PING frames.
defined_flags = [Flag('ACK', 0x01)]
#: The type byte defined for PING frames.
type = 0x06
stream_association = _STREAM_ASSOC_NO_STREAM
def __init__(self, stream_id=0, opaque_data=b'', **kwargs):
super(PingFrame, self).__init__(stream_id, **kwargs)
#: The opaque data sent in this PING frame, as a bytestring.
self.opaque_data = opaque_data
def serialize_body(self):
if len(self.opaque_data) > 8:
raise InvalidFrameError(
"PING frame may not have more than 8 bytes of data, got %s" %
self.opaque_data
)
data = self.opaque_data
data += b'\x00' * (8 - len(self.opaque_data))
return data
def parse_body(self, data):
if len(data) != 8:
raise InvalidFrameError(
"PING frame must have 8 byte length: got %s" % len(data)
)
self.opaque_data = data.tobytes()
self.body_len = 8
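# --- Illustrative sketch (not part of hyperframe): PING payloads are padded to
# exactly 8 bytes on the wire, as serialize_body() above enforces. The helper
# name is made up and the function is never called at import time.
def _demo_ping_padding():
    ping = PingFrame(opaque_data=b'ping')
    assert len(ping.serialize_body()) == 8
    return ping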
class GoAwayFrame(Frame):
"""
The GOAWAY frame informs the remote peer to stop creating streams on this
connection. It can be sent from the client or the server. Once sent, the
sender will ignore frames sent on new streams for the remainder of the
connection.
"""
#: The flags defined for GOAWAY frames.
defined_flags = []
#: The type byte defined for GOAWAY frames.
type = 0x07
stream_association = _STREAM_ASSOC_NO_STREAM
def __init__(self,
stream_id=0,
last_stream_id=0,
error_code=0,
additional_data=b'',
**kwargs):
super(GoAwayFrame, self).__init__(stream_id, **kwargs)
#: The last stream ID definitely seen by the remote peer.
self.last_stream_id = last_stream_id
#: The error code for connection teardown.
self.error_code = error_code
#: Any additional data sent in the GOAWAY.
self.additional_data = additional_data
def serialize_body(self):
data = _STRUCT_LL.pack(
self.last_stream_id & 0x7FFFFFFF,
self.error_code
)
data += self.additional_data
return data
def parse_body(self, data):
try:
self.last_stream_id, self.error_code = _STRUCT_LL.unpack(
data[:8]
)
except struct.error:
raise InvalidFrameError("Invalid GOAWAY body.")
self.body_len = len(data)
if len(data) > 8:
self.additional_data = data[8:].tobytes()
class WindowUpdateFrame(Frame):
"""
The WINDOW_UPDATE frame is used to implement flow control.
Flow control operates at two levels: on each individual stream and on the
entire connection.
Both types of flow control are hop by hop; that is, only between the two
endpoints. Intermediaries do not forward WINDOW_UPDATE frames between
dependent connections. However, throttling of data transfer by any receiver
can indirectly cause the propagation of flow control information toward the
original sender.
"""
#: The flags defined for WINDOW_UPDATE frames.
defined_flags = []
#: The type byte defined for WINDOW_UPDATE frames.
type = 0x08
stream_association = _STREAM_ASSOC_EITHER
def __init__(self, stream_id, window_increment=0, **kwargs):
super(WindowUpdateFrame, self).__init__(stream_id, **kwargs)
#: The amount the flow control window is to be incremented.
self.window_increment = window_increment
def serialize_body(self):
return _STRUCT_L.pack(self.window_increment & 0x7FFFFFFF)
def parse_body(self, data):
try:
self.window_increment = _STRUCT_L.unpack(data)[0]
except struct.error:
raise InvalidFrameError("Invalid WINDOW_UPDATE body")
self.body_len = 4
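# --- Illustrative sketch (not part of hyperframe): WINDOW_UPDATE applies either
# to the whole connection (stream_id=0) or to a single stream, matching the two
# levels of flow control described above. The helper name and increments are
# made up and the function is never called at import time.
def _demo_window_updates():
    connection_level = WindowUpdateFrame(stream_id=0, window_increment=2 ** 16)
    stream_level = WindowUpdateFrame(stream_id=5, window_increment=1024)
    return connection_level.serialize_body(), stream_level.serialize_body()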
class HeadersFrame(Padding, Priority, Frame):
"""
The HEADERS frame carries name-value pairs. It is used to open a stream.
HEADERS frames can be sent on a stream in the "open" or "half closed
(remote)" states.
The HeadersFrame class is actually basically a data frame in this
implementation, because of the requirement to control the sizes of frames.
A header block fragment that doesn't fit in an entire HEADERS frame needs
to be followed with CONTINUATION frames. From the perspective of the frame
building code the header block is an opaque data segment.
"""
#: The flags defined for HEADERS frames.
defined_flags = [
Flag('END_STREAM', 0x01),
Flag('END_HEADERS', 0x04),
Flag('PADDED', 0x08),
Flag('PRIORITY', 0x20),
]
#: The type byte defined for HEADERS frames.
type = 0x01
stream_association = _STREAM_ASSOC_HAS_STREAM
def __init__(self, stream_id, data=b'', **kwargs):
super(HeadersFrame, self).__init__(stream_id, **kwargs)
#: The HPACK-encoded header block.
self.data = data
def serialize_body(self):
padding_data = self.serialize_padding_data()
padding = b'\0' * self.total_padding
if 'PRIORITY' in self.flags:
priority_data = self.serialize_priority_data()
else:
priority_data = b''
return b''.join([padding_data, priority_data, self.data, padding])
def parse_body(self, data):
padding_data_length = self.parse_padding_data(data)
data = data[padding_data_length:]
if 'PRIORITY' in self.flags:
priority_data_length = self.parse_priority_data(data)
else:
priority_data_length = 0
self.body_len = len(data)
self.data = (
data[priority_data_length:len(data)-self.total_padding].tobytes()
)
if self.total_padding and self.total_padding >= self.body_len:
raise InvalidPaddingError("Padding is too long.")
class ContinuationFrame(Frame):
"""
The CONTINUATION frame is used to continue a sequence of header block
fragments. Any number of CONTINUATION frames can be sent on an existing
stream, as long as the preceding frame on the same stream is one of
HEADERS, PUSH_PROMISE or CONTINUATION without the END_HEADERS flag set.
Much like the HEADERS frame, hyper treats this as an opaque data frame with
different flags and a different type.
"""
#: The flags defined for CONTINUATION frames.
defined_flags = [Flag('END_HEADERS', 0x04)]
#: The type byte defined for CONTINUATION frames.
type = 0x09
stream_association = _STREAM_ASSOC_HAS_STREAM
def __init__(self, stream_id, data=b'', **kwargs):
super(ContinuationFrame, self).__init__(stream_id, **kwargs)
#: The HPACK-encoded header block.
self.data = data
def serialize_body(self):
return self.data
def parse_body(self, data):
self.data = data.tobytes()
self.body_len = len(data)
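# --- Illustrative sketch (not part of hyperframe): a header block split across
# HEADERS + CONTINUATION with END_HEADERS only on the final fragment, as the
# docstrings above describe. The byte strings are placeholders rather than real
# HPACK data, and this assumes the Flags.add() and Frame.serialize() helpers
# defined earlier in this module. Never called at import time.
def _demo_header_block_fragments(stream_id, first_fragment, last_fragment):
    head = HeadersFrame(stream_id, data=first_fragment)
    tail = ContinuationFrame(stream_id, data=last_fragment)
    tail.flags.add('END_HEADERS')
    return head.serialize(), tail.serialize()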
class AltSvcFrame(Frame):
"""
The ALTSVC frame is used to advertise alternate services that the current
host, or a different one, can understand. This frame is standardised as
part of RFC 7838.
This frame does no work to validate that the ALTSVC field parameter is
acceptable per the rules of RFC 7838.
.. note:: If the ``stream_id`` of this frame is nonzero, the origin field
must have zero length. Conversely, if the ``stream_id`` of this
frame is zero, the origin field must have nonzero length. Put
another way, a valid ALTSVC frame has ``stream_id != 0`` XOR
``len(origin) != 0``.
"""
type = 0xA
stream_association = _STREAM_ASSOC_EITHER
def __init__(self, stream_id, origin=b'', field=b'', **kwargs):
super(AltSvcFrame, self).__init__(stream_id, **kwargs)
if not isinstance(origin, bytes):
raise ValueError("AltSvc origin must be bytestring.")
if not isinstance(field, bytes):
raise ValueError("AltSvc field must be a bytestring.")
self.origin = origin
self.field = field
def serialize_body(self):
origin_len = _STRUCT_H.pack(len(self.origin))
return b''.join([origin_len, self.origin, self.field])
def parse_body(self, data):
try:
origin_len = _STRUCT_H.unpack(data[0:2])[0]
self.origin = data[2:2+origin_len].tobytes()
if len(self.origin) != origin_len:
raise InvalidFrameError("Invalid ALTSVC frame body.")
self.field = data[2+origin_len:].tobytes()
except (struct.error, ValueError):
raise InvalidFrameError("Invalid ALTSVC frame body.")
self.body_len = len(data)
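# --- Illustrative sketch (not part of hyperframe): the two valid shapes of an
# ALTSVC frame noted above: connection-scoped (stream 0 with an origin) and
# stream-scoped (nonzero stream with an empty origin). The host and field
# values are made up; as documented, the class performs no RFC 7838 validation.
def _demo_altsvc_shapes():
    connection_scoped = AltSvcFrame(
        stream_id=0, origin=b'example.com', field=b'h2=":8000"; ma=60'
    )
    stream_scoped = AltSvcFrame(
        stream_id=1, origin=b'', field=b'h2=":8000"; ma=60'
    )
    return connection_scoped.serialize_body(), stream_scoped.serialize_body()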
class ExtensionFrame(Frame):
"""
ExtensionFrame is used to wrap frames which are not natively interpretable
by hyperframe.
Although certain byte prefixes are ordained by specification to have
certain contextual meanings, frames with other prefixes are not prohibited,
and may be used to communicate arbitrary meaning between HTTP/2 peers.
Thus, hyperframe, rather than raising an exception when such a frame is
encountered, wraps it in a generic frame to be properly acted upon by
upstream consumers which might have additional context on how to use it.
.. versionadded:: 5.0.0
"""
stream_association = _STREAM_ASSOC_EITHER
def __init__(self, type, stream_id, **kwargs):
super(ExtensionFrame, self).__init__(stream_id, **kwargs)
self.type = type
self.flag_byte = None
def parse_flags(self, flag_byte):
"""
For extension frames, we parse the flags by just storing a flag byte.
"""
self.flag_byte = flag_byte
def parse_body(self, data):
self.body = data.tobytes()
self.body_len = len(data)
def serialize(self):
"""
A broad override of the serialize method that ensures that the data
comes back out exactly as it came in. This should not be used in most
user code: it exists only as a helper method if frames need to be
reconstituted.
"""
# Build the frame header.
# First, get the flags.
flags = self.flag_byte
header = _STRUCT_HBBBL.pack(
(self.body_len >> 8) & 0xFFFF, # Length spread over top 24 bits
self.body_len & 0xFF,
self.type,
flags,
self.stream_id & 0x7FFFFFFF # Stream ID is 32 bits.
)
return header + self.body
_FRAME_CLASSES = [
DataFrame,
HeadersFrame,
PriorityFrame,
RstStreamFrame,
SettingsFrame,
PushPromiseFrame,
PingFrame,
GoAwayFrame,
WindowUpdateFrame,
ContinuationFrame,
AltSvcFrame,
]
#: FRAMES maps the type byte for each frame to the class used to represent that
#: frame.
FRAMES = {cls.type: cls for cls in _FRAME_CLASSES}
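# --- Illustrative sketch (not part of hyperframe): dispatching on the type
# byte via FRAMES, falling back to ExtensionFrame for types this module does
# not recognise. The helper name and arguments are made up.
def _frame_class_for(type_byte, stream_id):
    frame_cls = FRAMES.get(type_byte)
    if frame_cls is None:
        return ExtensionFrame(type=type_byte, stream_id=stream_id)
    return frame_cls(stream_id)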
| 32.391252
| 79
| 0.642021
|
795057b755f91cdae5d836b865808ac727dd141e
| 10,639
|
py
|
Python
|
dephell/repositories/_conda/_cloud.py
|
OliverHofkens/dephell
|
6303f416018910668f1635b70cd828a2fd2b2d9e
|
[
"MIT"
] | 1,880
|
2019-03-21T10:08:25.000Z
|
2022-03-31T12:41:55.000Z
|
dephell/repositories/_conda/_cloud.py
|
rachmadaniHaryono/dephell
|
0ef500c8f2d5f05244bac191b1b1383f68464cd2
|
[
"MIT"
] | 356
|
2019-03-21T19:08:56.000Z
|
2021-01-08T17:45:43.000Z
|
dephell/repositories/_conda/_cloud.py
|
rachmadaniHaryono/dephell
|
0ef500c8f2d5f05244bac191b1b1383f68464cd2
|
[
"MIT"
] | 157
|
2019-04-23T01:13:37.000Z
|
2022-03-24T22:41:18.000Z
|
# built-in
import json
import sys
from bz2 import BZ2Decompressor
from collections import OrderedDict, defaultdict
from datetime import datetime
from logging import getLogger
from platform import uname
from typing import Any, Dict, Iterable, Iterator, List
# external
import attr
from dephell_specifier import RangeSpecifier
from packaging.utils import canonicalize_name
from packaging.version import parse
# app
from ...cache import JSONCache
from ...cached_property import cached_property
from ...config import config
from ...constants import HOMEPAGE_FIELD
from ...models.release import Release
from ...models.simple_dependency import SimpleDependency
from ...networking import requests_session
from ._base import CondaBaseRepo
# https://conda.anaconda.org/conda-forge/linux-64
# https://conda.anaconda.org/conda-forge/noarch
# https://repo.anaconda.com/pkgs/main/linux-64
# https://repo.anaconda.com/pkgs/main/noarch
# https://repo.anaconda.com/pkgs/free/linux-64
# https://repo.anaconda.com/pkgs/free/noarch
# https://repo.anaconda.com/pkgs/r/linux-64
# https://repo.anaconda.com/pkgs/r/noarch
URL_FIELDS = {
'home': HOMEPAGE_FIELD,
'dev_url': 'repository',
'doc_url': 'documentation',
'license_url': 'license',
'source_url': 'source',
}
logger = getLogger('dephell.repositories.conda.cloud')
@attr.s()
class CondaCloudRepo(CondaBaseRepo):
channels = attr.ib(type=List[str], factory=list)
# https://conda.anaconda.org/{channel}/channeldata.json
_user_urls = dict(
repo='https://conda.anaconda.org/{channel}/{arch}/repodata.json.bz2',
chan='https://conda.anaconda.org/{channel}/channeldata.json',
)
_main_urls = dict(
repo='https://repo.anaconda.com/pkgs/{channel}/{arch}/repodata.json.bz2',
chan='https://repo.anaconda.com/pkgs/main/channeldata.json',
)
_search_url = 'https://api.anaconda.org/search'
_allowed_values = dict(
type=frozenset({'conda', 'pypi', 'env', 'ipynb'}),
platform=frozenset({
'osx-32', 'osx-64',
'win-32', 'win-64',
'linux-32', 'linux-64',
'linux-armv6l', 'linux-armv7l', 'linux-ppc64le',
'noarch',
}),
)
def get_releases(self, dep) -> tuple:
self._update_dep(dep=dep)
raw_releases = self._releases.get(dep.name)
if not raw_releases:
return ()
raw_releases = OrderedDict(sorted(
raw_releases.items(),
key=lambda rel: parse(rel[0]),
reverse=True,
))
releases = []
for version, release_info in raw_releases.items():
release = Release(
raw_name=dep.raw_name,
version=version,
time=datetime.fromtimestamp(release_info['timestamp']),
hashes=tuple(file['sha256'] for file in release_info['files'] if file['sha256']),
)
# get deps
deps = set()
pythons = set()
for req in release_info['depends']:
parsed = self.parse_req(req)
if parsed['name'] == 'python':
if 'version' in parsed:
pythons.add(parsed['version'])
continue
deps.add(SimpleDependency(
name=parsed['name'],
specifier=parsed.get('version', '*'),
))
release.python = RangeSpecifier(' || '.join(pythons))
release.dependencies = tuple(sorted(deps))
releases.append(release)
return tuple(releases)
async def get_dependencies(self, *args, **kwargs):
raise NotImplementedError('use get_releases to get deps')
def search(self, query: Iterable[str]) -> List[Dict[str, str]]:
fields = self._parse_query(query=query)
logger.debug('search on anaconda cloud', extra=dict(query=fields))
invalid_fields = set(fields) - {'name', 'type', 'platform'}
if invalid_fields:
raise ValueError('Invalid fields: {}'.format(', '.join(invalid_fields)))
if 'name' not in fields:
raise ValueError('please, specify search text')
for field, value in fields.items():
if field in self._allowed_values and value not in self._allowed_values[field]:
raise ValueError('invalid {field} value. Given: {given}. Allowed: {allowed}'.format(
field=field,
given=value,
allowed=', '.join(self._allowed_values[field]),
))
with requests_session() as session:
response = session.get(self._search_url, params=fields)
response.raise_for_status()
results = []
for info in response.json():
urls = dict(anaconda=info['html_url'])
for field, value in info.items():
if value and value != 'None' and field in URL_FIELDS:
urls[URL_FIELDS[field]] = value
results.append(dict(
name=info['name'],
version=info['versions'][-1],
description=info['summary'],
license=info['license'],
channel=info['owner'],
links=urls,
))
return results
# hidden methods
def _get_chan_url(self, channel: str) -> str:
if channel == 'defaults':
return self._main_urls['chan'].format(channel=channel)
return self._user_urls['chan'].format(channel=channel)
def _get_urls(self, channel: str) -> Iterator[str]:
translation = {
'Linux': 'linux',
'Windows': 'win',
            'Darwin': 'osx',  # platform.uname() reports 'Darwin' on macOS
}
system = translation.get(uname().system, 'linux')
system += '-64' if sys.maxsize > 2**32 else '-32'
for arch in (system, 'noarch'):
if channel == 'defaults':
                # use a separate name so the outer `channel` is not clobbered
                for default_channel in ('main', 'free'):
                    yield self._main_urls['repo'].format(arch=arch, channel=default_channel)
else:
yield self._user_urls['repo'].format(arch=arch, channel=channel)
def _update_dep(self, dep) -> None:
info = self._packages.get(dep.name)
if not info:
return
if not dep.links:
dep.links = info['links']
if not dep.license and 'license' in info:
dep.license = self._get_license(info['license'])
if not dep.description and 'summary' in info:
dep.description = info['summary']
# hidden properties
@cached_property
def _channels(self) -> List[str]:
channels = list(self.channels)
if not channels:
channels.append('conda-forge')
if 'defaults' not in channels:
channels.append('defaults')
return channels[::-1]
@cached_property
def _packages(self) -> Dict[str, Dict[str, Any]]:
all_packages = dict()
for channel in self._channels:
cache = JSONCache('conda.anaconda.org', 'packages', channel, ttl=config['cache']['ttl'])
channel_packages = cache.load()
if channel_packages is not None:
all_packages.update(channel_packages)
continue
url = self._get_chan_url(channel=channel)
with requests_session() as session:
response = session.get(url)
response.raise_for_status()
channel_packages = dict()
for name, info in response.json()['packages'].items():
name = canonicalize_name(name)
links = dict(
anaconda='https://anaconda.org/{channel}/{name}'.format(
channel=channel,
name=name,
),
)
for field, value in info.items():
if value and value != 'None' and field in URL_FIELDS:
links[URL_FIELDS[field]] = value
channel_packages[name] = dict(
channel=channel,
links=links,
)
license = info.get('license')
if license and license.lower() not in ('none', 'unknown'):
channel_packages[name]['license'] = license
summary = info.get('summary')
if summary:
channel_packages[name]['summary'] = summary
all_packages.update(channel_packages)
cache.dump(channel_packages)
return all_packages
@cached_property
def _releases(self) -> Dict[str, Dict[str, Dict[str, Any]]]:
all_deps = defaultdict(dict)
for channel in self._channels:
cache = JSONCache('conda.anaconda.org', 'releases', channel, ttl=config['cache']['ttl'])
channel_deps = cache.load()
if channel_deps is not None:
for dep, releases in channel_deps.items():
all_deps[dep].update(releases)
continue
channel_deps = defaultdict(dict)
for url in self._get_urls(channel=channel):
with requests_session() as session:
response = session.get(url)
response.raise_for_status()
content = BZ2Decompressor().decompress(response.content).decode('utf-8')
base_url = url.rsplit('/', 1)[0]
for fname, info in json.loads(content)['packages'].items():
# release info
name = canonicalize_name(info.pop('name'))
version = info.pop('version')
if version not in channel_deps[name]:
channel_deps[name][version] = dict(
depends=set(),
timestamp=info.get('timestamp', 0) // 1000,
files=[],
)
# file info
channel_deps[name][version]['depends'].update(info['depends'])
channel_deps[name][version]['files'].append(dict(
url=base_url + '/' + fname,
sha256=info.get('sha256', None),
size=info['size'],
))
for dep, releases in channel_deps.items():
for release in releases.values():
release['depends'] = list(release['depends'])
all_deps[dep].update(releases)
cache.dump(channel_deps)
return dict(all_deps)
| 37.72695
| 100
| 0.557383
|
7950582b321db6af01af1cffc3061468cb01e989
| 1,125
|
py
|
Python
|
day6/python/main.py
|
mjkoo/aoc2019
|
89e0090688dad2ccd8bdd94f657c6f08320e125a
|
[
"MIT"
] | null | null | null |
day6/python/main.py
|
mjkoo/aoc2019
|
89e0090688dad2ccd8bdd94f657c6f08320e125a
|
[
"MIT"
] | null | null | null |
day6/python/main.py
|
mjkoo/aoc2019
|
89e0090688dad2ccd8bdd94f657c6f08320e125a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def path_to_com(orbits, source):
ret = []
cur = source
while cur != "COM":
ret.append(cur)
cur = orbits[cur]
return ret
def num_transfers(orbits, source, dest):
# Ignore YOU and SAN
source_to_com = path_to_com(orbits, source)[1:]
dest_to_com = path_to_com(orbits, dest)[1:]
prefix_len = 0
for a, b in zip(reversed(source_to_com), reversed(dest_to_com)):
if a == b:
prefix_len += 1
# Strip common suffix, add one for rendezvous point, subtract one to get
# number of transfers rather than number of stops
return len(source_to_com) + len(dest_to_com) - 2 * prefix_len
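# Illustrative check (not part of the original solution): the sample map from
# Advent of Code 2019 day 6 part 2 needs 4 orbital transfers. The helper is
# never called by main().
def _demo_num_transfers():
    sample = {
        'B': 'COM', 'C': 'B', 'D': 'C', 'E': 'D', 'F': 'E', 'G': 'B',
        'H': 'G', 'I': 'D', 'J': 'E', 'K': 'J', 'L': 'K',
        'YOU': 'K', 'SAN': 'I',
    }
    return num_transfers(sample, 'YOU', 'SAN')  # -> 4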
def main(argv):
orbits = {}
with open(argv[1], "r") as f:
for line in f:
parent, child = line.strip().split(")")
orbits[child] = parent
count = 0
for obj in orbits.keys():
cur = obj
while cur != "COM":
count += 1
cur = orbits[cur]
print(count)
print(num_transfers(orbits, "YOU", "SAN"))
if __name__ == "__main__":
main(sys.argv)
| 22.959184
| 76
| 0.584889
|
795058a248648fe01868797e870ba899749c2163
| 859
|
py
|
Python
|
turf/great_circle/_great_circle.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 11
|
2020-08-26T11:04:55.000Z
|
2022-01-26T14:53:10.000Z
|
turf/great_circle/_great_circle.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 36
|
2020-04-09T16:49:05.000Z
|
2020-06-01T14:39:37.000Z
|
turf/great_circle/_great_circle.py
|
malroc/pyturf
|
c89b6ea7094bd5ca26cf589d9dcd15bd819d82e9
|
[
"MIT"
] | 5
|
2020-07-30T23:37:35.000Z
|
2021-08-24T08:10:28.000Z
|
from turf.invariant import get_coords_from_features
from turf.great_circle._arc import GreatCircle
def great_circle(start, end, options=None):
"""
Returns the great circle route as LineString
:param start: source point feature
:param end: destination point feature
:param options: Optional parameters
[options["properties"]={}] line feature properties
[options.npoints=100] number of points
:return: great circle line feature
"""
if not options or not isinstance(options, dict):
options = {}
start = get_coords_from_features(start, ["Point"])
end = get_coords_from_features(end, ["Point"])
properties = options.get("properties", {})
npoints = options.get("npoints", 100)
properties["npoints"] = npoints
gc = GreatCircle(start, end, properties)
return gc.to_geojson()
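# Illustrative usage sketch (not part of pyturf): the coordinates, property
# values and npoints below are made up for the example, and the function is
# never called at import time.
def _demo_great_circle():
    start = {"type": "Feature", "properties": {},
             "geometry": {"type": "Point", "coordinates": [-122.33, 47.61]}}
    end = {"type": "Feature", "properties": {},
           "geometry": {"type": "Point", "coordinates": [-77.04, 38.91]}}
    return great_circle(start, end, {"npoints": 50, "properties": {"name": "sea-to-dc"}})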
| 29.62069
| 58
| 0.692666
|
7950596bcbb42785666a3990d00316ccdfc4eb9d
| 30
|
py
|
Python
|
hello.py
|
StrayDragon/github-action-rehearsal
|
884e971681569c83d69bc51e2096ccfd7c88a149
|
[
"MIT"
] | null | null | null |
hello.py
|
StrayDragon/github-action-rehearsal
|
884e971681569c83d69bc51e2096ccfd7c88a149
|
[
"MIT"
] | 4
|
2021-02-09T09:25:15.000Z
|
2021-05-10T07:34:20.000Z
|
hello.py
|
StrayDragon/github-action-rehearsal
|
884e971681569c83d69bc51e2096ccfd7c88a149
|
[
"MIT"
] | null | null | null |
def hello():
print("你好👋")
| 10
| 16
| 0.5
|
79505c6b1abd7a037567206ff89f2375920541ca
| 955
|
py
|
Python
|
Sandbox/qemu-sgx-master/scripts/tracetool/backend/dtrace.py
|
Maxul/sgx_vmx_protocol
|
b18dcdd6cbbf10c7d609649295676f0163dd9a5e
|
[
"MIT"
] | 8
|
2020-03-16T06:34:49.000Z
|
2021-12-06T01:50:54.000Z
|
Sandbox/qemu-sgx-master/scripts/tracetool/backend/dtrace.py
|
Maxul/sgx_vmx_protocol
|
b18dcdd6cbbf10c7d609649295676f0163dd9a5e
|
[
"MIT"
] | null | null | null |
Sandbox/qemu-sgx-master/scripts/tracetool/backend/dtrace.py
|
Maxul/sgx_vmx_protocol
|
b18dcdd6cbbf10c7d609649295676f0163dd9a5e
|
[
"MIT"
] | 1
|
2021-12-06T01:50:56.000Z
|
2021-12-06T01:50:56.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2016, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PUBLIC = True
PROBEPREFIX = None
def probeprefix():
if PROBEPREFIX is None:
raise ValueError("you must set PROBEPREFIX")
return PROBEPREFIX
BINARY = None
def binary():
if BINARY is None:
raise ValueError("you must set BINARY")
return BINARY
def generate_h_begin(events, group):
out('#include "trace/generated-tracers-dtrace.h"',
'')
def generate_h(event, group):
out(' QEMU_%(uppername)s(%(argnames)s);',
uppername=event.name.upper(),
argnames=", ".join(event.args.names()))
| 20.319149
| 76
| 0.66178
|
79505ed766359ae4a2367aa04e22ee0a36d987f9
| 990
|
py
|
Python
|
terrascript/data/e_breuninger/netbox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/e_breuninger/netbox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/e_breuninger/netbox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/e-breuninger/netbox.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:22:23 UTC)
import terrascript
class netbox_cluster(terrascript.Data):
pass
class netbox_cluster_group(terrascript.Data):
pass
class netbox_device_role(terrascript.Data):
pass
class netbox_interfaces(terrascript.Data):
pass
class netbox_platform(terrascript.Data):
pass
class netbox_prefix(terrascript.Data):
pass
class netbox_tag(terrascript.Data):
pass
class netbox_tenant(terrascript.Data):
pass
class netbox_tenant_group(terrascript.Data):
pass
class netbox_virtual_machines(terrascript.Data):
pass
class netbox_vrf(terrascript.Data):
pass
__all__ = [
"netbox_cluster",
"netbox_cluster_group",
"netbox_device_role",
"netbox_interfaces",
"netbox_platform",
"netbox_prefix",
"netbox_tag",
"netbox_tenant",
"netbox_tenant_group",
"netbox_virtual_machines",
"netbox_vrf",
]
| 15.714286
| 73
| 0.730303
|
79505f123cc8026810733de84ba021366bcfaf12
| 4,214
|
py
|
Python
|
airflow/providers/amazon/aws/transfers/imap_attachment_to_s3.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2019-05-07T06:46:55.000Z
|
2019-05-07T06:46:55.000Z
|
airflow/providers/amazon/aws/transfers/imap_attachment_to_s3.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 7
|
2021-06-28T20:24:56.000Z
|
2022-02-26T02:01:36.000Z
|
airflow/providers/amazon/aws/transfers/imap_attachment_to_s3.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2019-06-15T08:38:53.000Z
|
2019-06-15T08:38:53.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module allows you to transfer mail attachments from a mail server into s3 bucket.
"""
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.imap.hooks.imap import ImapHook
from airflow.utils.decorators import apply_defaults
class ImapAttachmentToS3Operator(BaseOperator):
"""
Transfers a mail attachment from a mail server into s3 bucket.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ImapAttachmentToS3Operator`
:param imap_attachment_name: The file name of the mail attachment that you want to transfer.
:type imap_attachment_name: str
:param s3_key: The destination file name in the s3 bucket for the attachment.
:type s3_key: str
:param imap_check_regex: If set checks the `imap_attachment_name` for a regular expression.
:type imap_check_regex: bool
:param imap_mail_folder: The folder on the mail server to look for the attachment.
:type imap_mail_folder: str
:param imap_mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:type imap_mail_filter: str
:param s3_overwrite: If set overwrites the s3 key if already exists.
:type s3_overwrite: bool
:param imap_conn_id: The reference to the connection details of the mail server.
:type imap_conn_id: str
:param s3_conn_id: The reference to the s3 connection details.
:type s3_conn_id: str
"""
template_fields = ('imap_attachment_name', 's3_key', 'imap_mail_filter')
@apply_defaults
def __init__(self, *,
imap_attachment_name,
s3_key,
imap_check_regex=False,
imap_mail_folder='INBOX',
imap_mail_filter='All',
s3_overwrite=False,
imap_conn_id='imap_default',
s3_conn_id='aws_default',
**kwargs):
super().__init__(**kwargs)
self.imap_attachment_name = imap_attachment_name
self.s3_key = s3_key
self.imap_check_regex = imap_check_regex
self.imap_mail_folder = imap_mail_folder
self.imap_mail_filter = imap_mail_filter
self.s3_overwrite = s3_overwrite
self.imap_conn_id = imap_conn_id
self.s3_conn_id = s3_conn_id
def execute(self, context):
"""
This function executes the transfer from the email server (via imap) into s3.
:param context: The context while executing.
:type context: dict
"""
self.log.info(
'Transferring mail attachment %s from mail server via imap to s3 key %s...',
self.imap_attachment_name, self.s3_key
)
with ImapHook(imap_conn_id=self.imap_conn_id) as imap_hook:
imap_mail_attachments = imap_hook.retrieve_mail_attachments(
name=self.imap_attachment_name,
check_regex=self.imap_check_regex,
latest_only=True,
mail_folder=self.imap_mail_folder,
mail_filter=self.imap_mail_filter,
)
s3_hook = S3Hook(aws_conn_id=self.s3_conn_id)
s3_hook.load_bytes(bytes_data=imap_mail_attachments[0][1],
key=self.s3_key,
replace=self.s3_overwrite)
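# Illustrative usage sketch (not part of the provider): wiring the operator into
# a DAG. The task id, attachment name, bucket path and the use of a full s3://
# URL as s3_key (relying on the default imap/aws connections) are assumptions
# made for the example, not documented requirements.
def _example_imap_to_s3_task(dag):
    return ImapAttachmentToS3Operator(
        task_id='transfer_invoice_to_s3',
        imap_attachment_name='invoice.pdf',
        s3_key='s3://my-bucket/invoices/invoice.pdf',
        imap_check_regex=False,
        s3_overwrite=True,
        dag=dag,
    )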
| 41.722772
| 96
| 0.685572
|
79505f1cb4462aa19567669481e8c655be8c0ac3
| 2,420
|
py
|
Python
|
spacy/tests/spans/test_span.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | 1
|
2018-09-24T17:00:23.000Z
|
2018-09-24T17:00:23.000Z
|
spacy/tests/spans/test_span.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | null | null | null |
spacy/tests/spans/test_span.py
|
TerminalWitchcraft/spaCy
|
29adbef095c04e21a691e912671e4ec21082b047
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals
from ..util import get_doc
import pytest
@pytest.fixture
def doc(en_tokenizer):
text = "This is a sentence. This is another sentence. And a third."
heads = [1, 0, 1, -2, -3, 1, 0, 1, -2, -3, 0, 1, -2, -1]
deps = ['nsubj', 'ROOT', 'det', 'attr', 'punct', 'nsubj', 'ROOT', 'det',
'attr', 'punct', 'ROOT', 'det', 'npadvmod', 'punct']
tokens = en_tokenizer(text)
return get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
def test_spans_sent_spans(doc):
sents = list(doc.sents)
assert sents[0].start == 0
assert sents[0].end == 5
assert len(sents) == 3
assert sum(len(sent) for sent in sents) == len(doc)
def test_spans_root(doc):
span = doc[2:4]
assert len(span) == 2
assert span.text == 'a sentence'
assert span.root.text == 'sentence'
assert span.root.head.text == 'is'
def test_spans_root2(en_tokenizer):
text = "through North and South Carolina"
heads = [0, 3, -1, -2, -4]
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
assert doc[-2:].root.text == 'Carolina'
def test_spans_span_sent(doc):
"""Test span.sent property"""
assert len(list(doc.sents))
assert doc[:2].sent.root.text == 'is'
assert doc[:2].sent.text == 'This is a sentence .'
assert doc[6:7].sent.root.left_edge.text == 'This'
def test_spans_default_sentiment(en_tokenizer):
"""Test span.sentiment property's default averaging behaviour"""
text = "good stuff bad stuff"
tokens = en_tokenizer(text)
tokens.vocab[tokens[0].text].sentiment = 3.0
tokens.vocab[tokens[2].text].sentiment = -2.0
doc = get_doc(tokens.vocab, [t.text for t in tokens])
assert doc[:2].sentiment == 3.0 / 2
assert doc[-2:].sentiment == -2. / 2
assert doc[:-1].sentiment == (3.+-2) / 3.
def test_spans_override_sentiment(en_tokenizer):
"""Test span.sentiment property's default averaging behaviour"""
text = "good stuff bad stuff"
tokens = en_tokenizer(text)
tokens.vocab[tokens[0].text].sentiment = 3.0
tokens.vocab[tokens[2].text].sentiment = -2.0
doc = get_doc(tokens.vocab, [t.text for t in tokens])
doc.user_span_hooks['sentiment'] = lambda span: 10.0
assert doc[:2].sentiment == 10.0
assert doc[-2:].sentiment == 10.0
assert doc[:-1].sentiment == 10.0
| 32.702703
| 82
| 0.642149
|
79505f460b45aef9e3d1b3809b263cd282a3994b
| 2,085
|
py
|
Python
|
jupyter-clear-output.py
|
caallinson/jupyter-clean-output
|
84c19933469105092d89acd5fa2204a350b0d8c9
|
[
"MIT"
] | 2
|
2020-10-03T04:24:31.000Z
|
2021-04-07T13:41:10.000Z
|
jupyter-clear-output.py
|
caallinson/jupyter-clean-output
|
84c19933469105092d89acd5fa2204a350b0d8c9
|
[
"MIT"
] | null | null | null |
jupyter-clear-output.py
|
caallinson/jupyter-clean-output
|
84c19933469105092d89acd5fa2204a350b0d8c9
|
[
"MIT"
] | 1
|
2020-10-03T20:27:09.000Z
|
2020-10-03T20:27:09.000Z
|
import json
import sys
def process_json(json_parsed):
"""
Clear outputs from Notebook saved in JSON format
"""
if isinstance(json_parsed, dict):
if 'cells' in json_parsed.keys():
for obj in json_parsed['cells']:
if 'outputs' in obj.keys():
obj['outputs'] = []
else:
return None
else:
return None
return json_parsed
def main():
"""
Clears outputs from Notebook. Print errors and help messages as needed
"""
if len(sys.argv) == 1:
print('\t')
print('\tClean Output of Jupyter Notebook Files (note: must be in JSON format)')
print('\t')
print('\t\t-f : Force read of non-ipynb file')
print('\t')
print('\t---------------------------------')
print('\tError: Provide Jupyter notebook file path')
print('\t')
return 1
process_fx = None
fName = None
for arg in sys.argv[1:]:
if arg[0] != '-':
fName = arg
if fName is None:
print('\tError: File name not provided')
return 1
fExtension = fName.lower().split(".")[-1]
if fExtension == "ipynb":
process_fx = process_json
else:
if '-f' not in sys.argv:
print('\tError: File not .ipynb extension')
print('\t\tUse -f to force')
return 1
else:
process_fx = process_json
try:
fRead = open(fName)
except Exception as err:
print("\tError in reading file")
print(err)
return 1
try:
f_contents = json.load(fRead)
except Exception as err:
print("\tError Parsing JSON")
print(err)
return 1
fRead.close()
f_contents = process_fx(f_contents)
if f_contents is not None:
fWrite = open(fName, 'w')
json.dump(f_contents, fWrite, indent=1)
else:
if process_fx is process_json:
print("\tError: Couldn't process JSON")
return 1
return 0
if __name__ == "__main__":
main()
| 24.244186
| 88
| 0.534772
|
79505f9067c48ff97b8159280551e9d5b2c59742
| 931
|
py
|
Python
|
envdsys/envdatasystem/migrations/0040_auto_20210714_2208.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1
|
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envdatasystem/migrations/0040_auto_20210714_2208.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25
|
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envdatasystem/migrations/0040_auto_20210714_2208.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-07-14 22:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envdatasystem', '0039_auto_20210714_2203'),
]
operations = [
migrations.AlterModelOptions(
name='platformevent',
options={'verbose_name': 'Platform Event', 'verbose_name_plural': 'Platform Events'},
),
migrations.AlterModelOptions(
name='projectevent',
options={'verbose_name': 'Project Event', 'verbose_name_plural': 'Project Events'},
),
migrations.AlterField(
model_name='platform',
name='platform_type',
field=models.CharField(choices=[('AIRCRAFT', 'Aircraft'), ('STATION', 'Station/Lab'), ('MOORING', 'Mooring'), ('SHIP', 'Ship'), ('UAS', 'UAS')], default='STATION', max_length=10, verbose_name='Platform Type'),
),
]
| 34.481481
| 221
| 0.609023
|
79505fea5a9fd2d54f0e74f2dbbf2cbcc6bb818b
| 10,949
|
py
|
Python
|
observation/bf_phaseup.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | null | null | null |
observation/bf_phaseup.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | 21
|
2019-09-16T15:26:53.000Z
|
2022-01-11T09:14:39.000Z
|
observation/bf_phaseup.py
|
ska-sa/katsdpscripts
|
f9eaa867aad8b94c715f7286953124df00b5781c
|
[
"BSD-3-Clause"
] | 1
|
2019-11-11T11:47:54.000Z
|
2019-11-11T11:47:54.000Z
|
#!/usr/bin/env python
#
# Track calibrator target for a specified time.
# Obtain calibrated gains and apply them to the F-engine afterwards.
import numpy as np
import scipy.ndimage
from katcorelib.observe import (standard_script_options, verify_and_connect,
collect_targets, start_session, user_logger,
CalSolutionsUnavailable)
class NoTargetsUpError(Exception):
"""No targets are above the horizon at the start of the observation."""
def clean_bandpass(bp_gains, cal_channel_freqs, max_gap_Hz):
"""Clean up bandpass gains by linear interpolation across narrow flagged regions."""
clean_gains = {}
# Linearly interpolate across flagged regions as long as they are not too large
for inp, bp in bp_gains.items():
flagged = np.isnan(bp)
if flagged.all():
clean_gains[inp] = bp
continue
chans = np.arange(len(bp))
interp_bp = np.interp(chans, chans[~flagged], bp[~flagged])
# Identify flagged regions and tag each with unique integer label
gaps, n_gaps = scipy.ndimage.label(flagged)
for n in range(n_gaps):
gap = np.nonzero(gaps == n + 1)[0]
gap_freqs = cal_channel_freqs[gap]
lower = gap_freqs.min()
upper = gap_freqs.max()
if upper - lower > max_gap_Hz:
interp_bp[gap] = np.nan
clean_gains[inp] = interp_bp
return clean_gains
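# Illustrative check (not part of the observation script): narrow flagged (NaN)
# regions are interpolated across while wide ones stay flagged. The input name,
# channel frequencies and gains below are made up; never called by the script.
def _demo_clean_bandpass():
    freqs = np.linspace(856e6, 1712e6, 8)   # roughly 122 MHz channel spacing
    gains = {'m000h': np.array([1., np.nan, 3., 4., np.nan, np.nan, np.nan, 8.])}
    cleaned = clean_bandpass(gains, freqs, max_gap_Hz=128e6)
    # the single-channel gap is filled (index 1 -> 2.0); the three-channel gap
    # (about 245 MHz wide) remains NaN
    return cleaned['m000h']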
def calculate_corrections(G_gains, B_gains, delays, cal_channel_freqs,
random_phase, flatten_bandpass,
target_average_correction):
"""Turn cal pipeline products into corrections to be passed to F-engine."""
average_gain = {}
gain_corrections = {}
# First find relative corrections per input with arbitrary global average
for inp in G_gains:
# Combine all calibration products for input into single array of gains
K_gains = np.exp(-2j * np.pi * delays[inp] * cal_channel_freqs)
gains = K_gains * B_gains[inp] * G_gains[inp]
if np.isnan(gains).all():
average_gain[inp] = gain_corrections[inp] = 0.0
continue
abs_gains = np.abs(gains)
# Track the average gain to fix overall power level (and as diagnostic)
average_gain[inp] = np.nanmedian(abs_gains)
corrections = 1.0 / gains
if not flatten_bandpass:
# Let corrections have constant magnitude equal to 1 / (avg gain),
# which ensures that power levels are still equalised between inputs
corrections *= abs_gains / average_gain[inp]
if random_phase:
corrections *= np.exp(2j * np.pi * np.random.rand(len(corrections)))
gain_corrections[inp] = np.nan_to_num(corrections)
# All invalid gains (NaNs) have now been turned into zeros
valid_average_gains = [g for g in average_gain.values() if g > 0]
if not valid_average_gains:
raise ValueError("All gains invalid and beamformer output will be zero!")
global_average_gain = np.median(valid_average_gains)
# Iterate over inputs again and fix average values of corrections
for inp in sorted(G_gains):
relative_gain = average_gain[inp] / global_average_gain
if relative_gain == 0.0:
user_logger.warning("%s has no valid gains and will be zeroed", inp)
continue
# This ensures that input at the global average gets target correction
gain_corrections[inp] *= target_average_correction * global_average_gain
safe_relative_gain = np.clip(relative_gain, 0.5, 2.0)
if relative_gain == safe_relative_gain:
user_logger.info("%s: average gain relative to global average = %5.2f",
inp, relative_gain)
else:
user_logger.warning("%s: average gain relative to global average "
"= %5.2f out of range, clipped to %.1f",
inp, relative_gain, safe_relative_gain)
gain_corrections[inp] *= relative_gain / safe_relative_gain
return gain_corrections
# Set up standard script options
usage = "%prog [options] <'target/catalogue'> [<'target/catalogue'> ...]"
description = 'Track the first source above the horizon and calibrate ' \
'gains based on it. At least one target must be specified.'
parser = standard_script_options(usage, description)
# Add experiment-specific options
parser.add_option('-t', '--track-duration', type='float', default=64.0,
help='Length of time to track the source for calibration, '
'in seconds (default=%default)')
parser.add_option('--verify-duration', type='float', default=64.0,
help='Length of time to revisit the source for verification, '
'in seconds (default=%default)')
parser.add_option('--fengine-gain', type='int_or_default', default='default',
help='Set correlator F-engine gain (average magnitude)')
parser.add_option('--fft-shift', type='int_or_default',
help='Override correlator F-engine FFT shift')
parser.add_option('--flatten-bandpass', action='store_true', default=False,
help='Apply bandpass magnitude correction on top of phase correction')
parser.add_option('--random-phase', action='store_true', default=False,
help='Apply random phases in F-engine (incoherent beamformer)')
parser.add_option('--disable-hv-correction', action='store_true', default=False,
help='Do not correct HV phase (but still fire the noise diode)')
parser.add_option('--max-gap-MHz', type='float', default=128.0,
help='The maximum gap in the bandpass gain that will still be '
'interpolated across, in MHz (default=%default)')
# Set default value for any option (both standard and experiment-specific options)
parser.set_defaults(observer='comm_test', nd_params='off', project_id='COMMTEST',
description='Phase-up observation that sets F-engine gains')
# Parse the command line
opts, args = parser.parse_args()
if len(args) == 0:
raise ValueError("Please specify at least one target argument via name "
"('J1939-6342'), description ('radec, 19:39, -63:42') or "
"catalogue file name ('three_calib.csv')")
# Check options and build KAT configuration, connecting to proxies and devices
with verify_and_connect(opts) as kat:
observation_sources = collect_targets(kat, args)
# Start capture session
with start_session(kat, **vars(opts)) as session:
session.standard_setup(**vars(opts))
# Reset F-engine to a known good state first
if opts.fft_shift is not None:
session.set_fengine_fft_shift(opts.fft_shift)
fengine_gain = session.set_fengine_gains(opts.fengine_gain)
# Quit if there are no sources to observe or not enough antennas for cal
if len(session.ants) < 4:
raise ValueError('Not enough receptors to do calibration - you '
'need 4 and you have %d' % (len(session.ants),))
sources_above_horizon = observation_sources.filter(el_limit_deg=opts.horizon)
if not sources_above_horizon:
raise NoTargetsUpError("No targets are currently visible - "
"please re-run the script later")
# Pick the first source that is up (this assumes that the sources in
# the catalogue are ordered from highest to lowest priority)
target = sources_above_horizon.targets[0]
target.add_tags('bfcal single_accumulation')
session.capture_start()
session.label('un_corrected')
user_logger.info("Initiating %g-second track on target %r",
opts.track_duration, target.description)
# Get onto the source
session.track(target, duration=0, announce=False)
# Fire noise diode during track
session.fire_noise_diode(on=opts.track_duration, off=0)
# Attempt to jiggle cal pipeline to drop its gain solutions
session.stop_antennas()
user_logger.info("Waiting for gains to materialise in cal pipeline")
hv_gains = {}
hv_delays = {}
timeout = 60 + opts.track_duration
# Wait for the last relevant bfcal product from the pipeline
if opts.disable_hv_correction:
user_logger.warning('HV phase correction disabled by script option')
else:
try:
hv_gains = session.get_cal_solutions('BCROSS_DIODE_SKY', timeout)
except CalSolutionsUnavailable as err:
user_logger.warning("No BCROSS_DIODE_SKY solutions found - "
"falling back to BCROSS_DIODE only: %s", err)
hv_gains = session.get_cal_solutions('BCROSS_DIODE')
hv_delays = session.get_cal_solutions('KCROSS_DIODE')
timeout = 0.0
gains = session.get_cal_solutions('G', timeout)
bp_gains = session.get_cal_solutions('B')
delays = session.get_cal_solutions('K')
# Add HV delay to the usual delay
for inp in sorted(delays):
delays[inp] += hv_delays.get(inp, 0.0)
if np.isnan(delays[inp]):
user_logger.warning("Delay fit failed on input %s (all its "
"data probably flagged)", inp)
# Add HV phase to bandpass phase
for inp in bp_gains:
bp_gains[inp] *= hv_gains.get(inp, 1.0)
cal_channel_freqs = session.get_cal_channel_freqs()
bp_gains = clean_bandpass(bp_gains, cal_channel_freqs, max_gap_Hz=opts.max_gap_MHz*1e6)
if opts.random_phase:
user_logger.warning("Setting F-engine gains with random phases "
"(you asked for it)")
else:
user_logger.info("Setting F-engine gains to phase up antennas")
if not kat.dry_run:
corrections = calculate_corrections(gains, bp_gains, delays,
cal_channel_freqs, opts.random_phase,
opts.flatten_bandpass, fengine_gain)
session.set_fengine_gains(corrections)
if opts.verify_duration > 0:
session.label('corrected')
user_logger.info("Revisiting target %r for %g seconds to verify "
"phase-up", target.name, opts.verify_duration)
session.track(target, duration=0, announce=False)
session.fire_noise_diode(on=opts.verify_duration, off=0)
if not opts.random_phase:
# Set last-phaseup script sensor on the subarray.
session.sub.req.set_script_param('script-last-phaseup', kat.sb_id_code)
| 51.890995
| 95
| 0.63677
|
7950608b26cd17a783302586746917ab293122a9
| 5,306
|
py
|
Python
|
pyconstruct/learners/base.py
|
unitn-sml/pyconstruct
|
f7174b00f16e34ca582bb1923ef46e871fc892b9
|
[
"MIT"
] | 13
|
2018-04-03T09:27:28.000Z
|
2021-04-01T07:52:01.000Z
|
pyconstruct/learners/base.py
|
unitn-sml/pyconstruct
|
f7174b00f16e34ca582bb1923ef46e871fc892b9
|
[
"MIT"
] | 1
|
2019-08-06T18:46:47.000Z
|
2019-08-13T08:48:28.000Z
|
pyconstruct/learners/base.py
|
unitn-sml/pyconstruct
|
f7174b00f16e34ca582bb1923ef46e871fc892b9
|
[
"MIT"
] | null | null | null |
import copy
from ..models import BaseModel
from abc import ABC, abstractmethod
from sklearn.base import BaseEstimator
__all__ = ['BaseLearner']
class BaseLearner(BaseEstimator, ABC):
"""A basic learning model class.
A learner fits a model with some data over some given domain. If only the
domain is given, a default model class (depending on the learner subclass)
will be used. If a model is also given, the learner will make a copy and
overwrite the domain of the copy with the one given to the learner. If no
domain is given to the learner, the given model must be initialized with a
domain.
The fitted model is accessible through the `model_` attribute (not to be
confused with the input parameter `model` which is copied if given).
Arguments
---------
domain : BaseDomain
The domain of the data.
model : BaseModel
The model the learner should fit.
"""
def __init__(self, domain=None, model=None, **kwargs):
self.domain = domain
self.model = model
def _get_model(self, default=None):
if hasattr(self, 'model_'):
return self.model_
if self.model is None:
if self.domain is None:
raise ValueError('Either domain or model must be given')
model_class = default if default is not None else BaseModel
self.model_ = model_class(self.domain)
self.domain_ = self.domain
else:
self.model_ = copy.deepcopy(self.model)
if self.domain is not None:
self.model_.domain = self.domain_ = self.domain
elif self.model_.domain is not None:
self.domain_ = self.model_.domain
else:
raise ValueError('No domain given to the learner nor the model')
return self.model_
@property
def _model(self):
return self._get_model()
def phi(self, X, Y, **kwargs):
"""Computes the feature vector for the given input and output objects.
Parameters
----------
X : numpy.ndarray
An array of input examples. The first dimension must be the number
of samples.
Y : numpy.ndarray
An array of output objects.
Returns
-------
numpy.ndarray
The array of feature vectors.
"""
return self._model.phi(X, Y, **kwargs)
def predict(self, X, *args, **kwargs):
"""Computes the prediction of the current model for the given input.
Parameters
----------
X : numpy.ndarray
An array of input examples. The first dimension must be the number
of samples.
Returns
-------
numpy.ndarray
The array of predicted objects.
"""
return self._model.predict(X, *args, **kwargs)
def decision_function(self, X, Y, **kwargs):
return self._model.decision_function(X, Y, **kwargs)
def loss(self, X, Y, Y_pred, **kwargs):
return self._model.loss(X, Y, Y_pred, **kwargs)
def score(self, X, Y, Y_pred=None, **kwargs):
"""Compute the score as the average loss over the examples.
This method is needed for scikit-learn estimation in GridSearchCV and
other model selection methods.
Parameters
----------
X : numpy.ndarray
An array of input examples. The first dimension must be the number
of samples.
Y : numpy.ndarray
An array of true output objects.
Y_pred : numpy.ndarray
An array of predicted object.
Returns
-------
score : float
The score of the model over the examples.
"""
if Y_pred is None:
Y_pred = self.predict(X, **kwargs)
return (- self.loss(X, Y, Y_pred)).mean()
@abstractmethod
def partial_fit(self, X, Y, Y_pred=None, **kwargs):
"""Updates the current model with a mini-batch (X, Y).
Parameters
----------
X : numpy.ndarray
Input examples. The first dimension must be the batch size.
Y : numpy.ndarray
Output objects. The first dimension must be the batch size. This
must coincide with batch size for X.
Y_pred : numpy.ndarray
Predictions of the algorithm. The first dimension must be the batch
size. This must coincide with batch size for X. If None, either not
needed or done internally.
Returns
-------
self
"""
@abstractmethod
def fit(self, X, Y, Y_pred=None, **kwargs):
"""Fit a model with data (X, Y).
Parameters
----------
X : numpy.ndarray
Input examples. The first dimension must be the dataset size.
Y : numpy.ndarray
Output objects. The first dimension must be the dataset size. This
must coincide with batch size for X.
Y_pred : numpy.ndarray
Predictions of the algorithm. The first dimension must be the batch
size. This must coincide with dataset size for X. If None, either
not needed or done internally.
Returns
-------
self
"""
| 31.963855
| 80
| 0.589333
|
795061fa1fcc684b321080c936267e623169c9a9
| 46,587
|
py
|
Python
|
nexus/lib/gamess_input.py
|
djstaros/qmcpack
|
280f67e638bae280448b47fa618f05b848c530d2
|
[
"NCSA"
] | null | null | null |
nexus/lib/gamess_input.py
|
djstaros/qmcpack
|
280f67e638bae280448b47fa618f05b848c530d2
|
[
"NCSA"
] | 11
|
2020-05-09T20:57:21.000Z
|
2020-06-10T00:00:17.000Z
|
nexus/lib/gamess_input.py
|
djstaros/qmcpack
|
280f67e638bae280448b47fa618f05b848c530d2
|
[
"NCSA"
] | null | null | null |
##################################################################
## (c) Copyright 2015- by Jaron T. Krogel ##
##################################################################
#====================================================================#
# gamess_input.py #
# Support for GAMESS input file I/O #
# #
# Content summary: #
# GamessInput #
# Input class for the GAMESS code. #
# Capable of reading/writing arbitrary GAMESS input files. #
# #
# generate_gamess_input #
# User function to create arbitrary GAMESS input. #
# #
# KeywordGroup #
# Represents an arbitrary keyword group in the input file. #
# #
# KeywordSpecGroup #
# Base class for specialized keyword groups. #
# Derived classes enforce the keyword specification. #
# See ContrlGroup, SystemGroup, GuessGroup, ScfGroup, #
# McscfGroup, DftGroup, GugdiaGroup, DrtGroup, CidrtGroup, #
# and DetGroup #
# #
# FormattedGroup #
# Represents strict machine-formatted input groups. #
# #
#====================================================================#
import os
from numpy import array,ndarray,abs
from generic import obj
from periodic_table import pt
from developer import DevBase
from nexus_base import nexus_noncore
from simulation import SimulationInput
from debug import *
class GIbase(DevBase):
def message(self,msg,**kwargs):
self.error(msg,**kwargs)
#end def message
#end class GIbase
class GIarray(GIbase):
def __init__(self,d):
for n,v in d.items():
if not isinstance(n,int):
self.error("keys must be integers\nattempted to initialize array from input provided: {0}\nnote that dict's are used only for arrays".format(d))
#end if
if isinstance(v,(tuple,list,ndarray)):
nv = array(v,type(v[0]))
else:
                nv = array([v],type(v))
#end if
self[n]=nv
#end for
#end def __init__
#end class GIarray
class Group(GIbase):
def __init__(self,text=None,**kwargs):
if text!=None:
self.read(text)
#end if
self.set(**kwargs)
#end def __init__
def read(self,text):
self.not_implemented()
#end def read
def write(self,text):
self.not_implemented()
#end def read
#end class Group
class KeywordGroup(Group):
linewrap = 70
lineindent = ' '
booldict = {'.true.':True,'.TRUE.':True,'.t.':True,'.T.':True,
'.false.':False,'.FALSE.':False,'.f.':False,'.F.':False}
def readval(self,val):
fail = False
if val in self.booldict:
v = self.booldict[val]
else:
try:
v = int(val)
except:
try:
v = float(val.replace('d','e'))
except:
#val = val.replace(',',' ')
if ' ' in val:
val = val.split()
try:
v = array(val,dtype=int)
except:
try:
v = array(val,dtype=float)
except:
try:
v = array(val,dtype=str)
except:
fail = True
#end try
#end try
#end try
else:
v = val
#end if
#end try
#end try
#end if
if fail:
self.error('failed to read value: "{0}"'.format(val))
#end if
return v
#end def readval
def read(self,text):
tokens = text.replace(',',' ').split()
for token in tokens:
if '=' in token:
var,val = token.split('=')
var = var.lower()
val = val.lower()
self[var]=val
else:
self[var]+=' '+token.lower()
#end if
#end for
vars = list(self.keys())
for var in vars:
val = self.readval(self[var])
if not '(' in var:
self[var] = val
else:
del self[var]
var,index = var.replace('(',' ').replace(')','').split()
index = int(index)
if not var in self:
arr = GIarray({index:val})
self[var] = arr
else:
self[var][index]=val
#end if
#end if
#end for
#end def read
def writeval(self,val):
if isinstance(val,bool):
if val:
sval = '.true.'
else:
sval = '.false.'
#end if
elif isinstance(val,str):
sval = val
elif isinstance(val,int):
sval = str(val)
elif isinstance(val,float):
sval = str(val).replace('e','d')
elif isinstance(val,(ndarray,list)):
sval = ''
for v in val:
vs = str(v)+','
if len(sval)+len(vs)<self.linewrap:
sval+=vs
else:
sval+='\n'+self.lineindent+vs
#end if
#end for
sval = sval[0:-1]
else:
self.error('unknown type encountered on write: {0}'.format(val))
#end if
return sval
#end def writeval
def write(self,name):
text = ''
line = ' ${0:<6} '.format(name)
for var in sorted(self.keys()):
val = self[var]
if not isinstance(val,GIarray):
vtext='{0}={1} '.format(var,self.writeval(val))
if len(line)+len(vtext) < self.linewrap:
line+=vtext
else:
text+=line+'\n'
line = self.lineindent+vtext
#end if
else:
for n in sorted(val.keys()):
vtext = '{0}({1})={2} '.format(var,n,self.writeval(val[n]))
if len(line)+len(vtext) < self.linewrap:
line+=vtext
else:
text+=line+'\n'
line = self.lineindent+vtext
#end if
#end for
#end if
#end for
text += line+' $end\n'
return text
#end def write
#end class KeywordGroup
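# Illustrative sketch (not part of Nexus): how a keyword group renders to the
# " $GROUP ... $end" input format. The keyword values are made up and the
# helper is never called at import time.
def _demo_keyword_group():
    grp = KeywordGroup('scftyp=rhf mult=1 ispher=1')
    # renders roughly as: " $contrl ispher=1 mult=1 scftyp=rhf  $end"
    return grp.write('contrl')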
class CardGroup(Group):
#input spec page numbers
# ecp 287
# data 37
def readval(self,val):
try:
v = int(val)
except:
try:
v = float(val.replace('d','e'))
except:
v = val
#end try
#end try
return v
#end def readval
def read_tokens(self,line):
tokens = []
for token in line.split():
tokens.append(self.readval(token))
#end for
return tokens
#end def read_tokens
def read_line_tokens(self,text):
line_tokens = []
for line in text.splitlines():
line_tokens.append(self.read_tokens(line))
#end for
return line_tokens
#end def read_line_tokens
def append_text(self,text):
for tokens in self.read_line_tokens(text):
self.append(tokens)
#end for
#end def append_text
def append_list(self,lst):
for tokens in lst:
self.append(tokens)
#end for
#end def append_list
def read(self,inp):
self.clear()
if isinstance(inp,str):
self.append_text(inp)
elif isinstance(inp,list):
self.append_list(inp)
#end if
#end def read
def writeval(self,val):
if isinstance(val,float):
sval = str(val).replace('e','d')
if len(sval)>8 and abs(val)>=10.0:
sval = '{0:16.8e}'.format(val).replace('e','d')
#end if
else:
sval = str(val)
#end if
return sval
#end def writeval
def write(self,name):
text = ' ${0}\n'.format(name)
contents = ''
for n in range(len(self)):
for token in self[n]:
contents += self.writeval(token)+' '
#end for
contents += '\n'
#end for
text+= contents.lstrip()
text+=' $end\n'
return text
#end def write
def list(self):
lst = []
for n in range(len(self)):
lst.append(self[n])
#end for
return lst
#end def list
#end class CardGroup
class FormattedGroup(Group):
def read(self,text):
self.text = str(text)
#end def read
def write(self,name):
#return ' ${0}\n{1} $END\n'.format(name.upper(),self.text.lstrip())
return ' ${0}\n{1} $END\n'.format(name.upper(),self.text)
#end def write
#end class FormattedGroup
# detailed keyword specification groups to check names and types of keyword inputs
class KeywordSpecGroup(KeywordGroup):
keywords = set()
integers = set()
reals = set()
bools = set()
strings = set()
arrays = set()
allowed_values = obj()
def is_consistent(self):
return len(set(self.keys())-self.keywords)==0
#end def is_consistent
def is_valid(self):
valid = self.is_consistent()
for name,val in self.items():
if name in self.allowed_values:
if isinstance(val,str):
val = val.lower()
#end if
valid &= val in self.allowed_values[name]
#end if
#end for
return valid
#end def is_valid
#end class KeywordSpecGroup
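# Hedged validity-check sketch using the ContrlGroup spec defined just below
# (keyword values are illustrative only):
#   g = ContrlGroup(scftyp='rhf', ispher=1)
#   g.is_consistent()                        # -> True, both names are ContrlGroup keywords
#   g.is_valid()                             # -> True, 'rhf' and 1 are allowed values
#   ContrlGroup(scftyp='bogus').is_valid()   # -> False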
class ContrlGroup(KeywordSpecGroup):
keywords = set([
'scftyp','dfttyp','tddft' ,'vbtyp' ,'mplevl','cityp' ,'cctyp' ,
'cimtyp','relwfn','runtyp','numgrd','exetyp','icharg','mult' ,
'coord' ,'units' ,'nzvar' ,'pp' ,'local' ,'ispher','qmttol',
'maxit' ,'molplt','pltorb','aimpac','friend','nfflvl','nprint',
'nosym' ,'etollz','inttyp','grdtyp','normf' ,'normp' ,'itol' ,
'icut' ,'iskprp','irest' ,'geom' ,'ecp' ,'casino'
])
integers = set([
'mplevl','icharg','mult' ,'nzvar' ,'ispher','maxit' ,'nfflvl',
'nprint','nosym' ,'normf','normp' ,'itol' ,'icut' ,'iskprp',
'irest'
])
reals = set(['qmttol' ,'etollz'])
bools = set(['numgrd' ,'molplt','pltorb','aimpac','casino'])
strings = set([
'scftyp','dfttyp','tddft' ,'vbtyp' ,'cityp' ,'cctyp' ,'cimtyp',
'relwfn','runtyp','exetyp','coord' ,'units' ,'pp' ,'local' ,
'friend','inttyp','grdtyp','geom' ,'ecp'
])
allowed_values = obj(
scftyp = set(['rhf','uhf','rohf','gvb','mcscf','none']),
dfttyp = set(['none','slater','becke','gill','optx','pw91x','pbex',
'vwn','vwn3','vwn1rpa','pz81','p86','lyp','pw91c','pbec',
'op','svwn','wvwn1rpa','blyp','bop','bp86','gvwn','gpw91',
'pbevwn','pbeop','olyp','pw91','pbe','edf1','pbe','revpbe',
'rpbe','pbesol','hcth93','hcth120','hcth147','hcth407',
'sogga','mohlyp','b97-d','sogga11','bhhlyp','b3pw91',
'b3lyp','b3lypv1r','b3lypv3','b3p86','b3p86v1r','b3p86v5',
'b97','b97-1','b97-2','b97-3','b97-k','b98','pbe0','x3lyp',
'sogga11x','camb3lyp','wb97','wb97x','wb97x-d','b2plyp',
'wb97x-2','wb97x-2l','vs98','pkzb','thcth','thcthhyb','bmk',
'tpss','tpssh','tpssm','revtpss','dldf','m05','m05-2x',
'm06','m06-l','m06-2x','m06-hf','m08-hx','m08-s0','m11','m11-l',
'none','xalpha','slater','becke','depristo','cama','half',
'vwn','pwloc','lyp','bvwn','blyp','bpwloc','b3lyp','camb',
'xvwn','xpwloc','svwn','spwloc','wigner','ws','wigexp']),
tddft = set(['none','excite','spnflp']),
vbtyp = set(['none','vb2000']),
mplevl = set([0,2]),
cityp = set(['none','cis','sfcis','aldet','ormas','fsoci','genci','guga']),
cctyp = set(['none','lccd','ccd','ccsd','ccsd(t)','r-cc','cr-cc','cr-ccl',
'ccsd(tq)','cr-cc(q)','eom-ccsd','cr-eom','cr-eoml','ip-eom2',
'ip-eom2','ip-eom3a','ea-eom2','ea-eom3a']),
cimtyp = set(['none','secim','decim','gsecim']),
relwfn = set(['none','iotc','dk','resc','nesc']),
runtyp = set(['energy','gradient','hessian','gamma','optimize','trudge',
'sadpoint','mex','conical','irc','vscf','drc','md','globop',
'optfmo','gradextr','surface','comp','g3mp2','prop','raman',
'nacme','nmr','eda','qmefpea','transitn','ffield','tdhf',
'tdhfx','makefp','fmo0']),
exetyp = set(['run','check']),
coord = set(['unique','hint','prinaxis','zmt','zmtmpc','fragonly']),
units = set(['angs','bohr']),
pp = set(['none','read','sbkjc','hw','mcp']),
local = set(['none','boys','ruednbrg','pop','svd']),
ispher = set([-1,0,1]),
friend = set(['hondo','meldf','gamessuk','gaussian','all']),
nfflvl = set([2,3]),
nprint = set([-7,-6,-5,-4,-3,-2,1,2,3,4,5,6,7,8,9]),
nosym = set([0,1]),
inttyp = set(['best','rotaxis','eric','rysquad']),
        grdtyp = set(['best','rysquad']),
normf = set([0,1]),
normp = set([0,1]),
iskprp = set([0,1]),
irest = set([-1,0,1,2,3,4]),
geom = set(['input','daf']),
)
#end class ContrlGroup
class SystemGroup(KeywordSpecGroup):
keywords = set(['mwords','memddi','timlim','parall','kdiag','corefl',
'baltyp','mxseq2','mxseq3','nodext','iosmp','modio' ,
'memory'])
integers = set(['mwords','memddi','kdiag','mxseq2','mxseq3','modio','memory'])
reals = set(['timlim'])
bools = set(['parall','corefl'])
strings = set(['baltyp'])
arrays = set(['nodext','iosmp'])
allowed_values = obj(
kdiag = set([0,1,2,3]),
baltyp = set(['slb','dlb','loop','nxtval']),
modio = set([1,2,4,8,15]),
)
#end class SystemGroup
class GuessGroup(KeywordSpecGroup):
keywords = set(['guess' ,'prtmo' ,'punmo' ,'mix' ,'norb','norder','iorder',
'jorder','insorb','purify','tolz','tole','symden'])
integers = set(['norb','norder','insorb'])
reals = set(['tolz','tole'])
bools = set(['prtmo','punmo','mix','purify','symden'])
strings = set(['guess'])
arrays = set(['iorder','jorder'])
allowed_values = obj(
guess = set(['huckel','hcore','moread','rdmini','mosaved','skip','fmo','hucsub','dmread']),
norder = set([0,1]),
)
#end class GuessGroup
class ScfGroup(KeywordSpecGroup):
keywords = set([
'dirscf','fdiff' ,'noconv','diis' ,'soscf' ,'extrap','damp' ,
'shift' ,'rstrct','dem' ,'cuhf' ,'conv' ,'sogtol','ethrsh',
'maxdii','swdiis','locopt','demcut','dmpcut','uhfnos','vvos' ,
'mvoq' ,'acavo' ,'pacavo','uhfchk','nhomo' ,'nlumo' ,'mom' ,
'kproj' ,'nco' ,'nseto' ,'no' ,'npair' ,'cicoef','couple',
'f' ,'alpha' ,'beta' ,'npunch','npreo' ,'vtscal','scalf' ,
'maxvt' ,'vtconv'
])
integers = set([
'maxdii','mvoq' ,'nhomo' ,'nlumo' ,'kproj','nco','nseto',
'npair' ,'npunch','maxvt'
])
reals = set([
'conv' ,'sogtol','ethrsh' ,'swdiis','demcut','dmpcut',
'scalf' ,'vtconv'
])
bools = set([
'dirscf','fdiff' ,'noconv' ,'diis' ,'soscf' ,'extrap',
'damp' ,'shift' ,'rstrct' ,'dem' ,'cuhf' ,'locopt',
'uhfnos','vvos' ,'acavo' ,'uhfchk','mom' ,'couple',
'vtscal'
])
arrays = set([
'pacavo','no' ,'cicoef','f' ,'alpha' ,'beta' ,
'npreo'
])
allowed_values = obj(
kproj = set([0,1,2]),
)
#end class ScfGroup
class McscfGroup(KeywordSpecGroup):
keywords = set([
'cistep','focas' ,'soscf' ,'fullnr','quad' ,'jacobi','acurcy',
'engtol','maxit' ,'micit' ,'nword' ,'fors' ,'canonc','finci' ,
'diabat','ekt' ,'npunch','npflg' ,'nofo' ,'mcfmo' ,'casdii',
'cashft','nrmcas','qudthr','damp' ,'method','linser','fcore' ,
'mofrz' ,'norb' ,'norot' ,'dropc'
])
integers = set(['maxit','micit','nword','npunch','nofo','mcfmo','nrmcas','norb'])
reals = set(['acurcy','engtol','casdii','cashft','qudthr','damp'])
bools = set(['focas','soscf','fullnr','quad','jacobi','fors','canonc',
'diabat','ekt','linser','fcore','dropc'])
strings = set(['cistep','finci','method'])
arrays = set(['npflg','mofrz','norot'])
allowed_values = obj(
cistep = set(['aldet','ormas','guga','genci','gmcci']),
finci = set(['none','mos','nos']),
nrmcas = set([0,1]),
method = set(['dm2','tei']),
)
#end class McscfGroup
class DftGroup(KeywordSpecGroup):
keywords = set([
'method','dc' ,'idcver','dcchg' ,'dcabc' ,'dcalp' ,'dcsr' ,
'dcs6' ,'dcs8' ,'lrdflg','mltint','lambda','kappa' ,'rzero' ,
'prpol' ,'prcoef','prpair','lc' ,'mu' ,'chf' ,'cmp2' ,
'nrad' ,'nleb' ,'sg1' ,'jans' ,'nrad' ,'nthe' ,'nphi' ,
'swoff' ,'switch','nrad0' ,'nleb0' ,'nthe0' ,'nphi0' ,'thresh',
'gthre' ,'auxfun','three'
])
integers = set(['idcver','prcoef','prpair','nrad','nleb','jans','nthe',
'nphi','nrad0','nleb0','nthe0','nphi0','gthre'])
reals = set(['dcalp','dcsr','dcs6','dcs8','lambda','kappa','rzero',
'mu','chf','cmp2','swoff','switch','thresh'])
bools = set(['dc','dcchg','dcabc','lrdflg','mltint','prpol','lc','sg1',
'three'])
strings = set(['method','auxfun'])
allowed_values = obj(
method = set(['grid','gridfree']),
idcver = set([1,2,3]),
jans = set([1,2]),
auxfun = set(['aux0','aux3']),
)
#end class DftGroup
class GugdiaGroup(KeywordSpecGroup):
keywords = set([
'nstate','prttol','mxxpan','itermx','cvgtol' ,'nword' ,'maxham',
'maxdia','nimprv','nselct','selthr','nextra','kprint','nref','eref'
])
integers = set(['nstate','mxxpan','itermx','nword','maxham','maxdia',
'nimprv','nselct','nextra','nref'])
reals = set(['prttol','cvgtol','selthr','eref'])
arrays = set(['kprint'])
#end class GugdiaGroup
class DrtGroup(KeywordSpecGroup):
keywords = set([
'group','fors' ,'foci' ,'soci','iexcit','intact','nmcc',
'ndoc' ,'naos' ,'nbos' ,'nalp','nval' ,'next' ,'nfzv','stsym',
'noirr','mxnint','mxneme','nprt'
])
integers = set(['iexcit','nmcc','ndoc','naos','nbos','nalp','nval',
'next','nfzv','noirr','mxnint','mxneme','nprt'])
bools = set(['fors','foci','soci','intact'])
strings = set(['group','stsym'])
allowed_values = obj(
group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
'au','b1u','b2u','b3u']),
nprt = set([0,1,2,3]),
)
#end class DrtGroup
class CidrtGroup(KeywordSpecGroup):
keywords = set([
'group','fors' ,'foci' ,'soci','iexcit','intact','nfzc' ,
'ndoc' ,'naos' ,'nbos' ,'nalp','nval' ,'next' ,'nfzv' ,'stsym',
'noirr','mxnint','mxneme','nprt'
])
integers = set(['iexcit','nfzc','ndoc','naos','nbos','nalp','nval',
'next','nfzv','noirr','mxnint','mxneme','nprt'])
bools = set(['fors','foci','soci','intact'])
strings = set(['group','stsym'])
allowed_values = obj(
group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
'au','b1u','b2u','b3u']),
nprt = set([0,1,2,3]),
)
#end class CidrtGroup
class DetGroup(KeywordSpecGroup):
keywords = set([
'ncore' ,'nact' ,'nels' ,'sz' ,'group' ,'stsym' ,'irreps',
'nstate','prttol','analys','itermx','cvgtol','nhgss' ,'nstgss',
'mxxpan','clobbr','pures' ,'iroot' ,'nflgdm','saflg' ,'wstate',
'idwref','dwparm'
])
integers = set(['ncore','nact','nels','nstate','itermx','nhgss','nstgss',
'mxxpan','iroot','idwref'])
reals = set(['sz','prttol','cvgtol','dwparm'])
bools = set(['analys','clobbr','pures','saflg'])
strings = set(['group','stsym'])
arrays = set(['irreps','nflgdm','wstate'])
allowed_values = obj(
group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
'au','b1u','b2u','b3u']),
)
#end class DetGroup
class BasisGroup(KeywordSpecGroup):
keywords = set([
'gbasis','ngauss','ndfunc','nffunc','npfunc','diffsp','diffs',
'polar' ,'split2','split3','basnam','extfil'
])
integers = set(['ngauss','ndfunc','nffunc','npfunc'])
bools = set(['diffsp','diffs','extfil'])
strings = set(['gbasis','polar'])
arrays = set(['split2','split3','basnam'])
allowed_values = obj(
#gbasis = set(['sto','n21','n31','n311','g3l','g3lx','mini','midi','dzv',
# 'dh','tzv','mc']) # many others
ndfunc = set([0,1,2,3]),
nffunc = set([0,1]),
polar = set(['common','popn31','popn311','dunning','huzinaga','hondo7']),
)
#end class BasisGroup
#class XGroup(KeywordSpecGroup):
# keywords = set([''])
# integers = set([''])
# reals = set([''])
# bools = set([''])
# strings = set([''])
# arrays = set([''])
# allowed_values = obj(
# = set([]),
# )
##end class XGroup
class GamessInput(SimulationInput,GIbase):
group_order = '''
contrl system basis ecp data zmat libe
scf scfmi dft tddft cis cisvec mp2
rimp2 auxbas ccinp eominp mopac guess vec
mofrz statpt trudge trurst force cphf cpmchf
mass hess grad dipdr vib vib2 vscf
vibscf gamma eqgeom hlowt glowt irc drc
mex conicl md rdf globop gradex surf
local truncn elmom elpot eldens elfldg points
grid pdc mgc radial molgrf stone raman
alpdr comp nmr morokm lmoeda qmefp ffcalc
tdhf tdhfx efrag fragname frgrpl ewald makefp
prtefp damp dampgs pcm pcmgrd mcpcav tescav
newcav iefpcm pcmitr disbs disrep svp svpirf
cosgms scrf mcp relwfn efield intgrl fmm
trans fmo fmoprp fmoxyz optfmo fmohyb fmobnd
fmoenm fmoend optrst gddi elg dandc dccorr
subscf subcor mp2res ccres ciminp cimatm cimfrg
ffdata ffpdb ciinp det cidet gen cigen
ormas ceeis cedata gcilst gmcpt pdet adddet
remdet sodet drt cidrt mcscf mrmp detpt
mcqdpt excorr casci ivoorb cisort gugem gugdia
gugdm gugdm2 lagran trfdm2 diabat transt
drt1 drt2 vec1 vec2 det1 det2 hess2
'''.split()
all_groups = set(group_order)
key_groups = set(['contrl','system','guess','scf','mcscf','dft',
'gugdia','drt','cidrt','det','basis'])
card_groups = set()
#card_groups = set(['ecp','data','mcp','gcilst','points','stone','efrag',
# 'fragname','frgrpl','dampgs'])#,'fmoxyz'])
formatted_groups = set()
# detailed specifications for certain groups
keyspec_groups = obj(
contrl = ContrlGroup,
system = SystemGroup,
guess = GuessGroup,
scf = ScfGroup,
mcscf = McscfGroup,
dft = DftGroup,
gugdia = GugdiaGroup,
drt = DrtGroup,
cidrt = CidrtGroup,
det = DetGroup,
basis = BasisGroup
)
keyspec_group_order = []
for gname in group_order:
if gname in keyspec_groups:
keyspec_group_order.append(gname)
#end if
#end for
all_keywords = set()
for g in keyspec_groups:
all_keywords |= g.keywords
#end for
group_keyword_overlap = all_groups & all_keywords
all_names = all_groups | all_keywords
#cardspec_groups = obj()
# aliases for generate_gamess_input
group_aliases = obj()
for gname in group_order:
group_aliases['gamess_'+gname]=gname
#end for
all_group_aliases = all_groups | set(group_aliases.keys())
all_name_aliases = all_group_aliases | all_keywords
# gamess file I/O
file_units = obj(
#MCPPATH = -5,BASPATH = -4,EXTCAB = -3,
#MAKEFP = 1, ERICFMT = 2, EXTBAS = 3,
TRAJECT = 4, INPUT = 5,
OUTPUT = 6, PUNCH = 7, AOINTS = 8, MOINTS = 9, DICTNRY = 10,
DRTFILE = 11, CIVECTR = 12, CASINTS = 13, CIINTS = 14, WORK15 = 15,
WORK16 = 16, CSFSAVE = 17, FOCKDER = 18, WORK19 = 19, DASORT = 20,
DFTINTS = 21, DFTGRID = 22, JKFILE = 23, ORDINT = 24, EFPIND = 25,
PCMDATA = 26, PCMINTS = 27, MLTPL = 28, MLTPLT = 29, DAFL30 = 30,
RESTART = 35, HESSIAN = 38, SOCCDAT = 40, AABB41 = 41, BBAA42 = 42,
BBBB43 = 43, REMD = 44, MCQD50 = 50, MCQD51 = 51, MCQD52 = 52,
MCQD53 = 53, MCQD54 = 54, MCQD55 = 55, MCQD56 = 56, MCQD57 = 57,
MCQD58 = 58, MCQD59 = 59, MCQD60 = 60, MCQD61 = 61, MCQD62 = 62,
MCQD63 = 63, MCQD64 = 64, DCPHFH2 = 67, NMRINT1 = 61, CCREST = 70,
CCDIIS = 71, CCINTS = 72, CCT1AMP = 73, CCT2AMP = 74, CCT3AMP = 75,
CCVM = 76, CCVE = 77, CCQUADS = 78, QUADSVO = 79, EOMSTAR = 80,
EOMVEC1 = 81, EOMVEC2 = 82, EOMHC1 = 83, EOMHC2 = 84, EOMHHHH = 85,
EOMPPPP = 86, EOMRAMP = 87, EOMRTMP = 88, EOMDG12 = 89, MMPP = 90,
MMHPP = 91, MMCIVEC = 92, MMCIVC1 = 93, MMCIITR = 94, EOMVL1 = 95,
EOMVL2 = 96, EOMLVEC = 97, EOMHL1 = 98, EOMHL2 = 99, EFMOI = 102,
EFMOF = 103
)
def __init__(self,filepath=None):
if filepath!=None:
self.read(filepath)
#end if
#end def __init__
def read_text(self,contents,filepath=None):
groups = obj()
lines = contents.splitlines()
ingroup = False
incard = False
group_name = None
group_text = ''
gname = ''
gtext = ''
n=0
for line in lines:
ended = False
ls = line.strip()
# specialized parsing for unknown card groups
if ingroup and ls!='$END' and ls!='$end':
gtext+=line+'\n'
#end if
if incard:
ended = ls=='$END' or ls=='$end'
ingroup = not ended
incard = not ended
if ended:
groups[group_name] = group_text
group_name = None
group_text = ''
else:
group_text+=line+'\n'
#end if
elif len(line)>0 and line[0]==' ' and ls!='':
if len(line)>1 and line[1]=='$' and not ingroup:
if not ' ' in ls:
group_name = ls.replace('$','').lower()
gname = group_name
ingroup = True
else:
group_name,ls = ls.split(' ',1)
group_name = group_name.replace('$','').lower()
gname = group_name
text,ended = self.process_line(ls)
group_text += text
ingroup = not ended
if ended:
groups[group_name] = group_text
group_name = None
group_text = ''
#end if
#end if
incard = group_name in self.card_groups
elif ingroup:
text,ended = self.process_line(ls)
group_text += text
ingroup = not ended
if ended:
groups[group_name] = group_text
group_name = None
group_text = ''
#end if
elif not ingroup:
None
else:
self.error('invalid text encountered during read of line number {0}:\n{1}'.format(n,line))
#end if
elif ls=='' or line[0]!=' ' or not ingroup:
None
else:
self.error('invalid text encountered during read of line number {0}:\n{1}'.format(n,line))
#end if
# specialized parsing for unknown card groups
if ended:
if not '=' in groups[gname]:
groups[gname]=gtext
#end if
gtext = ''
gname = ''
#end if
#end for
for group_name,group_text in groups.items():
failed = False
if group_name in self.keyspec_groups:
self[group_name] = self.keyspec_groups[group_name](group_text)
#elif group_name in self.cardspec_groups:
# self[group_name] = self.cardspec_groups[group_name](group_text)
elif group_name in self.key_groups:
self[group_name] = KeywordGroup(group_text)
elif group_name in self.card_groups:
self[group_name] = CardGroup(group_text)
elif '=' in group_text:
try:
self[group_name] = KeywordGroup(group_text)
except:
try:
self[group_name] = FormattedGroup(group_text)
except:
failed = True
#end try
#end try
else:
try:
self[group_name] = FormattedGroup(group_text)
except:
failed = True
#end try
#end if
if failed:
self.message('Read failure: group "{0}" does not appear to be a keyword group\nand a generic read of card data failed\ndata for this group will not be available'.format(group_name))
#end if
#end for
#end def read_text
def process_line(self,ls):
ended = True
if ls.endswith('$END'):
text = ls.replace('$END','')
elif ls.endswith('$end'):
text = ls.replace('$end','')
else:
text = ls
ended = False
#end if
cloc = text.find('!')
if cloc!=-1:
text = text[0:cloc]
#end if
text +='\n'
return text,ended
#end def process_line
def write_text(self,filepath=None):
contents = ''
extra_groups = set(self.keys())-set(self.group_order)
if len(extra_groups)>0:
self.error('write failed\nthe following groups are unknown: {0}'.format(sorted(extra_groups)))
#end if
for group in self.group_order:
if group in self and isinstance(self[group],KeywordGroup):
contents += self[group].write(group)
#end if
#end for
for group in self.group_order:
if group in self and isinstance(self[group],(CardGroup,FormattedGroup)):
contents += self[group].write(group)
#end if
#end for
return contents
#end def write_text
def incorporate_system(self,system):
self.not_implemented()
#end def incorporate_system
#end class GamessInput
def generate_gamess_input(**kwargs):
if 'input_type' in kwargs:
input_type = kwargs['input_type']
del kwargs['input_type']
else:
input_type = 'general'
#end if
if input_type=='general':
gi = generate_any_gamess_input(**kwargs)
else:
GamessInput.class_error('input_type {0} is unrecognized\nvalid options are: general'.format(input_type))
#end if
return gi
#end def generate_gamess_input
ps_inputs = set('descriptor symmetry system pseudos pseudo_bases bases'.split())
ps_defaults = obj()
for var in ps_inputs:
ps_defaults[var]=None
#end for
ps_defaults.set(
descriptor = 'A molecule.',
symmetry = 'C1'
)
kw_defaults = obj()
for var in GamessInput.all_keywords:
kw_defaults[var]=None
#end for
def generate_any_gamess_input(**kwargs):
kwset = set(kwargs.keys())
pskw = ps_defaults.copy()
ps_overlap = ps_inputs & kwset
if len(ps_overlap)>0:
pskw.move_from(kwargs,ps_overlap)
kwset = set(kwargs.keys())
#end if
for name in kwargs.keys():
val = kwargs[name]
if isinstance(val,dict):
kwargs[name] = GIarray(val)
#end if
#end for
kw = kw_defaults.copy()
kw.set(**kwargs)
kwrem = obj(**kwargs)
invalid_names = kwset-GamessInput.all_name_aliases
if len(invalid_names)>0:
GamessInput.class_error('invalid group names or keywords encountered\ninvalid names/keywords provided: {0}\nplease check if these group names or keywords are actually valid GAMESS inputs\nif so, unsupported groups can be generated by providing the keywords as a single argument:\ngenerate_gamess_input(\n ...,\n group_name = obj(assign keywords),\n ...,\n )'.format(sorted(invalid_names)),'generate_gamess_input')
#end if
gi = GamessInput()
# handle groups provided directly by the user
# use aliases to guard against namespace collisions w/ nexus (e.g. system)
group_names = kwset & GamessInput.all_group_aliases
for name in group_names:
group_info = kw[name]
vname = name
if name in GamessInput.group_aliases:
name = GamessInput.group_aliases[name]
#end if
if isinstance(group_info,obj):
for n in group_info.keys():
v = group_info[n]
if isinstance(v,dict):
group_info[n] = GIarray(v)
#end if
#end for
if isinstance(group_info,Group):
gi[name] = group_info
elif name in GamessInput.keyspec_groups:
gi[name] = GamessInput.keyspec_groups[name](**group_info)
#elif name in GamessInput.cardspec_groups:
# gi[name] = GamessInput.cardspec_groups[name](**group_info)
elif name in GamessInput.key_groups:
gi[name] = KeywordGroup(**group_info)
elif name in GamessInput.card_groups:
GamessInput.class_error('card group {0} cannot be generated from a keyword list\nkeyword list provided:\n{1}'.format(name,group_info),'generate_gamess_input')
elif name in GamessInput.formatted_groups:
GamessInput.class_error('formatted group {0} cannot be generated from a keyword list\nkeyword list provided:\n{1}'.format(name,group_info),'generate_gamess_input')
else:
gi[name] = KeywordGroup(**group_info) # assume keyword group
#end if
del kw[vname]
del kwrem[vname]
elif name in GamessInput.group_keyword_overlap:
None
else:
GamessInput.class_error('invalid information provided to initialize group {0}\nyou must provide a dict, obj, or Group\nyou provided {1}'.format(vname,group_info),'generate_gamess_input')
#end if
#end for
# load keywords into groups by group order
# this may not be correct for overlapping keywords between groups!
# user will have to supply explicit keyword subsets by group in obj's as above
for name in GamessInput.keyspec_group_order:
group_type = GamessInput.keyspec_groups[name]
keywords = group_type.keywords & set(kwrem.keys())
if len(keywords)>0:
group_info = obj()
group_info.move_from(kwrem,keywords)
gi[name] = group_type(**group_info)
#end if
#end for
if len(kwrem)>0:
GamessInput.class_error('encountered unrecognized keywords\nunrecognized keywords: {0}\nthese keywords may belong to groups not fully implemented here\nfully supported groups: {1}\nunsupported groups can be generated by providing the keywords as a single argument: group_name = obj(assign keywords)'.format(sorted(kwrem),GamessInput.keyspec_group_order))
#end if
# handle nexus specific input generation keywords
# ecp 287
# data 37
if pskw.system!=None and not 'data' in gi:
system = pskw.system
if not 'contrl' in gi:
gi.contrl = ContrlGroup()
#end if
# allow user override of charge and multiplicity from physical system
gi.contrl.set_optional(
icharg = system.net_charge,
mult = system.net_spin+1,
)
s = system.structure
if s.has_folded():
sf = s.folded_structure
else:
sf = s
#end if
elem_ecp = s.elem
elem = sf.elem
pos = sf.pos
pskw.symmetry = pskw.symmetry.strip()
data = '{0}\n{1}\n'.format(pskw.descriptor,pskw.symmetry)
if pskw.symmetry!='C1':
data+='\n'
#end if
if pskw.pseudos is None:
if pskw.bases!=None:
bss = nexus_noncore.basissets.bases_by_atom(*pskw.bases)
else:
bss = obj()
if 'coord' not in gi.contrl:
gi.contrl.coord = 'unique'
#end if
#end if
for i in range(len(elem)):
a = elem[i]
Z = pt[a].atomic_number
data+='{0} {1:3.2f} {2:16.8f} {3:16.8f} {4:16.8f}\n'.format(a,Z,*pos[i])
if a in bss:
data+=bss[a].text+'\n\n'
#end if
#end for
else:
gi.contrl.set(
coord = 'unique',
ecp = 'read'
)
pps = nexus_noncore.pseudopotentials.pseudos_by_atom(*pskw.pseudos)
for i,a in enumerate(elem):
Z = pt[a].atomic_number
data+='{0} {1} {2:16.8f} {3:16.8f} {4:16.8f}\n'.format(a,Z,*pos[i])
if a in pps:
data += pps[a].basis_text+'\n\n'
#end if
#end for
ecp = ''
atoms = set()
for i,a in enumerate(elem_ecp):
if a in pps:
pp = pps[a]
if a in atoms:
ecp += pp.pp_name+'\n'
else:
ecp += pp.pp_text+'\n'
#end if
#end if
atoms.add(a)
#end for
gi.ecp = FormattedGroup(ecp)
#end if
gi.data = FormattedGroup(data)
#end if
return gi
#end def generate_any_gamess_input
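# Hedged call sketch for generate_gamess_input; all keyword values are illustrative and do not
# form a vetted GAMESS input. Bare keywords are routed to their spec group via
# keyspec_group_order, while groups can also be supplied explicitly to avoid ambiguity:
#   gi = generate_gamess_input(
#       scftyp = 'rhf',                 # routed into $CONTRL
#       runtyp = 'energy',              # routed into $CONTRL
#       mwords = 200,                   # routed into $SYSTEM
#       guess  = obj(guess='huckel'),   # explicit group, bypasses keyword routing
#       )
#   text = gi.write_text()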
def check_keyspec_groups():
from generic import error,warn
groups = GamessInput.keyspec_groups
group_order = GamessInput.group_order
glist = []
for group_name in group_order:
if group_name in groups:
glist.append(group_name)
#end if
#end for
err = ''
wrn = ''
#check for unrecognized groups
extra_groups = set(groups.keys())-set(group_order)
if len(extra_groups)>0:
err += ' encountered unrecognized keyspec groups: {0}\n'.format(sorted(extra_groups))
#end if
#check that integers, reals, bools, strings, and arrays are non-overlapping subsets of keywords
#check that allowed_values are a subset of keywords and values specified are of the correct type
for group_name in glist:
g = groups[group_name]
go = obj(
integers = g.integers,
reals = g.reals,
bools = g.bools,
strings = g.strings,
arrays = g.arrays
)
overlaps = obj()
for tname1,tset1 in go.items():
for tname2,tset2 in go.items():
if tname1!=tname2:
overlap = tset1 & tset2
if len(overlap)>0:
overlaps[tname1,tname2] = sorted(overlap)
#end if
#end if
#end for
#end for
if len(overlaps)>0:
msg = ' keyspec group {0} has overlapping keywords'.format(g.__name__)
for tname1,tname2 in sorted(overlaps.keys()):
msg += ' \n {0} {1} overlap: {2}\n'.format(tname1,tname2,overlaps[tname1,tname2])
#end for
err += msg
#end if
for tname in sorted(go.keys()):
extra_keys = go[tname]-g.keywords
if len(extra_keys)>0:
err += ' keyspec group {0} has unrecognized {1} keywords:\n {2}\n'.format(g.__name__,tname,sorted(extra_keys))
#end if
#end for
extra_keys = set(g.allowed_values.keys())-g.keywords
if len(extra_keys)>0:
err += ' keyspec group {0} has unrecognized allowed_value keywords:\n {1}\n'.format(g.__name__,sorted(extra_keys))
#end if
type_keys = set()
for keys in go:
type_keys |= keys
#end for
undefined = g.keywords-type_keys
if len(undefined)>0:
err += ' keyspec group {0} has keywords w/o type assignment:\n {1}\n'.format(g.__name__,sorted(undefined))
#end if
#check that allowed values for each keyword have the right type
to = obj(
integers = int,
reals = float,
bools = bool,
strings = str,
arrays = ndarray
)
for tname in sorted(go.keys()):
type = to[tname]
for kw in sorted(go[tname]):
if kw in g.allowed_values:
for val in g.allowed_values[kw]:
if not isinstance(val,type):
err += ' allowed values of {0} keyword {1} are not all {2}: {3}\n'.format(g.__name__,kw,tname,sorted(g.allowed_values[kw]))
break
#end if
#end for
#end if
#end for
#end for
#end for
#note any overlapping keywords between groups (this is a feature, not an error)
overlaps = obj()
for gname1 in glist:
kw1 = groups[gname1].keywords
for gname2 in glist:
kw2 = groups[gname2].keywords
if gname1!=gname2:
overlap = kw1 & kw2
if len(overlap)>0:
tup = tuple(sorted((gname1,gname2)))
overlaps[tup] = sorted(overlap)
#end if
#end if
#end for
#end for
if len(overlaps)>0:
wrn += '\n Note: some groups have overlapping keywords\n'
for gname1,gname2 in sorted(overlaps.keys()):
wrn += ' groups {0} and {1} have overlapping keywords:\n {2}\n'.format(gname1,gname2,overlaps[gname1,gname2])
#end for
#end if
#note any overlapping keyword and group names (also a feature)
overlap = GamessInput.all_keywords & set(GamessInput.group_order)
if len(overlap)>0:
wrn += '\n Note: some group names overlap with keywords:\n {0}\n'.format(sorted(overlap))
#end if
if len(err)>0:
error(err)
#end if
if len(wrn)>0:
warn(wrn)
#end if
#end def check_keyspec_groups
#check_keyspec_groups() # uncomment this to check keyword spec group self-consistency
| 35.644223
| 425
| 0.487411
|
795061fdf6421d0e791eb55f1f4dc0e50f7ffef5
| 6,924
|
py
|
Python
|
backend/long_snow_29679/settings.py
|
crowdbotics-apps/long-snow-29679
|
f754361a2931fb8a5c1ca3ec8df99227a4f1c60c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/long_snow_29679/settings.py
|
crowdbotics-apps/long-snow-29679
|
f754361a2931fb8a5c1ca3ec8df99227a4f1c60c
|
[
"FTL",
"AML",
"RSA-MD"
] | 44
|
2021-08-09T23:18:56.000Z
|
2022-01-23T17:01:48.000Z
|
backend/long_snow_29679/settings.py
|
crowdbotics-apps/long-snow-29679
|
f754361a2931fb8a5c1ca3ec8df99227a4f1c60c
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for long_snow_29679 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'long_snow_29679.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'long_snow_29679.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 29.589744
| 112
| 0.730791
|
79506242a402427e2655325f1d9d781c3b834020
| 2,265
|
py
|
Python
|
python/postprocess/1_report.py
|
jpolchlo/cloud-buster
|
c4480c02aaf8ea6fc8a53a484b2a30d0fb935f35
|
[
"MIT"
] | null | null | null |
python/postprocess/1_report.py
|
jpolchlo/cloud-buster
|
c4480c02aaf8ea6fc8a53a484b2a30d0fb935f35
|
[
"MIT"
] | null | null | null |
python/postprocess/1_report.py
|
jpolchlo/cloud-buster
|
c4480c02aaf8ea6fc8a53a484b2a30d0fb935f35
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# The MIT License (MIT)
# =====================
#
# Copyright © 2020 Azavea
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import argparse
import copy
import json
import os
# Given the location of a tif on S3, store a proto source report on
# S3. This can be run on AWS batch or locally.
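# Hedged invocation sketch (bucket and key names are hypothetical):
#   python 1_report.py \
#       --input s3://my-bucket/imagery/scene.tif \
#       --output s3://my-bucket/reports/scene.geojson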
def cli_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, type=str, help='The S3 location of the tif')
parser.add_argument('--output', required=True, type=str, help='The S3 location where the geojson report should go')
return parser
if __name__ == '__main__':
args = cli_parser().parse_args()
input_name = copy.copy(args.input).replace('s3://', '/vsis3/')
info = json.loads(os.popen('gdalinfo -json {}'.format(input_name)).read())
[x, y] = info.get('size')
os.system('gdal_translate -b 14 -co TILED=YES -co SPARSE_OK=YES {} /tmp/out0.tif'.format(input_name))
os.system('gdalwarp -ts {} {} -r max -co TILED=YES -co SPARSE_OK=YES /tmp/out0.tif /tmp/out1.tif'.format(x//4, y//4))
os.system('gdal_polygonize.py /tmp/out1.tif -f GeoJSON /tmp/out.geojson')
os.system('aws s3 cp /tmp/out.geojson {}'.format(args.output))
| 41.944444
| 121
| 0.720971
|
79506318eec81b4aba9adbf4e504eafc07f815d9
| 8,924
|
py
|
Python
|
kospeech/data/audio/feature.py
|
jungwook518/KoSpeech
|
77b8daf2f821c8fa755e937096fdbc3536cafd81
|
[
"Apache-2.0"
] | 1
|
2021-05-10T11:47:03.000Z
|
2021-05-10T11:47:03.000Z
|
kospeech/data/audio/feature.py
|
jungwook518/KoSpeech
|
77b8daf2f821c8fa755e937096fdbc3536cafd81
|
[
"Apache-2.0"
] | null | null | null |
kospeech/data/audio/feature.py
|
jungwook518/KoSpeech
|
77b8daf2f821c8fa755e937096fdbc3536cafd81
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import platform
import numpy as np
from torch import Tensor, FloatTensor
class Spectrogram(object):
"""
    Create a spectrogram from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: torch)
"""
def __init__(
self,
sample_rate: int = 16000,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'torch'
) -> None:
self.sample_rate = sample_rate
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'kaldi':
            # torchaudio is only supported on POSIX systems (Linux, macOS)
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
try:
import torchaudio
except ImportError:
raise ImportError("Please install torchaudio: `pip install torchaudio`")
self.transforms = torchaudio.compliance.kaldi.spectrogram
self.frame_length = frame_length
self.frame_shift = frame_shift
else:
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
def __call__(self, signal):
if self.feature_extract_by == 'kaldi':
spectrogram = self.transforms(
Tensor(signal).unsqueeze(0),
frame_length=self.frame_length,
frame_shift=self.frame_shift,
sample_frequency=self.sample_rate,
).transpose(0, 1)
else:
spectrogram = torch.stft(
Tensor(signal), self.n_fft, hop_length=self.hop_length,
win_length=self.n_fft, window=torch.hamming_window(self.n_fft),
center=False, normalized=False, onesided=True
)
spectrogram = (spectrogram[:, :, 0].pow(2) + spectrogram[:, :, 1].pow(2)).pow(0.5)
spectrogram = np.log1p(spectrogram.numpy())
return spectrogram
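# Hedged usage sketch for Spectrogram; the `signal` variable (a 1-D float array sampled at
# 16 kHz) is an assumption, not part of this module:
#   transform = Spectrogram(sample_rate=16000, frame_length=20, frame_shift=10)
#   spec = transform(signal)   # numpy array; log1p-scaled magnitudes on the default torch path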
class MelSpectrogram(object):
"""
Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram and MelScale.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks to use. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mels = n_mels
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on POSIX systems (Linux, macOS)
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB()
self.transforms = torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
n_mels=n_mels,
)
else:
import librosa
self.transforms = librosa.feature.melspectrogram
self.amplitude_to_db = librosa.amplitude_to_db
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
melspectrogram = self.transforms(Tensor(signal))
melspectrogram = self.amplitude_to_db(melspectrogram)
melspectrogram = melspectrogram.numpy()
elif self.feature_extract_by == 'librosa':
melspectrogram = self.transforms(
signal,
sr=self.sample_rate,
n_mels=self.n_mels,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
melspectrogram = self.amplitude_to_db(melspectrogram, ref=np.max)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return melspectrogram
class MFCC(object):
"""
Create the Mel-frequency cepstrum coefficients (MFCCs) from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
n_mfcc (int): Number of mfc coefficients to retain. (Default: 40)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
        feature_extract_by (str): which library to use for feature extraction (default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mfcc: int = 40,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on POSIX systems (Linux, macOS)
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.transforms = torchaudio.transforms.MFCC(
sample_rate=sample_rate,
n_mfcc=n_mfcc,
log_mels=True,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
)
else:
import librosa
self.transforms = librosa.feature.mfcc
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
mfcc = self.transforms(FloatTensor(signal))
mfcc = mfcc.numpy()
elif self.feature_extract_by == 'librosa':
mfcc = self.transforms(
y=signal,
sr=self.sample_rate,
n_mfcc=self.n_mfcc,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return mfcc
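# Hedged usage sketch for MFCC; the `signal` variable is an assumption, not part of this module:
#   mfcc_transform = MFCC(sample_rate=16000, n_mfcc=40, feature_extract_by='librosa')
#   feats = mfcc_transform(signal)   # numpy array of shape (n_mfcc, n_frames) on the librosa path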
class FilterBank(object):
"""
Create a fbank from a raw audio signal. This matches the input/output of Kaldi’s compute-fbank-feats
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks to use. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10
) -> None:
import torchaudio
self.transforms = torchaudio.compliance.kaldi.fbank
self.sample_rate = sample_rate
self.n_mels = n_mels
self.frame_length = frame_length
self.frame_shift = frame_shift
def __call__(self, signal):
return self.transforms(
Tensor(signal).unsqueeze(0),
num_mel_bins=self.n_mels,
frame_length=self.frame_length,
frame_shift=self.frame_shift,
).transpose(0, 1).numpy()
| 37.813559
| 104
| 0.607127
|
7950636594260776e2b7ee197121c9b0ad116748
| 137
|
py
|
Python
|
notaso/home/urls.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 11
|
2017-03-16T21:47:51.000Z
|
2021-11-30T12:38:59.000Z
|
notaso/home/urls.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 43
|
2015-01-13T14:14:48.000Z
|
2021-12-29T14:21:25.000Z
|
notaso/home/urls.py
|
jpadilla/notaso
|
1c2f94d36b3d360d70f6c9937beb053beb8d8ad3
|
[
"MIT"
] | 5
|
2015-09-27T15:05:36.000Z
|
2019-05-14T17:09:06.000Z
|
from django.urls import path
from .views import HomeView
app_name = "home"
urlpatterns = [path("", HomeView.as_view(), name="index")]
| 17.125
| 58
| 0.715328
|
795064cafbb90517119789c162454bb23529d561
| 5,848
|
py
|
Python
|
core/objs/gap_opiniao.py
|
aanacleto/erp-
|
9c2d5388248cfe4b8cdb8454f6f47df4cb521f0e
|
[
"MIT"
] | null | null | null |
core/objs/gap_opiniao.py
|
aanacleto/erp-
|
9c2d5388248cfe4b8cdb8454f6f47df4cb521f0e
|
[
"MIT"
] | null | null | null |
core/objs/gap_opiniao.py
|
aanacleto/erp-
|
9c2d5388248cfe4b8cdb8454f6f47df4cb521f0e
|
[
"MIT"
] | 2
|
2017-12-04T14:59:22.000Z
|
2018-12-06T18:50:29.000Z
|
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVTek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'gap_opiniao.GAPOpiniao'
import auth, base_models
from orm import *
from form import *
try:
from my_gap_senha import GAPSenha
except:
from gap_senha import GAPSenha
class GAPOpiniao(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'gap_opiniao'
self.__title__ = 'Opinião'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__get_options__ = ['nome']
self.__order_by__ = 'gap_opiniao.nome'
self.__workflow__ = (
'estado', {'Confirmado':[]}
)
self.__auth__ = {
'read':['All'],
'write':['Atendedor'],
'create':['Gestor de Loja'],
'delete':['Gestor de Atendimento'],
'full_access':['Gestor de Atendimento']
}
self.__no_edit__ = [
('estado', ['Confirmado'])
]
self.nome = string_field(view_order = 1, name = 'Nome', args='readonly', size = 80, search=False, onlist=False)
self.contacto = string_field(view_order = 2, args='readonly', name = 'Contacto', onlist=False, size = 40)
self.data = date_field(view_order=3, name ='Data', args='readonly', default=datetime.date.today())
self.hora = time_field(view_order=4, name ='Hora', args='readonly', default=time.strftime('%H:%M:%S'))
self.observacao = text_field(view_order=5, name='Observação', size=100, args="rows=30", onlist=False, search=False)
self.classificacao = string_field(view_order = 6, args='readonly', name = 'Classificação', size = 40)
self.senha = string_field(view_order = 7, args='readonly', name = 'Senha', size = 50)
self.servico = string_field(view_order = 8, name = 'Serviço', args='readonly',size = 50)
self.loja = string_field(view_order = 9, name = 'Loja', size = 50, args='readonly')
self.estado = info_field(view_order = 10, name='Estado', default='Confirmado', args='readonly', hidden=True, nolabel=True, onlist=False)
    # Fetch all available opinions
def get_self(self):
return self.get_options()
def get_opts(self, get_str):
"""
        In every model, get_opts feeds this model's choice and combo fields; it does not call this
        model's get_options functions when they are invoked from another model!
"""
return eval(get_str)
    # Fetch all opinions, filtered by date
def get_opiniao_data(self, data=None):
        # This function fetches opinions for the given date
def get_results():
options = []
opts = self.get(order_by='nome')
for option in opts:
if option['data'] == data:
options.append((str(option['id']), option['nome'] + ' - ' + option['observacao']))
return options
return erp_cache.get(key=self.__model_name__ + '_opiniao_data', createfunc=get_results)
    # Fetch all opinions, filtered by name
def get_opiniao_nome(self, nome=None):
        # This function fetches opinions for the given name
def get_results():
options = []
opts = self.get(order_by='nome')
for option in opts:
if option['nome'] == nome:
options.append((str(option['id']), option['nome'] + ' - ' + option['observacao']))
return options
return erp_cache.get(key=self.__model_name__ + '_opiniao_nome', createfunc=get_results)
    # Insert a record into the gap_opiniao table
def addOpiniao(self,user=None,nome=None,contacto=None,comentario=None,classificacao=None, loja=None, nome_atendedor=None):
try:
from gap_timeAtendimento import GAPTimeAtendimento
            # Fetch the last client served by this attendant, who in principle is the one who gave this rating
result = GAPTimeAtendimento().getLastClient(nome_atendedor=nome_atendedor, loja=loja)
result = str(result).split(";")
senha = result[0]
servico = result[1]
data = datetime.date.today()
hora = datetime.datetime.now().time().strftime('%H:%M:%S')
content = {
'user': user,
'nome': nome,
'contacto':contacto,
'data':data,
'hora':hora,
'observacao':comentario,
'classificacao':classificacao,
'senha':senha,
'servico':servico,
'loja':loja,
'estado':'Confirmado',
}
GAPOpiniao(**content).put()
return True
except:
return False
    # Get the rating for the service
def getRating(self, servico=None, loja=None, dataInicio=None, dataFim=None):
try:
dataInicio = str(dataInicio).split("-")
dataFim = str(dataFim).split("-")
if servico == None:
self.where = "loja='{loja}'".format(loja=loja)
else:
self.where = "servico='{servico}' and loja='{loja}'".format(servico=servico, loja=loja)
opts = self.get()
for option in opts:
data_opiniao = str(option['data']).split("-")
if (datetime.date(int(data_opiniao[0]),int(data_opiniao[1]), int(data_opiniao[2]))>=datetime.date(int(dataInicio[0]),int(dataInicio[1]), int(dataInicio[2]))) and (datetime.date(int(data_opiniao[0]),int(data_opiniao[1]), int(data_opiniao[2]))<=datetime.date(int(dataFim[0]),int(dataFim[1]), int(dataFim[2]))):
return str(option['classificacao'])
return "0.0"
except:
return "0.0"
| 42.686131
| 324
| 0.592168
|
795065dc8d4becb5a3ad8a65c652804b0422514c
| 48
|
py
|
Python
|
torchaudio/compliance/__init__.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 4
|
2022-03-16T15:35:35.000Z
|
2022-03-22T23:55:41.000Z
|
torchaudio/compliance/__init__.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 6
|
2020-09-22T22:19:09.000Z
|
2021-06-21T17:37:32.000Z
|
torchaudio/compliance/__init__.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 1
|
2022-03-16T00:40:40.000Z
|
2022-03-16T00:40:40.000Z
|
from . import kaldi
__all__ = [
'kaldi',
]
| 8
| 19
| 0.5625
|
795065e3c345aaeebef3382663bec1f9002f1eb3
| 2,710
|
py
|
Python
|
arrow/constants.py
|
beucismis/arrow
|
7c9632c09161b1edb67fadb4bf8f3c1c0f5cb101
|
[
"Apache-2.0"
] | null | null | null |
arrow/constants.py
|
beucismis/arrow
|
7c9632c09161b1edb67fadb4bf8f3c1c0f5cb101
|
[
"Apache-2.0"
] | null | null | null |
arrow/constants.py
|
beucismis/arrow
|
7c9632c09161b1edb67fadb4bf8f3c1c0f5cb101
|
[
"Apache-2.0"
] | null | null | null |
"""Constants used internally in arrow."""
import sys
from datetime import datetime
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Final
else:
from typing import Final # pragma: no cover
# datetime.max.timestamp() errors on Windows, so we must hardcode
# the highest possible datetime value that can output a timestamp.
# tl;dr platform-independent max timestamps are hard to form
# See: https://stackoverflow.com/q/46133223
try:
# Get max timestamp. Works on POSIX-based systems like Linux and macOS,
# but will trigger an OverflowError, ValueError, or OSError on Windows
_MAX_TIMESTAMP = datetime.max.timestamp()
except (OverflowError, ValueError, OSError): # pragma: no cover
# Fallback for Windows and 32-bit systems if initial max timestamp call fails
# Must get max value of ctime on Windows based on architecture (x32 vs x64)
# https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/ctime-ctime32-ctime64-wctime-wctime32-wctime64
# Note: this may occur on both 32-bit Linux systems (issue #930) along with Windows systems
is_64bits = sys.maxsize > 2 ** 32
_MAX_TIMESTAMP = (
datetime(3000, 1, 1, 23, 59, 59, 999999).timestamp()
if is_64bits
else datetime(2038, 1, 1, 23, 59, 59, 999999).timestamp()
)
MAX_TIMESTAMP: Final[float] = _MAX_TIMESTAMP
MAX_TIMESTAMP_MS: Final[float] = MAX_TIMESTAMP * 1000
MAX_TIMESTAMP_US: Final[float] = MAX_TIMESTAMP * 1_000_000
MAX_ORDINAL: Final[int] = datetime.max.toordinal()
MIN_ORDINAL: Final[int] = 1
DEFAULT_LOCALE: Final[str] = "en-us"
# Supported dehumanize locales
DEHUMANIZE_LOCALES = {
"en",
"en-us",
"en-gb",
"en-au",
"en-be",
"en-jp",
"en-za",
"en-ca",
"en-ph",
"fr",
"fr-fr",
"fr-ca",
"it",
"it-it",
"es",
"es-es",
"el",
"el-gr",
"ja",
"ja-jp",
"se",
"se-fi",
"se-no",
"se-se",
"sv",
"sv-se",
"zh",
"zh-cn",
"zh-tw",
"zh-hk",
"nl",
"nl-nl",
"af",
"de",
"de-de",
"de-ch",
"de-at",
"nb",
"nb-no",
"nn",
"nn-no",
"pt",
"pt-pt",
"pt-br",
"tl",
"tl-ph",
"vi",
"vi-vn",
"tr",
"tr-tr",
"az",
"az-az",
"da",
"da-dk",
"ml",
"hi",
"fa",
"fa-ir",
"mr",
"ca",
"ca-es",
"ca-ad",
"ca-fr",
"ca-it",
"eo",
"eo-xx",
"bn",
"bn-bd",
"bn-in",
"rm",
"rm-ch",
"ro",
"ro-ro",
"sl",
"sl-si",
"id",
"id-id",
"sw",
"sw-ke",
"sw-tz",
"la",
"la-va",
"lt",
"lt-lt",
"ms",
"ms-my",
"ms-bn",
"or",
"or-in",
}
| 20.530303
| 117
| 0.553137
|
79506699c1eebb48a955024097d9410aedc876c2
| 825
|
py
|
Python
|
scripts/python/0895_max_freq_stack_2.py
|
weirdcoder247/leetcode_solutions
|
77e3579dadf40405411a15532cfca227329dad98
|
[
"MIT"
] | 1
|
2022-02-15T19:08:38.000Z
|
2022-02-15T19:08:38.000Z
|
scripts/python/0895_max_freq_stack_2.py
|
weirdcoder247/leetcode_solutions
|
77e3579dadf40405411a15532cfca227329dad98
|
[
"MIT"
] | null | null | null |
scripts/python/0895_max_freq_stack_2.py
|
weirdcoder247/leetcode_solutions
|
77e3579dadf40405411a15532cfca227329dad98
|
[
"MIT"
] | null | null | null |
class FreqStack:
def __init__(self):
from collections import Counter, defaultdict
self.maxFreq = 0
self.count = Counter()
self.countToStack = defaultdict(list)
def push(self, val):
self.count[val] += 1
self.countToStack[self.count[val]].append(val)
self.maxFreq = max(self.maxFreq, self.count[val])
def pop(self):
val = self.countToStack[self.maxFreq].pop()
self.count[val] -= 1
if not self.countToStack[self.maxFreq]:
self.maxFreq -= 1
return val
def main():
obj = FreqStack()
obj.push(5)
obj.push(7)
obj.push(5)
obj.push(7)
obj.push(4)
obj.push(5)
print(obj.pop())
print(obj.pop())
print(obj.pop())
print(obj.pop())
if __name__ == '__main__':
print(main())
| 22.297297
| 57
| 0.575758
|
79506724751efe20007370d5ce85206bcb9c0e0e
| 92
|
py
|
Python
|
onfleet/config.py
|
YokySantiago/pyonfleet
|
6a31eafdee81a1807a7f9a5e0aa0f0f91ff0bff1
|
[
"MIT"
] | 9
|
2019-05-25T19:01:31.000Z
|
2022-03-23T15:51:58.000Z
|
onfleet/config.py
|
YokySantiago/pyonfleet
|
6a31eafdee81a1807a7f9a5e0aa0f0f91ff0bff1
|
[
"MIT"
] | 15
|
2020-01-18T17:50:24.000Z
|
2022-01-06T00:36:14.000Z
|
onfleet/config.py
|
YokySantiago/pyonfleet
|
6a31eafdee81a1807a7f9a5e0aa0f0f91ff0bff1
|
[
"MIT"
] | 5
|
2019-12-29T23:03:17.000Z
|
2021-12-14T20:23:15.000Z
|
API_VERSION = 'v2'
API_BASE_URL = f'https://onfleet.com/api/{API_VERSION}'
RATE_LIMIT = 20
| 18.4
| 55
| 0.728261
|
795067e8dec6761b6dce5ba5efac62d0ea310744
| 3,034
|
py
|
Python
|
tests/unit/common/etl/transformers/test_cell_expression.py
|
HumanCellAtlas/matrix-service
|
ed9a23d407cce89127b8f0b662c3d2ef2e8fec77
|
[
"MIT"
] | 11
|
2018-10-26T20:47:55.000Z
|
2022-02-02T10:32:42.000Z
|
tests/unit/common/etl/transformers/test_cell_expression.py
|
HumanCellAtlas/matrix-service
|
ed9a23d407cce89127b8f0b662c3d2ef2e8fec77
|
[
"MIT"
] | 379
|
2018-06-04T22:44:33.000Z
|
2020-06-03T00:20:08.000Z
|
tests/unit/common/etl/transformers/test_cell_expression.py
|
HumanCellAtlas/matrix-service
|
ed9a23d407cce89127b8f0b662c3d2ef2e8fec77
|
[
"MIT"
] | 4
|
2018-11-22T01:00:27.000Z
|
2020-09-01T16:42:05.000Z
|
import mock
import os
import unittest
from matrix.common.aws.redshift_handler import TableName
from matrix.common.etl.transformers.cell_expression import CellExpressionTransformer
class TestCellExpressionTransformer(unittest.TestCase):
def setUp(self):
self.transformer = CellExpressionTransformer("")
self.test_table_data = [
(TableName.CELL, ["cell_row_1", "cell_row_2"], "path/to/bundle"),
(TableName.EXPRESSION, ["expr_row_1", "expr_row_2"], "path/to/bundle")
]
def test_write_rows_to_psvs(self):
with mock.patch("gzip.open", mock.mock_open()) as mock_open:
self.transformer._write_rows_to_psvs(self.test_table_data[0],
self.test_table_data[1])
handle = mock_open()
mock_open.assert_any_call("output/cell/bundle.cell.data.gz", "w")
mock_open.assert_any_call("output/expression/bundle.expression.data.gz", "w")
self.assertEqual(handle.writelines.call_count, 2)
def test_parse_ss2(self):
parsed = self.transformer._parse_from_metadatas(
"tests/functional/res/etl/ss2_bundle.version",
"tests/functional/res/etl/ss2_bundle_manifest.json")
cell_table = parsed[0][0]
cell_rows = parsed[0][1]
self.assertEqual(cell_table, TableName.CELL)
self.assertEqual(cell_rows[0],
"635badd5-7d62-4db3-b509-f290a12a1336|635badd5-7d62-4db3-b509-f290a12a1336|"
"c3ba122b-9158-4447-b379-6f5983a2416d|"
"265ab074-6db1-4038-836c-fba3cc2d09cb|f6ff0075-f93e-478a-8ba3-8c798e7f5021|"
"436cd3a5-e510-41db-937d-6c5f4f1b6df7|2019-01-28T133934.450115Z||3859||\n")
expression_table = parsed[1][0]
expression_rows = parsed[1][1]
self.assertEqual(expression_table, TableName.EXPRESSION)
self.assertEqual(expression_rows[0], "635badd5-7d62-4db3-b509-f290a12a1336|ENST00000373020|TPM|92.29\n")
def test_parse_optimus(self):
parsed = self.transformer._parse_from_metadatas(
bundle_dir=os.path.abspath("tests/functional/res/etl/optimus_bundle.version"),
bundle_manifest_path=os.path.abspath("tests/functional/res/etl/optimus_bundle_manifest.json")
)
cell_lines = parsed[0][1]
expression_lines = parsed[1][1]
self.assertEqual(len(cell_lines), 5)
self.assertTrue("5469c35c54d5b403cb00da7d9ea16879|493a6adc-54b5-4388-ba11-c37686562127|"
"dbb40797-8eba-44f8-81d8-6f0c2e2ed0b5|"
"ffb71426-42a4-42c0-89cc-f12b4a806554|17987139-5441-4335-8a36-2ec986eee282|"
"ae725a64-6cb4-4216-942f-37880ed52ed3|2019-05-08T155712.599791Z|"
"AGTGGGAGTACAGACG|12|142|f\n" in cell_lines)
self.assertEqual(len(expression_lines), 174)
self.assertEqual(expression_lines[0], "5469c35c54d5b403cb00da7d9ea16879|GUK1|Count|1.0\n")
| 48.935484
| 112
| 0.659525
|
79506822e4a905e05633eaa2d16f57f77b83ba24
| 31
|
py
|
Python
|
programming/9.1/main.py
|
Sasha-hk/saceit-practice
|
1c6ebfb3881ad1100b5849afa5bb9e306c0aeb51
|
[
"MIT"
] | 1
|
2022-03-22T07:10:37.000Z
|
2022-03-22T07:10:37.000Z
|
programming/9.1/main.py
|
Sasha-hk/saceit-practice
|
1c6ebfb3881ad1100b5849afa5bb9e306c0aeb51
|
[
"MIT"
] | null | null | null |
programming/9.1/main.py
|
Sasha-hk/saceit-practice
|
1c6ebfb3881ad1100b5849afa5bb9e306c0aeb51
|
[
"MIT"
] | null | null | null |
import os
os.system('./main')
| 7.75
| 19
| 0.645161
|
7950687a2fcd0c9f5693ffadcb1d6f2ff370eca4
| 378
|
py
|
Python
|
wrapper/tests/testrunner.py
|
mareklibra/manageiq-v2v-conversion_host
|
067625e3aa4c7188548114fe536dc822ab8c23c3
|
[
"Apache-2.0"
] | 5
|
2020-03-31T10:25:32.000Z
|
2022-01-18T05:06:35.000Z
|
wrapper/tests/testrunner.py
|
mareklibra/manageiq-v2v-conversion_host
|
067625e3aa4c7188548114fe536dc822ab8c23c3
|
[
"Apache-2.0"
] | 88
|
2020-01-09T17:01:12.000Z
|
2022-01-28T16:50:03.000Z
|
wrapper/tests/testrunner.py
|
mareklibra/manageiq-v2v-conversion_host
|
067625e3aa4c7188548114fe536dc822ab8c23c3
|
[
"Apache-2.0"
] | 15
|
2020-01-09T15:10:36.000Z
|
2022-03-30T06:49:39.000Z
|
#!/usr/bin/env python
import unittest
from test_openstack import * # NOQA
from test_output_parser import * # NOQA
from test_ovirt import * # NOQA
from test_routines import * # NOQA
from test_state import * # NOQA
from test_v2v_args import * # NOQA
from test_pre_copy import * # NOQA
from test_osp_wrapper import * # NOQA
if __name__ == '__main__':
unittest.main()
| 25.2
| 40
| 0.73545
|
7950687f88a40394ea0d7336f716dd7c69f7ccab
| 40,901
|
py
|
Python
|
src/xm/core/xmnode.py
|
xm-blockchain/xm-core
|
2282b435a02f061424d656155756d8f50238bcfd
|
[
"MIT"
] | null | null | null |
src/xm/core/xmnode.py
|
xm-blockchain/xm-core
|
2282b435a02f061424d656155756d8f50238bcfd
|
[
"MIT"
] | 1
|
2020-11-26T00:07:31.000Z
|
2020-11-26T00:07:31.000Z
|
src/xm/core/xmnode.py
|
xm-blockchain/xm-core
|
2282b435a02f061424d656155756d8f50238bcfd
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from decimal import Decimal
from typing import Optional, List, Iterator, Tuple
from pyxmlib.pyxmlib import xmHelper, bin2hstr
from twisted.internet import reactor
from xm.core import config
from xm.core.AddressState import AddressState
from xm.core.OptimizedAddressState import OptimizedAddressState
from xm.core.MultiSigAddressState import MultiSigAddressState
from xm.core.Block import Block
from xm.core.ChainManager import ChainManager
from xm.core.ESyncState import ESyncState
from xm.core.misc import ntp
from xm.core.misc.logger import logger
from xm.core.node import POW, SyncState
from xm.core.p2p.p2pChainManager import P2PChainManager
from xm.core.p2p.p2pPeerManager import P2PPeerManager
from xm.core.p2p.p2pTxManagement import P2PTxManagement
from xm.core.p2p.p2pfactory import P2PFactory
from xm.core.txs.CoinBase import CoinBase
from xm.core.txs.multisig.MultiSigCreate import MultiSigCreate
from xm.core.txs.multisig.MultiSigSpend import MultiSigSpend
from xm.core.txs.multisig.MultiSigVote import MultiSigVote
from xm.core.txs.LatticeTransaction import LatticeTransaction
from xm.core.txs.MessageTransaction import MessageTransaction
from xm.core.txs.SlaveTransaction import SlaveTransaction
from xm.core.txs.TokenTransaction import TokenTransaction
from xm.core.txs.TransferTokenTransaction import TransferTokenTransaction
from xm.core.txs.TransferTransaction import TransferTransaction
from xm.generated import xm_pb2
class xmNode:
def __init__(self, mining_address: bytes):
self.start_time = ntp.getTime()
self._sync_state = SyncState()
self.peer_manager = P2PPeerManager()
self.peer_manager.load_peer_addresses()
self.p2pchain_manager = P2PChainManager()
self.tx_manager = P2PTxManagement()
self._chain_manager = None # FIXME: REMOVE. This is temporary
self._p2pfactory = None # FIXME: REMOVE. This is temporary
self._pow = None
self.mining_address = mining_address
reactor.callLater(10, self.monitor_chain_state)
####################################################
####################################################
####################################################
####################################################
@property
def version(self):
return config.dev.version
@property
def sync_state(self) -> SyncState:
return self._sync_state
@property
def state(self):
if self._p2pfactory is None:
return ESyncState.unknown.value
# FIXME
return self._p2pfactory.sync_state.state.value
@property
def num_connections(self):
if self._p2pfactory is None:
return 0
return self._p2pfactory.num_connections
@property
def num_known_peers(self):
return len(self.peer_manager.known_peer_addresses)
@property
def uptime(self):
return ntp.getTime() - self.start_time
@property
def block_height(self):
return self._chain_manager.height
@property
def epoch(self):
if not self._chain_manager.last_block:
return 0
return self._chain_manager.last_block.block_number // config.dev.blocks_per_epoch
@property
def uptime_network(self):
block_one = self._chain_manager.get_block_by_number(1)
network_uptime = 0
if block_one:
network_uptime = ntp.getTime() - block_one.timestamp
return network_uptime
@property
def block_last_reward(self):
if not self._chain_manager.last_block:
return 0
return self._chain_manager.last_block.block_reward
@property
def block_time_mean(self):
block = self._chain_manager.last_block
prev_block_metadata = self._chain_manager.get_block_metadata(block.prev_headerhash)
if prev_block_metadata is None:
return config.dev.block_timing_in_seconds
movavg = self._chain_manager.get_measurement(config.dev,
block.timestamp,
block.prev_headerhash,
prev_block_metadata)
return movavg
@property
def block_time_sd(self):
# FIXME: Keep a moving var
return 0
@property
def coin_supply(self):
# FIXME: Keep a moving var
return self._chain_manager.total_coin_supply
@property
def coin_supply_max(self):
# FIXME: Keep a moving var
return config.dev.max_coin_supply
####################################################
####################################################
####################################################
####################################################
def get_peers_stat(self) -> list:
return self.peer_manager.get_peers_stat()
####################################################
####################################################
####################################################
####################################################
def monitor_chain_state(self):
self.peer_manager.monitor_chain_state()
last_block = self._chain_manager.last_block
block_metadata = self._chain_manager.get_block_metadata(last_block.headerhash)
node_chain_state = xm_pb2.NodeChainState(block_number=last_block.block_number,
header_hash=last_block.headerhash,
cumulative_difficulty=bytes(block_metadata.cumulative_difficulty),
version=config.dev.version,
timestamp=ntp.getTime())
self.peer_manager.broadcast_chain_state(node_chain_state=node_chain_state)
channel = self.peer_manager.get_better_difficulty(block_metadata.cumulative_difficulty)
logger.debug('Got better difficulty %s', channel)
if channel:
logger.debug('Connection id >> %s', channel.peer)
channel.send_get_headerhash_list(self._chain_manager.height)
reactor.callLater(config.user.chain_state_broadcast_period, self.monitor_chain_state)
# FIXME: REMOVE. This is temporary
def set_chain_manager(self, chain_manager: ChainManager):
self._chain_manager = chain_manager
####################################################
####################################################
####################################################
####################################################
def start_pow(self, mining_thread_count):
self._pow = POW(chain_manager=self._chain_manager,
p2p_factory=self._p2pfactory,
sync_state=self._sync_state,
time_provider=ntp,
mining_address=self.mining_address,
mining_thread_count=mining_thread_count)
self._pow.start()
def start_listening(self):
self._p2pfactory = P2PFactory(chain_manager=self._chain_manager,
sync_state=self.sync_state,
xm_node=self) # FIXME: Try to avoid cyclic references
self.peer_manager.set_p2p_factory(self._p2pfactory)
self._p2pfactory.start_listening()
####################################################
####################################################
####################################################
####################################################
@staticmethod
def validate_amount(amount_str: str) -> bool:
# FIXME: Refactored code. Review Decimal usage all over the code
Decimal(amount_str)
return True
####################################################
####################################################
####################################################
####################################################
@staticmethod
def create_multi_sig_txn(signatories: list,
weights: list,
threshold: int,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return MultiSigCreate.create(signatories=signatories,
weights=weights,
threshold=threshold,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_multi_sig_spend_txn(multi_sig_address: bytes,
addrs_to: list,
amounts: list,
expiry_block_number: int,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return MultiSigSpend.create(multi_sig_address=multi_sig_address,
addrs_to=addrs_to,
amounts=amounts,
expiry_block_number=expiry_block_number,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_multi_sig_vote_txn(shared_key: bytes,
unvote: bool,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return MultiSigVote.create(shared_key=shared_key,
unvote=unvote,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_message_txn(message_hash: bytes,
addr_to: bytes,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return MessageTransaction.create(message_hash=message_hash,
addr_to=addr_to,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_token_txn(symbol: bytes,
name: bytes,
owner: bytes,
decimals: int,
initial_balances,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return TokenTransaction.create(symbol,
name,
owner,
decimals,
initial_balances,
fee,
xmss_pk,
master_addr)
@staticmethod
def create_transfer_token_txn(addrs_to: list,
token_txhash: bytes,
amounts: list,
fee: int,
xmss_pk: bytes,
master_addr: bytes):
return TransferTokenTransaction.create(token_txhash,
addrs_to,
amounts,
fee,
xmss_pk,
master_addr)
def create_send_tx(self,
addrs_to: list,
amounts: list,
message_data: bytes,
fee: int,
xmss_pk: bytes,
master_addr: bytes) -> TransferTransaction:
addr_from = self.get_addr_from(xmss_pk, master_addr)
balance = self._chain_manager.get_address_balance(addr_from)
if sum(amounts) + fee > balance:
raise ValueError("Not enough funds in the source address")
return TransferTransaction.create(addrs_to=addrs_to,
amounts=amounts,
message_data=message_data,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_slave_tx(slave_pks: list,
access_types: list,
fee: int,
xmss_pk: bytes,
master_addr: bytes) -> SlaveTransaction:
return SlaveTransaction.create(slave_pks=slave_pks,
access_types=access_types,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
@staticmethod
def create_lattice_tx(pk1: bytes,
pk2: bytes,
pk3: bytes,
fee: int,
xmss_pk: bytes,
master_addr: bytes) -> LatticeTransaction:
return LatticeTransaction.create(pk1=pk1,
pk2=pk2,
pk3=pk3,
fee=fee,
xmss_pk=xmss_pk,
master_addr=master_addr)
# FIXME: Rename this appropriately
def submit_send_tx(self, tx) -> bool:
if tx is None:
raise ValueError("The transaction was empty")
if self._chain_manager.tx_pool.is_full_pending_transaction_pool():
raise ValueError("Pending Transaction Pool is full")
        return self._p2pfactory.add_unprocessed_txn(tx, ip=None)  # TODO (cyyber): Replace None with the IP that made the API request
@staticmethod
def get_addr_from(xmss_pk, master_addr):
if master_addr:
return master_addr
return bytes(xmHelper.getAddress(xmss_pk))
def get_address_is_used(self, address: bytes) -> bool:
if not OptimizedAddressState.address_is_valid(address):
raise ValueError("Invalid Address")
return self._chain_manager.get_address_is_used(address)
def get_address_state(self, address: bytes) -> AddressState:
if address != config.dev.coinbase_address and not AddressState.address_is_valid(address):
raise ValueError("Invalid Address")
address_state = self._chain_manager.get_address_state(address)
return address_state
def get_optimized_address_state(self, address: bytes) -> OptimizedAddressState:
if address != config.dev.coinbase_address and not OptimizedAddressState.address_is_valid(address):
raise ValueError("Invalid Address")
address_state = self._chain_manager.get_optimized_address_state(address)
return address_state
def get_multi_sig_address_state(self, address: bytes) -> MultiSigAddressState:
if not MultiSigAddressState.address_is_valid(address):
raise ValueError("Invalid Address")
multi_sig_address_state = self._chain_manager.get_multi_sig_address_state(address)
return multi_sig_address_state
def get_ots(self,
address: bytes,
page_from: int,
page_count: int,
unused_ots_index_from: int) -> (list, Optional[int], bool):
if not OptimizedAddressState.address_is_valid(address):
return None, None, None
max_bitfield = 2 ** OptimizedAddressState.get_height_from_address(address)
max_pages = (max_bitfield // config.dev.ots_tracking_per_page) + 1
page_from = min(page_from, max_pages)
max_pages = min(page_from + page_count - 1, max_pages)
bitfields = list()
for page in range(page_from, max_pages + 1):
bitfield = self._chain_manager.get_bitfield(address, page)
bitfields.append(xm_pb2.OTSBitfieldByPage(ots_bitfield=bitfield, page_number=page))
unused_ots_index = self._chain_manager.get_unused_ots_index2(address, unused_ots_index_from)
unused_ots_index_found = unused_ots_index is not None
return bitfields, unused_ots_index, unused_ots_index_found
def is_slave(self, master_address: bytes, slave_pk: bytes):
return self._chain_manager.is_slave(master_address, slave_pk)
def get_all_address_state(self) -> list:
return self._chain_manager.get_all_address_state()
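    # Illustrative note on the _load_* helpers below: they appear to share one
    # paging scheme in which item indices count from the oldest stored entry,
    # data is fetched in pages of config.dev.data_per_page starting at the page
    # containing start_item_index, and the final slice is reversed so the newest
    # items come first.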
def _load_transaction_hashes(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.transaction_hash_count() - item_per_page * page_number)
end_item_index = min(address_state.transaction_hash_count(), start_item_index + item_per_page)
transaction_hashes = self._chain_manager.get_transaction_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
transaction_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
transaction_hashes.extend(self._chain_manager.get_transaction_hashes(address,
actual_start_item_index))
return transaction_hashes[:item_per_page][-1::-1]
def _load_multi_sig_spend_txn_hashes(self,
address: bytes,
item_per_page: int,
page_number: int,
mode: int) -> list:
if OptimizedAddressState.address_is_valid(address):
address_state = self._chain_manager.get_optimized_address_state(address)
elif MultiSigAddressState.address_is_valid(address):
address_state = self._chain_manager.get_multi_sig_address_state(address)
else:
return []
start_item_index = max(0, address_state.multi_sig_spend_count() - item_per_page * page_number)
end_item_index = min(address_state.multi_sig_spend_count(), start_item_index + item_per_page)
if mode > 0:
start_item_index = 0
end_item_index = address_state.multi_sig_spend_count()
transaction_hashes = self._chain_manager.get_multi_sig_spend_txn_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
multi_sig_spend_txn_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index and len(multi_sig_spend_txn_hashes) < item_per_page:
actual_start_item_index += config.dev.data_per_page
multi_sig_spend_txn_hashes.extend(self._chain_manager.get_multi_sig_spend_txn_hashes(address,
actual_start_item_index))
return multi_sig_spend_txn_hashes[:item_per_page][-1::-1]
def _load_token_transaction_hashes(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.tokens_count() - item_per_page * page_number)
end_item_index = min(address_state.tokens_count(), start_item_index + item_per_page)
transaction_hashes = self._chain_manager.get_token_transaction_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
token_transaction_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
token_transaction_hashes.extend(self._chain_manager.get_token_transaction_hashes(address,
actual_start_item_index))
return token_transaction_hashes[:item_per_page][-1::-1]
def _load_slave_transaction_hashes(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.slaves_count() - item_per_page * page_number)
end_item_index = min(address_state.slaves_count(), start_item_index + item_per_page)
if start_item_index < 0:
return []
transaction_hashes = self._chain_manager.get_slave_transaction_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
token_transaction_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
token_transaction_hashes.extend(self._chain_manager.get_slave_transaction_hashes(address,
actual_start_item_index))
return token_transaction_hashes[:item_per_page][-1::-1]
def _load_lattice_pks_transaction_hashes(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.lattice_pk_count() - item_per_page * page_number)
end_item_index = min(address_state.lattice_pk_count(), start_item_index + item_per_page)
transaction_hashes = self._chain_manager.get_lattice_pks_transaction_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
lattice_pks_transaction_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
lattice_pks_transaction_hashes.extend(self._chain_manager.get_lattice_pks_transaction_hashes(address,
actual_start_item_index))
return lattice_pks_transaction_hashes[:item_per_page][-1::-1]
def _load_multi_sig_addresses(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.multi_sig_address_count() - item_per_page * page_number)
end_item_index = min(address_state.multi_sig_address_count(), start_item_index + item_per_page)
multi_sig_addresses = self._chain_manager.get_multi_sig_addresses(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
multi_sig_addresses = multi_sig_addresses[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
multi_sig_addresses.extend(self._chain_manager.get_multi_sig_addresses(address,
actual_start_item_index))
return multi_sig_addresses[:item_per_page][-1::-1]
def _load_inbox_message_transaction_hashes(self, address: bytes, item_per_page: int, page_number: int) -> list:
address_state = self._chain_manager.get_optimized_address_state(address)
start_item_index = max(0, address_state.inbox_message_count() - item_per_page * page_number)
end_item_index = min(address_state.inbox_message_count(), start_item_index + item_per_page)
transaction_hashes = self._chain_manager.get_inbox_message_transaction_hashes(address,
start_item_index)
actual_start_item_index = (start_item_index // config.dev.data_per_page) * config.dev.data_per_page
inbox_message_transaction_hashes = transaction_hashes[start_item_index - actual_start_item_index:]
while actual_start_item_index < end_item_index:
actual_start_item_index += config.dev.data_per_page
inbox_message_transaction_hashes.extend(self._chain_manager.get_inbox_message_transaction_hashes(address,
actual_start_item_index))
return inbox_message_transaction_hashes[:item_per_page][-1::-1]
def get_mini_transactions_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page == 0:
return None
mini_transactions = []
transaction_hashes = self._load_transaction_hashes(address, item_per_page, page_number)
response = xm_pb2.GetMiniTransactionsByAddressResp()
for tx_hash in transaction_hashes:
mini_transaction = xm_pb2.MiniTransaction()
mini_transaction.transaction_hash = bin2hstr(tx_hash)
tx, _ = self._chain_manager.get_tx_metadata(tx_hash)
amount = 0
if tx.addr_from == address:
amount -= tx.fee
if isinstance(tx, TransferTransaction):
if tx.addr_from == address:
amount -= tx.total_amount
try:
for i in range(len(tx.addrs_to)):
if tx.addrs_to[i] == address:
amount += tx.amounts[i]
except ValueError:
pass
elif isinstance(tx, CoinBase):
if tx.addr_to == address:
amount += tx.amount
elif isinstance(tx, MultiSigSpend):
try:
for i in range(len(tx.addrs_to)):
if tx.addrs_to[i] == address:
amount += tx.amounts[i]
except ValueError:
pass
if amount < 0:
mini_transaction.out = True
mini_transaction.amount = abs(amount)
mini_transactions.append(mini_transaction)
response.mini_transactions.extend(mini_transactions)
response.balance = self._chain_manager.get_address_balance(address)
return response
def get_transactions_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page == 0:
return None
transaction_hashes = self._load_transaction_hashes(address,
item_per_page,
page_number)
response = xm_pb2.GetTransactionsByAddressResp()
for tx_hash in transaction_hashes:
tx, block_number = self._chain_manager.get_tx_metadata(tx_hash)
b = self.get_block_from_index(block_number)
transaction_detail = xm_pb2.GetTransactionResp(tx=tx.pbdata,
confirmations=self.block_height - block_number + 1,
block_number=block_number,
block_header_hash=b.headerhash,
timestamp=b.timestamp,
addr_from=tx.addr_from)
response.transactions_detail.extend([transaction_detail])
return response
def get_multi_sig_spend_txs_by_address(self,
address: bytes,
item_per_page: int,
page_number: int,
filter_type: int):
# filter_type = 0 | No Filter (default)
# filter_type = 1 | Executed Only (All executed are considered to be expired)
# filter_type = 2 | Non Executed
# filter_type = 3 | Expired
# filter_type = 4 | Non Expired
# filter_type = 5 | Non Executed & Expired
# filter_type = 6 | Non Executed & Non Expired
if item_per_page == 0:
return None
transaction_hashes = self._load_multi_sig_spend_txn_hashes(address,
item_per_page,
page_number,
filter_type)
response = xm_pb2.GetMultiSigSpendTxsByAddressResp()
for tx_hash in transaction_hashes:
if filter_type in (1, 2, 5, 6):
vote_stats = self._chain_manager.get_vote_stats(tx_hash)
if filter_type == 1 and not vote_stats.executed:
continue
if filter_type in (2, 5, 6) and vote_stats.executed:
continue
tx, block_number = self._chain_manager.get_tx_metadata(tx_hash)
current_block_number = self._chain_manager.height
is_expired = tx.expiry_block_number <= current_block_number
if filter_type in (4, 6):
if is_expired:
continue
if filter_type in (3, 5):
if not is_expired:
continue
b = self.get_block_from_index(block_number)
transaction_detail = xm_pb2.GetTransactionResp(tx=tx.pbdata,
confirmations=self.block_height - block_number + 1,
block_number=block_number,
block_header_hash=b.headerhash,
timestamp=b.timestamp,
addr_from=tx.addr_from)
response.transactions_detail.extend([transaction_detail])
return response
def get_vote_stats(self, multi_sig_spend_tx_hash: bytes):
vote_stats = self._chain_manager.get_vote_stats(multi_sig_spend_tx_hash)
return xm_pb2.GetVoteStatsResp(vote_stats=vote_stats.pbdata)
def get_inbox_messages_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page == 0:
return None
transaction_hashes = self._load_inbox_message_transaction_hashes(address,
item_per_page,
page_number)
response = xm_pb2.GetTransactionsByAddressResp()
for tx_hash in transaction_hashes:
tx, block_number = self._chain_manager.get_tx_metadata(tx_hash)
b = self.get_block_from_index(block_number)
transaction_detail = xm_pb2.GetTransactionResp(tx=tx.pbdata,
confirmations=self.block_height - block_number + 1,
block_number=block_number,
block_header_hash=b.headerhash,
timestamp=b.timestamp,
addr_from=tx.addr_from)
response.transactions_detail.extend([transaction_detail])
return response
def get_tokens_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page == 0:
return None
token_hashes = self._load_token_transaction_hashes(address, item_per_page, page_number)
response = xm_pb2.GetTokensByAddressResp()
for tx_hash in token_hashes:
tx, _ = self._chain_manager.get_tx_metadata(tx_hash)
balance = self._chain_manager.get_token(address, tx.txhash)
transaction_detail = xm_pb2.TokenDetail(token_txhash=tx.txhash,
name=tx.name,
symbol=tx.symbol,
balance=balance)
response.tokens_detail.extend([transaction_detail])
return response
def get_slaves_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page > config.dev.data_per_page or item_per_page == 0:
return None
slave_hashes = self._load_slave_transaction_hashes(address, item_per_page, page_number)
response = xm_pb2.GetSlavesByAddressResp()
for tx_hash in slave_hashes:
tx, _ = self._chain_manager.get_tx_metadata(tx_hash)
for index in range(0, len(tx.slave_pks)):
transaction_detail = xm_pb2.SlaveDetail(slave_address=bytes(xmHelper.getAddress(tx.slave_pks[index])),
access_type=tx.access_types[index])
response.slaves_detail.extend([transaction_detail])
return response
def get_lattice_pks_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page > config.dev.data_per_page or item_per_page == 0:
return None
lattice_pk_hashes = self._load_lattice_pks_transaction_hashes(address, item_per_page, page_number)
response = xm_pb2.GetLatticePKsByAddressResp()
for tx_hash in lattice_pk_hashes:
tx, _ = self._chain_manager.get_tx_metadata(tx_hash)
transaction_detail = xm_pb2.LatticePKsDetail(pk1=tx.pk1,
pk2=tx.pk2,
pk3=tx.pk3,
tx_hash=tx_hash)
response.lattice_pks_detail.extend([transaction_detail])
return response
def get_multi_sig_addresses_by_address(self, address: bytes, item_per_page: int, page_number: int):
if item_per_page > config.dev.data_per_page or item_per_page == 0:
return None
multi_sig_addresses = self._load_multi_sig_addresses(address,
item_per_page,
page_number)
response = xm_pb2.GetMultiSigAddressesByAddressResp()
for multi_sig_address in multi_sig_addresses:
multi_sig_detail = xm_pb2.MultiSigDetail(
address=multi_sig_address,
balance=self._chain_manager.get_multi_sig_address_state(multi_sig_address).balance,
)
response.multi_sig_detail.extend([multi_sig_detail])
return response
def get_transaction(self, query_hash: bytes):
"""
This method returns an object that matches the query hash
"""
# FIXME: At some point, all objects in DB will indexed by a hash
# TODO: Search tx hash
# FIXME: We dont need searches, etc.. getting a protobuf indexed by hash from DB should be enough
# FIXME: This is just a workaround to provide functionality
result = self._chain_manager.get_tx_metadata(query_hash)
return result
def get_block_header_hash_by_number(self, query_block_number: int):
return self._chain_manager.get_block_header_hash_by_number(query_block_number)
def get_unconfirmed_transaction(self, query_hash: bytes):
result = self._chain_manager.get_unconfirmed_transaction(query_hash)
return result
def get_block_last(self) -> Optional[Block]:
"""
This method returns an object that matches the query hash
"""
return self._chain_manager.last_block
def get_block_from_hash(self, query_hash: bytes) -> Optional[Block]:
"""
This method returns an object that matches the query hash
"""
return self._chain_manager.get_block(query_hash)
def get_block_from_index(self, index: int) -> Block:
"""
This method returns an object that matches the query hash
"""
return self._chain_manager.get_block_by_number(index)
def get_blockidx_from_txhash(self, transaction_hash):
result = self._chain_manager.get_tx_metadata(transaction_hash)
if result:
return result[1]
return None
def get_latest_blocks(self, offset, count) -> List[Block]:
answer = []
end = self.block_height - offset
start = max(0, end - count + 1)
for blk_idx in range(start, end + 1):
answer.append(self._chain_manager.get_block_by_number(blk_idx))
return answer
def get_latest_transactions(self, offset, count):
answer = []
skipped = 0
for tx in self._chain_manager.get_last_transactions():
if skipped >= offset:
answer.append(tx)
if len(answer) >= count:
break
else:
skipped += 1
return answer
def get_latest_transactions_unconfirmed(self, offset, count):
answer = []
skipped = 0
for tx_set in self._chain_manager.tx_pool.transactions:
if skipped >= offset:
answer.append(tx_set[1])
if len(answer) >= count:
break
else:
skipped += 1
return answer
def get_node_info(self) -> xm_pb2.NodeInfo:
info = xm_pb2.NodeInfo()
info.version = self.version
info.state = self.state
info.num_connections = self.num_connections
info.num_known_peers = self.num_known_peers
info.uptime = self.uptime
info.block_height = self.block_height
info.block_last_hash = self._chain_manager.last_block.headerhash
info.network_id = config.user.genesis_prev_headerhash
return info
def get_block_timeseries(self, block_count) -> Iterator[xm_pb2.BlockDataPoint]:
result = []
if self.block_height <= 0:
return result
block = self._chain_manager.last_block
if block is None:
return result
headerhash_current = block.headerhash
while len(result) < block_count:
data_point = self._chain_manager.get_block_datapoint(headerhash_current)
if data_point is None:
break
result.append(data_point)
headerhash_current = data_point.header_hash_prev
return reversed(result)
def get_blockheader_and_metadata(self, block_number=0) -> Tuple:
return self._chain_manager.get_blockheader_and_metadata(block_number)
def get_block_to_mine(self, wallet_address) -> list:
return self._chain_manager.get_block_to_mine(self._pow.miner, wallet_address)
def submit_mined_block(self, blob) -> bool:
return self._pow.miner.submit_mined_block(blob)
| 46.744
| 134
| 0.565756
|
795069f039b36c276fc23713680b7eaceb08a711
| 5,571
|
py
|
Python
|
configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
|
AiLvv/mmdetection
|
2489aa4fc8df3b4c39c1cc72571a677e4451a44c
|
[
"Apache-2.0"
] | null | null | null |
configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
|
AiLvv/mmdetection
|
2489aa4fc8df3b4c39c1cc72571a677e4451a44c
|
[
"Apache-2.0"
] | null | null | null |
configs/dcn/faster_rcnn_mdconv_c3-c5_r50_fpn_1x.py
|
AiLvv/mmdetection
|
2489aa4fc8df3b4c39c1cc72571a677e4451a44c
|
[
"Apache-2.0"
] | null | null | null |
# model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=20)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 300
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_mdconv_c3-c5_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 31.122905
| 78
| 0.596302
|
79506b7328a6db4d8cb961e4a1464df5490eb29d
| 2,750
|
py
|
Python
|
gupb/controller/berserk/knowledge_decoder.py
|
domo23/GUPB
|
198e0cd1121fff71065b567af1e1809ce382c28d
|
[
"MIT"
] | null | null | null |
gupb/controller/berserk/knowledge_decoder.py
|
domo23/GUPB
|
198e0cd1121fff71065b567af1e1809ce382c28d
|
[
"MIT"
] | null | null | null |
gupb/controller/berserk/knowledge_decoder.py
|
domo23/GUPB
|
198e0cd1121fff71065b567af1e1809ce382c28d
|
[
"MIT"
] | null | null | null |
from gupb.model import characters
from gupb.model.coordinates import Coords
from gupb.model.arenas import Arena
from pathfinding.core.grid import Grid
class KnowledgeDecoder:
def __init__(self, knowledge: characters.ChampionKnowledge = None):
self._knowledge = knowledge
self._info = {}
self.arena = None
self.map = self.load_map('isolated_shrine')
def decode(self):
tile = self.knowledge.visible_tiles.get(self.knowledge.position)
character = tile.character if tile else None
weapon = character.weapon.name if character else "knife"
health = character.health
facing = character.facing
self._info['weapon'] = weapon
self._info['health'] = health
self._info['facing'] = facing
self._info['enemies_in_sight'] = self._get_enemies_in_sight()
self._info['weapons_in_sight'] = self._get_weapons_in_sight()
def _get_weapons_in_sight(self):
return [Coords(*coords) for coords, tile in self.knowledge.visible_tiles.items()
if tile.loot and coords != self.knowledge.position and tile.loot.name not in ["knife", "amulet", "bow"]]
def _get_enemies_in_sight(self):
return [Coords(*coords) for coords, tile in self.knowledge.visible_tiles.items()
if tile.character and coords != self.knowledge.position]
def _get_nearest_area(self, d=4):
nearest_area = []
for i in range(-d, d + 1):
for j in range(-d, d + 1):
nearest_area.append(self.knowledge.position + Coords(i, j))
return [point for point in nearest_area if point in self.knowledge.visible_tiles.keys()]
def _look_for_mist(self):
visible_tiles = self.knowledge.visible_tiles
mist_coords = []
for coord in self._get_nearest_area():
tile = visible_tiles[coord]
for effect in tile.effects:
if effect.type == 'mist':
mist_coords.append(coord)
return mist_coords
@property
def knowledge(self):
return self._knowledge
@knowledge.setter
def knowledge(self, new_knowledge):
self._knowledge = new_knowledge
self.decode()
def load_map(self, map_name):
arena = Arena.load(map_name)
self.arena = arena
map_matrix = [[1 for x in range(arena.size[0])] for y in range(arena.size[1])]
for cords, tile in arena.terrain.items():
map_matrix[cords.y][cords.x] = 0 if tile.description().type in ['wall', 'sea'] else 1
if tile.description().loot:
                map_matrix[cords.y][cords.x] = 0 if tile.description().loot.name in ["knife", "amulet", "bow"] else 1
return map_matrix
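# Illustrative sketch, not part of the original file: the walkability matrix
# built by load_map matches the imported pathfinding Grid, so a hypothetical
# caller could do something like
#
#     grid = Grid(matrix=decoder.map)
#     start, end = grid.node(1, 1), grid.node(5, 5)  # hypothetical coordinates
#
# and then run a finder (e.g. pathfinding.finder.a_star.AStarFinder) over the
# walkable (value 1) tiles.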
| 38.732394
| 120
| 0.638909
|
79506c0bda256b86ee776eefeebae54115491fe3
| 15,805
|
py
|
Python
|
homeassistant/components/stream/__init__.py
|
hermelin44/core
|
4885fa01b9678eb5f5fa6b7eba64bf9af9f74ffb
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/stream/__init__.py
|
hermelin44/core
|
4885fa01b9678eb5f5fa6b7eba64bf9af9f74ffb
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/stream/__init__.py
|
hermelin44/core
|
4885fa01b9678eb5f5fa6b7eba64bf9af9f74ffb
|
[
"Apache-2.0"
] | null | null | null |
"""Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
from collections.abc import Callable, Mapping
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from typing import cast
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ENDPOINTS,
ATTR_SETTINGS,
ATTR_STREAMS,
CONF_LL_HLS,
CONF_PART_DURATION,
CONF_SEGMENT_DURATION,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
SEGMENT_DURATION_ADJUSTER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
from .core import PROVIDERS, IdleTimer, KeyFrameConverter, StreamOutput, StreamSettings
from .hls import HlsStreamOutput, async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_REDACT_PATTERN = [
(re.compile(r"//.*:.*@"), "//****:****@"),
(re.compile(r"\?auth=.*"), "?auth=****"),
]
def redact_credentials(data: str) -> str:
"""Redact credentials from string data."""
for (pattern, repl) in STREAM_SOURCE_REDACT_PATTERN:
data = pattern.sub(repl, data)
return data
def create_stream(
hass: HomeAssistant,
stream_source: str,
options: dict[str, str],
stream_label: str | None = None,
) -> Stream:
"""Create a stream with the specified identfier based on the source url.
The stream_source is typically an rtsp url (though any url accepted by ffmpeg is fine) and
options are passed into pyav / ffmpeg as options.
The stream_label is a string used as an additional message in logging.
"""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options, stream_label=stream_label)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
DOMAIN_SCHEMA = vol.Schema(
{
vol.Optional(CONF_LL_HLS, default=True): cv.boolean,
vol.Optional(CONF_SEGMENT_DURATION, default=6): vol.All(
cv.positive_float, vol.Range(min=2, max=10)
),
vol.Optional(CONF_PART_DURATION, default=1): vol.All(
cv.positive_float, vol.Range(min=0.2, max=1.5)
),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: DOMAIN_SCHEMA,
},
extra=vol.ALLOW_EXTRA,
)
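# For reference (an illustrative note, not in the original source): DOMAIN_SCHEMA
# validates a plain mapping keyed by the imported option constants, e.g. a
# hypothetical ``DOMAIN_SCHEMA({CONF_SEGMENT_DURATION: 6, CONF_PART_DURATION: 1})``
# passes, while values outside the declared Range bounds raise a voluptuous
# Invalid error.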
def filter_libav_logging() -> None:
"""Filter libav logging to only log when the stream logger is at DEBUG."""
stream_debug_enabled = logging.getLogger(__name__).isEnabledFor(logging.DEBUG)
def libav_filter(record: logging.LogRecord) -> bool:
return stream_debug_enabled
for logging_namespace in (
"libav.mp4",
"libav.h264",
"libav.hevc",
"libav.rtsp",
"libav.tcp",
"libav.tls",
"libav.mpegts",
"libav.NULL",
):
logging.getLogger(logging_namespace).addFilter(libav_filter)
# Set log level to error for libav.mp4
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
# Suppress "deprecated pixel format" WARNING
logging.getLogger("libav.swscaler").setLevel(logging.ERROR)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up stream."""
# Drop libav log messages if stream logging is above DEBUG
filter_libav_logging()
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
conf = DOMAIN_SCHEMA(config.get(DOMAIN, {}))
if conf[CONF_LL_HLS]:
assert isinstance(conf[CONF_SEGMENT_DURATION], float)
assert isinstance(conf[CONF_PART_DURATION], float)
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=True,
min_segment_duration=conf[CONF_SEGMENT_DURATION]
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=conf[CONF_PART_DURATION],
hls_advance_part_limit=max(int(3 / conf[CONF_PART_DURATION]), 3),
hls_part_timeout=2 * conf[CONF_PART_DURATION],
)
else:
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=False,
min_segment_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS,
hls_advance_part_limit=3,
hls_part_timeout=TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event: Event) -> None:
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
"""Represents a single stream."""
def __init__(
self,
hass: HomeAssistant,
source: str,
options: dict[str, str],
stream_label: str | None = None,
) -> None:
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self._stream_label = stream_label
self.keepalive = False
self.access_token: str | None = None
self._thread: threading.Thread | None = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
self._keyframe_converter = KeyFrameConverter(hass)
self._available: bool = True
self._update_callback: Callable[[], None] | None = None
self._logger = (
logging.getLogger(f"{__package__}.stream.{stream_label}")
if stream_label
else _LOGGER
)
def endpoint_url(self, fmt: str) -> str:
"""Start the stream and returns a url for the output format."""
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
endpoint_fmt: str = self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt]
return endpoint_fmt.format(self.access_token)
def outputs(self) -> Mapping[str, StreamOutput]:
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(
self, fmt: str, timeout: int = OUTPUT_IDLE_TIMEOUT
) -> StreamOutput:
"""Add provider output stream."""
if not self._outputs.get(fmt):
@callback
def idle_callback() -> None:
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider: StreamOutput) -> None:
"""Remove provider output stream."""
if provider.name in self._outputs:
self._outputs[provider.name].cleanup()
del self._outputs[provider.name]
if not self._outputs:
self.stop()
def check_idle(self) -> None:
"""Reset access token if all providers are idle."""
if all(p.idle for p in self._outputs.values()):
self.access_token = None
@property
def available(self) -> bool:
"""Return False if the stream is started and known to be unavailable."""
return self._available
def set_update_callback(self, update_callback: Callable[[], None]) -> None:
"""Set callback to run when state changes."""
self._update_callback = update_callback
@callback
def _async_update_state(self, available: bool) -> None:
"""Set state and Run callback to notify state has been updated."""
self._available = available
if self._update_callback:
self._update_callback()
def start(self) -> None:
"""Start a stream."""
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
# The thread must have crashed/exited. Join to clean up the
# previous thread.
self._thread.join(timeout=0)
self._thread_quit.clear()
self._thread = threading.Thread(
name="stream_worker",
target=self._run_worker,
)
self._thread.start()
self._logger.info(
"Started stream: %s", redact_credentials(str(self.source))
)
def update_source(self, new_source: str) -> None:
"""Restart the stream with a new stream source."""
self._logger.debug("Updating stream source %s", new_source)
self.source = new_source
self._fast_restart_once = True
self._thread_quit.set()
def _run_worker(self) -> None:
"""Handle consuming streams and restart keepalive streams."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import StreamState, StreamWorkerError, stream_worker
stream_state = StreamState(self.hass, self.outputs)
wait_timeout = 0
while not self._thread_quit.wait(timeout=wait_timeout):
start_time = time.time()
self.hass.add_job(self._async_update_state, True)
try:
stream_worker(
self.source,
self.options,
stream_state,
self._keyframe_converter,
self._thread_quit,
)
except StreamWorkerError as err:
self._logger.error("Error from stream worker: %s", str(err))
stream_state.discontinuity()
if not self.keepalive or self._thread_quit.is_set():
if self._fast_restart_once:
# The stream source is updated, restart without any delay.
self._fast_restart_once = False
self._thread_quit.clear()
continue
break
self.hass.add_job(self._async_update_state, False)
# To avoid excessive restarts, wait before restarting
# As the required recovery time may be different for different setups, start
# with trying a short wait_timeout and increase it on each reconnection attempt.
# Reset the wait_timeout after the worker has been up for several minutes
if time.time() - start_time > STREAM_RESTART_RESET_TIME:
wait_timeout = 0
wait_timeout += STREAM_RESTART_INCREMENT
self._logger.debug(
"Restarting stream worker in %d seconds: %s",
wait_timeout,
self.source,
)
self._worker_finished()
def _worker_finished(self) -> None:
"""Schedule cleanup of all outputs."""
@callback
def remove_outputs() -> None:
for provider in self.outputs().values():
self.remove_provider(provider)
self.hass.loop.call_soon_threadsafe(remove_outputs)
def stop(self) -> None:
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self) -> None:
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
self._logger.info(
"Stopped stream: %s", redact_credentials(str(self.source))
)
async def async_record(
self, video_path: str, duration: int = 30, lookback: int = 5
) -> None:
"""Make a .mp4 recording from a provided stream."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import RecorderOutput
# Check for file access
if not self.hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Add recorder
if recorder := self.outputs().get(RECORDER_PROVIDER):
assert isinstance(recorder, RecorderOutput)
raise HomeAssistantError(
f"Stream already recording to {recorder.video_path}!"
)
recorder = cast(
RecorderOutput, self.add_provider(RECORDER_PROVIDER, timeout=duration)
)
recorder.video_path = video_path
self.start()
self._logger.debug("Started a stream recording of %s seconds", duration)
# Take advantage of lookback
hls: HlsStreamOutput = cast(HlsStreamOutput, self.outputs().get(HLS_PROVIDER))
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segments())[-num_segments:])
async def async_get_image(
self,
width: int | None = None,
height: int | None = None,
) -> bytes | None:
"""
Fetch an image from the Stream and return it as a jpeg in bytes.
Calls async_get_image from KeyFrameConverter. async_get_image should only be
called directly from the main loop and not from an executor thread as it uses
hass.add_executor_job underneath the hood.
"""
self.add_provider(HLS_PROVIDER)
self.start()
return await self._keyframe_converter.async_get_image(
width=width, height=height
)
| 35.357942
| 94
| 0.637583
|
79506cce3d28ebb2b4a8d870833595ff49b46b33
| 614
|
py
|
Python
|
pf9/inventory/commands.py
|
sirredbeard/express-cli
|
83c2a731e8019d54676604832c5f4ab400580c2e
|
[
"Apache-2.0"
] | 4
|
2019-12-06T20:39:53.000Z
|
2021-01-05T07:01:35.000Z
|
pf9/inventory/commands.py
|
sirredbeard/express-cli
|
83c2a731e8019d54676604832c5f4ab400580c2e
|
[
"Apache-2.0"
] | 42
|
2019-09-20T20:35:09.000Z
|
2021-09-23T23:28:05.000Z
|
pf9/inventory/commands.py
|
sirredbeard/express-cli
|
83c2a731e8019d54676604832c5f4ab400580c2e
|
[
"Apache-2.0"
] | 5
|
2019-11-19T02:36:01.000Z
|
2021-01-05T04:46:48.000Z
|
import click
import os
from prettytable import PrettyTable
@click.group()
def inventory():
"""Manage Platform9 Express Inventories"""
@inventory.command('list')
@click.pass_obj
def list(obj):
"""List Platform9 Express Inventories."""
# lists pf9-express inventories
pf9_exp_conf_dir = obj['pf9_exp_conf_dir']
if os.path.exists(pf9_exp_conf_dir):
count = 1
result = PrettyTable()
result.field_names = ["#","Active", "Conf", "Management Plane", "Region"]
files = [f for f in os.listdir(pf9_exp_conf_dir) if os.path.isfile(os.path.join(pf9_exp_conf_dir, f))]
| 26.695652
| 110
| 0.684039
|
79506da6feb664e1f409766d5aad7127c0535e9d
| 253
|
py
|
Python
|
src/tests/test_historylinks/models.py
|
matthiask/django-historylinks
|
44108d2aa26317d53ea9c26bdff5b9471958dcf5
|
[
"BSD-3-Clause"
] | 15
|
2015-04-09T14:35:45.000Z
|
2020-11-20T01:26:36.000Z
|
src/tests/test_historylinks/models.py
|
matthiask/django-historylinks
|
44108d2aa26317d53ea9c26bdff5b9471958dcf5
|
[
"BSD-3-Clause"
] | 3
|
2015-04-14T16:07:34.000Z
|
2019-01-11T17:41:22.000Z
|
src/tests/test_historylinks/models.py
|
matthiask/django-historylinks
|
44108d2aa26317d53ea9c26bdff5b9471958dcf5
|
[
"BSD-3-Clause"
] | 5
|
2015-04-07T10:39:45.000Z
|
2019-01-10T12:53:24.000Z
|
from __future__ import unicode_literals
from django.db import models
class HistoryLinkTestModel(models.Model):
slug = models.SlugField(
unique=True,
)
def get_absolute_url(self):
return "/{slug}/".format(slug=self.slug)
| 18.071429
| 48
| 0.695652
|
79506e32dd22c1fc1def1f748aa8a2578323155e
| 981
|
py
|
Python
|
tests/test_broadsteel_datascience.py
|
jirvingphd/broadsteel_datascience
|
878aa141160bfcd62803365cb613c515c1fa3ca7
|
[
"MIT"
] | null | null | null |
tests/test_broadsteel_datascience.py
|
jirvingphd/broadsteel_datascience
|
878aa141160bfcd62803365cb613c515c1fa3ca7
|
[
"MIT"
] | 196
|
2019-05-10T17:08:52.000Z
|
2022-03-28T00:19:18.000Z
|
tests/test_broadsteel_datascience.py
|
jirvingphd/broadsteel_datascience
|
878aa141160bfcd62803365cb613c515c1fa3ca7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `broadsteel_datascience` package."""
import unittest
from click.testing import CliRunner
from broadsteel_datascience import broadsteel_datascience
from broadsteel_datascience import cli
class TestBroadsteel_datascience(unittest.TestCase):
"""Tests for `broadsteel_datascience` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'broadsteel_datascience.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 28.028571
| 74
| 0.671764
|
79506ec9c0ee7447e1e64b29e4dbda93a9531002
| 342
|
py
|
Python
|
covid19/db/base_model.py
|
jeonghaknam/cvd2019
|
045f2a6f63c97e176cd757d1cd5a86358f424a0a
|
[
"MIT"
] | null | null | null |
covid19/db/base_model.py
|
jeonghaknam/cvd2019
|
045f2a6f63c97e176cd757d1cd5a86358f424a0a
|
[
"MIT"
] | null | null | null |
covid19/db/base_model.py
|
jeonghaknam/cvd2019
|
045f2a6f63c97e176cd757d1cd5a86358f424a0a
|
[
"MIT"
] | null | null | null |
from django.db import models
class BaseModel(models.Model):
'''Abstract model class.'''
create_time = models.DateField(auto_now_add=True, verbose_name='creation date')
update_time = models.DateTimeField(auto_now=True, verbose_name='last updated')
is_delete = models.BooleanField(default=False, verbose_name='deleted')
class Meta:
abstract = True
| 31.090909
| 74
| 0.719298
|
79506efb0fbb9fc0f73c916de3079b8908e9d1a6
| 13,447
|
py
|
Python
|
examples/FasterRCNN/config.py
|
s36934512/tensorpack
|
78a16615f8c9e6993c2a14961ca3fdd05f7c273c
|
[
"Apache-2.0"
] | 4,404
|
2018-05-30T23:38:42.000Z
|
2022-03-31T22:30:11.000Z
|
examples/FasterRCNN/config.py
|
s36934512/tensorpack
|
78a16615f8c9e6993c2a14961ca3fdd05f7c273c
|
[
"Apache-2.0"
] | 771
|
2018-06-01T09:54:00.000Z
|
2022-03-31T23:12:29.000Z
|
examples/FasterRCNN/config.py
|
s36934512/tensorpack
|
78a16615f8c9e6993c2a14961ca3fdd05f7c273c
|
[
"Apache-2.0"
] | 1,412
|
2018-06-01T00:29:43.000Z
|
2022-03-26T17:37:39.000Z
|
# -*- coding: utf-8 -*-
# File: config.py
import numpy as np
import os
import pprint
import six
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
__all__ = ['config', 'finalize_configs']
class AttrDict():
_freezed = False
""" Avoid accidental creation of new hierarchies. """
def __getattr__(self, name):
if self._freezed:
raise AttributeError(name)
if name.startswith('_'):
# Do not mess with internals. Otherwise copy/pickle will fail
raise AttributeError(name)
ret = AttrDict()
setattr(self, name, ret)
return ret
def __setattr__(self, name, value):
if self._freezed and name not in self.__dict__:
raise AttributeError(
"Config was freezed! Unknown config: {}".format(name))
super().__setattr__(name, value)
def __str__(self):
return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)
__repr__ = __str__
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')}
def from_dict(self, d):
self.freeze(False)
for k, v in d.items():
self_v = getattr(self, k)
if isinstance(self_v, AttrDict):
self_v.from_dict(v)
else:
setattr(self, k, v)
def update_args(self, args):
"""Update from command line args. """
for cfg in args:
keys, v = cfg.split('=', maxsplit=1)
keylist = keys.split('.')
dic = self
for k in keylist[:-1]:
assert k in dir(dic), "Unknown config key: {}".format(keys)
dic = getattr(dic, k)
key = keylist[-1]
oldv = getattr(dic, key)
if not isinstance(oldv, str):
v = eval(v)
setattr(dic, key, v)
def freeze(self, freezed=True):
self._freezed = freezed
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.freeze(freezed)
# avoid silent bugs
def __eq__(self, _):
raise NotImplementedError()
def __ne__(self, _):
raise NotImplementedError()
config = AttrDict()
_C = config # short alias to avoid coding
# mode flags ---------------------
_C.TRAINER = 'replicated' # options: 'horovod', 'replicated'
_C.MODE_MASK = True # Faster R-CNN or Mask R-CNN
_C.MODE_FPN = True
# dataset -----------------------
_C.DATA.BASEDIR = '/path/to/your/DATA/DIR'
# All available dataset names are defined in `dataset/coco.py:register_coco`.
# All TRAIN dataset will be concatenated for training.
_C.DATA.TRAIN = ('coco_train2017',) # i.e. trainval35k
# Each VAL dataset will be evaluated separately (instead of concatenated)
_C.DATA.VAL = ('coco_val2017',) # AKA minival2014
# These two configs will be populated later inside `finalize_configs`.
_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)
_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
# whether the coordinates in your registered dataset are
# absolute pixel values in range [0, W or H] or relative values in [0, 1]
_C.DATA.ABSOLUTE_COORD = True
# Filter Negative Samples from dataset
_C.DATA.FILTER_EMPTY_ANNOTATIONS = True
# Number of data loading workers.
# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).
# Set to 0 to disable parallel data loading
_C.DATA.NUM_WORKERS = 10
# backbone ----------------------
_C.BACKBONE.WEIGHTS = ''
# To train from scratch, set it to empty, and set FREEZE_AT to 0
# To train from ImageNet pre-trained models, use the one that matches your
# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.
# To train from an existing COCO model, use the path to that file, and change
# the other configurations according to that model.
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3] # for resnet50
# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101
_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers
_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None
_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)
# Use a base model with TF-preferred padding mode,
# which may pad more pixels on right/bottom than top/left.
# See https://github.com/tensorflow/tensorflow/issues/18213
# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.
# Using either one should probably give the same performance.
# We use the "AlignPadding" one just to be consistent with caffe2.
_C.BACKBONE.TF_PAD_MODE = False
_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models
# schedule -----------------------
_C.TRAIN.NUM_GPUS = None # by default, will be set from code
_C.TRAIN.WEIGHT_DECAY = 1e-4
_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs
_C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.STEPS_PER_EPOCH = 500
_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
# It can be either a string like "3x" that refers to standard convention, or a list of int.
# LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which
# means to decrease LR at steps 420k and 500k and stop training at 540k.
# When the total bs!=8, the actual iterations to decrease learning rate, and
# the base learning rate are computed from BASE_LR and LR_SCHEDULE.
# Therefore, there is *no need* to modify the config if you only change the number of GPUs.
_C.TRAIN.LR_SCHEDULE = "1x" # "1x" schedule in detectron
_C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation
_C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model
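# Worked example (an assumption for illustration; the scaling code itself is not in this
# excerpt): BASE_LR=1e-2 and the "1x" boundaries [120k, 160k, 180k] are defined for a total
# batch size of 8, so training on 16 GPUs (total batch size 16) would presumably double the
# base LR to ~2e-2 and roughly halve the boundaries to about [60k, 80k, 90k] iterations.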
# preprocessing --------------------
# Alternative old (worse & faster) setting: 600
_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [800, 800] # [min, max] to sample from
_C.PREPROC.TEST_SHORT_EDGE_SIZE = 800
_C.PREPROC.MAX_SIZE = 1333
# mean and std in RGB order.
# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]
_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]
# anchors -------------------------
_C.RPN.ANCHOR_STRIDE = 16
_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)
_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7
_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3
# rpn training -------------------------
_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors
_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
_C.RPN.MIN_SIZE = 0
_C.RPN.PROPOSAL_NMS_THRESH = 0.7
# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.
# Setting this to a value larger than 1.0 will disable the feature.
# It is disabled by default because Detectron does not do this.
_C.RPN.CROWD_OVERLAP_THRESH = 9.99
_C.RPN.HEAD_DIM = 1024 # used in C4 only
# RPN proposal selection -------------------------------
# for C4
_C.RPN.TRAIN_PRE_NMS_TOPK = 12000
_C.RPN.TRAIN_POST_NMS_TOPK = 2000
_C.RPN.TEST_PRE_NMS_TOPK = 6000
_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number
# for FPN, #proposals per-level and #proposals after merging are (for now) the same
# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect
_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000
_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000
# fastrcnn training ---------------------
_C.FRCNN.BATCH_PER_IM = 512
_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10
_C.FRCNN.FG_THRESH = 0.5
_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch
# FPN -------------------------
_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'
_C.FPN.NUM_CHANNEL = 256
_C.FPN.NORM = 'None' # 'None', 'GN'
# The head option is only used in FPN. For C4 models, the head is C5
_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'
# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head
_C.FPN.FRCNN_CONV_HEAD_DIM = 256
_C.FPN.FRCNN_FC_HEAD_DIM = 1024
_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head
# Mask R-CNN
_C.MRCNN.HEAD_DIM = 256
_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy
# Cascade R-CNN, only available in FPN mode
_C.FPN.CASCADE = False
_C.CASCADE.IOUS = [0.5, 0.6, 0.7]
_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
# testing -----------------------
_C.TEST.FRCNN_NMS_THRESH = 0.5
# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.
# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa
_C.TEST.RESULT_SCORE_THRESH = 0.05
_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results
_C.TEST.RESULTS_PER_IM = 100
_C.freeze() # avoid typo / wrong config keys
def finalize_configs(is_training):
"""
Run some sanity checks, and populate some configs from others
"""
_C.freeze(False) # populate new keys now
if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well
_C.DATA.VAL = (_C.DATA.VAL, )
if isinstance(_C.DATA.TRAIN, six.string_types): # support single string
_C.DATA.TRAIN = (_C.DATA.TRAIN, )
# finalize dataset definitions ...
from dataset import DatasetRegistry
datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
_C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
_C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1
assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
if _C.BACKBONE.NORM != 'FreezeBN':
assert not _C.BACKBONE.FREEZE_AFFINE
assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]
_C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
# image size into the backbone has to be multiple of this number
_C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5
if _C.MODE_FPN:
size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.NORM in ['None', 'GN']
if _C.FPN.CASCADE:
# the first threshold is the proposal sampling threshold
assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)
if is_training:
train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
# don't autotune if augmentation is on
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
lr = _C.TRAIN.LR_SCHEDULE
if isinstance(lr, six.string_types):
if lr.endswith("x"):
LR_SCHEDULE_KITER = {
"{}x".format(k):
[180 * k - 120, 180 * k - 40, 180 * k]
for k in range(2, 10)}
LR_SCHEDULE_KITER["1x"] = [120, 160, 180]
_C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
else:
_C.TRAIN.LR_SCHEDULE = eval(lr)
# setup NUM_GPUS
if _C.TRAINER == 'horovod':
import horovod.tensorflow as hvd
ngpu = hvd.size()
logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
hvd.rank(), hvd.size(), hvd.local_rank()))
else:
assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
ngpu = get_num_gpu()
assert ngpu > 0, "Has to train with GPU!"
assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
else:
# autotune is too slow for inference
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
ngpu = get_num_gpu()
if _C.TRAIN.NUM_GPUS is None:
_C.TRAIN.NUM_GPUS = ngpu
else:
if _C.TRAINER == 'horovod':
assert _C.TRAIN.NUM_GPUS == ngpu
else:
assert _C.TRAIN.NUM_GPUS <= ngpu
_C.freeze()
logger.info("Config: ------------------------------------------\n" + str(_C))
| 41.631579
| 176
| 0.65866
|
795070b2b747d02ca45d763147af32e857b8eafe
| 4,957
|
py
|
Python
|
src/api/datahub/databus/pullers/queue/puller.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/datahub/databus/pullers/queue/puller.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/datahub/databus/pullers/queue/puller.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from datahub.common.const import AUTO_OFFSET_RESET, KAFKA, LATEST
from datahub.databus.exceptions import TaskDataScenarioNotSupport
from datahub.databus.pullers.base_puller import BasePuller
from datahub.databus.settings import MODULE_PULLER, TYPE_PULSAR
from datahub.databus.task.pulsar import config as pulsar_config
from datahub.databus import common_helper
class QueuePuller(BasePuller):
component = "db"
module = MODULE_PULLER
def _before_add_task(self):
resource_type = self.resource_json.get("type", "unknown_type")
if resource_type != "kafka":
raise TaskDataScenarioNotSupport(message_kv={"Scenario": self.raw_data.data_scenario + "_" + resource_type})
super(QueuePuller, self)._before_add_task()
def _get_puller_task_conf(self):
if self.storage_channel.cluster_type == TYPE_PULSAR:
return self.__builder_pulsar_conf()
else:
return self.__builder_kafka_conf()
def _get_source_id(self):
return self.sink_topic
def __builder_pulsar_conf(self):
if self.resource_json.get("use_sasl", False):
return pulsar_config.build_puller_kafka_config_with_sasl(
self.data_id,
self.connector_name,
self.resource_json["master"],
self.resource_json["group"],
self.resource_json["topic"],
self.sink_topic,
self.resource_json.get("tasks", 1),
self.resource_json["security_protocol"],
self.resource_json["sasl_mechanism"],
self.resource_json["user"],
self.resource_json["password"],
self.resource_json.get(AUTO_OFFSET_RESET, LATEST),
)
else:
return pulsar_config.build_puller_kafka_config_param(
self.data_id,
self.connector_name,
self.resource_json["master"],
self.resource_json["group"],
self.resource_json["topic"],
self.sink_topic,
self.resource_json.get("tasks", 1),
self.resource_json.get(AUTO_OFFSET_RESET, LATEST),
)
def __builder_kafka_conf(self):
kafka = self.storage_channel
kafka_bs = "{}:{}".format(kafka.cluster_domain, kafka.cluster_port)
param = {
"data_id": self.data_id,
"cluster_name": self.cluster_name,
"group": self.resource_json["group"],
"max_tasks": self.resource_json.get("tasks", "1"),
"src_topic": self.resource_json["topic"],
"src_server": self.resource_json["master"],
"dest_topic": self.sink_topic,
"dest_server": kafka_bs,
AUTO_OFFSET_RESET: self.resource_json.get(AUTO_OFFSET_RESET, LATEST),
}
if self.resource_json.get("use_sasl", False):
param["security_protocol"] = self.resource_json["security_protocol"]
param["sasl_mechanism"] = self.resource_json["sasl_mechanism"]
param["user"] = self.resource_json["user"]
param["password"] = self.resource_json["password"]
return self.config_factory[KAFKA].build_puller_kafka_config_param(param)
@classmethod
def _compare_connector_conf(cls, cluster_name, connector_name, conf, running_conf):
return common_helper.check_keys_equal(
conf,
running_conf,
[
"rt.id",
"topics",
"tasks.max",
],
)
| 43.482456
| 120
| 0.65463
|
795073404d7342347f9795845da603deb4d1e260
| 2,252
|
py
|
Python
|
statsd/tests.py
|
krux/pystatsd
|
d6f88da52018c7a0a96615e3a77172d115b56740
|
[
"BSD-3-Clause"
] | null | null | null |
statsd/tests.py
|
krux/pystatsd
|
d6f88da52018c7a0a96615e3a77172d115b56740
|
[
"BSD-3-Clause"
] | null | null | null |
statsd/tests.py
|
krux/pystatsd
|
d6f88da52018c7a0a96615e3a77172d115b56740
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import re
import mock
from nose.tools import eq_
from statsd import StatsClient
ADDR = ('localhost', 8125)
def _client(prefix=None):
sc = StatsClient(host=ADDR[0], port=ADDR[1], prefix=prefix)
sc._sock = mock.Mock()
return sc
def _sock_check(cl, count, val):
eq_(cl._sock.sendto.call_count, count)
eq_(cl._sock.sendto.call_args, ((val, ADDR), {}))
@mock.patch.object(random, 'random', lambda: -1)
def test_incr():
sc = _client()
sc.incr('foo')
_sock_check(sc, 1, 'foo:1|c')
sc.incr('foo', 10)
_sock_check(sc, 2, 'foo:10|c')
sc.incr('foo', 10, rate=0.5)
_sock_check(sc, 3, 'foo:10|c|@0.5')
@mock.patch.object(random, 'random', lambda: -1)
def test_decr():
sc = _client()
sc.decr('foo')
_sock_check(sc, 1, 'foo:-1|c')
sc.decr('foo', 10)
_sock_check(sc, 2, 'foo:-10|c')
@mock.patch.object(random, 'random', lambda: -1)
def test_timing():
sc = _client()
sc.timing('foo', 100)
_sock_check(sc, 1, 'foo:100|ms')
sc.timing('foo', 350)
_sock_check(sc, 2, 'foo:350|ms')
sc.timing('foo', 100, rate=0.5)
_sock_check(sc, 3, 'foo:100|ms|@0.5')
def test_prefix():
sc = _client('foo')
sc.incr('bar')
_sock_check(sc, 1, 'foo.bar:1|c')
def _timer_check(cl, count, start, end):
eq_(cl._sock.sendto.call_count, count)
value = cl._sock.sendto.call_args[0][0]
# Match e.g. 'foo:123|ms' (escape the literal '|' so it is not an alternation).
exp = re.compile(r'^%s:\d+\|%s$' % (start, re.escape(end)))
assert exp.match(value)
def test_timer():
"""StatsClient.timer is a context decorator."""
sc = _client()
with sc.timer('foo'):
pass
_timer_check(sc, 1, 'foo', 'ms')
@sc.timer('bar')
def bar():
pass
bar()
_timer_check(sc, 2, 'bar', 'ms')
def test_timer_capture():
"""You can capture the output of StatsClient.timer."""
sc = _client()
with sc.timer('woo') as result:
eq_(result.ms, None)
assert isinstance(result.ms, int)
@mock.patch.object(random, 'random', lambda: -1)
def test_timer_rate():
sc = _client()
with sc.timer('foo', rate=0.5):
pass
_timer_check(sc, 1, 'foo', 'ms|@0.5')
@sc.timer('bar', rate=0.1)
def bar():
pass
bar()
_timer_check(sc, 2, 'bar', 'ms|@0.1')
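# Illustrative usage sketch (not part of the original tests), mirroring the wire formats
# asserted above; assumes a statsd daemon listening on localhost:8125.
# sc = StatsClient(host='localhost', port=8125, prefix='myapp')
# sc.incr('requests') # sends 'myapp.requests:1|c'
# sc.timing('db.query', 42) # sends 'myapp.db.query:42|ms'
# with sc.timer('render'): # sends 'myapp.render:<elapsed>|ms' on exit
#     pass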
| 18.766667
| 63
| 0.589698
|
7950750123b16c9fa4ee06a8a3be7bcb0eaecd5d
| 5,285
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/ObjectTypeVersionTolerant/objecttypeversiontolerant/_operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/ObjectTypeVersionTolerant/objecttypeversiontolerant/_operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/ObjectTypeVersionTolerant/objecttypeversiontolerant/_operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar("T")
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/objectType/get'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_put_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/objectType/put'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class ObjectTypeClientOperationsMixin(object):
@distributed_trace
def get(
self, **kwargs # type: Any
):
# type: (...) -> Any
"""Basic get that returns an object. Returns object { 'message': 'An object was successfully
returned' }.
:return: any
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[Any]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_get_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/objectType/get"} # type: ignore
@distributed_trace
def put(
self,
put_object, # type: Any
**kwargs # type: Any
):
# type: (...) -> None
"""Basic put that puts an object. Pass in {'foo': 'bar'} to get a 200 and anything else to get an
object error.
:param put_object: Pass in {'foo': 'bar'} for a 200, anything else for an object error.
:type put_object: any
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = put_object
request = build_put_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put.metadata = {"url": "/objectType/put"} # type: ignore
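# Illustrative usage sketch (an assumption; the generated ObjectTypeClient class that mixes
# in these operations, and its constructor arguments, are not shown in this excerpt).
# client = ObjectTypeClient(...) # constructor args depend on the generated client
# client.put({'foo': 'bar'}) # 200 -> returns None
# body = client.get() # e.g. {'message': 'An object was successfully returned'}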
| 32.826087
| 106
| 0.637086
|
795076c11ea4506de5dca46f8fe31114aca137e2
| 1,364
|
py
|
Python
|
param_sweep.py
|
gavincangan/alvin
|
4e1945a3f5bb061842f0e35633f254863f8923c8
|
[
"MIT"
] | null | null | null |
param_sweep.py
|
gavincangan/alvin
|
4e1945a3f5bb061842f0e35633f254863f8923c8
|
[
"MIT"
] | null | null | null |
param_sweep.py
|
gavincangan/alvin
|
4e1945a3f5bb061842f0e35633f254863f8923c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys, os, shutil, math
from math import pi
from colorama import init, Fore, Style
from configsingleton import ConfigSingleton
def execute(cmd):
# Print the command to execute in green
print(Fore.GREEN + cmd)
print(Style.RESET_ALL)
os.system(cmd)
config = ConfigSingleton.get_instance('default.cfg')
#linear_speeds = [x / 2.0 for x in range(2, 10 + 1)]
#print linear_speeds
#angular_speeds = [x / 2.0 for x in range(1, 10 + 1)]
#print angular_speeds
#slow_factors = [x / 4.0 for x in range(1, 4 + 1)]
#print slow_factors
#front_angle_thresholds = [(pi/4)*(i/10.0) for i in range(0, 11)]
#print front_angle_thresholds
#number_robots = [i for i in range(2, 6)]
number_robots = [5]
print(number_robots)
output_dir = '/tmp/param_sweep'
shutil.rmtree(output_dir, ignore_errors=True)
os.mkdir(output_dir)
number_trials = 3
for nr in number_robots:
#config.set("GauciController", "linear_speed", l)
#config.set("GauciController", "angular_speed", a)
#config.set("GauciController", "front_angle_threshold", fat)
config.set("AlvinSim", "number_robots", nr)
filename_base = "{}/{}".format(output_dir, nr)
filename = filename_base.replace('.', 'p') + '.cfg'
config.write(open(filename, 'w'))
for trial in range(number_trials):
execute("alvin.py {} {}".format(filename, trial))
| 26.230769
| 65
| 0.695748
|
7950774340ad1238440be8e86e0ff851e1122df1
| 774
|
py
|
Python
|
esphome/components/cap1188/binary_sensor.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 249
|
2018-04-07T12:04:11.000Z
|
2019-01-25T01:11:34.000Z
|
esphome/components/cap1188/binary_sensor.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 243
|
2018-04-11T16:37:11.000Z
|
2019-01-25T16:50:37.000Z
|
esphome/components/cap1188/binary_sensor.py
|
OttoWinter/esphomeyaml
|
6a85259e4d6d1b0a0f819688b8e555efcb99ecb0
|
[
"MIT"
] | 40
|
2018-04-10T05:50:14.000Z
|
2019-01-25T15:20:36.000Z
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor
from esphome.const import CONF_CHANNEL
from . import cap1188_ns, CAP1188Component, CONF_CAP1188_ID
DEPENDENCIES = ["cap1188"]
CAP1188Channel = cap1188_ns.class_("CAP1188Channel", binary_sensor.BinarySensor)
CONFIG_SCHEMA = binary_sensor.binary_sensor_schema(CAP1188Channel).extend(
{
cv.GenerateID(CONF_CAP1188_ID): cv.use_id(CAP1188Component),
cv.Required(CONF_CHANNEL): cv.int_range(min=0, max=7),
}
)
async def to_code(config):
var = await binary_sensor.new_binary_sensor(config)
hub = await cg.get_variable(config[CONF_CAP1188_ID])
cg.add(var.set_channel(config[CONF_CHANNEL]))
cg.add(hub.register_channel(var))
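# Illustrative YAML sketch (an assumption based on the schema above, not copied from the
# ESPHome docs): one CAP1188 touch channel exposed as a binary sensor.
# binary_sensor:
#   - platform: cap1188
#     cap1188_id: my_cap1188 # optional; auto-generated when omitted
#     channel: 0 # required, 0-7 per the schema
#     name: "Touch Pad 0"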
| 32.25
| 80
| 0.776486
|
79507798335d24dc3978a832e3a06fc3d294011d
| 1,353
|
py
|
Python
|
src/UserInterface.py
|
RhysRead/camControl
|
7018203c5576b133407630e3ca4c975cb8c7fa7c
|
[
"Apache-2.0"
] | null | null | null |
src/UserInterface.py
|
RhysRead/camControl
|
7018203c5576b133407630e3ca4c975cb8c7fa7c
|
[
"Apache-2.0"
] | null | null | null |
src/UserInterface.py
|
RhysRead/camControl
|
7018203c5576b133407630e3ca4c975cb8c7fa7c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""UserInterface.py: Contains the code needed for managing the camControl user interface."""
__author__ = "Rhys Read"
__copyright__ = "Copyright 2018, Rhys Read"
import logging
import cv2
class InterfaceManager(object):
def __init__(self, cursor_manager):
"""
Used to read a user input and respond accordingly.
:param cursor_manager: CursorManager object.
"""
self.__windows = {}
self.__cursor_manager = cursor_manager
def get_user_input(self):
"""
Used to retrieve and handle the user input.
:return: The code relevant to the user input.
"""
# Use opencv2 to get the user's input
value = cv2.waitKey(1)
# Handle the user's input
if value == ord('q'):
# Return 1 if exit button is pressed.
return 1
elif value == ord('d'):
logging.info("Paused cursor movement.")
self.__cursor_manager.disable()
# Return 2 if cursor pause button is pressed.
return 2
elif value == ord('a'):
logging.info("Enabled cursor movement.")
self.__cursor_manager.enable()
# Return 3 if cursor enable button is pressed.
return 3
# Return 0 if no user input is found.
return 0
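# Illustrative usage sketch (not part of the original file); CursorManager is a hypothetical
# stand-in exposing the enable()/disable() methods called above.
# manager = InterfaceManager(cursor_manager=CursorManager())
# while True:
#     if manager.get_user_input() == 1: # 'q' pressed -> exit code
#         break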
| 28.787234
| 92
| 0.595713
|
795077e5d3f0e4c23b800a74f2cb867ff7761709
| 131
|
py
|
Python
|
POP1/worksheets/three/ex03/code.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | 1
|
2021-12-29T19:38:56.000Z
|
2021-12-29T19:38:56.000Z
|
POP1/worksheets/three/ex03/code.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | null | null | null |
POP1/worksheets/three/ex03/code.py
|
silvafj/BBK-MSCCS-2017-18
|
d97b0f8e7434d19a1a4006989c32c4c1deb93842
|
[
"MIT"
] | 2
|
2021-04-08T22:58:03.000Z
|
2021-04-09T01:16:51.000Z
|
numbers = {int(i) for i in input().split()} & {int(i) for i in input().split()}
print(' '.join((str(i) for i in sorted(numbers))))
| 43.666667
| 79
| 0.603053
|
795078112677d63cd5f03126b3a980b6fb38e5f9
| 399
|
py
|
Python
|
19583.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
19583.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
19583.py
|
WaiNaat/BOJ-Python
|
3365ef090c7dcf6e6a598fea0b25c416a5a3e01b
|
[
"MIT"
] | null | null | null |
import sys
input = sys.stdin.readline
# input & process
start, end, stEnd = list(map(int, input().replace(':', '').split()))
valid = set([])
sol = 0
chat = input()
while chat != "":
time, id = chat.split()
time = int(time[:2]) * 100 + int(time[3:])
if time <= start:
valid.add(id)
elif end <= time <= stEnd and id in valid:
valid.discard(id)
sol += 1
chat = input()
# output
print(sol)
| 19
| 68
| 0.601504
|
7950783f9a1023028774fb97ef1ee6d895ced8ec
| 1,218
|
py
|
Python
|
thinc/tests/unit/test_rates.py
|
adamjm/thinc
|
219d8172faee83303bb78e338996d7b6c6d56155
|
[
"MIT"
] | 44
|
2015-01-02T22:04:48.000Z
|
2020-05-15T21:17:10.000Z
|
venv/lib/python3.6/site-packages/thinc/tests/unit/test_rates.py
|
assaufianggie/VerraBot
|
cbe46ccb219c2972871e760268b427e1f8e79f93
|
[
"MIT"
] | 4
|
2021-06-02T00:49:27.000Z
|
2022-01-13T01:59:34.000Z
|
venv/lib/python3.6/site-packages/thinc/tests/unit/test_rates.py
|
assaufianggie/VerraBot
|
cbe46ccb219c2972871e760268b427e1f8e79f93
|
[
"MIT"
] | 7
|
2015-06-18T00:50:57.000Z
|
2016-02-03T17:08:07.000Z
|
# coding: utf8
from __future__ import unicode_literals
from ...rates import decaying, compounding, annealing, slanted_triangular
def test_decaying_rate():
rates = decaying(0.001, 1e-4)
rate = next(rates)
assert rate == 0.001
next_rate = next(rates)
assert next_rate < rate
assert next_rate > 0
assert next_rate > next(rates)
def test_compounding_rate():
rates = compounding(1, 16, 1.01)
rate0 = next(rates)
assert rate0 == 1.0
rate1 = next(rates)
rate2 = next(rates)
rate3 = next(rates)
assert rate3 > rate2 > rate1 > rate0
assert (rate3 - rate2) > (rate2 - rate1) > (rate1 - rate0)
def test_annealing_rate():
rates = annealing(0.001, 1e-4, 1000)
rate0 = next(rates)
rate1 = next(rates)
rate2 = next(rates)
rate3 = next(rates)
assert rate0 == 0.001
assert rate3 < rate2 < rate1 < rate0
assert (rate2 - rate3) < (rate1 - rate2)
def test_slanted_triangular_rate():
rates = slanted_triangular(1.0, 20.0, ratio=10)
rate0 = next(rates)
assert rate0 < 1.0
rate1 = next(rates)
assert rate1 > rate0
rate2 = next(rates)
assert rate2 < rate1
rate3 = next(rates)
assert rate0 < rate3 < rate2
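# Illustrative usage sketch (not part of the original tests): each schedule behaves as a
# generator of learning rates, as exercised above, so it can be zipped against training steps.
# lrs = decaying(0.001, 1e-4)
# for step, lr in zip(range(100), lrs):
#     pass # feed `lr` to the optimizer at this step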
| 24.857143
| 73
| 0.646141
|
795078ae76ba0d1721e6a97c69747724b2c1f3db
| 3,625
|
py
|
Python
|
index/tests.py
|
PickBas/meta-social
|
f6fb0a50c30e240086a75917b705dfdc71dbebf9
|
[
"MIT"
] | null | null | null |
index/tests.py
|
PickBas/meta-social
|
f6fb0a50c30e240086a75917b705dfdc71dbebf9
|
[
"MIT"
] | 15
|
2020-06-07T07:58:05.000Z
|
2022-01-19T16:53:47.000Z
|
index/tests.py
|
PickBas/meta-social
|
f6fb0a50c30e240086a75917b705dfdc71dbebf9
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
# class AdminTest(TestCase):
# pass
# class LogoutViewTest(MetaSetUp):
# pass
class MetaSetUp(TestCase):
fixtures = ["test_friends_music_db.json"]
def setUp(self):
self.client = Client()
self.user = User.objects.get(username="test_user")
self.client.force_login(user=self.user)
class IndexViewTest(MetaSetUp):
def setUp(self):
super().setUp()
self.response = self.client.get(reverse("home"))
def test_login(self):
self.assertEqual(self.response.status_code, 200)
def test_unauth_client(self):
unauth_client = Client()
response = unauth_client.get(reverse("home"))
self.assertEqual(response.status_code, 302)
response = unauth_client.get(reverse("home"), follow=True)
last_url, status = response.redirect_chain[-1]
self.assertIn(reverse("account_login"), last_url)
class GlobalSearchTest(MetaSetUp):
def setUp(self):
super().setUp()
def test_search(self):
resp = self.client.post('/ajax/search/', {
'query': 'test',
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.context['users']), 2)
self.assertEqual(len(resp.context['communities']), 0)
self.assertTemplateUsed(resp, 'search_list.html')
resp = self.client.post('/ajax/search/', {
'query': 'Dreams',
})
self.assertEqual(len(resp.context['music']), 3)
def test_page(self):
self.response = self.client.get('/accounts/profile/test_user/')
self.assertEqual(self.response.status_code, 200)
# content assertions go here
# class FriendsSearchView(MetaSetUp):
# def setUp(self):
# super().setUp()
# self.response = self.client.get('/friends/search/')
#
# def test_friend_post(self):
# print(self.response.context)
# response = self.client.post('/friends/search/', {'username': 'test_user'})
# self.assertEqual(response.status_code, 200)
# self.assertEqual(self.response.status_code, 200)
# self.assertTrue(
# User.objects.get(
# username="test_user2") in response.context['matches'])
class FriendsListView(MetaSetUp):
def setUp(self):
super().setUp()
self.response = self.client.get('/friends/')
def test_page(self):
self.assertEqual(self.response.status_code, 200)
class ChatView(MetaSetUp):
def setUp(self):
super().setUp()
self.response = self.client.get('/chats/')
def test_page(self):
self.assertEqual(self.response.status_code, 200)
# class CommunityView(MetaSetUp):
# def setUp(self):
# super().setUp()
# self.response = self.client.get('/community/asd/')
# def test_page(self):
# self.assertEqual(self.response.status_code, 200)
# class CommunityListView(MetaSetUp):
# def setUp(self):
# super().setUp()
# self.response = self.client.get('/community/user/list/')
# def test_page(self):
# self.assertEqual(self.response.status_code, 200)
class PostView(MetaSetUp):
def setUp(self):
super().setUp()
self.response = self.client.get('/post/1/')
def test_page(self):
self.assertEqual(self.response.status_code, 200)
# The admin site has no data about social-network accounts, although the models are there
# and it even offers fields to fill in; there are no hints.
# test_user : test_pass
# test_user2 : test_password2
| 29
| 84
| 0.643034
|
795078f58731d7996517e5d4a7747f81f7f3bfe7
| 1,883
|
py
|
Python
|
rdkit/VLib/NodeLib/SmilesSupply.py
|
docking-org/rdk
|
6eb710254f027b348a8e3089e6a92c3d40de0949
|
[
"PostgreSQL"
] | 1
|
2019-01-23T06:02:24.000Z
|
2019-01-23T06:02:24.000Z
|
rdkit/VLib/NodeLib/SmilesSupply.py
|
Mike575/rdkit
|
373a89021e478f878c6011a201e3fb8f4a122093
|
[
"PostgreSQL"
] | null | null | null |
rdkit/VLib/NodeLib/SmilesSupply.py
|
Mike575/rdkit
|
373a89021e478f878c6011a201e3fb8f4a122093
|
[
"PostgreSQL"
] | 1
|
2022-03-30T03:22:10.000Z
|
2022-03-30T03:22:10.000Z
|
# $Id$
#
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
from rdkit import Chem
from rdkit import six
from rdkit.VLib.Supply import SupplyNode
class SmilesSupplyNode(SupplyNode):
""" Smiles supplier
Sample Usage:
>>> import os
>>> from rdkit import RDConfig
>>> fileN = os.path.join(RDConfig.RDCodeDir,'VLib','NodeLib',\
'test_data','pgp_20.txt')
>>> suppl = SmilesSupplyNode(fileN,delim="\\t",smilesColumn=2,nameColumn=1,titleLine=1)
>>> ms = [x for x in suppl]
>>> len(ms)
20
>>> ms[0].GetProp("_Name")
'ALDOSTERONE'
>>> ms[0].GetProp("ID")
'RD-PGP-0001'
>>> ms[1].GetProp("_Name")
'AMIODARONE'
>>> ms[3].GetProp("ID")
'RD-PGP-0004'
>>> suppl.reset()
>>> suppl.next().GetProp("_Name")
'ALDOSTERONE'
>>> suppl.next().GetProp("_Name")
'AMIODARONE'
>>> suppl.reset()
"""
def __init__(self, fileName, delim="\t", nameColumn=1, smilesColumn=0, titleLine=0, **kwargs):
SupplyNode.__init__(self, **kwargs)
self._fileName = fileName
self._supplier = Chem.SmilesMolSupplier(self._fileName, delimiter=delim,
smilesColumn=smilesColumn, nameColumn=nameColumn,
titleLine=titleLine)
def reset(self):
SupplyNode.reset(self)
self._supplier.reset()
def next(self):
"""
"""
r = None
while not r:
r = next(self._supplier)
return r
if six.PY3:
SmilesSupplyNode.__next__ = SmilesSupplyNode.next
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import doctest
import sys
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| 24.141026
| 96
| 0.59692
|
79507948af14d62b2dd867d8592fc12065cd7e61
| 13,440
|
py
|
Python
|
mypy/test/testfinegrained.py
|
mhchia/mypy
|
b7d74f3e202fa60ff0d8a2efc299913aad68e2ed
|
[
"PSF-2.0"
] | 1
|
2022-03-26T20:37:26.000Z
|
2022-03-26T20:37:26.000Z
|
mypy/test/testfinegrained.py
|
mhchia/mypy
|
b7d74f3e202fa60ff0d8a2efc299913aad68e2ed
|
[
"PSF-2.0"
] | 2
|
2022-03-10T11:53:19.000Z
|
2022-03-22T15:24:21.000Z
|
mypy/test/testfinegrained.py
|
mhchia/mypy
|
b7d74f3e202fa60ff0d8a2efc299913aad68e2ed
|
[
"PSF-2.0"
] | null | null | null |
"""Test cases for fine-grained incremental checking.
Each test cases runs a batch build followed by one or more fine-grained
incremental steps. We verify that each step produces the expected output.
See the comment at the top of test-data/unit/fine-grained.test for more
information.
N.B.: Unlike most of the other test suites, testfinegrained does not
rely on an alt_lib_path for finding source files. This means that they
can test interactions with the lib_path that is built implicitly based
on specified sources.
"""
import os
import re
from typing import List, Dict, Any, Tuple, Union, cast
from mypy import build
from mypy.modulefinder import BuildSource
from mypy.errors import CompileError
from mypy.options import Options
from mypy.test.config import test_temp_dir
from mypy.test.data import (
DataDrivenTestCase, DataSuite, UpdateFile, DeleteFile
)
from mypy.test.helpers import (
assert_string_arrays_equal, parse_options, assert_module_equivalence,
assert_target_equivalence, perform_file_operations,
)
from mypy.server.mergecheck import check_consistency
from mypy.dmypy_util import DEFAULT_STATUS_FILE
from mypy.dmypy_server import Server
from mypy.config_parser import parse_config_file
from mypy.find_sources import create_source_list
import pytest
# Set to True to perform (somewhat expensive) checks for duplicate AST nodes after merge
CHECK_CONSISTENCY = False
class FineGrainedSuite(DataSuite):
files = [
'fine-grained.test',
'fine-grained-cycles.test',
'fine-grained-blockers.test',
'fine-grained-modules.test',
'fine-grained-follow-imports.test',
'fine-grained-suggest.test',
'fine-grained-attr.test',
]
# Whether to use the fine-grained cache in the testing. This is overridden
# by a trivial subclass to produce a suite that uses the cache.
use_cache = False
def should_skip(self, testcase: DataDrivenTestCase) -> bool:
# Decide whether to skip the test. This could have been structured
# as a filter() classmethod also, but we want the tests reported
# as skipped, not just elided.
if self.use_cache:
if testcase.only_when == '-only_when_nocache':
return True
# TODO: In caching mode we currently don't well support
# starting from cached states with errors in them.
if testcase.output and testcase.output[0] != '==':
return True
else:
if testcase.only_when == '-only_when_cache':
return True
return False
def run_case(self, testcase: DataDrivenTestCase) -> None:
if self.should_skip(testcase):
pytest.skip()
return
main_src = '\n'.join(testcase.input)
main_path = os.path.join(test_temp_dir, 'main')
with open(main_path, 'w', encoding='utf8') as f:
f.write(main_src)
options = self.get_options(main_src, testcase, build_cache=False)
build_options = self.get_options(main_src, testcase, build_cache=True)
server = Server(options, DEFAULT_STATUS_FILE)
num_regular_incremental_steps = self.get_build_steps(main_src)
step = 1
sources = self.parse_sources(main_src, step, options)
if step <= num_regular_incremental_steps:
messages = self.build(build_options, sources)
else:
messages = self.run_check(server, sources)
a = []
if messages:
a.extend(normalize_messages(messages))
assert testcase.tmpdir
a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
if server.fine_grained_manager:
if CHECK_CONSISTENCY:
check_consistency(server.fine_grained_manager)
steps = testcase.find_steps()
all_triggered = []
for operations in steps:
step += 1
output, triggered = self.perform_step(
operations,
server,
options,
build_options,
testcase,
main_src,
step,
num_regular_incremental_steps,
)
a.append('==')
a.extend(output)
all_triggered.extend(triggered)
# Normalize paths in test output (for Windows).
a = [line.replace('\\', '/') for line in a]
assert_string_arrays_equal(
testcase.output, a,
'Invalid output ({}, line {})'.format(
testcase.file, testcase.line))
if testcase.triggered:
assert_string_arrays_equal(
testcase.triggered,
self.format_triggered(all_triggered),
'Invalid active triggers ({}, line {})'.format(testcase.file,
testcase.line))
def get_options(self,
source: str,
testcase: DataDrivenTestCase,
build_cache: bool,) -> Options:
# This handles things like '# flags: --foo'.
options = parse_options(source, testcase, incremental_step=1)
options.incremental = True
options.use_builtins_fixtures = True
options.show_traceback = True
options.error_summary = False
options.fine_grained_incremental = not build_cache
options.use_fine_grained_cache = self.use_cache and not build_cache
options.cache_fine_grained = self.use_cache
options.local_partial_types = True
if re.search('flags:.*--follow-imports', source) is None:
# Override the default for follow_imports
options.follow_imports = 'error'
for name, _ in testcase.files:
if 'mypy.ini' in name or 'pyproject.toml' in name:
parse_config_file(options, lambda: None, name)
break
return options
def run_check(self, server: Server, sources: List[BuildSource]) -> List[str]:
response = server.check(sources, is_tty=False, terminal_width=-1)
out = cast(str, response['out'] or response['err'])
return out.splitlines()
def build(self,
options: Options,
sources: List[BuildSource]) -> List[str]:
try:
result = build.build(sources=sources,
options=options)
except CompileError as e:
return e.messages
return result.errors
def format_triggered(self, triggered: List[List[str]]) -> List[str]:
result = []
for n, triggers in enumerate(triggered):
filtered = [trigger for trigger in triggers
if not trigger.endswith('__>')]
filtered = sorted(filtered)
result.append(('%d: %s' % (n + 2, ', '.join(filtered))).strip())
return result
def get_build_steps(self, program_text: str) -> int:
"""Get the number of regular incremental steps to run, from the test source"""
if not self.use_cache:
return 0
m = re.search('# num_build_steps: ([0-9]+)$', program_text, flags=re.MULTILINE)
if m is not None:
return int(m.group(1))
return 1
def perform_step(self,
operations: List[Union[UpdateFile, DeleteFile]],
server: Server,
options: Options,
build_options: Options,
testcase: DataDrivenTestCase,
main_src: str,
step: int,
num_regular_incremental_steps: int) -> Tuple[List[str], List[List[str]]]:
"""Perform one fine-grained incremental build step (after some file updates/deletions).
Return (mypy output, triggered targets).
"""
perform_file_operations(operations)
sources = self.parse_sources(main_src, step, options)
if step <= num_regular_incremental_steps:
new_messages = self.build(build_options, sources)
else:
new_messages = self.run_check(server, sources)
updated: List[str] = []
changed: List[str] = []
targets: List[str] = []
triggered = []
if server.fine_grained_manager:
if CHECK_CONSISTENCY:
check_consistency(server.fine_grained_manager)
triggered.append(server.fine_grained_manager.triggered)
updated = server.fine_grained_manager.updated_modules
changed = [mod for mod, file in server.fine_grained_manager.changed_modules]
targets = server.fine_grained_manager.processed_targets
expected_stale = testcase.expected_stale_modules.get(step - 1)
if expected_stale is not None:
assert_module_equivalence(
'stale' + str(step - 1),
expected_stale, changed)
expected_rechecked = testcase.expected_rechecked_modules.get(step - 1)
if expected_rechecked is not None:
assert_module_equivalence(
'rechecked' + str(step - 1),
expected_rechecked, updated)
expected = testcase.expected_fine_grained_targets.get(step)
if expected:
assert_target_equivalence(
'targets' + str(step),
expected, targets)
new_messages = normalize_messages(new_messages)
a = new_messages
assert testcase.tmpdir
a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
return a, triggered
def parse_sources(self, program_text: str,
incremental_step: int,
options: Options) -> List[BuildSource]:
"""Return target BuildSources for a test case.
Normally, the unit tests will check all files included in the test
case. This differs from how testcheck works by default, as dmypy
doesn't currently support following imports.
You can override this behavior and instruct the tests to check
multiple modules by using a comment like this in the test case
input:
# cmd: main a.py
You can also use `# cmdN:` to have a different cmd for incremental
step N (2, 3, ...).
"""
m = re.search('# cmd: mypy ([a-zA-Z0-9_./ ]+)$', program_text, flags=re.MULTILINE)
regex = '# cmd{}: mypy ([a-zA-Z0-9_./ ]+)$'.format(incremental_step)
alt_m = re.search(regex, program_text, flags=re.MULTILINE)
if alt_m is not None:
# Optionally return a different command if in a later step
# of incremental mode, otherwise default to reusing the
# original cmd.
m = alt_m
if m:
# The test case wants to use a non-default set of files.
paths = [os.path.join(test_temp_dir, path) for path in m.group(1).strip().split()]
return create_source_list(paths, options)
else:
base = BuildSource(os.path.join(test_temp_dir, 'main'), '__main__', None)
# Use expand_dir instead of create_source_list to avoid complaints
# when there aren't any .py files in an increment
return [base] + create_source_list([test_temp_dir], options,
allow_empty_dir=True)
def maybe_suggest(self, step: int, server: Server, src: str, tmp_dir: str) -> List[str]:
output: List[str] = []
targets = self.get_suggest(src, step)
for flags, target in targets:
json = '--json' in flags
callsites = '--callsites' in flags
no_any = '--no-any' in flags
no_errors = '--no-errors' in flags
try_text = '--try-text' in flags
m = re.match('--flex-any=([0-9.]+)', flags)
flex_any = float(m.group(1)) if m else None
m = re.match(r'--use-fixme=(\w+)', flags)
use_fixme = m.group(1) if m else None
m = re.match('--max-guesses=([0-9]+)', flags)
max_guesses = int(m.group(1)) if m else None
res = cast(Dict[str, Any],
server.cmd_suggest(
target.strip(), json=json, no_any=no_any, no_errors=no_errors,
try_text=try_text, flex_any=flex_any, use_fixme=use_fixme,
callsites=callsites, max_guesses=max_guesses))
val = res['error'] if 'error' in res else res['out'] + res['err']
if json:
# JSON contains already escaped \ on Windows, so requires a bit of care.
val = val.replace('\\\\', '\\')
val = val.replace(os.path.realpath(tmp_dir) + os.path.sep, '')
output.extend(val.strip().split('\n'))
return normalize_messages(output)
def get_suggest(self, program_text: str,
incremental_step: int) -> List[Tuple[str, str]]:
step_bit = '1?' if incremental_step == 1 else str(incremental_step)
regex = '# suggest{}: (--[a-zA-Z0-9_\\-./=?^ ]+ )*([a-zA-Z0-9_.:/?^ ]+)$'.format(step_bit)
m = re.findall(regex, program_text, flags=re.MULTILINE)
return m
def normalize_messages(messages: List[str]) -> List[str]:
return [re.sub('^tmp' + re.escape(os.sep), '', message)
for message in messages]
| 39.646018
| 98
| 0.603497
|
7950794e82481fc87e14cd45b5c5e8df3ace890c
| 38,649
|
py
|
Python
|
PyInstaller/depend/bindepend.py
|
SnoopJeDi/pyinstaller
|
20f922cdac0207e943395bcae4a23d597558c5f7
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/depend/bindepend.py
|
SnoopJeDi/pyinstaller
|
20f922cdac0207e943395bcae4a23d597558c5f7
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/depend/bindepend.py
|
SnoopJeDi/pyinstaller
|
20f922cdac0207e943395bcae4a23d597558c5f7
|
[
"Apache-2.0"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Find external dependencies of binary libraries.
"""
import collections
import ctypes.util
import os
import re
import sys
# Required for extracting eggs.
import zipfile
import subprocess
from PyInstaller import compat
from PyInstaller import log as logging
from PyInstaller.depend import dylib, utils
from PyInstaller.utils.win32 import winutils
logger = logging.getLogger(__name__)
seen = set()
# Import windows specific stuff.
if compat.is_win:
from distutils.sysconfig import get_python_lib
import pefile
from PyInstaller.utils.win32 import winmanifest, winresource
def getfullnameof(mod, xtrapath=None):
"""
Return the full path name of MOD.
* MOD is the basename of a dll or pyd.
* XTRAPATH is a path or list of paths to search first.
Return the full path name of MOD. Will search the full Windows search path, as well as sys.path
"""
pywin32_paths = []
if compat.is_win:
pywin32_paths = [os.path.join(get_python_lib(), 'pywin32_system32')]
if compat.is_venv:
pywin32_paths.append(os.path.join(compat.base_prefix, 'Lib', 'site-packages', 'pywin32_system32'))
epath = (
sys.path + # Search sys.path first!
pywin32_paths + winutils.get_system_path() + compat.getenv('PATH', '').split(os.pathsep)
)
if xtrapath is not None:
if isinstance(xtrapath, str):
epath.insert(0, xtrapath)
else:
epath = xtrapath + epath
for p in epath:
npth = os.path.join(p, mod)
if os.path.exists(npth) and matchDLLArch(npth):
return npth
return ''
def _getImports_pe(pth):
"""
Find the binary dependencies of PTH.
This implementation walks through the PE header and uses library pefile for that and supports 32/64bit Windows
"""
dlls = set()
# By default, pefile library parses all PE information. We are only interested in the list of dependent dlls.
# Performance is improved by reading only needed information. https://code.google.com/p/pefile/wiki/UsageExamples
pe = pefile.PE(pth, fast_load=True)
pe.parse_data_directories(
directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],
],
forwarded_exports_only=True,
import_dllnames_only=True,
)
# Some libraries have no other binary dependencies. Use empty list in that case. Otherwise pefile would return None.
# e.g., C:\windows\system32\kernel32.dll on Wine
for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):
dll_str = winutils.convert_dll_name_to_str(entry.dll)
dlls.add(dll_str)
# We must also read the exports table to find forwarded symbols:
# http://blogs.msdn.com/b/oldnewthing/archive/2006/07/19/671238.aspx
exportSymbols = getattr(pe, 'DIRECTORY_ENTRY_EXPORT', None)
if exportSymbols:
for sym in exportSymbols.symbols:
if sym.forwarder is not None:
# sym.forwarder is a bytes object. Convert it to a string.
forwarder = winutils.convert_dll_name_to_str(sym.forwarder)
# sym.forwarder is for example 'KERNEL32.EnterCriticalSection'
dll = forwarder.split('.')[0]
dlls.add(dll + ".dll")
pe.close()
return dlls
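# Quick usage sketch (illustrative, not part of the original module): list the DLLs that
# the running interpreter imports directly. Guarded so it does nothing off Windows, where
# pefile may not even be importable.
if compat.is_win:
    for _dll in sorted(_getImports_pe(compat.python_executable)):
        logger.debug("python executable imports %s", _dll)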
def _extract_from_egg(toc):
"""
Ensure all binary modules in zipped eggs get extracted and included with the frozen executable.
    Return the modified table of contents.
"""
new_toc = []
for item in toc:
# Item is a tuple
# (mod_name, path, type)
modname, pth, typ = item
if not os.path.isfile(pth):
pth = check_extract_from_egg(pth)[0][0]
# Add value to new data structure.
new_toc.append((modname, pth, typ))
return new_toc
BindingRedirect = collections.namedtuple('BindingRedirect', 'name language arch oldVersion newVersion publicKeyToken')
def match_binding_redirect(manifest, redirect):
return all([
manifest.name == redirect.name,
manifest.version == redirect.oldVersion,
manifest.language == redirect.language,
manifest.processorArchitecture == redirect.arch,
manifest.publicKeyToken == redirect.publicKeyToken,
])
_exe_machine_type = None
def matchDLLArch(filename):
"""
Return True if the DLL given by filename matches the CPU type/architecture of the Python process running
PyInstaller.
Always returns True on non-Windows platforms.
    :param filename: Path to the DLL to check.
    :type filename: str
    :return: True if the DLL's machine type matches that of the running Python process (always True off Windows).
    :rtype: bool
"""
# TODO: check machine type on other platforms?
if not compat.is_win:
return True
global _exe_machine_type
try:
if _exe_machine_type is None:
pefilename = compat.python_executable # for exception handling
exe_pe = pefile.PE(pefilename, fast_load=True)
_exe_machine_type = exe_pe.FILE_HEADER.Machine
exe_pe.close()
pefilename = filename # for exception handling
pe = pefile.PE(filename, fast_load=True)
match_arch = pe.FILE_HEADER.Machine == _exe_machine_type
pe.close()
except pefile.PEFormatError as exc:
raise SystemExit('Cannot get architecture from file: %s\n Reason: %s' % (pefilename, exc))
return match_arch
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
    Expand LTOC to include the full closure of binary dependencies.
    `LTOC` is a logical table of contents, i.e., a sequence of (name, path, typecode) tuples. Return LTOC expanded by all
    the binary dependencies of the entries in LTOC, except those listed in the module global EXCLUDES.
`manifest` may be a winmanifest.Manifest instance for a program manifest, so that all dependent assemblies of
python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will be added to the list as
BindingRedirect objects so they can later be used to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
for nm, pth, typ in lTOC:
if nm.upper() in seen:
continue
logger.debug("Analyzing %s", pth)
seen.add(nm.upper())
if compat.is_win:
for ftocnm, fn in getAssemblyFiles(pth, manifest, redirects):
lTOC.append((ftocnm, fn, 'BINARY'))
for lib, npth in selectImports(pth, xtrapath):
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
lTOC.append((lib, npth, 'BINARY'))
return lTOC
def pkg_resources_get_default_cache():
"""
Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a
'Python-Eggs' subdirectory of the 'Application Data' directory. On all other systems, it's '~/.python-eggs'.
"""
# This function borrowed from setuptools/pkg_resources
egg_cache = compat.getenv('PYTHON_EGG_CACHE')
if egg_cache is not None:
return egg_cache
if os.name != 'nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE', 'HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, compat.getenv(key))
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError("Please set the PYTHON_EGG_CACHE environment variable")
def check_extract_from_egg(pth, todir=None):
r"""
Check if path points to a file inside a python egg file, extract the file from the egg to a cache directory (
following pkg_resources convention) and return [(extracted path, egg file path, relative path inside egg file)].
Otherwise, just return [(original path, None, None)]. If path points to an egg file directly, return a list with
all files from the egg formatted like above.
Example:
>>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
[(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
"""
rv = []
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
for i, name in enumerate(components):
if name.lower().endswith(".egg"):
eggpth = os.path.sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
try:
egg = zipfile.ZipFile(eggpth)
except zipfile.BadZipfile as e:
raise SystemExit("Error: %s %s" % (eggpth, e))
if todir is None:
# Use the same directory as setuptools/pkg_resources. So, if the specific egg was accessed before
# (not necessarily by pyinstaller), the extracted contents already exist (pkg_resources puts them
# there) and can be used.
todir = os.path.join(pkg_resources_get_default_cache(), name + "-tmp")
if components[i + 1:]:
members = ["/".join(components[i + 1:])]
else:
members = egg.namelist()
for member in members:
pth = os.path.join(todir, member)
if not os.path.isfile(pth):
dirname = os.path.dirname(pth)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(pth, "wb") as f:
f.write(egg.read(member))
rv.append((pth, eggpth, member))
return rv
return [(pth, None, None)]
def getAssemblies(pth):
"""
On Windows return the dependent Side-by-Side (SxS) assemblies of a binary as a list of Manifest objects.
Dependent assemblies are required only by binaries compiled with MSVC 9.0. Python 2.7 and 3.2 are compiled with
MSVC 9.0 and thus depend on Microsoft Redistributable runtime libraries 9.0.
Python 3.3+ is compiled with version 10.0 and does not use SxS assemblies.
FIXME: Can this be removed since we now only support Python 3.5+?
FIXME: IS there some test-case covering this?
"""
if pth.lower().endswith(".manifest"):
return []
# check for manifest file
manifestnm = pth + ".manifest"
if os.path.isfile(manifestnm):
with open(manifestnm, "rb") as fd:
res = {winmanifest.RT_MANIFEST: {1: {0: fd.read()}}}
else:
# check the binary for embedded manifest
try:
res = winmanifest.GetManifestResources(pth)
except winresource.pywintypes.error as exc:
if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
logger.info('Cannot get manifest resource from non-PE file %s', pth)
return []
raise
rv = []
if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):
for name in res[winmanifest.RT_MANIFEST]:
for language in res[winmanifest.RT_MANIFEST][name]:
# check the manifest for dependent assemblies
try:
manifest = winmanifest.Manifest()
manifest.filename = ":".join([
pth,
str(winmanifest.RT_MANIFEST),
str(name),
str(language),
])
manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False)
except Exception:
logger.error("Cannot parse manifest resource %s, %s from %s", name, language, pth, exc_info=1)
else:
if manifest.dependentAssemblies:
logger.debug("Dependent assemblies of %s:", pth)
logger.debug(", ".join([assembly.getid() for assembly in manifest.dependentAssemblies]))
rv.extend(manifest.dependentAssemblies)
return rv
def getAssemblyFiles(pth, manifest=None, redirects=None):
"""
Find all assemblies that are dependencies of the given binary and return the files that make up the assemblies as
(name, fullpath) tuples.
If a WinManifest object is passed as `manifest`, also updates that manifest to reference the returned assemblies.
This is done only to update the built app's .exe with the dependencies of python.exe
If a list is passed as `redirects`, and binding redirects in policy files are applied when searching for
assemblies, BindingRedirect objects are appended to this list.
Return a list of pairs (name, fullpath)
"""
rv = []
if manifest:
_depNames = set(dep.name for dep in manifest.dependentAssemblies)
for assembly in getAssemblies(pth):
if assembly.getid().upper() in seen:
continue
if manifest and assembly.name not in _depNames:
# Add assembly as dependency to our final output exe's manifest
logger.info("Adding %s to dependent assemblies of final executable\n required by %s", assembly.name, pth)
manifest.dependentAssemblies.append(assembly)
_depNames.add(assembly.name)
if not dylib.include_library(assembly.name):
logger.debug("Skipping assembly %s", assembly.getid())
continue
if assembly.optional:
logger.debug("Skipping optional assembly %s", assembly.getid())
continue
from PyInstaller.config import CONF
if CONF.get("win_no_prefer_redirects"):
files = assembly.find_files()
else:
files = []
if not len(files):
# If no files were found, it may be the case that the required version of the assembly is not installed, and
# the policy file is redirecting it to a newer version. So, we collect the newer version instead.
files = assembly.find_files(ignore_policies=False)
if len(files) and redirects is not None:
# New version was found, old version was not. Add a redirect in the app configuration.
old_version = assembly.version
new_version = assembly.get_policy_redirect()
logger.info("Adding redirect %s version %s -> %s", assembly.name, old_version, new_version)
redirects.append(
BindingRedirect(
name=assembly.name,
language=assembly.language,
arch=assembly.processorArchitecture,
publicKeyToken=assembly.publicKeyToken,
oldVersion=old_version,
newVersion=new_version,
)
)
if files:
seen.add(assembly.getid().upper())
for fn in files:
fname, fext = os.path.splitext(fn)
if fext.lower() == ".manifest":
nm = assembly.name + fext
else:
nm = os.path.basename(fn)
ftocnm = nm
if assembly.language not in (None, "", "*", "neutral"):
ftocnm = os.path.join(assembly.getlanguage(), ftocnm)
nm, ftocnm, fn = [item.encode(sys.getfilesystemencoding()) for item in (nm, ftocnm, fn)]
if fn.upper() not in seen:
logger.debug("Adding %s", ftocnm)
seen.add(nm.upper())
seen.add(fn.upper())
rv.append((ftocnm, fn))
else:
#logger.info("skipping %s part of assembly %s dependency of %s", ftocnm, assembly.name, pth)
pass
else:
logger.error("Assembly %s not found", assembly.getid())
# Convert items in list from 'bytes' type to 'str' type.
# NOTE: with Python 3 we somehow get type 'bytes' and it then causes other issues and failures with PyInstaller.
new_rv = []
for item in rv:
a = item[0].decode('ascii')
b = item[1].decode('ascii')
new_rv.append((a, b))
rv = new_rv
return rv
def selectImports(pth, xtrapath=None):
"""
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if xtrapath is None:
xtrapath = [os.path.dirname(pth)]
else:
assert isinstance(xtrapath, list)
xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy
dlls = getImports(pth)
for lib in dlls:
if lib.upper() in seen:
continue
if not compat.is_win:
# all other platforms
npth = lib
lib = os.path.basename(lib)
else:
# plain win case
npth = getfullnameof(lib, xtrapath)
# Now npth is a candidate lib if found. Check again for excludes, but with regex. FIXME: split the list.
if npth:
candidatelib = npth
else:
candidatelib = lib
if not dylib.include_library(candidatelib):
if candidatelib.find('libpython') < 0 and candidatelib.find('Python.framework') < 0:
# skip libs not containing (libpython or Python.framework)
if npth.upper() not in seen:
logger.debug("Skipping %s dependency of %s", lib, os.path.basename(pth))
continue
else:
pass
if npth:
if npth.upper() not in seen:
logger.debug("Adding %s dependency of %s from %s", lib, os.path.basename(pth), npth)
rv.append((lib, npth))
elif dylib.warn_missing_lib(lib):
logger.warning("lib not found: %s dependency of %s", lib, pth)
return rv
def _getImports_ldd(pth):
"""
Find the binary dependencies of PTH.
This implementation is for ldd platforms (mostly unix).
"""
rslt = set()
if compat.is_aix:
# Match libs of the form
# 'archivelib.a(objectmember.so/.o)'
# or
# 'sharedlib.so'
# Will not match the fake lib '/unix'
lddPattern = re.compile(r"^\s*(((?P<libarchive>(.*\.a))(?P<objectmember>\(.*\)))|((?P<libshared>(.*\.so))))$")
elif compat.is_hpux:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so => /usr/local/lib/hpux32/libpython2.7.so'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
elif compat.is_solar:
# Match libs of the form
# 'sharedlib.so => full-path-to-lib
# e.g.
# 'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'
# Will not match the platform specific libs starting with '/platform'
lddPattern = re.compile(r"^\s+(.*)\s+=>\s+(.*)$")
else:
lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
p = subprocess.run(['ldd', pth], stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
for line in p.stderr.splitlines():
if not line:
continue
# Python extensions (including stdlib ones) are not linked against python.so but rely on Python's symbols having
# already been loaded into symbol space at runtime. musl's ldd issues a series of harmless warnings to stderr
# telling us that those symbols are unfindable. These should be suppressed.
elif line.startswith("Error relocating ") and line.endswith(" symbol not found"):
continue
# Propagate any other warnings it might have.
print(line, file=sys.stderr)
for line in p.stdout.splitlines():
m = lddPattern.search(line)
if m:
if compat.is_aix:
libarchive = m.group('libarchive')
if libarchive:
# We matched an archive lib with a request for a particular embedded shared object.
# 'archivelib.a(objectmember.so/.o)'
lib = libarchive
name = os.path.basename(lib) + m.group('objectmember')
else:
# We matched a stand-alone shared library.
# 'sharedlib.so'
lib = m.group('libshared')
name = os.path.basename(lib)
elif compat.is_hpux:
name, lib = m.group(1), m.group(2)
else:
name, lib = m.group(1), m.group(2)
if name[:10] in ('linux-gate', 'linux-vdso'):
# linux-gate is a fake library which does not exist and should be ignored. See also:
# http://www.trilithium.com/johan/2005/08/linux-gate/
continue
if compat.is_cygwin:
# exclude Windows system library
if lib.lower().startswith('/cygdrive/c/windows/system'):
continue
if os.path.exists(lib):
# Add lib if it is not already found.
if lib not in rslt:
rslt.add(lib)
elif dylib.warn_missing_lib(name):
logger.warning('Cannot find %s in path %s (needed by %s)', name, lib, pth)
elif line.endswith("not found"):
# On glibc-based linux distributions, missing libraries are marked with name.so => not found
tokens = line.split('=>')
if len(tokens) != 2:
continue
name = tokens[0].strip()
if dylib.warn_missing_lib(name):
logger.warning('Cannot find %s (needed by %s)', name, pth)
return rslt
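# Regex sanity sketch (illustrative; assumes glibc-style ldd output and mirrors the
# default pattern defined above): a resolved ldd line splits into (name, absolute path).
_ldd_demo = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)").search(
    "\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f9e2f000000)"
)
assert _ldd_demo and _ldd_demo.groups() == ("libz.so.1", "/lib/x86_64-linux-gnu/libz.so.1")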
def _getImports_macholib(pth):
"""
Find the binary dependencies of PTH.
    This implementation is for Mac OS X and uses the macholib library.
"""
from macholib.dyld import dyld_find
from macholib.mach_o import LC_RPATH
from macholib.MachO import MachO
from macholib.util import in_system_path
rslt = set()
seen = set() # Libraries read from binary headers.
#- Walk through mach binary headers.
m = MachO(pth)
for header in m.headers:
for idx, name, lib in header.walkRelocatables():
# Sometimes libraries are present multiple times.
if lib not in seen:
seen.add(lib)
# Walk through mach binary headers and look for LC_RPATH. macholib can't handle @rpath. LC_RPATH has to be read from
# the MachO header.
# TODO Do we need to remove LC_RPATH from MachO load commands? Will it cause any harm to leave them untouched?
# Removing LC_RPATH should be implemented when getting files from the bincache if it is necessary.
run_paths = set()
for header in m.headers:
for command in header.commands:
# A command is a tuple like:
# (<macholib.mach_o.load_command object at 0x>,
# <macholib.mach_o.rpath_command object at 0x>,
# '../lib\x00\x00')
cmd_type = command[0].cmd
if cmd_type == LC_RPATH:
rpath = command[2].decode('utf-8')
# Remove trailing '\x00' characters. E.g., '../lib\x00\x00'
rpath = rpath.rstrip('\x00')
# Replace the @executable_path and @loader_path keywords with the actual path to the binary.
executable_path = os.path.dirname(pth)
rpath = re.sub('^@(executable_path|loader_path|rpath)(/|$)', executable_path + r'\2', rpath)
# Make rpath absolute. According to Apple doc LC_RPATH is always relative to the binary location.
rpath = os.path.normpath(os.path.join(executable_path, rpath))
run_paths.update([rpath])
else:
# Frameworks that have this structure Name.framework/Versions/N/Name need to search at the same level
# as the framework dir. This is specifically needed so that the QtWebEngine dependencies can be found.
if '.framework' in pth:
run_paths.update(['../../../'])
# For distributions like Anaconda, all of the dylibs are stored in the lib directory of the Python distribution, not
# alongside of the .so's in each module's subdirectory.
run_paths.add(os.path.join(compat.base_prefix, 'lib'))
#- Try to find files in file system.
# In cases with @loader_path or @executable_path try to look in the same directory as the checked binary is. This
# seems to work in most cases.
exec_path = os.path.abspath(os.path.dirname(pth))
for lib in seen:
# Suppose that @rpath is not used for system libraries and using macholib can be avoided. macholib cannot handle
# @rpath.
if lib.startswith('@rpath'):
lib = lib.replace('@rpath', '.') # Make path relative.
final_lib = None # Absolute path to existing lib on disk.
# Try multiple locations.
for run_path in run_paths:
# @rpath may contain relative value. Use exec_path as base path.
if not os.path.isabs(run_path):
run_path = os.path.join(exec_path, run_path)
# Stop looking for lib when found in first location.
if os.path.exists(os.path.join(run_path, lib)):
final_lib = os.path.abspath(os.path.join(run_path, lib))
rslt.add(final_lib)
break
# Log warning if no existing file found.
if not final_lib and dylib.warn_missing_lib(lib):
logger.warning('Cannot find path %s (needed by %s)', lib, pth)
# Macholib has to be used to get absolute path to libraries.
else:
# macholib cannot handle @loader_path. It has to be handled the same way as @executable_path. It is also
# replaced by 'exec_path'.
if lib.startswith('@loader_path'):
lib = lib.replace('@loader_path', '@executable_path')
try:
lib = dyld_find(lib, executable_path=exec_path)
rslt.add(lib)
except ValueError:
# Starting with Big Sur, system libraries are hidden. And we do not collect system libraries on any
# macOS version anyway, so suppress the corresponding error messages.
if not in_system_path(lib) and dylib.warn_missing_lib(lib):
logger.warning('Cannot find path %s (needed by %s)', lib, pth)
return rslt
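# Rewrite-rule sketch (illustrative): the @loader_path / @executable_path / @rpath
# prefixes seen above are substituted with the directory of the analyzed binary.
# '/Applications/demo' is a made-up directory used only for this demonstration.
assert re.sub(
    '^@(executable_path|loader_path|rpath)(/|$)', '/Applications/demo' + r'\2', '@loader_path/../Frameworks/Foo'
) == '/Applications/demo/../Frameworks/Foo'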
def getImports(pth):
"""
Forwards to the correct getImports implementation for the platform.
"""
if compat.is_win:
if pth.lower().endswith(".manifest"):
return []
try:
return _getImports_pe(pth)
except Exception as exception:
# Assemblies can pull in files which aren't necessarily PE, but are still needed by the assembly. Any
# additional binary dependencies should already have been handled by selectAssemblies in that case, so just
# warn, return an empty list and continue. For less specific errors also log the traceback.
logger.warning('Cannot get binary dependencies for file: %s', pth)
logger.warning(' Reason: %s', exception, exc_info=not isinstance(exception, pefile.PEFormatError))
return []
elif compat.is_darwin:
return _getImports_macholib(pth)
else:
return _getImports_ldd(pth)
def findLibrary(name):
"""
Look for a library in the system.
Emulate the algorithm used by dlopen. `name` must include the prefix, e.g., ``libpython2.4.so``.
"""
assert compat.is_unix, "Current implementation for Unix only (Linux, Solaris, AIX, FreeBSD)"
# Look in the LD_LIBRARY_PATH according to platform.
if compat.is_aix:
lp = compat.getenv('LIBPATH', '')
elif compat.is_darwin:
lp = compat.getenv('DYLD_LIBRARY_PATH', '')
else:
lp = compat.getenv('LD_LIBRARY_PATH', '')
lib = _which_library(name, filter(None, lp.split(os.pathsep)))
# Look in /etc/ld.so.cache
# Solaris does not have /sbin/ldconfig. Just check if this file exists.
if lib is None:
utils.load_ldconfig_cache()
lib = utils.LDCONFIG_CACHE.get(name)
if lib:
assert os.path.isfile(lib)
# Look in the known safe paths.
if lib is None:
# Architecture independent locations.
paths = ['/lib', '/usr/lib']
# Architecture dependent locations.
if compat.architecture == '32bit':
paths.extend(['/lib32', '/usr/lib32'])
else:
paths.extend(['/lib64', '/usr/lib64'])
# Machine dependent locations.
if compat.machine == 'intel':
if compat.architecture == '32bit':
paths.extend(['/usr/lib/i386-linux-gnu'])
else:
paths.extend(['/usr/lib/x86_64-linux-gnu'])
# On Debian/Ubuntu /usr/bin/python is linked statically with libpython. Newer Debian/Ubuntu with multiarch
# support puts the libpythonX.Y.so in paths like /usr/lib/i386-linux-gnu/.
try:
# Module available only in Python 2.7+
import sysconfig
# 'multiarchsubdir' works on Debian/Ubuntu only in Python 2.7 and 3.3+.
arch_subdir = sysconfig.get_config_var('multiarchsubdir')
# Ignore if None is returned.
if arch_subdir:
arch_subdir = os.path.basename(arch_subdir)
paths.append(os.path.join('/usr/lib', arch_subdir))
else:
logger.debug('Multiarch directory not detected.')
except ImportError:
logger.debug('Multiarch directory not detected.')
# Termux (a Ubuntu like subsystem for Android) has an additional libraries directory.
if os.path.isdir('/data/data/com.termux/files/usr/lib'):
paths.append('/data/data/com.termux/files/usr/lib')
if compat.is_aix:
paths.append('/opt/freeware/lib')
elif compat.is_hpux:
if compat.architecture == '32bit':
paths.append('/usr/local/lib/hpux32')
else:
paths.append('/usr/local/lib/hpux64')
elif compat.is_freebsd or compat.is_openbsd:
paths.append('/usr/local/lib')
lib = _which_library(name, paths)
# Give up :(
if lib is None:
return None
# Resolve the file name into the soname
if compat.is_freebsd or compat.is_aix or compat.is_openbsd:
# On FreeBSD objdump does not show SONAME, and on AIX objdump does not exist, so we just return the lib we
# have found.
return lib
else:
dir = os.path.dirname(lib)
return os.path.join(dir, _get_so_name(lib))
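# Minimal sketch (illustrative; assumes a typical Linux system with a binutils toolchain,
# and uses 'libz.so.1' purely as an example soname): findLibrary() resolves a soname to
# an on-disk path, or returns None when the library is absent.
if compat.is_linux:
    logger.debug("libz.so.1 resolved to %s", findLibrary("libz.so.1"))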
def _which_library(name, dirs):
"""
Search for a shared library in a list of directories.
Args:
name:
The library name including the `lib` prefix but excluding any `.so` suffix.
dirs:
An iterable of folders to search in.
Returns:
The path to the library if found or None otherwise.
"""
matcher = _library_matcher(name)
for path in filter(os.path.exists, dirs):
for _path in os.listdir(path):
if matcher(_path):
return os.path.join(path, _path)
def _library_matcher(name):
"""
    Create a callable that matches a library file name if it starts with **name**, optionally followed by a version number and a dot.
"""
return re.compile(name + r"[0-9]*\.").match
def _get_so_name(filename):
"""
Return the soname of a library.
    The soname is useful when there are multiple symlinks to one library.
"""
# TODO verify that objdump works on other unixes and not Linux only.
cmd = ["objdump", "-p", filename]
pattern = r'\s+SONAME\s+([^\s]+)'
if compat.is_solar:
cmd = ["elfdump", "-d", filename]
pattern = r'\s+SONAME\s+[^\s]+\s+([^\s]+)'
m = re.search(pattern, compat.exec_command(*cmd))
return m.group(1)
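# Pattern sanity sketch (illustrative; the string mimics one line of `objdump -p`
# output): only the soname itself is captured from the SONAME row.
assert re.search(r'\s+SONAME\s+([^\s]+)', "  SONAME               libssl.so.1.1").group(1) == "libssl.so.1.1"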
def get_python_library_path():
"""
Find dynamic Python library that will be bundled with frozen executable.
NOTE: This is a fallback option when the Python executable is likely statically linked with the Python library and
we need to search more for it. For example, this is the case on Debian/Ubuntu.
Return full path to Python dynamic library or None when not found.
We need to know name of the Python dynamic library for the bootloader. Bootloader has to know what library to
load and not try to guess.
Some linux distributions (e.g. debian-based) statically link the Python executable to the libpython,
so bindepend does not include it in its output. In this situation let's try to find it.
Custom Mac OS builds could possibly also have non-framework style libraries, so this method also checks for that
variant as well.
"""
def _find_lib_in_libdirs(*libdirs):
for libdir in libdirs:
for name in compat.PYDYLIB_NAMES:
full_path = os.path.join(libdir, name)
if os.path.exists(full_path):
return full_path
return None
# If this is Microsoft App Store Python, check the compat.base_path first. While compat.python_executable resolves
# to actual python.exe file, the latter contains relative library reference that does not get properly resolved by
# getfullnameof().
if compat.is_ms_app_store:
python_libname = _find_lib_in_libdirs(compat.base_prefix)
if python_libname:
return python_libname
# Try to get Python library name from the Python executable. It assumes that Python library is not statically
# linked.
dlls = getImports(compat.python_executable)
for filename in dlls:
for name in compat.PYDYLIB_NAMES:
if os.path.basename(filename) == name:
# On Windows filename is just like 'python27.dll'. Convert it to absolute path.
if compat.is_win and not os.path.isabs(filename):
filename = getfullnameof(filename)
# Python library found. Return absolute path to it.
return filename
# Python library NOT found. Resume searching using alternative methods.
# Work around for python venv having VERSION.dll rather than pythonXY.dll
if compat.is_win and 'VERSION.dll' in dlls:
pydll = 'python%d%d.dll' % sys.version_info[:2]
return getfullnameof(pydll)
# Applies only to non Windows platforms and conda.
if compat.is_conda:
# Conda needs to be the first here since it overrules the operating system specific paths.
python_libname = _find_lib_in_libdirs(os.path.join(compat.base_prefix, 'lib'))
if python_libname:
return python_libname
elif compat.is_unix:
for name in compat.PYDYLIB_NAMES:
python_libname = findLibrary(name)
if python_libname:
return python_libname
if compat.is_darwin or compat.is_linux:
# On MacPython, Analysis.assemble is able to find the libpython with no additional help, asking for
# sys.executable dependencies. However, this fails on system python, because the shared library is not listed as
# a dependency of the binary (most probably it is opened at runtime using some dlopen trickery). This happens on
# Mac OS when Python is compiled as Framework.
# Linux using pyenv is similarly linked so that sys.executable dependencies does not yield libpython.so.
# Python compiled as Framework contains same values in sys.prefix and exec_prefix. That is why we can use just
# sys.prefix. In virtualenv, PyInstaller is not able to find Python library. We need special care for this case.
python_libname = _find_lib_in_libdirs(
compat.base_prefix,
os.path.join(compat.base_prefix, 'lib'),
)
if python_libname:
return python_libname
# Python library NOT found. Provide helpful feedback.
msg = """Python library not found: %s
This means your Python installation does not come with proper shared library files.
This usually happens due to a missing development package, or unsuitable build parameters of the Python installation.
* On Debian/Ubuntu, you need to install Python development packages:
* apt-get install python3-dev
* apt-get install python-dev
* If you are building Python by yourself, rebuild with `--enable-shared` (or, `--enable-framework` on macOS).
""" % (", ".join(compat.PYDYLIB_NAMES),)
raise IOError(msg)
def findSystemLibrary(name):
"""
Given a library name, try to resolve the path to that library.
If the path is already an absolute path, return it without searching.
"""
if os.path.isabs(name):
return name
if compat.is_unix:
return findLibrary(name)
elif compat.is_win:
return getfullnameof(name)
else:
# This seems to work, and is similar to what we have above..
return ctypes.util.find_library(name)
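# Small self-test sketch (an addition for illustration, not part of the original module):
# running this file directly prints the binary dependencies of the current Python
# interpreter, using whichever platform-specific implementation applies.
if __name__ == "__main__":
    for _dep in sorted(getImports(compat.python_executable)):
        print("python executable depends on:", _dep)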
| 40.768987
| 120
| 0.614143
|
795079512712dd827e69c3d791b7ed6e8a90f738
| 1,463
|
py
|
Python
|
cvxpy/reductions/eliminate_pwl/eliminate_pwl.py
|
hashstat/cvxpy
|
20d667ebe8614821fa38e41b1e333257512d9594
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-28T16:41:11.000Z
|
2021-05-28T16:41:11.000Z
|
cvxpy/reductions/eliminate_pwl/eliminate_pwl.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/reductions/eliminate_pwl/eliminate_pwl.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms import abs, maximum, sum_largest, max, norm1, norm_inf
from cvxpy.reductions.canonicalization import Canonicalization
from cvxpy.reductions.eliminate_pwl.atom_canonicalizers import (
CANON_METHODS as elim_pwl_methods)
class EliminatePwl(Canonicalization):
"""Eliminates piecewise linear atoms."""
def __init__(self, problem=None) -> None:
super(EliminatePwl, self).__init__(
problem=problem, canon_methods=elim_pwl_methods)
def accepts(self, problem) -> bool:
atom_types = [type(atom) for atom in problem.atoms()]
pwl_types = [abs, maximum, sum_largest, max, norm1, norm_inf]
return any(atom in pwl_types for atom in atom_types)
def apply(self, problem):
if not self.accepts(problem):
raise ValueError("Cannot canonicalize pwl atoms.")
return super(EliminatePwl, self).apply(problem)
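# Minimal usage sketch (added for illustration, not part of the original reduction):
# instantiate the reduction and query `accepts` on a small problem containing a
# piecewise-linear atom (norm1). This assumes cvxpy is importable; whether `accepts`
# reports True depends on how Problem.atoms() reports atoms in the installed cvxpy
# version, so the result is printed rather than asserted. In practice the reduction
# runs inside cvxpy's solving chain rather than standalone.
if __name__ == "__main__":
    import cvxpy as cp
    x = cp.Variable(2)
    prob = cp.Problem(cp.Minimize(cp.norm1(x)), [x >= -1])
    print("accepts:", EliminatePwl().accepts(prob))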
| 37.512821
| 72
| 0.742994
|
795079ab6824cbc65bb3ae599bf7691185d9c9ab
| 2,917
|
py
|
Python
|
2nd June Assignments/case study 3/questions.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | 15
|
2019-05-05T04:48:42.000Z
|
2022-02-15T12:08:33.000Z
|
2nd June Assignments/case study 3/questions.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | null | null | null |
2nd June Assignments/case study 3/questions.py
|
JangirSumit/data_science
|
a1957122f8a4c66e3b4c7b7c93a74c53a2db1fe4
|
[
"MIT"
] | 53
|
2019-11-10T05:09:25.000Z
|
2022-03-28T01:26:32.000Z
|
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.model_selection import train_test_split, ShuffleSplit, GridSearchCV
from sklearn import svm
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
# 1. Load the data from “College.csv”, which has attributes collected about private and public colleges
# for a particular year. We will try to predict the private/public status of the college from other attributes.
data = pd.read_csv(r"2nd June Assignments\case study 3\College.csv")  # raw string so the backslashes are not treated as escapes
data.head()
labelencoder = LabelEncoder()
data["Private"] = labelencoder.fit_transform(data["Private"])
data.head()
# 2. Use LabelEncoder to encode the target variable into numerical form and split the data such that 20% of the data is set aside for testing (note: the split below actually holds out 30%).
X = data.iloc[:, 1:]
Y = data["Private"]
train_x, test_x, train_y, test_y = train_test_split(
X, Y, test_size=0.30, random_state=10)
# 3. Fit a linear SVM from scikit-learn and observe the accuracy. [Hint: Use LinearSVC]
model_svm = svm.LinearSVC()
model_svm.fit(train_x, train_y)
predicted_values = model_svm.predict(test_x)
print("\nAccuracy Score\n")
print(metrics.accuracy_score(predicted_values, test_y))
# 4. Preprocess the data using StandardScaler, fit the same model again, and observe the change in accuracy.
# [Hint: Refer to scikit-learn's preprocessing methods]
# http://benalexkeen.com/feature-scaling-with-scikit-learn/
scaler_df = StandardScaler().fit_transform(X)
scaler_df = pd.DataFrame(scaler_df, columns=X.columns)  # wrap the scaled array, not the original X
X = scaler_df
Y = data["Private"]
train_x, test_x, train_y, test_y = train_test_split(
X, Y, test_size=0.30, random_state=10)
model_svm = svm.LinearSVC()
model_svm.fit(train_x, train_y)
predicted_values = model_svm.predict(test_x)
print("\nAccuracy Score after scaling\n")
print(metrics.accuracy_score(predicted_values, test_y))
# 5. Use scikit-learn's grid search to select the best hyperparameters for a non-linear SVM, identify the model with
# the best score and its parameters.
# [Hint: Refer to the model_selection module of scikit-learn]
# https://chrisalbon.com/machine_learning/model_evaluation/cross_validation_parameter_tuning_grid_search/
parameter_candidates = [
{'C': [1, 10, 100, 1000], 'kernel': ['poly']},
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
# Create a classifier object with the classifier and parameter candidates
cv = ShuffleSplit()
clf = GridSearchCV(estimator=svm.SVC(max_iter=1000),
param_grid=parameter_candidates, n_jobs=-1, cv=cv)
# Train the classifier on data1's feature and target data
clf.fit(train_x, train_y)
# View the accuracy score
print('Best score for data1:', clf.best_score_)
# View the best parameters for the model found using grid search
print('Best C:', clf.best_estimator_.C)
print('Best Kernel:', clf.best_estimator_.kernel)
print('Best Gamma:', clf.best_estimator_.gamma)
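# Follow-up sketch (not part of the original assignment): the fitted grid search also
# exposes the winning parameter combination directly and can score the held-out split.
print('Best parameter set:', clf.best_params_)
print('Held-out accuracy of best model:', clf.best_estimator_.score(test_x, test_y))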
| 36.4625
| 141
| 0.758999
|
79507b60e531382c2973d6a057af0c36ecf5b302
| 1,791
|
py
|
Python
|
simulate.py
|
jerrytheo/mim-plotsim
|
2baa4a75938652bfeec957f4924e11e2c7da7a71
|
[
"MIT"
] | 1
|
2018-06-01T15:15:13.000Z
|
2018-06-01T15:15:13.000Z
|
simulate.py
|
MiMSwarm/point-simulation
|
2baa4a75938652bfeec957f4924e11e2c7da7a71
|
[
"MIT"
] | null | null | null |
simulate.py
|
MiMSwarm/point-simulation
|
2baa4a75938652bfeec957f4924e11e2c7da7a71
|
[
"MIT"
] | null | null | null |
from source import Environment
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
def horizontal_line(xstart, xstop, yval):
xstop += 0.01
return np.vstack((
np.arange(xstart, xstop, 0.01),
np.full(int(round((xstop - xstart) * 100)), yval),
))
def vertical_line(xval, ystart, ystop):
ystop += 0.01
return np.vstack((
np.full(int(round((ystop - ystart) * 100)), xval),
np.arange(ystart, ystop, 0.01),
))
def simple_map():
walls = [
# Outer walls.
['vertical', 0, 0, 10],
['vertical', 10, 0, 10],
['horizontal', 0, 10, 10],
['horizontal', 0, 4.5, 0],
['horizontal', 5.5, 10, 0],
# Bottom left room.
['vertical', 4.5, 0, 2],
['vertical', 4.5, 3, 5],
['horizontal', 1.5, 4.5, 5],
# Right room.
['vertical', 5.5, 0, 3],
['horizontal', 5.5, 9, 3],
['vertical', 9, 3, 9],
]
pts = []
for wall in walls:
if wall[0] == 'vertical':
xval, ystart, ystop = wall[1:]
for x in np.arange(xval-.01, xval+.02, .01):
pts.append(vertical_line(x, ystart-0.01, ystop+0.01))
if wall[0] == 'horizontal':
xstart, xstop, yval = wall[1:]
for y in np.arange(yval-.01, yval+.02, .01):
pts.append(horizontal_line(xstart-0.01, xstop+0.01, y))
return np.hstack(pts).T
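# Quick sanity sketch (illustrative, not part of the original script): a 1-unit
# horizontal segment sampled every 0.01 yields two rows (x and y) of 101 points.
assert horizontal_line(0.0, 1.0, 2.0).shape == (2, 101)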
if __name__ == '__main__':
print('\nInitializing environment ...')
env = Environment(
simple_map, nbot=1, center=(1, -1), radius=.7, plot=True)
print('... done.')
print('\nRunning simulation ...')
ani = FuncAnimation(env.fig, env, interval=100)
plt.show()
print('\n... done.')
| 26.731343
| 71
| 0.531547
|
79507c0bb88b88cb4700156a8ddd4a04644e318c
| 49,490
|
py
|
Python
|
networkx/readwrite/tests/test_graphml.py
|
nik0sc/networkx
|
3d5f577f5176950473367c89fc4e2fac5fb49ce7
|
[
"BSD-3-Clause"
] | 1
|
2019-10-21T06:10:38.000Z
|
2019-10-21T06:10:38.000Z
|
networkx/readwrite/tests/test_graphml.py
|
nik0sc/networkx
|
3d5f577f5176950473367c89fc4e2fac5fb49ce7
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/readwrite/tests/test_graphml.py
|
nik0sc/networkx
|
3d5f577f5176950473367c89fc4e2fac5fb49ce7
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import networkx as nx
from networkx.testing.utils import assert_edges_equal, assert_nodes_equal
import io
import tempfile
import os
from networkx.testing import almost_equal
class BaseGraphML:
@classmethod
def setup_class(cls):
cls.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This file was written by the JAVA GraphML Library.-->
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<graph id="G" edgedefault="directed">
<node id="n0"/>
<node id="n1"/>
<node id="n2"/>
<node id="n3"/>
<node id="n4"/>
<node id="n5"/>
<node id="n6"/>
<node id="n7"/>
<node id="n8"/>
<node id="n9"/>
<node id="n10"/>
<edge id="foo" source="n0" target="n2"/>
<edge source="n1" target="n2"/>
<edge source="n2" target="n3"/>
<edge source="n3" target="n5"/>
<edge source="n3" target="n4"/>
<edge source="n4" target="n6"/>
<edge source="n6" target="n5"/>
<edge source="n5" target="n7"/>
<edge source="n6" target="n8"/>
<edge source="n8" target="n7"/>
<edge source="n8" target="n9"/>
</graph>
</graphml>"""
cls.simple_directed_graph = nx.DiGraph()
cls.simple_directed_graph.add_node('n10')
cls.simple_directed_graph.add_edge('n0', 'n2', id='foo')
cls.simple_directed_graph.add_edges_from([('n1', 'n2'),
('n2', 'n3'),
('n3', 'n5'),
('n3', 'n4'),
('n4', 'n6'),
('n6', 'n5'),
('n5', 'n7'),
('n6', 'n8'),
('n8', 'n7'),
('n8', 'n9'),
])
cls.simple_directed_fh = \
io.BytesIO(cls.simple_directed_data.encode('UTF-8'))
cls.attribute_data = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="d0" for="node" attr.name="color" attr.type="string">
<default>yellow</default>
</key>
<key id="d1" for="edge" attr.name="weight" attr.type="double"/>
<graph id="G" edgedefault="directed">
<node id="n0">
<data key="d0">green</data>
</node>
<node id="n1"/>
<node id="n2">
<data key="d0">blue</data>
</node>
<node id="n3">
<data key="d0">red</data>
</node>
<node id="n4"/>
<node id="n5">
<data key="d0">turquoise</data>
</node>
<edge id="e0" source="n0" target="n2">
<data key="d1">1.0</data>
</edge>
<edge id="e1" source="n0" target="n1">
<data key="d1">1.0</data>
</edge>
<edge id="e2" source="n1" target="n3">
<data key="d1">2.0</data>
</edge>
<edge id="e3" source="n3" target="n2"/>
<edge id="e4" source="n2" target="n4"/>
<edge id="e5" source="n3" target="n5"/>
<edge id="e6" source="n5" target="n4">
<data key="d1">1.1</data>
</edge>
</graph>
</graphml>
"""
cls.attribute_graph = nx.DiGraph(id='G')
cls.attribute_graph.graph['node_default'] = {'color': 'yellow'}
cls.attribute_graph.add_node('n0', color='green')
cls.attribute_graph.add_node('n2', color='blue')
cls.attribute_graph.add_node('n3', color='red')
cls.attribute_graph.add_node('n4')
cls.attribute_graph.add_node('n5', color='turquoise')
cls.attribute_graph.add_edge('n0', 'n2', id='e0', weight=1.0)
cls.attribute_graph.add_edge('n0', 'n1', id='e1', weight=1.0)
cls.attribute_graph.add_edge('n1', 'n3', id='e2', weight=2.0)
cls.attribute_graph.add_edge('n3', 'n2', id='e3')
cls.attribute_graph.add_edge('n2', 'n4', id='e4')
cls.attribute_graph.add_edge('n3', 'n5', id='e5')
cls.attribute_graph.add_edge('n5', 'n4', id='e6', weight=1.1)
cls.attribute_fh = io.BytesIO(cls.attribute_data.encode('UTF-8'))
cls.attribute_named_key_ids_data = """<?xml version='1.0' encoding='utf-8'?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="edge_prop" for="edge" attr.name="edge_prop" attr.type="string"/>
<key id="prop2" for="node" attr.name="prop2" attr.type="string"/>
<key id="prop1" for="node" attr.name="prop1" attr.type="string"/>
<graph edgedefault="directed">
<node id="0">
<data key="prop1">val1</data>
<data key="prop2">val2</data>
</node>
<node id="1">
<data key="prop1">val_one</data>
<data key="prop2">val2</data>
</node>
<edge source="0" target="1">
<data key="edge_prop">edge_value</data>
</edge>
</graph>
</graphml>
"""
cls.attribute_named_key_ids_graph = nx.DiGraph()
cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2")
cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", prop2="val2")
cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value")
fh = io.BytesIO(cls.attribute_named_key_ids_data.encode('UTF-8'))
cls.attribute_named_key_ids_fh = fh
cls.attribute_numeric_type_data = """<?xml version='1.0' encoding='utf-8'?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key attr.name="weight" attr.type="double" for="node" id="d1" />
<key attr.name="weight" attr.type="double" for="edge" id="d0" />
<graph edgedefault="directed">
<node id="n0">
<data key="d1">1</data>
</node>
<node id="n1">
<data key="d1">2.0</data>
</node>
<edge source="n0" target="n1">
<data key="d0">1</data>
</edge>
<edge source="n1" target="n0">
<data key="d0">k</data>
</edge>
<edge source="n1" target="n1">
<data key="d0">1.0</data>
</edge>
</graph>
</graphml>
"""
cls.attribute_numeric_type_graph = nx.DiGraph()
cls.attribute_numeric_type_graph.add_node('n0', weight=1)
cls.attribute_numeric_type_graph.add_node('n1', weight=2.0)
cls.attribute_numeric_type_graph.add_edge('n0', 'n1', weight=1)
cls.attribute_numeric_type_graph.add_edge('n1', 'n1', weight=1.0)
fh = io.BytesIO(cls.attribute_numeric_type_data.encode('UTF-8'))
cls.attribute_numeric_type_fh = fh
cls.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<graph id="G">
<node id="n0"/>
<node id="n1"/>
<node id="n2"/>
<node id="n10"/>
<edge id="foo" source="n0" target="n2"/>
<edge source="n1" target="n2"/>
<edge source="n2" target="n3"/>
</graph>
</graphml>"""
# <edge source="n8" target="n10" directed="false"/>
cls.simple_undirected_graph = nx.Graph()
cls.simple_undirected_graph.add_node('n10')
cls.simple_undirected_graph.add_edge('n0', 'n2', id='foo')
cls.simple_undirected_graph.add_edges_from([('n1', 'n2'),
('n2', 'n3'),
])
fh = io.BytesIO(cls.simple_undirected_data.encode('UTF-8'))
cls.simple_undirected_fh = fh
class TestReadGraphML(BaseGraphML):
def test_read_simple_directed_graphml(self):
G = self.simple_directed_graph
H = nx.read_graphml(self.simple_directed_fh)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(G.edges()) == sorted(H.edges())
assert (sorted(G.edges(data=True)) ==
sorted(H.edges(data=True)))
self.simple_directed_fh.seek(0)
I = nx.parse_graphml(self.simple_directed_data)
assert sorted(G.nodes()) == sorted(I.nodes())
assert sorted(G.edges()) == sorted(I.edges())
assert (sorted(G.edges(data=True)) ==
sorted(I.edges(data=True)))
def test_read_simple_undirected_graphml(self):
G = self.simple_undirected_graph
H = nx.read_graphml(self.simple_undirected_fh)
assert_nodes_equal(G.nodes(), H.nodes())
assert_edges_equal(G.edges(), H.edges())
self.simple_undirected_fh.seek(0)
I = nx.parse_graphml(self.simple_undirected_data)
assert_nodes_equal(G.nodes(), I.nodes())
assert_edges_equal(G.edges(), I.edges())
def test_read_attribute_graphml(self):
G = self.attribute_graph
H = nx.read_graphml(self.attribute_fh)
assert_nodes_equal(G.nodes(True), sorted(H.nodes(data=True)))
ge = sorted(G.edges(data=True))
he = sorted(H.edges(data=True))
for a, b in zip(ge, he):
assert a == b
self.attribute_fh.seek(0)
I = nx.parse_graphml(self.attribute_data)
assert sorted(G.nodes(True)) == sorted(I.nodes(data=True))
ge = sorted(G.edges(data=True))
he = sorted(I.edges(data=True))
for a, b in zip(ge, he):
assert a == b
def test_directed_edge_in_undirected(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<graph id="G">
<node id="n0"/>
<node id="n1"/>
<node id="n2"/>
<edge source="n0" target="n1"/>
<edge source="n1" target="n2" directed='true'/>
</graph>
</graphml>"""
fh = io.BytesIO(s.encode('UTF-8'))
pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
def test_undirected_edge_in_directed(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<graph id="G" edgedefault='directed'>
<node id="n0"/>
<node id="n1"/>
<node id="n2"/>
<edge source="n0" target="n1"/>
<edge source="n1" target="n2" directed='false'/>
</graph>
</graphml>"""
fh = io.BytesIO(s.encode('UTF-8'))
pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
def test_key_raise(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="d0" for="node" attr.name="color" attr.type="string">
<default>yellow</default>
</key>
<key id="d1" for="edge" attr.name="weight" attr.type="double"/>
<graph id="G" edgedefault="directed">
<node id="n0">
<data key="d0">green</data>
</node>
<node id="n1"/>
<node id="n2">
<data key="d0">blue</data>
</node>
<edge id="e0" source="n0" target="n2">
<data key="d2">1.0</data>
</edge>
</graph>
</graphml>
"""
fh = io.BytesIO(s.encode('UTF-8'))
pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
def test_hyperedge_raise(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="d0" for="node" attr.name="color" attr.type="string">
<default>yellow</default>
</key>
<key id="d1" for="edge" attr.name="weight" attr.type="double"/>
<graph id="G" edgedefault="directed">
<node id="n0">
<data key="d0">green</data>
</node>
<node id="n1"/>
<node id="n2">
<data key="d0">blue</data>
</node>
<hyperedge id="e0" source="n0" target="n2">
<endpoint node="n0"/>
<endpoint node="n1"/>
<endpoint node="n2"/>
</hyperedge>
</graph>
</graphml>
"""
fh = io.BytesIO(s.encode('UTF-8'))
pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
pytest.raises(nx.NetworkXError, nx.parse_graphml, s)
def test_multigraph_keys(self):
# Test that reading multigraphs uses edge id attributes as keys
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<graph id="G" edgedefault="directed">
<node id="n0"/>
<node id="n1"/>
<edge id="e0" source="n0" target="n1"/>
<edge id="e1" source="n0" target="n1"/>
</graph>
</graphml>
"""
fh = io.BytesIO(s.encode('UTF-8'))
G = nx.read_graphml(fh)
expected = [("n0", "n1", "e0"), ("n0", "n1", "e1")]
assert sorted(G.edges(keys=True)) == expected
fh.seek(0)
H = nx.parse_graphml(s)
assert sorted(H.edges(keys=True)) == expected
def test_preserve_multi_edge_data(self):
"""
        Test that edge data and keys are preserved across consecutive
        writes and reads
"""
G = nx.MultiGraph()
G.add_node(1)
G.add_node(2)
G.add_edges_from([
# edges with no data, no keys:
(1, 2),
# edges with only data:
(1, 2, dict(key='data_key1')),
(1, 2, dict(id='data_id2')),
(1, 2, dict(key='data_key3', id='data_id3')),
# edges with both data and keys:
(1, 2, 103, dict(key='data_key4')),
(1, 2, 104, dict(id='data_id5')),
(1, 2, 105, dict(key='data_key6', id='data_id7')),
])
fh = io.BytesIO()
nx.write_graphml(G, fh)
fh.seek(0)
H = nx.read_graphml(fh, node_type=int)
assert_edges_equal(
G.edges(data=True, keys=True), H.edges(data=True, keys=True)
)
assert G._adj == H._adj
def test_yfiles_extension(self):
data = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:y="http://www.yworks.com/xml/graphml"
xmlns:yed="http://www.yworks.com/xml/yed/3"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<!--Created by yFiles for Java 2.7-->
<key for="graphml" id="d0" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="node" id="d1"/>
<key attr.name="description" attr.type="string" for="node" id="d2"/>
<key for="node" id="d3" yfiles.type="nodegraphics"/>
<key attr.name="Description" attr.type="string" for="graph" id="d4">
<default/>
</key>
<key attr.name="url" attr.type="string" for="edge" id="d5"/>
<key attr.name="description" attr.type="string" for="edge" id="d6"/>
<key for="edge" id="d7" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<node id="n0">
<data key="d3">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="100.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content"
borderDistance="0.0" fontFamily="Dialog" fontSize="13"
fontStyle="plain" hasBackgroundColor="false" hasLineColor="false"
height="19.1328125" modelName="internal" modelPosition="c"
textColor="#000000" visible="true" width="12.27099609375"
x="8.864501953125" y="5.43359375">1</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1">
<data key="d3">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="183.0" y="205.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content"
borderDistance="0.0" fontFamily="Dialog" fontSize="13"
fontStyle="plain" hasBackgroundColor="false" hasLineColor="false"
height="19.1328125" modelName="internal" modelPosition="c"
textColor="#000000" visible="true" width="12.27099609375"
x="8.864501953125" y="5.43359375">2</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="e0" source="n0" target="n1">
<data key="d7">
<y:PolyLineEdge>
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d0">
<y:Resources/>
</data>
</graphml>
"""
fh = io.BytesIO(data.encode('UTF-8'))
G = nx.read_graphml(fh)
assert list(G.edges()) == [('n0', 'n1')]
assert G['n0']['n1']['id'] == 'e0'
assert G.nodes['n0']['label'] == '1'
assert G.nodes['n1']['label'] == '2'
H = nx.parse_graphml(data)
assert list(H.edges()) == [('n0', 'n1')]
assert H['n0']['n1']['id'] == 'e0'
assert H.nodes['n0']['label'] == '1'
assert H.nodes['n1']['label'] == '2'
def test_bool(self):
s = """<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="d0" for="node" attr.name="test" attr.type="boolean">
<default>false</default>
</key>
<graph id="G" edgedefault="directed">
<node id="n0">
<data key="d0">true</data>
</node>
<node id="n1"/>
<node id="n2">
<data key="d0">false</data>
</node>
<node id="n3">
<data key="d0">FaLsE</data>
</node>
<node id="n4">
<data key="d0">True</data>
</node>
<node id="n5">
<data key="d0">0</data>
</node>
<node id="n6">
<data key="d0">1</data>
</node>
</graph>
</graphml>
"""
fh = io.BytesIO(s.encode('UTF-8'))
G = nx.read_graphml(fh)
H = nx.parse_graphml(s)
for graph in [G, H]:
assert graph.nodes['n0']['test']
assert not graph.nodes['n2']['test']
assert not graph.nodes['n3']['test']
assert graph.nodes['n4']['test']
assert not graph.nodes['n5']['test']
assert graph.nodes['n6']['test']
def test_graphml_header_line(self):
good = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns
http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="d0" for="node" attr.name="test" attr.type="boolean">
<default>false</default>
</key>
<graph id="G">
<node id="n0">
<data key="d0">true</data>
</node>
</graph>
</graphml>
"""
bad = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml>
<key id="d0" for="node" attr.name="test" attr.type="boolean">
<default>false</default>
</key>
<graph id="G">
<node id="n0">
<data key="d0">true</data>
</node>
</graph>
</graphml>
"""
ugly = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="https://ghghgh">
<key id="d0" for="node" attr.name="test" attr.type="boolean">
<default>false</default>
</key>
<graph id="G">
<node id="n0">
<data key="d0">true</data>
</node>
</graph>
</graphml>
"""
for s in (good, bad):
fh = io.BytesIO(s.encode('UTF-8'))
G = nx.read_graphml(fh)
H = nx.parse_graphml(s)
for graph in [G, H]:
assert graph.nodes['n0']['test']
fh = io.BytesIO(ugly.encode('UTF-8'))
pytest.raises(nx.NetworkXError, nx.read_graphml, fh)
pytest.raises(nx.NetworkXError, nx.parse_graphml, ugly)
def test_read_attributes_with_groups(self):
data = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
<!--Created by yEd 3.17-->
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
<key for="port" id="d1" yfiles.type="portgraphics"/>
<key for="port" id="d2" yfiles.type="portgeometry"/>
<key for="port" id="d3" yfiles.type="portuserdata"/>
<key attr.name="CustomProperty" attr.type="string" for="node" id="d4">
<default/>
</key>
<key attr.name="url" attr.type="string" for="node" id="d5"/>
<key attr.name="description" attr.type="string" for="node" id="d6"/>
<key for="node" id="d7" yfiles.type="nodegraphics"/>
<key for="graphml" id="d8" yfiles.type="resources"/>
<key attr.name="url" attr.type="string" for="edge" id="d9"/>
<key attr.name="description" attr.type="string" for="edge" id="d10"/>
<key for="edge" id="d11" yfiles.type="edgegraphics"/>
<graph edgedefault="directed" id="G">
<data key="d0"/>
<node id="n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">2<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="250.38333333333333" width="140.0" x="-30.0" y="-330.3833333333333"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="140.0" x="0.0" y="0.0">Group 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 3</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1:">
<node id="n1::n0" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="83.46111111111111" width="110.0" x="-15.0" y="-292.9222222222222"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="110.0" x="0.0" y="0.0">Group 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 1</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1::n0:">
<node id="n1::n0::n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="50.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">1<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1::n0::n1">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="0.0" y="-255.4611111111111"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">3<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
<node id="n1::n1" yfiles.foldertype="group">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d5"/>
<data key="d6"/>
<data key="d7">
<y:ProxyAutoBoundsNode>
<y:Realizers active="0">
<y:GroupNode>
<y:Geometry height="83.46111111111111" width="110.0" x="-15.0" y="-179.4611111111111"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="110.0" x="0.0" y="0.0">Group 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="false" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="15" bottomF="15.0" left="15" leftF="15.0" right="15" rightF="15.0" top="15" topF="15.0"/>
<y:BorderInsets bottom="1" bottomF="1.0" left="0" leftF="0.0" right="0" rightF="0.0" top="1" topF="1.0001736111111086"/>
</y:GroupNode>
<y:GroupNode>
<y:Geometry height="50.0" width="50.0" x="0.0" y="60.0"/>
<y:Fill color="#F5F5F5" transparent="false"/>
<y:BorderStyle color="#000000" type="dashed" width="1.0"/>
<y:NodeLabel alignment="right" autoSizePolicy="node_width" backgroundColor="#EBEBEB" borderDistance="0.0" fontFamily="Dialog" fontSize="15" fontStyle="plain" hasLineColor="false" height="21.4609375" horizontalTextPosition="center" iconTextGap="4" modelName="internal" modelPosition="t" textColor="#000000" verticalTextPosition="bottom" visible="true" width="65.201171875" x="-7.6005859375" y="0.0">Folder 2</y:NodeLabel>
<y:Shape type="roundrectangle"/>
<y:State closed="true" closedHeight="50.0" closedWidth="50.0" innerGraphDisplayEnabled="false"/>
<y:Insets bottom="5" bottomF="5.0" left="5" leftF="5.0" right="5" rightF="5.0" top="5" topF="5.0"/>
<y:BorderInsets bottom="0" bottomF="0.0" left="0" leftF="0.0" right="0" rightF="0.0" top="0" topF="0.0"/>
</y:GroupNode>
</y:Realizers>
</y:ProxyAutoBoundsNode>
</data>
<graph edgedefault="directed" id="n1::n1:">
<node id="n1::n1::n0">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="0.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">5<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<node id="n1::n1::n1">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="50.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">6<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
</graph>
</node>
</graph>
</node>
<node id="n2">
<data key="d4"><![CDATA[CustomPropertyValue]]></data>
<data key="d6"/>
<data key="d7">
<y:ShapeNode>
<y:Geometry height="30.0" width="30.0" x="125.0" y="-142.0"/>
<y:Fill color="#FFCC00" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.96875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="11.634765625" x="9.1826171875" y="6.015625">9<y:LabelModel>
<y:SmartNodeLabelModel distance="4.0"/>
</y:LabelModel>
<y:ModelParameter>
<y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/>
</y:ModelParameter>
</y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
</node>
<edge id="n1::n1::e0" source="n1::n1::n0" target="n1::n1::n1">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="n1::n0::e0" source="n1::n0::n1" target="n1::n0::n0">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e0" source="n1::n0::n0" target="n0">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
<edge id="e1" source="n1::n1::n1" target="n2">
<data key="d10"/>
<data key="d11">
<y:PolyLineEdge>
<y:Path sx="15.0" sy="-0.0" tx="-15.0" ty="-0.0"/>
<y:LineStyle color="#000000" type="line" width="1.0"/>
<y:Arrows source="none" target="standard"/>
<y:BendStyle smoothed="false"/>
</y:PolyLineEdge>
</data>
</edge>
</graph>
<data key="d8">
<y:Resources/>
</data>
</graphml>
"""
# verify that nodes / attributes are correctly read when part of a group
fh = io.BytesIO(data.encode('UTF-8'))
G = nx.read_graphml(fh)
data = [x for _, x in G.nodes(data=True)]
assert len(data) == 9
for node_data in data:
assert node_data['CustomProperty'] != ''
class TestWriteGraphML(BaseGraphML):
writer = staticmethod(nx.write_graphml_lxml)
@classmethod
def setup_class(cls):
BaseGraphML.setup_class()
_ = pytest.importorskip("lxml.etree")
def test_write_interface(self):
try:
import lxml.etree
assert nx.write_graphml == nx.write_graphml_lxml
except ImportError:
assert nx.write_graphml == nx.write_graphml_xml
def test_write_read_simple_directed_graphml(self):
G = self.simple_directed_graph
G.graph['hi'] = 'there'
fh = io.BytesIO()
self.writer(G, fh)
fh.seek(0)
H = nx.read_graphml(fh)
assert sorted(G.nodes()) == sorted(H.nodes())
assert sorted(G.edges()) == sorted(H.edges())
assert sorted(G.edges(data=True)) == sorted(H.edges(data=True))
self.simple_directed_fh.seek(0)
def test_write_read_attribute_named_key_ids_graphml(self):
from xml.etree.ElementTree import parse
G = self.attribute_named_key_ids_graph
fh = io.BytesIO()
self.writer(G, fh, named_key_ids=True)
fh.seek(0)
H = nx.read_graphml(fh)
fh.seek(0)
assert_nodes_equal(G.nodes(), H.nodes())
assert_edges_equal(G.edges(), H.edges())
assert_edges_equal(G.edges(data=True), H.edges(data=True))
self.attribute_named_key_ids_fh.seek(0)
xml = parse(fh)
# Children are the key elements, and the graph element
children = list(xml.getroot())
assert len(children) == 4
keys = [child.items() for child in children[:3]]
assert len(keys) == 3
assert ('id', 'edge_prop') in keys[0]
assert ('attr.name', 'edge_prop') in keys[0]
assert ('id', 'prop2') in keys[1]
assert ('attr.name', 'prop2') in keys[1]
assert ('id', 'prop1') in keys[2]
assert ('attr.name', 'prop1') in keys[2]
# Confirm the read graph nodes/edge are identical when compared to
# default writing behavior.
default_behavior_fh = io.BytesIO()
nx.write_graphml(G, default_behavior_fh)
default_behavior_fh.seek(0)
H = nx.read_graphml(default_behavior_fh)
named_key_ids_behavior_fh = io.BytesIO()
nx.write_graphml(G, named_key_ids_behavior_fh, named_key_ids=True)
named_key_ids_behavior_fh.seek(0)
J = nx.read_graphml(named_key_ids_behavior_fh)
assert(all(n1 == n2 for (n1, n2) in zip(H.nodes, J.nodes)))
assert(all(e1 == e2 for (e1, e2) in zip(H.edges, J.edges)))
def test_write_read_attribute_numeric_type_graphml(self):
from xml.etree.ElementTree import parse
G = self.attribute_numeric_type_graph
fh = io.BytesIO()
self.writer(G, fh, infer_numeric_types=True)
fh.seek(0)
H = nx.read_graphml(fh)
fh.seek(0)
assert_nodes_equal(G.nodes(), H.nodes())
assert_edges_equal(G.edges(), H.edges())
assert_edges_equal(G.edges(data=True), H.edges(data=True))
self.attribute_numeric_type_fh.seek(0)
xml = parse(fh)
# Children are the key elements, and the graph element
children = list(xml.getroot())
assert len(children) == 3
keys = [child.items() for child in children[:2]]
assert len(keys) == 2
assert ('attr.type', 'double') in keys[0]
assert ('attr.type', 'double') in keys[1]
def test_more_multigraph_keys(self):
"""Writing keys as edge id attributes means keys become strings.
The original keys are stored as data, so read them back in
if `str(key) == edge_id`
This allows the adjacency to remain the same.
"""
G = nx.MultiGraph()
G.add_edges_from([('a', 'b', 2), ('a', 'b', 3)])
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname)
assert H.is_multigraph()
assert_edges_equal(G.edges(keys=True), H.edges(keys=True))
assert G._adj == H._adj
os.close(fd)
os.unlink(fname)
def test_default_attribute(self):
G = nx.Graph(name="Fred")
G.add_node(1, label=1, color='green')
nx.add_path(G, [0, 1, 2, 3])
G.add_edge(1, 2, weight=3)
G.graph['node_default'] = {'color': 'yellow'}
G.graph['edge_default'] = {'weight': 7}
fh = io.BytesIO()
self.writer(G, fh)
fh.seek(0)
H = nx.read_graphml(fh, node_type=int)
assert_nodes_equal(G.nodes(), H.nodes())
assert_edges_equal(G.edges(), H.edges())
assert G.graph == H.graph
def test_mixed_type_attributes(self):
G = nx.MultiGraph()
G.add_node('n0', special=False)
G.add_node('n1', special=0)
G.add_edge('n0', 'n1', special=False)
G.add_edge('n0', 'n1', special=0)
fh = io.BytesIO()
self.writer(G, fh)
fh.seek(0)
H = nx.read_graphml(fh)
assert not H.nodes['n0']['special']
assert H.nodes['n1']['special'] == 0
assert not H.edges['n0', 'n1', 0]['special']
assert H.edges['n0', 'n1', 1]['special'] == 0
def test_multigraph_to_graph(self):
# test converting multigraph to graph if no parallel edges found
G = nx.MultiGraph()
G.add_edges_from([('a', 'b', 2), ('b', 'c', 3)]) # no multiedges
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname)
assert not H.is_multigraph()
os.close(fd)
os.unlink(fname)
def test_numpy_float(self):
np = pytest.importorskip('numpy')
wt = np.float(3.4)
G = nx.Graph([(1, 2, {'weight': wt})])
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname, node_type=int)
assert G._adj == H._adj
os.close(fd)
os.unlink(fname)
def test_numpy_float64(self):
np = pytest.importorskip('numpy')
wt = np.float64(3.4)
G = nx.Graph([(1, 2, {'weight': wt})])
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname, node_type=int)
assert G.edges == H.edges
wtG = G[1][2]['weight']
wtH = H[1][2]['weight']
assert almost_equal(wtG, wtH, places=6)
assert type(wtG) == np.float64
assert type(wtH) == float
os.close(fd)
os.unlink(fname)
def test_numpy_float32(self):
np = pytest.importorskip('numpy')
wt = np.float32(3.4)
G = nx.Graph([(1, 2, {'weight': wt})])
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname, node_type=int)
assert G.edges == H.edges
wtG = G[1][2]['weight']
wtH = H[1][2]['weight']
assert almost_equal(wtG, wtH, places=6)
assert type(wtG) == np.float32
assert type(wtH) == float
os.close(fd)
os.unlink(fname)
def test_numpy_float64_inference(self):
np = pytest.importorskip('numpy')
G = self.attribute_numeric_type_graph
G.edges[('n1', 'n1')]['weight'] = np.float64(1.1)
fd, fname = tempfile.mkstemp()
self.writer(G, fname, infer_numeric_types=True)
H = nx.read_graphml(fname)
assert G._adj == H._adj
os.close(fd)
os.unlink(fname)
def test_unicode_attributes(self):
G = nx.Graph()
name1 = chr(2344) + chr(123) + chr(6543)
name2 = chr(5543) + chr(1543) + chr(324)
node_type = str
G.add_edge(name1, 'Radiohead', foo=name2)
fd, fname = tempfile.mkstemp()
self.writer(G, fname)
H = nx.read_graphml(fname, node_type=node_type)
assert G._adj == H._adj
os.close(fd)
os.unlink(fname)
def test_unicode_escape(self):
        # test for handling json escaped strings in python 2 Issue #1880
import json
a = dict(a='{"a": "123"}') # an object with many chars to escape
sa = json.dumps(a)
G = nx.Graph()
G.graph['test'] = sa
fh = io.BytesIO()
self.writer(G, fh)
fh.seek(0)
H = nx.read_graphml(fh)
assert G.graph['test'] == H.graph['test']
class TestXMLGraphML(TestWriteGraphML):
writer = staticmethod(nx.write_graphml_xml)
@classmethod
def setup_class(cls):
TestWriteGraphML.setup_class()
pytest.importorskip("xml.etree.ElementTree")
| 44.545455
| 511
| 0.577692
|
79507c1e4827e04fa1911d085dae54db71cb4213
| 666
|
py
|
Python
|
one.py
|
dollarkillerx/PyTorchStudy
|
c17b2973c89e3a2f088513f29bd5eb6f47957585
|
[
"MIT"
] | null | null | null |
one.py
|
dollarkillerx/PyTorchStudy
|
c17b2973c89e3a2f088513f29bd5eb6f47957585
|
[
"MIT"
] | null | null | null |
one.py
|
dollarkillerx/PyTorchStudy
|
c17b2973c89e3a2f088513f29bd5eb6f47957585
|
[
"MIT"
] | null | null | null |
import torch
# Define a vector
vector = torch.tensor([1, 2, 3, 4])
print("Vector:\t\t", vector)
print('Vector: Shape:\t', vector.shape)
# Define a matrix
matrix = torch.tensor([[1,2],[3,4]])
print('Matrix:\n',matrix)
print('Matrix Shape:\n',matrix.shape)
# Define a tensor
tensor = torch.tensor([ [ [1,2],[3,4] ], [ [5,6],[7,8] ] ])
print('Tensor:\n',tensor)
print('Tensor Shape:\n',tensor.shape)
# Autograd handles all gradient computation and backpropagation
# Under autograd, the backward-pass (backprop) code is defined automatically
# .requires_grad
# Once .requires_grad=True is set on a tensor, autograd tracks every operation involving that tensor
# .backward()
# After all operations are done, calling .backward() makes autograd compute the gradients and run the backward pass
# .grad
# Used to access the gradients
# with torch.no_grad()
# Gradient tracking is automatically disabled inside this block
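# --- Editorial note: the comments above describe the autograd API without
# exercising it. The short sketch below is an added illustration (not part of
# the original one.py) showing .requires_grad, .backward(), .grad and
# torch.no_grad() together on a toy tensor.
import torch

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)  # autograd now tracks x
y = (x * x).sum()         # build a tiny computation graph
y.backward()              # backprop: compute dy/dx
print(x.grad)             # tensor([2., 4., 6.])
with torch.no_grad():     # operations in this block are not tracked
    z = x * 2
print(z.requires_grad)    # False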
| 22.965517
| 71
| 0.644144
|
79507c4ef32056b440fb7870f789a4243a03779f
| 5,253
|
py
|
Python
|
lib/TWCManager/EMS/Growatt.py
|
Saftwerk/TWCManager
|
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
|
[
"Unlicense"
] | 1
|
2021-12-26T03:41:22.000Z
|
2021-12-26T03:41:22.000Z
|
lib/TWCManager/EMS/Growatt.py
|
Saftwerk/TWCManager
|
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
|
[
"Unlicense"
] | null | null | null |
lib/TWCManager/EMS/Growatt.py
|
Saftwerk/TWCManager
|
9b17c063ada80fc159db82fe6e3ad8c4ca071a1a
|
[
"Unlicense"
] | null | null | null |
import logging
import growattServer
import datetime
logger = logging.getLogger(__name__.rsplit(".")[-1])
class Growatt:
# Growatt EMS Module
# Fetches Consumption and Generation details from Growatt API
import requests
import time
cacheTime = 10
config = None
configConfig = None
configGrowatt = None
batterySOC = 0
consumedW = 0
fetchFailed = False
generatedW = 0
lastFetch = 0
master = None
password = None
session = None
status = False
timeout = 2
username = None
useBatteryAt = None
useBatteryTill = None
batteryMaxOutput = None
dischargingTill = None
useBatteryBefore = None
now = None
def __init__(self, master):
self.master = master
self.config = master.config
self.configConfig = master.config.get("config", {})
self.configGrowatt = master.config["sources"].get("Growatt", {})
self.password = self.configGrowatt.get("password", "")
self.status = self.configGrowatt.get("enabled", False)
self.username = self.configGrowatt.get("username", "")
self.useBatteryAt = float(self.configGrowatt.get("useBatteryAt", 0))
self.useBatteryTill = float(self.configGrowatt.get("useBatteryTill", 0))
self.batteryMaxOutput = float(self.configGrowatt.get("batteryMaxOutput", 0))
timestring = self.configGrowatt.get("useBatteryBefore", "00:00")
        timelist = timestring.split(":")
        self.useBatteryBefore = datetime.time(int(timelist[0]), int(timelist[1]))
        self.dischargingTill = self.useBatteryAt
self.now = datetime.datetime.now().time()
# Unload if this module is disabled or misconfigured
if (not self.status) or (
not self.username or not self.password
):
self.master.releaseModule("lib.TWCManager.EMS", "Growatt")
return None
def getConsumption(self): #gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getConsumption")
return 0
# Perform updates if necessary
self.update()
# Return consumption value
return self.consumedW
def getGeneration(self): #gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
# Perform updates if necessary
self.update()
# Return generation value
return self.generatedW
def getGenerationValues(self):
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
api = growattServer.GrowattApi()
try:
logger.debug("Fetching Growatt EMS sensor values")
login_response = api.login(self.username, self.password)
except Exception as e:
logger.log(
logging.INFO4, "Error connecting to Growatt to fetching sensor values"
)
logger.debug(str(e))
self.fetchFailed = True
return False
if not login_response:
logger.log(logging.INFO4, "Empty Response from Growatt API")
return False
if login_response:
plant_list = api.plant_list(login_response['userId'])['data'][0]
            plant_ID = plant_list['plantId']
            inverter = api.device_list(plant_ID)[0]
            deviceAilas = inverter["deviceAilas"]
            status = api.mix_system_status(deviceAilas, plant_ID)
            plant_info = api.plant_info(plant_ID)
device = plant_info['deviceList'][0]
device_sn = device['deviceSn']
mix_status = api.mix_system_status(device_sn, plant_ID)
self.batterySOC = float(mix_status['SOC'])
gen_calc = float(status['pPv1']) + float(status['pPv2'])
gen_calc *= 1000
gen_api = float(status['ppv'])*1000
            inTime = self.now > datetime.time(0, 0) and self.now < self.useBatteryBefore
            if self.dischargingTill < self.batterySOC and inTime:
                self.dischargingTill = self.useBatteryTill
                self.generatedW = gen_api + self.batteryMaxOutput
            else:
                self.dischargingTill = self.useBatteryAt
                self.generatedW = gen_api
            self.consumedW = float(status['pLocalLoad']) * 1000
else:
logger.log(logging.INFO4, "No response from Growatt API")
def setCacheTime(self, cacheTime):
self.cacheTime = cacheTime
def setTimeout(self, timeout):
self.timeout = timeout
def update(self):
# Update function - determine if an update is required
self.now = datetime.datetime.now().time()
if (int(self.time.time()) - self.lastFetch) > self.cacheTime:
# Cache has expired. Fetch values from Growatt.
self.getGenerationValues()
# Update last fetch time
if self.fetchFailed is not True:
self.lastFetch = int(self.time.time())
return True
else:
# Cache time has not elapsed since last fetch, serve from cache.
return False
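# --- Editorial sketch (added, not part of the original module): a hypothetical
# configuration fragment whose keys mirror what __init__ reads via
# self.configGrowatt.get(...). The values are illustrative only, and the key
# meanings are inferred from the logic in getGenerationValues().
example_config = {
    "sources": {
        "Growatt": {
            "enabled": True,
            "username": "user@example.com",   # Growatt portal credentials
            "password": "secret",
            "useBatteryAt": 50,               # SOC (%) above which battery output is offered
            "useBatteryTill": 20,             # SOC (%) down to which the battery keeps discharging
            "batteryMaxOutput": 3000,         # watts added to generatedW while discharging
            "useBatteryBefore": "23:00",      # HH:MM; battery is only offered before this time
        }
    }
}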
| 33.458599
| 87
| 0.615077
|
79507d288fa6a12bc39781df6339384543dbdf3a
| 4,338
|
py
|
Python
|
app.py
|
nikhilpakhariya/FaceMaskDetector
|
5d41a1a214ff862e84cda1c1578ba9ea6fb4dcd9
|
[
"MIT"
] | null | null | null |
app.py
|
nikhilpakhariya/FaceMaskDetector
|
5d41a1a214ff862e84cda1c1578ba9ea6fb4dcd9
|
[
"MIT"
] | null | null | null |
app.py
|
nikhilpakhariya/FaceMaskDetector
|
5d41a1a214ff862e84cda1c1578ba9ea6fb4dcd9
|
[
"MIT"
] | null | null | null |
import streamlit as st
from PIL import Image, ImageEnhance
import numpy as np
import cv2
import os
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import detect_mask_image
def mask_image():
global RGB_img
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
model = load_model("mask_detector.model")
# load the input image from disk and grab the image spatial
# dimensions
image = cv2.imread("./images/out.jpg")
(h, w) = image.shape[:2]
# construct a blob from the image
blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
print("[INFO] computing face detections...")
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
# pass the face through the model to determine if the face
# has a mask or not
(mask, withoutMask) = model.predict(face)[0]
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(image, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
RGB_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask_image()
def mask_detection():
st.title("Face mask detection")
activities = ["Image", "Webcam"]
st.set_option('deprecation.showfileUploaderEncoding', False)
choice = st.sidebar.selectbox("Mask Detection on?", activities)
if choice == 'Image':
st.subheader("Detection on image")
image_file = st.file_uploader("Upload Image", type=['jpg']) # upload image
if image_file is not None:
our_image = Image.open(image_file) # making compatible to PIL
im = our_image.save('./images/out.jpg')
saved_image = st.image(image_file, caption='image uploaded successfully', use_column_width=True)
if st.button('Process'):
st.image(RGB_img)
if choice == 'Webcam':
st.subheader("Detection on webcam")
st.text("This feature will be avilable soon")
mask_detection()
| 40.542056
| 108
| 0.618027
|
79507e49ac8c71e1acacabb2849a01bf50795d25
| 1,564
|
py
|
Python
|
bin/builder.py
|
grampelberg/k8s-icons
|
928088b18dd4be9093c2ce242509e5b3dfd160a4
|
[
"MIT"
] | 2
|
2019-09-12T06:47:18.000Z
|
2019-09-14T13:34:34.000Z
|
bin/builder.py
|
grampelberg/k8s-icons
|
928088b18dd4be9093c2ce242509e5b3dfd160a4
|
[
"MIT"
] | null | null | null |
bin/builder.py
|
grampelberg/k8s-icons
|
928088b18dd4be9093c2ce242509e5b3dfd160a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import pathlib
import sys
from bs4 import BeautifulSoup
colors = {
'#ffffff': '--foreground',
'#eeeeee': '--foreground',
'#326ce5': '--background'
}
parent_markup = """
<svg
aria-hidden="true"
style="position: absolute; width: 0; height: 0; overflow: hidden;"
version="1.1"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
</defs>
</svg>
"""
def get_fnames():
return [
os.path.join(d, fname)
for d, _, fnames in os.walk(sys.argv[1])
for fname in fnames if '.svg' in fname]
def convert_svg(root):
for tag in root.get('style', '').split(';'):
try:
k, v = tag.split(':', 1)
except:
continue
root[k] = v
del root['style']
for k in ['fill', 'stroke']:
v = root.attrs.get(k, 'none')
if v == 'none':
continue
root[k] = 'var({}, {})'.format(colors.get(v, '--missing'), v)
for child in root.children:
if not child.name:
continue
convert_svg(child)
if __name__ == '__main__':
parent = BeautifulSoup(parent_markup, "html.parser")
for fname in get_fnames():
doc = BeautifulSoup(open(fname, 'r').read(), 'html.parser')
convert_svg(doc.svg.g)
tag = parent.new_tag(
"symbol",
id=fname[2:].replace('/', '-')[:-4])
tag['viewbox'] = doc.svg.get('viewbox')
tag.append(doc.svg.g)
parent.defs.append(tag)
print(parent)
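# --- Editorial sketch (added, not part of the original script): what
# convert_svg() does to a single element, given the colors table above.
# The element markup here is made up for illustration.
from bs4 import BeautifulSoup

el = BeautifulSoup('<g style="stroke:#ffffff" fill="#326ce5"></g>', 'html.parser').g
convert_svg(el)
print(el)
# roughly: <g fill="var(--background, #326ce5)" stroke="var(--foreground, #ffffff)"></g>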
| 21.722222
| 70
| 0.54156
|
79507e7c9812b48e95dd330a199ce2ea6e2398c8
| 181
|
py
|
Python
|
tests/conftest.py
|
nmvalera/boilerplate-python
|
53282c6adcf389965e4a6d06aeefbb0194ee52db
|
[
"BSD-3-Clause"
] | 1
|
2018-11-08T06:19:26.000Z
|
2018-11-08T06:19:26.000Z
|
tests/conftest.py
|
nmvalera/boilerplate-python
|
53282c6adcf389965e4a6d06aeefbb0194ee52db
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
nmvalera/boilerplate-python
|
53282c6adcf389965e4a6d06aeefbb0194ee52db
|
[
"BSD-3-Clause"
] | null | null | null |
"""
tests.conftest
~~~~~~~~~~~~~~
:copyright: Copyright 2017 by Nicolas Maurice, see AUTHORS.rst for more details.
:license: BSD, see LICENSE for more details.
"""
| 22.625
| 84
| 0.618785
|
7950805c4a54639b3b109219b947709eccd5f911
| 101
|
py
|
Python
|
main.py
|
samyoyo/blackowl
|
d3a40954919cc597bb56fbc075c4f2f4368de3fb
|
[
"MIT"
] | 7
|
2018-07-12T08:49:31.000Z
|
2021-02-17T22:32:23.000Z
|
main.py
|
TheRipperJhon/Black_Owl
|
d3a40954919cc597bb56fbc075c4f2f4368de3fb
|
[
"MIT"
] | null | null | null |
main.py
|
TheRipperJhon/Black_Owl
|
d3a40954919cc597bb56fbc075c4f2f4368de3fb
|
[
"MIT"
] | 3
|
2019-07-16T15:41:08.000Z
|
2021-01-12T18:35:07.000Z
|
#!/usr/bin/env python
from core import blackowl
if __name__ == '__main__':
blackowl.user_put()
| 14.428571
| 26
| 0.70297
|
7950806b7fb8cd426fbfd86ed5fcb9dfedb837c9
| 8,376
|
py
|
Python
|
persistqueue/sqlackqueue.py
|
Nextpertise/persist-queue
|
ca1e8de8f5e2d3c7a257cd9f129af415e0fffdb4
|
[
"BSD-3-Clause"
] | null | null | null |
persistqueue/sqlackqueue.py
|
Nextpertise/persist-queue
|
ca1e8de8f5e2d3c7a257cd9f129af415e0fffdb4
|
[
"BSD-3-Clause"
] | null | null | null |
persistqueue/sqlackqueue.py
|
Nextpertise/persist-queue
|
ca1e8de8f5e2d3c7a257cd9f129af415e0fffdb4
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import pickle
import sqlite3
import time as _time
import threading
import warnings
from . import sqlbase
from .exceptions import Empty
sqlite3.enable_callback_tracebacks(True)
log = logging.getLogger(__name__)
# 10 seconds internal for `wait` of event
TICK_FOR_WAIT = 10
class AckStatus(object):
inited = '0'
ready = '1'
unack = '2'
acked = '5'
ack_failed = '9'
class SQLiteAckQueue(sqlbase.SQLiteBase):
"""SQLite3 based FIFO queue with ack support."""
_TABLE_NAME = 'ack_queue'
_KEY_COLUMN = '_id' # the name of the key column, used in DB CRUD
_MAX_ACKED_LENGTH = 1000
# SQL to create a table
_SQL_CREATE = ('CREATE TABLE IF NOT EXISTS {table_name} ('
'{key_column} INTEGER PRIMARY KEY AUTOINCREMENT, '
'data BLOB, timestamp FLOAT, status INTEGER)')
# SQL to insert a record
_SQL_INSERT = 'INSERT INTO {table_name} (data, timestamp, status)'\
' VALUES (?, ?, %s)' % AckStatus.inited
# SQL to select a record
_SQL_SELECT = ('SELECT {key_column}, data, status FROM {table_name} '
'WHERE status < %s '
'ORDER BY {key_column} ASC LIMIT 1' % AckStatus.unack)
_SQL_MARK_ACK_UPDATE = 'UPDATE {table_name} SET status = ?'\
' WHERE {key_column} = ?'
_SQL_SELECT_WHERE = 'SELECT {key_column}, data FROM {table_name}'\
' WHERE status < %s AND' \
' {column} {op} ? ORDER BY {key_column} ASC'\
' LIMIT 1 ' % AckStatus.unack
def __init__(self, path, auto_resume=True, **kwargs):
super(SQLiteAckQueue, self).__init__(path, **kwargs)
if not self.auto_commit:
warnings.warn("disable auto commit is not support in ack queue")
self.auto_commit = True
self._unack_cache = {}
if auto_resume:
self.resume_unack_tasks()
@sqlbase.with_conditional_transaction
def resume_unack_tasks(self):
unack_count = self.unack_count()
if unack_count:
log.warning("resume %d unack tasks", unack_count)
sql = 'UPDATE {} set status = ?'\
' WHERE status = ?'.format(self._table_name)
return sql, (AckStatus.ready, AckStatus.unack, )
def put(self, item):
obj = pickle.dumps(item, protocol=self.protocol)
self._insert_into(obj, _time.time())
self.total += 1
self.put_event.set()
def _init(self):
super(SQLiteAckQueue, self)._init()
# Action lock to assure multiple action to be *atomic*
self.action_lock = threading.Lock()
self.total = self._count()
def _count(self):
sql = 'SELECT COUNT({}) FROM {}'\
' WHERE status < ?'.format(self._key_column,
self._table_name)
row = self._getter.execute(sql, (AckStatus.unack,)).fetchone()
return row[0] if row else 0
def _ack_count_via_status(self, status):
sql = 'SELECT COUNT({}) FROM {}'\
' WHERE status = ?'.format(self._key_column,
self._table_name)
row = self._getter.execute(sql, (status, )).fetchone()
return row[0] if row else 0
def unack_count(self):
return self._ack_count_via_status(AckStatus.unack)
def acked_count(self):
return self._ack_count_via_status(AckStatus.acked)
def ready_count(self):
return self._ack_count_via_status(AckStatus.ready)
def ack_failed_count(self):
return self._ack_count_via_status(AckStatus.ack_failed)
@sqlbase.with_conditional_transaction
def _mark_ack_status(self, key, status):
return self._sql_mark_ack_status, (status, key, )
@sqlbase.with_conditional_transaction
def clear_acked_data(self):
sql = """DELETE FROM {table_name}
WHERE {key_column} IN (
SELECT _id FROM {table_name} WHERE status = ?
ORDER BY {key_column} DESC
LIMIT 1000 OFFSET {max_acked_length}
)""".format(table_name=self._table_name,
key_column=self._key_column,
max_acked_length=self._MAX_ACKED_LENGTH)
return sql, AckStatus.acked
@property
def _sql_mark_ack_status(self):
return self._SQL_MARK_ACK_UPDATE.format(table_name=self._table_name,
key_column=self._key_column)
def _pop(self):
with self.action_lock:
row = self._select()
# Perhaps a sqlite3 bug, sometimes (None, None) is returned
# by select, below can avoid these invalid records.
if row and row[0] is not None:
self._mark_ack_status(row[0], AckStatus.unack)
pickled_data = row[1] # pickled data
item = pickle.loads(pickled_data)
self._unack_cache[row[0]] = item
self.total -= 1
return item
return None
def _find_item_id(self, item):
for key, value in self._unack_cache.items():
if value is item:
return key
log.warning("Can't find item %s from unack cache", item)
return None
def ack(self, item):
with self.action_lock:
_id = self._find_item_id(item)
if _id is None:
return
self._mark_ack_status(_id, AckStatus.acked)
self._unack_cache.pop(_id)
def ack_failed(self, item):
with self.action_lock:
_id = self._find_item_id(item)
if _id is None:
return
self._mark_ack_status(_id, AckStatus.ack_failed)
self._unack_cache.pop(_id)
def nack(self, item):
with self.action_lock:
_id = self._find_item_id(item)
if _id is None:
return
self._mark_ack_status(_id, AckStatus.ready)
self._unack_cache.pop(_id)
self.total += 1
def get(self, block=True, timeout=None):
if not block:
pickled = self._pop()
if not pickled:
raise Empty
elif timeout is None:
# block until a put event.
pickled = self._pop()
while not pickled:
self.put_event.clear()
self.put_event.wait(TICK_FOR_WAIT)
pickled = self._pop()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
# block until the timeout reached
endtime = _time.time() + timeout
pickled = self._pop()
while not pickled:
self.put_event.clear()
remaining = endtime - _time.time()
if remaining <= 0.0:
raise Empty
self.put_event.wait(
TICK_FOR_WAIT if TICK_FOR_WAIT < remaining else remaining)
pickled = self._pop()
item = pickled
return item
def task_done(self):
"""Persist the current state if auto_commit=False."""
if not self.auto_commit:
self._task_done()
@property
def size(self):
return self.total
def qsize(self):
return self.size
def __len__(self):
return self.size
FIFOSQLiteAckQueue = SQLiteAckQueue
class FILOSQLiteAckQueue(SQLiteAckQueue):
"""SQLite3 based FILO queue with ack support."""
_TABLE_NAME = 'ack_filo_queue'
# SQL to select a record
_SQL_SELECT = ('SELECT {key_column}, data FROM {table_name} '
'WHERE status < %s '
'ORDER BY {key_column} DESC LIMIT 1' % AckStatus.unack)
class UniqueAckQ(SQLiteAckQueue):
_TABLE_NAME = 'ack_unique_queue'
_SQL_CREATE = (
'CREATE TABLE IF NOT EXISTS {table_name} ('
'{key_column} INTEGER PRIMARY KEY AUTOINCREMENT, '
'data BLOB, timestamp FLOAT, status INTEGER, UNIQUE (data))'
)
def put(self, item):
obj = pickle.dumps(item)
try:
self._insert_into(obj, _time.time())
except sqlite3.IntegrityError:
pass
else:
self.total += 1
self.put_event.set()
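# --- Editorial usage sketch (added, not part of the original module): the
# ack workflow for the SQLiteAckQueue defined above. The path and payload
# are made up for illustration.
q = SQLiteAckQueue('/tmp/demo-ack-queue')
q.put({'job': 42})
item = q.get()                   # item is now tracked in the "unack" state
try:
    print('working on', item)    # stand-in for real processing
    q.ack(item)                  # success; acked rows can later be pruned via clear_acked_data()
except Exception:
    q.nack(item)                 # failure; return the item to the queue for another attempt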
| 33.106719
| 78
| 0.587393
|
7950808680383283e16d71b4811c596b24185eea
| 327
|
py
|
Python
|
tests/test_simple.py
|
SmartDataAnalytics/Bio-KEEN
|
680dffb6cd170b0f79ea09f0f730f0baa9ce73d4
|
[
"MIT"
] | 38
|
2018-11-13T18:38:46.000Z
|
2021-12-14T12:32:48.000Z
|
tests/test_simple.py
|
SmartDataAnalytics/Bio-KEEN
|
680dffb6cd170b0f79ea09f0f730f0baa9ce73d4
|
[
"MIT"
] | 27
|
2018-10-08T09:01:59.000Z
|
2021-02-07T20:43:31.000Z
|
tests/test_simple.py
|
SmartDataAnalytics/Bio-KEEN
|
680dffb6cd170b0f79ea09f0f730f0baa9ce73d4
|
[
"MIT"
] | 2
|
2019-12-04T19:49:06.000Z
|
2020-07-03T01:25:09.000Z
|
# -*- coding: utf-8 -*-
"""Simple tests for BioKEEN."""
import unittest
from biokeen.constants import VERSION
class TestImport(unittest.TestCase):
"""Simple tests for importing BioKEEN."""
def test_version_type(self):
"""Test the type of the version string."""
self.assertIsInstance(VERSION, str)
| 20.4375
| 50
| 0.678899
|
795081be7cf3c77d2dc0b6189c542cd5cb083a1d
| 2,704
|
py
|
Python
|
prettyqt/custom_widgets/__init__.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 7
|
2019-05-01T01:34:36.000Z
|
2022-03-08T02:24:14.000Z
|
prettyqt/custom_widgets/__init__.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 141
|
2019-04-16T11:22:01.000Z
|
2021-04-14T15:12:36.000Z
|
prettyqt/custom_widgets/__init__.py
|
phil65/PrettyQt
|
26327670c46caa039c9bd15cb17a35ef5ad72e6c
|
[
"MIT"
] | 5
|
2019-04-17T11:48:19.000Z
|
2021-11-21T10:30:19.000Z
|
"""Module containing custom widget classes."""
from .image import Image
from .listinput import ListInput
from .booldicttoolbutton import BoolDictToolButton
from .optionalwidget import OptionalWidget
from .collapsibleframe import CollapsibleFrame
from .expandableline import ExpandableLine
from .singlelinetextedit import SingleLineTextEdit
from .regexinput import RegexInput
from .mappedcheckbox import MappedCheckBox
from .logtextedit import LogTextEdit
from .flagselectionwidget import FlagSelectionWidget
from .stringornumberwidget import StringOrNumberWidget
from .iconlabel import IconLabel
from .iconwidget import IconWidget
from .flowlayout import FlowLayout
from .borderlayout import BorderLayout
from .completionwidget import CompletionWidget
from .sidebarwidget import SidebarWidget
from .colorchooserbutton import ColorChooserButton
from .filechooserbutton import FileChooserButton
from .fontchooserbutton import FontChooserButton
from .inputandslider import InputAndSlider
from .spanslider import SpanSlider
from .labeledslider import LabeledSlider
from .waitingspinner import WaitingSpinner
from .markdownwidget import MarkdownWindow
from .imageviewer import ImageViewer
from .popupinfo import PopupInfo
from .selectionwidget import SelectionWidget
from .codeeditor import CodeEditor
from .roundprogressbar import RoundProgressBar
from .subsequencecompleter import SubsequenceCompleter
from .framelesswindow import FramelessWindow
# from .stareditor import StarEditor, StarRating
from .timeline import Timeline, VideoSample
from .standardiconswidget import StandardIconsWidget
# Deprecated: should be imported from custom_delegates instead
from prettyqt.custom_delegates.buttondelegate import ButtonDelegate
from prettyqt.custom_delegates.radiodelegate import RadioDelegate
__all__ = [
"Image",
"ListInput",
"BoolDictToolButton",
"OptionalWidget",
"CollapsibleFrame",
"CompletionWidget",
"ExpandableLine",
"SingleLineTextEdit",
"RegexInput",
"MappedCheckBox",
"LogTextEdit",
"FlagSelectionWidget",
"StringOrNumberWidget",
"IconLabel",
"IconWidget",
"FlowLayout",
"BorderLayout",
"SidebarWidget",
"ColorChooserButton",
"FileChooserButton",
"FontChooserButton",
"InputAndSlider",
"SpanSlider",
"LabeledSlider",
"WaitingSpinner",
"RoundProgressBar",
"PopupInfo",
"ButtonDelegate",
"RadioDelegate",
"SelectionWidget",
"ImageViewer",
"MarkdownWindow",
"CodeEditor",
"Player",
"Timeline",
# "StarEditor",
# "StarRating",
"VideoSample",
"RegexEditorWidget",
"StandardIconsWidget",
"SubsequenceCompleter",
"FramelessWindow",
]
| 30.382022
| 67
| 0.788831
|
7950820f21d4ea6e7a4a3af7e2e59d6cb7d4498c
| 908
|
py
|
Python
|
setup.py
|
GabLeRoux/sshconf
|
7f78cc8d9390e039d9d70304f772e66c657b83d7
|
[
"MIT"
] | null | null | null |
setup.py
|
GabLeRoux/sshconf
|
7f78cc8d9390e039d9d70304f772e66c657b83d7
|
[
"MIT"
] | null | null | null |
setup.py
|
GabLeRoux/sshconf
|
7f78cc8d9390e039d9d70304f772e66c657b83d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, Distribution
Distribution().fetch_build_eggs('versiontag')
from versiontag import get_version, cache_git_tag
cache_git_tag()
MY_VERSION = get_version(pypi=True)
setup(
name='sshconf',
version=MY_VERSION,
description='Lightweight SSH config library',
author='Søren A D',
author_email='sorend@acm.org',
url='https://github.com/sorend/sshconf',
download_url='https://github.com/sorend/sshconf/tarball/%s' % MY_VERSION,
license='MIT',
keywords=['ssh', 'config'],
py_modules=['sshconf'],
data_files=['version.txt'],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
]
)
| 30.266667
| 77
| 0.660793
|
7950825afd3d3e051136da89b72568e8cb3e7abc
| 15,077
|
py
|
Python
|
mars/promise.py
|
ChenQuan/mars
|
46fc9747e99210cebfabfc2d85bcc8272440d1a3
|
[
"Apache-2.0"
] | null | null | null |
mars/promise.py
|
ChenQuan/mars
|
46fc9747e99210cebfabfc2d85bcc8272440d1a3
|
[
"Apache-2.0"
] | null | null | null |
mars/promise.py
|
ChenQuan/mars
|
46fc9747e99210cebfabfc2d85bcc8272440d1a3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
import sys
import threading
import weakref
import numpy as np
from .compat import getargspec
from .actors import FunctionActor
from .actors.core import ActorRef
from .errors import PromiseTimeout
from .utils import wraps
logger = logging.getLogger(__name__)
_promise_pool = dict()
class Promise(object):
"""
Object representing a promised result
"""
def __init__(self, resolve=None, reject=None, done=False, failed=False):
# use random promise id
self._id = struct.pack('<Q', id(self)) + np.random.bytes(32)
# register in global pool to reject gc collection
_promise_pool[self._id] = self
self._accept_handler = self._wrap_handler(resolve)
self._reject_handler = self._wrap_handler(reject)
# _bind_item indicates the Promise object whose step_next()
# should be called when current Promise finishes. For instance,
# in ``p2 = p1.then(lambda arg: promise_call())``, the value of
# _bind_item of the Promise object returned by ``promise_call()``
# is p2, thus when the lambda finishes, subsequent operations in
# p2 can be executed.
self._bind_item = None # type: Promise
# _next_item indicates the next Promise object to be invoked
# when the current one returns.
self._next_item = None # type: Promise
# promise results
if done:
self._accepted = True
elif failed:
self._accepted = False
else:
self._accepted = None
self._args = ()
self._kwargs = {}
self.post_create()
def __del__(self):
self.pre_destroy()
@property
def id(self):
return self._id
def post_create(self):
pass
def pre_destroy(self):
pass
def _wrap_handler(self, func):
"""
Wraps a promise handler
"""
if func is None:
return None
@wraps(func)
def _wrapped(*args, **kwargs):
try:
result = func(*args, **kwargs)
if isinstance(result, Promise):
# the function itself returns a promise object
# bind returned promise object to current promise
result._bind_item = self
if result._accepted is not None:
# promise already done, we move next
args = result._args or ()
kwargs = result._kwargs or {}
kwargs['_accept'] = result._accepted
result.step_next(*args, **kwargs)
else:
# return non-promise result, we just step next
self.step_next(result)
except:
# error occurred when executing func, we reject with exc_info
logger.exception('Exception met in executing promise.')
exc = sys.exc_info()
self.step_next(*exc, _accept=False)
return _wrapped
def _get_bind_root(self):
"""
Get root promise of result promises
:return: root promise
:rtype: Promise
"""
target = self
while target._bind_item is not None:
if target and target.id in _promise_pool:
# remove binder that will not be used later
del _promise_pool[target.id]
target = target._bind_item
return target
@staticmethod
def _get_handling_promise(p, handler_attr):
"""
Get a promise object that defines the handler needed
:param Promise p: promise object
:param str handler_attr: attribute name of the handler
:rtype: Promise
"""
while getattr(p, handler_attr) is None:
p = p._get_bind_root() # type: Promise
if p and p._next_item is not None:
if p.id in _promise_pool:
# remove predecessor that will not be used later
del _promise_pool[p.id]
p = p._next_item
else:
break
return p
@staticmethod
def _log_unexpected_error(args):
if args and len(args) == 3 and issubclass(args[0], Exception):
logger.exception('Unhandled exception in promise', exc_info=args)
def step_next(self, *args, **kwargs):
"""
Step into next promise with given args and kwargs
"""
accept = kwargs.pop('_accept', True)
target_promise = self
self._accepted = accept
try:
root_promise = self._get_bind_root()
if root_promise and root_promise.id in _promise_pool:
del _promise_pool[root_promise.id]
target_promise = root_promise._next_item
root_promise._accepted = self._accepted
root_promise._args = args
root_promise._kwargs = kwargs
if not target_promise:
if not accept:
self._log_unexpected_error(args)
return
if accept:
acceptor = self._get_handling_promise(target_promise, '_accept_handler')
if acceptor and acceptor._accept_handler:
acceptor._accept_handler(*args, **kwargs)
else:
acceptor._accepted = accept
acceptor._args = args
acceptor._kwargs = kwargs
else:
rejecter = self._get_handling_promise(target_promise, '_reject_handler')
if rejecter and rejecter._reject_handler:
rejecter._reject_handler(*args, **kwargs)
else:
rejecter._accepted = accept
rejecter._args = args
rejecter._kwargs = kwargs
self._log_unexpected_error(args)
finally:
if target_promise and target_promise.id in _promise_pool:
del _promise_pool[target_promise.id]
def then(self, on_fulfilled, on_rejected=None):
promise = Promise(on_fulfilled, on_rejected)
self._next_item = promise
if self._accepted is not None:
self._kwargs['_accept'] = self._accepted
self.step_next(*self._args, **self._kwargs)
return promise
def catch(self, on_rejected):
return self.then(None, on_rejected)
def wait(self, waiter=None, timeout=None):
"""
        Wait until the promise returns. Currently only used for debugging.
:param waiter: wait object
:param timeout: wait timeout
:return: accept or reject
"""
waiter = threading.Event()
status = []
def _finish_exec(accept_or_reject):
waiter.set()
status.append(accept_or_reject)
self.then(lambda *_, **__: _finish_exec(True),
lambda *_, **__: _finish_exec(False))
waiter.wait(timeout)
return status[0]
class PromiseRefWrapper(object):
"""
    Promise wrapper that enables promise calls by adding _promise=True
"""
def __init__(self, ref, caller):
self._ref = ref
self._caller = caller # type: PromiseActor
def send(self, message):
return self._ref.send(message)
def tell(self, message, delay=None):
return self._ref.tell(message, delay=delay)
def destroy(self):
return self._ref.destroy_actor(self)
@property
def uid(self):
return self._ref.uid
@property
def address(self):
return self._ref.address
def __getattr__(self, item):
if item.startswith('_'):
return object.__getattribute__(self, item)
def _mt_fun(*args, **kwargs):
ref_fun = getattr(self._ref, item)
if not kwargs.pop('_promise', False):
return ref_fun(*args, **kwargs)
p = Promise()
self._caller.register_promise(p, self._ref)
timeout = kwargs.pop('_timeout', 0)
kwargs['callback'] = ((self._caller.uid, self._caller.address),
'handle_promise', p.id)
kwargs['_tell'] = True
ref_fun(*args, **kwargs)
if timeout and timeout > 0:
# add a callback that triggers some times later to deal with timeout
self._caller.ref().handle_promise_timeout(p.id, _tell=True, _delay=timeout)
return p
return _mt_fun
def reject_on_exception(func):
"""
Decorator on actor callback functions that handles exceptions by
    sending them to the caller as promise rejections. The function should have
an argument called ``callback``.
"""
arg_names = getargspec(func).args
callback_pos = None
if arg_names:
for idx, name in enumerate(arg_names):
if name == 'callback':
callback_pos = idx
break
@wraps(func)
def _wrapped(*args, **kwargs):
callback = None
if 'callback' in kwargs:
callback = kwargs['callback']
elif callback_pos and callback_pos < len(args):
callback = args[callback_pos]
try:
return func(*args, **kwargs)
except:
actor = args[0]
logger.exception('Unhandled exception in promise call')
if callback:
actor.tell_promise(callback, *sys.exc_info(), **dict(_accept=False))
return _wrapped
class PromiseActor(FunctionActor):
"""
Actor class providing promise functionality
"""
def promise_ref(self, *args, **kwargs):
"""
Wraps an existing ActorRef into a promise ref
"""
if not hasattr(self, '_promises'):
self._promises = dict()
self._promise_ref_keys = dict()
self._ref_key_promises = dict()
if not args and not kwargs:
ref = self.ref()
elif args and isinstance(args[0], ActorRef):
ref = self.ctx.actor_ref(args[0].uid, address=args[0].address)
else:
ref = self.ctx.actor_ref(*args, **kwargs)
return PromiseRefWrapper(ref, self)
def register_promise(self, promise, ref):
"""
Register a promise into the actor with referrer info
:param Promise promise: promise object to register
:param ActorRef ref: ref
"""
promise_id = promise.id
def _weak_callback(*_):
self.delete_promise(promise_id)
self._promises[promise_id] = weakref.ref(promise, _weak_callback)
ref_key = (ref.uid, ref.address)
self._promise_ref_keys[promise_id] = ref_key
try:
self._ref_key_promises[ref_key].add(promise_id)
except KeyError:
self._ref_key_promises[ref_key] = {promise_id}
def get_promise(self, promise_id):
"""
Get promise object from weakref.
"""
obj = self._promises.get(promise_id)
if obj is None:
return None
return obj()
def delete_promise(self, promise_id):
if promise_id not in self._promises:
return
ref_key = self._promise_ref_keys[promise_id]
self._ref_key_promises[ref_key].remove(promise_id)
del self._promises[promise_id]
del self._promise_ref_keys[promise_id]
def reject_promise_refs(self, refs, *args, **kwargs):
"""
Reject all promises related to given actor ref
:param refs: actor refs to reject
"""
kwargs['_accept'] = False
handled_refs = []
for ref in refs:
ref_key = (ref.uid, ref.address)
if ref_key not in self._ref_key_promises:
continue
handled_refs.append(ref)
for promise_id in list(self._ref_key_promises[ref_key]):
p = self.get_promise(promise_id)
if p is None:
continue
p.step_next(*args, **kwargs)
return handled_refs
def tell_promise(self, callback, *args, **kwargs):
"""
Tell promise results to the caller
:param callback: promise callback
"""
uid, address = callback[0]
callback_args = callback[1:] + args + (kwargs, )
self.ctx.actor_ref(uid, address=address).tell(callback_args)
def handle_promise(self, promise_id, *args, **kwargs):
"""
Callback entry for promise results
:param promise_id: promise key
"""
p = self.get_promise(promise_id)
if p is None:
logger.warning('Promise %r reentered in %s', promise_id, self.uid)
return
self.get_promise(promise_id).step_next(*args, **kwargs)
self.delete_promise(promise_id)
def handle_promise_timeout(self, promise_id):
"""
Callback entry for promise timeout
:param promise_id: promise key
"""
p = self.get_promise(promise_id)
if not p or p._accepted is not None:
# skip promises that are already finished
return
self.delete_promise(promise_id)
try:
raise PromiseTimeout
except PromiseTimeout:
exc_info = sys.exc_info()
p.step_next(*exc_info, **dict(_accept=False))
def all_(promises):
"""
    Create a promise from a collection of promises. Invoked when all referenced
    promises are accepted or at least one is rejected.
:param promises: collection of promises
:return: the new promise
"""
promises = list(promises)
new_promise = Promise()
finish_set = set()
def _build_then(promise):
def _then(*_, **__):
finish_set.add(promise.id)
if all(p.id in finish_set for p in promises):
new_promise.step_next()
return _then
def _handle_reject(*args, **kw):
if new_promise._accepted is not None:
return
for p in promises:
try:
del _promise_pool[p.id]
except KeyError:
pass
kw['_accept'] = False
new_promise.step_next(*args, **kw)
for p in promises:
if isinstance(p, Promise):
p.then(_build_then(p), _handle_reject)
if promises:
return new_promise
else:
new_promise.step_next()
return new_promise
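# --- Editorial sketch (added, not part of mars.promise): a hand-driven
# example of the chaining behaviour above. In Mars, step_next() is normally
# triggered by actor callbacks rather than called directly.
p = Promise()
p.then(lambda value: value * 2).then(lambda doubled: print('got', doubled))
p.step_next(21)                          # resolves the chain; prints "got 42"

failing = Promise()
failing.then(lambda value: value).catch(lambda *exc, **kw: print('rejected'))
failing.step_next(None, _accept=False)   # routed to the first reject handler; prints "rejected"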
| 32.284797
| 91
| 0.586456
|
7950826bec5a019fd91606a6165620fa5ea6abe2
| 2,873
|
py
|
Python
|
data/process_data.py
|
emichris/Disaster-Response
|
68d894dc5e28d461fb5a7b1b040e665119e9ad34
|
[
"MIT"
] | null | null | null |
data/process_data.py
|
emichris/Disaster-Response
|
68d894dc5e28d461fb5a7b1b040e665119e9ad34
|
[
"MIT"
] | null | null | null |
data/process_data.py
|
emichris/Disaster-Response
|
68d894dc5e28d461fb5a7b1b040e665119e9ad34
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
'''
Function reads in both datasets
It returns a dataframe with messages and categories merged on 'id'
'''
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on='id')
return df
def clean_data(df):
'''
This is the transformation step of the ETL process:
Function splits the categories column into separate, clearly named columns,
converts values to binary, and drops duplicates.
'''
categories = df.categories.str.split(';', expand=True)
first_row = categories.iloc[0] # select the first row of the categories dataframe
category_colnames = first_row.apply(lambda x: x[:-2]) # Extract colum names
categories.columns = category_colnames
    for column in categories:
        # set each value to be the last character of the string
        categories[column] = categories[column].apply(lambda x: x[-1])
        # convert column from string to numeric
        categories[column] = pd.to_numeric(categories[column])
        # clip values greater than 1 (e.g. related=2) so every column is truly binary
        categories[column] = categories[column].clip(upper=1)
df.drop(columns=['categories'], inplace=True)
df = pd.concat([df, categories], axis=1)
df.drop_duplicates(inplace=True)
return df
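# Worked example of the transformation above (editor's note, values illustrative):
#   a raw 'categories' cell such as 'related-1;request-0;offer-0' becomes three
#   binary columns: related=1, request=0, offer=0.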
def save_data(df, database_filename):
'''
This is the load step of the ETL process. Function writes the dataframe into an SQLite database in the specified database file path.
'''
    engine = create_engine('sqlite:///'+database_filename)
    # replace the table if it already exists so the script can be re-run safely
    df.to_sql(database_filename, engine, index=False, if_exists='replace')
def main():
'''
Combines all three functions above to perform the ETL process taking user input: messages_filepath, categories_filepath, database_filepath
'''
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main()
| 35.469136
| 142
| 0.671076
|
795082b7b8dc110e1af40fd769d2eeca2f3b1c0c
| 1,156
|
py
|
Python
|
doc/source/scripts/titles.py
|
tkn963/pydarkstar
|
ac8adf198988c6f4cd42ee033a36369ecc9eaf28
|
[
"MIT"
] | 18
|
2015-01-13T03:48:02.000Z
|
2022-01-23T16:52:10.000Z
|
doc/source/scripts/titles.py
|
joshr45/pydarkstar
|
ac8adf198988c6f4cd42ee033a36369ecc9eaf28
|
[
"MIT"
] | 29
|
2015-01-14T01:34:10.000Z
|
2022-01-30T16:57:27.000Z
|
doc/source/scripts/titles.py
|
joshr45/pydarkstar
|
ac8adf198988c6f4cd42ee033a36369ecc9eaf28
|
[
"MIT"
] | 46
|
2015-01-13T20:26:39.000Z
|
2022-03-22T13:19:46.000Z
|
import shutil
import os
import re
work = os.getcwd()
found = []
regex = re.compile(r'pydarkstar\.(.*)\.rst')
for root, dirs, files in os.walk(work):
for f in files:
m = regex.match(f)
if m:
found.append((root, f))
for root, f in found:
path = os.path.join(root, f)
with open(path, 'r') as handle:
lines = handle.readlines()
with open(path, 'w') as handle:
for i, line in enumerate(lines):
if i == 0:
line = re.sub(r'\s+package$', '', line)
line = re.sub(r'\s+module$', '', line)
line = re.sub(r'^pydarkstar\.', '', line)
#print('{:2d} {}'.format(i, line.rstrip()))
handle.write(line)
#print('')
# fix main file
with open('pydarkstar.rst', 'r') as handle:
lines = handle.readlines()
with open('pydarkstar.rst', 'w') as handle:
for i, line in enumerate(lines):
if i == 0:
line = re.sub(r'\s+package$', '', line)
if re.match(r'^\s\s\spydarkstar.*$', line):
handle.write(' {}'.format(line.lstrip()))
else:
handle.write(line)
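# Editor's note - example of the title rewriting performed above (module names
# are hypothetical):
#   'pydarkstar.scrubbing package'  ->  'scrubbing'
#   'pydarkstar.logutils module'    ->  'logutils'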
| 26.272727
| 57
| 0.513841
|
795082f85d56dd8675bfc8d8d091cbfe3dc069c0
| 1,083
|
py
|
Python
|
gif_generate/urls.py
|
ChnZhgH/gif_generate
|
3f3c262f26857f421abb5159ca94ee59896bcc99
|
[
"MIT"
] | 1
|
2020-06-26T08:23:33.000Z
|
2020-06-26T08:23:33.000Z
|
gif_generate/urls.py
|
ChnZhgH/gif_generate
|
3f3c262f26857f421abb5159ca94ee59896bcc99
|
[
"MIT"
] | 8
|
2021-03-19T02:37:25.000Z
|
2022-03-12T00:39:38.000Z
|
gif_generate/urls.py
|
ChnZhgH/gif_generate
|
3f3c262f26857f421abb5159ca94ee59896bcc99
|
[
"MIT"
] | 1
|
2021-04-12T02:29:13.000Z
|
2021-04-12T02:29:13.000Z
|
"""gif_generate URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from gif_gen import views
from gif_gen.src.controller import gif_controller
urlpatterns = [
path('admin/', admin.site.urls),
path(r'', views.index),
path(r'upload_images', gif_controller.upload_images),
path(r'upload/video', gif_controller.upload_video),
path(r'transform', gif_controller.video_to_gif_quick),
path(r'download/gif', gif_controller.download)
]
| 36.1
| 77
| 0.727608
|
7950842e757e600a0f4cee47af9d05f94caf2122
| 4,936
|
py
|
Python
|
tests/test_so_db_adapter.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_so_db_adapter.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_so_db_adapter.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | null | null | null |
from unittest import mock
from onapsdk.so.so_db_adapter import SoDbAdapter, IdentityService
ADD_CLOUD_SITE_RESPONSE = {
'_links': {
'cloudSite': {
'href': 'http://so.api.simpledemo.onap.org:30277/cloudSite/mc_test_cloud_site_3'
},
'self': {
'href': 'http://so.api.simpledemo.onap.org:30277/cloudSite/mc_test_cloud_site_3'
}
},
'aic_version': '2.5',
'clli': 'test_clli_0',
'cloud_owner': None,
'cloudify_id': None,
'creation_timestamp': '2021-05-12T08:52:48.134+0000',
'identityService': {
'admin_tenant': 'service',
'creation_timestamp': '2021-05-12T08:52:48.134+0000',
'identityServerTypeAsString': 'KEYSTONE',
'identity_authentication_type': 'USERNAME_PASSWORD',
'identity_server_type': 'KEYSTONE',
'identity_url': 'http://1.2.3.4:5000/v2.0',
'last_updated_by': None,
'member_role': 'admin',
'mso_id': 'onapsdk_user',
'mso_pass': 'mso_pass_onapsdk',
'project_domain_name': 'NULL',
'tenant_metadata': True,
'update_timestamp': '2021-05-12T08:52:48.134+0000',
'user_domain_name': 'NULL'
},
'identity_service_id': 'test_identity_0',
'last_updated_by': None,
'orchestrator': 'multicloud',
'platform': None,
'region_id': 'test_region_0',
'support_fabric': True,
'update_timestamp': '2021-05-12T08:52:48.134+0000',
'uri': None
}
SERVICE_VNF_RESPONSE = {
'serviceVnfs': [
{
'modelInfo': {
'modelName': 'test_vnf_01',
'modelUuid': 'd2779cc5-fb01-449f-a355-7e5d911dca93',
'modelInvariantUuid': '027cb696-f68f-47db-9b0e-585ea3eaa512',
'modelVersion': '1.0',
'modelCustomizationUuid': 'b8740912-e0fc-426f-af97-7657caf57847',
'modelInstanceName': 'test_vnf_01 0'
},
'toscaNodeType': 'org.openecomp.resource.vf.Mvnr5gCucpVfT003',
'nfFunction': None,
'nfType': None,
'nfRole': None,
'nfNamingCode': None,
'multiStageDesign': 'false',
'vnfcInstGroupOrder': None,
'resourceInput': None,
'vfModules': [{'modelInfo':
{
'modelName': 'test_vf_01',
'modelUuid': '153464b8-4f47-4140-8b92-9614c4578d91',
'modelInvariantUuid': '753deff5-99a2-4154-8c1d-3e956cb96f32',
'modelVersion': '1',
'modelCustomizationUuid': '7ca564f3-b908-499c-b086-ae77ad270d8c'
},
'isBase': False,
'vfModuleLabel': 'vf_mod_label',
'initialCount': 0,
'hasVolumeGroup': False
}
],
'groups': []
}
]
}
def test_identity_service():
identity_service = IdentityService(identity_id="identity_123")
assert identity_service.identity_id == "identity_123"
assert identity_service.url == "http://1.2.3.4:5000/v2.0"
assert identity_service.mso_id == "onapsdk_user"
assert identity_service.mso_pass == "mso_pass_onapsdk"
assert identity_service.project_domain_name == "NULL"
assert identity_service.user_domain_name == "NULL"
assert identity_service.admin_tenant == "service"
assert identity_service.member_role == "admin"
assert identity_service.identity_server_type == "KEYSTONE"
assert identity_service.identity_authentication_type == "USERNAME_PASSWORD"
assert identity_service.hibernate_lazy_initializer == {}
assert identity_service.server_type_as_string == "KEYSTONE"
assert identity_service.tenant_metadata is True
@mock.patch.object(SoDbAdapter, "send_message_json")
def test_add_cloud_site(mock_send_message_json):
identity_service = IdentityService(identity_id="test_identity_0")
mock_send_message_json.return_value = ADD_CLOUD_SITE_RESPONSE
response = SoDbAdapter.add_cloud_site(cloud_region_id="test_region_0",
complex_id="test_clli_0",
identity_service=identity_service)
assert response['region_id'] == "test_region_0"
assert response['aic_version'] == "2.5"
assert response['clli'] == "test_clli_0"
assert response['orchestrator'] == "multicloud"
assert response['identity_service_id'] == "test_identity_0"
@mock.patch.object(SoDbAdapter, "send_message_json")
def test_get_service_vnf_info(mock_send_message_json):
    mock_send_message_json.return_value = SERVICE_VNF_RESPONSE
    response = SoDbAdapter.get_service_vnf_info(identifier="test_id_0")
    vnf = response['serviceVnfs'][0]
    assert vnf['modelInfo']['modelName'] == "test_vnf_01"
    assert vnf['modelInfo']['modelVersion'] == "1.0"
    assert vnf['toscaNodeType'] == "org.openecomp.resource.vf.Mvnr5gCucpVfT003"
    assert vnf['vfModules'][0]['modelInfo']['modelName'] == "test_vf_01"
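# ---------------------------------------------------------------------------
# Illustrative (non-test) usage of the adapter under test (editor's sketch;
# the region, complex and identity identifiers below are hypothetical):
#
#   identity = IdentityService(identity_id="my_keystone")
#   SoDbAdapter.add_cloud_site(cloud_region_id="RegionOne",
#                              complex_id="clli_001",
#                              identity_service=identity)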
| 39.806452
| 92
| 0.634522
|
79508460b180b5b0ea16d3bc1feab13784c21e2e
| 409
|
py
|
Python
|
ftwdebug.py
|
Shobdo-Kolpo-Drum/Project-codes
|
0083b9c44593c531e4837505683b50a3e6560a22
|
[
"MIT"
] | null | null | null |
ftwdebug.py
|
Shobdo-Kolpo-Drum/Project-codes
|
0083b9c44593c531e4837505683b50a3e6560a22
|
[
"MIT"
] | null | null | null |
ftwdebug.py
|
Shobdo-Kolpo-Drum/Project-codes
|
0083b9c44593c531e4837505683b50a3e6560a22
|
[
"MIT"
] | null | null | null |
import wave
from os import listdir
from os.path import isfile, join
m = input("/Users/rowshanarabegum/Downloads/data 5-12/Data1/test")
onlyfiles = [f for f in listdir(m) if isfile(join(m, f))]
for i in onlyfiles:
    try:
        # close the handle so file descriptors are not leaked
        wave.open(join(m, i)).close()
    except Exception:
        print("\033[1;31;47m ERROR AT " + i)
        print("\033[0;37;40m")
        continue
print("\033[1;32;40m Except those, all ok!!!")
| 27.266667
| 66
| 0.623472
|
795085068e9d786faeaf82caaeef55b453fdc6bf
| 1,126
|
py
|
Python
|
app/__init__.py
|
Cian747/think-tank
|
82c06892d1d5b3992312baf97d0159e0269d8c00
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Cian747/think-tank
|
82c06892d1d5b3992312baf97d0159e0269d8c00
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
Cian747/think-tank
|
82c06892d1d5b3992312baf97d0159e0269d8c00
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bootstrap import Bootstrap
from flask_uploads import UploadSet,configure_uploads,IMAGES
from config import config_options
from flask_mail import Mail
from flask_simplemde import SimpleMDE
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
photos = UploadSet('photos',IMAGES)
mail = Mail()
simple = SimpleMDE()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config_options[config_name])
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
simple.init_app(app)
# Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
# Registering auth blueprint
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint,url_prefix = '/authenticate')
# configure UploadSet
configure_uploads(app,photos)
return app
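# ---------------------------------------------------------------------------
# Illustrative usage of the application factory (editor's sketch; the
# 'development' key is assumed to exist in config_options):
#
#   from app import create_app
#   app = create_app('development')
#   app.run()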
| 26.186047
| 71
| 0.776199
|
79508602204637f1f86e3666025fc723fa055335
| 917
|
py
|
Python
|
mmaction/utils/logger.py
|
HypnosXC/mmaction2
|
a26d5f981449445a5e22a0a60d8b285e06c3dd6e
|
[
"Apache-2.0"
] | 648
|
2021-06-24T19:33:09.000Z
|
2022-03-31T06:27:24.000Z
|
mmaction/utils/logger.py
|
xumingze0308/mmaction2
|
777546f27f8f5a3c83e10d966e2149be2fc9fa31
|
[
"Apache-2.0"
] | 98
|
2020-01-21T09:41:30.000Z
|
2022-03-12T00:53:06.000Z
|
mmaction/utils/logger.py
|
xumingze0308/mmaction2
|
777546f27f8f5a3c83e10d966e2149be2fc9fa31
|
[
"Apache-2.0"
] | 233
|
2020-01-18T03:46:27.000Z
|
2022-03-19T03:17:47.000Z
|
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Use ``get_logger`` method in mmcv to get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If ``log_file`` is specified, a FileHandler
will also be added. The name of the root logger is the top-level package
name, e.g., "mmaction".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
:obj:`logging.Logger`: The root logger.
"""
return get_logger(__name__.split('.')[0], log_file, log_level)
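# Illustrative usage (editor's sketch; the log file path is hypothetical):
#
#   logger = get_root_logger(log_file='work_dir/run.log', log_level=logging.INFO)
#   logger.info('Start training')
#
# Repeated calls with the same root name return the already-configured logger.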
| 36.68
| 79
| 0.679389
|
7950864132e6d9b2b263f294a5e140dea7aadde4
| 505
|
py
|
Python
|
plotly/validators/layout/updatemenu/_showactive.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/layout/updatemenu/_showactive.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/layout/updatemenu/_showactive.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ShowactiveValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='showactive',
parent_name='layout.updatemenu',
**kwargs
):
super(ShowactiveValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'arraydraw'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 26.578947
| 73
| 0.617822
|
7950868fafedbc1dba611671269a33bcda87551c
| 2,844
|
py
|
Python
|
alipay/aop/api/domain/AlipayBusinessOrderCancelModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayBusinessOrderCancelModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayBusinessOrderCancelModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PaytoolCancelRequestDetail import PaytoolCancelRequestDetail
class AlipayBusinessOrderCancelModel(object):
def __init__(self):
self._cancel_paytool_list = None
self._merchant_order_no = None
self._order_no = None
@property
def cancel_paytool_list(self):
return self._cancel_paytool_list
@cancel_paytool_list.setter
def cancel_paytool_list(self, value):
if isinstance(value, list):
self._cancel_paytool_list = list()
for i in value:
if isinstance(i, PaytoolCancelRequestDetail):
self._cancel_paytool_list.append(i)
else:
self._cancel_paytool_list.append(PaytoolCancelRequestDetail.from_alipay_dict(i))
@property
def merchant_order_no(self):
return self._merchant_order_no
@merchant_order_no.setter
def merchant_order_no(self, value):
self._merchant_order_no = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
def to_alipay_dict(self):
params = dict()
if self.cancel_paytool_list:
if isinstance(self.cancel_paytool_list, list):
for i in range(0, len(self.cancel_paytool_list)):
element = self.cancel_paytool_list[i]
if hasattr(element, 'to_alipay_dict'):
self.cancel_paytool_list[i] = element.to_alipay_dict()
if hasattr(self.cancel_paytool_list, 'to_alipay_dict'):
params['cancel_paytool_list'] = self.cancel_paytool_list.to_alipay_dict()
else:
params['cancel_paytool_list'] = self.cancel_paytool_list
if self.merchant_order_no:
if hasattr(self.merchant_order_no, 'to_alipay_dict'):
params['merchant_order_no'] = self.merchant_order_no.to_alipay_dict()
else:
params['merchant_order_no'] = self.merchant_order_no
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBusinessOrderCancelModel()
if 'cancel_paytool_list' in d:
o.cancel_paytool_list = d['cancel_paytool_list']
if 'merchant_order_no' in d:
o.merchant_order_no = d['merchant_order_no']
if 'order_no' in d:
o.order_no = d['order_no']
return o
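# ---------------------------------------------------------------------------
# Illustrative round trip (editor's sketch; the order numbers are hypothetical):
#
#   model = AlipayBusinessOrderCancelModel()
#   model.merchant_order_no = '2021051200001'
#   model.order_no = 'T200001'
#   params = model.to_alipay_dict()
#   # -> {'merchant_order_no': '2021051200001', 'order_no': 'T200001'}
#   restored = AlipayBusinessOrderCancelModel.from_alipay_dict(params)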
| 34.26506
| 100
| 0.628692
|
795087e6a4c8e87bf54634990be21c61527cc8b5
| 13,417
|
py
|
Python
|
tensorflow/contrib/distributions/python/kernel_tests/special_math_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 44
|
2017-01-26T11:39:36.000Z
|
2019-06-28T10:03:19.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/special_math_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 7
|
2017-07-13T09:40:59.000Z
|
2019-04-08T22:46:51.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/special_math_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 11
|
2017-08-17T05:52:35.000Z
|
2021-06-19T04:39:45.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import special_math
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
sm = special_math
def _check_strictly_increasing(array_1d):
diff = np.diff(array_1d)
np.testing.assert_array_less(0, diff)
def _make_grid(dtype, grid_spec):
"""Returns a uniform grid + noise, reshaped to shape argument."""
rng = np.random.RandomState(0)
num_points = np.prod(grid_spec.shape)
grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)
grid_spacing = (grid_spec.max - grid_spec.min) / num_points
grid += 0.1 * grid_spacing * rng.randn(*grid.shape)
# More useful if it's sorted (e.g. for testing monotonicity, or debugging).
grid = np.sort(grid)
return np.reshape(grid, grid_spec.shape)
GridSpec = collections.namedtuple("GridSpec", ["min", "max", "shape"])
ErrorSpec = collections.namedtuple("ErrorSpec", ["rtol", "atol"])
class NdtrTest(test.TestCase):
_use_log = False
# Grid min/max chosen to ensure 0 < cdf(x) < 1.
_grid32 = GridSpec(min=-12.9, max=5., shape=[100])
_grid64 = GridSpec(min=-37.5, max=8., shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-6, atol=0.)
def _test_grid(self, dtype, grid_spec, error_spec):
if self._use_log:
self._test_grid_log(dtype, grid_spec, error_spec)
else:
self._test_grid_no_log(dtype, grid_spec, error_spec)
def _test_grid_log(self, dtype, grid_spec, error_spec):
with self.test_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.log_ndtr(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, -inf < log_cdf(x) < 0. In this case, we should be able
# to use a huge grid because we have used tricks to escape numerical
# difficulties.
self.assertTrue((actual < 0).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.log_ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def _test_grid_no_log(self, dtype, grid_spec, error_spec):
with self.test_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.ndtr(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due
# to numerical limitations of cdf.
self.assertTrue((actual > 0).all())
self.assertTrue((actual < 1).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
      # Note: ndtr is non-negative, so mask on ``expected > 0`` here
      # (masking on ``expected < 0`` would compare empty arrays).
      self.assertAllClose(
          expected.astype(np.float64)[expected > 0],
          actual.astype(np.float64)[expected > 0],
          rtol=error_spec.rtol,
          atol=error_spec.atol)
def test_float32(self):
self._test_grid(np.float32, self._grid32, self._error32)
def test_float64(self):
self._test_grid(np.float64, self._grid64, self._error64)
class LogNdtrTestLower(NdtrTest):
_use_log = True
_grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])
_grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-4, atol=0.)
# The errors are quite large when the input is > 6 or so. Also,
# scipy.special.log_ndtr becomes zero very early, before 10,
# (due to ndtr becoming 1). We approximate Log[1 + epsilon] as epsilon, and
# avoid this issue.
class LogNdtrTestMid(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])
# Differences show up as soon as we're in the tail, so add some atol.
_error32 = ErrorSpec(rtol=0.1, atol=1e-7)
_error64 = ErrorSpec(rtol=0.1, atol=1e-7)
class LogNdtrTestUpper(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_UPPER,
max=12., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_UPPER,
max=35., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_error32 = ErrorSpec(rtol=1e-6, atol=1e-14)
_error64 = ErrorSpec(rtol=1e-6, atol=1e-14)
class NdtrGradientTest(test.TestCase):
_use_log = False
_grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])
_error32 = ErrorSpec(rtol=1e-4, atol=0)
_error64 = ErrorSpec(rtol=1e-7, atol=0)
def assert_all_true(self, v):
self.assertAllEqual(np.ones_like(v, dtype=np.bool), v)
def assert_all_false(self, v):
self.assertAllEqual(np.zeros_like(v, dtype=np.bool), v)
def _test_grad_finite(self, dtype):
with self.test_session():
x = variables.Variable([-100., 0., 100.], dtype=dtype)
output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))
grad_output = gradients_impl.gradients(output, x)
variables.global_variables_initializer().run()
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(output.eval()))
self.assert_all_true(np.isfinite(grad_output[0].eval()))
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
raw_grid = _make_grid(dtype, grid_spec)
grid = ops.convert_to_tensor(raw_grid)
with self.test_session():
fn = sm.log_ndtr if self._use_log else sm.ndtr
# If there are N points in the grid,
# grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
# the ith output point w.r.t. the jth grid point. We only expect the
# diagonal to be nonzero.
# TODO(b/31131137): Replace tf.test.compute_gradient with our own custom
# gradient evaluation to ensure we correctly handle small function delta.
grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
fn(grid),
grid_spec.shape)
grad_eval = np.diag(grad_eval)
# Check for NaN separately in order to get informative failures.
self.assert_all_false(np.isnan(grad_eval))
self.assert_all_true(grad_eval > 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Do the same checks but explicitly compute the gradient.
# (We did this because we're not sure if we trust
# tf.test.compute_gradient.)
grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()
self.assert_all_false(np.isnan(grad_eval))
if self._use_log:
g = np.reshape(grad_eval, [-1])
half = np.ceil(len(g) / 2)
self.assert_all_true(g[:int(half)] > 0.)
self.assert_all_true(g[int(half):] >= 0.)
else:
# The ndtr gradient will only be non-zero in the range [-14, 14] for
# float32 and [-38, 38] for float64.
self.assert_all_true(grad_eval >= 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Versus scipy.
expected = stats.norm.pdf(raw_grid)
if self._use_log:
expected /= special.ndtr(raw_grid)
expected[np.isnan(expected)] = 0.
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
      # Note: the expected gradient is non-negative, so mask on ``expected > 0``
      # (masking on ``expected < 0`` would compare empty arrays).
      self.assertAllClose(
          expected.astype(np.float64)[expected > 0],
          grad_eval.astype(np.float64)[expected > 0],
          rtol=error_spec.rtol,
          atol=error_spec.atol)
def test_float32(self):
self._test_grad_accuracy(np.float32, self._grid, self._error32)
self._test_grad_finite(np.float32)
def test_float64(self):
self._test_grad_accuracy(np.float64, self._grid, self._error64)
self._test_grad_finite(np.float64)
class LogNdtrGradientTest(NdtrGradientTest):
_use_log = True
class LogCDFLaplaceTest(test.TestCase):
# Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot
# rely on scipy to cross check the extreme values.
# Test will be done differently over different ranges. These are the values
# such that when exceeded by x, produce output that causes the naive (scipy)
# implementation to have numerical issues.
#
# If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.
# With inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is
# the value of x such that any larger value will result in
# 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a
# log # of zero. We therefore choose these as our cutoffs for testing.
CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.
CUTOFF_FLOAT32_UPPER = np.log(1. / (2. * np.finfo(np.float32).eps)) - 1.
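  # Editor's note (worked values): with float64 eps = 2**-52 the cutoff is
  # log(2**51) - 1 ~= 34.35, and with float32 eps = 2**-23 it is
  # log(2**22) - 1 ~= 14.25.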
def assertAllTrue(self, x):
self.assertAllEqual(np.ones_like(x, dtype=np.bool), x)
def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):
with self.test_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.log_cdf_laplace(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual))
self.assertAllTrue((actual < 0))
_check_strictly_increasing(actual)
# Versus scipy.
scipy_dist = stats.laplace(loc=0., scale=1.)
expected = scipy_dist.logcdf(grid.astype(scipy_dtype))
self.assertAllClose(
expected.astype(np.float64),
actual.astype(np.float64),
rtol=error_spec.rtol,
atol=error_spec.atol)
def test_float32_lower_and_mid_segment_scipy_float32_ok(self):
# Choose values mild enough that we can use scipy in float32, which will
# allow for a high accuracy match to scipy (since we both use float32).
self._test_grid_log(
np.float32, # dtype
np.float32, # scipy_dtype
GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),
ErrorSpec(rtol=5e-4, atol=0))
def test_float32_all_segments_with_scipy_float64_ok(self):
# Choose values outside the range where scipy float32 works.
# Let scipy use float64. This means we
# won't be exactly the same since we are in float32.
self._test_grid_log(
np.float32, # dtype
np.float64, # scipy_dtype
GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),
ErrorSpec(rtol=0.05, atol=0))
def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.test_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = sess.run([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.test_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = sess.run([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
if __name__ == "__main__":
test.main()
| 38.116477
| 80
| 0.677871
|
79508a45bd87c382093002c7169f7081530f6b98
| 1,052
|
py
|
Python
|
PacketStats/convertion.py
|
mattkozlowski/packet-stats
|
5c12b8efbf6bcb41f2ea17bac6dd41277922c001
|
[
"MIT"
] | null | null | null |
PacketStats/convertion.py
|
mattkozlowski/packet-stats
|
5c12b8efbf6bcb41f2ea17bac6dd41277922c001
|
[
"MIT"
] | null | null | null |
PacketStats/convertion.py
|
mattkozlowski/packet-stats
|
5c12b8efbf6bcb41f2ea17bac6dd41277922c001
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from subprocess import check_call
def export_to_txt(f_name, txt_f_name):
"""
Converts pcap file into txt with packets features
:param f_name: str
filename of source pcap file
:param txt_f_name: str
filename of target file
"""
cmd = "tshark -T fields -e frame.time_relative -e ip.src -e udp.srcport -e udp.dstport -e udp.length -r %s > %s" % (
f_name, txt_f_name)
check_call(cmd, shell=True)
# Delete non-UDP rows from the txt export and save them to a csv file
def delete_non_udp(f_name, output_filename, header_names):
    """
    Delete non-UDP packets from the exported txt file and save the result to a csv file
    :param f_name: str
        filename of source txt file
    :param output_filename: str
        filename of the target csv file
    :param header_names: list
        column names for the source file
    """
dataset = pd.read_csv(f_name, delimiter='\t', header=None, names=header_names)
dataset.replace(' ', np.nan, inplace=True)
dataset = dataset.dropna()
dataset.to_csv(output_filename, sep='\t', index=False)
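# ---------------------------------------------------------------------------
# Illustrative usage (editor's sketch; file and column names are hypothetical):
#
#   export_to_txt('capture.pcap', 'capture.txt')
#   delete_non_udp('capture.txt', 'capture-udp.csv',
#                  ['time', 'src_ip', 'src_port', 'dst_port', 'length'])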
| 30.941176
| 120
| 0.679658
|
79508a59a3f5d72a571dbc78a6154087f93ac6e3
| 4,119
|
py
|
Python
|
Lab-1/Playfair-Cipher/decrypt.py
|
mohith7548/Cryptography-LAB
|
05757072e38558134f2885d36915bfb0c5a26e9b
|
[
"MIT"
] | 2
|
2021-02-21T18:04:19.000Z
|
2021-02-23T06:48:10.000Z
|
Lab-1/Playfair-Cipher/decrypt.py
|
mohith7548/Cryptography-LAB
|
05757072e38558134f2885d36915bfb0c5a26e9b
|
[
"MIT"
] | null | null | null |
Lab-1/Playfair-Cipher/decrypt.py
|
mohith7548/Cryptography-LAB
|
05757072e38558134f2885d36915bfb0c5a26e9b
|
[
"MIT"
] | 2
|
2019-11-29T01:06:03.000Z
|
2019-12-07T19:34:45.000Z
|
#!/usr/bin/python3
KEY = ""
MATRIX = [[None for _ in range(5)] for _ in range(5)]
cols = [[None for _ in range(5)] for _ in range(5)]
def initialize():
# alphabet set
alphabet_set = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
]
# contruct MATRIX
k, c = 0, 0
for i in range(5):
for j in range(5):
if k < len(KEY):
MATRIX[i][j] = KEY[k]
alphabet_set.remove(KEY[k])
k += 1
else:
if alphabet_set[c] == "I":
MATRIX[i][j] = "I"
alphabet_set.remove("J")
else:
MATRIX[i][j] = alphabet_set[c]
c += 1
cols[j][i] = MATRIX[i][j]
for i in range(5):
for j in range(5):
if MATRIX[i][j] == "I":
print("I/J", end="\t")
else:
print(MATRIX[i][j], end="\t")
print()
print()
def get_pos(l):
if l == "J":
return get_pos("I")
for i in range(5):
for j in range(5):
if MATRIX[i][j] == l:
return i, j
return -1, -1
def is_same_col(x, y):
for i in range(len(cols)):
if x in cols[i] and y in cols[i]:
return True, i, (cols[i].index(x), cols[i].index(y))
return False, -1, (-1, -1)
def is_same_row(x, y):
for i in range(5):
if x in MATRIX[i] and y in MATRIX[i]:
return True, i, (MATRIX[i].index(x), MATRIX[i].index(y))
return False, -1, (-1, -1)
def validate_constraints(text):
text = list(text)
# 1. Repeating letters get 'X' inserted in the Middle
to_be_inserted_at = []
for i in range(len(text) - 1):
if text[i] == text[i + 1]:
to_be_inserted_at.append(i + 1)
for pos in to_be_inserted_at:
text.insert(pos, "X")
# 2. If length of text is odd => append 'X'
if len(text) % 2 != 0:
text.append("X")
return "".join(text)
def Decrypt(plain_text):
print(plain_text)
text = plain_text # validate_constraints(plain_text)
print(text)
cipher = []
i = 0
while i < len(text):
print(text[i], text[i + 1])
if text[i] == text[i + 1]:
print("same")
x = get_pos(text[i])
tmp = MATRIX[(x[0] - 1) % 5][(x[1] - 1) % 5]
print(tmp)
cipher.append(tmp)
cipher.append(tmp)
else:
row_res = is_same_row(text[i], text[i + 1])
col_res = is_same_col(text[i], text[i + 1])
# print(row_res, col_res)
if row_res[0]:
row = row_res[1]
col = row_res[2]
print("same row", row, col)
cipher.append(MATRIX[row][(col[0] - 1) % 5])
cipher.append(MATRIX[row][(col[1] - 1) % 5])
# print(cipher)
elif col_res[0]:
col = col_res[1]
row = col_res[2]
print("same col", row, col)
cipher.append(MATRIX[(row[0] - 1) % 5][col])
cipher.append(MATRIX[(row[1] - 1) % 5][col])
# print(cipher)
else:
print("else")
x = get_pos(text[i])
y = get_pos(text[i + 1])
print(x, y)
cipher.append(MATRIX[x[0]][y[1]])
cipher.append(MATRIX[y[0]][x[1]])
# print(cipher)
i += 2
return "".join(cipher)
def main():
    global KEY
    print("Playfair Cipher Decryption Algorithm\n")
    KEY = input("Enter the Key: ").upper()
    print()
    initialize()
    cipher_text = input("Enter the ciphertext: ").upper()
    print()
    plain_text = Decrypt(cipher_text)
    print(plain_text)
if __name__ == "__main__":
main()
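# ---------------------------------------------------------------------------
# Editor's worked example of the rules implemented above (key chosen only for
# illustration).  With KEY = "KEYWORD" the matrix built by initialize() is:
#
#   K E Y W O
#   R D A B C
#   F G H I L   (I and J share a cell)
#   M N P Q S
#   T U V X Z
#
# Decrypting the digraph "DK" uses the rectangle rule: D is at (1, 1) and K at
# (0, 0), so the plaintext letters are MATRIX[1][0] and MATRIX[0][1] -> "RE".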
| 23.271186
| 68
| 0.428745
|
79508b9a7b6b2ca78544f962c382b5ae505634c3
| 52
|
py
|
Python
|
src/test_system/managers/certificate_manager/__init__.py
|
Niwo1403/personality-test-system_bachelor-thesis
|
81f6bafb594df1daec5a36ecbe1efb59e11dd24d
|
[
"MIT"
] | null | null | null |
src/test_system/managers/certificate_manager/__init__.py
|
Niwo1403/personality-test-system_bachelor-thesis
|
81f6bafb594df1daec5a36ecbe1efb59e11dd24d
|
[
"MIT"
] | null | null | null |
src/test_system/managers/certificate_manager/__init__.py
|
Niwo1403/personality-test-system_bachelor-thesis
|
81f6bafb594df1daec5a36ecbe1efb59e11dd24d
|
[
"MIT"
] | null | null | null |
from .certificate_manager import CertificateManager
| 26
| 51
| 0.903846
|
79508c090d39550936a41a229f3424e254b00f40
| 22,979
|
py
|
Python
|
chemdataextractor/doc/text.py
|
edbeard/chemdataextractor-csr
|
6ababa9ac515ba5c591cdb781b6500b656e3c683
|
[
"MIT"
] | 3
|
2020-11-26T18:13:56.000Z
|
2021-11-01T18:44:36.000Z
|
chemdataextractor/doc/text.py
|
edbeard/chemdataextractor-csr
|
6ababa9ac515ba5c591cdb781b6500b656e3c683
|
[
"MIT"
] | null | null | null |
chemdataextractor/doc/text.py
|
edbeard/chemdataextractor-csr
|
6ababa9ac515ba5c591cdb781b6500b656e3c683
|
[
"MIT"
] | 1
|
2020-07-08T07:06:04.000Z
|
2020-07-08T07:06:04.000Z
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.doc.text
~~~~~~~~~~~~~~~~~~~~~~~~~~
Text-based document elements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import abstractproperty
import collections
import logging
import re
import six
from ..model import ModelList
from ..parse.context import ContextParser
from ..parse.cem import ChemicalLabelParser, CompoundHeadingParser, CompoundParser, chemical_name
from ..parse.table import CaptionContextParser
from ..parse.figure import ChemSchemDiagParser
from ..parse.ir import IrParser
from ..parse.microscopy import TemParser, SemParser, HaadfStemParser, HrtemParser, CryoemParser
from ..parse.mp import MpParser
from ..parse.tg import TgParser
from ..parse.nmr import NmrParser
from ..parse.uvvis import UvvisParser
from ..nlp.lexicon import ChemLexicon
from ..nlp.cem import CemTagger, IGNORE_PREFIX, IGNORE_SUFFIX, SPECIALS, SPLITS
from ..nlp.abbrev import ChemAbbreviationDetector
from ..nlp.tag import NoneTagger
from ..nlp.pos import ChemCrfPosTagger
from ..nlp.tokenize import ChemSentenceTokenizer, ChemWordTokenizer, regex_span_tokenize
from ..text import CONTROL_RE
from ..utils import memoized_property, python_2_unicode_compatible
from .element import BaseElement
log = logging.getLogger(__name__)
@python_2_unicode_compatible
class BaseText(BaseElement):
"""Abstract base class for a text Document Element."""
def __init__(self, text, word_tokenizer=None, lexicon=None, abbreviation_detector=None, pos_tagger=None, ner_tagger=None, parsers=None, **kwargs):
""""""
if not isinstance(text, six.text_type):
raise TypeError('Text must be a unicode string')
super(BaseText, self).__init__(**kwargs)
self._text = text
self.word_tokenizer = word_tokenizer if word_tokenizer is not None else self.word_tokenizer
self.lexicon = lexicon if lexicon is not None else self.lexicon
self.abbreviation_detector = abbreviation_detector if abbreviation_detector is not None else self.abbreviation_detector
self.pos_tagger = pos_tagger if pos_tagger is not None else self.pos_tagger
self.ner_tagger = ner_tagger if ner_tagger is not None else self.ner_tagger
self.parsers = parsers if parsers is not None else self.parsers
def __repr__(self):
return '%s(id=%r, references=%r, text=%r)' % (self.__class__.__name__, self.id, self.references, self._text)
def __str__(self):
return self._text
@property
def text(self):
"""The raw text string for this passage of text."""
return self._text
@abstractproperty
def word_tokenizer(self):
"""The word tokenizer to use."""
return
@abstractproperty
def lexicon(self):
"""The lexicon to use."""
return
@abstractproperty
def pos_tagger(self):
"""The part of speech tagger use."""
return
@abstractproperty
def ner_tagger(self):
"""The named entity recognition tagger to use."""
return
@abstractproperty
def parsers(self):
"""The parsers to use."""
return
@abstractproperty
def tokens(self):
"""Return a list of tokens."""
return
@abstractproperty
def tags(self):
"""Return a list of tags."""
return
def serialize(self):
"""Convert Text element to python dictionary."""
data = {'type': self.__class__.__name__, 'content': self.text}
return data
def _repr_html_(self):
return self.text
class Text(collections.Sequence, BaseText):
"""A passage of text, comprising one or more sentences."""
sentence_tokenizer = ChemSentenceTokenizer()
word_tokenizer = ChemWordTokenizer()
lexicon = ChemLexicon()
abbreviation_detector = ChemAbbreviationDetector()
pos_tagger = ChemCrfPosTagger() # ChemPerceptronTagger()
ner_tagger = CemTagger()
parsers = []
def __init__(self, text, sentence_tokenizer=None, word_tokenizer=None, lexicon=None, abbreviation_detector=None, pos_tagger=None, ner_tagger=None, parsers=None, **kwargs):
""""""
super(Text, self).__init__(text, word_tokenizer=word_tokenizer, lexicon=lexicon, abbreviation_detector=abbreviation_detector, pos_tagger=pos_tagger, ner_tagger=ner_tagger, parsers=None, **kwargs)
self.sentence_tokenizer = sentence_tokenizer if sentence_tokenizer is not None else self.sentence_tokenizer
def __getitem__(self, index):
return self.sentences[index]
def __len__(self):
return len(self.sentences)
@memoized_property
def sentences(self):
"""Return a list of Sentences that make up this text passage."""
sents = []
spans = self.sentence_tokenizer.span_tokenize(self.text)
for span in spans:
sent = Sentence(
text=self.text[span[0]:span[1]],
start=span[0],
end=span[1],
word_tokenizer=self.word_tokenizer,
lexicon=self.lexicon,
abbreviation_detector=self.abbreviation_detector,
pos_tagger=self.pos_tagger,
ner_tagger=self.ner_tagger,
parsers=self.parsers,
document=self.document
)
sents.append(sent)
return sents
@property
def raw_sentences(self):
"""Return a list of sentence strings that make up this text passage."""
return [sentence.text for sentence in self.sentences]
@property
def tokens(self):
"""Return a list of tokens for each sentence in this text passage."""
return [sent.tokens for sent in self.sentences]
@property
def raw_tokens(self):
"""Return a list of tokens for each sentence in this text passage."""
return [sent.raw_tokens for sent in self.sentences]
@property
def pos_tagged_tokens(self):
"""Return a list of (token, tag) tuples for each sentence in this text passage."""
return [sent.pos_tagged_tokens for sent in self.sentences]
@property
def pos_tags(self):
"""Return a list of part of speech tags for each sentence in this text passage."""
return [sent.pos_tags for sent in self.sentences]
@memoized_property
def unprocessed_ner_tagged_tokens(self):
"""Return a list of unprocessed named entity recognition tags for the tokens in this sentence.
No corrections from abbreviation detection are performed.
"""
return [sent.unprocessed_ner_tagged_tokens for sent in self.sentences]
@memoized_property
def unprocessed_ner_tags(self):
"""Return a list of unprocessed named entity tags for the tokens in this sentence.
No corrections from abbreviation detection are performed.
"""
return [sent.unprocessed_ner_tags for sent in self.sentences]
@property
def ner_tagged_tokens(self):
"""Return a list of (token, tag) tuples for each sentence in this text passage."""
return [sent.ner_tagged_tokens for sent in self.sentences]
@property
def ner_tags(self):
"""Return a list of part of speech tags for each sentence in this text passage."""
return [sent.ner_tags for sent in self.sentences]
@property
def cems(self):
"""Return a list of part of speech tags for each sentence in this text passage."""
return [cem for sent in self.sentences for cem in sent.cems]
@property
def tagged_tokens(self):
"""Return a list of (token, tag) tuples for each sentence in this text passage."""
return [sent.tagged_tokens for sent in self.sentences]
@property
def tags(self):
"""Return a list of tags for each sentence in this text passage."""
return [sent.tags for sent in self.sentences]
@property
def abbreviation_definitions(self):
""""""
return [ab for sent in self.sentences for ab in sent.abbreviation_definitions]
@property
def records(self):
"""Return a list of records for this text passage."""
return ModelList(*[r for sent in self.sentences for r in sent.records])
def __add__(self, other):
if type(self) == type(other):
merged = self.__class__(
text=self.text + other.text,
id=self.id or other.id,
references=self.references + other.references,
sentence_tokenizer=self.sentence_tokenizer,
word_tokenizer=self.word_tokenizer,
lexicon=self.lexicon,
abbreviation_detector=self.abbreviation_detector,
pos_tagger=self.pos_tagger,
ner_tagger=self.ner_tagger,
parsers=self.parsers
)
return merged
return NotImplemented
class Title(Text):
parsers = [CompoundParser()]
def _repr_html_(self):
return '<h1 class="cde-title">' + self.text + '</h1>'
class Heading(Text):
parsers = [CompoundHeadingParser(), ChemicalLabelParser()]
def _repr_html_(self):
return '<h2 class="cde-title">' + self.text + '</h2>'
class Paragraph(Text):
parsers = [CompoundParser(), ChemicalLabelParser(), NmrParser(), IrParser(), UvvisParser(), MpParser(), TgParser(), ContextParser()]
def _repr_html_(self):
return '<p class="cde-paragraph">' + self.text + '</p>'
class Footnote(Text):
parsers = [ContextParser(), CaptionContextParser()]
def _repr_html_(self):
return '<p class="cde-footnote">' + self.text + '</p>'
class Citation(Text):
# No tagging in citations
ner_tagger = NoneTagger()
abbreviation_detector = False
# TODO: Citation parser
# TODO: Store number/label
def _repr_html_(self):
return '<p class="cde-citation">' + self.text + '</p>'
class Caption(Text):
parsers = [CompoundParser(), ChemSchemDiagParser(), TemParser(), SemParser(), HaadfStemParser(), HrtemParser(),
CryoemParser(), ChemicalLabelParser(), CaptionContextParser()]
def _repr_html_(self):
return '<caption class="cde-caption">' + self.text + '</caption>'
class Sentence(BaseText):
"""A single sentence within a text passage."""
word_tokenizer = ChemWordTokenizer()
lexicon = ChemLexicon()
abbreviation_detector = ChemAbbreviationDetector()
pos_tagger = ChemCrfPosTagger() # ChemPerceptronTagger()
ner_tagger = CemTagger()
parsers = []
def __init__(self, text, start=0, end=None, word_tokenizer=None, lexicon=None, abbreviation_detector=None, pos_tagger=None, ner_tagger=None, parsers=None, **kwargs):
super(Sentence, self).__init__(text, word_tokenizer=word_tokenizer, lexicon=lexicon, abbreviation_detector=abbreviation_detector, pos_tagger=pos_tagger, ner_tagger=ner_tagger, parsers=parsers, **kwargs)
#: The start index of this sentence within the text passage.
self.start = start
#: The end index of this sentence within the text passage.
self.end = end if end is not None else len(text)
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._text, self.start, self.end)
@memoized_property
def tokens(self):
"""Return a list of token Spans for this sentence."""
spans = self.word_tokenizer.span_tokenize(self.text)
toks = [Token(
text=self.text[span[0]:span[1]],
start=span[0] + self.start,
end=span[1] + self.start,
lexicon=self.lexicon
) for span in spans]
return toks
@property
def raw_tokens(self):
"""Return a list of token strings that make up this sentence."""
return [token.text for token in self.tokens]
@memoized_property
def pos_tagged_tokens(self):
"""Return a list of part of speech tags for the tokens in this sentence."""
# log.debug('Getting pos tags')
return self.pos_tagger.tag(self.raw_tokens)
@property
def pos_tags(self):
"""Return a list of part of speech tags for the tokens in this sentence."""
return [tag for token, tag in self.pos_tagged_tokens]
@memoized_property
def unprocessed_ner_tagged_tokens(self):
"""Return a list of unprocessed named entity recognition tags for the tokens in this sentence.
No corrections from abbreviation detection are performed.
"""
# log.debug('Getting unprocessed_ner_tags')
return self.ner_tagger.tag(self.pos_tagged_tokens)
@memoized_property
def unprocessed_ner_tags(self):
"""Return a list of unprocessed named entity tags for the tokens in this sentence.
No corrections from abbreviation detection are performed.
"""
return [tag for token, tag in self.unprocessed_ner_tagged_tokens]
@memoized_property
def abbreviation_definitions(self):
"""Return a list of (abbreviation, long, ner_tag) tuples."""
abbreviations = []
if self.abbreviation_detector:
# log.debug('Detecting abbreviations')
ners = self.unprocessed_ner_tags
for abbr_span, long_span in self.abbreviation_detector.detect_spans(self.raw_tokens):
abbr = self.raw_tokens[abbr_span[0]:abbr_span[1]]
long = self.raw_tokens[long_span[0]:long_span[1]]
# Check if long is entirely tagged as one named entity type
long_tags = ners[long_span[0]:long_span[1]]
unique_tags = set([tag[2:] for tag in long_tags if tag is not None])
tag = long_tags[0][2:] if None not in long_tags and len(unique_tags) == 1 else None
abbreviations.append((abbr, long, tag))
return abbreviations
@memoized_property
def ner_tagged_tokens(self):
""""""
return list(zip(self.raw_tokens, self.ner_tags))
@memoized_property
def ner_tags(self):
""""""
# log.debug('Getting ner_tags')
ner_tags = self.unprocessed_ner_tags
abbrev_defs = self.document.abbreviation_definitions if self.document else self.abbreviation_definitions
# Ensure abbreviation entity matches long entity
# TODO: This is potentially a performance bottleneck?
for i in range(0, len(ner_tags)):
for abbr, long, ner_tag in abbrev_defs:
if abbr == self.raw_tokens[i:i+len(abbr)]:
old_ner_tags = ner_tags[i:i+len(abbr)]
ner_tags[i] = 'B-%s' % ner_tag if ner_tag is not None else None
ner_tags[i+1:i+len(abbr)] = ['I-%s' % ner_tag if ner_tag is not None else None] * (len(abbr) - 1)
# Remove ner tags from brackets surrounding abbreviation
if i > 1 and self.raw_tokens[i-1] == '(':
ner_tags[i-1] = None
if i < len(self.raw_tokens) - 1 and self.raw_tokens[i+1] == ')':
ner_tags[i+1] = None
if not old_ner_tags == ner_tags[i:i+len(abbr)]:
log.debug('Correcting abbreviation tag: %s (%s): %s -> %s' % (' '.join(abbr), ' '.join(long), old_ner_tags, ner_tags[i:i+len(abbr)]))
# TODO: Ensure abbreviations in brackets at the end of an entity match are separated and the brackets untagged
# Hydrogen Peroxide (H2O2)
# Tungsten Carbide (WC)
# TODO: Filter off alphanumerics from end (1h) (3) (I)
# May need more intelligent
return ner_tags
@memoized_property
def cems(self):
# log.debug('Getting cems')
spans = []
# print(self.text.encode('utf8'))
for result in chemical_name.scan(self.tagged_tokens):
# parser scan yields (result, startindex, endindex) - we just use the indexes here
tokens = self.tokens[result[1]:result[2]]
start = tokens[0].start
end = tokens[-1].end
# Adjust boundaries to exclude disallowed prefixes/suffixes
currenttext = self.text[start-self.start:end-self.start].lower()
for prefix in IGNORE_PREFIX:
if currenttext.startswith(prefix):
# print('%s removing %s' % (currenttext, prefix))
start += len(prefix)
break
for suffix in IGNORE_SUFFIX:
if currenttext.endswith(suffix):
# print('%s removing %s' % (currenttext, suffix))
end -= len(suffix)
break
# Adjust boundaries to exclude matching brackets at start and end
currenttext = self.text[start-self.start:end-self.start]
for bpair in [('(', ')'), ('[', ']')]:
if len(currenttext) > 2 and currenttext[0] == bpair[0] and currenttext[-1] == bpair[1]:
level = 1
for k, char in enumerate(currenttext[1:]):
if char == bpair[0]:
level += 1
elif char == bpair[1]:
level -= 1
if level == 0 and k == len(currenttext) - 2:
start += 1
end -= 1
break
# If entity has been reduced to nothing by adjusting boundaries, skip it
if start >= end:
continue
currenttext = self.text[start-self.start:end-self.start]
# Do splits
split_spans = []
            comps = list(regex_span_tokenize(currenttext, r'(-|\+|\)?-to-\(?|···|/|\s)'))
if len(comps) > 1:
for split in SPLITS:
if all(re.search(split, currenttext[comp[0]:comp[1]]) for comp in comps):
# print('%s splitting %s' % (currenttext, [currenttext[comp[0]:comp[1]] for comp in comps]))
for comp in comps:
span = Span(text=currenttext[comp[0]:comp[1]], start=start+comp[0], end=start+comp[1])
# print('SPLIT: %s - %s' % (currenttext, repr(span)))
split_spans.append(span)
break
else:
split_spans.append(Span(text=currenttext, start=start, end=end))
else:
split_spans.append(Span(text=currenttext, start=start, end=end))
# Do specials
for split_span in split_spans:
for special in SPECIALS:
m = re.search(special, split_span.text)
if m:
# print('%s special %s' % (split_span.text, m.groups()))
for i in range(1, len(m.groups()) + 1):
span = Span(text=m.group(i), start=split_span.start+m.start(i), end=split_span.start+m.end(i))
# print('SUBMATCH: %s - %s' % (currenttext, repr(span)))
spans.append(span)
break
else:
spans.append(split_span)
return spans
@memoized_property
def tags(self):
"""Return combined POS and NER tags."""
tags = self.pos_tags
for i, tag in enumerate(self.ner_tags):
if tag is not None:
tags[i] = tag
return tags
@property
def tagged_tokens(self):
return list(zip(self.raw_tokens, self.tags))
@property
def records(self):
"""Return a list of records for this sentence."""
compounds = ModelList()
seen_labels = set()
# Ensure no control characters are sent to a parser (need to be XML compatible)
tagged_tokens = [(CONTROL_RE.sub('', token), tag) for token, tag in self.tagged_tokens]
for parser in self.parsers:
for record in parser.parse(tagged_tokens):
p = record.serialize()
if not p: # TODO: Potential performance issues?
continue
# Skip duplicate records
if record in compounds:
continue
# Skip just labels that have already been seen (bit of a hack)
if all(k in {'labels', 'roles'} for k in p.keys()) and set(record.labels).issubset(seen_labels):
continue
seen_labels.update(record.labels)
compounds.append(record)
return compounds
def __add__(self, other):
if type(self) == type(other):
merged = self.__class__(
text=self.text + other.text,
start=self.start,
end=None,
id=self.id or other.id,
references=self.references + other.references,
word_tokenizer=self.word_tokenizer,
lexicon=self.lexicon,
abbreviation_detector=self.abbreviation_detector,
pos_tagger=self.pos_tagger,
ner_tagger=self.ner_tagger,
parsers=self.parsers
)
return merged
return NotImplemented
@python_2_unicode_compatible
class Span(object):
"""A text span within a sentence."""
def __init__(self, text, start, end):
self.text = text
"""The text content of this span."""
self.start = start
"""The start offset of this token in the original text."""
self.end = end
"""The end offset of this token in the original text."""
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self.text, self.start, self.end)
def __str__(self):
return self.text
def __eq__(self, other):
"""Span objects are equal if the source text is equal, and the start and end indices are equal."""
if not isinstance(other, self.__class__):
return False
return self.text == other.text and self.start == other.start and self.end == other.end
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.text, self.start, self.end))
@property
def length(self):
"""The offset length of this span in the original text."""
return self.end - self.start
class Token(Span):
"""A single token within a sentence. Corresponds to a word, character, punctuation etc."""
def __init__(self, text, start, end, lexicon):
""""""
super(Token, self).__init__(text, start, end)
#: The lexicon for this token.
self.lexicon = lexicon
self.lexicon.add(text)
@property
def lex(self):
"""The corresponding Lexeme entry in the Lexicon for this token."""
return self.lexicon[self.text]
| 38.426421
| 210
| 0.614648
|
79508c37e70dea5deda1d32294fd0bd7b2fb884b
| 1,239
|
py
|
Python
|
TextPreprocessor/text_preprocessing.py
|
elenisproject/Newsclinger
|
ac69ceff437ecc234026ded00d60d3d0f0e83a49
|
[
"MIT"
] | 1
|
2021-03-18T11:21:36.000Z
|
2021-03-18T11:21:36.000Z
|
TextPreprocessor/text_preprocessing.py
|
elenisproject/Newsclinger
|
ac69ceff437ecc234026ded00d60d3d0f0e83a49
|
[
"MIT"
] | 34
|
2020-05-11T14:07:27.000Z
|
2020-11-08T15:23:27.000Z
|
TextPreprocessor/text_preprocessing.py
|
elenisproject/Newsclinger
|
ac69ceff437ecc234026ded00d60d3d0f0e83a49
|
[
"MIT"
] | null | null | null |
import pandas as pd
import unicodedata
import string
import json
import sys
sys.path.insert(1, '/Users/elenikaranikola/Desktop/NewsBackend')
from settings import DB_CREDS
from utilities import finalNormalize, readText, writeData
#get all the data from the articles table
sql_command = "SELECT * FROM articles"
df = readText(sql_command)
#save each column in a different variable
id = df['id']
topic = df['topic']
subtopic = df['subtopic']
website = df['website']
article_date = df['article_date']
author = df['author']
url = df['url']
article_body = []
title = []
#for each article preprocess its body
for x in df['article_body']:
final_text = finalNormalize(x)
res = " ".join(final_text)
article_body.append(res)
#for each article preprocess its title
for x in df['title']:
final_title = finalNormalize(x)
res = " ".join(final_title)
title.append(res)
#save all data to a dictionary (avoid shadowing the built-in `dict`)
data = {'id':id, 'topic':topic, 'subtopic':subtopic, 'website': website, 'article_date':article_date, 'author':author, 'title':title, 'article_body':article_body, 'url':url}
#add the dictionary to a dataframe
new_df = pd.DataFrame(data)
#saving the dataframe in a csv file
new_df.to_csv("dependencies/output.csv", index=False)
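As a hedged aside, the two preprocessing loops above can also be written with pandas.Series.apply; this sketch assumes finalNormalize returns an iterable of normalized tokens, as the join above implies.
# Equivalent, more idiomatic variant of the preprocessing loops (illustrative only).
def normalize(text):
    return " ".join(finalNormalize(text))
alt_df = df.copy()
alt_df['article_body'] = df['article_body'].apply(normalize)
alt_df['title'] = df['title'].apply(normalize)
alt_df.to_csv("dependencies/output.csv", index=False)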
| 26.361702
| 173
| 0.726392
|
79508c5e559690c76d42fd8ddb179265c3a47557
| 8,684
|
py
|
Python
|
python_modules/dagster/dagster/core/scheduler/scheduler.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/scheduler/scheduler.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/scheduler/scheduler.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | null | null | null |
import abc
import os
from collections import namedtuple
from dagster import check
from dagster.config import Field
from dagster.config.source import IntSource
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.errors import DagsterError
from dagster.core.host_representation import ExternalSchedule
from dagster.core.instance import DagsterInstance
from dagster.core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
ScheduleInstigatorData,
)
from dagster.serdes import ConfigurableClass
from dagster.seven import get_current_datetime_in_utc
from dagster.utils import mkdir_p
class DagsterSchedulerError(DagsterError):
"""Base class for all Dagster Scheduler errors"""
class DagsterScheduleDoesNotExist(DagsterSchedulerError):
"""Errors raised when fetching a schedule."""
class SchedulerDebugInfo(
namedtuple("SchedulerDebugInfo", "errors scheduler_config_info scheduler_info schedule_storage")
):
def __new__(cls, errors, scheduler_config_info, scheduler_info, schedule_storage):
return super(SchedulerDebugInfo, cls).__new__(
cls,
errors=check.list_param(errors, "errors", of_type=str),
scheduler_config_info=check.str_param(scheduler_config_info, "scheduler_config_info"),
scheduler_info=check.str_param(scheduler_info, "scheduler_info"),
schedule_storage=check.list_param(schedule_storage, "schedule_storage", of_type=str),
)
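A minimal, hedged construction example for SchedulerDebugInfo; every value is a placeholder chosen only to satisfy the type checks above.
debug_info = SchedulerDebugInfo(
    errors=["example: scheduler tick failed"],
    scheduler_config_info="scheduler:\n  class: DagsterDaemonScheduler",
    scheduler_info="DagsterDaemonScheduler",
    schedule_storage=["example serialized schedule state"],
)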
class Scheduler(abc.ABC):
"""Abstract base class for a scheduler. This component is responsible for interfacing with
    an external system such as cron to ensure scheduled, repeated execution according to the schedule definition.
"""
def start_schedule(self, instance, external_schedule):
"""
        Updates the status of the given schedule to `InstigatorStatus.RUNNING` in schedule storage.
        This should not be overridden by subclasses.
Args:
instance (DagsterInstance): The current instance.
external_schedule (ExternalSchedule): The schedule to start
"""
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(external_schedule, "external_schedule", ExternalSchedule)
schedule_state = instance.get_instigator_state(external_schedule.get_external_origin_id())
if external_schedule.get_current_instigator_state(schedule_state).is_running:
raise DagsterSchedulerError(
"You have attempted to start schedule {name}, but it is already running".format(
name=external_schedule.name
)
)
new_instigator_data = ScheduleInstigatorData(
external_schedule.cron_schedule,
get_current_datetime_in_utc().timestamp(),
)
if not schedule_state:
started_schedule = InstigatorState(
external_schedule.get_external_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.RUNNING,
new_instigator_data,
)
instance.add_instigator_state(started_schedule)
else:
started_schedule = schedule_state.with_status(InstigatorStatus.RUNNING).with_data(
new_instigator_data
)
instance.update_instigator_state(started_schedule)
return started_schedule
def stop_schedule(self, instance, schedule_origin_id, external_schedule):
"""
        Updates the status of the given schedule to `InstigatorStatus.STOPPED` in schedule storage.
        This should not be overridden by subclasses.
        Args:
            instance (DagsterInstance): The current instance.
            schedule_origin_id (string): The id of the schedule target to stop running.
            external_schedule (Optional[ExternalSchedule]): The schedule to stop.
        """
check.str_param(schedule_origin_id, "schedule_origin_id")
check.opt_inst_param(external_schedule, "external_schedule", ExternalSchedule)
schedule_state = instance.get_instigator_state(schedule_origin_id)
if (
external_schedule
and not external_schedule.get_current_instigator_state(schedule_state).is_running
) or (schedule_state and not schedule_state.is_running):
raise DagsterSchedulerError(
"You have attempted to stop schedule {name}, but it is already stopped".format(
name=external_schedule.name
)
)
if not schedule_state:
stopped_schedule = InstigatorState(
external_schedule.get_external_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.STOPPED,
ScheduleInstigatorData(
external_schedule.cron_schedule,
),
)
instance.add_instigator_state(stopped_schedule)
else:
stopped_schedule = schedule_state.with_status(InstigatorStatus.STOPPED).with_data(
ScheduleInstigatorData(
cron_schedule=schedule_state.instigator_data.cron_schedule,
)
)
instance.update_instigator_state(stopped_schedule)
return stopped_schedule
@abc.abstractmethod
def debug_info(self):
"""Returns debug information about the scheduler"""
@abc.abstractmethod
def get_logs_path(self, instance, schedule_origin_id):
"""Get path to store logs for schedule
Args:
schedule_origin_id (string): The id of the schedule target to retrieve the log path for
"""
DEFAULT_MAX_CATCHUP_RUNS = 5
class DagsterDaemonScheduler(Scheduler, ConfigurableClass):
"""Default scheduler implementation that submits runs from the `dagster-daemon`
long-lived process. Periodically checks each running schedule for execution times that don't
have runs yet and launches them.
"""
def __init__(
self, max_catchup_runs=DEFAULT_MAX_CATCHUP_RUNS, max_tick_retries=0, inst_data=None
):
self.max_catchup_runs = check.opt_int_param(
max_catchup_runs, "max_catchup_runs", DEFAULT_MAX_CATCHUP_RUNS
)
self.max_tick_retries = check.opt_int_param(max_tick_retries, "max_tick_retries", 0)
self._inst_data = inst_data
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {
"max_catchup_runs": Field(
IntSource,
is_required=False,
default_value=DEFAULT_MAX_CATCHUP_RUNS,
description="""For partitioned schedules, controls the maximum number of past
partitions for each schedule that will be considered when looking for missing
                runs. Generally this parameter will only come into play if the scheduler
falls behind or launches after experiencing downtime. This parameter will not be checked for
schedules without partition sets (for example, schedules created using the @schedule
decorator) - only the most recent execution time will be considered for those schedules.
Note that no matter what this value is, the scheduler will never launch a run from a time
before the schedule was turned on (even if the start_date on the schedule is earlier) - if
you want to launch runs for earlier partitions, launch a backfill.
""",
),
"max_tick_retries": Field(
IntSource,
default_value=0,
is_required=False,
description="For each schedule tick that raises an error, how many times to retry that tick",
),
}
@staticmethod
def from_config_value(inst_data, config_value):
return DagsterDaemonScheduler(inst_data=inst_data, **config_value)
def debug_info(self):
return ""
def wipe(self, instance):
pass
def _get_or_create_logs_directory(self, instance, schedule_origin_id):
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = os.path.join(instance.schedules_directory(), "logs", schedule_origin_id)
if not os.path.isdir(logs_directory):
mkdir_p(logs_directory)
return logs_directory
def get_logs_path(self, instance, schedule_origin_id):
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(schedule_origin_id, "schedule_origin_id")
logs_directory = self._get_or_create_logs_directory(instance, schedule_origin_id)
return os.path.join(logs_directory, "scheduler.log")
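A hedged sketch showing how the config surface declared above maps onto construction through from_config_value; the numbers are arbitrary examples.
# Illustrative only: from_config_value forwards the validated config dict to __init__.
scheduler = DagsterDaemonScheduler.from_config_value(
    inst_data=None,
    config_value={"max_catchup_runs": 10, "max_tick_retries": 1},
)
assert scheduler.max_catchup_runs == 10
assert scheduler.max_tick_retries == 1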
| 38.941704
| 109
| 0.683441
|
79508c806aae03e4e7c20a8273b6fef5ebfb0adc
| 481
|
py
|
Python
|
setup.py
|
allbuttonspressed/pymeta2
|
f542f989d4edd9ad6b4a8dace9245b4df2cea453
|
[
"MIT"
] | 2
|
2017-09-07T17:14:06.000Z
|
2018-05-22T19:53:03.000Z
|
setup.py
|
allbuttonspressed/pymeta2
|
f542f989d4edd9ad6b4a8dace9245b4df2cea453
|
[
"MIT"
] | null | null | null |
setup.py
|
allbuttonspressed/pymeta2
|
f542f989d4edd9ad6b4a8dace9245b4df2cea453
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
DESCRIPTION = 'Parser generator'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except IOError:
    pass
setup(name='pymeta2',
packages=find_packages(exclude=('tests', 'tests.*')),
author='Waldemar Kornewald',
url='http://www.allbuttonspressed.com/projects/pymeta2',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
install_requires=[],
)
| 24.05
| 62
| 0.694387
|
79508cadf4c8dff73c14afdd0269c29cd09fccc9
| 6,003
|
py
|
Python
|
op2d/history.py
|
op2-project/op2-daemon
|
fb7878b3e7da44c2f639fdd9e09f30ca3fddf6cc
|
[
"FTL"
] | 10
|
2015-04-07T19:13:25.000Z
|
2020-10-04T20:17:39.000Z
|
op2d/history.py
|
op2-project/op2-daemon
|
fb7878b3e7da44c2f639fdd9e09f30ca3fddf6cc
|
[
"FTL"
] | null | null | null |
op2d/history.py
|
op2-project/op2-daemon
|
fb7878b3e7da44c2f639fdd9e09f30ca3fddf6cc
|
[
"FTL"
] | null | null | null |
import bisect
import cPickle as pickle
import re
from application.notification import IObserver, NotificationCenter
from application.python import Null
from application.python.types import Singleton
from datetime import date
from sipsimple.account import BonjourAccount
from sipsimple.threading import run_in_thread
from sipsimple.util import ISOTimestamp
from zope.interface import implements
from op2d.resources import ApplicationData
__all__ = ['HistoryManager']
class HistoryManager(object):
__metaclass__ = Singleton
implements(IObserver)
history_size = 20
def start(self):
try:
data = pickle.load(open(ApplicationData.get('calls_history')))
if not isinstance(data, list) or not all(isinstance(item, HistoryEntry) and item.text for item in data):
raise ValueError("invalid save data")
except Exception:
self.calls = []
else:
self.calls = data[-self.history_size:]
notification_center = NotificationCenter()
notification_center.add_observer(self, name='SIPSessionDidEnd')
notification_center.add_observer(self, name='SIPSessionDidFail')
def stop(self):
notification_center = NotificationCenter()
notification_center.remove_observer(self, name='SIPSessionDidEnd')
notification_center.remove_observer(self, name='SIPSessionDidFail')
@run_in_thread('file-io')
def save(self):
with open(ApplicationData.get('calls_history'), 'wb+') as history_file:
pickle.dump(self.calls, history_file)
def handle_notification(self, notification):
handler = getattr(self, '_NH_%s' % notification.name, Null)
handler(notification)
def _NH_SIPSessionDidEnd(self, notification):
if notification.sender.account is BonjourAccount():
return
session = notification.sender
entry = HistoryEntry.from_session(session)
bisect.insort(self.calls, entry)
self.calls = self.calls[-self.history_size:]
self.save()
def _NH_SIPSessionDidFail(self, notification):
if notification.sender.account is BonjourAccount():
return
session = notification.sender
entry = HistoryEntry.from_session(session)
if session.direction == 'incoming':
if notification.data.code != 487 or notification.data.failure_reason != 'Call completed elsewhere':
entry.failed = True
else:
if notification.data.code == 0:
entry.reason = 'Internal Error'
elif notification.data.code == 487:
entry.reason = 'Cancelled'
else:
entry.reason = notification.data.reason or notification.data.failure_reason
entry.failed = True
bisect.insort(self.calls, entry)
self.calls = self.calls[-self.history_size:]
self.save()
class HistoryEntry(object):
phone_number_re = re.compile(r'^(?P<number>(0|00|\+)[1-9]\d{7,14})@')
def __init__(self, direction, name, uri, account_id, call_time, duration, failed=False, reason=None):
self.direction = direction
self.name = name
self.uri = uri
self.account_id = account_id
self.call_time = call_time
self.duration = duration
self.failed = failed
self.reason = reason
self.text = self._generate_text()
def __reduce__(self):
return (self.__class__, (self.direction, self.name, self.uri, self.account_id, self.call_time, self.duration, self.failed, self.reason))
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def __lt__(self, other):
return self.call_time < other.call_time
def __le__(self, other):
return self.call_time <= other.call_time
def __gt__(self, other):
return self.call_time > other.call_time
def __ge__(self, other):
return self.call_time >= other.call_time
def _generate_text(self):
result = unicode(self.name or self.uri)
if self.call_time:
call_date = self.call_time.date()
today = date.today()
days = (today - call_date).days
if call_date == today:
result += self.call_time.strftime(" at %H:%M")
elif days == 1:
result += self.call_time.strftime(" Yesterday at %H:%M")
elif days < 7:
result += self.call_time.strftime(" on %A")
elif call_date.year == today.year:
result += self.call_time.strftime(" on %B %d")
else:
result += self.call_time.strftime(" on %Y-%m-%d")
if self.duration:
seconds = int(self.duration.total_seconds())
if seconds >= 3600:
result += """ (%dh%02d'%02d")""" % (seconds / 3600, (seconds % 3600) / 60, seconds % 60)
else:
result += """ (%d'%02d")""" % (seconds / 60, seconds % 60)
elif self.reason:
result += ' (%s)' % self.reason.title()
return result
@classmethod
def from_session(cls, session):
if session.start_time is None and session.end_time is not None:
            # Session may have ended before it fully started
session.start_time = session.end_time
call_time = session.start_time or ISOTimestamp.now()
if session.start_time and session.end_time:
duration = session.end_time - session.start_time
else:
duration = None
remote_uri = '%s@%s' % (session.remote_identity.uri.user, session.remote_identity.uri.host)
match = cls.phone_number_re.match(remote_uri)
if match:
remote_uri = match.group('number')
display_name = session.remote_identity.display_name
return cls(session.direction, display_name, remote_uri, unicode(session.account.id), call_time, duration)
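A hedged, Python 2 style sketch of how the constructor and _generate_text above behave; the identities, time and duration are invented.
# Illustrative only; assumes a call made today that lasted 3 minutes 25 seconds.
from datetime import timedelta
entry = HistoryEntry(direction='outgoing', name=u'Alice', uri=u'alice@example.com',
                     account_id=u'bob@example.com', call_time=ISOTimestamp.now(),
                     duration=timedelta(minutes=3, seconds=25))
print(entry.text)   # e.g. u'Alice at 14:05 (3\'25")'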
| 37.285714
| 144
| 0.634016
|
79508db24ae37e9216e93a0f44cf1ae99984db2c
| 3,056
|
py
|
Python
|
hipo_rank/similarities/cos.py
|
mukul-mehta/HipoRank
|
b44490c4f1f3e0ff8015e3eb0f2b1955947dfe80
|
[
"MIT"
] | 14
|
2021-02-04T16:05:57.000Z
|
2022-03-29T08:31:59.000Z
|
hipo_rank/similarities/cos.py
|
mukul-mehta/HipoRank
|
b44490c4f1f3e0ff8015e3eb0f2b1955947dfe80
|
[
"MIT"
] | 2
|
2021-07-02T10:44:09.000Z
|
2022-03-23T12:56:45.000Z
|
hipo_rank/similarities/cos.py
|
mukul-mehta/HipoRank
|
b44490c4f1f3e0ff8015e3eb0f2b1955947dfe80
|
[
"MIT"
] | 3
|
2021-06-10T15:41:19.000Z
|
2021-11-11T16:21:43.000Z
|
import torch
import numpy as np
from hipo_rank import Embeddings, SentenceEmbeddings, SectionEmbedding, \
PairIndices, SentenceSimilarities, SectionSimilarities, Similarities
from typing import List, Tuple
from numpy import ndarray
class CosSimilarity:
def __init__(self, threshold = 0):
self.threshold = threshold
def _compute_similarities(self, embeds1: ndarray, embeds2: ndarray) -> ndarray:
embeds1 = torch.from_numpy(embeds1)
embeds2 = torch.from_numpy(embeds2)
similarities = torch.cosine_similarity(embeds1, embeds2).numpy()
similarities = similarities / 2 + 0.5 # normalize to a range [0,1]
similarities = np.clip(similarities, self.threshold, 1)
return similarities
def _get_pairwise_similarities(self, embeds: ndarray) -> Tuple[ndarray, PairIndices]:
pair_indices = self._get_pair_indices(len(embeds))
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
similarities = self._compute_similarities(embeds[pair_indices_i], embeds[pair_indices_j])
return similarities, pair_indices
def _get_pair_indices(self, num_nodes: int) -> PairIndices:
pair_indices = []
for i in range(num_nodes):
for j in range(i+1, num_nodes):
pair_indices += [(i, j)]
return pair_indices
def get_similarities(self, embeds: Embeddings):
sent_to_sent = []
for sent_embeds in embeds.sentence:
id = sent_embeds.id
e = sent_embeds.embeddings
similarities, pair_indices = self._get_pairwise_similarities(e)
directions = ["undirected" for _ in pair_indices]
sent_to_sent += [SentenceSimilarities(id, similarities, pair_indices, directions)]
sent_to_sect = []
sect_embeds = np.stack([s.embedding for s in embeds.section])
num_sect = len(sect_embeds)
for sent_embeds in embeds.sentence:
# TODO: factor out pair indices for one and two matrices
pair_indices = []
num_sent = len(sent_embeds.embeddings)
for i in range(num_sent):
for j in range(num_sect):
pair_indices += [(i,j)]
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
embeds1 = sent_embeds.embeddings[pair_indices_i]
embeds2 = sect_embeds[pair_indices_j]
similarities = self._compute_similarities(embeds1, embeds2)
id = sent_embeds.id
directions = ["undirected" for _ in pair_indices]
sent_to_sect += [SentenceSimilarities(id, similarities, pair_indices, directions)]
similarities, pair_indices = self._get_pairwise_similarities(sect_embeds)
directions = ["undirected" for _ in pair_indices]
sect_to_sect = SectionSimilarities(similarities, pair_indices, directions)
return Similarities(sent_to_sent, sect_to_sect, sent_to_sect)
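A hedged, minimal check of the rescaling above: orthogonal embeddings have cosine 0, which maps to 0.5 after the shift into [0, 1].
if __name__ == "__main__":
    sim = CosSimilarity(threshold=0)
    a = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32)
    b = np.array([[0.0, 1.0], [0.0, 1.0]], dtype=np.float32)
    print(sim._compute_similarities(a, b))  # approximately [0.5, 1.0]
    print(sim._get_pair_indices(3))         # [(0, 1), (0, 2), (1, 2)]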
| 38.2
| 97
| 0.664267
|
79508ddc6c4ad9a9ab02a5f69ad9e3253dd0f165
| 20,618
|
py
|
Python
|
test/functional/feature_pruning.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 3
|
2020-04-24T08:03:09.000Z
|
2020-06-24T00:53:03.000Z
|
test/functional/feature_pruning.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 8
|
2021-02-06T16:15:10.000Z
|
2022-02-20T20:08:45.000Z
|
test/functional/feature_pruning.py
|
eleccoin/eleccoin
|
95f86f28019fe8666816e75e1dc82f1edeee3b31
|
[
"MIT"
] | 7
|
2020-02-26T22:08:49.000Z
|
2021-02-06T12:35:40.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock
from test_framework.script import (
CScript,
OP_NOP,
OP_RETURN,
)
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
# Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
# followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
# transaction but is consensus valid.
    # Set the nTime if this is the first time this function has been called.
    # A static variable ensures that time is monotonically increasing and is therefore
    # different for each block created => blockhash is unique.
    if "nTime" not in mine_large_blocks.__dict__:
        mine_large_blocks.nTime = 0
# Get the block parameters for the first block
big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
best_block = node.getblock(node.getbestblockhash())
height = int(best_block["height"]) + 1
mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
previousblockhash = int(best_block["hash"], 16)
for _ in range(n):
# Build the coinbase transaction (with large scriptPubKey)
coinbase_tx = create_coinbase(height)
coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
coinbase_tx.vout[0].scriptPubKey = big_script
coinbase_tx.rehash()
# Build the block
block = CBlock()
block.nVersion = best_block["version"]
block.hashPrevBlock = previousblockhash
block.nTime = mine_large_blocks.nTime
block.nBits = int('207fffff', 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Submit to the node
node.submitblock(block.serialize().hex())
previousblockhash = block.sha256
height += 1
mine_large_blocks.nTime += 1
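The "static variable" trick used above stores state as a function attribute; a hedged, generic sketch of that pattern, independent of any test framework API, is:
def next_nonce():
    # A function attribute persists across calls, much like a C static variable.
    if "value" not in next_nonce.__dict__:
        next_nonce.value = 0
    next_nonce.value += 1
    return next_nonce.value
assert next_nonce() == 1
assert next_nonce() == 2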
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(EleccoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.supports_cli = False
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [
self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000"],
["-maxreceivebuffer=20000"],
["-prune=550"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
self.connect_nodes(0, 4)
self.sync_blocks(self.nodes[0:5])
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
self.sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
mine_large_blocks(self.nodes[0], 645)
self.sync_blocks(self.nodes[0:5])
def test_height_min(self):
assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
assert_greater_than(550, usage)
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for _ in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
self.disconnect_nodes(0, 1)
self.disconnect_nodes(0, 2)
# Mine 24 blocks in node 1
mine_large_blocks(self.nodes[1], 24)
# Reorg back with 25 block chain from node 0
mine_large_blocks(self.nodes[0], 25)
# Create connections in the order so both nodes can see the reorg at the same time
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
self.forkheight = height - 287
self.forkhash = self.nodes[1].getblockhash(self.forkheight)
self.log.info("Invalidating block %s at height %d" % (self.forkhash, self.forkheight))
self.nodes[1].invalidateblock(self.forkhash)
# We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
assert self.nodes[1].getblockcount() == self.forkheight - 1
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Disconnect node1 and generate the new chain
self.disconnect_nodes(0, 1)
self.disconnect_nodes(1, 2)
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high because of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more large blocks so we have requisite history")
mine_large_blocks(self.nodes[0], 220)
self.sync_blocks(self.nodes[0:3], timeout=120)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
assert_greater_than(550, usage)
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
self.nodes[2].verifychain(checklevel=4, nblocks=0)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
# and only its other 299 small and 220 large blocks are in the block files after it,
# it is expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
# create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
# Wait for Node 2 to reorg to proper height
self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
# Verify we can now have the data for a block previously pruned
assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
# now re-start in manual pruning mode
self.restart_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index):
ret = node.pruneblockchain(height=height(index))
assert_equal(ret, node.getblockchaininfo()['pruneheight'])
def has_block(index):
return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# Save block transaction count before pruning, assert value
block1_details = node.getblock(node.getblockhash(1))
assert_equal(block1_details["nTx"], len(block1_details["tx"]))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# Pruned block should still know the number of transactions
assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
# negative heights should raise an exception
assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
assert has_block(0), "blk00000.dat is missing when should still be there"
# Does nothing
node.pruneblockchain(height(0))
assert has_block(0), "blk00000.dat is missing when should still be there"
# height=500 should prune first file
prune(500)
assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
assert has_block(1), "blk00001.dat is missing when should still be there"
# height=650 should prune second file
prune(650)
assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000)
assert has_block(2), "blk00002.dat is still there, should be pruned by now"
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
# stop node, start back up with auto-prune at 550 MiB, make sure still runs
self.restart_node(node_number, extra_args=["-prune=550"])
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.restart_node(2, extra_args=["-prune=550"])
self.log.info("Success")
# check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
self.connect_nodes(0, 5)
nds = [self.nodes[0], self.nodes[5]]
self.sync_blocks(nds, wait=5, timeout=300)
self.restart_node(5, extra_args=["-prune=550"]) # restart to trigger rescan
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space")
self.log.info("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() # 1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
self.reorg_test() # (1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
| 44.821739
| 140
| 0.611796
|
79508eb4f80c4e5e06b5a4dce7a825505db7af63
| 542
|
py
|
Python
|
odoo-13.0/addons/l10n_lu/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/l10n_lu/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/l10n_lu/__init__.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, SUPERUSER_ID
from . import models
def _post_init_hook(cr, registry):
_preserve_tag_on_taxes(cr, registry)
env = api.Environment(cr, SUPERUSER_ID, {})
env.ref('l10n_lu.lu_2011_chart_1').process_coa_translations()
def _preserve_tag_on_taxes(cr, registry):
from odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes
preserve_existing_tags_on_taxes(cr, registry, 'l10n_lu')
| 33.875
| 89
| 0.771218
|