Dataset schema (29 columns; "nullable" marks columns that contain null values):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Records (one per source file; each record lists its repository metadata, then the full file content, then per-file line statistics):
hexsha: 1750970ec0c11776d71c7f6ddfc983ba82d173dd | size: 20,546 | ext: py | lang: Python
max_stars: ml/ml_server.py @ DavidThe4sian/marvin (1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6), licenses ["MIT"], stars 4 (2021-05-14T02:23:52.000Z to 2021-12-28T10:33:18.000Z)
max_issues: ml/ml_server.py @ DavidThe4sian/marvin (1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6), licenses ["MIT"], issues null
max_forks: ml/ml_server.py @ DavidThe4sian/marvin (1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6), licenses ["MIT"], forks 3 (2021-05-31T06:38:59.000Z to 2021-09-29T02:44:22.000Z)
content (ml/ml_server.py):
# import flask related modules
from flask import Flask, jsonify, request, render_template
from flask_cors import CORS
# basic imports
import json
import sys
import os
# Pytorch imports
import torch
from torchtext.data.utils import get_tokenizer
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelWithLMHead, AutoConfig, HfArgumentParser
# Joint Model imports
from jointclassifier.joint_args import ModelArguments, DataTrainingArguments, TrainingArguments
from jointclassifier.joint_dataloader import load_dataset
from jointclassifier.joint_trainer import JointTrainer
from jointclassifier.single_trainer import SingleTrainer
from jointclassifier.joint_model_v1 import JointSeqClassifier
#Utils and constants
from constants import MODEL_PATHS
from utils import get_buckets, bucket_match, sort_results, filter_results
import openai
import argparse
app = Flask(__name__)
CORS(app)
# def load_models(mode):
# global classifier_tokenizer, classifier_trainer, classifier_model, transfer_model, transfer_tokenizer, transfer_model_shake, transfer_model_abs, transfer_model_wiki
# if mode in ['micro-formality','micro-joint','macro-shakespeare']:
# transfer_model_shake = None
# transfer_model_abs = None
# transfer_model_wiki = None
# mode_paths = MODEL_PATHS[mode]
# model_args = ModelArguments(
# model_name_or_path=mode_paths['classifier_name'],
# model_nick=mode_paths['classifier_nick'],
# cache_dir="./models/cache"
# )
# data_args = DataTrainingArguments(
# max_seq_len=64,
# task=mode_paths['classifier_task']
# )
# training_args = TrainingArguments(
# output_dir = mode_paths['classifier'],
# train_jointly= True
# )
# idx_to_classes = mode_paths['idx_to_classes']
# label_dims = mode_paths['label_dims']
# classifier_model = JointSeqClassifier.from_pretrained(
# training_args.output_dir,
# tasks=data_args.task.split('+'),
# model_args=model_args,
# task_if_single=None,
# joint = training_args.train_jointly,
# label_dims=label_dims
# )
# classifier_trainer = JointTrainer(
# [training_args,model_args, data_args],
# classifier_model, idx_to_classes = idx_to_classes
# )
# classifier_tokenizer = AutoTokenizer.from_pretrained(
# model_args.model_name_or_path,
# cache_dir=model_args.cache_dir,
# model_max_length = data_args.max_seq_len
# )
# transfer_tokenizer = AutoTokenizer.from_pretrained(mode_paths['transfer_name'])
# transfer_model = AutoModelWithLMHead.from_pretrained(mode_paths['transfer'])
# elif mode in ['macro-binary']:
# classifier_model = None
# transfer_model = None
# mode_paths = MODEL_PATHS[mode]
# transfer_tokenizer = AutoTokenizer.from_pretrained(mode_paths['transfer_name'])
# transfer_model_shake = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_shake'])
# transfer_model_abs = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_abs'])
# transfer_model_wiki = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_wiki'])
def load_models(modes):
global classifier_tokenizer, classifier_trainers, classifier_models, transfer_models, transfer_tokenizer
classifier_models= {}
classifier_trainers = {}
transfer_models = {}
transfer_tokenizer = AutoTokenizer.from_pretrained(MODEL_PATHS['common']['transfer_name'], model_max_length=64, cache_dir="./models/cache")
classifier_tokenizer = AutoTokenizer.from_pretrained(MODEL_PATHS['common']['classifier_name'], model_max_length=64, cache_dir="./models/cache")
for mode in modes:
if mode in ['micro-formality','macro-shakespeare']:
mode_paths = MODEL_PATHS[mode]
model_args = ModelArguments(
model_name_or_path=mode_paths['classifier_name'],
model_nick=mode_paths['classifier_nick'],
cache_dir="./models/cache"
)
data_args = DataTrainingArguments(
max_seq_len=64,
task=mode_paths['classifier_task']
)
training_args = TrainingArguments(
output_dir = mode_paths['classifier'],
train_jointly= True
)
idx_to_classes = mode_paths['idx_to_classes']
label_dims = mode_paths['label_dims']
classifier_models[mode] = JointSeqClassifier.from_pretrained(
training_args.output_dir,
tasks=data_args.task.split('+'),
model_args=model_args,
task_if_single=None,
joint = training_args.train_jointly,
label_dims=label_dims
)
classifier_trainers[mode] = JointTrainer(
[training_args,model_args, data_args],
classifier_models[mode], idx_to_classes = idx_to_classes
)
transfer_models[mode] = AutoModelWithLMHead.from_pretrained(mode_paths['transfer'])
elif mode in ['macro-binary']:
mode_paths = MODEL_PATHS[mode]
transfer_models[mode+"-shake"] = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_shake'])
transfer_models[mode+"-abs"] = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_abs'])
transfer_models[mode+"-wiki"] = AutoModelWithLMHead.from_pretrained(mode_paths['transfer_wiki'])
elif mode in ['micro-joint']:
mode_paths = MODEL_PATHS[mode]
transfer_models[mode] = AutoModelWithLMHead.from_pretrained(mode_paths['transfer'])
@app.route("/hello")
def hello():
res = {
"world": 42,
"app": "ml"
}
return res
@app.route("/swap_models", methods=['POST'])
def swap_models():
mode = request.args.get('mode', type = str)
print(mode)
try:
        load_models([mode])  # load_models() expects a list of mode names
except Exception as e:
print(e)
return {'message' : 'Models Swap Failure! :('}, 500
return {'message' : 'Models Swap Success! :)'}, 200
@app.route('/classification', methods = ['GET'])
def get_joint_classify_and_salience():
    '''
    Inputs:
        The request is assumed to carry the query parameters
        {text: "some text", mode: "<mode name>"}.
    Results:
        Runs the ML classification model on the text.
    Returns:
        res: a dict with information on the classification and on
        input salience weights. It has a key 'tokens' holding an
        array of the tokenized input text, plus one key per
        classification task. Each task entry is itself a dict with
        the predicted class, the probability of that class, and a
        salience score for each token of the tokenized input.
        A sketched client call follows this function.
    '''
# Get text input from request
text = request.args.get('text', type = str)
text = text.strip()
lower = text.lower()
mode = request.args.get('mode', type = str)
tokens = []
sentence_seen = 0
joint_tokens = classifier_tokenizer.convert_ids_to_tokens(classifier_tokenizer.encode(lower))[1:-1]
for token in joint_tokens:
        # Handle the case where the tokenizer splits a suffix into its own token
if len(token) > 2:
if token[:2] == '##':
token = token[2:]
occ = lower[sentence_seen:].find(token)
start = occ + sentence_seen
end = start + len(token)
adj_len = len(token)
sentence_seen = sentence_seen + adj_len + occ
tokens.append({'text' : text[start:end], 'start' : start, 'end' : end})
if mode=='micro-joint':
res = classifier_trainers['micro-formality'].predict_for_sentence(lower, classifier_tokenizer, salience=True)
else:
res = classifier_trainers[mode].predict_for_sentence(lower, classifier_tokenizer, salience=True)
res['tokens'] = tokens
return res, 200
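# Hedged usage sketch (not part of the original file): how a client might call the
# /classification endpoint above. The host and port mirror app.run() at the bottom of
# this file; the exact keys inside each per-task dict come from JointTrainer and are
# assumptions here, not guarantees.
#
#   import requests
#   resp = requests.get(
#       "http://localhost:5001/classification",
#       params={"text": "hey, what's up?", "mode": "micro-formality"},
#   )
#   data = resp.json()  # expected: {'tokens': [...], '<task>': {predicted class, probability, salience}}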
@app.route('/transfer', methods = ['GET'])
def get_transfer():
# Get text input from request
text = request.args.get('text', type = str)
mode = request.args.get('mode', type = str)
controls = request.args.get('controls', type = str)
text = text.strip()
# lower = text.lower()
lower = text
controls = json.loads(controls)
print(controls)
controls['suggestions'] = int(min(5,max(1,float(controls['suggestions']))))
if mode=="micro-formality":
classifier_output = classifier_trainers[mode].predict_for_sentence(lower, classifier_tokenizer, salience=False)
input_bucket = get_buckets(float(classifier_output['formality']['prob']), 'formality')
output_bucket = ['low', 'mid', 'high'][int(controls['formality'])]
transfer_input = "transfer: "+lower+' | input: '+input_bucket + ' | output: '+output_bucket
t = transfer_tokenizer(transfer_input, return_tensors='pt')
gen = transfer_models[mode].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=15,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=5,
diversity_penalty=0.5,
# num_return_sequences=int(controls['suggestions'])
num_return_sequences=10
)
transfers = transfer_tokenizer.batch_decode(gen, skip_special_tokens=True)
res = {
'input' : {
'text' : text,
'probs' : {
'formality' : classifier_output['formality']['prob']
},
},
"goal" : f"Formality : {output_bucket}",
}
suggestions = []
for transfer in transfers:
cls_opt = classifier_trainers[mode].predict_for_sentence(transfer, classifier_tokenizer, salience=False)
temp = {
'text' : transfer,
'probs' : {
'formality' : cls_opt['formality']['prob']
}
}
suggestions.append(temp)
suggestions = filter_results(suggestions, ['formality'], [output_bucket])
suggestions = sort_results(suggestions, ['formality'], [output_bucket])
res['suggestions'] = suggestions[:int(controls['suggestions'])]
if output_bucket=='high' and server_args.openai:
oai = get_openai_result(text)
            cls_opt = classifier_trainers[mode].predict_for_sentence(oai, classifier_tokenizer, salience=False)  # score the OpenAI suggestion itself, not the last beam candidate
temp = {
'text' : oai,
'probs' : {
'formality' : cls_opt['formality']['prob']
}
}
res['openai'] = temp
else:
res['openai'] = {}
elif mode=="macro-shakespeare":
classifier_output = classifier_trainers[mode].predict_for_sentence(lower, classifier_tokenizer, salience=False)
input_bucket = get_buckets(float(classifier_output['shakespeare']['prob']), 'shakespeare')
output_bucket = ['low', 'mid', 'high'][int(controls['shakespeare'])]
transfer_input = "transfer: "+lower+' | input: '+input_bucket + ' | output: '+output_bucket
t = transfer_tokenizer(transfer_input, return_tensors='pt')
gen = transfer_models[mode].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=15,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=5,
diversity_penalty=0.5,
# num_return_sequences=int(controls['suggestions'])
num_return_sequences=10
)
transfers = transfer_tokenizer.batch_decode(gen, skip_special_tokens=True)
res = {
'input' : {
'text' : text,
'probs' : {
'shakespeare' : classifier_output['shakespeare']['prob']
},
},
"goal" : f"Shakespeare : {output_bucket}",
"suggestions":[],
"openai":{}
}
suggestions = []
for transfer in transfers:
cls_opt = classifier_trainers[mode].predict_for_sentence(transfer, classifier_tokenizer, salience=False)
temp = {
'text' : transfer,
'probs' : {
'shakespeare' : cls_opt['shakespeare']['prob']
}
}
suggestions.append(temp)
suggestions = filter_results(suggestions, ['shakespeare'], [output_bucket])
suggestions = sort_results(suggestions, ['shakespeare'], [output_bucket])
res['suggestions'] = suggestions[:int(controls['suggestions'])]
elif mode=="micro-joint":
classifier_output = classifier_trainers['micro-formality'].predict_for_sentence(lower, classifier_tokenizer, salience=False)
input_bucket_f = get_buckets(float(classifier_output['formality']['prob']), 'formality')
input_bucket_e = get_buckets(float(classifier_output['emo']['prob']), 'emo')
output_bucket_f = ['low', 'mid', 'high'][int(controls['formality'])]
output_bucket_e = ['low', 'mid', 'high'][int(controls['emo'])]
transfer_input = 'transfer: ' + lower + ' | input formality: '+input_bucket_f + ' | input emotion: '+input_bucket_e +' | output formality: '+output_bucket_f +' | output emotion: '+output_bucket_e
print('\n\n',transfer_input,'\n\n')
t = transfer_tokenizer(transfer_input, return_tensors='pt')
gen = transfer_models[mode].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=15,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=5,
diversity_penalty=0.5,
num_return_sequences=10
# num_return_sequences=int(controls['suggestions'])
)
transfers = transfer_tokenizer.batch_decode(gen, skip_special_tokens=True)
res = {
'input' : {
'text' : text,
'probs' : {
'formality' : classifier_output['formality']['prob'],
'emo' : classifier_output['emo']['prob']
},
},
"goal" : f"Formality : {output_bucket_f}; Emotion : {output_bucket_e}",
"suggestions":[],
"openai":{}
}
suggestions = []
for transfer in transfers:
cls_opt = classifier_trainers['micro-formality'].predict_for_sentence(transfer, classifier_tokenizer, salience=False)
temp = {
'text' : transfer,
'probs' : {
'formality' : cls_opt['formality']['prob'],
'emo' : cls_opt['emo']['prob']
}
}
suggestions.append(temp)
suggestions = filter_results(suggestions, ['formality','emo'], [output_bucket_f, output_bucket_e])
suggestions = sort_results(suggestions, ['formality','emo'], [output_bucket_f, output_bucket_e])
res['suggestions'] = suggestions[:int(controls['suggestions'])]
elif mode=="macro-binary":
transfer_input = 'transfer: ' + lower
print('\n\n',transfer_input,'\n\n')
t = transfer_tokenizer(transfer_input, return_tensors='pt')
if int(controls['macro']) == 0:
gen = transfer_models[mode+'-wiki'].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=12,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=3,
diversity_penalty=0.5,
num_return_sequences=int(controls['suggestions'])
)
elif int(controls['macro']) == 1:
gen = transfer_models[mode+'-shake'].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=12,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=3,
diversity_penalty=0.5,
num_return_sequences=int(controls['suggestions'])
)
elif int(controls['macro']) == 2:
gen = transfer_models[mode+'-abs'].generate(input_ids= t.input_ids, attention_mask = t.attention_mask, max_length=70,
num_beams=12,
# early_stopping=True,
encoder_no_repeat_ngram_size=5,
no_repeat_ngram_size=3,
num_beam_groups=3,
diversity_penalty=0.5,
num_return_sequences=int(controls['suggestions'])
)
transfers = transfer_tokenizer.batch_decode(gen, skip_special_tokens=True)
res = {
'input' : {
'text' : text,
},
"goal" : ["Wikipedia", "Shakespeare", "Scientific Abstract"][int(controls['macro'])],
"suggestions":[],
"openai":{}
}
for transfer in transfers:
temp = {
'text' : transfer,
}
res['suggestions'].append(temp)
return res, 200
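# Hedged usage sketch (not part of the original file): an example /transfer request.
# 'controls' is passed as a JSON string; the keys shown ('suggestions', 'formality')
# are the ones this handler reads in micro-formality mode, where 2 selects the 'high'
# output bucket. Host and port mirror app.run() below.
#
#   import json, requests
#   resp = requests.get(
#       "http://localhost:5001/transfer",
#       params={
#           "text": "hey, what's up?",
#           "mode": "micro-formality",
#           "controls": json.dumps({"suggestions": 3, "formality": 2}),
#       },
#   )
#   # resp.json() contains 'input', 'goal', 'suggestions' and, when enabled, 'openai'.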
def load_openai_key():
with open("./key.txt") as fob:
openai.api_key = fob.read().strip()
def get_openai_result(text):
prompt = "Plain Language: what're u doin?\nFormal Language: What are you doing?\nPlain Language: what's up?\nFormal Language: What is up?\nPlain Language: i wanna eat ice cream today!\nFormal Language: I want to eat ice cream today.\nPlain Language: wtf is his problem?\nFormal Language: What is his issue?\nPlain Language: i feel bummed about the store shutting down.\nFormal Language: I feel unhappy about the store closing.\nPlain Language: "
prompt = prompt + text + "\nFormal Language:"
res = openai.Completion.create(
engine="davinci",
prompt= prompt,
max_tokens=64,
temperature=0.15,
stop="\n"
)
return res.choices[0].text.strip()
if __name__ == '__main__':
load_models(['micro-formality','macro-shakespeare','micro-joint','macro-binary'])
# print(transfer_models.keys())
parser = argparse.ArgumentParser()
    parser.add_argument('--openai', help='Use the OpenAI API or not', action='store_true')
global server_args
server_args = parser.parse_args()
    if server_args.openai:
load_openai_key()
app.run(host="0.0.0.0", port=5001)
avg_line_length: 43.995717 | max_line_length: 448 | alphanum_fraction: 0.566874

hexsha: 2723c87b9dc8c1ef46ce057fd4ce532f510c0df3 | size: 4,695 | ext: py | lang: Python
max_stars: ecom/infer.py @ haohao-hu/ecom-rakuten (0602a7f12dfb35c6455fdd42845cd7b08900c3ba), licenses ["MIT"], stars null
max_issues: ecom/infer.py @ haohao-hu/ecom-rakuten (0602a7f12dfb35c6455fdd42845cd7b08900c3ba), licenses ["MIT"], issues null
max_forks: ecom/infer.py @ haohao-hu/ecom-rakuten (0602a7f12dfb35c6455fdd42845cd7b08900c3ba), licenses ["MIT"], forks null
content (ecom/infer.py):
from . import bpv, data, scoring
from .slimai import to_np
from kerosene import torch_util
import pandas as pd
import fire
import numpy as np
import pathlib
DATA_PATH=pathlib.Path('data')
def infer(model, dl):
probs = []
targs = []
model.eval()
for x, y in dl:
probs.append(to_np(model(torch_util.variable(x))))
targs.append(to_np(y))
return np.concatenate(probs), np.concatenate(targs)
def predict(scores, tune_f1=False):
if not tune_f1:
return scores.argmax(axis=1)
probs = scoring.softmax(scores)
pcuts = scoring.pred_from_probs(probs)
probs[probs < pcuts] = 0
probs[:, -1] += 1e-9
return probs.argmax(axis=1)
def ensemble_with_ir_system(filepath,category_encoder,totalscores,lambda_factor):#function for combining prediction results of our system and LSTM-BPV(s)
irpredictionresults=pd.read_csv(
filepath,
sep='\t',
header=None,
names=('item','cat','prob'),
)
#print(total_targs.shape)
predictedidx=category_encoder.encode(irpredictionresults.cat)
predictedprobabilities=irpredictionresults.prob
for pcatidx,prob,scores in zip(predictedidx,predictedprobabilities,totalscores):
#scores[pcatidx]=(1-lambda_factor)*scores[pcatidx]+lambda_factor*prob
#scores[pcatidx]=(1-lambda_factor)*scores[pcatidx]+lambda_factor*1
scores[pcatidx]=(1-lambda_factor)*scores[pcatidx]+lambda_factor*(np.amax(scores)+0.1)
def ensemble_with_ir_system_revised(filepath,category_encoder,totalscores,lambda_factor):#function for combining prediction results of our system and LSTM-BPV(s)
irpredictionresults=pd.read_csv(
filepath,
sep='\t',
header=None,
names=('item','cat','prob'),
)
#print(total_targs.shape)
predictedidx=category_encoder.encode(irpredictionresults.cat)
predictedprobabilities=irpredictionresults.prob
for pcatidx,prob,scores in zip(predictedidx,predictedprobabilities,totalscores):
#scores[pcatidx]=(1-lambda_factor)*scores[pcatidx]+lambda_factor*prob
#scores[pcatidx]=(1-lambda_factor)*scores[pcatidx]+lambda_factor*1
scores[pcatidx]=scores[pcatidx]+lambda_factor*(np.amax(scores)+0.1)
#assert (scores.size==3008)
#for y in range(pcatidx+1,3008):
# scores[y]=(1-lambda_factor)*scores[y]#+lambda_factor*0
#i+=1
def main(forward=None, reverse=None, is_trainset=False, is_test=False, debug=False, i=0):
n_emb, n_hid = 50, 512
enc, cenc = data.load_encoders()
n_inp, n_out = len(enc.itos), len(cenc.itos)
models_by_dir = {
False: forward.split(',') if forward else [],
True: reverse.split(',') if reverse else [],
}
n_models = 0
total_scores, total_targs = None, None
for is_reverse, models in models_by_dir.items():
if is_test:
dl, revidx = data.load_test_dataloader(is_reverse)
elif is_trainset:
dl, _ = data.load_dataloaders(is_reverse)#,bs=32)
else:
_, dl = data.load_dataloaders(is_reverse)
for model_name in models:
model = data.load_model(
torch_util.to_gpu(bpv.BalancedPoolLSTM(n_inp, n_emb, n_hid, n_out)),
model_name,
)
scores, targs = infer(model, dl)
if debug:
preds = predict(scores)
print(model_name, is_reverse, scoring.score(preds, targs))
n_models += 1
scores = scoring.logprob_scale(scores)
if total_scores is None:
total_scores, total_targs = scores, targs
else:
assert (targs == total_targs).all()
total_scores += scores
total_scores /= n_models
for tune_f1 in False, True:
        if is_test:  # inferring on the test dataset
ensemble_with_ir_system(DATA_PATH/'rdc-catalog-test-IB-SPL-DF-NormH1.tsv',cenc,total_scores,i)
print("lambda="+str(i)+", ")
pred = predict(total_scores, tune_f1=tune_f1)
#print(data.save_test_pred(cenc, pred[revidx], tune_f1=tune_f1))
#print(data.save_test_pred(cenc, pred, tune_f1=tune_f1))
print(scoring.score(pred, total_targs))
        else:  # inferring on the validation dataset
ensemble_with_ir_system(DATA_PATH/'predict-IB-winner-val-2019-01-11-2nd.tsv',cenc,total_scores,i)
print("lambda="+str(i)+", ")
print(scoring.score(predict(total_scores, tune_f1=tune_f1), total_targs))
if __name__ == '__main__':
fire.Fire(main)
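# Hedged usage sketch (not part of the original file): fire.Fire(main) exposes main()'s
# keyword arguments as command-line flags. The model names below are placeholders only.
#
#   python -m ecom.infer --forward=fwd_a,fwd_b --reverse=rev_a --is_test=True --i=0.3
#   python -m ecom.infer --forward=fwd_a --debug=True   # also prints per-model validation scores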
avg_line_length: 39.125 | max_line_length: 161 | alphanum_fraction: 0.640469

hexsha: ad60a44bb32ba91fb60ce5483bcf6f32637c679c | size: 12,868 | ext: py | lang: Python
max_stars: David and Pooja/++Validating Linked Mods/Python-3.0/Lib/test/test_shutil.py @ LinkedModernismProject/web_code (4cf6bf53d5c3249e52a75f0a3f57d106e31daf9e), licenses ["Apache-2.0"], stars 1 (2015-05-21T23:47:54.000Z to 2015-05-21T23:47:54.000Z)
max_issues: front-end/testsuite-python-lib/Python-3.0/Lib/test/test_shutil.py @ MalloyPower/parsing-python (b2bca5eed07ea2af7a2001cd4f63becdfb0570be), licenses ["MIT"], issues 1 (2015-10-29T20:51:31.000Z to 2015-10-29T20:51:31.000Z)
max_forks: front-end/testsuite-python-lib/Python-3.0/Lib/test/test_shutil.py @ MalloyPower/parsing-python (b2bca5eed07ea2af7a2001cd4f63becdfb0570be), licenses ["MIT"], forks 1 (2019-04-11T11:27:01.000Z to 2019-04-11T11:27:01.000Z)
content (test_shutil.py):
# Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from test import support
from test.support import TESTFN
class TestShutil(unittest.TestCase):
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
if self.errorState == 0:
self.assertEqual(func, os.remove)
self.assertEqual(arg, self.childpath)
self.failUnless(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.failUnless(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.abspath(os.path.join(dst_dir, os.path.pardir))
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_data(join(src_dir, 'test.txt'), '123')
write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assert_(exists(join(dst_dir, 'test.txt')))
self.assert_(not exists(join(dst_dir, 'test.tmp')))
self.assert_(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assert_(not exists(join(dst_dir, 'test.tmp')))
self.assert_(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assert_(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
elif os.path.splitext(path)[-1] in ('.py'):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assert_(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assert_(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
if hasattr(os, "symlink"):
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
f = open(src, 'w')
f.write('cheddar')
f.close()
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
self.assertEqual(open(src,'r').read(), 'cheddar')
os.remove(dst)
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
self.assertEqual(open(src,'r').read(), 'cheddar')
os.remove(dst)
finally:
try:
shutil.rmtree(TESTFN)
except OSError:
pass
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write(b"spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
contents = open(src, "rb").read()
shutil.move(src, dst)
self.assertEqual(contents, open(real_dst, "rb").read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_main():
support.run_unittest(TestShutil, TestMove)
if __name__ == '__main__':
test_main()
avg_line_length: 36.765714 | max_line_length: 81 | alphanum_fraction: 0.564423

hexsha: e26b36c12b6369e85138458966b2f430caf6f64e | size: 4,550 | ext: py | lang: Python
max_stars: digilog_n/Layer/IdentityLayer.py @ DigiLog-N/DigiLog-N (43cc2324026b6f333bb2ae6a60faf3e3673a6220), licenses ["Apache-2.0"], stars null
max_issues: digilog_n/Layer/IdentityLayer.py @ DigiLog-N/DigiLog-N (43cc2324026b6f333bb2ae6a60faf3e3673a6220), licenses ["Apache-2.0"], issues null
max_forks: digilog_n/Layer/IdentityLayer.py @ DigiLog-N/DigiLog-N (43cc2324026b6f333bb2ae6a60faf3e3673a6220), licenses ["Apache-2.0"], forks null
content (digilog_n/Layer/IdentityLayer.py):
##############################################################################
# IdentityLayer.py
# https://github.com/DigiLog-N/DigiLog-N
# Copyright 2020 Canvass Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from digilog_n.NotifyWriter import NotifyWriter
from digilog_n.PlasmaReader import PlasmaReader
from time import sleep
from digilog_n.Layer import Layer
import logging
import pandas as pd
mylogger = logging.getLogger("mylogger")
class IdentityLayer(Layer):
def __init__(self, path_to_plasma_file):
super().__init__(path_to_plasma_file)
self.name = 'Identity & Role Management'
self.ds_name = 'DigiLog-N Notifications'
def run(self):
self._before_you_begin()
pr = PlasmaReader(self.plasma_path, 'ANNO_GRPS', remove_after_reading=True)
while True:
result = pr.to_pandas()
if result is None:
mylogger.debug("Identity & Role Management Layer: No requests to annotate notifications with user groups")
else:
self.annotate(result)
mylogger.debug("sleeping %d seconds..." % 3)
sleep(3)
def get_user_group(self, flag):
d = {}
d['YELLOW'] = ['user1@gmail.com']
d['ORANGE'] = ['user1@gmail.com']
d['RED'] = ['user1@gmail.com']
d['DANGER'] = ['user1@gmail.com']
d['CRITICAL'] = ['user1@gmail.com']
'''
d['YELLOW'] = ['user1@gmail.com', 'user3@gmail.com', 'user2@gmail.com', 'user4@gmail.com']
d['ORANGE'] = ['user1@gmail.com', 'user3@gmail.com', 'user2@gmail.com', 'user4@gmail.com']
d['RED'] = ['user1@gmail.com', 'user3@gmail.com', 'user2@gmail.com', 'user4@gmail.com']
d['DANGER'] = ['user1@gmail.com', 'user3@gmail.com', 'user2@gmail.com', 'user4@gmail.com']
d['CRITICAL'] = ['user1@gmail.com', 'user3@gmail.com', 'user2@gmail.com', 'user4@gmail.com']
'''
return d[flag]
def annotate(self, result):
nw = NotifyWriter(self.plasma_path)
#mylogger.info(result.head())
metadata = result[['unique_id', 'unit_id', 'prediction', 'current_cycle', 'flag']].copy().drop_duplicates()
#mylogger.info(metadata.head(100))
count = 0
for i in range(0, len(metadata)):
message = []
unique_id = metadata.iloc[i][0]
unit_id = metadata.iloc[i][1]
prediction = metadata.iloc[i][2]
current_cycle = metadata.iloc[i][3]
flag = metadata.iloc[i][4]
count += 1
message.append("Engine Unit ID: %d" % unit_id)
message.append("RUL Prediction: %f" % prediction)
message.append("Current Engine Cycle: %d" % current_cycle)
message.append("Warning Level: %s" % flag)
message.append("Parts Ordered:")
parts = result.loc[result['unique_id'] == unique_id]
d = {}
for j in range(0, len(parts)):
part = parts.iloc[j][5]
location = parts.iloc[j][6]
qty_available = parts.iloc[j][7]
qty_requested = parts.iloc[j][8]
if part not in d:
d[part] = []
d[part].append((location, qty_available, qty_requested))
for part in d:
message.append("\t%s" % part)
for location, qty_available, qty_requested in d[part]:
message.append("\t\tLocation: %s" % location)
message.append("\t\tQty Available: %s" % qty_available)
message.append("\t\tQty Requested: %s" % qty_requested)
message.append("")
subject = 'Engine Unit %d: %s' % (unit_id, flag)
nw.write(self.get_user_group(flag), '\n'.join(message), subject)
mylogger.info("Identity & Role Management Layer: Annotating %d Alert Requests with Role Information..." % count)
avg_line_length: 36.99187 | max_line_length: 122 | alphanum_fraction: 0.573626

hexsha: 15f9716db47ef81125c7da2dbd51efde3edfd0c2 | size: 2,901 | ext: py | lang: Python
max_stars: homeassistant/components/acmeda/base.py @ tbarbette/core (8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c), licenses ["Apache-2.0"], stars 4 (2016-06-22T12:00:41.000Z to 2018-06-11T20:31:25.000Z)
max_issues: homeassistant/components/acmeda/base.py @ jagadeeshvenkatesh/core (1bd982668449815fee2105478569f8e4b5670add), licenses ["Apache-2.0"], issues 78 (2020-07-23T07:13:08.000Z to 2022-03-31T06:02:04.000Z)
max_forks: homeassistant/components/acmeda/base.py @ jagadeeshvenkatesh/core (1bd982668449815fee2105478569f8e4b5670add), licenses ["Apache-2.0"], forks 6 (2019-07-06T00:43:13.000Z to 2021-01-16T13:27:06.000Z)
content (homeassistant/components/acmeda/base.py):
"""Base class for Acmeda Roller Blinds."""
import aiopulse
from homeassistant.core import callback
from homeassistant.helpers import entity
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_registry import async_get_registry as get_ent_reg
from .const import ACMEDA_ENTITY_REMOVE, DOMAIN, LOGGER
class AcmedaBase(entity.Entity):
"""Base representation of an Acmeda roller."""
def __init__(self, roller: aiopulse.Roller):
"""Initialize the roller."""
self.roller = roller
async def async_remove_and_unregister(self):
"""Unregister from entity and device registry and call entity remove function."""
LOGGER.error("Removing %s %s", self.__class__.__name__, self.unique_id)
ent_registry = await get_ent_reg(self.hass)
if self.entity_id in ent_registry.entities:
ent_registry.async_remove(self.entity_id)
dev_registry = await get_dev_reg(self.hass)
device = dev_registry.async_get_device(identifiers={(DOMAIN, self.unique_id)})
if device is not None:
dev_registry.async_update_device(
device.id, remove_config_entry_id=self.registry_entry.config_entry_id
)
await self.async_remove(force_remove=True)
async def async_added_to_hass(self):
"""Entity has been added to hass."""
self.roller.callback_subscribe(self.notify_update)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
ACMEDA_ENTITY_REMOVE.format(self.roller.id),
self.async_remove_and_unregister,
)
)
async def async_will_remove_from_hass(self):
"""Entity being removed from hass."""
self.roller.callback_unsubscribe(self.notify_update)
@callback
def notify_update(self):
"""Write updated device state information."""
LOGGER.debug("Device update notification received: %s", self.name)
self.async_write_ha_state()
@property
def should_poll(self):
"""Report that Acmeda entities do not need polling."""
return False
@property
def unique_id(self):
"""Return the unique ID of this roller."""
return self.roller.id
@property
def device_id(self):
"""Return the ID of this roller."""
return self.roller.id
@property
def name(self):
"""Return the name of roller."""
return self.roller.name
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.unique_id)},
"name": self.roller.name,
"manufacturer": "Rollease Acmeda",
"via_device": (DOMAIN, self.roller.hub.id),
}
avg_line_length: 32.965909 | max_line_length: 89 | alphanum_fraction: 0.66322

hexsha: 3339a361155b4b17ce81fe4b772ff204d9421595 | size: 4,835 | ext: py | lang: Python
max_stars: splines/core/training_algorithms.py @ ged1182/splines (04ee196f648186da1fb58f6b105b718fd1c81abf), licenses ["MIT"], stars 2 (2020-09-10T11:14:55.000Z to 2021-06-09T11:05:00.000Z)
max_issues: splines/core/training_algorithms.py @ ged1182/splines (04ee196f648186da1fb58f6b105b718fd1c81abf), licenses ["MIT"], issues null
max_forks: splines/core/training_algorithms.py @ ged1182/splines (04ee196f648186da1fb58f6b105b718fd1c81abf), licenses ["MIT"], forks 1 (2021-06-09T11:05:01.000Z to 2021-06-09T11:05:01.000Z)
content (splines/core/training_algorithms.py):
import numpy as np
from ..metrics import mse, mse_grad
from .mv_splines import mv_b_spline_vector, mv_spline_grad
from tqdm import trange
from scipy.sparse import coo_matrix
def fit_spline(x_train, y_train,
x_val, y_val,
regressor,
optimizer,
knot_init=None,
p=None,
k=None,
max_iters=100,
patience=25,
batch_size=None,
verbose=False
):
num_of_vars = 1 if np.ndim(x_train) == 1 else x_train.shape[-1]
if p is None and knot_init is None:
p = np.array([1 for _ in range(num_of_vars)], dtype=np.int)
if k is None:
k = np.array([2 for _ in range(num_of_vars)], dtype=np.int)
if knot_init is None:
u = [np.array([i / (p_ + 1) for i in range(1, p_ + 1)]) for p_ in p]
else:
if not(knot_init is None):
u = knot_init #if num_of_vars > 1 else [knot_init]
u_history = {d+1: [np.array(u[d], dtype='float')] for d in range(num_of_vars)}
b_splines_train = mv_b_spline_vector(x_train.reshape(-1, 1), u, k) if num_of_vars == 1 \
else mv_b_spline_vector(x_train, u, k)
regressor.fit(coo_matrix(b_splines_train), y_train)
c = regressor.coef_
c_history = [c]
y_train_hat = regressor.predict(b_splines_train)
b_splines_val = mv_b_spline_vector(x_val.reshape(-1, 1), u, k) if num_of_vars == 1\
else mv_b_spline_vector(x_val, u, k)
y_val_hat = regressor.predict(b_splines_val)
mse_train = mse(y_train, y_train_hat)
mse_val = mse(y_val, y_val_hat)
r2_train = regressor.score(b_splines_train, y_train)
r2_val = regressor.score(b_splines_val, y_val)
mse_train_history = [mse_train]
mse_val_history = [mse_val]
r2_train_history = [r2_train]
r2_val_history = [r2_val]
history = {'u': u_history, 'c': c_history, 'mse_train': mse_train_history, 'mse_val': mse_val_history,
'r2_train': r2_train_history, 'r2_val': r2_val_history}
best_index = 0
epochs_range = trange(max_iters) if verbose else range(max_iters)
for i in epochs_range:
if batch_size is None:
index_batches = [range(x_train.shape[0])]
else:
num_of_complete_batches = x_train.shape[0] // batch_size
shuffled_indices = np.random.permutation(x_train.shape[0])
index_batches = [shuffled_indices[batch_size * i:batch_size * (i + 1)] for i in
range(num_of_complete_batches)]
if batch_size * num_of_complete_batches < x_train.shape[0]:
index_batches.append(shuffled_indices[batch_size * num_of_complete_batches:])
for idx in index_batches:
x = x_train[idx]
y = y_train[idx]
basis_splines = mv_b_spline_vector(x.reshape(-1, 1), u, k) if num_of_vars == 1\
else mv_b_spline_vector(x, u, k)
regressor.fit(coo_matrix(basis_splines), y)
c = regressor.coef_
y_hat = regressor.predict(basis_splines)
dy = mv_spline_grad(x.reshape(-1, 1), u, k, c) if num_of_vars == 1\
else mv_spline_grad(x, u, k, c)
grad = mse_grad(y, y_hat, dy)
u = optimizer.step(u, grad)
b_splines_train = mv_b_spline_vector(x_train.reshape(-1, 1), u, k) if num_of_vars == 1\
else mv_b_spline_vector(x_train, u, k)
regressor.fit(coo_matrix(b_splines_train), y_train)
c = regressor.coef_
b_splines_val = mv_b_spline_vector(x_val.reshape(-1, 1), u, k) if num_of_vars == 1\
else mv_b_spline_vector(x_val, u, k)
y_val_hat = regressor.predict(b_splines_val)
y_train_hat = regressor.predict(b_splines_train)
mse_train = mse(y_train, y_train_hat)
mse_val = mse(y_val, y_val_hat)
r2_train = regressor.score(b_splines_train, y_train)
r2_val = regressor.score(b_splines_val, y_val)
for d in range(num_of_vars):
history['u'][d+1].append(np.array(u[d], dtype='float'))
history['c'].append(c)
history['mse_train'].append(mse_train)
history['mse_val'].append(mse_val)
history['r2_train'].append(r2_train)
history['r2_val'].append(r2_val)
best_index = int(np.argmin(np.array(history['mse_val'])))
# Early Stopping:
if not (patience is None) and i >= patience and best_index <= i - patience:
break
u_best = [history['u'][d+1][best_index] for d in range(num_of_vars)]
b_splines_train = mv_b_spline_vector(x_train.reshape(-1, 1), u_best, k) if num_of_vars == 1 \
else mv_b_spline_vector(x_train, u_best, k)
regressor.fit(coo_matrix(b_splines_train), y_train)
return best_index, regressor, history
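# Hedged usage sketch (not part of the original file). fit_spline expects a regressor
# with the scikit-learn fit/predict/score/coef_ interface (accepting sparse input) and
# an optimizer exposing step(u, grad) for the knot updates; names below are illustrative.
#
#   from sklearn.linear_model import Ridge
#   best_idx, reg, history = fit_spline(
#       x_train, y_train, x_val, y_val,
#       regressor=Ridge(alpha=1e-3),
#       optimizer=knot_optimizer,            # any object with a step(u, grad) method
#       p=np.array([8]), k=np.array([3]),    # knot count / spline order per variable (assumed meaning)
#       max_iters=200, patience=25, batch_size=256, verbose=True,
#   )
#   u_best = [history['u'][d + 1][best_idx] for d in range(len(history['u']))]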
avg_line_length: 41.324786 | max_line_length: 106 | alphanum_fraction: 0.623785

hexsha: b4bb6e1aa227c9fa05dae7fe551ff74dccf80242 | size: 4,944 | ext: py | lang: Python
max_stars: binary_classficiation/data_loader.py @ princeton-vl/uniloss (76ca03726ae793d4de4041eeb4d565e44b9bc17a), licenses ["BSD-3-Clause"], stars 8 (2020-07-29T01:48:08.000Z to 2020-09-10T01:01:50.000Z)
max_issues: binary_classficiation/data_loader.py @ princeton-vl/uniloss (76ca03726ae793d4de4041eeb4d565e44b9bc17a), licenses ["BSD-3-Clause"], issues null
max_forks: binary_classficiation/data_loader.py @ princeton-vl/uniloss (76ca03726ae793d4de4041eeb4d565e44b9bc17a), licenses ["BSD-3-Clause"], forks null
content (binary_classficiation/data_loader.py):
"""
Create train, valid, test iterators for CIFAR-10 [1].
Easily extended to MNIST, CIFAR-100 and Imagenet.
[1]: https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
"""
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(data_dir,
batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
show_sample=False,
num_workers=4,
pin_memory=False):
"""
Utility function for loading and returning train and valid
    multi-process iterators over the MNIST dataset (as loaded below). A sample
9x9 grid of the images can be optionally displayed.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- show_sample: plot 9x9 sample grid of the dataset.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
normalize = transforms.Normalize((0.1307,), (0.3081,))
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# load the dataset
train_dataset = datasets.MNIST(
root=data_dir, train=True,
download=True, transform=train_transform,
)
valid_dataset = datasets.MNIST(
root=data_dir, train=True,
download=True, transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
# visualize some images
if show_sample:
sample_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=9, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
data_iter = iter(sample_loader)
images, labels = data_iter.next()
X = images.numpy().transpose([0, 2, 3, 1])
return (train_loader, valid_loader)
def get_test_loader(data_dir,
batch_size,
shuffle=True,
num_workers=4,
pin_memory=False):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR-10 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
# define transform
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
dataset = datasets.CIFAR10(
root=data_dir, train=False,
download=True, transform=transform,
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
return data_loader
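# Hedged usage sketch (not part of the original file): typical calls with the signatures
# defined above. As written, get_train_valid_loader builds MNIST loaders while
# get_test_loader builds a CIFAR-10 loader.
#
#   train_loader, valid_loader = get_train_valid_loader(
#       data_dir="./data", batch_size=128, augment=False, random_seed=1, valid_size=0.1,
#   )
#   test_loader = get_test_loader(data_dir="./data", batch_size=128, shuffle=False)
#   for images, labels in train_loader:
#       pass  # training step goes here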
avg_line_length: 32.103896 | max_line_length: 85 | alphanum_fraction: 0.642597

hexsha: 9b7d1c75b05870d6a8ea410f2d4c2c85ae9ee5ac | size: 1,373 | ext: py | lang: Python
max_stars: stocks/tests/models/test_stockDay.py @ pchaos/wanggeService (839f7c6c52a685fcdc4b6a70cf8e9d6c8cc78255), licenses ["MIT"], stars 11 (2018-05-15T18:02:31.000Z to 2020-05-07T03:57:33.000Z)
max_issues: stocks/tests/models/test_stockDay.py @ pchaos/wanggeService (839f7c6c52a685fcdc4b6a70cf8e9d6c8cc78255), licenses ["MIT"], issues 1 (2018-05-16T11:27:32.000Z to 2018-07-07T10:56:58.000Z)
max_forks: stocks/tests/models/test_stockDay.py @ pchaos/wanggeService (839f7c6c52a685fcdc4b6a70cf8e9d6c8cc78255), licenses ["MIT"], forks 13 (2018-05-15T18:02:27.000Z to 2022-03-23T06:18:29.000Z)
content (stocks/tests/models/test_stockDay.py):
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : test_stockDay.py
Description :
@Author : pchaos
tradedate: 18-5-11
-------------------------------------------------
Change Activity:
18-5-11:
@Contact : p19992003#gmail.com
-------------------------------------------------
"""
from django.test import TestCase
from stocks.models import Listing, MARKET_CHOICES, YES_NO, STOCK_CATEGORY
from stocks.models.block import BKDetail
from stocks.models import StockDay
from django.utils import timezone
import datetime
__author__ = 'pchaos'
class TestStockDay(TestCase):
def test_saveStockDay(self):
a, _ = MARKET_CHOICES[0]
code = 'tt0001'
up_date = datetime.datetime.now()
sc = Listing(code=code, name='Test0001', timeToMarket=up_date, market=a)
sc.save()
        sdlist = StockDay.objects.all().count()  # Must not keep StockDay.objects.all() here: the queryset is lazy and would not fetch data immediately, so the query would only run after the new data is saved
code = Listing.objects.get(code=code)
sd = StockDay(code=code, open=1, close=2, high=3, low=0, volumn=100000, amount=230000, tradedate=datetime.datetime.now())
sd.save()
print(sdlist, code, sd)
        self.assertTrue(StockDay.objects.all().count() > sdlist, 'Save failed, {} > {}:{}'.format(StockDay.objects.all().count(), sdlist, sd.__dict__))
avg_line_length: 33.487805 | max_line_length: 145 | alphanum_fraction: 0.586307

hexsha: 10f9f84e60352237bf76bba659db8da1a57c2588 | size: 867 | ext: py | lang: Python
max_stars: braindump/functions.py @ atizo/braindump (5585284eb86d9f26c73941833ea5b810c65e618d), licenses ["MIT"], stars null
max_issues: braindump/functions.py @ atizo/braindump (5585284eb86d9f26c73941833ea5b810c65e618d), licenses ["MIT"], issues null
max_forks: braindump/functions.py @ atizo/braindump (5585284eb86d9f26c73941833ea5b810c65e618d), licenses ["MIT"], forks 1 (2019-03-20T21:05:00.000Z to 2019-03-20T21:05:00.000Z)
content (braindump/functions.py):
from django.shortcuts import _get_queryset
from django.conf import settings
def get_object_or_None(klass, *args, **kwargs):
"""
Uses get() to return an object or None if the object does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
return None
def get_config(key, default=None):
"""
Get settings from django.conf if exists,
return default value otherwise
example:
ADMIN_EMAIL = get_config('ADMIN_EMAIL', 'default@email.com')
"""
return getattr(settings, key, default)
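# Hedged usage sketch (not part of the original file); the model and setting names are
# illustrative only.
#
#   from myapp.models import Project                       # hypothetical model
#   project = get_object_or_None(Project, slug="braindump")
#   if project is None:
#       pass  # handle the missing object without a try/except around get()
#
#   ADMIN_EMAIL = get_config('ADMIN_EMAIL', 'default@email.com')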
avg_line_length: 26.272727 | max_line_length: 84 | alphanum_fraction: 0.693195

hexsha: dbc39852216913c5f594c07dbe7e9bfc44a74bb8 | size: 1,854 | ext: py | lang: Python
max_stars: pgbouncer/datadog_checks/pgbouncer/config_models/instance.py @ tcpatterson/integrations-core (3692601de09f8db60f42612b0d623509415bbb53), licenses ["BSD-3-Clause"], stars null
max_issues: pgbouncer/datadog_checks/pgbouncer/config_models/instance.py @ tcpatterson/integrations-core (3692601de09f8db60f42612b0d623509415bbb53), licenses ["BSD-3-Clause"], issues null
max_forks: pgbouncer/datadog_checks/pgbouncer/config_models/instance.py @ tcpatterson/integrations-core (3692601de09f8db60f42612b0d623509415bbb53), licenses ["BSD-3-Clause"], forks null
content (pgbouncer/datadog_checks/pgbouncer/config_models/instance.py):
# (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from __future__ import annotations
from typing import Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
database_url: Optional[str]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
host: Optional[str]
min_collection_interval: Optional[float]
password: Optional[str]
port: Optional[int]
service: Optional[str]
tags: Optional[Sequence[str]]
use_cached: Optional[bool]
username: Optional[str]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
avg_line_length: 31.423729 | max_line_length: 110 | alphanum_fraction: 0.721683

hexsha: aeeae57b8428ecf4d1806e6d617184d7bdc92bdc | size: 6,757 | ext: py | lang: Python
max_stars: bindings/python/ensmallen_graph/datasets/string/actinomycesspph3.py @ caufieldjh/ensmallen_graph (14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a), licenses ["MIT"], stars null
max_issues: bindings/python/ensmallen_graph/datasets/string/actinomycesspph3.py @ caufieldjh/ensmallen_graph (14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a), licenses ["MIT"], issues null
max_forks: bindings/python/ensmallen_graph/datasets/string/actinomycesspph3.py @ caufieldjh/ensmallen_graph (14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a), licenses ["MIT"], forks null
content (bindings/python/ensmallen_graph/datasets/string/actinomycesspph3.py):
"""
This file offers the methods to automatically retrieve the graph Actinomyces sp. ph3.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:28:35.422750
The undirected graph Actinomyces sp. ph3 has 1573 nodes and 107006 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08655 and has 6 connected components, where the component with most
nodes has 1560 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 117, the mean node degree is 136.05, and
the node degree mode is 8. The top 5 most central nodes are 1118058.CAGY01000001_gene68
(degree 729), 1118058.CAGY01000002_gene745 (degree 712), 1118058.CAGY01000001_gene602
(degree 698), 1118058.CAGY01000003_gene887 (degree 613) and 1118058.CAGY01000001_gene582
(degree 599).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ActinomycesSpPh3
# Then load the graph
graph = ActinomycesSpPh3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ActinomycesSpPh3(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Actinomyces sp. ph3 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Actinomyces sp. ph3 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:28:35.422750
The undirected graph Actinomyces sp. ph3 has 1573 nodes and 107006 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08655 and has 6 connected components, where the component with most
nodes has 1560 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 117, the mean node degree is 136.05, and
the node degree mode is 8. The top 5 most central nodes are 1118058.CAGY01000001_gene68
(degree 729), 1118058.CAGY01000002_gene745 (degree 712), 1118058.CAGY01000001_gene602
(degree 698), 1118058.CAGY01000003_gene887 (degree 613) and 1118058.CAGY01000001_gene582
(degree 599).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ActinomycesSpPh3
# Then load the graph
graph = ActinomycesSpPh3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="ActinomycesSpPh3",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
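# --- Illustrative usage sketch (an addition, not part of the retrieved module).
# It simply mirrors the docstring above and assumes the ensmallen_graph package
# and a working download of the STRING data are available.
if __name__ == "__main__":
    graph = ActinomycesSpPh3(directed=False, verbose=2)
    print(graph)
    # An 80/20 connected holdout, as documented above.
    train_graph, validation_graph = graph.connected_holdout(
        train_size=0.8,
        random_state=42,
        verbose=True
    )
    print(train_graph)
    print(validation_graph)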
| 35.376963
| 223
| 0.705195
|
95852745366145a63971a1ed9b3cf20b99e3269d
| 2,459
|
py
|
Python
|
ranger/plugins/extract.py
|
MDBrodskiy/i3DotFiles
|
4727bf0414545a8482bf23017866743c46905975
|
[
"MIT"
] | 1
|
2021-12-26T07:55:05.000Z
|
2021-12-26T07:55:05.000Z
|
ranger/plugins/extract.py
|
MDBrodskiy/i3DotFiles
|
4727bf0414545a8482bf23017866743c46905975
|
[
"MIT"
] | null | null | null |
ranger/plugins/extract.py
|
MDBrodskiy/i3DotFiles
|
4727bf0414545a8482bf23017866743c46905975
|
[
"MIT"
] | null | null | null |
import os
from ranger.api.commands import *
from ranger.core.loader import CommandLoader
class extract(Command):
def execute(self):
"""Extract copied files to current directory or directory
specified in a command line
"""
cwd = self.fm.thisdir
copied_files = cwd.get_selection()
if not copied_files:
return
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
one_file = copied_files[0]
cwd = self.fm.thisdir
original_path = cwd.path
line_args = self.line.split()[1:]
if line_args:
extraction_dir = os.path.join(cwd.path, "".join(line_args))
os.makedirs(extraction_dir, exist_ok=True)
flags = ['-X', extraction_dir]
flags += ['-e']
else:
flags = ['-X', cwd.path]
flags += ['-e']
self.fm.copy_buffer.clear()
self.fm.cut_buffer = False
if len(copied_files) == 1:
descr = "Extracting: " + os.path.basename(one_file.path)
else:
descr = "Extracting files from: " + os.path.basename(one_file.dirname)
obj = CommandLoader(args=['aunpack'] + flags \
+ [f.path for f in copied_files], descr=descr, read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
class extract_to_dirs(Command):
def execute(self):
""" Extract copied files to a subdirectories """
cwd = self.fm.thisdir
original_path = cwd.path
copied_files = cwd.get_selection()
if not copied_files:
return
def refresh(_):
cwd = self.fm.get_directory(original_path)
cwd.load_content()
def make_flags(fn):
flags = ['-D']
return flags
one_file = copied_files[0]
self.fm.copy_buffer.clear()
self.fm.cut_buffer = False
# Making description line
if len(copied_files) == 1:
descr = "Extracting: " + os.path.basename(one_file.path)
else:
descr = "Extracting files from: " + os.path.basename(one_file.dirname)
# Extracting files
for f in copied_files:
obj = CommandLoader(args=['aunpack'] + make_flags(f.path) + [f.path], descr=descr, read=True)
obj.signal_bind('after', refresh)
self.fm.loader.add(obj)
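# --- Minimal sketch (not part of the plugin): how the `aunpack` argument list
# used by extract.execute() above is assembled for a hypothetical target
# directory. The paths and target directory below are made-up examples.
def build_aunpack_args(archive_paths, target_dir):
    # -X selects the extraction directory; -e handles each listed archive
    # separately, matching the flags built in extract.execute().
    flags = ['-X', target_dir, '-e']
    return ['aunpack'] + flags + list(archive_paths)

# Example:
#   build_aunpack_args(['/tmp/a.zip', '/tmp/b.tar.gz'], '/tmp/out')
#   -> ['aunpack', '-X', '/tmp/out', '-e', '/tmp/a.zip', '/tmp/b.tar.gz']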
| 29.987805
| 105
| 0.569337
|
0cfc1a99ce55c9739bc85d3d5971e63ce7ffd25c
| 1,354
|
py
|
Python
|
tests/test_utils.py
|
github3py/github3py
|
37a4195a704db9c3e1d083ba6ddbda364e0affeb
|
[
"BSD-3-Clause"
] | 2
|
2017-11-21T13:48:50.000Z
|
2018-09-05T12:42:42.000Z
|
tests/test_utils.py
|
github3py/github3py
|
37a4195a704db9c3e1d083ba6ddbda364e0affeb
|
[
"BSD-3-Clause"
] | 1
|
2018-01-02T07:23:49.000Z
|
2018-01-02T07:23:49.000Z
|
tests/test_utils.py
|
github3py/github3py
|
37a4195a704db9c3e1d083ba6ddbda364e0affeb
|
[
"BSD-3-Clause"
] | 3
|
2020-01-08T16:14:31.000Z
|
2021-10-31T21:35:54.000Z
|
from github3.utils import timestamp_parameter
from tests.utils import BaseCase
from datetime import datetime
class TestTimestampParameter(BaseCase):
def test_datetimes(self):
timestamp = datetime(2010, 6, 1, 12, 15, 30)
self.assertEqual('2010-06-01T12:15:30', timestamp_parameter(timestamp))
def test_valid_datestring(self):
testvals = (
'2010-06-01',
'2010-06-01T12:15:30',
'2010-06-01T12:14:30.12321+02:00',
'2010-06-01T12:14:30.12321-02:00',
'2010-06-01T12:14:30.2115Z',
)
for timestamp in testvals:
self.assertEqual(timestamp, timestamp_parameter(timestamp))
def test_invalid_datestring(self):
testvals = (
'2012-16-04',
'2012-06-01v!',
'fish',
'2010-06-01T12:14:30.12321+02',
'2010-06-01T12:70:30.12321+02',
)
for timestamp in testvals:
self.assertRaises(ValueError, timestamp_parameter, timestamp)
def test_none_handling(self):
self.assertTrue(timestamp_parameter(None, allow_none=True) is None)
self.assertRaises(ValueError, timestamp_parameter, None,
allow_none=False)
def test_invalid_type_handling(self):
self.assertRaises(ValueError, timestamp_parameter, 1)
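# --- Usage sketch (assumes an environment where github3.utils is importable);
# the calls below mirror exactly what the tests above assert.
def _timestamp_parameter_examples():
    ts = timestamp_parameter(datetime(2010, 6, 1, 12, 15, 30))
    assert ts == '2010-06-01T12:15:30'
    assert timestamp_parameter('2010-06-01') == '2010-06-01'  # valid strings pass through
    assert timestamp_parameter(None, allow_none=True) is None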
| 33.02439
| 79
| 0.6226
|
6662fd4a75955a8345fbaed49c6991b6d74d300b
| 11,165
|
py
|
Python
|
Interface/MainWindow.py
|
Snackhole/NomenEXIF
|
b8a3689489678098ac94774e63fb4073fb57ae59
|
[
"MIT"
] | null | null | null |
Interface/MainWindow.py
|
Snackhole/NomenEXIF
|
b8a3689489678098ac94774e63fb4073fb57ae59
|
[
"MIT"
] | 5
|
2021-03-19T15:23:41.000Z
|
2022-03-14T02:22:14.000Z
|
Interface/MainWindow.py
|
Snackhole/NomenExif
|
86313d76f220df62018a84560c5e05a5297ae657
|
[
"MIT"
] | null | null | null |
import math
from Interface.StatusThread import StatusThread
import os
import threading
from Core.ExifRenamer import ExifRenamer
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QFileDialog, QFrame, QGridLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QMainWindow, QMessageBox, QProgressBar, QPushButton)
class MainWindow(QMainWindow):
def __init__(self, ScriptName, AbsoluteDirectoryPath):
# Store Parameters
self.ScriptName = ScriptName
self.AbsoluteDirectoryPath = AbsoluteDirectoryPath
# Variables
self.RenameInProgress = False
# Create Exif Renamer
self.ExifRenamer = ExifRenamer()
# Initialize
super().__init__()
# Create Interface
self.CreateInterface()
# Show Window
self.show()
# Center Window
self.Center()
# Load Configs
self.LoadConfigs()
def CreateInterface(self):
# Create Window Icon
self.WindowIcon = QIcon(self.GetResourcePath("Assets/NomenExif Icon.png"))
# Window Icon and Title
self.setWindowIcon(self.WindowIcon)
self.setWindowTitle(self.ScriptName)
# Create Central Frame
self.Frame = QFrame()
# Create Widgets
self.QueueLabel = QLabel("Rename Queue")
self.QueueLabel.setAlignment(QtCore.Qt.AlignCenter)
self.QueueListWidget = QListWidget()
self.AddToQueueButton = QPushButton("Add Files to Rename Queue")
self.AddToQueueButton.clicked.connect(self.AddToQueue)
self.ClearQueueButton = QPushButton("Clear Rename Queue")
self.ClearQueueButton.clicked.connect(self.ClearQueue)
self.QueueAndTagsSeparator = QFrame()
self.QueueAndTagsSeparator.setFrameShape(QFrame.VLine)
self.QueueAndTagsSeparator.setFrameShadow(QFrame.Sunken)
self.AvailableTagsLabel = QLabel("Available Tags")
self.AvailableTagsLabel.setAlignment(QtCore.Qt.AlignCenter)
self.AvailableTagsListWidget = QListWidget()
self.AvailableTagsListWidget.itemActivated.connect(self.InsertTag)
self.TemplateSeparator = QFrame()
self.TemplateSeparator.setFrameShape(QFrame.HLine)
self.TemplateSeparator.setFrameShadow(QFrame.Sunken)
self.TemplateLabel = QLabel("Renaming Template:")
self.TemplateLineEdit = QLineEdit()
self.RenameButton = QPushButton("Rename Files in Queue with Template")
self.ProgressSeparator = QFrame()
self.ProgressSeparator.setFrameShape(QFrame.HLine)
self.ProgressSeparator.setFrameShadow(QFrame.Sunken)
self.RenameButton.clicked.connect(self.Rename)
self.RenameProgressLabel = QLabel("Rename Progress")
self.RenameProgressBar = QProgressBar()
# Widgets to Disable While Renaming
self.DisableList = []
self.DisableList.append(self.AddToQueueButton)
self.DisableList.append(self.ClearQueueButton)
self.DisableList.append(self.AvailableTagsListWidget)
self.DisableList.append(self.TemplateLineEdit)
self.DisableList.append(self.RenameButton)
# Create Layout
self.Layout = QGridLayout()
# Widgets in Layout
self.Layout.addWidget(self.QueueLabel, 0, 0, 1, 2)
self.Layout.addWidget(self.AddToQueueButton, 1, 0)
self.Layout.addWidget(self.ClearQueueButton, 1, 1)
self.Layout.addWidget(self.QueueListWidget, 2, 0, 1, 2)
self.Layout.addWidget(self.QueueAndTagsSeparator, 0, 3, 3, 1)
self.Layout.addWidget(self.AvailableTagsLabel, 0, 4)
self.Layout.addWidget(self.AvailableTagsListWidget, 1, 4, 2, 1)
self.Layout.addWidget(self.TemplateSeparator, 3, 0, 1, 5)
self.TemplateLayout = QGridLayout()
self.TemplateLayout.addWidget(self.TemplateLabel, 0, 0)
self.TemplateLayout.addWidget(self.TemplateLineEdit, 0, 1)
self.TemplateLayout.addWidget(self.RenameButton, 1, 0, 1, 2)
self.TemplateLayout.setColumnStretch(1, 1)
self.Layout.addLayout(self.TemplateLayout, 4, 0, 1, 5)
self.Layout.addWidget(self.ProgressSeparator, 5, 0, 1, 5)
self.ProgressLayout = QGridLayout()
self.ProgressLayout.addWidget(self.RenameProgressLabel, 0, 0)
self.ProgressLayout.addWidget(self.RenameProgressBar, 0, 1)
self.Layout.addLayout(self.ProgressLayout, 6, 0, 1, 5)
# Set and Configure Layout
self.Layout.setColumnStretch(0, 1)
self.Layout.setColumnStretch(1, 1)
self.Frame.setLayout(self.Layout)
# Create Status Bar
self.StatusBar = self.statusBar()
# Set Central Frame
self.setCentralWidget(self.Frame)
def GetResourcePath(self, RelativeLocation):
return os.path.join(self.AbsoluteDirectoryPath, RelativeLocation)
def LoadConfigs(self):
# Template
LastEnteredTemplateFile = self.GetResourcePath("Configs/Template.cfg")
if os.path.isfile(LastEnteredTemplateFile):
with open(LastEnteredTemplateFile, "r") as ConfigFile:
self.TemplateLineEdit.setText(ConfigFile.readline())
else:
self.TemplateLineEdit.setText("[YEAR].[MONTH].[DAY] - [HOUR].[MINUTE].[SECOND]")
# Last Opened Directory
LastOpenedDirectoryFile = self.GetResourcePath("Configs/LastOpenedDirectory.cfg")
if os.path.isfile(LastOpenedDirectoryFile):
with open(LastOpenedDirectoryFile, "r") as ConfigFile:
self.LastOpenedDirectory = ConfigFile.readline()
else:
self.LastOpenedDirectory = None
def SaveConfigs(self):
if not os.path.isdir(self.GetResourcePath("Configs")):
os.mkdir(self.GetResourcePath("Configs"))
# Template
TemplateString = self.TemplateLineEdit.text()
if TemplateString != "":
with open(self.GetResourcePath("Configs/Template.cfg"), "w") as ConfigFile:
ConfigFile.write(TemplateString)
# Last Opened Directory
if type(self.LastOpenedDirectory) == str:
if os.path.isdir(self.LastOpenedDirectory):
with open(self.GetResourcePath("Configs/LastOpenedDirectory.cfg"), "w") as ConfigFile:
ConfigFile.write(self.LastOpenedDirectory)
def AddToQueue(self):
FilesToAdd = QFileDialog.getOpenFileNames(caption="Files to Add to Queue", filter="JPEG Images (*.jpeg *.jpg)", directory=self.LastOpenedDirectory)[0]
if len(FilesToAdd) < 1:
return
AllFilesAddedSuccessfully = self.ExifRenamer.AddToRenameQueue(FilesToAdd)
self.UpdateDisplay()
self.LastOpenedDirectory = os.path.dirname(FilesToAdd[0])
if not AllFilesAddedSuccessfully:
self.DisplayMessageBox("Some of the selected files could not be added to the queue. They may not have Exif data.", Icon=QMessageBox.Warning)
def ClearQueue(self):
if self.DisplayMessageBox("Clear the file queue?", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes:
self.ExifRenamer.Clear()
self.UpdateDisplay()
def InsertTag(self):
self.TemplateLineEdit.insert((self.AvailableTagsListWidget.selectedItems()[0]).text())
self.TemplateLineEdit.setFocus()
def Rename(self):
# Validate Inputs
if len(self.ExifRenamer.RenameQueue) < 1:
self.DisplayMessageBox("No files selected to rename.", Icon=QMessageBox.Warning)
return
Template = self.TemplateLineEdit.text()
ValidTemplate = False
for Tag in self.ExifRenamer.AvailableTags:
if "[" + Tag + "]" in Template:
ValidTemplate = True
break
if not ValidTemplate:
self.DisplayMessageBox("Rename template must contain at least one available tag.", Icon=QMessageBox.Warning)
return
# Start Renaming
self.SetRenameInProgress(True)
self.ExifRenamer.RenameFilesWithTemplate(Template)
# Attempt to Get Rename Thread and Set Up Status Checking
try:
RenameThreadInst = [RenameThread for RenameThread in threading.enumerate() if RenameThread.name == "RenameThread"][0]
StatusThreadInst = StatusThread(RenameThreadInst)
StatusThreadInst.UpdateProgressSignal.connect(lambda: self.UpdateProgress(RenameThreadInst))
StatusThreadInst.RenameCompleteSignal.connect(self.RenameComplete)
StatusThreadInst.start()
except IndexError:
self.RenameComplete()
# Interface Methods
def UpdateDisplay(self):
self.QueueListWidget.clear()
for File in self.ExifRenamer.RenameQueue:
FileListItem = QListWidgetItem()
FileListItem.setText(File["FileName"])
FileListItem.setToolTip(File["Path"])
self.QueueListWidget.addItem(FileListItem)
self.AvailableTagsListWidget.clear()
for AvailableTag in self.ExifRenamer.GetAvailableTags():
AvailableTagListItem = QListWidgetItem()
AvailableTagListItem.setText("[" + AvailableTag + "]")
self.AvailableTagsListWidget.addItem(AvailableTagListItem)
def SetRenameInProgress(self, RenameInProgress):
self.RenameInProgress = RenameInProgress
for Widget in self.DisableList:
Widget.setDisabled(RenameInProgress)
if RenameInProgress:
self.StatusBar.showMessage("Renaming in progress...")
else:
self.StatusBar.clearMessage()
self.RenameProgressBar.reset()
def UpdateProgress(self, RenameThread):
RenameProgress = math.floor((RenameThread.FilesRenamed / RenameThread.FileQueueSize) * 100)
self.RenameProgressBar.setValue(RenameProgress)
def RenameComplete(self):
self.SetRenameInProgress(False)
self.ExifRenamer.Clear()
self.UpdateDisplay()
def DisplayMessageBox(self, Message, Icon=QMessageBox.Information, Buttons=QMessageBox.Ok, Parent=None):
MessageBox = QMessageBox(self if Parent is None else Parent)
MessageBox.setWindowIcon(self.WindowIcon)
MessageBox.setWindowTitle(self.ScriptName)
MessageBox.setIcon(Icon)
MessageBox.setText(Message)
MessageBox.setStandardButtons(Buttons)
return MessageBox.exec_()
# Window Management Methods
def Center(self):
FrameGeometryRectangle = self.frameGeometry()
DesktopCenterPoint = QApplication.primaryScreen().availableGeometry().center()
FrameGeometryRectangle.moveCenter(DesktopCenterPoint)
self.move(FrameGeometryRectangle.topLeft())
def closeEvent(self, Event):
Close = True
if self.RenameInProgress:
Close = self.DisplayMessageBox("Files are currently being renamed. Exit anyway?", Icon=QMessageBox.Question, Buttons=(QMessageBox.Yes | QMessageBox.No)) == QMessageBox.Yes
if Close:
self.SaveConfigs()
Event.accept()
else:
Event.ignore()
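# --- Hypothetical launcher sketch (not part of this module): the constructor
# above expects a script name and the application's absolute directory path;
# the values used here are assumptions.
if __name__ == "__main__":
    import sys
    AppInst = QApplication(sys.argv)
    WindowInst = MainWindow("NomenExif", os.path.dirname(os.path.abspath(__file__)))
    sys.exit(AppInst.exec_())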
| 42.452471
| 184
| 0.678997
|
d325f6e99b8f20093a9436c8b6f93ba386202edf
| 2,020
|
py
|
Python
|
app/controllers/tattooists_controllers/post_create.py
|
Poketnans/capstone-q3
|
38d550a54ff41387534241df85eb8aa8c9b6ba7e
|
[
"MIT"
] | null | null | null |
app/controllers/tattooists_controllers/post_create.py
|
Poketnans/capstone-q3
|
38d550a54ff41387534241df85eb8aa8c9b6ba7e
|
[
"MIT"
] | 4
|
2022-03-03T12:47:02.000Z
|
2022-03-08T18:10:34.000Z
|
app/controllers/tattooists_controllers/post_create.py
|
Poketnans/capstone-q3
|
38d550a54ff41387534241df85eb8aa8c9b6ba7e
|
[
"MIT"
] | 1
|
2022-03-17T14:21:30.000Z
|
2022-03-17T14:21:30.000Z
|
from http import HTTPStatus
from flask import current_app, jsonify
from psycopg2.errors import UniqueViolation, ForeignKeyViolation
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from app.models import Tattooist
from app.services import get_files, get_orig_error_field, generate_image_default
from app.decorators import verify_payload, validator
import werkzeug.exceptions
@validator(email="email", password="password")
@verify_payload(
fields_and_types={
"name": str,
"email": str,
"password": str,
"general_information": str,
"admin": bool
},
optional=["general_information"]
)
def post_create(payload):
try:
session: Session = current_app.db.session
new_tatooist = Tattooist(**payload)
files = get_files()
if files:
for file in files:
new_tatooist.image_bin = file.file_bin
new_tatooist.image_hash = file.filename
new_tatooist.image_mimetype = file.mimetype
else:
image = generate_image_default()
new_tatooist.image_mimetype = image.mimetype
new_tatooist.image_hash = image.filename
new_tatooist.image_bin = image.file_bin
session.add(new_tatooist)
session.commit()
return jsonify(new_tatooist), HTTPStatus.CREATED
except IntegrityError as error:
if isinstance(error.orig, UniqueViolation):
error_field = get_orig_error_field(error)
msg = {"msg": f"{error_field} already registered"}
return jsonify(msg), HTTPStatus.CONFLICT
elif isinstance(error.orig, ForeignKeyViolation):
error_field = get_orig_error_field(error)
msg = {"msg": f"{error_field} not found"}
return jsonify(msg), HTTPStatus.CONFLICT
else:
raise error
except werkzeug.exceptions.UnsupportedMediaType as e:
return e.description, HTTPStatus.UNSUPPORTED_MEDIA_TYPE
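# --- Hypothetical wiring sketch: the blueprint name and URL prefix are
# assumptions; the project presumably registers this controller elsewhere.
from flask import Blueprint

bp_tattooists = Blueprint("tattooists", __name__, url_prefix="/tattooists")
bp_tattooists.route("/", methods=["POST"])(post_create)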
| 34.237288
| 80
| 0.673267
|
bec71f03d340b9b39f830d994fafa544fc7cc75c
| 6,991
|
py
|
Python
|
halp/layers/cross_entropy.py
|
vishalbelsare/halp
|
0da6e758aba779a5493069b99d4cc7bb5052082d
|
[
"Apache-2.0"
] | 3
|
2019-01-03T20:59:04.000Z
|
2019-05-13T11:58:08.000Z
|
halp/layers/cross_entropy.py
|
vishalbelsare/halp
|
0da6e758aba779a5493069b99d4cc7bb5052082d
|
[
"Apache-2.0"
] | 1
|
2019-05-14T09:20:13.000Z
|
2019-05-14T09:20:13.000Z
|
halp/layers/cross_entropy.py
|
vishalbelsare/halp
|
0da6e758aba779a5493069b99d4cc7bb5052082d
|
[
"Apache-2.0"
] | 2
|
2019-01-03T20:59:07.000Z
|
2021-11-15T17:08:45.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np
from halp.utils.utils import void_cast_func, single_to_half_det, single_to_half_stoc
from halp.layers.bit_center_layer import BitCenterLayer
# The current implementation does not use Chebyshev approximation yet.
# The input grad during backward is the exact delta grad
class BitCenterCrossEntropyLPFunction(Function):
@staticmethod
def forward(ctx, input_delta, input_lp, target, grad_offset):
# suffix lp means the lp version of the offset tensors
# suffix delta means the real low precision part of the model representation
# To make softmax more numerically stable, we subtract the max
input = input_lp + input_delta - torch.max(input_lp + input_delta)
ctx.save_for_backward(input_lp, input_delta, target, grad_offset)
output = F.nll_loss(F.log_softmax(input, dim=1), target)
return output
@staticmethod
def backward(ctx, grad_output):
# grad_z_{i, j} = 1/m\sum_i 1(y_i = j) - P(y_i = j)
input_lp, input_delta, target, grad_offset = ctx.saved_tensors
assert input_lp.size(0) == target.numel()
prob = F.softmax(input_lp + input_delta - torch.max(input_lp + input_delta), dim=1)
sample_idx = torch.LongTensor(np.arange(input_lp.size(0)))
minibatch_size = input_delta.size(0)
grad_input_lp = None
grad_input_delta = torch.zeros_like(
input_delta, dtype=input_delta.dtype)
grad_input_delta[sample_idx, target] = 1.0
grad_input_delta.add_(-prob)
grad_input_delta.div_(-minibatch_size)
grad_input_delta.add_(-grad_offset)
grad_target = None
grad_grad_offset = None
return grad_input_delta, grad_input_lp, grad_target, grad_grad_offset
class BitCenterCrossEntropyFPFunction(Function):
@staticmethod
def forward(ctx, input_fp, target):
# suffix lp means the lp version of the offset tensors
# suffix delta means the real low precision part of the model representation
ctx.save_for_backward(input_fp, target)
output = F.nll_loss(F.log_softmax(input_fp, dim=1), target)
return output
@staticmethod
def backward(ctx, grad_output):
# grad_z_{i, j} = 1/m\sum_i 1(y_i = j) - P(y_i = j)
input_fp, target = ctx.saved_tensors
assert input_fp.size(0) == target.numel()
prob = F.softmax(input_fp, dim=1)
sample_idx = torch.LongTensor(np.arange(input_fp.size(0)))
minibatch_size = input_fp.size(0)
grad_input_fp = torch.zeros_like(input_fp, dtype=input_fp.dtype)
grad_input_fp[sample_idx, target] = 1.0
grad_input_fp.add_(-prob)
grad_input_fp.div_(-minibatch_size)
grad_target = None
return grad_input_fp, grad_target
bit_center_cross_entropy_lp = BitCenterCrossEntropyLPFunction.apply
bit_center_cross_entropy_fp = BitCenterCrossEntropyFPFunction.apply
class BitCenterCrossEntropy(BitCenterLayer):
def __init__(self, cast_func=void_cast_func, n_train_sample=1):
BitCenterLayer.__init__(
self,
fp_functional=bit_center_cross_entropy_fp,
lp_functional=bit_center_cross_entropy_lp,
bias=False,
cast_func=cast_func,
n_train_sample=n_train_sample)
self.setup_bit_center_vars()
self.cuda()
self.reset_parameters_bit_center()
self.register_backward_hook(self.update_grad_output_cache)
def setup_bit_center_vars(self):
# there is no bc variables to be setup for this layer
pass
def reset_parameters_bit_center(self):
pass
def update_grad_output_cache(self, self1, input, output):
# pass
# use duplicated self to adapt to the pytorch API requirement
# as this is a class member function
if self.do_offset:
# note here grad_output_lp is actually the grad_input offset.
# This is because we want to utilize the existing infra in bitCenterLayer
if self.on_site_compute:
self.grad_output_cache = \
self.update_single_cache_on_site_compute(
self.grad_output_cache, input[0])
self.grad_cache_iter = 0
self.output_size = input[0].size()
else:
self.grad_output_cache[self.grad_cache_iter:min(
self.grad_cache_iter +
input[0].size(0), self.n_train_sample)].data.copy_(
self.cast_func(input[0].cpu()))
self.grad_cache_iter = (
self.grad_cache_iter + input[0].size(0)) % self.n_train_sample
# We use the following variable only for test purposes: we want to be able to access
# the gradient value w.r.t. the input from the outside world. For lp mode, it is grad_input_delta;
# for fp mode, it is grad_input
self.input_grad_for_test = input[0].clone()
def forward_fp(self, input, target):
self.check_or_setup_input_cache(input)
output = self.fp_func(input, target)
if self.grad_output_cache is None:
# in the cross entropy layer we need to cache the input gradient
self.grad_output_cache = self.setup_cache(input)
self.grad_cache_iter = 0
self.update_input_cache(input)
return output
def forward_lp(self, input, target):
# Need to test do_offset mode whether gradient is updated properly
if self.on_site_compute:
assert self.cache_iter == 0 and self.grad_cache_iter == 0
input_lp = self.input_cache[0:input.size(0)].cuda()
else:
input_lp = self.input_cache[self.cache_iter:(
self.cache_iter + input.size(0))].cuda()
# give a handle to access input_lp from outside
self.input_lp = input_lp
# note here grad_output_lp is actually the grad_input offset.
# This is because we want to utilize the existing infra in bitCenterLayer
if self.on_site_compute:
grad_output_lp = self.get_single_cache_on_site_compute(
self.grad_output_cache, self.output_size).cuda()
else:
grad_output_lp = \
self.grad_output_cache[self.grad_cache_iter:(self.grad_cache_iter + input.size(0))].cuda()
input_delta = input
output = self.lp_func(input_delta, input_lp, target, grad_output_lp)
self.increment_cache_iter(input)
return output
def forward(self, input, target):
# Need to test do_offset mode whether gradient is updated properly
if self.do_offset:
return self.forward_fp(input, target)
else:
return self.forward_lp(input, target)
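# --- Sanity-check sketch (an addition, not part of the module): the FP
# functional above computes nll_loss(log_softmax(x)), which is exactly
# torch.nn.functional.cross_entropy, so the two should agree on random data.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(4, 3, requires_grad=True)
    targets = torch.tensor([0, 2, 1, 2])
    loss = bit_center_cross_entropy_fp(logits, targets)
    reference = F.cross_entropy(logits, targets)
    print(float(loss), float(reference))  # expected to match closely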
| 43.968553
| 106
| 0.669146
|
3a268abdaf3d46547244845942a0b09a633aa4f0
| 12,329
|
py
|
Python
|
directord/user.py
|
Directord/directord
|
d7aabefdab830471ba1f77543aefc934a4ead59e
|
[
"Apache-2.0"
] | 10
|
2021-11-08T11:29:59.000Z
|
2022-01-11T16:29:23.000Z
|
directord/user.py
|
directord/directord
|
d7aabefdab830471ba1f77543aefc934a4ead59e
|
[
"Apache-2.0"
] | 19
|
2021-10-29T21:22:18.000Z
|
2022-01-18T22:21:55.000Z
|
directord/user.py
|
directord/directord
|
d7aabefdab830471ba1f77543aefc934a4ead59e
|
[
"Apache-2.0"
] | 2
|
2021-11-02T00:54:53.000Z
|
2022-01-10T10:31:33.000Z
|
# Copyright Peznauts <kevin@cloudnull.com>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import os
import time
import directord
from directord import interface
from directord import iodict
class User(interface.Interface):
"""Directord User interface class."""
def __init__(self, args):
"""Initialize the User interface class.
Sets up the user object.
:param args: Arguments parsed by argparse.
:type args: Object
"""
super(User, self).__init__(args=args)
class Manage(User):
"""Directord Manage interface class."""
def __init__(self, args):
"""Initialize the Manage interface class.
Sets up the manage object.
:param args: Arguments parsed by argparse.
:type args: Object
"""
super(User, self).__init__(args=args)
def poll_job(self, job_id):
"""Given a job poll for its completion and return status.
> The status return is (Boolean, String)
:param job_id: UUID for job
:type job_id: String
:returns: Tuple
"""
job_processing_interval = 0.25
processing_attempts = 0
state_timeout = time.time()
timeout = getattr(self.args, "timeout", 600)
while True:
try:
data = dict(json.loads(self.run(override=job_id)))
except json.JSONDecodeError:
if time.time() - state_timeout > timeout:
state_timeout = time.time()
return (
None,
"Job in an unknown state: {}".format(job_id),
None,
None,
None,
)
else:
time.sleep(1)
continue
else:
data_return = data.get(job_id, dict())
if not data_return:
if time.time() - state_timeout > timeout:
state_timeout = time.time()
return (
None,
"Job in an unknown state: {}".format(job_id),
None,
None,
None,
)
else:
time.sleep(1)
continue
info = data_return.get("INFO")
stdout = data_return.get("STDOUT")
stderr = data_return.get("STDERR")
job_state = data_return.get("PROCESSING", "unknown")
if job_state == self.driver.job_processing:
time.sleep(job_processing_interval)
processing_attempts += 1
if processing_attempts > 20:
job_processing_interval = 1
elif job_state == self.driver.job_failed:
state_timeout = time.time()
return (
False,
"Job Failed: {}".format(job_id),
stdout,
stderr,
info,
)
elif job_state in [
self.driver.job_end,
self.driver.nullbyte,
self.driver.transfer_end,
]:
nodes = len(data_return.get("_nodes"))
if len(data_return.get("FAILED", list())) > 0:
state_timeout = time.time()
return (
False,
"Job Degrated: {}".format(job_id),
stdout,
stderr,
info,
)
elif len(data_return.get("SUCCESS", list())) == nodes:
state_timeout = time.time()
return (
True,
"Job Success: {}".format(job_id),
stdout,
stderr,
info,
)
else:
if time.time() - state_timeout > timeout:
state_timeout = time.time()
return (
True,
"Job Skipped: {}".format(job_id),
stdout,
stderr,
info,
)
else:
time.sleep(1)
else:
if time.time() - state_timeout > timeout:
state_timeout = time.time()
return (
None,
"Job in an unknown state: {}".format(job_id),
stdout,
stderr,
info,
)
else:
time.sleep(1)
def analyze_job(self, job_id):
"""Run analysis on a given job UUID.
:param job_id: Job UUID
:type job_id: String
:returns: String
"""
data = directord.send_data(
socket_path=self.args.socket_path,
data=json.dumps(dict(manage={"job_info": job_id})),
)
item = list(dict(json.loads(data)).values())
if item and not item[0]:
return json.dumps({"job_id_not_found": job_id})
return self.analyze_data(parent_id=job_id, parent_jobs=item)
def analyze_parent(self, parent_id):
"""Run analysis on a given parent UUID.
:param parent_id: Parent UUID
:type parent_id: String
:returns: String
"""
data = directord.send_data(
socket_path=self.args.socket_path,
data=json.dumps(dict(manage={"list_jobs": None})),
)
parent_jobs = list()
if data:
data = dict(json.loads(data))
for value in data.values():
if value["PARENT_JOB_ID"] == parent_id:
parent_jobs.append(value)
if not parent_jobs:
return json.dumps({"parent_id_not_found": parent_id})
return self.analyze_data(parent_id=parent_id, parent_jobs=parent_jobs)
def analyze_all(self):
"""Run analysis on a given parent UUID.
:param parent_id: Parent UUID
:type parent_id: String
:returns: String
"""
data = directord.send_data(
socket_path=self.args.socket_path,
data=json.dumps(dict(manage={"list_jobs": None})),
)
if data:
data = dict(json.loads(data))
return self.analyze_data(
parent_id="All-Jobs", parent_jobs=list(data.values())
)
else:
return json.dumps({"no_jobs_found": "All-Jobs"})
def analyze_data(self, parent_id, parent_jobs):
"""Run Parent analysis.
:param parent_id: Parent UUID
:type parent_id: String
:param parent_jobs: List of all jobs for a given parent.
:type parent_jobs: List
:returns: String
"""
meta = dict(
execution=collections.defaultdict(int),
roundtrip=collections.defaultdict(int),
nodes=set(),
node_successes=collections.defaultdict(int),
node_failures=collections.defaultdict(int),
)
analysis = dict(id=parent_id, total_jobs=len(parent_jobs))
for job in parent_jobs:
for k, v in job.get("_executiontime", dict()).items():
meta["nodes"].add(k)
meta["execution"][k] += v
for k, v in job.get("_roundtripltime", dict()).items():
meta["nodes"].add(k)
meta["roundtrip"][k] += v
for item in job.get("SUCCESS", list()):
meta["node_successes"][item] += 1
for item in job.get("FAILED", list()):
meta["node_failures"][item] += 1
try:
analysis["actual_runtime"] = parent_jobs[-1].get(
"_lasttime", 0
) - parent_jobs[0].get("_createtime", 0)
except IndexError:
return json.dumps({})
analysis["slowest_node_execution"] = max(
meta["execution"], key=meta["execution"].get
)
analysis["slowest_node_roundtrip"] = max(
meta["roundtrip"], key=meta["roundtrip"].get
)
analysis["fastest_node_execution"] = min(
meta["execution"], key=meta["execution"].get
)
analysis["fastest_node_roundtrip"] = min(
meta["roundtrip"], key=meta["roundtrip"].get
)
analysis["combined_execution_time"] = sum(meta["execution"].values())
analysis["total_successes"] = sum(meta["node_successes"].values())
analysis["total_failures"] = sum(meta["node_failures"].values())
analysis["total_node_count"] = len(meta["nodes"])
analysis["total_avg_execution_time"] = (
analysis["combined_execution_time"] / analysis["total_jobs"]
)
return json.dumps(analysis, sort_keys=True)
def run(self, override=None):
"""Send the management command to the server.
:param override: Set the job function regardless of args.
:type override: String
:returns: String
"""
def _cache_dump():
try:
cache = iodict.Cache(
path=os.path.join(self.args.cache_path, "client")
)
print(json.dumps(dict(cache.items()), indent=4))
except KeyError:
pass
execution_map = {
"dump-cache": _cache_dump,
"export-jobs": {"list_jobs": None},
"export-nodes": {"list_nodes": None},
"job-info": {"job_info": override},
"list-jobs": {"list_jobs": None},
"list-nodes": {"list_nodes": None},
"purge-jobs": {"purge_jobs": None},
"purge-nodes": {"purge_nodes": None},
"analyze-parent": self.analyze_parent,
"analyze-job": self.analyze_job,
"analyze-all": self.analyze_all,
}
if override and override in execution_map:
manage = execution_map[override]
if callable(manage):
return manage()
elif isinstance(override, str):
manage = execution_map["job-info"]
else:
for k, v in execution_map.items():
k_obj = k.replace("-", "_")
k_arg = getattr(self.args, k_obj, False)
if k_arg:
if callable(v):
if isinstance(override, str):
return v(override)
elif isinstance(k_arg, str):
return v(k_arg)
else:
return v()
else:
if isinstance(k_arg, str):
v[k_obj] = k_arg
manage = v
break
else:
raise SystemExit("No known management function was defined.")
self.log.debug("Executing Management Command:%s", manage)
return directord.send_data(
socket_path=self.args.socket_path,
data=json.dumps(dict(manage=manage)),
)
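# --- Usage sketch (hypothetical): the args namespace below only carries the
# attributes accessed in this module (socket_path, timeout, cache_path); a real
# deployment builds it with directord's argument parser, and the Interface base
# class may expect additional fields.
if __name__ == "__main__":
    import argparse
    args = argparse.Namespace(
        socket_path="/var/run/directord.sock",  # assumed socket location
        timeout=600,
        cache_path="/var/cache/directord",      # assumed cache location
    )
    manager = Manage(args=args)
    status, message, stdout, stderr, info = manager.poll_job(job_id="<job-uuid>")
    print(status, message)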
| 34.729577
| 78
| 0.482683
|
ca4f7097e8647933e9f69528f5fbac236d7a2858
| 9,176
|
py
|
Python
|
security_scripts/rdk/scimma-s3-bucket-tagged/scimma-s3-bucket-tagged_test.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | 1
|
2021-12-23T05:02:51.000Z
|
2021-12-23T05:02:51.000Z
|
security_scripts/rdk/scimma-s3-bucket-tagged/scimma-s3-bucket-tagged_test.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | null | null | null |
security_scripts/rdk/scimma-s3-bucket-tagged/scimma-s3-bucket-tagged_test.py
|
loftwah/security-scripts
|
eb75b67499d3cb8d87eac114efdc9988a0f56511
|
[
"MIT"
] | 1
|
2021-12-23T05:02:57.000Z
|
2021-12-23T05:02:57.000Z
|
import sys
import unittest
from unittest.mock import MagicMock
from botocore.exceptions import ClientError #dlp
import botocore
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('scimma-s3-bucket-tagged')
class ComplianceTest(unittest.TestCase):
# pull in the parameters as defined in the parameter file.
# read the file as json, but caution. the AWS runtime...
# ... framework Provides "InputParameters" as a string...
# ... The framework anticipates that the string is valid JSON.
# ... Note that the function we are testing received this item
# ... as JSON.
# ... don't thank me, thank this framework.
# import pdb ; pdb.set_trace()  # debugging leftover; commented out so the suite can run non-interactively
import json
params=json.load(open('parameters.json','r'))
rule_parameters = params["Parameters"]["InputParameters"]
assert(type(rule_parameters) == type(""))
# mock the "invoking event" that AWS would generate for a
# configuration change. The %s will hold the specific
# S3 bucket mock configuration for a test use case. That will
# be filled out in the specific test case.
invoking_event_template = '''{
"configurationItem": %s ,
"notificationCreationTime":"2018-07-02T23:05:34.445Z",
"messageType":"ConfigurationItemChangeNotification"
}'''
def setUp(self):
pass
def test_sample(self):
self.assertTrue(True)
def test_empty_tags(self):
RULE.ASSUME_ROLE_MODE = False
with open("S3-empty-tags-CI.json",'r') as f:
CI = f.read()
invoking_event = self.invoking_event_template % CI
response = RULE.lambda_handler(build_lambda_configurationchange_event(invoking_event, self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'mborch-test-bucket-config-item', 'AWS::S3::Bucket'))
assert_successful_evaluation(self, response, resp_expected)
def test_valid_criticality(self):
RULE.ASSUME_ROLE_MODE = False
with open("S3-production-tags-CI.json",'r') as f:
CI = f.read()
invoking_event = self.invoking_event_template % CI
response = RULE.lambda_handler(build_lambda_configurationchange_event(invoking_event, self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'mborch-test-bucket-config-item', 'AWS::S3::Bucket'))
assert_successful_evaluation(self, response, resp_expected)
#def test_notags_at_all(self):
# RULE.ASSUME_ROLE_MODE = False
# response = RULE.lambda_handler(build_lambda_configurationchange_event(self.invoking_event_iam_role_sample, self.rule_parameters), {})
# resp_expected = []
# resp_expected.append(build_expected_response('NOT_APPLICABLE', 'some-resource-id', 'AWS::IAM::Role'))
# assert_successful_evaluation(self, response, resp_expected)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'scimma-s3-bucket-tagged',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'scimma-s3-bucket-tagged',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEqual(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEqual(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEqual(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEqual(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEqual(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEqual(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEqual(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEqual(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEqual(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def xtest_sts_unknown_error(self):
RULE.ASSUME_ROLE_MODE = True
RULE.evaluate_parameters = MagicMock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
#dlp STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
#Dlp {'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def xtest_sts_access_denied(self):
RULE.ASSUME_ROLE_MODE = True
RULE.evaluate_parameters = MagicMock(return_value=True)
#dlp STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
#dlp {'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
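# --- Hypothetical entry point (not in the original file): lets the suite run
# directly via `python scimma-s3-bucket-tagged_test.py`.
if __name__ == '__main__':
    unittest.main()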
| 44.328502
| 142
| 0.696055
|
01ad65f55393d8274b2683359824351fd2bc1a67
| 2,061
|
py
|
Python
|
external/fusesoc-ipyxact/print_businterfaces.py
|
koenenwmn/optimsoc
|
2cbb92af68c17484b0b65c5e837e71e51eaafebc
|
[
"MIT"
] | 5
|
2019-12-31T19:16:10.000Z
|
2020-11-03T15:49:09.000Z
|
external/fusesoc-ipyxact/print_businterfaces.py
|
koenenwmn/optimsoc
|
2cbb92af68c17484b0b65c5e837e71e51eaafebc
|
[
"MIT"
] | null | null | null |
external/fusesoc-ipyxact/print_businterfaces.py
|
koenenwmn/optimsoc
|
2cbb92af68c17484b0b65c5e837e71e51eaafebc
|
[
"MIT"
] | 1
|
2019-12-31T19:16:13.000Z
|
2019-12-31T19:16:13.000Z
|
import sys
from ipyxact.ipyxact import Component
def get_businterfaces(busInterfaces):
ifs = []
for busInterface in busInterfaces.busInterface:
print('='*20)
print('name : ' + busInterface.name)
_vendor = busInterface.busType.vendor
_library = busInterface.busType.library
_name = busInterface.busType.name
_version = busInterface.busType.version
print(busInterface.busType.name)
print('busType : {}/{}/{}/{}'.format(_vendor,
_library,
_name,
_version))
print('abstractionType : {}/{}/{}/{}'.format(_vendor,
_library,
_name,
_version))
for portMap in busInterface.portMaps.portMap:
if portMap.logicalPort.vector:
log_range = '[{}:{}]'.format(portMap.logicalPort.vector.left,
portMap.logicalPort.vector.right)
else:
log_range = ''
if portMap.physicalPort.vector:
phy_range = '[{}:{}]'.format(portMap.physicalPort.vector.left,
portMap.physicalPort.vector.right)
else:
phy_range = ''
print("{}{} => {}{}".format(portMap.logicalPort.name,
log_range,
portMap.physicalPort.name,
phy_range))
return ifs
if __name__ == "__main__":
f = open(sys.argv[1])
component = Component()
component.load(f)
if component.busInterfaces is not None:
ifs = get_businterfaces(component.busInterfaces)
print(ifs)
else:
print("No bus interfaces found in file")
f.close()
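# --- Note: get_businterfaces() above builds `ifs = []` but never appends to it,
# so the final print(ifs) is always an empty list. A minimal variant that
# collects the interface names (an assumption about the intended behaviour):
def list_businterface_names(busInterfaces):
    return [busInterface.name for busInterface in busInterfaces.busInterface]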
| 38.886792
| 79
| 0.45706
|
affe40b10f23873c64162558f6247fd7035092b1
| 28
|
py
|
Python
|
backend/src/config.py
|
PTYin/SQLOJ
|
ed568791224b2d95390230ecd3fbba798a485da9
|
[
"MIT"
] | 7
|
2021-07-05T18:44:31.000Z
|
2021-11-06T05:06:51.000Z
|
backend/src/config.py
|
PTYin/SQLOJ
|
ed568791224b2d95390230ecd3fbba798a485da9
|
[
"MIT"
] | 2
|
2021-11-16T12:08:41.000Z
|
2021-12-10T08:42:35.000Z
|
backend/src/config.py
|
PTYin/sqloj
|
ed568791224b2d95390230ecd3fbba798a485da9
|
[
"MIT"
] | null | null | null |
folder = "/var/lib/sqloj/"
| 9.333333
| 26
| 0.607143
|
ada7bd8a67dad8d045a4fd515ad371b4ea4a7136
| 45,192
|
py
|
Python
|
lib/galaxy/tools/actions/__init__.py
|
tdans1/Use-Galaxy
|
e9f05cb1b11db20a729ac73520f886ee619c6b90
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/tools/actions/__init__.py
|
tdans1/Use-Galaxy
|
e9f05cb1b11db20a729ac73520f886ee619c6b90
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/tools/actions/__init__.py
|
tdans1/Use-Galaxy
|
e9f05cb1b11db20a729ac73520f886ee619c6b90
|
[
"CC-BY-3.0"
] | null | null | null |
import json
import logging
import re
from json import dumps
from six import string_types
from galaxy import model
from galaxy.exceptions import ObjectInvalid
from galaxy.model import LibraryDatasetDatasetAssociation
from galaxy.tools.parameters import update_param
from galaxy.tools.parameters.basic import DataCollectionToolParameter, DataToolParameter, RuntimeValue
from galaxy.tools.parameters.wrapped import WrappedParameters
from galaxy.util import ExecutionTimer
from galaxy.util.none_like import NoneDataset
from galaxy.util.odict import odict
from galaxy.util.template import fill_template
from galaxy.web import url_for
log = logging.getLogger(__name__)
class ToolExecutionCache(object):
""" An object mean to cache calculation caused by repeatedly evaluting
the same tool by the same user with slightly different parameters.
"""
def __init__(self, trans):
self.trans = trans
self.current_user_roles = trans.get_current_user_roles()
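# --- Illustration only (not part of Galaxy): ToolExecutionCache memoises the
# current user's roles once per execution batch, so repeated evaluations of the
# same tool reuse them. A stub trans object (an assumption) makes that concrete.
class _StubTrans(object):
    def get_current_user_roles(self):
        return ["role-a", "role-b"]

assert ToolExecutionCache(_StubTrans()).current_user_roles == ["role-a", "role-b"]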
class ToolAction(object):
"""
The actions to be taken when a tool is run (after parameters have
been converted and validated).
"""
def execute(self, tool, trans, incoming={}, set_output_hid=True):
raise TypeError("Abstract method")
class DefaultToolAction(object):
"""Default tool action is to run an external command"""
def _collect_input_datasets(self, tool, param_values, trans, history, current_user_roles=None):
"""
Collect any dataset inputs from incoming. Returns a mapping from
parameter name to Dataset instance for each tool parameter that is
of the DataToolParameter type.
"""
if current_user_roles is None:
current_user_roles = trans.get_current_user_roles()
input_datasets = odict()
def visitor(input, value, prefix, parent=None, **kwargs):
def process_dataset(data, formats=None):
if not data or isinstance(data, RuntimeValue):
return None
if formats is None:
formats = input.formats
if not data.datatype.matches_any(formats):
# Need to refresh in case this conversion just took place, i.e. input above in tool performed the same conversion
trans.sa_session.refresh(data)
target_ext, converted_dataset = data.find_conversion_destination(formats)
if target_ext:
if converted_dataset:
data = converted_dataset
else:
data = data.get_converted_dataset(trans, target_ext, target_context=parent, history=history)
if not trans.app.security_agent.can_access_dataset(current_user_roles, data.dataset):
raise Exception("User does not have permission to use a dataset (%s) provided for input." % data.id)
return data
if isinstance(input, DataToolParameter):
if isinstance(value, list):
# If there are multiple inputs with the same name, they
# are stored as name1, name2, ...
for i, v in enumerate(value):
processed_dataset = process_dataset(v)
if i == 0:
# Allow copying metadata to output, first item will be source.
input_datasets[prefix + input.name] = processed_dataset
input_datasets[prefix + input.name + str(i + 1)] = processed_dataset
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset(input_datasets[prefix + input.name + str(i + 1)], conversion_datatypes)
if not new_data or new_data.datatype.matches_any(conversion_datatypes):
input_datasets[prefix + conversion_name + str(i + 1)] = new_data
conversions.append((conversion_name, new_data))
else:
raise Exception('A path for explicit datatype conversion has not been found: %s --/--> %s' % (input_datasets[prefix + input.name + str(i + 1)].extension, conversion_extensions))
if parent:
parent[input.name][i] = input_datasets[prefix + input.name + str(i + 1)]
for conversion_name, conversion_data in conversions:
# allow explicit conversion to be stored in job_parameter table
parent[conversion_name][i] = conversion_data.id # a more robust way to determine JSONable value is desired
else:
param_values[input.name][i] = input_datasets[prefix + input.name + str(i + 1)]
for conversion_name, conversion_data in conversions:
# allow explicit conversion to be stored in job_parameter table
param_values[conversion_name][i] = conversion_data.id # a more robust way to determine JSONable value is desired
else:
input_datasets[prefix + input.name] = process_dataset(value)
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset(input_datasets[prefix + input.name], conversion_datatypes)
if not new_data or new_data.datatype.matches_any(conversion_datatypes):
input_datasets[prefix + conversion_name] = new_data
conversions.append((conversion_name, new_data))
else:
raise Exception('A path for explicit datatype conversion has not been found: %s --/--> %s' % (input_datasets[prefix + input.name].extension, conversion_extensions))
target_dict = parent
if not target_dict:
target_dict = param_values
target_dict[input.name] = input_datasets[prefix + input.name]
for conversion_name, conversion_data in conversions:
# allow explicit conversion to be stored in job_parameter table
target_dict[conversion_name] = conversion_data.id # a more robust way to determine JSONable value is desired
elif isinstance(input, DataCollectionToolParameter):
if not value:
return
dataset_instances = []
if hasattr(value, 'child_collection'):
# if we are mapping a collection over a tool, we only require the child_collection
dataset_instances = value.child_collection.dataset_instances
else:
# else the tool takes a collection as input so we need everything
dataset_instances = value.collection.dataset_instances
for i, v in enumerate(dataset_instances):
data = v
if not trans.app.security_agent.can_access_dataset(current_user_roles, data.dataset):
raise Exception("User does not have permission to use a dataset (%s) provided for input." % data.id)
# Skipping implicit conversion stuff for now, revisit at
# some point and figure out if implicitly converting a
# dataset collection makes sense.
input_datasets[prefix + input.name + str(i + 1)] = data
tool.visit_inputs(param_values, visitor)
return input_datasets
def collect_input_dataset_collections(self, tool, param_values):
def append_to_key(the_dict, key, value):
if key not in the_dict:
the_dict[key] = []
the_dict[key].append(value)
input_dataset_collections = dict()
def visitor(input, value, prefix, parent=None, prefixed_name=None, **kwargs):
if isinstance(input, DataToolParameter):
values = value
if not isinstance(values, list):
values = [value]
for i, value in enumerate(values):
if isinstance(value, model.HistoryDatasetCollectionAssociation):
append_to_key(input_dataset_collections, prefixed_name, (value, True))
target_dict = parent
if not target_dict:
target_dict = param_values
# This is just a DataToolParameter, so replace this
# collection with individual datasets. The database will still
# record the collection, which should be enough for workflow
# extraction and tool rerun.
dataset_instances = value.collection.dataset_instances
if i == 0:
target_dict[input.name] = []
target_dict[input.name].extend(dataset_instances)
elif isinstance(input, DataCollectionToolParameter):
append_to_key(input_dataset_collections, prefix + input.name, (value, False))
tool.visit_inputs(param_values, visitor)
return input_dataset_collections
def _check_access(self, tool, trans):
assert tool.allow_user_access(trans.user), "User (%s) is not allowed to access this tool." % (trans.user)
def _collect_inputs(self, tool, trans, incoming, history, current_user_roles):
""" Collect history as well as input datasets and collections. """
app = trans.app
# Set history.
if not history:
history = tool.get_default_history_by_trans(trans, create=True)
if history not in trans.sa_session:
history = trans.sa_session.query(app.model.History).get(history.id)
# Track input dataset collections - but replace with simply lists so collect
# input datasets can process these normally.
inp_dataset_collections = self.collect_input_dataset_collections(tool, incoming)
# Collect any input datasets from the incoming parameters
inp_data = self._collect_input_datasets(tool, incoming, trans, history=history, current_user_roles=current_user_roles)
return history, inp_data, inp_dataset_collections
def execute(self, tool, trans, incoming={}, return_job=False, set_output_hid=True, history=None, job_params=None, rerun_remap_job_id=None, mapping_over_collection=False, execution_cache=None):
"""
Executes a tool, creating job and tool outputs, associating them, and
submitting the job to the job queue. If history is not specified, use
trans.history as destination for tool's output datasets.
"""
self._check_access(tool, trans)
app = trans.app
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections = self._collect_inputs(tool, trans, incoming, history, current_user_roles)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# format='input" previously would give you a random extension from
# the input extensions, now it should just give "input" as the output
# format.
input_ext = 'data' if tool.profile < 16.04 else "input"
input_dbkey = incoming.get("dbkey", "?")
preserved_tags = {}
for name, data in reversed(inp_data.items()):
if not data:
data = NoneDataset(datatypes_registry=app.datatypes_registry)
continue
# Convert LDDA to an HDA.
if isinstance(data, LibraryDatasetDatasetAssociation):
data = data.to_history_dataset_association(None)
inp_data[name] = data
if tool.profile < 16.04:
input_ext = data.ext
if data.dbkey not in [None, '?']:
input_dbkey = data.dbkey
identifier = getattr(data, "element_identifier", None)
if identifier is not None:
incoming["%s|__identifier__" % name] = identifier
for tag in [t for t in data.tags if t.user_tname == 'name']:
preserved_tags[tag.value] = tag
# Collect chromInfo dataset and add as parameters to incoming
(chrom_info, db_dataset) = app.genome_builds.get_chrom_info(input_dbkey, trans=trans, custom_build_hack_get_len_from_fasta_conversion=tool.id != 'CONVERTER_fasta_to_len')
if db_dataset:
inp_data.update({"chromInfo": db_dataset})
incoming["chromInfo"] = chrom_info
# Determine output dataset permission/roles list
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
output_permissions = app.security_agent.guess_derived_permissions_for_datasets(existing_datasets)
else:
# No valid inputs, we will use history defaults
output_permissions = app.security_agent.history_get_default_permissions(history)
# Add the dbkey to the incoming parameters
incoming["dbkey"] = input_dbkey
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming, inp_data)
out_data = odict()
input_collections = dict((k, v[0][0]) for k, v in inp_dataset_collections.items())
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
mapping_over_collection=mapping_over_collection,
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
)
# Keep track of parent / child relationships, we'll create all the
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
object_store_populator = ObjectStorePopulator(app)
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
# What is the following hack for? Need to document under what
# conditions the following can occur. (james@bx.psu.edu)
# HACK: the output data has already been created
# this happens i.e. as a result of the async controller
if name in incoming:
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(dataid)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext
)
data = app.model.HistoryDatasetAssociation(extension=ext, create_dataset=True, flush=False)
if hidden is None:
hidden = output.hidden
if hidden:
data.visible = False
trans.sa_session.add(data)
trans.app.security_agent.set_all_dataset_permissions(data.dataset, output_permissions, new=True)
for _, tag in preserved_tags.items():
data.tags.append(tag.copy())
# Must flush before setting object store id currently.
# TODO: optimize this.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be necessary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(output, data, tool, on_text, trans, incoming, history, wrapped_params.params, job_params)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(output, data, tool, on_text, trans, incoming, history, wrapped_params.params, job_params)
# Flush all datasets at once.
return data
for name, output in tool.outputs.items():
if not filter_output(output, incoming):
if output.collection:
collections_manager = app.dataset_collections_service
element_identifiers = []
known_outputs = output.known_outputs(input_collections, collections_manager.type_registry)
# Just to echo TODO elsewhere - this should be restructured to allow
# nested collections.
for output_part_def in known_outputs:
# Add elements to top-level collection, unless nested...
current_element_identifiers = element_identifiers
current_collection_type = output.structure.collection_type
for parent_id in (output_part_def.parent_ids or []):
# TODO: replace following line with formal abstractions for doing this.
current_collection_type = ":".join(current_collection_type.split(":")[1:])
name_to_index = dict((value["name"], index) for (index, value) in enumerate(current_element_identifiers))
if parent_id not in name_to_index:
if parent_id not in current_element_identifiers:
index = len(current_element_identifiers)
current_element_identifiers.append(dict(
name=parent_id,
collection_type=current_collection_type,
src="new_collection",
element_identifiers=[],
))
else:
index = name_to_index[parent_id]
current_element_identifiers = current_element_identifiers[index]["element_identifiers"]
effective_output_name = output_part_def.effective_output_name
element = handle_output(effective_output_name, output_part_def.output_def, hidden=True)
# TODO: this shouldn't exist in the top-level of the history at all
# but for now we are still working around that by hiding the contents
# there.
# Following hack causes the dataset to not be added to the history...
child_dataset_names.add(effective_output_name)
history.add_dataset(element, set_hid=set_output_hid, quota=False)
trans.sa_session.add(element)
trans.sa_session.flush()
current_element_identifiers.append({
"__object__": element,
"name": output_part_def.element_identifier,
})
log.info(element_identifiers)
if output.dynamic_structure:
assert not element_identifiers # known_outputs must have been empty
element_kwds = dict(elements=collections_manager.ELEMENTS_UNINITIALIZED)
else:
element_kwds = dict(element_identifiers=element_identifiers)
output_collections.create_collection(
output=output,
name=name,
tags=preserved_tags,
**element_kwds
)
else:
handle_output_timer = ExecutionTimer()
handle_output(name, output)
log.info("Handled output named %s for tool %s %s" % (name, tool.id, handle_output_timer))
add_datasets_timer = ExecutionTimer()
# Add all the top-level (non-child) datasets to the history unless otherwise specified
datasets_to_persist = []
for name in out_data.keys():
if name not in child_dataset_names and name not in incoming: # don't add children; or already existing datasets, i.e. async created
data = out_data[name]
datasets_to_persist.append(data)
# Set HID and add to history.
# This is brand new and certainly empty so don't worry about quota.
# TOOL OPTIMIZATION NOTE - from the above loop to the job create below, 99%+
# of execution time happens within history.add_datasets.
history.add_datasets(trans.sa_session, datasets_to_persist, set_hid=set_output_hid, quota=False, flush=False)
# Add all the children to their parents
for parent_name, child_name in parent_to_child_pairs:
parent_dataset = out_data[parent_name]
child_dataset = out_data[child_name]
parent_dataset.children.append(child_dataset)
log.info("Added output datasets to history %s" % add_datasets_timer)
job_setup_timer = ExecutionTimer()
# Create the job object
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._record_inputs(trans, tool, job, incoming, inp_data, inp_dataset_collections, current_user_roles)
self._record_outputs(job, out_data, output_collections)
job.object_store_id = object_store_populator.object_store_id
if job_params:
job.params = dumps(job_params)
job.set_handler(tool.get_job_handler(job_params))
trans.sa_session.add(job)
# Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
# This functionality requires tracking jobs in the database.
if app.config.track_jobs_in_database and rerun_remap_job_id is not None:
try:
old_job = trans.sa_session.query(app.model.Job).get(rerun_remap_job_id)
assert old_job is not None, '(%s/%s): Old job id is invalid' % (rerun_remap_job_id, job.id)
assert old_job.tool_id == job.tool_id, '(%s/%s): Old tool id (%s) does not match rerun tool id (%s)' % (old_job.id, job.id, old_job.tool_id, job.tool_id)
if trans.user is not None:
assert old_job.user_id == trans.user.id, '(%s/%s): Old user id (%s) does not match rerun user id (%s)' % (old_job.id, job.id, old_job.user_id, trans.user.id)
elif trans.user is None and type(galaxy_session) == trans.model.GalaxySession:
assert old_job.session_id == galaxy_session.id, '(%s/%s): Old session id (%s) does not match rerun session id (%s)' % (old_job.id, job.id, old_job.session_id, galaxy_session.id)
else:
raise Exception('(%s/%s): Remapping via the API is not (yet) supported' % (old_job.id, job.id))
# Duplicate PJAs before remap.
for pjaa in old_job.post_job_actions:
job.add_post_job_action(pjaa.post_job_action)
for jtod in old_job.output_datasets:
for (job_to_remap, jtid) in [(jtid.job, jtid) for jtid in jtod.dataset.dependent_jobs]:
if (trans.user is not None and job_to_remap.user_id == trans.user.id) or (trans.user is None and job_to_remap.session_id == galaxy_session.id):
if job_to_remap.state == job_to_remap.states.PAUSED:
job_to_remap.state = job_to_remap.states.NEW
for hda in [dep_jtod.dataset for dep_jtod in job_to_remap.output_datasets]:
if hda.state == hda.states.PAUSED:
hda.state = hda.states.NEW
hda.info = None
input_values = dict([(p.name, json.loads(p.value)) for p in job_to_remap.parameters])
update_param(jtid.name, input_values, str(out_data[jtod.name].id))
for p in job_to_remap.parameters:
p.value = json.dumps(input_values[p.name])
jtid.dataset = out_data[jtod.name]
jtid.dataset.hid = jtod.dataset.hid
log.info('Job %s input HDA %s remapped to new HDA %s' % (job_to_remap.id, jtod.dataset.id, jtid.dataset.id))
trans.sa_session.add(job_to_remap)
trans.sa_session.add(jtid)
jtod.dataset.visible = False
trans.sa_session.add(jtod)
except Exception:
log.exception('Cannot remap rerun dependencies.')
log.info("Setup for job %s complete, ready to flush %s" % (job.log_str(), job_setup_timer))
job_flush_timer = ExecutionTimer()
trans.sa_session.flush()
log.info("Flushed transaction for job %s %s" % (job.log_str(), job_flush_timer))
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
# Examples include tools that redirect to other applications ( epigraph ). These special tools must
# include something that can be retrieved from the params ( e.g., REDIRECT_URL ) to keep the job
# from being queued.
if 'REDIRECT_URL' in incoming:
# Get the dataset - there should only be 1
for name in inp_data.keys():
dataset = inp_data[name]
redirect_url = tool.parse_redirect_url(dataset, incoming)
# GALAXY_URL should be included in the tool params to enable the external application
# to send back to the current Galaxy instance
GALAXY_URL = incoming.get('GALAXY_URL', None)
assert GALAXY_URL is not None, "GALAXY_URL parameter missing in tool config."
redirect_url += "&GALAXY_URL=%s" % GALAXY_URL
# Job should not be queued, so set state to ok
job.set_state(app.model.Job.states.OK)
job.info = "Redirected to: %s" % redirect_url
trans.sa_session.add(job)
trans.sa_session.flush()
trans.response.send_redirect(url_for(controller='tool_runner', action='redirect', redirect_url=redirect_url))
else:
# Put the job in the queue if tracking in memory
app.job_queue.put(job.id, job.tool_id)
trans.log_event("Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id)
return job, out_data
def _wrapped_params(self, trans, tool, incoming, input_datasets=None):
wrapped_params = WrappedParameters(trans, tool, incoming, input_datasets=input_datasets)
return wrapped_params
def _get_on_text(self, inp_data):
input_names = []
for name, data in reversed(inp_data.items()):
if getattr(data, "hid", None):
input_names.append('data %s' % data.hid)
return on_text_for_names(input_names)
def _new_job_for_session(self, trans, tool, history):
job = trans.app.model.Job()
galaxy_session = None
if hasattr(trans, "get_galaxy_session"):
galaxy_session = trans.get_galaxy_session()
# If we're submitting from the API, there won't be a session.
if type(galaxy_session) == trans.model.GalaxySession:
job.session_id = galaxy_session.id
if trans.user is not None:
job.user_id = trans.user.id
job.history_id = history.id
job.tool_id = tool.id
try:
# For backward compatibility, some tools may not have versions yet.
job.tool_version = tool.version
except:
job.tool_version = "1.0.0"
return job, galaxy_session
def _record_inputs(self, trans, tool, job, incoming, inp_data, inp_dataset_collections, current_user_roles):
# FIXME: Don't need all of incoming here, just the defined parameters
# from the tool. We need to deal with tools that pass all post
# parameters to the command as a special case.
reductions = {}
for name, dataset_collection_info_pairs in inp_dataset_collections.items():
for (dataset_collection, reduced) in dataset_collection_info_pairs:
if reduced:
if name not in reductions:
reductions[name] = []
reductions[name].append(dataset_collection)
# TODO: verify can have multiple with same name, don't want to lose traceability
job.add_input_dataset_collection(name, dataset_collection)
# If an input collection is a reduction, we expanded it for dataset security, type
# checking, and such, but the persisted input must be the original collection
# so we can recover things like element identifier during tool command evaluation.
def restore_reduction_visitor(input, value, prefix, parent=None, prefixed_name=None, **kwargs):
if prefixed_name in reductions and isinstance(input, DataToolParameter):
target_dict = parent
if not target_dict:
target_dict = incoming
target_dict[input.name] = []
for reduced_collection in reductions[prefixed_name]:
target_dict[input.name].append({'id': reduced_collection.id, 'src': 'hdca'})
if reductions:
tool.visit_inputs(incoming, restore_reduction_visitor)
for name, value in tool.params_to_strings(incoming, trans.app).items():
job.add_parameter(name, value)
self._check_input_data_access(trans, job, inp_data, current_user_roles)
def _record_outputs(self, job, out_data, output_collections):
out_collections = output_collections.out_collections
out_collection_instances = output_collections.out_collection_instances
for name, dataset in out_data.items():
job.add_output_dataset(name, dataset)
for name, dataset_collection in out_collections.items():
job.add_implicit_output_dataset_collection(name, dataset_collection)
for name, dataset_collection_instance in out_collection_instances.items():
job.add_output_dataset_collection(name, dataset_collection_instance)
def _check_input_data_access(self, trans, job, inp_data, current_user_roles):
access_timer = ExecutionTimer()
for name, dataset in inp_data.items():
if dataset:
if not trans.app.security_agent.can_access_dataset(current_user_roles, dataset.dataset):
raise Exception("User does not have permission to use a dataset (%s) provided for input." % dataset.id)
if dataset in trans.sa_session:
job.add_input_dataset(name, dataset=dataset)
else:
job.add_input_dataset(name, dataset_id=dataset.id)
else:
job.add_input_dataset(name, None)
job_str = job.log_str()
log.info("Verified access to datasets for %s %s" % (job_str, access_timer))
def get_output_name(self, output, dataset, tool, on_text, trans, incoming, history, params, job_params):
if output.label:
params['tool'] = tool
params['on_string'] = on_text
return fill_template(output.label, context=params)
else:
return self._get_default_data_name(dataset, tool, on_text=on_text, trans=trans, incoming=incoming, history=history, params=params, job_params=job_params)
def set_metadata_defaults(self, output, dataset, tool, on_text, trans, incoming, history, params, job_params):
"""
This allows mapping names of input files to metadata default values. Example:
<data format="tabular" name="output" label="Tabular output, aggregates data from individual_inputs" >
<actions>
<action name="column_names" type="metadata" default="${','.join([input.name for input in $individual_inputs ])}" />
</actions>
</data>
"""
if output.actions:
for action in output.actions.actions:
if action.tag == "metadata" and action.default:
metadata_new_value = fill_template(action.default, context=params).split(",")
dataset.metadata.__setattr__(str(action.name), metadata_new_value)
def _get_default_data_name(self, dataset, tool, on_text=None, trans=None, incoming=None, history=None, params=None, job_params=None, **kwd):
name = tool.name
if on_text:
name += (" on " + on_text)
return name
class ObjectStorePopulator(object):
""" Small helper for interacting with the object store and making sure all
datasets from a job end up with the same object_store_id.
"""
def __init__(self, app):
self.object_store = app.object_store
self.object_store_id = None
def set_object_store_id(self, data):
# Create an empty file immediately. The first dataset will be
# created in the "default" store, all others will be created in
# the same store as the first.
data.dataset.object_store_id = self.object_store_id
try:
self.object_store.create(data.dataset)
except ObjectInvalid:
raise Exception('Unable to create output dataset: object store is full')
self.object_store_id = data.dataset.object_store_id # these will be the same thing after the first output
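# Hedged usage sketch (not part of the original module): the populator is meant to be
# created once per tool execution and fed every output HDA, so the first dataset picks
# the object store and the rest follow it. The variable names below are illustrative only.
#
#     populator = ObjectStorePopulator(app)
#     for hda in created_output_hdas:         # hypothetical list of output HDAs
#         populator.set_object_store_id(hda)  # later calls reuse the first store id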
class OutputCollections(object):
""" Keeps track of collections (DC or HDCA) created by actions.
Actions do fairly different things depending on whether we are creating
just part of a collection or a whole output collection (mapping_over_collection
parameter).
"""
def __init__(self, trans, history, tool, tool_action, input_collections, mapping_over_collection, on_text, incoming, params, job_params):
self.trans = trans
self.history = history
self.tool = tool
self.tool_action = tool_action
self.input_collections = input_collections
self.mapping_over_collection = mapping_over_collection
self.on_text = on_text
self.incoming = incoming
self.params = params
self.job_params = job_params
self.out_collections = {}
self.out_collection_instances = {}
def create_collection(self, output, name, tags=None, **element_kwds):
input_collections = self.input_collections
collections_manager = self.trans.app.dataset_collections_service
collection_type = output.structure.collection_type
if collection_type is None:
collection_type_source = output.structure.collection_type_source
if collection_type_source is None:
# TODO: Not a new problem, but this should be determined
# sooner.
raise Exception("Could not determine collection type to create.")
if collection_type_source not in input_collections:
raise Exception("Could not find collection type source with name [%s]." % collection_type_source)
collection_type = input_collections[collection_type_source].collection.collection_type
if "elements" in element_kwds:
elements = element_kwds["elements"]
if hasattr(elements, "items"): # else it is ELEMENTS_UNINITIALIZED object.
for key, value in elements.items():
# Either a HDA (if) or a DatasetCollection (the else)
if getattr(value, "history_content_type", None) == "dataset":
assert value.history is not None
else:
for dataset in value.dataset_instances:
assert dataset.history is not None
if self.mapping_over_collection:
dc = collections_manager.create_dataset_collection(
self.trans,
collection_type=collection_type,
**element_kwds
)
self.out_collections[name] = dc
else:
hdca_name = self.tool_action.get_output_name(
output,
None,
self.tool,
self.on_text,
self.trans,
self.incoming,
self.history,
self.params,
self.job_params,
)
hdca = collections_manager.create(
self.trans,
self.history,
name=hdca_name,
collection_type=collection_type,
trusted_identifiers=True,
tags=tags,
**element_kwds
)
# name here is name of the output element - not name
# of the hdca.
self.out_collection_instances[name] = hdca
def on_text_for_names(input_names):
# input_names may contain duplicates... this is because the first value in
# multiple input dataset parameters will appear twice: once as param_name
# and once as param_name1.
unique_names = []
for name in input_names:
if name not in unique_names:
unique_names.append(name)
input_names = unique_names
# Build name for output datasets based on tool name and input names
if len(input_names) == 1:
on_text = input_names[0]
elif len(input_names) == 2:
on_text = '%s and %s' % tuple(input_names[0:2])
elif len(input_names) == 3:
on_text = '%s, %s, and %s' % tuple(input_names[0:3])
elif len(input_names) > 3:
on_text = '%s, %s, and others' % tuple(input_names[0:2])
else:
on_text = ""
return on_text
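# Examples of the naming behaviour above (derived directly from the code):
#     on_text_for_names(["data 1"])                      -> "data 1"
#     on_text_for_names(["data 1", "data 2"])            -> "data 1 and data 2"
#     on_text_for_names(["data 1", "data 2", "data 3"])  -> "data 1, data 2, and data 3"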
def filter_output(output, incoming):
for filter in output.filters:
try:
if not eval(filter.text.strip(), globals(), incoming):
return True # do not create this dataset
except Exception as e:
log.debug('Dataset output filter failed: %s' % e)
return False
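# Example of how an output filter evaluates (the parameter name is hypothetical):
# a filter with text "make_report == 'yes'" lets the dataset be created only when
# incoming["make_report"] == "yes"; if the expression raises, the error is logged
# and the output is kept anyway.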
def determine_output_format(output, parameter_context, input_datasets, input_dataset_collections, random_input_ext):
""" Determines the output format for a dataset based on an abstract
description of the output (galaxy.tools.parser.ToolOutput), the parameter
wrappers, a map of the input datasets (name => HDA), and the last input
extensions in the tool form.
TODO: Don't deal with XML here - move this logic into ToolOutput.
TODO: Make the input extension used deterministic instead of random.
"""
# the type should match the input
ext = output.format
if ext == "input":
ext = random_input_ext
format_source = output.format_source
if format_source is not None and format_source in input_datasets:
try:
input_dataset = input_datasets[output.format_source]
input_extension = input_dataset.ext
ext = input_extension
except Exception:
pass
elif format_source is not None:
if re.match(r"^[^\[\]]*\[[^\[\]]*\]$", format_source):
collection_name, element_index = format_source[0:-1].split("[")
# Treat as json to interpret "forward" vs 0 with type
# Make it feel more like Python, single quote better in XML also.
element_index = element_index.replace("'", '"')
element_index = json.loads(element_index)
if collection_name in input_dataset_collections:
try:
input_collection = input_dataset_collections[collection_name][0][0]
input_collection_collection = input_collection.collection
try:
input_element = input_collection_collection[element_index]
except KeyError:
for element in input_collection_collection.dataset_elements:
if element.element_identifier == element_index:
input_element = element
break
input_dataset = input_element.element_object
input_extension = input_dataset.ext
ext = input_extension
except Exception as e:
log.debug("Exception while trying to determine format_source: %s", e)
pass
# process change_format tags
if output.change_format is not None:
new_format_set = False
for change_elem in output.change_format:
for when_elem in change_elem.findall('when'):
check = when_elem.get('input', None)
if check is not None:
try:
if '$' not in check:
# allow a simple name or more complex specifications
check = '${%s}' % check
if str(fill_template(check, context=parameter_context)) == when_elem.get('value', None):
ext = when_elem.get('format', ext)
except: # bad tag input value; possibly referencing a param within a different conditional when block or other nonexistent grouping construct
continue
else:
check = when_elem.get('input_dataset', None)
if check is not None:
check = input_datasets.get(check, None)
# At this point check is a HistoryDatasetAssociation object.
check_format = when_elem.get('format', ext)
check_value = when_elem.get('value', None)
check_attribute = when_elem.get('attribute', None)
if check is not None and check_value is not None and check_attribute is not None:
# See if the attribute to be checked belongs to the HistoryDatasetAssociation object.
if hasattr(check, check_attribute):
if str(getattr(check, check_attribute)) == str(check_value):
ext = check_format
new_format_set = True
break
# See if the attribute to be checked belongs to the metadata associated with the
# HistoryDatasetAssociation object.
if check.metadata is not None:
metadata_value = check.metadata.get(check_attribute, None)
if metadata_value is not None:
if str(metadata_value) == str(check_value):
ext = check_format
new_format_set = True
break
if new_format_set:
break
return ext
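# Illustrative format_source values handled above (the names are made up):
#     format_source="input1"                  -> copy the extension of the HDA named input1
#     format_source="paired_reads['forward']" -> copy it from the 'forward' element of the
#                                                input collection named paired_reads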
| 52.245087
| 209
| 0.602695
|
67d290e3642256f1bb42854844ef3e6706394461
| 15,016
|
py
|
Python
|
dropboxhandler/dropboxhandler.py
|
qbicsoftware/dropboxhandler
|
2a774975f122b86a6310eb5ed8e6a64edb2191a9
|
[
"MIT"
] | null | null | null |
dropboxhandler/dropboxhandler.py
|
qbicsoftware/dropboxhandler
|
2a774975f122b86a6310eb5ed8e6a64edb2191a9
|
[
"MIT"
] | 4
|
2015-10-15T12:44:08.000Z
|
2021-07-19T11:10:34.000Z
|
dropboxhandler/dropboxhandler.py
|
qbicsoftware/dropboxhandler
|
2a774975f122b86a6310eb5ed8e6a64edb2191a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf8
from __future__ import print_function
import re
import os
import time
import shutil
import logging
import glob
import traceback
import concurrent.futures
from os.path import join as pjoin
from . import fstools
if not hasattr(__builtins__, 'FileExistsError'):
FileExistsError = OSError
if not hasattr(__builtins__, 'FileNotFoundError'):
FileNotFoundError = OSError
logger = logging.getLogger('dropboxhandler.handler')
BARCODE_REGEX = "Q[A-X0-9]{4}[0-9]{3}[A-X][A-X0-9]"
FINISHED_MARKER = ".MARKER_is_finished_"
ERROR_MARKER = "MARKER_error_"
STARTED_MARKER = "MARKER_started_"
def is_valid_barcode(barcode):
"""Check if barcode is a valid OpenBis barcode."""
if re.match('^' + BARCODE_REGEX + '$', barcode) is None:
return False
csum = sum(ord(c) * (i + 1) for i, c in enumerate(barcode[:-1]))
csum = csum % 34 + 48
if csum > 57:
csum += 7
if barcode[-1] == chr(csum):
return True
return False
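# Worked example (uses the barcode from the generate_openbis_name doctest below):
#     >>> is_valid_barcode("QJFDC010EU")   # checksum character 'U' matches
#     True
#     >>> is_valid_barcode("QJFDC010EX")   # wrong checksum character
#     False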
def extract_barcode(path):
"""Extract an OpenBis barcode from the file name.
If a barcode is found, return it. Raise ValueError if no barcode,
or more than one barcode has been found.
Barcodes must match BARCODE_REGEX: Q[A-X0-9]{4}[0-9]{3}[A-X][A-X0-9]
"""
stem, suffix = os.path.splitext(os.path.basename(path))
barcodes = re.findall(BARCODE_REGEX, stem)
valid_barcodes = [b for b in barcodes if is_valid_barcode(b)]
if len(barcodes) != len(valid_barcodes):
logger.warn("Invalid barcode in file name: %s",
set(barcodes) - set(valid_barcodes))
if not barcodes:
raise ValueError("no barcodes found")
if len(set(barcodes)) > 1:
raise ValueError("more than one barcode in filename")
return barcodes[0]
def generate_openbis_name(path):
r"""Generate a sane file name from the input file.
Copy the barcode to the front and remove invalid characters.
Raise ValueError if the filename does not contain a barcode.
Example
-------
>>> path = "stüpid\tname(<QJFDC010EU.).>ä.raW"
>>> generate_openbis_name(path)
'QJFDC010EU_stpidname.raw'
"""
cleaned_name = fstools.clean_filename(path)
try:
barcode = extract_barcode(cleaned_name)
except ValueError:
logger.warn("No or more than one barcode in file: %s. Trying to find respective rule.", path)
return cleaned_name
name = cleaned_name.replace(barcode, "")
if name.startswith('_'):
name = name[1:]
return barcode + '_' + name
class FileHandler(concurrent.futures.ThreadPoolExecutor):
"""Handle incoming files.
Parameters
----------
target_dirs: dict
A dictionary containing the paths to output directories. Must
have the keys `storage`, `manual`.
openbis_dropboxes: list
A list of pairs (regexp, path). Incoming files that contain
a valid QBiC barcode will be stored in the path with the first
matching regexp. If no regexp matches, throw an error.
tmpdir: path, optional
A basepath for temporary files. Must be on the same filesystem
as the source and target directories. Default is system
default temp directory.
perms: dict, optional
A dict with keys `userid`, `groupid`, `filemode` and `dirmode`.
Input files that do not match these will throw an error.
"""
def __init__(self, openbis_dropboxes, storage, manual,
tmpdir=None, max_workers=5, checksum=True):
super(FileHandler, self).__init__(max_workers)
self._openbis_dropboxes = openbis_dropboxes
self._storage_dir = storage
self._manual_dir = manual
self._tmpdir = tmpdir
def _find_openbis_dest(self, origin, name, is_dir):
for conf in self._openbis_dropboxes:
regexp, path = conf['regexp'], conf['path']
if 'origin' in conf and origin not in conf['origin']:
continue
if is_dir and not conf.get('match_dir', True):
continue
if not is_dir and not conf.get('match_file', True):
continue
if re.match(regexp, name):
logger.debug("file %s matches regex %s", name, regexp)
return os.path.join(path, name)
logger.error("File with barcode, but does not match " +
"an openbis dropbox: %s", name)
raise ValueError('No known openbis dropbox for file %s' % name)
def to_openbis(self, origin, file, perms=None):
"""Sort this file or directory to the openbis dropboxes.
If the filename does not include an openbis barcode, raise ValueError.
file, openbis_dir and tmpdir must all be on the same file system.
Two additional files will be created: `{name}.origlabfilename`,
which contains the original name of the file, and `{name}.sha256sum`, which
contains a checksum of the new file or directory.
"""
file = os.path.abspath(file)
base, orig_name = os.path.split(file)
openbis_name = generate_openbis_name(file)
logger.info("Exporting %s to OpenBis as %s", file, openbis_name)
is_dir = os.path.isdir(file)
dest = self._find_openbis_dest(origin, openbis_name, is_dir)
# Put all related files inside a directory, so that openbis
# can process them together.
dest_file = os.path.join(dest, openbis_name)
dest_dir = os.path.split(dest)[0]
os.mkdir(dest)
logger.debug("Write file to openbis dropbox %s" % dest)
fstools.recursive_copy(
file, dest_file, tmpdir=self._tmpdir, perms=perms
)
labname_file = "%s.origlabfilename" % openbis_name
with fstools.create_open(os.path.join(dest, labname_file)) as f:
f.write(orig_name)
fstools.write_checksum(dest_file)
source_file = os.path.join(dest, "source_dropbox.txt")
with fstools.create_open(source_file) as f:
f.write(origin)
# tell openbis that we are finished copying
for name in [openbis_name]:
marker = os.path.join(dest_dir, FINISHED_MARKER + name)
with fstools.create_open(marker):
pass
def to_storage(self, origin, file, perms=None):
"""Store file in a subdir of storage_dir with the name of the project.
The first 5 characters of the barcode are the project name. If no barcode
is found, it will use the name 'other'.
"""
file = os.path.abspath(file)
try:
project = extract_barcode(file)[:5]
name = generate_openbis_name(file)
except ValueError:
project = 'other'
name = fstools.clean_filename(file)
dest = os.path.join(self._storage_dir, project, name)
try:
os.mkdir(os.path.join(self._storage_dir, project))
except FileExistsError:
pass
fstools.recursive_copy(file, dest, tmpdir=self._tmpdir, perms=perms)
fstools.write_checksum(dest)
def to_manual(self, origin, file, perms=None):
"""Copy this file to the directory for manual intervention."""
file = os.path.abspath(file)
base, name = os.path.split(file)
cleaned_name = fstools.clean_filename(file)
manual_dir = os.path.join(self._manual_dir, origin)
if not os.path.exists(manual_dir):
os.mkdir(manual_dir)
dest_dir = os.path.join(manual_dir, cleaned_name)
os.mkdir(dest_dir)
dest = os.path.join(dest_dir, cleaned_name)
fstools.recursive_copy(file, dest, tmpdir=self._tmpdir, perms=perms)
logger.warn("manual intervention is required for %s", dest_dir)
# store the original file name
orig_file = os.path.join(dest_dir,
cleaned_name + '.origlabfilename')
with fstools.create_open(orig_file) as f:
f.write(name)
source_file = os.path.join(dest_dir, "source_dropbox.txt")
with fstools.create_open(source_file) as f:
f.write(origin)
fstools.write_checksum(dest)
def _handle_file(self, origin, file, perms=None):
"""Figure out to which dirs file should be copied."""
try:
file = os.path.abspath(file)
logger.debug("processing file " + str(file))
if perms is not None:
fstools.check_permissions(file, **perms)
try:
self.to_openbis(origin, file, perms=perms)
self.to_storage(origin, file, perms=perms)
except ValueError:
self.to_manual(origin, file, perms=perms)
logger.debug("Removing original file %s", file)
try:
if os.path.isfile(file):
os.unlink(file)
elif os.path.isdir(file):
shutil.rmtree(str(file))
else:
logger.error("Could not remove file, it is not a " +
"regular file: %s", file)
except Exception:
logger.error("Could not remove original file %s after " +
"handeling the file correctly", file)
raise
except BaseException:
incoming, filename = os.path.split(file)
error_marker = os.path.join(
incoming,
ERROR_MARKER + filename
)
logger.exception("An error occured while handeling file. " +
"Creating error marker file %s, remove if " +
"you fixed the error. Error was:",
error_marker)
with open(error_marker, 'w') as f:
traceback.print_exc(file=f)
def submit(self, origin, path, basedir, perms=None):
"""Submit an incoming file or directory to the thread pool.
Arguments
---------
origin: str
The name of the dropbox the file came from, as specified in
the config file.
path: str
Path to the incoming file or directory.
basedir: str
Path to the dropbox that contains the incoming file.
perms: dict
A dictionary with arguments to `fstools.check_permissions`.
"""
filename = os.path.split(path)[1]
future = super(FileHandler, self).submit(
self._handle_file, origin, path, perms
)
def remove_markers(future):
started_marker = os.path.join(basedir, STARTED_MARKER + filename)
finish_marker = os.path.join(basedir, FINISHED_MARKER + filename)
try:
os.unlink(finish_marker)
except OSError:
logger.error("Could not find finish marker for file %s", path)
try:
os.unlink(started_marker)
except OSError:
logger.error("Could not find start marker for file %s", path)
logger.info("Finished processing of file %s", path)
future.add_done_callback(remove_markers)
return future
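# Hedged usage sketch of the handler (the paths and the dropbox regexp are made up):
#     handler = FileHandler(
#         openbis_dropboxes=[{"regexp": "^" + BARCODE_REGEX, "path": "/mnt/openbis/incoming"}],
#         storage="/mnt/storage",
#         manual="/mnt/manual",
#     )
#     handler.submit("lab-dropbox", "/incoming/QJFDC010EU_sample.raw", "/incoming")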
def process_marker(marker, basedir, incoming_name, handler, perms=None):
"""Check if there are new files in `incoming` and handle them if so.
Marker files
------------
- For every incoming file a marker file `FINISHED_MARKER<filename>` is
expected to be written when copying is finished. Incoming files
without such a marker file will be silently ignored.
- If a file is being processed by the dropboxhandler, a
`STARTED_MARKER<filename>` marker file is written. This will be removed
after the incoming file itself has been moved to the correct location.
- If the incoming file has incorrect permissions or if handling the
file fails for another reason, an `ERROR_MARKER<filename>` marker file
is created that contains the error message. If this file is removed
and a new `FINISHED_MARKER<filename>` marker file is created, the
dropboxhandler will try again.
"""
logger.debug("Found new marker file: %s", marker)
filename = os.path.basename(marker)[len(FINISHED_MARKER):]
file = pjoin(basedir, filename)
# error_marker is created if we can't process the file
error_marker = pjoin(basedir, ERROR_MARKER + filename)
# start marker tells us that a background process is looking at it
start_marker = pjoin(basedir, STARTED_MARKER + filename)
# finish marker is created by the datamover when the file
# has been copied completely
finish_marker = pjoin(basedir, FINISHED_MARKER + filename)
if os.path.exists(error_marker):
logger.debug("Ignoring file %s because of error marker", file)
return
if os.path.exists(start_marker):
logger.debug("Ignoring file %s because of started marker", file)
if fstools.is_old(start_marker):
logger.error("Found an old start marker: %s.", start_marker)
return
# The finished marker file may have been removed by now
if not os.path.exists(finish_marker):
logger.debug(
"Marker file does not exist any more. Aborting: %s", finish_marker
)
return
try:
if not filename:
raise ValueError("Got invalid marker file: %s" % finish_marker)
logger.info("New file arrived for dropbox %s: %s" %
(incoming_name, file))
if (filename.startswith(FINISHED_MARKER) or
filename.startswith(ERROR_MARKER) or
filename.startswith(STARTED_MARKER)):
raise ValueError("Filename starts with marker name")
if not os.path.exists(file):
raise ValueError("Got marker %s, but %s does not exist" %
(finish_marker, file))
fstools.touch(start_marker)
handler.submit(incoming_name, file, basedir, perms)
# handler will remove start_marker and finish marker
except BaseException:
logger.exception("An error occured while submitting a job. " +
"Creating error marker file %s, remove if " +
"you fixed the error.",
error_marker)
with open(error_marker, 'w') as f:
traceback.print_exc(file=f)
def listen(incoming, interval, handler):
"""Watch directories `incomings` for new files and call FileHandler."""
logger.info("Starting to listen for new files")
while True:
for conf in incoming:
basedir = conf['path']
name = conf['name']
perms = conf.get('perms', None)
logger.debug("Check for new files in %s at %s" % (name, basedir))
for marker in glob.glob(pjoin(basedir, FINISHED_MARKER + '*')):
process_marker(marker, basedir, name, handler, perms)
time.sleep(interval)
| 36.894349
| 101
| 0.618673
|
da51b899199823a8c195da3f3af19e52f9cfa38a
| 2,699
|
py
|
Python
|
emo_classifier/model.py
|
stdiff/emo-classifier
|
211731a44022408c750b611383216ce0578f2d41
|
[
"MIT"
] | null | null | null |
emo_classifier/model.py
|
stdiff/emo-classifier
|
211731a44022408c750b611383216ce0578f2d41
|
[
"MIT"
] | null | null | null |
emo_classifier/model.py
|
stdiff/emo-classifier
|
211731a44022408c750b611383216ce0578f2d41
|
[
"MIT"
] | null | null | null |
from typing import BinaryIO
from importlib import resources
from abc import ABC, abstractmethod
from pathlib import Path
import numpy as np
from emo_classifier import ARTIFACT_DIR
from emo_classifier.api import Comment, Prediction
from emo_classifier.emotion import load_emotions
from emo_classifier.metrics import Thresholds
class Model(ABC):
"""
An abstract base class for models. It provides a unified interface for
- saving a model under ARTIFACT_DIR,
- loading a model and
- making a prediction (for REST API)
"""
artifact_file_name = "model.model"
emotions: list[str] = load_emotions()
@classmethod
@abstractmethod
def load_artifact_file(cls, fp: BinaryIO) -> "Model":
"""
Given the file-like object of the model artifact, this method must recover the original Model instance.
:param fp: file-like object of the model artifact
:return: recovered Model instance
"""
raise NotImplementedError
@classmethod
def load(cls) -> "Model":
with resources.open_binary("emo_classifier.artifact", cls.artifact_file_name) as fp:
model = cls.load_artifact_file(fp)
print(f"LOADED: {type(model).__name__} instance")
return model
@abstractmethod
def save_artifact_file(self, path: Path):
"""
Save the artifact from which the original instance can be recovered.
:param path: save location (provided by the method save())
"""
raise NotImplementedError
def save(self):
file_path = ARTIFACT_DIR / self.artifact_file_name
self.save_artifact_file(file_path)
print("SAVED:", file_path.absolute())
@property
@abstractmethod
def thresholds(self) -> Thresholds:
raise NotImplementedError
@thresholds.setter
@abstractmethod
def thresholds(self, thresholds: Thresholds):
raise NotImplementedError
@abstractmethod
def predict_proba(self, texts) -> np.ndarray:
"""
:param texts: Series/Iterator of texts
:return: array of prediction of shape (#instances, #emotions)
"""
raise NotImplementedError
def predict(self, comment: Comment) -> Prediction:
"""
Makes a prediction for a single Comment instance. This is the main functionality of the API.
:param comment: Comment instance
:return: Prediction instance
"""
X = np.array([comment.text])
y = self.predict_proba(X)[0, :]
emotions = [emotion for i, emotion in enumerate(self.emotions) if y[i] > self._dict_thresholds.get(emotion)]
return Prediction(id=comment.id, labels=emotions)
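# Minimal sketch of a concrete subclass (hypothetical, for illustration only):
#     class ConstantModel(Model):
#         artifact_file_name = "constant.model"
#         def predict_proba(self, texts):
#             return np.full((len(texts), len(self.emotions)), 0.5)
#     # load_artifact_file / save_artifact_file / thresholds are omitted here,
#     # but every abstract member above must be implemented in practice.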
| 31.383721
| 116
| 0.674694
|
aeb56c5d67b334c19bbc874ad3edf66d66f7f486
| 5,099
|
py
|
Python
|
selfdrive/car/volkswagen/carcontroller.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | 10
|
2021-10-09T15:32:24.000Z
|
2022-02-14T08:31:56.000Z
|
selfdrive/car/volkswagen/carcontroller.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | 51
|
2021-11-17T16:13:39.000Z
|
2022-03-22T00:14:00.000Z
|
selfdrive/car/volkswagen/carcontroller.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | 10
|
2021-11-29T20:45:56.000Z
|
2022-03-28T20:15:41.000Z
|
from cereal import car
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.volkswagen import volkswagencan
from selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, MQB_LDW_MESSAGES, BUTTON_STATES, CarControllerParams as P
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.CP = CP
self.packer_pt = CANPacker(DBC_FILES.mqb)
self.hcaSameTorqueCount = 0
self.hcaEnabledFrameCount = 0
self.graButtonStatesToSend = None
self.graMsgSentCount = 0
self.graMsgStartFramePrev = 0
self.graMsgBusCounterPrev = 0
self.steer_rate_limited = False
def update(self, c, CS, frame, ext_bus, actuators, visual_alert, left_lane_visible, right_lane_visible, left_lane_depart, right_lane_depart):
""" Controls thread """
can_sends = []
# **** Steering Controls ************************************************ #
if frame % P.HCA_STEP == 0:
# Logic to avoid HCA state 4 "refused":
# * Don't steer unless HCA is in state 3 "ready" or 5 "active"
# * Don't steer at standstill
# * Don't send > 3.00 Newton-meters torque
# * Don't send the same torque for > 6 seconds
# * Don't send uninterrupted steering for > 360 seconds
# One frame of HCA disabled is enough to reset the timer, without zeroing the
# torque value. Do that anytime we happen to have 0 torque, or failing that,
# when exceeding ~1/3 the 360 second timer.
if c.latActive:
new_steer = int(round(actuators.steer * P.STEER_MAX))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
self.steer_rate_limited = new_steer != apply_steer
if apply_steer == 0:
hcaEnabled = False
self.hcaEnabledFrameCount = 0
else:
self.hcaEnabledFrameCount += 1
if self.hcaEnabledFrameCount >= 118 * (100 / P.HCA_STEP): # 118s
hcaEnabled = False
self.hcaEnabledFrameCount = 0
else:
hcaEnabled = True
if self.apply_steer_last == apply_steer:
self.hcaSameTorqueCount += 1
if self.hcaSameTorqueCount > 1.9 * (100 / P.HCA_STEP): # 1.9s
apply_steer -= (1, -1)[apply_steer < 0]
self.hcaSameTorqueCount = 0
else:
self.hcaSameTorqueCount = 0
else:
hcaEnabled = False
apply_steer = 0
self.apply_steer_last = apply_steer
idx = (frame / P.HCA_STEP) % 16
can_sends.append(volkswagencan.create_mqb_steering_control(self.packer_pt, CANBUS.pt, apply_steer,
idx, hcaEnabled))
# **** HUD Controls ***************************************************** #
if frame % P.LDW_STEP == 0:
if visual_alert in (VisualAlert.steerRequired, VisualAlert.ldw):
hud_alert = MQB_LDW_MESSAGES["laneAssistTakeOverSilent"]
else:
hud_alert = MQB_LDW_MESSAGES["none"]
can_sends.append(volkswagencan.create_mqb_hud_control(self.packer_pt, CANBUS.pt, c.enabled,
CS.out.steeringPressed, hud_alert, left_lane_visible,
right_lane_visible, CS.ldw_stock_values,
left_lane_depart, right_lane_depart))
# **** ACC Button Controls ********************************************** #
# FIXME: this entire section is in desperate need of refactoring
if self.CP.pcmCruise:
if frame > self.graMsgStartFramePrev + P.GRA_VBP_STEP:
if c.cruiseControl.cancel:
# Cancel ACC if it's engaged with OP disengaged.
self.graButtonStatesToSend = BUTTON_STATES.copy()
self.graButtonStatesToSend["cancel"] = True
elif c.enabled and CS.esp_hold_confirmation:
# Blip the Resume button if we're engaged at standstill.
# FIXME: This is a naive implementation, improve with visiond or radar input.
self.graButtonStatesToSend = BUTTON_STATES.copy()
self.graButtonStatesToSend["resumeCruise"] = True
if CS.graMsgBusCounter != self.graMsgBusCounterPrev:
self.graMsgBusCounterPrev = CS.graMsgBusCounter
if self.graButtonStatesToSend is not None:
if self.graMsgSentCount == 0:
self.graMsgStartFramePrev = frame
idx = (CS.graMsgBusCounter + 1) % 16
can_sends.append(volkswagencan.create_mqb_acc_buttons_control(self.packer_pt, ext_bus, self.graButtonStatesToSend, CS, idx))
self.graMsgSentCount += 1
if self.graMsgSentCount >= P.GRA_VBP_COUNT:
self.graButtonStatesToSend = None
self.graMsgSentCount = 0
new_actuators = actuators.copy()
new_actuators.steer = self.apply_steer_last / P.STEER_MAX
return new_actuators, can_sends
| 43.211864
| 143
| 0.62424
|
8fb6d9451aaadfb58fceb7a6ffc1077d71ef97fc
| 153
|
py
|
Python
|
arc078_a.py
|
hythof/atc
|
12cb94ebe693e1f469ce0d982bc2924b586552cd
|
[
"CC0-1.0"
] | null | null | null |
arc078_a.py
|
hythof/atc
|
12cb94ebe693e1f469ce0d982bc2924b586552cd
|
[
"CC0-1.0"
] | null | null | null |
arc078_a.py
|
hythof/atc
|
12cb94ebe693e1f469ce0d982bc2924b586552cd
|
[
"CC0-1.0"
] | null | null | null |
n,*a = [int(x) for x in open(0).read().split()]
l=0
r=sum(a)
ans=float('inf')
for ai in a[:-1]:
l+=ai
r-=ai
ans=min(ans,abs(l-r))
print(ans)
| 15.3
| 47
| 0.529412
|
201910ef9b04ab8c5a1c5383f35fecf28af4b9f8
| 14,524
|
py
|
Python
|
fastai/callback.py
|
shafiul/fastai
|
08d6de8a9a89a77569bfbccca278fc5522772100
|
[
"Apache-2.0"
] | 2
|
2019-02-08T04:59:27.000Z
|
2020-05-15T21:17:23.000Z
|
fastai/callback.py
|
shafiul/fastai
|
08d6de8a9a89a77569bfbccca278fc5522772100
|
[
"Apache-2.0"
] | 3
|
2021-05-20T19:59:09.000Z
|
2022-02-26T09:11:29.000Z
|
fastai/callback.py
|
shafiul/fastai
|
08d6de8a9a89a77569bfbccca278fc5522772100
|
[
"Apache-2.0"
] | 1
|
2020-01-09T15:44:46.000Z
|
2020-01-09T15:44:46.000Z
|
"Callbacks provides extensibility to the `basic_train` loop. See `train` for examples of custom callbacks."
from .basic_data import *
from .torch_core import *
__all__ = ['AverageMetric', 'Callback', 'CallbackHandler', 'OptimWrapper', 'SmoothenValue', 'Stepper', 'annealing_cos', 'CallbackList',
'annealing_exp', 'annealing_linear', 'annealing_no', 'annealing_poly']
class OptimWrapper():
"Basic wrapper around `opt` to simplify hyper-parameters changes."
def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True):
self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
self.opt_keys = list(self.opt.param_groups[0].keys())
self.opt_keys.remove('params')
self.read_defaults()
self.wd = wd
@classmethod
def create(cls, opt_func:Union[type,Callable], lr:Union[float,Tuple,List],
layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{'params': trainable_params(l), 'lr':0} for l in split_groups])
opt = cls(opt, **kwargs)
opt.lr,opt.opt_func = listify(lr, layer_groups),opt_func
return opt
def new(self, layer_groups:ModuleList):
"Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
opt_func = getattr(self, 'opt_func', self.opt.__class__)
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{'params': trainable_params(l), 'lr':0} for l in split_groups])
return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
def __repr__(self)->str:
return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
#Pytorch optimizer methods
def step(self)->None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for p in pg1['params']: p.data.mul_(1 - wd*lr)
if self.bn_wd:
for p in pg2['params']: p.data.mul_(1 - wd*lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
def zero_grad(self)->None:
"Clear optimizer gradients."
self.opt.zero_grad()
#Passthrough to the inner opt.
def __getattr__(self,k:str)->Any: return getattr(self.opt, k, None)
def clear(self):
"Reset the state of the inner optimizer."
sd = self.state_dict()
sd['state'] = {}
self.load_state_dict(sd)
#Hyperparameters as properties
@property
def lr(self)->float: return self._lr[-1]
@lr.setter
def lr(self, val:float)->None:
self._lr = self.set_val('lr', listify(val, self._lr))
@property
def mom(self)->float:return self._mom[-1]
@mom.setter
def mom(self, val:float)->None:
if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
self._mom = listify(val, self._mom)
@property
def beta(self)->float: return None if self._beta is None else self._beta[-1]
@beta.setter
def beta(self, val:float)->None:
"Set beta (or alpha as makes sense for given optimizer)."
if val is None: return
if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
@property
def wd(self)->float: return self._wd[-1]
@wd.setter
def wd(self, val:float)->None:
"Set weight decay."
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
#Helper functions
def read_defaults(self)->None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
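# Hedged usage sketch (assumes torch.optim and a fastai-style layer_groups split):
#     opt = OptimWrapper.create(optim.Adam, lr=1e-3, layer_groups=layer_groups,
#                               wd=1e-2, true_wd=True)
#     opt.lr = 3e-4          # property setter broadcasts across the param groups
#     opt.step(); opt.zero_grad()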
class Callback():
"Base class for callbacks that want to record values, dynamically change learner params, etc."
_order=0
def on_train_begin(self, **kwargs:Any)->None:
"To initialize constants in the callback."
pass
def on_epoch_begin(self, **kwargs:Any)->None:
"At the beginning of each epoch."
pass
def on_batch_begin(self, **kwargs:Any)->None:
"Set HP before the step is done. Returns xb, yb (which can allow us to modify the input at that step if needed)."
pass
def on_loss_begin(self, **kwargs:Any)->None:
"Called after forward pass but before loss has been computed. Returns the output (which can allow us to modify it)."
pass
def on_backward_begin(self, **kwargs:Any)->None:
"""Called after the forward pass and the loss has been computed, but before backprop.
Returns the loss (which can allow us to modify it, for instance for reg functions)"""
pass
def on_backward_end(self, **kwargs:Any)->None:
"Called after backprop but before optimizer step. Useful for true weight decay in AdamW."
pass
def on_step_end(self, **kwargs:Any)->None:
"Called after the step of the optimizer but before the gradients are zeroed."
pass
def on_batch_end(self, **kwargs:Any)->None:
"Called at the end of the batch."
pass
def on_epoch_end(self, **kwargs:Any)->bool:
"Called at the end of an epoch."
return False
def on_train_end(self, **kwargs:Any)->None:
"Useful for cleaning up things and saving files/models."
pass
class SmoothenValue():
"Create a smooth moving average for a value (loss, etc) using `beta`."
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
"Add `val` to calculate updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
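# Worked example of the debiased moving average above, with beta=0.98:
#     s = SmoothenValue(0.98)
#     s.add_value(10.0)   # mov_avg=0.2,   smooth = 0.2   / (1 - 0.98)   = 10.0
#     s.add_value(20.0)   # mov_avg=0.596, smooth = 0.596 / (1 - 0.9604) ~ 15.05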
CallbackList = Collection[Callback]
def _get_init_state(): return {'epoch':0, 'iteration':0, 'num_batch':0}
@dataclass
class CallbackHandler():
"Manage all of the registered `callbacks` and `metrics`, smoothing loss by momentum `beta`."
callbacks:CallbackList=None
metrics:CallbackList=None
beta:float=0.98
def __post_init__(self)->None:
"Initialize smoother and learning stats."
self.callbacks = ifnone(self.callbacks, [])
self.metrics = ifnone(self.metrics, [])
self.metrics = [(met if isinstance(met, Callback) else AverageMetric(met)) for met in self.metrics]
self.callbacks = sorted(self.callbacks, key=lambda o: getattr(o, '_order', 0))
self.smoothener = SmoothenValue(self.beta)
self.state_dict:Dict[str,Union[int,float,Tensor]]=_get_init_state()
def __call__(self, cb_name, call_mets=True, **kwargs)->None:
"Call through to all of the `CallbakHandler` functions."
if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
"About to start learning."
self.state_dict = _get_init_state()
self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
names = [(met.name if hasattr(met, 'name') else camel2snake(met.__class__.__name__)) for met in self.metrics]
self('train_begin', metrics_names=names)
def on_epoch_begin(self)->None:
"Handle new epoch."
self.state_dict['num_batch'] = 0
self('epoch_begin')
def on_batch_begin(self, xb:Tensor, yb:Tensor, train:bool=True)->None:
"Handle new batch `xb`,`yb` in `train` or validation."
self.state_dict['last_input'], self.state_dict['last_target'] = xb, yb
self.state_dict['train'] = train
cbs = self.callbacks if train else self.metrics + self.callbacks
for cb in cbs:
a = cb.on_batch_begin(**self.state_dict)
if a is not None: self.state_dict['last_input'], self.state_dict['last_target'] = a
return self.state_dict['last_input'], self.state_dict['last_target']
def on_loss_begin(self, out:Tensor)->None:
"Handle start of loss calculation with model output `out`."
self.state_dict['last_output'] = out
for cb in self.callbacks:
a = cb.on_loss_begin(**self.state_dict)
if a is not None: self.state_dict['last_output'] = a
return self.state_dict['last_output']
def on_backward_begin(self, loss:Tensor)->None:
"Handle gradient calculation on `loss`."
self.smoothener.add_value(loss.detach().cpu())
self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth
for cb in self.callbacks:
a = cb.on_backward_begin(**self.state_dict)
if a is not None: self.state_dict['last_loss'] = a
return self.state_dict['last_loss']
def on_backward_end(self)->None:
"Handle end of gradient calculation."
self('backward_end', False)
def on_step_end(self)->None:
"Handle end of optimization step."
self('step_end', False)
def on_batch_end(self, loss:Tensor)->None:
"Handle end of processing one batch with `loss`."
self.state_dict['last_loss'] = loss
stop = np.any(self('batch_end', not self.state_dict['train']))
if self.state_dict['train']:
self.state_dict['iteration'] += 1
self.state_dict['num_batch'] += 1
return stop
def on_epoch_end(self, val_loss:Tensor)->bool:
"Epoch is done, process `val_loss`."
self.state_dict['last_metrics'] = [val_loss] if val_loss is not None else None
self.state_dict['epoch'] += 1
if not self.state_dict['train']:
for met in self.metrics:
met.on_epoch_end(**self.state_dict)
self.state_dict['last_metrics'].append(met.metric)
return np.any(self('epoch_end', False))
def on_train_end(self, exception:Union[bool,Exception])->None:
"Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
self('train_end', exception=exception)
class AverageMetric(Callback):
"Wrap a `func` in a callback for metrics computation."
def __init__(self, func):
# If it's a partial, use func.func
name = getattr(func,'func',func).__name__
self.func, self.name = func, name
def on_epoch_begin(self, **kwargs):
"Set the inner value to 0."
self.val, self.count = 0.,0
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
self.count += last_target[0].size(0)
self.val += last_target[0].size(0) * self.func(last_output, *last_target).detach().cpu()
def on_epoch_end(self, **kwargs):
"Sets the final result in `self.metric`."
self.metric = self.val/self.count
def annealing_no(start:Number, end:Number, pct:float)->Number:
"No annealing, always return `start`."
return start
def annealing_linear(start:Number, end:Number, pct:float)->Number:
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start:Number, end:Number, pct:float)->Number:
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
"Helper function for `anneal_poly`."
return end + (start-end) * (1-pct)**degree
def annealing_poly(degree:Number)->Number:
"Anneal polynomically from `start` to `end` as pct goes from 0.0 to 1.0."
return functools.partial(do_annealing_poly, degree=degree)
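# Worked examples of the schedules above at the halfway point (pct=0.5):
#     annealing_linear(0.1, 0.01, 0.5)  -> 0.055
#     annealing_cos(1.0, 0.0, 0.5)      -> 0.5    (cos(pi/2) + 1 == 1)
#     annealing_exp(1.0, 0.01, 0.5)     -> 0.1    (1.0 * (0.01/1.0) ** 0.5)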
class Stepper():
"Used to \"step\" from start,end (`vals`) over `n_iter` iterations on a schedule defined by `func`"
def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
self.start,self.end = (vals[0],vals[1]) if is_tuple(vals) else (vals,0)
self.n_iter = max(1,n_iter)
if func is None: self.func = annealing_linear if is_tuple(vals) else annealing_no
else: self.func = func
self.n = 0
def step(self)->Number:
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
"Return `True` if schedule completed."
return self.n >= self.n_iter
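# Illustrative usage sketch: a Stepper walking a hyper-parameter from 0.1 down to
# 0.001 over 5 iterations with cosine annealing (relies on `is_tuple` from this
# module's core utilities, just as Stepper.__init__ does).
def _demo_stepper():
    stepper = Stepper(vals=(0.1, 0.001), n_iter=5, func=annealing_cos)
    while not stepper.is_done:
        print(round(float(stepper.step()), 5))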
| 45.3875
| 135
| 0.648582
|
8120fbc1398acb0e443d2ce42899df6a7acc331b
| 60,463
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/utils/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/ansible/utils/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/ansible/utils/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.display_functions import *
from ansible.utils.plugins import *
from ansible.utils.su_prompts import *
from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
from ansible.callbacks import display
from ansible.module_utils.splitter import split_args, unquote
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils.unicode import to_bytes, to_unicode
import ansible.constants as C
import ast
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import subprocess
import contextlib
from vault import VaultLib
VERBOSITY=0
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
# caching the compilation of the regex used
# to check for lookup calls within data
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
CODE_REGEX = re.compile(r'(?:{%|%})')
try:
# simplejson can be much faster if it's available
import simplejson as json
except ImportError:
import json
try:
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except:
pass
try:
import builtin
except ImportError:
import __builtin__ as builtin
KEYCZAR_AVAILABLE=False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
KEYCZAR_AVAILABLE=True
except ImportError:
pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
os.makedirs(key_path, mode=0700)
os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
elif not os.path.isdir(key_path):
raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate(size=256)
fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
fh = os.fdopen(fd, 'w')
fh.write(str(key))
fh.close()
return key
else:
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
return key
def encrypt(key, msg):
return key.Encrypt(msg.encode('utf-8'))
def decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def read_vault_file(vault_password_file):
"""Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
if vault_password_file:
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError, e:
raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
except (OSError, IOError), e:
raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
return vault_pass
else:
return None
def err(msg):
''' print an error message to stderr '''
print >> sys.stderr, msg
def exit(msg, rc=1):
''' quit with an error to stderr and a failure code '''
err(msg)
sys.exit(rc)
def jsonify(result, format=False):
''' format JSON output (compact or pretty-printed) '''
if result is None:
return "{}"
result2 = result.copy()
for key, value in result2.items():
if type(value) is str:
result2[key] = value.decode('utf-8', 'ignore')
indent = None
if format:
indent = 4
try:
return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
except UnicodeDecodeError:
return json.dumps(result2, sort_keys=True, indent=indent)
def write_tree_file(tree, hostname, buf):
''' write something into treedir/hostname '''
# TODO: might be nice to append playbook runs per host in a similar way
# in which case, we'd want append mode.
path = os.path.join(tree, hostname)
buf = to_bytes(buf)
with open(path, 'wb+') as fd:
fd.write(buf)
def is_failed(result):
''' is a given JSON result a failed result? '''
return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
def is_changed(result):
''' is a given JSON result a changed result? '''
return (result.get('changed', False) in [ True, 'True', 'true'])
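# Illustrative usage sketch: how the helpers above classify module results (the
# result dictionaries are made-up examples).
def _demo_result_classification():
    print(is_failed({'rc': 1, 'msg': 'command not found'}))    # True: non-zero return code
    print(is_failed({'rc': 0, 'changed': False}))              # False
    print(is_changed({'changed': True, 'dest': '/etc/motd'}))  # True
    print(is_changed({'changed': False}))                      # False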
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
from ansible.utils import template
if conditional is None or conditional == '':
return True
if isinstance(conditional, list):
for x in conditional:
if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
return False
return True
if not isinstance(conditional, basestring):
return conditional
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
conditional = to_unicode(inject[conditional], nonstring='simplerepr')
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in conditional:
return True
elif "is defined" in conditional:
return False
else:
raise errors.AnsibleError("error while evaluating conditional: %s" % original)
elif val == "True":
return True
elif val == "False":
return False
else:
raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
'''is the given path executable?'''
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def unfrackpath(path):
'''
returns a path that is free of symlinks, environment
variables, relative path traversals and symbols (~)
example:
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
def prepare_writeable_dir(tree,mode=0777):
''' make sure a directory exists and is writeable '''
# modify the mode to ensure the owner at least
# has read/write access to this directory
mode |= 0700
# make sure the tree path is always expanded
# and normalized and free of symlinks
tree = unfrackpath(tree)
if not os.path.exists(tree):
try:
os.makedirs(tree, mode)
except (IOError, OSError), e:
raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
if not os.access(tree, os.W_OK):
raise errors.AnsibleError("Cannot write to path %s" % tree)
return tree
def path_dwim(basedir, given):
'''
make relative paths work like folks expect.
'''
if given.startswith("'"):
given = given[1:-1]
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
if basedir is None:
basedir = "."
return os.path.abspath(os.path.join(basedir, given))
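# Illustrative usage sketch of the path_dwim() resolution rules (the paths are
# made-up examples).
def _demo_path_dwim():
    print(path_dwim('/etc/ansible', 'files/foo.conf'))  # -> /etc/ansible/files/foo.conf
    print(path_dwim('/etc/ansible', '/tmp/foo.conf'))   # absolute paths pass straight through
    print(path_dwim(None, 'foo.conf'))                  # resolved against '.' when basedir is None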
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
''' find one file in a directory one level up in a dir named dirname relative to current '''
# (used by roles code)
from ansible.utils import template
basedir = os.path.dirname(original)
if os.path.islink(basedir):
basedir = unfrackpath(basedir)
template2 = os.path.join(basedir, dirname, source)
else:
template2 = os.path.join(basedir, '..', dirname, source)
source2 = path_dwim(basedir, template2)
if os.path.exists(source2):
return source2
obvious_local_path = path_dwim(playbook_base, source)
if os.path.exists(obvious_local_path):
return obvious_local_path
if check:
raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
return source2 # which does not exist
def repo_url_to_role_name(repo_url):
# gets the role name out of a repo like
# "http://git.example.com/repos/repo.git" => "repo"
if '://' not in repo_url and '@' not in repo_url:
return repo_url
trailing_path = repo_url.split('/')[-1]
if trailing_path.endswith('.git'):
trailing_path = trailing_path[:-4]
if trailing_path.endswith('.tar.gz'):
trailing_path = trailing_path[:-7]
if ',' in trailing_path:
trailing_path = trailing_path.split(',')[0]
return trailing_path
def role_spec_parse(role_spec):
# takes a repo and a version like
# git+http://git.example.com/repos/repo.git,v1.0
# and returns a list of properties such as:
# {
# 'scm': 'git',
# 'src': 'http://git.example.com/repos/repo.git',
# 'version': 'v1.0',
# 'name': 'repo'
# }
role_spec = role_spec.strip()
role_version = ''
default_role_versions = dict(git='master', hg='tip')
if role_spec == "" or role_spec.startswith("#"):
return (None, None, None, None)
tokens = [s.strip() for s in role_spec.split(',')]
# assume https://github.com URLs are git+https:// URLs and not
# tarballs unless they end in '.zip'
if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
tokens[0] = 'git+' + tokens[0]
if '+' in tokens[0]:
(scm, role_url) = tokens[0].split('+')
else:
scm = None
role_url = tokens[0]
if len(tokens) >= 2:
role_version = tokens[1]
if len(tokens) == 3:
role_name = tokens[2]
else:
role_name = repo_url_to_role_name(tokens[0])
if scm and not role_version:
role_version = default_role_versions.get(scm, '')
return dict(scm=scm, src=role_url, version=role_version, name=role_name)
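# Illustrative usage sketch: parsing the same made-up role spec shown in the
# comment above.
def _demo_role_spec_parse():
    print(role_spec_parse('git+http://git.example.com/repos/repo.git,v1.0'))
    # -> {'scm': 'git', 'src': 'http://git.example.com/repos/repo.git',
    #     'version': 'v1.0', 'name': 'repo'}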
def role_yaml_parse(role):
if 'role' in role:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role_info = role_spec_parse(role['role'])
if isinstance(role_info, dict):
# Warning: Slight change in behaviour here. name may be being
# overloaded. Previously, name was only a parameter to the role.
# Now it is both a parameter to the role and the name that
# ansible-galaxy will install under on the local system.
if 'name' in role and 'name' in role_info:
del role_info['name']
role.update(role_info)
else:
# New style: { src: 'galaxy.role,version,name', other_vars: "here" }
if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
role["src"] = "git+" + role["src"]
if '+' in role["src"]:
(scm, src) = role["src"].split('+')
role["scm"] = scm
role["src"] = src
if 'name' not in role:
role["name"] = repo_url_to_role_name(role["src"])
if 'version' not in role:
role['version'] = ''
if 'scm' not in role:
role['scm'] = None
return role
def json_loads(data):
''' parse a JSON string and return a data structure '''
try:
loaded = json.loads(data)
except ValueError,e:
raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
return loaded
def _clean_data(orig_data, from_remote=False, from_inventory=False):
''' remove jinja2 template tags from a string '''
if not isinstance(orig_data, basestring):
return orig_data
# when the data is marked as having come from a remote, we always
# replace any print blocks (ie. {{var}}), however when marked as coming
# from inventory we only replace print blocks that contain a call to
# a lookup plugin (ie. {{lookup('foo','bar'))}})
replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
with contextlib.closing(StringIO.StringIO(orig_data)) as data:
# these variables keep track of opening block locations, as we only
# want to replace matched pairs of print/block tags
print_openings = []
block_openings = []
for mo in regex.finditer(orig_data):
token = mo.group(0)
token_start = mo.start(0)
if token[0] == '{':
if token == '{%':
block_openings.append(token_start)
elif token == '{{':
print_openings.append(token_start)
elif token[1] == '}':
prev_idx = None
if token == '%}' and block_openings:
prev_idx = block_openings.pop()
elif token == '}}' and print_openings:
prev_idx = print_openings.pop()
if prev_idx is not None:
# replace the opening
data.seek(prev_idx, os.SEEK_SET)
data.write('{#')
# replace the closing
data.seek(token_start, os.SEEK_SET)
data.write('#}')
else:
assert False, 'Unhandled regex match'
return data.getvalue()
def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
'''
walk a complex data structure, and use _clean_data() to
remove any template tags that may exist
'''
if not from_remote and not from_inventory:
raise errors.AnsibleError("when cleaning data, you must specify either from_remote or from_inventory")
if isinstance(orig_data, dict):
data = orig_data.copy()
for key in data:
new_key = _clean_data_struct(key, from_remote, from_inventory)
new_val = _clean_data_struct(data[key], from_remote, from_inventory)
if key != new_key:
del data[key]
data[new_key] = new_val
elif isinstance(orig_data, list):
data = orig_data[:]
for i in range(0, len(data)):
data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
elif isinstance(orig_data, basestring):
data = _clean_data(orig_data, from_remote, from_inventory)
else:
data = orig_data
return data
def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
''' this version for module return data only '''
orig_data = raw_data
# ignore stuff like tcgetattr spewage or other warnings
data = filter_leading_non_json_lines(raw_data)
try:
results = json.loads(data)
except:
if no_exceptions:
return dict(failed=True, parsed=False, msg=raw_data)
else:
raise
if from_remote:
results = _clean_data_struct(results, from_remote, from_inventory)
return results
def serialize_args(args):
'''
Flattens a dictionary args to a k=v string
'''
module_args = ""
for (k,v) in args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
elif isinstance(v, bool):
module_args = "%s=%s %s" % (k, str(v), module_args)
return module_args.strip()
def merge_module_args(current_args, new_args):
'''
merges either a dictionary or string of k=v pairs with another string of k=v pairs,
and returns a new k=v string without duplicates.
'''
if not isinstance(current_args, basestring):
raise errors.AnsibleError("expected current_args to be a basestring")
# we use parse_kv to split up the current args into a dictionary
final_args = parse_kv(current_args)
if isinstance(new_args, dict):
final_args.update(new_args)
elif isinstance(new_args, basestring):
new_args_kv = parse_kv(new_args)
final_args.update(new_args_kv)
return serialize_args(final_args)
def parse_yaml(data, path_hint=None):
''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
stripped_data = data.lstrip()
loaded = None
if stripped_data.startswith("{") or stripped_data.startswith("["):
# since the line starts with { or [ we can infer this is a JSON document.
try:
loaded = json.loads(data)
except ValueError, ve:
if path_hint:
raise errors.AnsibleError(path_hint + ": " + str(ve))
else:
raise errors.AnsibleError(str(ve))
else:
# else this is pretty sure to be a YAML document
loaded = yaml.load(data, Loader=Loader)
return loaded
def process_common_errors(msg, probline, column):
replaced = probline.replace(" ","")
if ":{{" in replaced and "}}" in replaced:
msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.
For instance, if the original line was:
app_path: {{ base_path }}/foo
It should be written as:
app_path: "{{ base_path }}/foo"
"""
return msg
elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.
For instance, if the original line was:
copy: src=file.txt dest=/path/filename:with_colon.txt
It can be written as:
copy: src=file.txt dest='/path/filename:with_colon.txt'
Or:
copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
return msg
else:
parts = probline.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
unbalanced = True
if match:
msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:
when: "ok" in result.stdout
Could be written as:
when: '"ok" in result.stdout'
or equivalently:
when: "'ok' in result.stdout"
"""
return msg
if unbalanced:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:
foo: "bad" "wolf"
Could be written as:
foo: '"bad" "wolf"'
"""
return msg
return msg
def process_yaml_error(exc, data, path=None, show_content=True):
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
if show_content:
if mark.line -1 >= 0:
before_probline = data.split("\n")[mark.line-1]
else:
before_probline = ''
probline = data.split("\n")[mark.line]
arrow = " " * mark.column + "^"
msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s
%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
unquoted_var = None
if '{{' in probline and '}}' in probline:
if '"{{' not in probline or "'{{" not in probline:
unquoted_var = True
if not unquoted_var:
msg = process_common_errors(msg, probline, mark.column)
else:
msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:
with_items:
- {{ foo }}
Should be written as:
with_items:
- "{{ foo }}"
"""
else:
# most likely displaying a file with sensitive content,
# so don't show any of the actual lines of yaml just the
# line number itself
msg = """Syntax error while loading YAML script, %s
The error appears to have been on line %s, column %s, but may actually
be before there depending on the exact syntax problem.
""" % (path, mark.line + 1, mark.column + 1)
else:
# No problem markers means we have to throw a generic
# "stuff messed up" type message. Sry bud.
if path:
msg = "Could not parse YAML. Check over %s again." % path
else:
msg = "Could not parse YAML."
raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path, vault_password=None):
''' convert a yaml file to a data structure '''
data = None
show_content = True
try:
data = open(path).read()
except IOError:
raise errors.AnsibleError("file could not read: %s" % path)
vault = VaultLib(password=vault_password)
if vault.is_encrypted(data):
# if the file is encrypted and no password was specified,
# the decrypt call would throw an error, but we check first
# since the decrypt function doesn't know the file name
if vault_password is None:
raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
data = vault.decrypt(data)
show_content = False
try:
return parse_yaml(data, path_hint=path)
except yaml.YAMLError, exc:
process_yaml_error(exc, data, path, show_content)
def parse_kv(args):
''' convert a string of key/value items to a dict '''
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError, ve:
if 'no closing quotation' in str(ve).lower():
raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
for x in vargs:
if "=" in x:
k, v = x.split("=",1)
options[k.strip()] = unquote(v.strip())
return options
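# Illustrative usage sketch: parse_kv() turning a module argument string into a
# dict (relies on split_args/unquote imported from ansible.module_utils.splitter
# above; the arguments are made-up examples).
def _demo_parse_kv():
    print(parse_kv('src=foo.txt dest="/tmp/with space.txt" mode=0644'))
    # expected: {'src': 'foo.txt', 'dest': '/tmp/with space.txt', 'mode': '0644'}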
def _validate_both_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise errors.AnsibleError(
"failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
)
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
# we check here as well as in combine_vars() since this
# function can work recursively with nested dicts
_validate_both_dicts(a, b)
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
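# Illustrative usage sketch: merge_hash() merging nested dicts, with keys from the
# second dict taking precedence (the variable names are made-up examples).
def _demo_merge_hash():
    a = {'app': {'port': 80, 'debug': False}, 'region': 'us-east-1'}
    b = {'app': {'debug': True}}
    print(merge_hash(a, b))
    # -> {'app': {'port': 80, 'debug': True}, 'region': 'us-east-1'}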
def default(value, function):
''' syntactic sugar around lazy evaluation of defaults '''
if value is None:
return function()
return value
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def version(prog):
result = "{0} {1}".format(prog, __version__)
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
def version_info(gitinfo=False):
if gitinfo:
# expensive call, use with care
ansible_version_string = version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
def getch():
''' read in a single character '''
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def sanitize_output(arg_string):
''' strips private info out of a string '''
private_keys = ('password', 'login_password')
output = []
for part in arg_string.split():
try:
(k, v) = part.split('=', 1)
except ValueError:
v = heuristic_log_sanitize(part)
output.append(v)
continue
if k in private_keys:
v = 'VALUE_HIDDEN'
else:
v = heuristic_log_sanitize(v)
output.append('%s=%s' % (k, v))
output = ' '.join(output)
return output
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
def increment_debug(option, opt, value, parser):
global VERBOSITY
VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
parser = SortedOptParser(usage, version=version("%prog"))
parser.add_option('-v','--verbose', default=False, action="callback",
callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
default=None)
if subset_opts:
parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
dest='timeout',
help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
# priv user defaults to root later on to enable detecting when this option was given here
parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
default=constants.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur"
)
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
return parser
def parse_extra_vars(extra_vars_opts, vault_pass):
extra_vars = {}
for extra_vars_opt in extra_vars_opts:
extra_vars_opt = to_unicode(extra_vars_opt)
if extra_vars_opt.startswith(u"@"):
# Argument is a YAML file (JSON is a subset of YAML)
extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
# Arguments as YAML
extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
else:
# Arguments as Key-value
extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
return extra_vars
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
vault_pass = None
new_vault_pass = None
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
raise errors.AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise errors.AnsibleError("Passwords do not match")
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
sshpass = None
becomepass = None
vaultpass = None
become_prompt = ''
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_method.upper()
if become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
if ask_vault_pass:
vaultpass = getpass.getpass(prompt="Vault password: ")
if vaultpass:
vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
return (sshpass, becomepass, vaultpass)
def choose_pass_prompt(options):
if options.ask_su_pass:
return 'su'
elif options.ask_sudo_pass:
return 'sudo'
return options.become_method
def normalize_become_options(options):
options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
if options.become:
pass
elif options.sudo:
options.become = True
options.become_method = 'sudo'
elif options.su:
options.become = True
options.become_method = 'su'
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
except:
raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
return result
def last_non_blank_line(buf):
all_lines = buf.splitlines()
all_lines.reverse()
for line in all_lines:
if (len(line) > 0):
return line
# shouldn't occur unless there's no output
return ""
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
tcgetattr, or where dropbear spews its MOTD on every single command (which is nuts).
only leading lines that do not start with '{' or '[' are filtered out, since
multiline JSON is valid and must be left intact.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
for line in buf.splitlines():
if stop_filtering or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
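# Illustrative usage sketch: stripping SSH noise ahead of a module's JSON reply
# (the output lines are made-up examples).
def _demo_filter_leading_non_json_lines():
    raw = 'Warning: Permanently added host to known hosts.\n{"changed": false, "ping": "pong"}\n'
    print(filter_leading_non_json_lines(raw))  # only the JSON line survives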
def boolean(value):
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
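# Illustrative usage sketch: the values boolean() treats as true.
def _demo_boolean():
    print([boolean(v) for v in ('yes', 'True', 1, 'no', 0, None)])
    # -> [True, True, True, False, False, False]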
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
"""
helper function for connection plugins to create privilege escalation commands
"""
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
prompt = None
becomecmd = None
shell = shell or '$SHELL'
if method == 'sudo':
# Rather than detect if sudo wants a password this time, -k makes sudo always ask for
# a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
# string to the user's shell. We loop reading output until we see the randomly-generated
# sudo prompt set with the -p option.
prompt = '[sudo via ansible, key=%s] password: ' % randbits
exe = exe or C.DEFAULT_SUDO_EXE
becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
(exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'su':
exe = exe or C.DEFAULT_SU_EXE
flags = flags or C.DEFAULT_SU_FLAGS
becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'pbrun':
prompt = 'assword:'
exe = exe or 'pbrun'
flags = flags or ''
becomecmd = '%s -b %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
elif method == 'pfexec':
exe = exe or 'pfexec'
flags = flags or ''
# No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
if becomecmd is None:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
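# Illustrative usage sketch: the shape of the tuple make_become_cmd() returns for
# a sudo escalation (relies on the ansible constants imported above as C; the
# command and user are made-up examples, and the exact strings depend on the
# configured sudo flags).
def _demo_make_become_cmd():
    cmd, prompt, success_key = make_become_cmd('/bin/echo hello', 'deploy', '/bin/sh', 'sudo')
    print(prompt)       # randomized '[sudo via ansible, key=...] password: ' prompt
    print(success_key)  # 'BECOME-SUCCESS-...' marker the connection plugin waits for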
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
def make_su_cmd(su_user, executable, cmd):
"""
Helper function for connection plugins to create direct su commands
"""
return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
# include names in diffs 'before' and 'after' and do diff -U 10
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ret = []
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
for line in list(differ):
ret.append(line)
return u"".join(ret)
except UnicodeDecodeError:
return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
for x in items:
if not isinstance(x, basestring):
return False
return True
def list_union(a, b):
result = []
for x in a:
if x not in result:
result.append(x)
for x in b:
if x not in result:
result.append(x)
return result
def list_intersection(a, b):
result = []
for x in a:
if x in b and x not in result:
result.append(x)
return result
def list_difference(a, b):
result = []
for x in a:
if x not in b and x not in result:
result.append(x)
for x in b:
if x not in a and x not in result:
result.append(x)
return result
def contains_vars(data):
'''
returns True if the data contains a variable pattern
'''
return "$" in data or "{{" in data
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if not sys.version.startswith('2.6'):
SAFE_NODES.union(
set(
(ast.Set,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, basestring):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, {}, locals)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError, e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
except Exception, e:
if include_exceptions:
return (expr, e)
return expr
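# Illustrative usage sketch: safe_eval() accepting a plain literal but refusing a
# call that is not whitelisted (relies on filter_loader and the constants imported
# above, so the ansible package must be importable).
def _demo_safe_eval():
    print(safe_eval("[1, 2, 3]"))  # evaluated to a real list
    result, error = safe_eval("__import__('os')", include_exceptions=True)
    print("%r -> %r" % (result, error))  # the raw expression comes back along with the exception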
def listify_lookup_plugin_terms(terms, basedir, inject):
from ansible.utils import template
if isinstance(terms, basestring):
# someone did:
# with_items: alist
# OR
# with_items: {{ alist }}
stripped = terms.strip()
if not (stripped.startswith('{') or stripped.startswith('[')) and \
not stripped.startswith("/") and \
not stripped.startswith('set([') and \
not LOOKUP_REGEX.search(terms):
# if not already a list, get ready to evaluate with Jinja2
# not sure why the "/" is in above code :)
try:
new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
if isinstance(new_terms, basestring) and "{{" in new_terms:
pass
else:
terms = new_terms
except:
pass
if '{' in terms or '[' in terms:
# Jinja2 already evaluated a variable to a list.
# Jinja2-ified list needs to be converted back to a real type
# TODO: something a bit less heavy than eval
return safe_eval(terms)
if isinstance(terms, basestring):
terms = [ terms ]
return terms
def combine_vars(a, b):
_validate_both_dicts(a, b)
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
'''Return a random password string of length containing only chars.'''
password = []
while len(password) < length:
new_char = os.urandom(1)
if new_char in chars:
password.append(new_char)
return ''.join(password)
def before_comment(msg):
''' what's the part of a string before a comment? '''
msg = msg.replace("\#","**NOT_A_COMMENT**")
msg = msg.split("#")[0]
msg = msg.replace("**NOT_A_COMMENT**","#")
return msg
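# Illustrative usage sketch: before_comment() trimming a trailing comment while
# keeping an escaped '#' (the strings are made-up examples).
def _demo_before_comment():
    print(before_comment('copy: src=a dest=b  # push the file'))  # -> 'copy: src=a dest=b  '
    print(before_comment('motd: issue \\#42 of the newsletter'))  # escaped hash kept as '#42'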
def load_vars(basepath, results, vault_password=None):
"""
Load variables from any potential yaml filename combinations of basepath,
returning result.
"""
paths_to_check = [ "".join([basepath, ext])
for ext in C.YAML_FILENAME_EXTENSIONS ]
found_paths = []
for path in paths_to_check:
found, results = _load_vars_from_path(path, results, vault_password=vault_password)
if found:
found_paths.append(path)
# disallow the potentially confusing situation that there are multiple
# variable files for the same name. For example if both group_vars/all.yml
# and group_vars/all.yaml
if len(found_paths) > 1:
raise errors.AnsibleError("Multiple variable files found. "
"There should only be one. %s" % ( found_paths, ))
return results
## load variables from yaml files/dirs
# e.g. host/group_vars
#
def _load_vars_from_path(path, results, vault_password=None):
"""
Robustly access the file at path and load variables, carefully reporting
errors in a friendly/informative way.
Return the tuple (found, new_results, )
"""
try:
# in the case of a symbolic link, we want the stat of the link itself,
# not its target
pathstat = os.lstat(path)
except os.error, err:
# most common case is that nothing exists at that path.
if err.errno == errno.ENOENT:
return False, results
# otherwise this is a condition we should report to the user
raise errors.AnsibleError(
"%s is not accessible: %s."
" Please check its permissions." % ( path, err.strerror))
# symbolic link
if stat.S_ISLNK(pathstat.st_mode):
try:
target = os.path.realpath(path)
except os.error, err2:
raise errors.AnsibleError("The symbolic link at %s "
"is not readable: %s. Please check its permissions."
% (path, err2.strerror, ))
# follow symbolic link chains by recursing, so we repeat the same
# permissions checks above and provide useful errors.
return _load_vars_from_path(target, results, vault_password)
# directory
if stat.S_ISDIR(pathstat.st_mode):
# support organizing variables across multiple files in a directory
return True, _load_vars_from_folder(path, results, vault_password=vault_password)
# regular file
elif stat.S_ISREG(pathstat.st_mode):
data = parse_yaml_from_file(path, vault_password=vault_password)
if data and type(data) != dict:
raise errors.AnsibleError(
"%s must be stored as a dictionary/hash" % path)
elif data is None:
data = {}
# combine vars overrides by default but can be configured to do a
# hash merge in settings
results = combine_vars(results, data)
return True, results
# something else? could be a fifo, socket, device, etc.
else:
raise errors.AnsibleError("Expected a variable file or directory "
"but found a non-file object at path %s" % (path, ))
def _load_vars_from_folder(folder_path, results, vault_password=None):
"""
Load all variables within a folder recursively.
"""
# this function and _load_vars_from_path are mutually recursive
try:
names = os.listdir(folder_path)
except os.error, err:
raise errors.AnsibleError(
"This folder cannot be listed: %s: %s."
% ( folder_path, err.strerror))
# evaluate files in a stable order rather than whatever order the
# filesystem lists them.
names.sort()
# do not parse hidden files or dirs, e.g. .svn/
paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
for path in paths:
_found, results = _load_vars_from_path(path, results, vault_password=vault_password)
return results
def update_hash(hash, key, new_value):
''' used to avoid nested .update calls on the parent '''
value = hash.get(key, {})
value.update(new_value)
hash[key] = value
def censor_unlogged_data(data):
'''
used when the no_log: True attribute is passed to a task to keep data from a callback.
NOT intended to prevent variable registration, but only things from showing up on
screen
'''
new_data = {}
for (x,y) in data.iteritems():
if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
new_data[x] = y
new_data['censored'] = 'results hidden due to no_log parameter'
return new_data
def check_mutually_exclusive_privilege(options, parser):
# privilege escalation command line arguments need to be mutually exclusive
if (options.su or options.su_user or options.ask_su_pass) and \
(options.sudo or options.sudo_user or options.ask_sudo_pass) or \
(options.su or options.su_user or options.ask_su_pass) and \
(options.become or options.become_user or options.become_ask_pass) or \
(options.sudo or options.sudo_user or options.ask_sudo_pass) and \
(options.become or options.become_user or options.become_ask_pass):
parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
| 36.401565
| 314
| 0.627276
|
3a03e66c53df3cefb14f13a7a86879f51ec9446e
| 53,220
|
py
|
Python
|
awx/main/management/commands/inventory_import.py
|
cvick/awx
|
f7effd82dcacab5f0cedf3f7e5312f4ea7cada0f
|
[
"Apache-2.0"
] | null | null | null |
awx/main/management/commands/inventory_import.py
|
cvick/awx
|
f7effd82dcacab5f0cedf3f7e5312f4ea7cada0f
|
[
"Apache-2.0"
] | null | null | null |
awx/main/management/commands/inventory_import.py
|
cvick/awx
|
f7effd82dcacab5f0cedf3f7e5312f4ea7cada0f
|
[
"Apache-2.0"
] | 1
|
2019-09-17T04:35:27.000Z
|
2019-09-17T04:35:27.000Z
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import logging
import os
import re
import subprocess
import sys
import time
import traceback
import shutil
# Django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, transaction
from django.utils.encoding import smart_text
# AWX
from awx.main.models import * # noqa
from awx.main.utils import (
ignore_inventory_computed_fields,
check_proot_installed,
wrap_args_with_proot,
build_proot_temp_dir,
get_licenser
)
from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data
from awx.main.signals import disable_activity_stream
logger = logging.getLogger('awx.main.commands.inventory_import')
LICENSE_EXPIRED_MESSAGE = '''\
License expired.
See http://www.ansible.com/renew for license extension information.'''
LICENSE_NON_EXISTANT_MESSAGE = '''\
No license.
See http://www.ansible.com/renew for license information.'''
LICENSE_MESSAGE = '''\
Number of licensed instances exceeded, would bring available instances to %(new_count)d, system is licensed for %(available_instances)d.
See http://www.ansible.com/renew for license extension information.'''
DEMO_LICENSE_MESSAGE = '''\
Demo mode free license count exceeded, would bring available instances to %(new_count)d, demo mode allows %(available_instances)d.
See http://www.ansible.com/renew for licensing information.'''
def functioning_dir(path):
if os.path.isdir(path):
return path
return os.path.dirname(path)
class AnsibleInventoryLoader(object):
'''
Given executable `source` (directory, executable, or file) this will
use the ansible-inventory CLI utility to convert it into in-memory
representational objects. Example:
/usr/bin/ansible/ansible-inventory -i hosts --list
If it fails to find this, it uses the backported script instead
'''
def __init__(self, source, group_filter_re=None, host_filter_re=None, is_custom=False):
self.source = source
self.source_dir = functioning_dir(self.source)
self.is_custom = is_custom
self.tmp_private_dir = None
self.method = 'ansible-inventory'
self.group_filter_re = group_filter_re
self.host_filter_re = host_filter_re
self.is_vendored_source = False
if self.source_dir == os.path.join(settings.BASE_DIR, 'plugins', 'inventory'):
self.is_vendored_source = True
def build_env(self):
env = dict(os.environ.items())
env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
env['ANSIBLE_INVENTORY_UNPARSED_FAILED'] = '1'
venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
env.pop('PYTHONPATH', None) # default to none if no python_ver matches
if os.path.isdir(os.path.join(venv_libdir, "python2.7")):
env['PYTHONPATH'] = os.path.join(venv_libdir, "python2.7", "site-packages") + ":"
return env
def get_base_args(self):
# get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
for path in os.environ["PATH"].split(os.pathsep):
potential_path = os.path.join(path.strip('"'), 'ansible-inventory')
if os.path.isfile(potential_path) and os.access(potential_path, os.X_OK):
logger.debug('Using system install of ansible-inventory CLI: {}'.format(potential_path))
return [potential_path, '-i', self.source]
# Stopgap solution for group_vars, do not use backported module for official
# vendored cloud modules or custom scripts TODO: remove after Ansible 2.3 deprecation
if self.is_vendored_source or self.is_custom:
self.method = 'inventory script invocation'
return [self.source]
# ansible-inventory was not found, look for backported module TODO: remove after Ansible 2.3 deprecation
abs_module_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'plugins',
'ansible_inventory', 'backport.py'))
self.method = 'ansible-inventory backport'
if not os.path.exists(abs_module_path):
raise ImproperlyConfigured('Cannot find inventory module')
logger.debug('Using backported ansible-inventory module: {}'.format(abs_module_path))
return [abs_module_path, '-i', self.source]
def get_proot_args(self, cmd, env):
cwd = os.getcwd()
if not check_proot_installed():
raise RuntimeError("proot is not installed but is configured for use")
kwargs = {}
if self.is_custom:
# use source's tmp dir for proot, task manager will delete folder
logger.debug("Using provided directory '{}' for isolation.".format(self.source_dir))
kwargs['proot_temp_dir'] = self.source_dir
cwd = self.source_dir
else:
# we cannot safely store tmp data in source dir or trust script contents
if env['AWX_PRIVATE_DATA_DIR']:
# If this is non-blank, file credentials are being used and we need access
private_data_dir = functioning_dir(env['AWX_PRIVATE_DATA_DIR'])
logger.debug("Using private credential data in '{}'.".format(private_data_dir))
kwargs['private_data_dir'] = private_data_dir
self.tmp_private_dir = build_proot_temp_dir()
logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
kwargs['proot_temp_dir'] = self.tmp_private_dir
# Run from source's location so that custom script contents are in `show_paths`
cwd = functioning_dir(self.source)
logger.debug("Running from `{}` working directory.".format(cwd))
return wrap_args_with_proot(cmd, cwd, **kwargs)
def command_to_json(self, cmd):
data = {}
stdout, stderr = '', ''
env = self.build_env()
if ((self.is_custom or 'AWX_PRIVATE_DATA_DIR' in env) and
getattr(settings, 'AWX_PROOT_ENABLED', False)):
cmd = self.get_proot_args(cmd, env)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = proc.communicate()
if self.tmp_private_dir:
shutil.rmtree(self.tmp_private_dir, True)
if proc.returncode != 0:
raise RuntimeError('%s failed (rc=%d) with stdout:\n%s\nstderr:\n%s' % (
self.method, proc.returncode, stdout, stderr))
for line in stderr.splitlines():
logger.error(line)
try:
data = json.loads(stdout)
if not isinstance(data, dict):
raise TypeError('Returned JSON must be a dictionary, got %s instead' % str(type(data)))
except Exception:
logger.error('Failed to load JSON from: %s', stdout)
raise
return data
def load(self):
base_args = self.get_base_args()
logger.info('Reading Ansible inventory source: %s', self.source)
data = self.command_to_json(base_args + ['--list'])
# TODO: remove after we run custom scripts through ansible-inventory
        if self.is_custom and ('_meta' not in data or 'hostvars' not in data['_meta']):
# Invoke the executable once for each host name we've built up
# to set their variables
data.setdefault('_meta', {})
data['_meta'].setdefault('hostvars', {})
logger.warning('Re-calling script for hostvars individually.')
for group_name, group_data in data.iteritems():
if group_name == '_meta':
continue
if isinstance(group_data, dict):
group_host_list = group_data.get('hosts', [])
elif isinstance(group_data, list):
group_host_list = group_data
else:
logger.warning('Group data for "%s" is not a dict or list',
group_name)
group_host_list = []
for hostname in group_host_list:
logger.debug('Obtaining hostvars for %s' % hostname.encode('utf-8'))
hostdata = self.command_to_json(
base_args + ['--host', hostname.encode("utf-8")]
)
if isinstance(hostdata, dict):
data['_meta']['hostvars'][hostname] = hostdata
else:
logger.warning(
'Expected dict of vars for host "%s" when '
'calling with `--host`, got %s instead',
                            hostname, str(type(hostdata))
)
logger.info('Processing JSON output...')
inventory = MemInventory(
group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
inventory = dict_to_mem_data(data, inventory=inventory)
return inventory
def load_inventory_source(source, group_filter_re=None,
host_filter_re=None, exclude_empty_groups=False,
is_custom=False):
'''
Load inventory from given source directory or file.
'''
# Sanity check: We sanitize these module names for our API but Ansible proper doesn't follow
# good naming conventions
source = source.replace('rhv.py', 'ovirt4.py')
source = source.replace('satellite6.py', 'foreman.py')
source = source.replace('vmware.py', 'vmware_inventory.py')
if not os.path.exists(source):
raise IOError('Source does not exist: %s' % source)
source = os.path.join(os.getcwd(), os.path.dirname(source),
os.path.basename(source))
source = os.path.normpath(os.path.abspath(source))
inventory = AnsibleInventoryLoader(
source=source,
group_filter_re=group_filter_re,
host_filter_re=host_filter_re,
is_custom=is_custom).load()
logger.debug('Finished loading from source: %s', source)
# Exclude groups that are completely empty.
if exclude_empty_groups:
inventory.delete_empty_groups()
logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups),
len(inventory.all_group.all_hosts))
return inventory.all_group
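# Example usage sketch (paths and arguments are hypothetical, added for
# illustration only):
#
#     all_group = load_inventory_source('/etc/ansible/hosts',
#                                       group_filter_re=re.compile(r'^.+$'),
#                                       host_filter_re=re.compile(r'^.+$'),
#                                       exclude_empty_groups=True)
#     print(len(all_group.all_hosts))
#
# The returned object is the in-memory "all" group built by dict_to_mem_data(),
# as consumed by Command.handle() below.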
class Command(BaseCommand):
'''
Management command to import inventory from a directory, ini file, or
dynamic inventory script.
'''
help = 'Import or sync external inventory sources'
def add_arguments(self, parser):
parser.add_argument('--inventory-name', dest='inventory_name',
type=str, default=None, metavar='n',
help='name of inventory to sync')
parser.add_argument('--inventory-id', dest='inventory_id', type=int,
default=None, metavar='i',
help='id of inventory to sync')
parser.add_argument('--overwrite', dest='overwrite', action='store_true', default=False,
help='overwrite the destination hosts and groups')
parser.add_argument('--overwrite-vars', dest='overwrite_vars',
action='store_true', default=False,
help='overwrite (rather than merge) variables')
parser.add_argument('--keep-vars', dest='keep_vars', action='store_true', default=False,
help='use database variables if set')
parser.add_argument('--custom', dest='custom', action='store_true', default=False,
help='this is a custom inventory script')
parser.add_argument('--source', dest='source', type=str, default=None,
metavar='s', help='inventory directory, file, or script to load')
parser.add_argument('--enabled-var', dest='enabled_var', type=str,
default=None, metavar='v', help='host variable used to '
'set/clear enabled flag when host is online/offline, may '
'be specified as "foo.bar" to traverse nested dicts.')
parser.add_argument('--enabled-value', dest='enabled_value', type=str,
default=None, metavar='v', help='value of host variable '
'specified by --enabled-var that indicates host is '
'enabled/online.')
parser.add_argument('--group-filter', dest='group_filter', type=str,
default=None, metavar='regex', help='regular expression '
'to filter group name(s); only matches are imported.')
parser.add_argument('--host-filter', dest='host_filter', type=str,
default=None, metavar='regex', help='regular expression '
'to filter host name(s); only matches are imported.')
parser.add_argument('--exclude-empty-groups', dest='exclude_empty_groups',
action='store_true', default=False, help='when set, '
'exclude all groups that have no child groups, hosts, or '
'variables.')
parser.add_argument('--instance-id-var', dest='instance_id_var', type=str,
default=None, metavar='v', help='host variable that '
'specifies the unique, immutable instance ID, may be '
'specified as "foo.bar" to traverse nested dicts.')
def set_logging_level(self):
log_levels = dict(enumerate([logging.WARNING, logging.INFO,
logging.DEBUG, 0]))
logger.setLevel(log_levels.get(self.verbosity, 0))
def _get_instance_id(self, from_dict, default=''):
'''
Retrieve the instance ID from the given dict of host variables.
The instance ID variable may be specified as 'foo.bar', in which case
the lookup will traverse into nested dicts, equivalent to:
from_dict.get('foo', {}).get('bar', default)
'''
instance_id = default
if getattr(self, 'instance_id_var', None):
for key in self.instance_id_var.split('.'):
if not hasattr(from_dict, 'get'):
instance_id = default
break
instance_id = from_dict.get(key, default)
from_dict = instance_id
return smart_text(instance_id)
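    # Worked example (hypothetical variables, added for illustration): with
    # instance_id_var = 'ec2.instance_id' and host variables
    # {'ec2': {'instance_id': 'i-abc123'}}, the loop above walks 'ec2' then
    # 'instance_id' and returns 'i-abc123'; any missing key falls back to
    # the default.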
def _get_enabled(self, from_dict, default=None):
'''
Retrieve the enabled state from the given dict of host variables.
The enabled variable may be specified as 'foo.bar', in which case
the lookup will traverse into nested dicts, equivalent to:
from_dict.get('foo', {}).get('bar', default)
'''
enabled = default
if getattr(self, 'enabled_var', None):
default = object()
for key in self.enabled_var.split('.'):
if not hasattr(from_dict, 'get'):
enabled = default
break
enabled = from_dict.get(key, default)
from_dict = enabled
if enabled is not default:
enabled_value = getattr(self, 'enabled_value', None)
if enabled_value is not None:
enabled = bool(unicode(enabled_value) == unicode(enabled))
else:
enabled = bool(enabled)
if enabled is default:
return None
elif isinstance(enabled, bool):
return enabled
else:
raise NotImplementedError('Value of enabled {} not understood.'.format(enabled))
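    # Worked example (hypothetical variables, added for illustration): with
    # enabled_var = 'status.power' and enabled_value = 'on', host variables
    # {'status': {'power': 'on'}} yield True, {'status': {'power': 'off'}}
    # yield False, and a host without the variable yields None (enabled flag
    # left unchanged).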
def load_inventory_from_database(self):
'''
Load inventory and related objects from the database.
'''
# Load inventory object based on name or ID.
if self.inventory_id:
q = dict(id=self.inventory_id)
else:
q = dict(name=self.inventory_name)
try:
self.inventory = Inventory.objects.get(**q)
except Inventory.DoesNotExist:
raise CommandError('Inventory with %s = %s cannot be found' % q.items()[0])
except Inventory.MultipleObjectsReturned:
raise CommandError('Inventory with %s = %s returned multiple results' % q.items()[0])
logger.info('Updating inventory %d: %s' % (self.inventory.pk,
self.inventory.name))
# Load inventory source if specified via environment variable (when
# inventory_import is called from an InventoryUpdate task).
inventory_source_id = os.getenv('INVENTORY_SOURCE_ID', None)
inventory_update_id = os.getenv('INVENTORY_UPDATE_ID', None)
if inventory_source_id:
try:
self.inventory_source = InventorySource.objects.get(pk=inventory_source_id,
inventory=self.inventory)
except InventorySource.DoesNotExist:
raise CommandError('Inventory source with id=%s not found' %
inventory_source_id)
try:
self.inventory_update = InventoryUpdate.objects.get(pk=inventory_update_id)
except InventoryUpdate.DoesNotExist:
raise CommandError('Inventory update with id=%s not found' %
inventory_update_id)
# Otherwise, create a new inventory source to capture this invocation
# via command line.
else:
with ignore_inventory_computed_fields():
self.inventory_source, created = InventorySource.objects.get_or_create(
inventory=self.inventory,
source='file',
source_path=os.path.abspath(self.source),
overwrite=self.overwrite,
overwrite_vars=self.overwrite_vars,
)
self.inventory_update = self.inventory_source.create_inventory_update(
_eager_fields=dict(
job_args=json.dumps(sys.argv),
job_env=dict(os.environ.items()),
job_cwd=os.getcwd())
)
# FIXME: Wait or raise error if inventory is being updated by another
# source.
def _batch_add_m2m(self, related_manager, *objs, **kwargs):
key = (related_manager.instance.pk, related_manager.through._meta.db_table)
flush = bool(kwargs.get('flush', False))
if not hasattr(self, '_batch_add_m2m_cache'):
self._batch_add_m2m_cache = {}
cached_objs = self._batch_add_m2m_cache.setdefault(key, [])
cached_objs.extend(objs)
if len(cached_objs) > self._batch_size or flush:
if len(cached_objs):
related_manager.add(*cached_objs)
self._batch_add_m2m_cache[key] = []
def _build_db_instance_id_map(self):
'''
Find any hosts in the database without an instance_id set that may
still have one available via host variables.
'''
self.db_instance_id_map = {}
if self.instance_id_var:
host_qs = self.inventory_source.hosts.all()
host_qs = host_qs.filter(instance_id='',
variables__contains=self.instance_id_var.split('.')[0])
for host in host_qs:
instance_id = self._get_instance_id(host.variables_dict)
if not instance_id:
continue
self.db_instance_id_map[instance_id] = host.pk
def _build_mem_instance_id_map(self):
'''
Update instance ID for each imported host and define a mapping of
instance IDs to MemHost instances.
'''
self.mem_instance_id_map = {}
if self.instance_id_var:
for mem_host in self.all_group.all_hosts.values():
instance_id = self._get_instance_id(mem_host.variables)
if not instance_id:
logger.warning('Host "%s" has no "%s" variable',
mem_host.name, self.instance_id_var)
continue
mem_host.instance_id = instance_id
self.mem_instance_id_map[instance_id] = mem_host.name
def _delete_hosts(self):
'''
For each host in the database that is NOT in the local list, delete
it. When importing from a cloud inventory source attached to a
specific group, only delete hosts beneath that group. Delete each
host individually so signal handlers will run.
'''
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
hosts_qs = self.inventory_source.hosts
# Build list of all host pks, remove all that should not be deleted.
del_host_pks = set(hosts_qs.values_list('pk', flat=True))
if self.instance_id_var:
all_instance_ids = self.mem_instance_id_map.keys()
instance_ids = []
for offset in xrange(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
for host_pk in hosts_qs.filter(instance_id__in=instance_ids).values_list('pk', flat=True):
del_host_pks.discard(host_pk)
for host_pk in set([v for k,v in self.db_instance_id_map.items() if k in instance_ids]):
del_host_pks.discard(host_pk)
all_host_names = list(set(self.mem_instance_id_map.values()) - set(self.all_group.all_hosts.keys()))
else:
all_host_names = self.all_group.all_hosts.keys()
for offset in xrange(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset:(offset + self._batch_size)]
for host_pk in hosts_qs.filter(name__in=host_names).values_list('pk', flat=True):
del_host_pks.discard(host_pk)
# Now delete all remaining hosts in batches.
all_del_pks = sorted(list(del_host_pks))
for offset in xrange(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset:(offset + self._batch_size)]
for host in hosts_qs.filter(pk__in=del_pks):
host_name = host.name
host.delete()
logger.info('Deleted host "%s"', host_name)
if settings.SQL_DEBUG:
logger.warning('host deletions took %d queries for %d hosts',
len(connection.queries) - queries_before,
len(all_del_pks))
def _delete_groups(self):
'''
# If overwrite is set, for each group in the database that is NOT in
# the local list, delete it. When importing from a cloud inventory
# source attached to a specific group, only delete children of that
# group. Delete each group individually so signal handlers will run.
'''
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
groups_qs = self.inventory_source.groups.all()
# Build list of all group pks, remove those that should not be deleted.
del_group_pks = set(groups_qs.values_list('pk', flat=True))
all_group_names = self.all_group.all_groups.keys()
for offset in xrange(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset:(offset + self._batch_size)]
for group_pk in groups_qs.filter(name__in=group_names).values_list('pk', flat=True):
del_group_pks.discard(group_pk)
if self.inventory_source.deprecated_group_id in del_group_pks: # TODO: remove in 3.3
logger.warning(
'Group "%s" from v1 API is not deleted by overwrite',
self.inventory_source.deprecated_group.name
)
del_group_pks.discard(self.inventory_source.deprecated_group_id)
# Now delete all remaining groups in batches.
all_del_pks = sorted(list(del_group_pks))
for offset in xrange(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset:(offset + self._batch_size)]
for group in groups_qs.filter(pk__in=del_pks):
group_name = group.name
with ignore_inventory_computed_fields():
group.delete()
logger.info('Group "%s" deleted', group_name)
if settings.SQL_DEBUG:
logger.warning('group deletions took %d queries for %d groups',
len(connection.queries) - queries_before,
len(all_del_pks))
def _delete_group_children_and_hosts(self):
'''
Clear all invalid child relationships for groups and all invalid host
memberships. When importing from a cloud inventory source attached to
a specific group, only clear relationships for hosts and groups that
are beneath the inventory source group.
'''
# FIXME: Optimize performance!
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
group_group_count = 0
group_host_count = 0
db_groups = self.inventory_source.groups
for db_group in db_groups.all():
if self.inventory_source.deprecated_group_id == db_group.id: # TODO: remove in 3.3
logger.info(
'Group "%s" from v1 API child group/host connections preserved',
db_group.name
)
continue
# Delete child group relationships not present in imported data.
db_children = db_group.children
db_children_name_pk_map = dict(db_children.values_list('name', 'pk'))
mem_children = self.all_group.all_groups[db_group.name].children
for mem_group in mem_children:
db_children_name_pk_map.pop(mem_group.name, None)
del_child_group_pks = list(set(db_children_name_pk_map.values()))
for offset in xrange(0, len(del_child_group_pks), self._batch_size):
child_group_pks = del_child_group_pks[offset:(offset + self._batch_size)]
for db_child in db_children.filter(pk__in=child_group_pks):
group_group_count += 1
db_group.children.remove(db_child)
logger.info('Group "%s" removed from group "%s"',
db_child.name, db_group.name)
# FIXME: Inventory source group relationships
# Delete group/host relationships not present in imported data.
db_hosts = db_group.hosts
del_host_pks = set(db_hosts.values_list('pk', flat=True))
mem_hosts = self.all_group.all_groups[db_group.name].hosts
all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
for offset in xrange(0, len(all_mem_host_names), self._batch_size):
mem_host_names = all_mem_host_names[offset:(offset + self._batch_size)]
for db_host_pk in db_hosts.filter(name__in=mem_host_names).values_list('pk', flat=True):
del_host_pks.discard(db_host_pk)
all_mem_instance_ids = [h.instance_id for h in mem_hosts if h.instance_id]
for offset in xrange(0, len(all_mem_instance_ids), self._batch_size):
mem_instance_ids = all_mem_instance_ids[offset:(offset + self._batch_size)]
for db_host_pk in db_hosts.filter(instance_id__in=mem_instance_ids).values_list('pk', flat=True):
del_host_pks.discard(db_host_pk)
all_db_host_pks = [v for k,v in self.db_instance_id_map.items() if k in all_mem_instance_ids]
for db_host_pk in all_db_host_pks:
del_host_pks.discard(db_host_pk)
del_host_pks = list(del_host_pks)
for offset in xrange(0, len(del_host_pks), self._batch_size):
del_pks = del_host_pks[offset:(offset + self._batch_size)]
for db_host in db_hosts.filter(pk__in=del_pks):
group_host_count += 1
if db_host not in db_group.hosts.all():
continue
db_group.hosts.remove(db_host)
logger.info('Host "%s" removed from group "%s"',
db_host.name, db_group.name)
if settings.SQL_DEBUG:
logger.warning('group-group and group-host deletions took %d queries for %d relationships',
len(connection.queries) - queries_before,
group_group_count + group_host_count)
def _update_inventory(self):
'''
Update inventory variables from "all" group.
'''
# TODO: We disable variable overwrite here in case user-defined inventory variables get
# mangled. But we still need to figure out a better way of processing multiple inventory
# update variables mixing with each other.
all_obj = self.inventory
db_variables = all_obj.variables_dict
db_variables.update(self.all_group.variables)
if db_variables != all_obj.variables_dict:
all_obj.variables = json.dumps(db_variables)
all_obj.save(update_fields=['variables'])
logger.info('Inventory variables updated from "all" group')
else:
logger.info('Inventory variables unmodified')
def _create_update_groups(self):
'''
For each group in the local list, create it if it doesn't exist in the
database. Otherwise, update/replace database variables from the
imported data. Associate with the inventory source group if importing
from cloud inventory source.
'''
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
all_group_names = sorted(self.all_group.all_groups.keys())
root_group_names = set()
for k,v in self.all_group.all_groups.items():
if not v.parents:
root_group_names.add(k)
if len(v.parents) == 1 and v.parents[0].name == 'all':
root_group_names.add(k)
existing_group_names = set()
for offset in xrange(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset:(offset + self._batch_size)]
for group in self.inventory.groups.filter(name__in=group_names):
mem_group = self.all_group.all_groups[group.name]
db_variables = group.variables_dict
if self.overwrite_vars:
db_variables = mem_group.variables
else:
db_variables.update(mem_group.variables)
if db_variables != group.variables_dict:
group.variables = json.dumps(db_variables)
group.save(update_fields=['variables'])
if self.overwrite_vars:
logger.info('Group "%s" variables replaced', group.name)
else:
logger.info('Group "%s" variables updated', group.name)
else:
logger.info('Group "%s" variables unmodified', group.name)
existing_group_names.add(group.name)
self._batch_add_m2m(self.inventory_source.groups, group)
for group_name in all_group_names:
if group_name in existing_group_names:
continue
mem_group = self.all_group.all_groups[group_name]
group = self.inventory.groups.update_or_create(
name=group_name,
defaults={
'variables':json.dumps(mem_group.variables),
'description':'imported'
}
)[0]
logger.info('Group "%s" added', group.name)
self._batch_add_m2m(self.inventory_source.groups, group)
self._batch_add_m2m(self.inventory_source.groups, flush=True)
if settings.SQL_DEBUG:
logger.warning('group updates took %d queries for %d groups',
len(connection.queries) - queries_before,
len(self.all_group.all_groups))
def _update_db_host_from_mem_host(self, db_host, mem_host):
# Update host variables.
db_variables = db_host.variables_dict
if self.overwrite_vars:
db_variables = mem_host.variables
else:
db_variables.update(mem_host.variables)
update_fields = []
if db_variables != db_host.variables_dict:
db_host.variables = json.dumps(db_variables)
update_fields.append('variables')
# Update host enabled flag.
enabled = self._get_enabled(mem_host.variables)
if enabled is not None and db_host.enabled != enabled:
db_host.enabled = enabled
update_fields.append('enabled')
# Update host name.
if mem_host.name != db_host.name:
old_name = db_host.name
db_host.name = mem_host.name
update_fields.append('name')
# Update host instance_id.
instance_id = self._get_instance_id(mem_host.variables)
if instance_id != db_host.instance_id:
old_instance_id = db_host.instance_id
db_host.instance_id = instance_id
update_fields.append('instance_id')
# Update host and display message(s) on what changed.
if update_fields:
db_host.save(update_fields=update_fields)
if 'name' in update_fields:
logger.info('Host renamed from "%s" to "%s"', old_name, mem_host.name)
if 'instance_id' in update_fields:
if old_instance_id:
logger.info('Host "%s" instance_id updated', mem_host.name)
else:
logger.info('Host "%s" instance_id added', mem_host.name)
if 'variables' in update_fields:
if self.overwrite_vars:
logger.info('Host "%s" variables replaced', mem_host.name)
else:
logger.info('Host "%s" variables updated', mem_host.name)
else:
logger.info('Host "%s" variables unmodified', mem_host.name)
if 'enabled' in update_fields:
if enabled:
logger.info('Host "%s" is now enabled', mem_host.name)
else:
logger.info('Host "%s" is now disabled', mem_host.name)
self._batch_add_m2m(self.inventory_source.hosts, db_host)
def _create_update_hosts(self):
'''
For each host in the local list, create it if it doesn't exist in the
database. Otherwise, update/replace database variables from the
imported data. Associate with the inventory source group if importing
from cloud inventory source.
'''
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
host_pks_updated = set()
mem_host_pk_map = {}
mem_host_instance_id_map = {}
mem_host_name_map = {}
mem_host_names_to_update = set(self.all_group.all_hosts.keys())
for k,v in self.all_group.all_hosts.iteritems():
mem_host_name_map[k] = v
instance_id = self._get_instance_id(v.variables)
if instance_id in self.db_instance_id_map:
mem_host_pk_map[self.db_instance_id_map[instance_id]] = v
elif instance_id:
mem_host_instance_id_map[instance_id] = v
# Update all existing hosts where we know the PK based on instance_id.
all_host_pks = sorted(mem_host_pk_map.keys())
for offset in xrange(0, len(all_host_pks), self._batch_size):
host_pks = all_host_pks[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter( pk__in=host_pks):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_pk_map[db_host.pk]
self._update_db_host_from_mem_host(db_host, mem_host)
host_pks_updated.add(db_host.pk)
mem_host_names_to_update.discard(mem_host.name)
# Update all existing hosts where we know the instance_id.
all_instance_ids = sorted(mem_host_instance_id_map.keys())
for offset in xrange(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter( instance_id__in=instance_ids):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_instance_id_map[db_host.instance_id]
self._update_db_host_from_mem_host(db_host, mem_host)
host_pks_updated.add(db_host.pk)
mem_host_names_to_update.discard(mem_host.name)
# Update all existing hosts by name.
all_host_names = sorted(mem_host_name_map.keys())
for offset in xrange(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset:(offset + self._batch_size)]
for db_host in self.inventory.hosts.filter( name__in=host_names):
if db_host.pk in host_pks_updated:
continue
mem_host = mem_host_name_map[db_host.name]
self._update_db_host_from_mem_host(db_host, mem_host)
host_pks_updated.add(db_host.pk)
mem_host_names_to_update.discard(mem_host.name)
# Create any new hosts.
for mem_host_name in sorted(mem_host_names_to_update):
mem_host = self.all_group.all_hosts[mem_host_name]
host_attrs = dict(variables=json.dumps(mem_host.variables),
description='imported')
enabled = self._get_enabled(mem_host.variables)
if enabled is not None:
host_attrs['enabled'] = enabled
if self.instance_id_var:
instance_id = self._get_instance_id(mem_host.variables)
host_attrs['instance_id'] = instance_id
db_host = self.inventory.hosts.update_or_create(name=mem_host_name, defaults=host_attrs)[0]
if enabled is False:
logger.info('Host "%s" added (disabled)', mem_host_name)
else:
logger.info('Host "%s" added', mem_host_name)
self._batch_add_m2m(self.inventory_source.hosts, db_host)
self._batch_add_m2m(self.inventory_source.hosts, flush=True)
if settings.SQL_DEBUG:
logger.warning('host updates took %d queries for %d hosts',
len(connection.queries) - queries_before,
len(self.all_group.all_hosts))
@transaction.atomic
def _create_update_group_children(self):
'''
For each imported group, create all parent-child group relationships.
'''
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
all_group_names = sorted([k for k,v in self.all_group.all_groups.iteritems() if v.children])
group_group_count = 0
for offset in xrange(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset:(offset + self._batch_size)]
for db_group in self.inventory.groups.filter(name__in=group_names):
mem_group = self.all_group.all_groups[db_group.name]
group_group_count += len(mem_group.children)
all_child_names = sorted([g.name for g in mem_group.children])
for offset2 in xrange(0, len(all_child_names), self._batch_size):
child_names = all_child_names[offset2:(offset2 + self._batch_size)]
db_children_qs = self.inventory.groups.filter(name__in=child_names)
for db_child in db_children_qs.filter(children__id=db_group.id):
logger.info('Group "%s" already child of group "%s"', db_child.name, db_group.name)
for db_child in db_children_qs.exclude(children__id=db_group.id):
self._batch_add_m2m(db_group.children, db_child)
logger.info('Group "%s" added as child of "%s"', db_child.name, db_group.name)
self._batch_add_m2m(db_group.children, flush=True)
if settings.SQL_DEBUG:
logger.warning('Group-group updates took %d queries for %d group-group relationships',
len(connection.queries) - queries_before, group_group_count)
@transaction.atomic
def _create_update_group_hosts(self):
# For each host in a mem group, add it to the parent(s) to which it
# belongs.
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
all_group_names = sorted([k for k,v in self.all_group.all_groups.iteritems() if v.hosts])
group_host_count = 0
for offset in xrange(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset:(offset + self._batch_size)]
for db_group in self.inventory.groups.filter(name__in=group_names):
mem_group = self.all_group.all_groups[db_group.name]
group_host_count += len(mem_group.hosts)
all_host_names = sorted([h.name for h in mem_group.hosts if not h.instance_id])
for offset2 in xrange(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset2:(offset2 + self._batch_size)]
db_hosts_qs = self.inventory.hosts.filter(name__in=host_names)
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
self._batch_add_m2m(db_group.hosts, db_host)
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
all_instance_ids = sorted([h.instance_id for h in mem_group.hosts if h.instance_id])
for offset2 in xrange(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset2:(offset2 + self._batch_size)]
db_hosts_qs = self.inventory.hosts.filter(instance_id__in=instance_ids)
for db_host in db_hosts_qs.filter(groups__id=db_group.id):
logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
self._batch_add_m2m(db_group.hosts, db_host)
logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
self._batch_add_m2m(db_group.hosts, flush=True)
if settings.SQL_DEBUG:
logger.warning('Group-host updates took %d queries for %d group-host relationships',
len(connection.queries) - queries_before, group_host_count)
def load_into_database(self):
'''
Load inventory from in-memory groups to the database, overwriting or
merging as appropriate.
'''
# FIXME: Attribute changes to superuser?
# Perform __in queries in batches (mainly for unit tests using SQLite).
self._batch_size = 500
self._build_db_instance_id_map()
self._build_mem_instance_id_map()
if self.overwrite:
self._delete_hosts()
self._delete_groups()
self._delete_group_children_and_hosts()
self._update_inventory()
self._create_update_groups()
self._create_update_hosts()
self._create_update_group_children()
self._create_update_group_hosts()
def check_license(self):
license_info = get_licenser().validate()
if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED':
logger.error(LICENSE_NON_EXISTANT_MESSAGE)
raise CommandError('No license found!')
elif license_info.get('license_type', 'UNLICENSED') == 'open':
return
available_instances = license_info.get('available_instances', 0)
free_instances = license_info.get('free_instances', 0)
time_remaining = license_info.get('time_remaining', 0)
new_count = Host.objects.active_count()
if time_remaining <= 0 and not license_info.get('demo', False):
logger.error(LICENSE_EXPIRED_MESSAGE)
raise CommandError("License has expired!")
if free_instances < 0:
d = {
'new_count': new_count,
'available_instances': available_instances,
}
if license_info.get('demo', False):
logger.error(DEMO_LICENSE_MESSAGE % d)
else:
logger.error(LICENSE_MESSAGE % d)
raise CommandError('License count exceeded!')
def mark_license_failure(self, save=True):
self.inventory_update.license_error = True
self.inventory_update.save(update_fields=['license_error'])
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity', 1))
self.set_logging_level()
self.inventory_name = options.get('inventory_name', None)
self.inventory_id = options.get('inventory_id', None)
self.overwrite = bool(options.get('overwrite', False))
self.overwrite_vars = bool(options.get('overwrite_vars', False))
self.keep_vars = bool(options.get('keep_vars', False))
self.is_custom = bool(options.get('custom', False))
self.source = options.get('source', None)
self.enabled_var = options.get('enabled_var', None)
self.enabled_value = options.get('enabled_value', None)
self.group_filter = options.get('group_filter', None) or r'^.+$'
self.host_filter = options.get('host_filter', None) or r'^.+$'
self.exclude_empty_groups = bool(options.get('exclude_empty_groups', False))
self.instance_id_var = options.get('instance_id_var', None)
        self.celery_invoked = os.getenv('INVENTORY_SOURCE_ID') is not None
# Load inventory and related objects from database.
if self.inventory_name and self.inventory_id:
raise CommandError('--inventory-name and --inventory-id are mutually exclusive')
elif not self.inventory_name and not self.inventory_id:
raise CommandError('--inventory-name or --inventory-id is required')
if (self.overwrite or self.overwrite_vars) and self.keep_vars:
raise CommandError('--overwrite/--overwrite-vars and --keep-vars are mutually exclusive')
if not self.source:
raise CommandError('--source is required')
try:
self.group_filter_re = re.compile(self.group_filter)
except re.error:
raise CommandError('invalid regular expression for --group-filter')
try:
self.host_filter_re = re.compile(self.host_filter)
except re.error:
raise CommandError('invalid regular expression for --host-filter')
'''
TODO: Remove this deprecation when we remove support for rax.py
'''
if self.source == "rax.py":
logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")
begin = time.time()
self.load_inventory_from_database()
try:
self.check_license()
except CommandError as e:
self.mark_license_failure(save=True)
raise e
status, tb, exc = 'error', '', None
try:
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
# Update inventory update for this command line invocation.
with ignore_inventory_computed_fields():
iu = self.inventory_update
if iu.status != 'running':
with transaction.atomic():
self.inventory_update.status = 'running'
self.inventory_update.save()
# Load inventory from source.
self.all_group = load_inventory_source(self.source,
self.group_filter_re,
self.host_filter_re,
self.exclude_empty_groups,
self.is_custom)
if settings.DEBUG:
# depending on inventory source, this output can be
# *exceedingly* verbose - crawling a deeply nested
# inventory/group data structure and printing metadata about
# each host and its memberships
#
# it's easy for this scale of data to overwhelm pexpect,
# (and it's likely only useful for purposes of debugging the
# actual inventory import code), so only print it if we have to:
# https://github.com/ansible/ansible-tower/issues/7414#issuecomment-321615104
self.all_group.debug_tree()
with batch_role_ancestor_rebuilding():
# Ensure that this is managed as an atomic SQL transaction,
# and thus properly rolled back if there is an issue.
with transaction.atomic():
# Merge/overwrite inventory into database.
if settings.SQL_DEBUG:
logger.warning('loading into database...')
with ignore_inventory_computed_fields():
if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
self.load_into_database()
else:
with disable_activity_stream():
self.load_into_database()
if settings.SQL_DEBUG:
queries_before2 = len(connection.queries)
self.inventory.update_computed_fields()
if settings.SQL_DEBUG:
logger.warning('update computed fields took %d queries',
len(connection.queries) - queries_before2)
try:
self.check_license()
except CommandError as e:
self.mark_license_failure(save=True)
raise e
if settings.SQL_DEBUG:
logger.warning('Inventory import completed for %s in %0.1fs',
self.inventory_source.name, time.time() - begin)
else:
logger.info('Inventory import completed for %s in %0.1fs',
self.inventory_source.name, time.time() - begin)
status = 'successful'
# If we're in debug mode, then log the queries and time
# used to do the operation.
if settings.SQL_DEBUG:
queries_this_import = connection.queries[queries_before:]
sqltime = sum(float(x['time']) for x in queries_this_import)
logger.warning('Inventory import required %d queries '
'taking %0.3fs', len(queries_this_import),
sqltime)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
status = 'canceled'
exc = e
elif isinstance(e, CommandError):
exc = e
else:
tb = traceback.format_exc()
exc = e
transaction.rollback()
if self.celery_invoked is False:
with ignore_inventory_computed_fields():
self.inventory_update = InventoryUpdate.objects.get(pk=self.inventory_update.pk)
self.inventory_update.result_traceback = tb
self.inventory_update.status = status
self.inventory_update.save(update_fields=['status', 'result_traceback'])
if exc and isinstance(exc, CommandError):
sys.exit(1)
elif exc:
raise
| 49.878163
| 143
| 0.6059
|
05279591d25aba3a54e7f1100a692907e4d67be2
| 514
|
py
|
Python
|
djangorestautomatepm/restautomatepm/migrations/0017_auto_20200630_1738.py
|
sammydowds/django-rest-automate-pm
|
e3bd85f5c46ec72d564a2749785f058095bcd2e1
|
[
"MIT"
] | null | null | null |
djangorestautomatepm/restautomatepm/migrations/0017_auto_20200630_1738.py
|
sammydowds/django-rest-automate-pm
|
e3bd85f5c46ec72d564a2749785f058095bcd2e1
|
[
"MIT"
] | 7
|
2020-07-10T18:34:58.000Z
|
2021-09-22T19:24:37.000Z
|
djangorestautomatepm/restautomatepm/migrations/0017_auto_20200630_1738.py
|
sammydowds/django-rest-automate-pm
|
e3bd85f5c46ec72d564a2749785f058095bcd2e1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-06-30 17:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restautomatepm', '0016_projects_owner'),
]
operations = [
migrations.RemoveField(
model_name='projects',
name='status',
),
migrations.AlterField(
model_name='log',
name='description',
field=models.TextField(default='..change not recorded'),
),
]
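# Note added for illustration: once this migration file is present, it is
# applied with Django's standard command, e.g.:
#
#     python manage.py migrate restautomatepm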
| 22.347826
| 68
| 0.577821
|
5b7ac8b2232587a234cda862c211d7d967184ad4
| 5,609
|
py
|
Python
|
raspa/analyze/raspa_output.py
|
kbsezginel/tutorials
|
ea9797f987b862d4dc6c7e2628c6eed0ca66e0e1
|
[
"MIT"
] | 11
|
2017-12-07T20:30:50.000Z
|
2022-03-14T09:07:01.000Z
|
raspa/analyze/raspa_output.py
|
kbsezginel/tutorials
|
ea9797f987b862d4dc6c7e2628c6eed0ca66e0e1
|
[
"MIT"
] | null | null | null |
raspa/analyze/raspa_output.py
|
kbsezginel/tutorials
|
ea9797f987b862d4dc6c7e2628c6eed0ca66e0e1
|
[
"MIT"
] | 4
|
2018-09-09T02:34:56.000Z
|
2019-06-19T20:07:55.000Z
|
"""
Read RASPA output file for gas adsorption simulations.
- Can read mixture simulations
- Can read unfinished simulation data (reads the last cycle)
>>> python read-raspa-output.py Output/System_0/output_IRMOF-1_1.1.1_298.000000_1e+07.data
"""
import os
import sys
import glob
def parse_output(data_file, verbose=False, save=False, loading='absolute', framework=None):
"""Parse output file for gas adsorption data.
Args:
data_file (str): path to RASPA simulation output file.
Returns:
results (dict): absolute and excess molar, gravimetric, and volumetric
gas loadings, as well as energy of average, van der Waals, and
Coulombic host-host, host-adsorbate, and adsorbate-adsorbate
interactions.
"""
with open(data_file) as ads_data:
data_lines = ads_data.readlines()
results = dict(ads={}, err={}, finished=False, warnings=[], components=[])
if framework is not None:
results['framework'] = framework
else:
results['framework'] = os.path.basename(data_file).split('_')[1]
for i, line in enumerate(data_lines):
if 'Number of molecules:' in line:
ads_start = i
if 'Average Widom Rosenbluth factor:' in line:
ads_end = i
if 'Simulation finished' in line:
results['finished'] = True
if 'WARNING' in line:
results['warnings'].append(line)
if '(Adsorbate molecule)' in line:
results['components'].append(line.split()[2].replace('[', '').replace(']', ''))
    if len(results['warnings']) > 0 and verbose:
        print('%s - %i warning(s) found -> %s' %
              (results['framework'], len(results['warnings']),
               results['warnings'][0].strip()))
if results['finished']:
ads_lines = data_lines[ads_start:ads_end]
for i, line in enumerate(ads_lines):
if 'Component' in line:
comp_name = line.split()[2].replace('[', '').replace(']', '')
results['ads'][comp_name] = {'id': line.split()[1]}
results['err'][comp_name] = {'id': line.split()[1]}
if 'Average loading %s [molecules/unit cell]' % loading in line:
results['ads'][comp_name]['mol/uc'] = float(ads_lines[i].split()[5])
results['err'][comp_name]['mol/uc'] = float(ads_lines[i].split()[7])
results['ads'][comp_name]['mol/kg'] = float(ads_lines[i + 1].split()[5])
results['err'][comp_name]['mol/kg'] = float(ads_lines[i + 1].split()[7])
results['ads'][comp_name]['mg/g'] = float(ads_lines[i + 2].split()[5])
results['err'][comp_name]['mg/g'] = float(ads_lines[i + 2].split()[7])
results['ads'][comp_name]['cc/g'] = float(ads_lines[i + 3].split()[6])
results['err'][comp_name]['cc/g'] = float(ads_lines[i + 3].split()[8])
results['ads'][comp_name]['cc/cc'] = float(ads_lines[i + 4].split()[6])
results['err'][comp_name]['cc/cc'] = float(ads_lines[i + 4].split()[8])
else:
results['initialization'], results['cycle'] = False, 0
for i, line in enumerate(data_lines):
if 'Current cycle' in line and line[0] == 'C': # If production cycle
results['cycle'] = int(line.split()[2])
results['initialization'] = True
if 'Loadings per component' in line and results['initialization']:
for j, comp_name in enumerate(results['components']):
results['ads'][comp_name] = {'id': j}
results['ads'][comp_name]['mol/uc'] = float(data_lines[i + 3 + 6 * j].split()[2])
results['ads'][comp_name]['mol/kg'] = float(data_lines[i + 3 + 6 * j].split()[6])
results['ads'][comp_name]['mg/g'] = float(data_lines[i + 3 + 6 * j].split()[10])
results['ads'][comp_name]['cc/g'] = float(data_lines[i + 4 + 6 * j].split()[0])
results['ads'][comp_name]['cc/cc'] = float(data_lines[i + 4 + 6 * j].split()[5])
# Errors are not printed for unfinished simulations
results['err'][comp_name] = {'id': j}
results['err'][comp_name]['mol/uc'] = 0
results['err'][comp_name]['mol/kg'] = 0
results['err'][comp_name]['mg/g'] = 0
results['err'][comp_name]['cc/g'] = 0
results['err'][comp_name]['cc/cc'] = 0
print('%s\nSimulation not finished!' % ('=' * 50))
print('Initialization: %s | Last cycle: %i' % (results['initialization'], results['cycle']))
if verbose:
units = ['mol/uc', 'mg/g', 'cc/cc']
for component in results['ads']:
print('=' * 50)
print("%-15s\t%s" % ('%s [%s]' % (component, results['ads'][component]['id']), loading))
print('-' * 50)
for u in units:
print('%s\t\t%8.3f +/- %5.2f' % (u, results['ads'][component][u], results['err'][component][u]))
print('=' * 50)
if save:
import yaml
with open('raspa_ads.yaml', 'w') as rads:
yaml.dump(results, rads)
return results
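# Usage sketch (output path taken from the module docstring above; component
# names depend on the simulation and are illustrative):
#
#     results = parse_output('Output/System_0/output_IRMOF-1_1.1.1_298.000000_1e+07.data')
#     for comp, ads in results['ads'].items():
#         print(comp, ads['mg/g'], '+/-', results['err'][comp]['mg/g'])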
if __name__ == "__main__":
# ads_path = glob.glob(os.path.join(sys.argv[1], 'Output', 'System_0', '*.data'))[0]
ads_path = os.path.abspath(sys.argv[1])
if len(sys.argv) > 2 and sys.argv[2] == 's':
parse_output(ads_path, verbose=True, save=True)
else:
parse_output(ads_path, verbose=True, save=False)
| 47.940171
| 112
| 0.548939
|
3a110ff95c653f9874905beb893c733e369b491c
| 1,833
|
py
|
Python
|
Python/unify.py
|
BuserLukas/Logic
|
cc0447554cfa75b213a10a2db37ce82c42afb91d
|
[
"MIT"
] | 13
|
2019-10-03T13:25:02.000Z
|
2021-12-26T11:49:25.000Z
|
Python/unify.py
|
BuserLukas/Logic
|
cc0447554cfa75b213a10a2db37ce82c42afb91d
|
[
"MIT"
] | 19
|
2015-01-14T15:36:24.000Z
|
2019-04-21T02:13:23.000Z
|
Python/unify.py
|
BuserLukas/Logic
|
cc0447554cfa75b213a10a2db37ce82c42afb91d
|
[
"MIT"
] | 18
|
2019-10-03T16:05:46.000Z
|
2021-12-10T19:44:15.000Z
|
def apply(t, σ):
"Apply the substitution σ to the term t."
if isinstance(t, set): # t is a set of clauses
return { apply(c, σ) for c in t }
if isinstance(t, frozenset): # t is a clause
return frozenset({ apply(l, σ) for l in t })
if isinstance(t, str): # t is a variable
if t in σ:
return σ[t]
else:
return t
else:
f = t[0]
ts = t[1:]
return (f,) + tuple(apply(s, σ) for s in ts)
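# Worked example (terms made up for illustration): applying the substitution
# { 'x': ('G', 'y') } to the term ('F', 'x') rewrites the variable and yields
# ('F', ('G', 'y')); sets and frozensets of clauses are mapped element-wise.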
def compose(σ, τ):
Result = { x: apply(s, τ) for (x, s) in σ.items() }
Result.update(τ)
return Result
def occurs(x, t):
if x == t:
return True
if isinstance(t, str):
return False
return any(occurs(x, arg) for arg in t[1:])
def mgu(s, t):
return solve({('≐', s, t)}, {})
def solve(E, σ):
    while E:
_, s, t = E.pop()
if s == t:
continue
if isinstance(s, str): # s is a variable
if occurs(s, t):
return None
else:
E = apply(E, { s: t })
σ = compose(σ, { s: t })
elif isinstance(t, str): # t is a variable, but s is not
E.add(('≐', t, s))
else:
f , g = s[0] , t[0]
sArgs, tArgs = s[1:] , t[1:]
m , n = len(sArgs), len(tArgs)
if f != g or m != n:
return None
else:
E |= { ('≐', sArgs[i], tArgs[i]) for i in range(m) }
return σ
if __name__ == '__main__':
import folParser as fp
def parseTerm(s):
parser = fp.LogicParser(s)
return parser.parse()
t1 = parseTerm('P(x1,F(x4))')
t2 = parseTerm('P(x2,x3)')
μ = mgu(t1, t2)
print(μ)
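    # Expected result sketch for the example above: unifying P(x1, F(x4)) with
    # P(x2, x3) should print a most general unifier equivalent to
    # { 'x1': 'x2', 'x3': ('F', 'x4') } (dict ordering may vary).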
| 27.772727
| 76
| 0.434261
|
ff4798703d2043e02461905104b66270816100f1
| 385
|
py
|
Python
|
setup.py
|
levi-rs/cloc.me
|
56ac158e5085390177f261220dcf9dfba72871de
|
[
"MIT"
] | 2
|
2018-12-08T22:13:49.000Z
|
2018-12-08T22:13:54.000Z
|
setup.py
|
levi-rs/cloc.me
|
56ac158e5085390177f261220dcf9dfba72871de
|
[
"MIT"
] | 5
|
2018-10-08T20:33:10.000Z
|
2018-10-15T22:20:09.000Z
|
setup.py
|
levi-rs/cloc.me
|
56ac158e5085390177f261220dcf9dfba72871de
|
[
"MIT"
] | null | null | null |
import os
from setuptools import setup
modules = ['clocme', ]
setup(
name="clocme",
author='Levi Noecker',
author_email='levi.noecker@gmail.com',
url='https://github.com/levi-rs/cloc.me',
description="Clocme controller",
py_modules=modules,
version='0.0.1',
entry_points={
'console_scripts': [
'clocme=cli:main',
]
}
)
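# Usage sketch (added for illustration): installing this package, e.g. with
# `pip install .`, registers a `clocme` console script that dispatches to
# `cli:main` as declared in entry_points above.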
| 19.25
| 45
| 0.602597
|
2a3bbf38037b81b7d0b5163dd8069c0c5cdd48bb
| 446
|
py
|
Python
|
products/urls.py
|
Luka-pp/Milestone-Projet-4
|
98b2c842e702682cd9bce7deb8a7b385eb83484e
|
[
"BSD-Source-Code"
] | null | null | null |
products/urls.py
|
Luka-pp/Milestone-Projet-4
|
98b2c842e702682cd9bce7deb8a7b385eb83484e
|
[
"BSD-Source-Code"
] | null | null | null |
products/urls.py
|
Luka-pp/Milestone-Projet-4
|
98b2c842e702682cd9bce7deb8a7b385eb83484e
|
[
"BSD-Source-Code"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.all_products, name='products'),
path('<int:product_id>/', views.product_detail, name='product_detail'),
path('add/', views.add_product, name='add_product'),
path('edit/<int:product_id>/', views.edit_product, name='edit_product'),
path('delete/<int:product_id>/', views.delete_product, name='delete_product')
]
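# Route sketch (the product id and the URL prefix are assumptions, added for
# illustration): with this module included under a `/products/` prefix,
# `/products/5/` resolves to views.product_detail(request, product_id=5) and
# `/products/add/` to views.add_product(request).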
| 31.857143
| 81
| 0.710762
|
593b64591d65a2937b629637a21cf92930a25286
| 359
|
py
|
Python
|
basic-programming/file-io/file-poitions.py
|
sinhdev/python-demo
|
77e97d01d853799aef9a71cd6b7892847f85727d
|
[
"MIT"
] | null | null | null |
basic-programming/file-io/file-poitions.py
|
sinhdev/python-demo
|
77e97d01d853799aef9a71cd6b7892847f85727d
|
[
"MIT"
] | null | null | null |
basic-programming/file-io/file-poitions.py
|
sinhdev/python-demo
|
77e97d01d853799aef9a71cd6b7892847f85727d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# Open a file
fo = open("foo.txt", "r+")
# Read the first 10 characters (avoid shadowing the built-in `str`)
data = fo.read(10)
print("Read String is : ", data)
# Check current position
position = fo.tell()
print("Current file position : ", position)
# Reposition pointer at the beginning once again
fo.seek(0, 0)
data = fo.read(10)
print("Again read String is : ", data)
# Close opened file
fo.close()
| 21.117647
| 48
| 0.679666
|
fea45255b7a22f584be1683a5180e9ed9adda258
| 16,817
|
py
|
Python
|
my_plugins/youcompleteme/third_party/ycmd/ycmd/tests/bindings/cpp_bindings_general_test.py
|
dragon7-fc/vimrc
|
d5968c222023bfdbd68b4f047f6e407e978cc82f
|
[
"MIT"
] | null | null | null |
my_plugins/youcompleteme/third_party/ycmd/ycmd/tests/bindings/cpp_bindings_general_test.py
|
dragon7-fc/vimrc
|
d5968c222023bfdbd68b4f047f6e407e978cc82f
|
[
"MIT"
] | null | null | null |
my_plugins/youcompleteme/third_party/ycmd/ycmd/tests/bindings/cpp_bindings_general_test.py
|
dragon7-fc/vimrc
|
d5968c222023bfdbd68b4f047f6e407e978cc82f
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from ycmd.completers.cpp.clang_completer import ConvertCompletionData
from ycmd.responses import BuildDiagnosticData
from ycmd.tests.bindings import PathToTestFile
from ycmd.tests.test_utils import ( ClangOnly, TemporaryTestDir,
TemporaryClangProject )
from ycmd.utils import ImportCore
from hamcrest import ( assert_that,
contains_exactly,
contains_inanyorder,
contains_string,
equal_to,
has_entries,
has_properties )
ycm_core = ImportCore()
import os
def CppBindings_FilterAndSortCandidates_test():
candidates = [ 'foo1', 'foo2', 'foo3' ]
query = 'oo'
candidate_property = ''
result_full = ycm_core.FilterAndSortCandidates( candidates,
candidate_property,
query )
result_2 = ycm_core.FilterAndSortCandidates( candidates,
candidate_property,
query,
2 )
del candidates
del query
del candidate_property
assert_that( result_full, contains_exactly( 'foo1', 'foo2', 'foo3' ) )
assert_that( result_2, contains_exactly( 'foo1', 'foo2' ) )
def CppBindings_IdentifierCompleter_test():
identifier_completer = ycm_core.IdentifierCompleter()
identifiers = ycm_core.StringVector()
identifiers.append( 'foo' )
identifiers.append( 'bar' )
identifiers.append( 'baz' )
identifier_completer.AddIdentifiersToDatabase( identifiers, 'foo', 'file' )
del identifiers
query_fo_10 = identifier_completer.CandidatesForQueryAndType(
'fo', 'foo', 10 )
query_fo = identifier_completer.CandidatesForQueryAndType( 'fo', 'foo' )
query_a = identifier_completer.CandidatesForQueryAndType( 'a', 'foo' )
assert_that( query_fo_10, contains_exactly( 'foo' ) )
assert_that( query_fo, contains_exactly( 'foo' ) )
assert_that( query_a, contains_exactly( 'bar', 'baz' ) )
identifiers = ycm_core.StringVector()
identifiers.append( 'oof' )
identifiers.append( 'rab' )
identifiers.append( 'zab' )
identifier_completer.ClearForFileAndAddIdentifiersToDatabase(
identifiers, 'foo', 'file' )
query_a_10 = identifier_completer.CandidatesForQueryAndType( 'a', 'foo' )
assert_that( query_a_10, contains_exactly( 'rab', 'zab' ) )
@ClangOnly
def CppBindings_UnsavedFile_test():
unsaved_file = ycm_core.UnsavedFile()
filename = 'foo'
contents = 'bar\\n'
length = len( contents )
unsaved_file.filename_ = filename
unsaved_file.contents_ = contents
unsaved_file.length_ = length
del filename
del contents
del length
assert_that( unsaved_file, has_properties( {
'filename_': 'foo',
'contents_': 'bar\\n',
'length_': len( 'bar\\n' )
} ) )
@ClangOnly
def CppBindings_DeclarationLocation_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = clang_completer.GetDeclarationLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_DefinitionOrDeclarationLocation_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = ( clang_completer.
GetDefinitionOrDeclarationLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_DefinitionLocation_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = clang_completer.GetDefinitionLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_Candidates_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 11
column = 6
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
candidates = ( clang_completer
.CandidatesForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
candidates = [ ConvertCompletionData( x ) for x in candidates ]
assert_that( candidates, contains_inanyorder(
has_entries( {
'detailed_info': 'float b\n',
'extra_menu_info': 'float',
'insertion_text': 'b',
'kind': 'MEMBER',
'menu_text': 'b'
} ),
has_entries( {
'detailed_info': 'int a\n',
'extra_menu_info': 'int',
'insertion_text': 'a',
'kind': 'MEMBER',
'menu_text': 'a'
} )
) )
@ClangOnly
def CppBindings_GetType_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
type_at_cursor = clang_completer.GetTypeAtLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( 'int ()', equal_to( type_at_cursor ) )
@ClangOnly
def CppBindings_GetParent_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
enclosing_function = ( clang_completer
.GetEnclosingFunctionAtLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( 'bar', equal_to( enclosing_function ) )
@ClangOnly
def CppBindings_FixIt_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 3
column = 5
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
fixits = clang_completer.GetFixItsForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
fixits,
contains_exactly( has_properties( {
'text': ( PathToTestFile( 'foo.c' ) +
':3:16: error: expected \';\' at end of declaration' ),
'location': has_properties( {
'line_number_': 3,
'column_number_': 16,
'filename_': PathToTestFile( 'foo.c' )
} ),
'chunks': contains_exactly( has_properties( {
'replacement_text': ';',
'range': has_properties( {
'start_': has_properties( {
'line_number_': 3,
'column_number_': 16,
} ),
'end_': has_properties( {
'line_number_': 3,
'column_number_': 16,
} ),
} )
} ) ),
'kind': None,
} ) ) )
@ClangOnly
def CppBindings_Docs_test():
translation_unit = PathToTestFile( 'foo.c' )
filename = PathToTestFile( 'foo.c' )
line = 9
column = 16
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
docs = clang_completer.GetDocsForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
docs,
has_properties( {
'comment_xml': '<Function file="' + PathToTestFile( 'foo.c' ) + '"'
' line="2" column="5"><Name>foooo</Name><USR>c:@F@foooo#'
'</USR><Declaration>int foooo()</Declaration><Abstract>'
'<Para> Foo</Para></Abstract></Function>',
'brief_comment': 'Foo',
'raw_comment': '/// Foo',
'canonical_type': 'int ()',
'display_name': 'foooo' } ) )
@ClangOnly
def CppBindings_Diags_test():
filename = PathToTestFile( 'foo.c' )
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( '-xc++' )
reparse = True
clang_completer = ycm_core.ClangCompleter()
diag_vector = clang_completer.UpdateTranslationUnit( filename,
unsaved_file_vector,
flags )
diags = [ BuildDiagnosticData( x ) for x in diag_vector ]
del diag_vector
del filename
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
diags,
contains_exactly(
has_entries( {
'kind': 'ERROR',
'text': contains_string( 'expected \';\' at end of declaration' ),
'ranges': contains_exactly(),
'location': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
'location_extent': has_entries( {
'start': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
'end': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
} ),
} ) ) )
@ClangOnly
def CppBindings_CompilationDatabase_test():
with TemporaryTestDir() as tmp_dir:
compile_commands = [
{
'directory': tmp_dir,
'command': 'clang++ -x c++ -I. -I/absolute/path -Wall',
'file': os.path.join( tmp_dir, 'test.cc' ),
},
]
with TemporaryClangProject( tmp_dir, compile_commands ):
db = ycm_core.CompilationDatabase( tmp_dir )
db_successful = db.DatabaseSuccessfullyLoaded()
db_busy = db.AlreadyGettingFlags()
db_dir = db.database_directory
compilation_info = db.GetCompilationInfoForFile(
compile_commands[ 0 ][ 'file' ] )
del db
del compile_commands
assert_that( db_successful, equal_to( True ) )
assert_that( db_busy, equal_to( False ) )
assert_that( db_dir, equal_to( tmp_dir ) )
assert_that( compilation_info,
has_properties( {
'compiler_working_dir_': tmp_dir,
'compiler_flags_': contains_exactly( 'clang++',
'--driver-mode=g++',
'-x',
'c++',
'-I.',
'-I/absolute/path',
'-Wall' )
} ) )
def Dummy_test():
# Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
assert True
| 34.04251
| 80
| 0.516858
|
140f1f80370fdfb221a36c6db139889110b27520
| 444
|
py
|
Python
|
dkulib/parallelizer/__init__.py
|
Muennighoff/dss-plugin-dkulib
|
8d9a954841c23f163f1992822a2a8e4171695f73
|
[
"Apache-2.0"
] | null | null | null |
dkulib/parallelizer/__init__.py
|
Muennighoff/dss-plugin-dkulib
|
8d9a954841c23f163f1992822a2a8e4171695f73
|
[
"Apache-2.0"
] | null | null | null |
dkulib/parallelizer/__init__.py
|
Muennighoff/dss-plugin-dkulib
|
8d9a954841c23f163f1992822a2a8e4171695f73
|
[
"Apache-2.0"
] | null | null | null |
########################################################
# ------------- dkulib.parallelizer: 0.1.0 ----------------
# For more information, see https://github.com/dataiku/dss-plugin-dkulib/tree/main/dkulib/parallelizer
# Library version: 0.1.0
# Last update: 2021-07-16
# Author: Dataiku (Alex Combessie, Niklas Muennighoff)
#########################################################
from .parallelizer import DataFrameParallelizer
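# Hypothetical usage sketch (not part of this module; the constructor and run()
# signatures are assumptions -- check the repository linked above for the actual API):
#   from dkulib.parallelizer import DataFrameParallelizer
#   parallelizer = DataFrameParallelizer(function=process_row)
#   output_df = parallelizer.run(input_df)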
| 40.363636
| 103
| 0.506757
|
dda4f422b63ea7371681daa35fa0f4a7eb3da541
| 199
|
py
|
Python
|
atariari/testAtariAri.py
|
nguyendohoangkhoi/Deductive-Exploration
|
371502f00305d8f2e6c14b65eaa8686d2a5609e4
|
[
"MIT"
] | null | null | null |
atariari/testAtariAri.py
|
nguyendohoangkhoi/Deductive-Exploration
|
371502f00305d8f2e6c14b65eaa8686d2a5609e4
|
[
"MIT"
] | null | null | null |
atariari/testAtariAri.py
|
nguyendohoangkhoi/Deductive-Exploration
|
371502f00305d8f2e6c14b65eaa8686d2a5609e4
|
[
"MIT"
] | null | null | null |
import gym
from atariari.benchmark.wrapper import AtariARIWrapper
env = AtariARIWrapper(gym.make('MontezumaRevengeNoFrameskip-v4'))
obs = env.reset()
obs, reward, done, info = env.step(1)
print(info)
| 33.166667
| 65
| 0.788945
|
ec38c37b7f9cecc1d89b5f4ff958f4f86af027d2
| 1,258
|
py
|
Python
|
ludwig/hyperopt/utils.py
|
carlogrisetti/ludwig
|
5c0887f14867e1577e0ddc3806c5cf7a781fb665
|
[
"Apache-2.0"
] | null | null | null |
ludwig/hyperopt/utils.py
|
carlogrisetti/ludwig
|
5c0887f14867e1577e0ddc3806c5cf7a781fb665
|
[
"Apache-2.0"
] | null | null | null |
ludwig/hyperopt/utils.py
|
carlogrisetti/ludwig
|
5c0887f14867e1577e0ddc3806c5cf7a781fb665
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
from ludwig.constants import HYPEROPT, PARAMETERS, PREPROCESSING
from ludwig.hyperopt.results import HyperoptResults
from ludwig.utils.data_utils import save_json
from ludwig.utils.print_utils import print_boxed
logger = logging.getLogger(__name__)
def print_hyperopt_results(hyperopt_results: HyperoptResults):
print_boxed('HYPEROPT RESULTS', print_fun=logger.info)
for trial_results in hyperopt_results.ordered_trials:
logger.info('score: {:.6f} | parameters: {}'.format(
trial_results.metric_score, trial_results.parameters
))
logger.info("")
def save_hyperopt_stats(hyperopt_stats, hyperopt_dir_name):
hyperopt_stats_fn = os.path.join(
hyperopt_dir_name,
'hyperopt_statistics.json'
)
save_json(hyperopt_stats_fn, hyperopt_stats)
def load_json_value(v):
try:
return json.loads(v)
except:
return v
def load_json_values(d):
return {
k: load_json_value(v)
for k, v in d.items()
}
def should_tune_preprocessing(config):
parameters = config[HYPEROPT][PARAMETERS]
for param_name in parameters.keys():
if f"{PREPROCESSING}." in param_name:
return True
return False
| 25.16
| 64
| 0.714626
|
2997df0b201fa0646b16d7c5c4b0e1cba72c1e64
| 2,247
|
py
|
Python
|
hdltools/vcd/generator.py
|
brunosmmm/hdltools
|
a98ca8c4d168740fa229c939a7b1f31ea73eec24
|
[
"MIT"
] | 2
|
2020-02-28T13:02:39.000Z
|
2021-06-30T09:15:35.000Z
|
hdltools/vcd/generator.py
|
brunosmmm/hdltools
|
a98ca8c4d168740fa229c939a7b1f31ea73eec24
|
[
"MIT"
] | 1
|
2020-03-22T17:32:45.000Z
|
2020-03-23T15:43:39.000Z
|
hdltools/vcd/generator.py
|
brunosmmm/hdltools
|
a98ca8c4d168740fa229c939a7b1f31ea73eec24
|
[
"MIT"
] | null | null | null |
"""VCD Dump Generator."""
from ..abshdl.codegen import HDLCodeGenerator
from datetime import datetime
class VCDGenerator(HDLCodeGenerator):
"""Generate VCD dumps."""
def __init__(self, **kwargs):
"""Initialize."""
self.elements = []
def add_elements(self, *elements):
"""Add elements."""
self.elements.extend(elements)
def gen_VCDDump(self, element, **kwargs):
"""Dump VCD dump descriptor."""
ret_str = ""
ret_str += "$date\n {} \n$end\n".format(datetime.now())
ret_str += "$version hdltools VCDGenerator $end\n"
ret_str += "$timescale {} $end".format(element.timescale) + "\n"
ret_str += "$scope module {} $end\n".format(element.name)
for identifier, var in element.variables.items():
ret_str += self.dump_element(var, identifier=identifier) + "\n"
ret_str += "$upscope $end\n"
ret_str += "$enddefinitions $end\n"
# dump initial
ret_str += "#0\n$dumpvars\n"
for name, value in element.initial.items():
if element.variables[element.variable_identifiers[name]].size > 1:
fmt_value = "b{0:b} ".format(value)
else:
fmt_value = "1" if bool(value) else "0"
ret_str += "{}{}\n".format(
fmt_value, element.variable_identifiers[name]
)
ret_str += "$end\n"
for step, changes in enumerate(element.vcd):
if len(changes) == 0:
continue
ret_str += "#{}\n".format(step + 1)
for name, value in changes.items():
ret_str += "{}{}\n".format(
value, element.variable_identifiers[name]
)
# ret_str += '$dumpoff\n'
# for name, value in element.initial.items():
# ret_str += 'x{}\n'.format(element.variable_identifiers[name])
# ret_str += '$end'
return ret_str
def gen_VCDVariable(self, element, **kwargs):
"""Dump variable."""
ret_str = "$var {} {} {} {} $end".format(
element.var_type,
element.size,
kwargs["identifier"],
*element.identifiers
)
return ret_str
| 33.537313
| 78
| 0.542501
|
04c151d1e8d7548944ee3c06676375528dc3e66e
| 10,614
|
py
|
Python
|
nova/volume/cinder.py
|
bopopescu/nova_vmware_compute_driver
|
60d3936b68030647b9f11970c9e0d060fc286dd9
|
[
"Apache-2.0"
] | null | null | null |
nova/volume/cinder.py
|
bopopescu/nova_vmware_compute_driver
|
60d3936b68030647b9f11970c9e0d060fc286dd9
|
[
"Apache-2.0"
] | null | null | null |
nova/volume/cinder.py
|
bopopescu/nova_vmware_compute_driver
|
60d3936b68030647b9f11970c9e0d060fc286dd9
|
[
"Apache-2.0"
] | 2
|
2019-07-08T22:12:35.000Z
|
2020-07-24T08:27:24.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
from copy import deepcopy
import sys
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
from nova.db import base
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
cinder_opts = [
cfg.StrOpt('cinder_catalog_info',
default='volume:cinder:publicURL',
help='Info to match when looking for cinder in the service '
'catalog. Format is : separated values of the form: '
'<service_type>:<service_name>:<endpoint_type>'),
cfg.StrOpt('cinder_endpoint_template',
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
]
CONF = cfg.CONF
CONF.register_opts(cinder_opts)
LOG = logging.getLogger(__name__)
def cinderclient(context):
# FIXME: the cinderclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
compat_catalog = {
'access': {'serviceCatalog': context.service_catalog or {}}
}
sc = service_catalog.ServiceCatalog(compat_catalog)
if CONF.cinder_endpoint_template:
url = CONF.cinder_endpoint_template % context.to_dict()
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
url = sc.url_for(service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
LOG.debug(_('Cinderclient connection created using URL: %s') % url)
c = cinder_client.Client(context.user_id,
context.auth_token,
project_id=context.project_id,
auth_url=url)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
c.client.management_url = url
return c
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['volume_metadata'] = []
for key, value in vol.metadata.items():
item = {}
item['key'] = key
item['value'] = value
d['volume_metadata'].append(item)
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
class API(base.Base):
"""API for interacting with the volume manager."""
def _reraise_translated_volume_exception(self, volume_id):
"""Transform the exception for the volume but keep its traceback
intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = self._translate_volume_exception(volume_id, exc_value)
raise new_exc, None, exc_trace
def _translate_volume_exception(self, volume_id, exc_value):
if isinstance(exc_value, cinder_exception.NotFound):
return exception.VolumeNotFound(volume_id=volume_id)
return exc_value
def get(self, context, volume_id):
try:
item = cinderclient(context).volumes.get(volume_id)
return _untranslate_volume_summary_view(context, item)
except Exception:
self._reraise_translated_volume_exception(volume_id)
def get_all(self, context, search_opts={}):
items = cinderclient(context).volumes.list(detailed=True)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("already detached")
raise exception.InvalidVolume(reason=msg)
def reserve_volume(self, context, volume):
cinderclient(context).volumes.reserve(volume['id'])
def unreserve_volume(self, context, volume):
cinderclient(context).volumes.unreserve(volume['id'])
def begin_detaching(self, context, volume):
cinderclient(context).volumes.begin_detaching(volume['id'])
def roll_detaching(self, context, volume):
cinderclient(context).volumes.roll_detaching(volume['id'])
def attach(self, context, volume, instance_uuid, mountpoint):
cinderclient(context).volumes.attach(volume['id'],
instance_uuid,
mountpoint)
def detach(self, context, volume):
cinderclient(context).volumes.detach(volume['id'])
def initialize_connection(self, context, volume, connector):
return cinderclient(context).\
volumes.initialize_connection(volume['id'], connector)
def terminate_connection(self, context, volume, connector):
return cinderclient(context).\
volumes.terminate_connection(volume['id'], connector)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
display_name=name,
display_description=description,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
item = cinderclient(context).volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
def delete(self, context, volume):
cinderclient(context).volumes.delete(volume['id'])
def update(self, context, volume, fields):
raise NotImplementedError()
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
def create_snapshot(self, context, volume, name, description):
item = cinderclient(context).volume_snapshots.create(volume['id'],
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
def create_snapshot_force(self, context, volume, name, description):
item = cinderclient(context).volume_snapshots.create(volume['id'],
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
def delete_snapshot(self, context, snapshot):
cinderclient(context).volume_snapshots.delete(snapshot['id'])
def get_volume_metadata(self, context, volume):
raise NotImplementedError()
def delete_volume_metadata(self, context, volume, key):
raise NotImplementedError()
def update_volume_metadata(self, context, volume, metadata, delete=False):
raise NotImplementedError()
def get_volume_metadata_value(self, volume, key):
raise NotImplementedError()
| 36.6
| 79
| 0.631619
|
6bba0b904c0751deb9d3e5a472fbe3d68b08b44b
| 1,257
|
py
|
Python
|
fluent_contents/plugins/text/migrations/0001_initial.py
|
jayvdb/django-fluent-contents
|
dc07f42d80116c1e42efcd0afae91b2843c2e95f
|
[
"Apache-2.0"
] | null | null | null |
fluent_contents/plugins/text/migrations/0001_initial.py
|
jayvdb/django-fluent-contents
|
dc07f42d80116c1e42efcd0afae91b2843c2e95f
|
[
"Apache-2.0"
] | 1
|
2020-07-08T19:10:10.000Z
|
2020-07-08T19:10:10.000Z
|
fluent_contents/plugins/text/migrations/0001_initial.py
|
jayvdb/django-fluent-contents
|
dc07f42d80116c1e42efcd0afae91b2843c2e95f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import fluent_contents.extensions.model_fields
class Migration(migrations.Migration):
dependencies = [("fluent_contents", "0001_initial")]
operations = [
migrations.CreateModel(
name="TextItem",
fields=[
(
"contentitem_ptr",
models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to="fluent_contents.ContentItem",
on_delete=models.CASCADE,
),
),
(
"text",
fluent_contents.extensions.model_fields.PluginHtmlField(
verbose_name="text", blank=True
),
),
],
options={
"db_table": "contentitem_text_textitem",
"verbose_name": "Text",
"verbose_name_plural": "Text",
},
bases=("fluent_contents.contentitem",),
)
]
| 29.232558
| 76
| 0.460621
|
a7cacd4f75f19e0f313af288121334419e1cbbb2
| 771
|
py
|
Python
|
cpp/manual_wrappers/setup.py
|
BostonUniversitySeniorDesign/corner-camera
|
48bd550d14277c257ea9d96b446990629a783987
|
[
"MIT"
] | null | null | null |
cpp/manual_wrappers/setup.py
|
BostonUniversitySeniorDesign/corner-camera
|
48bd550d14277c257ea9d96b446990629a783987
|
[
"MIT"
] | null | null | null |
cpp/manual_wrappers/setup.py
|
BostonUniversitySeniorDesign/corner-camera
|
48bd550d14277c257ea9d96b446990629a783987
|
[
"MIT"
] | null | null | null |
from distutils.core import setup, Extension
#from setuptools import find_packages, setup, Extension
def main():
the_module = Extension("nlos",
include_dirs = ['/usr/local/include'],
libraries = ['opencv_core', 'opencv_imgproc', 'opencv_highgui'],
library_dirs = ['/usr/local/lib'],
sources = ["nlosmodule.cpp", "CalibrationWindow.cpp", "Inference.cpp", "nlos.cpp", "RollingDisplay.cpp", "WebCamInference.cpp", "ConfigParser.cpp", "VideoInference.cpp"
])
setup(name="nlos",
version="1.0.0",
description="Python interface for the NLOS algorithm developed by Bouman et. al",
author="Josh Bone",
author_email="jbone@bu.edu",
ext_modules=[the_module])
if __name__ == "__main__":
main()
| 36.714286
| 171
| 0.656291
|
0a84935d4eab7bae0503efad43a2259ee30bc6b4
| 1,088
|
py
|
Python
|
data/train/python/0a84935d4eab7bae0503efad43a2259ee30bc6b4routing.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/0a84935d4eab7bae0503efad43a2259ee30bc6b4routing.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/0a84935d4eab7bae0503efad43a2259ee30bc6b4routing.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from webob.dec import wsgify
import webob
import routes.middleware
from comments.controller import CommentController
def comment_mappers(mapper):
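    """Register the create/list/reply/update/delete comment routes on the given Routes mapper."""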
mapper.connect('/comment/{pin_id}/create',
controller=CommentController(),
action='create',
conditions={'method': ['POST']})
mapper.connect('/comment/{pin_id}',
controller=CommentController(),
action='list',
conditions={'method': ['GET']})
mapper.connect('/comment/{pin_id}/{comment_id}/reply',
controller=CommentController(),
action='reply',
conditions={'method': ['POST']})
mapper.connect('/comment/{comment_id}/update',
controller=CommentController(),
action='update',
conditions={'method': ['PUT']})
mapper.connect('/comment/{comment_id}/delete',
controller=CommentController(),
action='delete',
conditions={'method': ['DELETE']})
| 38.857143
| 58
| 0.54136
|
be740353e9a9b864625c8f20eefea92134c3ea12
| 554
|
py
|
Python
|
build/aruco_msgs/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/aruco_msgs/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
build/aruco_msgs/catkin_generated/pkg.develspace.context.pc.py
|
FProgrammerLIU/caster_man_ros
|
a75b503fad3a470f985072a2b3953e89074f3223
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/caster/ros_ws/caster/devel/.private/aruco_msgs/include".split(';') if "/home/caster/ros_ws/caster/devel/.private/aruco_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;geometry_msgs;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "aruco_msgs"
PROJECT_SPACE_DIR = "/home/caster/ros_ws/caster/devel/.private/aruco_msgs"
PROJECT_VERSION = "0.2.4"
| 61.555556
| 187
| 0.758123
|
a325f3b0c2c7b8abc17f44a42883688bd8c0c38b
| 9,710
|
py
|
Python
|
modules/database.py
|
Smarandii/cryptoBot-remaster-
|
a9fd71bd5037ca126b27490349bdfa9dc3648e08
|
[
"MIT"
] | 7
|
2020-09-18T10:30:40.000Z
|
2021-03-25T11:17:23.000Z
|
modules/database.py
|
Smarandii/cryptoBot-remaster-
|
a9fd71bd5037ca126b27490349bdfa9dc3648e08
|
[
"MIT"
] | null | null | null |
modules/database.py
|
Smarandii/cryptoBot-remaster-
|
a9fd71bd5037ca126b27490349bdfa9dc3648e08
|
[
"MIT"
] | null | null | null |
import sqlite3
from sqlite3 import Error
from modules.functions import get_user_from_db, get_request_from_db, request_time_is_done
from modules.models import User, Request
class DataBase:
def __init__(self, db_file='database.db'):
self.db_file = db_file
self.create_connection()
self.c = self._get_connection()
self.cursor = self.c.cursor()
self.create_tables()
self.check_requests_shell_life()
def _get_connection(self) -> sqlite3.connect:
c = sqlite3.connect(self.db_file)
return c
def create_connection(self):
""" create a database connection to a SQLite database """
conn = None
try:
conn = sqlite3.connect(self.db_file)
except Error as e:
print(e)
finally:
if conn:
conn.close()
def create_tables(self):
with self.c:
self.c.execute('''CREATE TABLE IF NOT EXISTS users
(id integer PRIMARY KEY, telegram_id text, balance bigint, status text,
is_follower int, invited_by text, q_of_trades bigint, earned_from_partnership bigint)''')
self.c.execute('''CREATE TABLE IF NOT EXISTS requests
(id integer PRIMARY KEY, telegram_id text, status text,
type text, when_created text, comment text, wallet text)''')
self.c.commit()
def get_request_by_telegram_id(self, telegram_id: int, rq_type='trade', status='any') -> Request or None:
telegram_id = int(telegram_id)
requests = self.get_all_requests()
if status == 'any':
for request in requests:
if request.telegram_id == telegram_id \
and rq_type in request.type \
and request.status != "user_confirmed" \
and request.status != 'user_payed':
return request
else:
return None
else:
for request in requests:
if request.telegram_id == telegram_id and rq_type in request.type:
return request
return None
def get_all_requests(self) -> list:
requests_from_db = self.select_column_from_db('*', 'requests')
requests = []
for request in requests_from_db:
requests.append(get_request_from_db(request))
return requests
def get_request_by_id(self, rq_id: int):
rq_id = int(rq_id)
requests = self.get_all_requests()
for request in requests:
if request.db_id == rq_id:
return request
return None
def add_request_to_db(self, request):
request = get_request_from_db(request)
if not self.request_in_db(request):
self.insert_request_in_db(request)
print(request, 'added', request.type)
return self.get_request_by_telegram_id(telegram_id=request.telegram_id, rq_type=request.type)
def insert_request_in_db(self, request):
sql = f''' INSERT INTO requests(telegram_id, status, type, when_created, comment, wallet)
VALUES(?,?,?,?,?,?) '''
self.cursor.execute(sql, request.database_list())
self.c.commit()
def top_up_user_balance(self, user: User, amount: (int or float)):
user.balance = float(user.balance) + float(amount)
self.update_user_in_db(user)
def user_in_db(self, telegram_id):
return self.get_user_by_telegram_id(telegram_id) is not None
def get_status_message(self, call):
call_data, request_id, client_id, status = call.split(" ")
if status in ["no_payment", 'close_request']:
self.delete_request_from_db(request_id)
status_msgs = {'payment_s': 'Платёж подтверждён!',
'crypto_sent': 'Криптовалюта отправлена!',
'replenish_s': 'Баланс пополнен!',
'not_enough': 'Было отправленно недостаточно средств!',
'no_payment': "Не удалось найти ваш платёж!",
'close_request': 'Заявка закрыта!'}
message = status_msgs[status]
return client_id, message
def update_user_in_db(self, user):
cursor = self.c.cursor()
cursor.execute(f'UPDATE users SET id = ?, telegram_id = ?, balance = ?, status = ?, '
f'is_follower = ?, invited_by = ?, '
f'q_of_trades = ?, earned_from_partnership = ? WHERE telegram_id = {user.telegram_id}',
user.update_database_list())
self.c.commit()
print(self.get_user_by_telegram_id(user.telegram_id), 'usr updated')
def update_request_in_db(self, request: Request):
with self.c:
self.cursor.execute(f'UPDATE requests SET id = ?, telegram_id = ?, status = ?, type = ?, '
f'when_created = ?, comment = ?, wallet = ? '
f'WHERE id = {request.db_id}', request.update_database_list())
self.c.commit()
print(self.get_request_by_id(request.db_id), 'rq updated')
def get_requests(self, user: User):
trade_request = self.get_request_by_telegram_id(user.telegram_id)
help_request = self.get_request_by_telegram_id(user.telegram_id, rq_type='help_request')
replenish_request = self.get_request_by_telegram_id(user.telegram_id, rq_type='replenish')
service_request = self.get_request_by_telegram_id(user.telegram_id, rq_type='service_request')
return_request = self.get_request_by_telegram_id(user.telegram_id, rq_type='return')
return trade_request, help_request, replenish_request, service_request, return_request
def delete_request_from_db(self, request_id: int):
with self.c:
self.cursor.execute(f'DELETE FROM requests WHERE id = {request_id}')
self.c.commit()
def print_all_requests(self):
requests = self.get_all_requests()
for request in requests:
print(request)
def print_all_users(self):
users = self.get_all_users()
for user in users:
print(user)
def check_requests_shell_life(self):
requests = self.get_all_requests()
for request in requests:
if request_time_is_done(request.when_created) and request.type != 'help_request' \
and request.status != 'user_confirmed' \
and request.status != 'T: user_payed':
self.delete_request_from_db(request.db_id)
def select_column_from_db(self, column, table):
with self.c:
cursor = self.c.cursor()
cursor.execute(f"SELECT {column} FROM {table}")
result = cursor.fetchall()
return result
def add_new_user_to_db(self, user_id, follow_status=0, invited_by=0):
user = User(telegram_id=user_id, is_follower=follow_status, invited_by=invited_by)
telegram_ids_from_db = self.select_column_from_db('telegram_id', 'users')
if telegram_ids_from_db is None or self.get_user_by_telegram_id(user.telegram_id) is None:
self.insert_user_in_db(user)
return user
else:
user = self.get_user_by_telegram_id(user.telegram_id)
return user
def insert_user_in_db(self, user):
sql = f'INSERT INTO users(telegram_id, balance, status, is_follower, invited_by, ' \
f'q_of_trades, earned_from_partnership) VALUES(?,?,?,?,?,?,?)'
self.cursor.execute(sql, user.database_list())
self.c.commit()
def get_all_users(self):
users_from_db = self.select_column_from_db('*', 'users')
users = []
for user in users_from_db:
user = get_user_from_db(user)
users.append(user)
return users
def get_all_unprocessed_requests_in_list(self) -> list:
requests = self.get_all_requests()
unprocessed_requests = []
for request in requests:
if request.status == "user_confirmed" or request.status == 'user_payed':
unprocessed_requests.append(request)
return unprocessed_requests
def get_user_by_telegram_id(self, telegram_id: int) -> User or None:
telegram_id = int(telegram_id)
users = self.get_all_users()
for user in users:
if user.telegram_id == telegram_id:
return user
return None
def get_number_of_invitations(self, telegram_id):
self.cursor.execute(f"SELECT * FROM users WHERE invited_by = ({telegram_id})")
res = self.cursor.fetchall()
number = len(res)
return number
def pay_inviter(self, telegram_id, fee):
# TODO
user = self.get_user_by_telegram_id(telegram_id)
if user.invited_by != 0:
inviter = self.get_user_by_telegram_id(user.invited_by)
inviter.earned_from_partnership = inviter.earned_from_partnership + fee # earned from partnership increased
inviter.balance = inviter.balance + fee # balance increased
self.update_user_in_db(inviter)
def request_in_db(self, request):
return self.get_request_by_telegram_id(telegram_id=request.telegram_id,
rq_type=request.type,
status=request.status) is not None
if __name__ == '__main__':
database = DataBase()
database.print_all_users()
print('_' * 100)
database.print_all_requests()
print('_' * 100)
| 41.853448
| 120
| 0.613697
|
d62854295242ad39fa794733c5972a6b92f8725c
| 2,588
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_QC230.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_QC230.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_QC230.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=11
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.z(input_qubit[3]) # number=7
prog.h(input_qubit[1]) # number=2
prog.z(input_qubit[1]) # number=8
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
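    # NOTE: E, V, gamma and beta are module-level globals assigned in the
    # __main__ block at the bottom of this file.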
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
prog.x(input_qubit[3]) # number=9
prog.x(input_qubit[3]) # number=10
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_QC230.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.531915
| 118
| 0.634853
|
2127cf3651d05e010490cfcf691f554a7d4bd74d
| 1,789
|
py
|
Python
|
tests/test_non_interactive.py
|
david-poirier-csn/aws_azuread_login
|
350871b9f69a48e45cfc95980e17ef8121ac6ef7
|
[
"Apache-2.0"
] | 1
|
2021-09-23T12:27:57.000Z
|
2021-09-23T12:27:57.000Z
|
tests/test_non_interactive.py
|
david-poirier/aws_azuread_login
|
350871b9f69a48e45cfc95980e17ef8121ac6ef7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_non_interactive.py
|
david-poirier/aws_azuread_login
|
350871b9f69a48e45cfc95980e17ef8121ac6ef7
|
[
"Apache-2.0"
] | 1
|
2021-12-31T03:13:14.000Z
|
2021-12-31T03:13:14.000Z
|
import getpass
import os
import aws_azuread_login
ENTRY_URL = os.environ['AWS_AZUREAD_ENTRY_URL']
USERNAME = input('Username: ')
PASSWORD = getpass.getpass('Password: ')
CODE = input('OTP: ')
def test_auth():
roles = aws_azuread_login.authenticate(
ENTRY_URL,
username=USERNAME,
password=PASSWORD,
code=CODE,
stay_signed_in=False,
headless=False)
assert(len(roles) > 0)
def test_get_credentials():
roles = aws_azuread_login.authenticate(
ENTRY_URL,
username=USERNAME,
password=PASSWORD,
code=CODE,
stay_signed_in=False,
headless=False)
assert(len(roles) > 0)
if len(roles) > 3:
roles = roles[:3]
creds = []
    for role in roles:
try:
creds.append(role.get_credentials())
print('.', end='', flush=True)
except:
print('!', end='', flush=True)
assert(len(creds) > 0)
def test_get_multiple_credentials():
roles = aws_azuread_login.authenticate(
ENTRY_URL,
username=USERNAME,
password=PASSWORD,
code=CODE,
stay_signed_in=False,
headless=False)
assert(len(roles) > 0)
creds = aws_azuread_login.get_multiple_credentials(roles)
assert(len(creds) > 0)
def test_roles_on_creds():
roles = aws_azuread_login.authenticate(
ENTRY_URL,
username=USERNAME,
password=PASSWORD,
code=CODE,
stay_signed_in=False,
headless=False)
creds = roles[0].get_credentials()
assert(creds.role.account != None)
| 27.106061
| 61
| 0.551705
|
6298ef3d8428071cb5e4f3b82753bc3fed6df437
| 371
|
py
|
Python
|
tributary/streaming/base.py
|
ceball/tributary
|
5e30f90d1a5cc176c0f231f525d9dc5a81353925
|
[
"Apache-2.0"
] | null | null | null |
tributary/streaming/base.py
|
ceball/tributary
|
5e30f90d1a5cc176c0f231f525d9dc5a81353925
|
[
"Apache-2.0"
] | null | null | null |
tributary/streaming/base.py
|
ceball/tributary
|
5e30f90d1a5cc176c0f231f525d9dc5a81353925
|
[
"Apache-2.0"
] | null | null | null |
class StreamingGraph(object):
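    """Thin wrapper around a streaming output node that exposes its graph views and runs it."""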
def __init__(self, output_node):
self._node = output_node
def graph(self):
return self._node.graph()
def graphviz(self):
return self._node.graphviz()
def dagre(self):
return self._node.dagre()
def run(self):
from tributary.streaming import run
return run(self._node)
| 19.526316
| 43
| 0.625337
|
8d07d1aa8ca28ec668bda39c9fbd6e26289fe303
| 421
|
py
|
Python
|
test/GameRunnerTest.py
|
avijit90/tic-tac-toe
|
737f74e828a6a59f26af1edc38e915b86c4a8e9d
|
[
"Apache-2.0"
] | null | null | null |
test/GameRunnerTest.py
|
avijit90/tic-tac-toe
|
737f74e828a6a59f26af1edc38e915b86c4a8e9d
|
[
"Apache-2.0"
] | null | null | null |
test/GameRunnerTest.py
|
avijit90/tic-tac-toe
|
737f74e828a6a59f26af1edc38e915b86c4a8e9d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest.mock import patch
from GameRunner import GameRunner
class MyTestCase(unittest.TestCase):
def setUp(self):
        # mock.patch needs a dotted-path string target, so patch.object is used here to
        # stub out __init__; the stub accepts any arguments so the real constructor is skipped.
        with patch.object(GameRunner, "__init__", lambda self, *args: None):
self.game_runner = GameRunner(None, None, None, None)
def test_something(self):
assert self.game_runner.input_service is None
if __name__ == '__main__':
unittest.main()
| 22.157895
| 76
| 0.695962
|
2e650a8e36b4abdf48ef5cacd016479e9005ed02
| 96
|
py
|
Python
|
src/strictest/fix/__init__.py
|
adamtheturtle/strictest
|
3413ba7c3018d841b46aa5de81e8486b2d92cd78
|
[
"MIT"
] | null | null | null |
src/strictest/fix/__init__.py
|
adamtheturtle/strictest
|
3413ba7c3018d841b46aa5de81e8486b2d92cd78
|
[
"MIT"
] | 24
|
2019-05-05T11:11:47.000Z
|
2020-10-19T09:38:36.000Z
|
src/strictest/fix/__init__.py
|
adamtheturtle/strictest
|
3413ba7c3018d841b46aa5de81e8486b2d92cd78
|
[
"MIT"
] | null | null | null |
"""
XXX
"""
import click
@click.group(name='fix')
def fix() -> None:
"""
XXX
"""
| 7.384615
| 24
| 0.458333
|
bfc7c2719093b4e090acd8ebe32b430dfa49d87f
| 1,038
|
py
|
Python
|
Advent_of_Code/2018/Day_02/day_02.1.py
|
Zubieta/CPP
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | 8
|
2017-03-02T07:56:45.000Z
|
2021-08-07T20:20:19.000Z
|
Advent_of_Code/2018/Day_02/day_02.1.py
|
zubie7a/Algorithms
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | null | null | null |
Advent_of_Code/2018/Day_02/day_02.1.py
|
zubie7a/Algorithms
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | 1
|
2021-08-07T20:20:20.000Z
|
2021-08-07T20:20:20.000Z
|
# https://adventofcode.com/2018/day/2
from collections import Counter
lines = []
# Any better way to read until EOF without any clear number of lines?
try:
while True:
line = str(input())
# print(line)
lines.append(line)
except Exception:
# print("EOF")
    pass
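# A more idiomatic alternative sketch (an assumption, not part of the original
# solution): iterating over sys.stdin reads until EOF without the try/except:
#   import sys
#   lines = [line.rstrip('\n') for line in sys.stdin]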
# The counts of words where at least one character occurs twice, and
# where at least one character occurs thrice.
count_2, count_3 = 0, 0
# Once you find a two-times character, stop counting for this word
# since it only matters once.
for line in lines:
counter = Counter(line)
# print(counter)
for key in counter.keys():
if counter[key] == 2:
count_2 += 1
break
# Once you find a three-times character, stop counting for this word
# since it only matters once.
for line in lines:
counter = Counter(line)
# print(counter)
for key in counter.keys():
if counter[key] == 3:
count_3 += 1
break
result = count_2 * count_3
# Result: 4980.
print(result)
| 23.590909
| 69
| 0.640655
|
f33933486ebf58e3754769849140b49277605c95
| 1,402
|
py
|
Python
|
hcipy/__init__.py
|
kian1377/hcipy
|
f398e82797b3adbc263e9a35d9389ba7b62342f2
|
[
"MIT"
] | 55
|
2018-06-29T01:13:26.000Z
|
2022-03-13T09:18:06.000Z
|
hcipy/__init__.py
|
kian1377/hcipy
|
f398e82797b3adbc263e9a35d9389ba7b62342f2
|
[
"MIT"
] | 121
|
2018-06-12T05:01:05.000Z
|
2022-02-10T20:11:13.000Z
|
hcipy/__init__.py
|
kian1377/hcipy
|
f398e82797b3adbc263e9a35d9389ba7b62342f2
|
[
"MIT"
] | 21
|
2018-07-09T11:01:29.000Z
|
2022-03-15T02:47:24.000Z
|
# Import all submodules.
from . import aperture
from . import atmosphere
from . import coronagraphy
from . import field
from . import fourier
from . import interpolation
from . import mode_basis
from . import optics
from . import plotting
from . import propagation
from . import util
from . import wavefront_control
from . import wavefront_sensing
# Import all core submodules in default namespace.
from .aperture import *
from .atmosphere import *
from .config import *
from .coronagraphy import *
from .field import *
from .fourier import *
from .interpolation import *
from .metrics import *
from .mode_basis import *
from .optics import *
from .plotting import *
from .propagation import *
from .util import *
from .wavefront_control import *
from .wavefront_sensing import *
# Export default namespaces.
__all__ = []
__all__.extend(aperture.__all__)
__all__.extend(atmosphere.__all__)
__all__.extend(config.__all__)
__all__.extend(coronagraphy.__all__)
__all__.extend(field.__all__)
__all__.extend(fourier.__all__)
__all__.extend(interpolation.__all__)
__all__.extend(metrics.__all__)
__all__.extend(mode_basis.__all__)
__all__.extend(optics.__all__)
__all__.extend(plotting.__all__)
__all__.extend(propagation.__all__)
__all__.extend(util.__all__)
__all__.extend(wavefront_control.__all__)
__all__.extend(wavefront_sensing.__all__)
from .version import get_version
__version__ = get_version()
| 26.45283
| 50
| 0.805991
|
0edd5ff27df414375824ffe238c781bfd001163e
| 7,899
|
py
|
Python
|
backend/birdshare_33816/settings.py
|
crowdbotics-apps/birdshare-33816
|
84c3552779013b9748b9873c0189c58cc64b87fe
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/birdshare_33816/settings.py
|
crowdbotics-apps/birdshare-33816
|
84c3552779013b9748b9873c0189c58cc64b87fe
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/birdshare_33816/settings.py
|
crowdbotics-apps/birdshare-33816
|
84c3552779013b9748b9873c0189c58cc64b87fe
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for birdshare_33816 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'birdshare_33816.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'birdshare_33816.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
| 30.380769
| 112
| 0.736802
|
4b7176245f3cafb502fc0a52be9483bcb74d5926
| 1,722
|
py
|
Python
|
angr-doc/examples/defcon2016quals_baby-re_0/solve.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
angr-doc/examples/defcon2016quals_baby-re_0/solve.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
angr-doc/examples/defcon2016quals_baby-re_0/solve.py
|
Ruide/angr-dev
|
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python2
"""
Author: David Manouchehri <manouchehri@protonmail.com>
DEFCON CTF Qualifier 2016
Challenge: baby-re
Team: hack.carleton
Write-up: http://hack.carleton.team/2016/05/21/defcon-ctf-qualifier-2016-baby-re/
Runtime: ~8 minutes (single threaded E5-2650L v3 @ 1.80GHz on DigitalOcean)
DigitalOcean is horrible for single threaded applications, I would highly suggest using something else.
"""
import angr
def main():
proj = angr.Project('./baby-re', load_options={'auto_load_libs': False})
sm = proj.factory.simgr(threads=4) # Doesn't really help to have more threads, but whatever.
# If we get to 0x402941, "Wrong" is going to be printed out, so definitely avoid that.
sm.explore(find=0x40294b, avoid=0x402941)
# If you use anywhere before 0x40292c, angr won't have the flag to print out yet. So don't do that.
return sm.found[0].posix.dumps(1) # The flag is at the end.
"""
Note: There will be a bunch of warnings on your terminal that look like this.
WARNING | 2016-05-21 17:34:33,185 | angr.state_plugins.symbolic_memory | Concretizing symbolic length. Much sad; think about implementing.
WARNING | 2016-05-21 17:34:49,353 | angr.state_plugins.symbolic_memory | Concretizing symbolic length. Much sad; think about implementing.
WARNING | 2016-05-21 17:35:11,810 | angr.state_plugins.symbolic_memory | Concretizing symbolic length. Much sad; think about implementing.
WARNING | 2016-05-21 17:35:44,170 | angr.state_plugins.symbolic_memory | Concretizing symbolic length. Much sad; think about implementing.
Don't worry about these, they're not an issue for this challenge.
"""
def test():
assert 'Math is hard!' in main()
if __name__ == '__main__':
print(repr(main()))
| 39.136364
| 139
| 0.750871
|
43da1540f91575b74b14181882773281fe861bc0
| 7,184
|
py
|
Python
|
python/tink/streaming_aead/streaming_aead_key_templates_test.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
python/tink/streaming_aead/streaming_aead_key_templates_test.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
python/tink/streaming_aead/streaming_aead_key_templates_test.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.streaming_aead_key_templates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from tink.proto import aes_ctr_hmac_streaming_pb2
from tink.proto import aes_gcm_hkdf_streaming_pb2
from tink.proto import common_pb2
from tink.proto import tink_pb2
from tink.streaming_aead import streaming_aead_key_templates
class StreamingAeadKeyTemplatesTest(absltest.TestCase):
def test_aes128_gcm_hkdf_4kb(self):
template = streaming_aead_key_templates.AES128_GCM_HKDF_4KB
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesGcmHkdfStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(16, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hkdf_hash_type)
self.assertEqual(16, key_format.params.derived_key_size)
self.assertEqual(4096, key_format.params.ciphertext_segment_size)
def test_aes256_gcm_hkdf_4kb(self):
template = streaming_aead_key_templates.AES256_GCM_HKDF_4KB
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesGcmHkdfStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(32, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hkdf_hash_type)
self.assertEqual(32, key_format.params.derived_key_size)
self.assertEqual(4096, key_format.params.ciphertext_segment_size)
def test_aes256_gcm_hkdf_1mb(self):
template = streaming_aead_key_templates.AES256_GCM_HKDF_1MB
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesGcmHkdfStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(32, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hkdf_hash_type)
self.assertEqual(32, key_format.params.derived_key_size)
self.assertEqual(1048576, key_format.params.ciphertext_segment_size)
def test_aes128_ctr_hmac_sha256_4kb(self):
template = streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesCtrHmacStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_ctr_hmac_streaming_pb2.AesCtrHmacStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(16, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hkdf_hash_type)
self.assertEqual(16, key_format.params.derived_key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hmac_params.hash)
self.assertEqual(32, key_format.params.hmac_params.tag_size)
self.assertEqual(4096, key_format.params.ciphertext_segment_size)
def test_aes256_ctr_hmac_sha256_4kb(self):
template = streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesCtrHmacStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_ctr_hmac_streaming_pb2.AesCtrHmacStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(32, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hkdf_hash_type)
self.assertEqual(32, key_format.params.derived_key_size)
self.assertEqual(common_pb2.HashType.SHA256,
key_format.params.hmac_params.hash)
self.assertEqual(32, key_format.params.hmac_params.tag_size)
self.assertEqual(4096, key_format.params.ciphertext_segment_size)
def test_create_aes_gcm_hkdf_streaming_key_template(self):
# Intentionally using 'weird' or invalid values for parameters,
# to test that the function correctly puts them in the resulting template.
template = streaming_aead_key_templates.create_aes_gcm_hkdf_streaming_key_template(
aes_key_size=42,
hash_type=common_pb2.HashType.SHA1,
derived_key_size=76,
ciphertext_segment_size=64,
)
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesGcmHkdfStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_gcm_hkdf_streaming_pb2.AesGcmHkdfStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(42, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA1, key_format.params.hkdf_hash_type)
self.assertEqual(76, key_format.params.derived_key_size)
self.assertEqual(64, key_format.params.ciphertext_segment_size)
def test_create_aes_ctr_hmac_streaming_key_template(self):
# Intentionally using 'weird' or invalid values for parameters,
# to test that the function correctly puts them in the resulting template.
template = streaming_aead_key_templates.create_aes_ctr_hmac_streaming_key_template(
aes_key_size=42,
hkdf_hash_type=common_pb2.HashType.SHA1,
derived_key_size=76,
mac_hash_type=common_pb2.HashType.UNKNOWN_HASH,
tag_size=39,
ciphertext_segment_size=64,
)
self.assertEqual(
'type.googleapis.com/google.crypto.tink.AesCtrHmacStreamingKey',
template.type_url)
self.assertEqual(tink_pb2.RAW, template.output_prefix_type)
key_format = aes_ctr_hmac_streaming_pb2.AesCtrHmacStreamingKeyFormat()
key_format.ParseFromString(template.value)
self.assertEqual(42, key_format.key_size)
self.assertEqual(common_pb2.HashType.SHA1, key_format.params.hkdf_hash_type)
self.assertEqual(76, key_format.params.derived_key_size)
self.assertEqual(common_pb2.HashType.UNKNOWN_HASH,
key_format.params.hmac_params.hash)
self.assertEqual(39, key_format.params.hmac_params.tag_size)
self.assertEqual(64, key_format.params.ciphertext_segment_size)
if __name__ == '__main__':
absltest.main()
| 45.757962
| 87
| 0.776169
|
51a29e6a2ee107d21346fdb9ed24fcac9a3ccc78
| 866
|
py
|
Python
|
tests/factories.py
|
GFlorio/pytimeset
|
147ca0f074ea9753cb35f05fef8fd80210f82fb5
|
[
"MIT"
] | null | null | null |
tests/factories.py
|
GFlorio/pytimeset
|
147ca0f074ea9753cb35f05fef8fd80210f82fb5
|
[
"MIT"
] | 1
|
2020-03-24T17:28:14.000Z
|
2020-03-24T17:28:14.000Z
|
tests/factories.py
|
GFlorio/pytimeset
|
147ca0f074ea9753cb35f05fef8fd80210f82fb5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
from random import uniform, randrange
from typing import List
from timeset import TimeInterval, TimeSet
def make_moments(number: int, start: datetime, end: datetime) -> List[datetime]:
diff = end-start
return sorted([
start+uniform(0, 1)*diff for _ in range(number)
])
def make_intervals(number: int, start: datetime, end: datetime) -> List[TimeInterval]:
diff = end-start
intervals = []
for _ in range(number):
s = uniform(0, 1)
e = uniform(s, 1)
intervals.append(TimeInterval(start+s*diff, start+e*diff))
return intervals
def make_sets(number: int, start: datetime, end: datetime, max_components=5) -> List[TimeSet]:
return [
TimeSet(make_intervals(randrange(1, max_components+1), start, end))
for _ in range(number)
]
| 27.935484
| 94
| 0.666282
|
dd501a18f441ebed0269a9455606ff857d2a4b67
| 765
|
py
|
Python
|
project_root/project_root/urls.py
|
saharisrael31/marketing-service-api
|
1fb6c016c2bf65134bf43da5b245a78bbcdc5294
|
[
"bzip2-1.0.6"
] | null | null | null |
project_root/project_root/urls.py
|
saharisrael31/marketing-service-api
|
1fb6c016c2bf65134bf43da5b245a78bbcdc5294
|
[
"bzip2-1.0.6"
] | null | null | null |
project_root/project_root/urls.py
|
saharisrael31/marketing-service-api
|
1fb6c016c2bf65134bf43da5b245a78bbcdc5294
|
[
"bzip2-1.0.6"
] | null | null | null |
"""project_root URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
]
| 33.26087
| 77
| 0.709804
|
141905a8eacb4e3403a80cfba36d7dc84ca90820
| 4,269
|
py
|
Python
|
src/modeling/cnn_lstm.py
|
ShotaroKataoka/work_detection_PLOSONE
|
b8016e0003e11f6eb01355f52804c790a26a7741
|
[
"MIT"
] | null | null | null |
src/modeling/cnn_lstm.py
|
ShotaroKataoka/work_detection_PLOSONE
|
b8016e0003e11f6eb01355f52804c790a26a7741
|
[
"MIT"
] | null | null | null |
src/modeling/cnn_lstm.py
|
ShotaroKataoka/work_detection_PLOSONE
|
b8016e0003e11f6eb01355f52804c790a26a7741
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class Block(nn.Module):
def __init__(self):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(256, 256, 3, stride=1)
self.conv2 = nn.Conv2d(256, 256, 3, stride=1)
self.conv3 = nn.Conv2d(256, 256, 3, stride=2 ,padding=14)
self.relu = nn.ReLU(inplace=True)
self.norm1 = nn.BatchNorm2d(256)
self.norm2 = nn.BatchNorm2d(256)
self.norm3 = nn.BatchNorm2d(256)
def forward(self, x):
"""
Parameters
----------
            x: torch.Tensor
                4-D tensor of shape (b * sequence_length, c, h, w),
                e.g. torch.Size([1600, 256, 30, 40])
        Returns
        -------
        torch.Tensor
            Feature map after the three conv + batch-norm + ReLU layers
"""
x = self.relu(self.norm1(self.conv1(x))) # in : torch.Size([1600, 256, h, w]) out :torch.Size([800, 256, h-2, w-2])
x = self.relu(self.norm2(self.conv2(x))) # in : torch.Size([1600, 256, h-2, w-2]) out :torch.Size([800, 256, h-4, w-4])
x = self.relu(self.norm3(self.conv3(x))) # in : torch.Size([1600, 256, h-4, w-4]) out :torch.Size([800, 256, ((h-4)/2 )-1, ((w-6)/2) -1])
return x
class CnnModule(nn.Module):
def __init__(self):
super(CnnModule, self).__init__()
self.conv1 = nn.Conv2d(256, 256, 3, stride=2)
self.conv2 = nn.Conv2d(256, 256, 3, stride=2)
self.conv3 = nn.Conv2d(256, 256, 3, stride=2)
self.relu = nn.ReLU(inplace=True)
self.norm1 = nn.BatchNorm2d(256)
self.norm2 = nn.BatchNorm2d(256)
self.norm3 = nn.BatchNorm2d(256)
def forward(self, x):
"""
Parameters
----------
            x: torch.Tensor
                4-D tensor of shape (b * sequence_length, c, h, w),
                e.g. torch.Size([1600, 256, 30, 40])
        Returns
        -------
        torch.Tensor
            2-D tensor of shape (b * sequence_length, c) after global average pooling
"""
x = self.relu(self.norm1(self.conv1(x))) # in : torch.Size([1600, 256, 30, 40]) out :torch.Size([800, 256, 28, 38])
x = self.relu(self.norm2(self.conv2(x))) # in : torch.Size([800, 256, 28, 38]) out :torch.Size([800, 256, 26, 36])
x = self.relu(self.norm3(self.conv3(x))) # in : torch.Size([800, 256, 26, 36]) out :torch.Size([800, 256, 12, 17])
# ↓ Global average pooling
x = F.avg_pool2d(x, kernel_size=x.size()[2:]) # in: torch.Size([1600, 512, 28, 38]) out: torch.Size([1600, 512, 1, 1])
sequence_length , c ,h,w = x.shape
x = x.view(sequence_length, c) # in: torch.Size([1600, 512, 1, 1]) out: torch.Size([1600, 512])
# ↑ Global average pooling
return x
class CNN_LSTM(nn.Module):
def __init__(self,sequence_length, input_dim, hidden_dim, nclass, dropout=0.2):
super(CNN_LSTM, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers=1, batch_first=True)
self.dropout = nn.Dropout(p=dropout)
self.hidden2tag = nn.Linear(hidden_dim, nclass)
self.block1 = Block()
self.block2 = Block()
self.cnn = CnnModule()
self.soft = nn.Softmax(dim = 1)
def forward(self, x):
"""
Parameters
----------
            x: torch.Tensor
                5-D tensor of shape (b, sequence_length, c, h, w),
                e.g. torch.Size([batch, 200, 256, 30, 40])
        Returns
        -------
        torch.Tensor
            Tag scores of shape (b, sequence_length, nclass)
"""
x=x.float()
b, sequence_length,c, h ,w = x.shape
x = x.view(b * sequence_length, c,h,w) # torch.Size([200 * batch, 3, 480, 640])
cnn_result = self.cnn(x) #in : torch.Size([200 * batch, 3, 480, 640]) out: torch.Size([1600, 128])
cnn_result = cnn_result.view(b,sequence_length, -1) # in: torch.Size([1600, 128]) out: shape torch.Size([8, 200, 128])
lstm_out, (h_n, c_n) = self.lstm(cnn_result) # in: shape torch.Size([8, 200, 128]) out: torch.Size([8, 200, 64])
lstm_out = self.dropout(lstm_out)
tag_score = self.hidden2tag(lstm_out) # in: torch.Size([8, 200, 64]) out: torch.Size([8, 200, 13])
return tag_score
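# Minimal shape-check sketch (not part of the original module): the batch/sequence
# sizes below are illustrative assumptions; input_dim must be 256 because CnnModule
# pools its 256-channel feature map down to a 256-dim vector per frame.
if __name__ == '__main__':
    model = CNN_LSTM(sequence_length=4, input_dim=256, hidden_dim=64, nclass=13)
    dummy = torch.rand(2, 4, 256, 30, 40)   # (b, sequence_length, c, h, w)
    print(model(dummy).shape)               # torch.Size([2, 4, 13])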
| 40.273585
| 145
| 0.560319
|
d40e6f0abd8e6758cb2bc1a63f5851c78a8e47bd
| 1,380
|
py
|
Python
|
geolucidate/constants.py
|
siaavenza/geolucidate
|
afafd38f43c1714c814d1676e70ec9b3fef2d4c3
|
[
"MIT"
] | null | null | null |
geolucidate/constants.py
|
siaavenza/geolucidate
|
afafd38f43c1714c814d1676e70ec9b3fef2d4c3
|
[
"MIT"
] | null | null | null |
geolucidate/constants.py
|
siaavenza/geolucidate
|
afafd38f43c1714c814d1676e70ec9b3fef2d4c3
|
[
"MIT"
] | 1
|
2020-07-03T14:59:21.000Z
|
2020-07-03T14:59:21.000Z
|
import re
"""List of Minutes/Seconds Characters for normalization
Was generated with unicodedata.name and unicodedata.char
and searched for names that include "QUOTATION" and "PRIME"
"""
MINUTE_CHARACTERS = {
# Quotations
"LEFT SINGLE QUOTATION MARK": "‘",
"RIGHT SINGLE QUOTATION MARK": "’",
"HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT": "❛",
"HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT": "❜",
"SINGLE HIGH-REVERSED-9 QUOTATION MARK": "‛",
# Primes
"PRIME": "′",
"MODIFIER LETTER PRIME": "ʹ",
"REVERSED PRIME": "‵",
}
SECOND_CHARACTERS = {
# Quotations
"LEFT DOUBLE QUOTATION MARK": "“",
"RIGHT DOUBLE QUOTATION MARK": "”",
"REVERSED DOUBLE PRIME QUOTATION MARK": "〝",
"DOUBLE HIGH-REVERSED-9 QUOTATION MARK": "‟",
"HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT": "❝",
"HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT": "❞",
"DOUBLE PRIME QUOTATION MARK": "〞",
"FULLWIDTH QUOTATION MARK": """,
# Primes
"MODIFIER LETTER DOUBLE PRIME": "ʺ",
"DOUBLE PRIME": "″",
"REVERSED DOUBLE PRIME": "‶",
}
# Use above dicts to generate RegEx character group string
# Example Output: MINUTE_CHARACTERS_RE >> "[‘’❛❜‛′ʹ‵]"
MINUTE_CHARACTERS_RE = '[{}]'.format(re.escape(''.join(MINUTE_CHARACTERS.values())))
SECOND_CHARACTERS_RE = '[{}]'.format(re.escape(''.join(SECOND_CHARACTERS.values())))
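# Minimal usage sketch (not part of the original module): normalize the Unicode
# minute/second variants in a coordinate string to a plain apostrophe and double
# quote. The sample coordinate below is an illustrative assumption.
if __name__ == '__main__':
    coord = "40°26′46″N 79°58′56″W"
    coord = re.sub(MINUTE_CHARACTERS_RE, "'", coord)
    coord = re.sub(SECOND_CHARACTERS_RE, '"', coord)
    print(coord)  # 40°26'46"N 79°58'56"W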
| 33.658537
| 84
| 0.65942
|
a07c38ada5eb00ada778e7a6a89dc4872da34580
| 3,256
|
py
|
Python
|
docker/exporter/exporter.py
|
Kalakaarboyz1/osv
|
538dd770464c01bd459dc99940ded90a47677ed0
|
[
"Apache-2.0"
] | 1
|
2021-07-21T03:58:50.000Z
|
2021-07-21T03:58:50.000Z
|
docker/exporter/exporter.py
|
Kalakaarboyz1/osv
|
538dd770464c01bd459dc99940ded90a47677ed0
|
[
"Apache-2.0"
] | null | null | null |
docker/exporter/exporter.py
|
Kalakaarboyz1/osv
|
538dd770464c01bd459dc99940ded90a47677ed0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OSV Exporter."""
import argparse
import concurrent.futures
import logging
import os
import tempfile
import zipfile
from google.cloud import ndb
from google.cloud import storage
import osv
DEFAULT_WORK_DIR = '/work'
_EXPORT_BUCKET = 'osv-vulnerabilities'
_EXPORT_WORKERS = 32
class Exporter:
"""Exporter."""
def __init__(self, work_dir, export_bucket):
self._work_dir = work_dir
self._export_bucket = export_bucket
def run(self):
"""Run exporter."""
query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)
ecosystems = [bug.ecosystem for bug in query if bug.ecosystem]
for ecosystem in ecosystems:
with tempfile.TemporaryDirectory() as tmp_dir:
self._export_ecosystem_to_bucket(ecosystem, tmp_dir)
def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):
"""Export ecosystem vulns to bucket."""
logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)
storage_client = storage.Client()
bucket = storage_client.get_bucket(self._export_bucket)
zip_path = os.path.join(tmp_dir, 'all.zip')
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
        if not bug.public or bug.status != osv.BugStatus.PROCESSED:
continue
file_path = os.path.join(tmp_dir, bug.id() + '.json')
osv.write_vulnerability(
bug.to_vulnerability(include_source=True), file_path)
zip_file.write(file_path, os.path.basename(file_path))
def upload_single(source_path, target_path):
"""Upload a single vulnerability."""
logging.info('Uploading %s', target_path)
try:
blob = bucket.blob(target_path)
blob.upload_from_filename(source_path)
except Exception as e:
logging.error('Failed to export: %s', e)
with concurrent.futures.ThreadPoolExecutor(
max_workers=_EXPORT_WORKERS) as executor:
for filename in os.listdir(tmp_dir):
executor.submit(upload_single, os.path.join(tmp_dir, filename),
f'{ecosystem}/{filename}')
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='Exporter')
parser.add_argument(
'--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)
args = parser.parse_args()
tmp_dir = os.path.join(args.work_dir, 'tmp')
os.makedirs(tmp_dir, exist_ok=True)
os.environ['TMPDIR'] = tmp_dir
exporter = Exporter(args.work_dir, _EXPORT_BUCKET)
exporter.run()
if __name__ == '__main__':
_ndb_client = ndb.Client()
with _ndb_client.context():
main()
| 31.921569
| 74
| 0.713452
|
5fb7fa1dcbdaca23a09f4123bee698c4eafa12b6
| 11,140
|
py
|
Python
|
spider.py
|
ljw8947/renren-dumps
|
d83622b75fb9995a2fa847290201e91c65d2b9a6
|
[
"MIT"
] | null | null | null |
spider.py
|
ljw8947/renren-dumps
|
d83622b75fb9995a2fa847290201e91c65d2b9a6
|
[
"MIT"
] | null | null | null |
spider.py
|
ljw8947/renren-dumps
|
d83622b75fb9995a2fa847290201e91c65d2b9a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import datetime
import html
import json
import os
import pickle
import random
import re
import lxml.html
from typing import Callable, Dict, List, Union
import html2text
from requests import Session
JSONType = Dict[str, Union[str, int]]
SimpleCallback = Callable[[], None]
class LoginFailed(Exception):
pass
class iCodeRequired(Exception):
pass
def encrypt_string(enc, mo, s):
b = 0
pos = 0
for ch in s:
b += ord(ch) << pos
pos += 8
crypt = pow(b, enc, mo)
return f'{crypt:x}'
class RenrenSpider:
ENCRYPT_KEY_URL = "http://login.renren.com/ajax/getEncryptKey"
LOGIN_URL = "http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp={ts}"
LOGIN_3G_URL = "http://3g.renren.com/login.do?autoLogin=true&"
ICODE_URL = "http://icode.renren.com/getcode.do?t=web_login&rnd={rnd}"
MAX_RETRY = 3
def __init__(self) -> None:
self.ui = None
self.user_id = None
self.output_dir = None
self.s = Session()
self.s.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"
}
self.re = None
self.rn = None
self.rk = None
def login(self, email: str, password: str, icode: str = "", keep: bool = False) -> None:
if not all([self.re, self.rn, self.rk]):
self.s.cookies.clear()
enc_data = self.s.get(self.ENCRYPT_KEY_URL).json()
self.re = int(enc_data['e'], 16)
self.rn = int(enc_data['n'], 16)
self.rk = enc_data['rkey']
payload = {
'email': email,
'password': encrypt_string(self.re, self.rn, password),
'rkey': self.rk,
'key_id': 1,
'captcha_type': 'web_login',
'icode': icode,
}
now = datetime.datetime.now()
ts = '{year}{month}{weekday}{hour}{second}{ms}'.format(
year=now.year,
month=now.month - 1,
weekday=(now.weekday() + 1) % 7,
hour=now.hour,
second=now.second,
ms=int(now.microsecond / 1000),
)
login_data = self.s.post(self.LOGIN_URL.format(ts=ts), data=payload).json()
if not login_data.get('code', False) or 'id' not in self.s.cookies:
raise iCodeRequired(login_data.get('failDescription'))
payload = {
'ref': 'http://m.renren.com/q.do?null',
'email': email,
'password': password
}
r = self.s.post(self.LOGIN_3G_URL, data=payload)
assert r.ok, "3G login failed"
if not self.user_id:
self.user_id = self.s.cookies["id"]
if keep:
with open(".session", "wb") as f:
pickle.dump(self.s.cookies, f)
def set_params(self, *, user_id=None, output_dir=None) -> None:
if user_id:
self.user_id = user_id
self.output_dir = output_dir
def get_icode_image(self) -> bytes:
resp = self.s.get(self.ICODE_URL.format(rnd=random.random()))
return resp.content
def is_login(self) -> bool:
"""login and get cookies."""
if not os.path.isfile(".session"):
return False
with open(".session", "rb") as f:
self.s.cookies = pickle.load(f)
self.s.cookies.clear_expired_cookies()
if "id" not in self.s.cookies:
return False
if not self.user_id:
self.user_id = self.s.cookies["id"]
return True
def parse_album_list(self) -> List[JSONType]:
collections_url = f"http://photo.renren.com/photo/{self.user_id}/albumlist/v7?offset=0&limit=40&showAll=1"
resp = self.s.get(collections_url)
albumlist = json.loads(
re.findall(r"'albumList':\s*(\[[\s\S]*?\])", resp.text)[0]
)
return [item for item in albumlist if item.get("photoCount")]
def download_album(self, album: JSONType) -> None:
album_name = html.unescape(album["albumName"]).strip("./")
photo_list = []
album_url = f"http://photo.renren.com/photo/{self.user_id}/album-{album['albumId']}/bypage/ajax/v7?pageSize=100"
for i in range(int(album["photoCount"] // 100) + 1):
resp = self.s.get(f"{album_url}&page={i+1}")
resp.raise_for_status()
photo_list.extend(resp.json()["photoList"])
download_dir = os.path.join(self.output_dir, "albums", album_name)
if not os.path.isdir(download_dir):
os.makedirs(download_dir)
def download_image(image: JSONType, callback: SimpleCallback) -> None:
url = image["url"]
image_path = os.path.join(download_dir, os.path.basename(url))
if os.path.isfile(image_path):
callback()
return
r = self.s.get(url)
r.raise_for_status()
with open(image_path, "wb") as f:
f.write(r.content)
callback()
t = self.ui.progressbar(
total=int(album["photoCount"]), desc=f"Dumping album {album_name}"
)
for image in photo_list:
download_image(image, t.update)
def dump_albums(self) -> None:
for album in self.parse_album_list():
self.download_album(album)
def parse_article_list(self) -> List[JSONType]:
start_url = f'http://3g.renren.com/blog/wmyblog.do?id={self.user_id}'
results = []
if not os.path.isdir(f"{self.output_dir}/articles"):
os.makedirs(f"{self.output_dir}/articles")
def _parse_one_page(url):
resp = self.s.get(url)
tree = lxml.html.fromstring(resp.text)
for element in tree.xpath('//div[@class="list"]/div[not(@class)]'):
item = {
'title': element.xpath('a/text()')[0].strip(),
'url': element.xpath('a/@href')[0].strip(),
'createTime': element.xpath('p/text()')[0].strip()
}
results.append(item)
next_url = tree.xpath('//a[@title="下一页"]/@href')
if next_url:
_parse_one_page(next_url[0].strip())
_parse_one_page(start_url)
return results
def download_article(self, article: JSONType, callback: SimpleCallback) -> None:
url = article["url"].replace('flag=0', 'flag=1')
title = article["title"]
datetime = article["createTime"]
if os.path.isfile(f"{self.output_dir}/articles/{title}.md"):
callback()
return
resp = self.s.get(url)
resp.raise_for_status()
text = re.findall(
r'<div class="con">([\s\S]*?)</div>',
resp.text,
)[0].strip()
template = """\
{title}
=======
日期: {datetime}
{content}
"""
with open(f"{self.output_dir}/articles/{title}.md", "w", encoding="utf-8") as f:
f.write(
template.format(
title=title, datetime=datetime, content=html2text.html2text(text)
)
)
callback()
def dump_articles(self) -> None:
articles = self.parse_article_list()
t = self.ui.progressbar(total=len(articles), desc="Dumping articles")
for article in articles:
self.download_article(article, t.update)
def dump_status(self) -> None:
url = f"http://status.renren.com/GetSomeomeDoingList.do?userId={self.user_id}&curpage="
status_url="http://comment.renren.com/comment/xoa2?type=status&entryId={0}&entryOwnerId={1}"
i = 0
total = 0
results = []
statusDict={}
comentsDic={}
while i == 0 or i * 20 < total:
r = self.s.get(url + str(i))
r.raise_for_status()
data = r.json()
if not total:
total = data["count"]
tempResult=data['doingArray']
results.extend(tempResult)
for item in tempResult:
if item.get("location"):
heading = f"{item['dtime']} 在 {item['location']}"
else:
heading = item['dtime']
content = html2text.html2text(item['content'])
statusDict[heading]=content
print(f"{heading}{content}")
                statusId = html2text.html2text(str(int(item['id'])))
                statusInfo = self.s.get(status_url.format(statusId, self.user_id))
                statusInfo.raise_for_status()
                cData = statusInfo.json()
                if 'comments' in cData:
                    comentsDic[heading] = cData['comments']
                else:
                    print("no comments")
i += 1
if not os.path.isdir(f"{self.output_dir}"):
os.makedirs(f"{self.output_dir}")
with open(f"{self.output_dir}/status.md", "w", encoding="utf-8") as f:
progressbar = self.ui.progressbar(total=len(results), desc="Dumping status")
            # write the statuses out in chronological (key) order
            for time in sorted(statusDict.keys()):
                status = statusDict[time]
                f.write(f"### {time}\n\n{status}")
                if comentsDic.get(time):
                    for commentData in comentsDic.get(time):
                        commentTime = html2text.html2text(commentData['time'])
                        commentAuthorId = html2text.html2text(str(commentData['authorId']))
                        commentAuthorName = html2text.html2text(commentData['authorName'])
                        comment = html2text.html2text(commentData['content'])
                        comment = f"  -- | {commentAuthorName} | {comment} | {commentTime} | {commentAuthorId} "
                        comment = comment.replace('\n', '')
f.write(f"{comment}\n\n")
f.write(f"\n\n")
progressbar.update()
print("dump end\n")
def main(self, ui) -> None:
self.ui = ui
self.dump_albums()
self.dump_articles()
self.dump_status()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-k",
"--keep",
default=False,
action="store_true",
help="Whether keep the login cookies",
)
parser.add_argument("--user", help="Specify the user ID to parse")
parser.add_argument(
"--email",
default=os.getenv("RENREN_EMAIL"),
help="Login email, defaults to envvar RENREN_EMAIL",
)
parser.add_argument(
"--password",
default=os.getenv("RENREN_PASSWD"),
help="Login password, defaults to envvar RENREN_PASSWD",
)
parser.add_argument(
"-o", "--output", default="output", help="Specify output directory"
)
args = parser.parse_args()
    spider = RenrenSpider()
    spider.set_params(user_id=args.user, output_dir=args.output)
    if not spider.is_login():
        spider.login(args.email, args.password, keep=args.keep)
    spider.main(ui=None)  # a UI helper exposing .progressbar() is expected here
| 34.596273
| 138
| 0.549461
|
ce0df2753cef4ba008a57b44c40af48d9f72775a
| 741
|
py
|
Python
|
functions_legacy/VGpdf.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2021-04-10T13:24:30.000Z
|
2022-03-26T08:20:42.000Z
|
functions_legacy/VGpdf.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | null | null | null |
functions_legacy/VGpdf.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2019-08-13T22:02:17.000Z
|
2022-02-09T17:49:12.000Z
|
from numpy import pi, abs, log, exp
from scipy.special import gamma, kv
def VGpdf(x,par,tau):
# This function computes the Variance-Gamma pdf at horizon tau, evaluated
# at points x
# INPUTS
# x :[vector] points at which the pdf is evaluated
    #  par :[struct] parameters of the VG model; the struct has fields {c, m, g}
# tau :[scalar] time horizon
# OPS
# y :[vector] VG pdf
## Code
c = par.c
m = par.m
g = par.g
alpha = c*tau*log(g*m)-2*log(gamma(c*tau))-(2*c*tau-1)*log(g+m)
b = (g+m)*abs(x)
beta = log(gamma(c*tau))-log(pi)/2+(c*tau-1/2)*log(b)+b/2+log(kv(c*tau-1/2,b/2))
ln_pdf = alpha + ((g-m)/2)*x - ((g+m)/2)*abs(x) + beta
y = exp(ln_pdf)
return y
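# Minimal usage sketch (not part of the original file): 'par' only needs the attributes
# c, m and g, so a SimpleNamespace stands in for the MATLAB-style struct; the parameter
# values and evaluation points below are illustrative assumptions.
if __name__ == '__main__':
    from types import SimpleNamespace
    from numpy import array
    par = SimpleNamespace(c=1.0, m=10.0, g=12.0)
    x = array([-0.4, -0.2, 0.2, 0.4])   # avoid x = 0, where the Bessel term diverges
    print(VGpdf(x, par, tau=1.0))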
| 26.464286
| 84
| 0.57085
|
502c179e1adb0ab9139def80e0bea1aca43fee56
| 4,619
|
py
|
Python
|
lib/train/BaseTrainer.py
|
McMasterAI/RadiologyandAI-MedicalZooPytorch
|
606a1654f08b8bae7c265608694d55fecc1001ed
|
[
"MIT"
] | 995
|
2019-07-23T11:34:22.000Z
|
2022-03-30T21:10:52.000Z
|
lib/train/BaseTrainer.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 18
|
2020-04-27T03:38:22.000Z
|
2022-01-18T20:55:20.000Z
|
lib/train/BaseTrainer.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 209
|
2019-08-21T13:41:13.000Z
|
2022-03-30T08:01:52.000Z
|
import torch
from abc import abstractmethod
from numpy import inf
from lib.visual3D_temp import TensorboardWriter
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config):
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
# setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.criterion = criterion
self.metric_ftns = metric_ftns
self.optimizer = optimizer
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get('monitor', 'off')
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
if config.resume is not None:
self._resume_checkpoint(config.resume)
@abstractmethod
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There\'s no GPU available on this machine,"
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
| 38.491667
| 115
| 0.573284
|
063abfe41d72719dadba934c3f77498135d0b9a7
| 301
|
py
|
Python
|
7_Sarven_Desert/303-Yakstraction/yakstraction.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
7_Sarven_Desert/303-Yakstraction/yakstraction.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
7_Sarven_Desert/303-Yakstraction/yakstraction.py
|
katitek/Code-Combat
|
fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef
|
[
"MIT"
] | null | null | null |
def flagFunc(flag):
pos = flag.pos
if hero.gold >= 25:
hero.buildXY("decoy", pos.x, pos.y)
hero.pickUpFlag(flag)
while True:
flag = hero.findFlag()
if flag:
flagFunc(flag)
item = hero.findNearestItem()
if item:
hero.moveXY(item.pos.x, item.pos.y)
| 20.066667
| 43
| 0.58804
|
2cc7637ae7f20599c6ba5e6dd882b9d1f386fccb
| 3,648
|
py
|
Python
|
extract_weights.py
|
HaGeza/DeepLabV3PlusModified
|
5c46c167ce907e167b3b7ca51f4fabc66586f6d9
|
[
"MIT"
] | null | null | null |
extract_weights.py
|
HaGeza/DeepLabV3PlusModified
|
5c46c167ce907e167b3b7ca51f4fabc66586f6d9
|
[
"MIT"
] | null | null | null |
extract_weights.py
|
HaGeza/DeepLabV3PlusModified
|
5c46c167ce907e167b3b7ca51f4fabc66586f6d9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import get_file
def get_xception_filename(key):
"""Rename tensor name to the corresponding Keras layer weight name.
# Arguments
key: tensor name in TF (determined by tf.variable_scope)
"""
filename = str(key)
filename = filename.replace('/', '_')
filename = filename.replace('xception_65_', '')
filename = filename.replace('decoder_', '', 1)
filename = filename.replace('BatchNorm', 'BN')
if 'Momentum' in filename:
return None
if 'entry_flow' in filename or 'exit_flow' in filename:
filename = filename.replace('_unit_1_xception_module', '')
elif 'middle_flow' in filename:
filename = filename.replace('_block1', '')
filename = filename.replace('_xception_module', '')
# from TF to Keras naming
filename = filename.replace('_weights', '_kernel')
filename = filename.replace('_biases', '_bias')
return filename + '.npy'
def get_mobilenetv2_filename(key):
"""Rename tensor name to the corresponding Keras layer weight name.
# Arguments
key: tensor name in TF (determined by tf.variable_scope)
"""
filename = str(key)
filename = filename.replace('/', '_')
filename = filename.replace('MobilenetV2_', '')
filename = filename.replace('BatchNorm', 'BN')
if 'Momentum' in filename:
return None
# from TF to Keras naming
filename = filename.replace('_weights', '_kernel')
filename = filename.replace('_biases', '_bias')
return filename + '.npy'
def extract_tensors_from_checkpoint_file(filename, output_folder='weights', net_name=None):
"""Extract tensors from a TF checkpoint file.
# Arguments
filename: TF checkpoint file
output_folder: where to save the output numpy array files
"""
if not os.path.exists(output_folder):
os.makedirs(output_folder)
reader = tf.train.NewCheckpointReader(filename)
for key in reader.get_variable_to_shape_map():
# convert tensor name into the corresponding Keras layer weight name and save
if net_name == 'xception':
filename = get_xception_filename(key)
elif net_name == 'mobilenetv2':
filename = get_mobilenetv2_filename(key)
if filename:
path = os.path.join(output_folder, filename)
arr = reader.get_tensor(key)
np.save(path, arr)
print("tensor_name: ", key)
CKPT_URL = 'http://download.tensorflow.org/models/deeplabv3_pascal_trainval_2018_01_04.tar.gz'
CKPT_URL_MOBILE = 'http://download.tensorflow.org/models/deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz'
MODEL_DIR = 'models'
MODEL_SUBDIR = 'deeplabv3_pascal_trainval'
MODEL_SUBDIR_MOBILE = 'deeplabv3_mnv2_pascal_trainval'
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
checkpoint_tar = get_file(
'deeplabv3_pascal_trainval_2018_01_04.tar.gz',
CKPT_URL,
extract=True,
cache_subdir='',
cache_dir=MODEL_DIR)
checkpoint_tar_mobile = get_file(
'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',
CKPT_URL_MOBILE,
extract=True,
cache_subdir='',
cache_dir=MODEL_DIR)
checkpoint_file = os.path.join(MODEL_DIR, MODEL_SUBDIR, 'model.ckpt')
extract_tensors_from_checkpoint_file(
checkpoint_file, net_name='xception', output_folder='weights/xception')
checkpoint_file = os.path.join(
MODEL_DIR, MODEL_SUBDIR_MOBILE, 'model.ckpt-30000')
extract_tensors_from_checkpoint_file(
checkpoint_file, net_name='mobilenetv2', output_folder='weights/mobilenetv2')
| 34.093458
| 106
| 0.705044
|
798c09d491aa93c3b0eeb772c3f3901f1e057770
| 4,537
|
py
|
Python
|
src/u_net.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 5
|
2020-02-06T05:44:08.000Z
|
2021-07-21T07:16:49.000Z
|
src/u_net.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 2
|
2021-06-21T11:09:30.000Z
|
2021-07-12T07:35:09.000Z
|
src/u_net.py
|
mori97/U-Net_MUSDB18
|
d452f0e6378c1d74e823dcb1e95d92307f4dea46
|
[
"MIT"
] | 1
|
2021-06-05T03:13:12.000Z
|
2021-06-05T03:13:12.000Z
|
import torch
import torch.nn.functional as F
EPS = 1e-8
class UNet(torch.nn.Module):
"""An implementation of U-Net for music source separation.
It has been proposed in "Singing Voice Separation with Deep U-Net
Convolutional Networks".
(https://ismir2017.smcnus.org/wp-content/uploads/2017/10/171_Paper.pdf)
Args:
n_class (int): Number of output classes.
"""
def __init__(self, n_class):
super(UNet, self).__init__()
self.conv1 = torch.nn.Conv2d(
1, 16, kernel_size=5, stride=2, padding=2)
self.conv_bn1 = torch.nn.BatchNorm2d(16)
self.conv2 = torch.nn.Conv2d(
16, 32, kernel_size=5, stride=2, padding=2)
self.conv_bn2 = torch.nn.BatchNorm2d(32)
self.conv3 = torch.nn.Conv2d(
32, 64, kernel_size=5, stride=2, padding=2)
self.conv_bn3 = torch.nn.BatchNorm2d(64)
self.conv4 = torch.nn.Conv2d(
64, 128, kernel_size=5, stride=2, padding=2)
self.conv_bn4 = torch.nn.BatchNorm2d(128)
self.conv5 = torch.nn.Conv2d(
128, 256, kernel_size=5, stride=2, padding=2)
self.conv_bn5 = torch.nn.BatchNorm2d(256)
self.conv6 = torch.nn.Conv2d(
256, 512, kernel_size=5, stride=2, padding=2)
self.conv_bn6 = torch.nn.BatchNorm2d(512)
self.deconv1 = torch.nn.ConvTranspose2d(
512, 256, kernel_size=5, stride=2, padding=2, output_padding=1)
self.deconv_bn1 = torch.nn.BatchNorm2d(256)
self.dropout1 = torch.nn.Dropout2d(0.5)
self.deconv2 = torch.nn.ConvTranspose2d(
512, 128, kernel_size=5, stride=2, padding=2, output_padding=1)
self.deconv_bn2 = torch.nn.BatchNorm2d(128)
self.dropout2 = torch.nn.Dropout2d(0.5)
self.deconv3 = torch.nn.ConvTranspose2d(
256, 64, kernel_size=5, stride=2, padding=2, output_padding=1)
self.deconv_bn3 = torch.nn.BatchNorm2d(64)
self.dropout3 = torch.nn.Dropout2d(0.5)
self.deconv4 = torch.nn.ConvTranspose2d(
128, 32, kernel_size=5, stride=2, padding=2, output_padding=1)
self.deconv_bn4 = torch.nn.BatchNorm2d(32)
self.deconv5 = torch.nn.ConvTranspose2d(
64, 16, kernel_size=5, stride=2, padding=2, output_padding=1)
self.deconv_bn5 = torch.nn.BatchNorm2d(16)
self.deconv6 = torch.nn.ConvTranspose2d(
32, n_class, kernel_size=5, stride=2, padding=2, output_padding=1)
def forward(self, x):
"""Compute the separation mask.
Args:
x (torch.Tensor): Shape of (n_batch, n_frequency, n_frame).
The number of time frames should be a multiple of 64.
Returns:
torch.Tensor: Shape of (n_batch, n_part, n_frequency, n_frame).
Separation mask.
"""
# Add channel dimension
x = x.unsqueeze(1)
x = torch.log(x + EPS)
h1 = F.leaky_relu(self.conv_bn1(self.conv1(x)), 0.2)
h2 = F.leaky_relu(self.conv_bn2(self.conv2(h1)), 0.2)
h3 = F.leaky_relu(self.conv_bn3(self.conv3(h2)), 0.2)
h4 = F.leaky_relu(self.conv_bn4(self.conv4(h3)), 0.2)
h5 = F.leaky_relu(self.conv_bn5(self.conv5(h4)), 0.2)
h = F.leaky_relu(self.conv_bn6(self.conv6(h5)), 0.2)
h = self.dropout1(F.relu(self.deconv_bn1(self.deconv1(h))))
h = torch.cat((h, h5), dim=1)
h = self.dropout2(F.relu(self.deconv_bn2(self.deconv2(h))))
h = torch.cat((h, h4), dim=1)
h = self.dropout3(F.relu(self.deconv_bn3(self.deconv3(h))))
h = torch.cat((h, h3), dim=1)
h = F.relu(self.deconv_bn4(self.deconv4(h)))
h = torch.cat((h, h2), dim=1)
h = F.relu(self.deconv_bn5(self.deconv5(h)))
h = torch.cat((h, h1), dim=1)
h = F.softmax(self.deconv6(h), dim=1)
return h
def padding(sound_stft):
"""Apply reflection padding to ensure that number of time frames of
`sound`'s STFT representation is multiple of 64.
Args:
sound_stft (torch.Tensor): Spectrogram to be padded.
Returns:
Tuple[torch.Tensor, Tuple[int, int]]: Reflection padded spectrogram and
number of rows padded to left-side and right-side, respectively.
"""
n_frames = sound_stft.size(-1)
n_pad = (64 - n_frames % 64) % 64
if n_pad:
left = n_pad // 2
right = n_pad - left
return F.pad(sound_stft, (left, right), mode='reflect'), (left, right)
else:
return sound_stft, (0, 0)
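# Minimal usage sketch (not part of the original module): run a random spectrogram
# through the network after padding its frame count to a multiple of 64. The
# frequency-bin count (512) and frame count (100) are illustrative assumptions.
if __name__ == '__main__':
    model = UNet(n_class=2)
    spec = torch.rand(1, 512, 100)              # (n_batch, n_frequency, n_frame)
    spec, (left, right) = padding(spec)         # frames: 100 -> 128
    mask = model(spec)
    print(mask.shape, (left, right))            # torch.Size([1, 2, 512, 128]) (14, 14)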
| 39.798246
| 79
| 0.612519
|
d1ca8dbc9522b8107e6ae3860301404cbb0a65f7
| 2,426
|
py
|
Python
|
script.py
|
RocqJones/api
|
20edbd6ca4a200df233fbd40d0cf9f2b57cade30
|
[
"Apache-2.0"
] | null | null | null |
script.py
|
RocqJones/api
|
20edbd6ca4a200df233fbd40d0cf9f2b57cade30
|
[
"Apache-2.0"
] | null | null | null |
script.py
|
RocqJones/api
|
20edbd6ca4a200df233fbd40d0cf9f2b57cade30
|
[
"Apache-2.0"
] | null | null | null |
import flask
import sqlite3
from flask import request, jsonify
"""
- The product database has 3 tables FRUITS, CERIALS AND VEGETABLES
- Our API allows users to filter by three fields: fruits, cerials, vegetables
"""
app = flask.Flask(__name__)
app.config["DEBUG"] = True
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def set_headers():
response = flask.Response()
response.headers["Access-Control-Allow-Origin"] = "*"
return response
@app.route('/', methods=['GET'])
def home():
set_headers()
return '''<h1>Products API</h1><p>An API to show demands for products according to number mentions.</p>'''
@app.route('/api/products/all', methods=['GET'])
def api_all():
set_headers()
conn = sqlite3.connect('products.db')
conn.row_factory = dict_factory
cur = conn.cursor()
all_fruits = cur.execute('SELECT * FROM fruits;').fetchall()
all_cerials = cur.execute('SELECT * FROM cerials;').fetchall()
all_vegetables = cur.execute('SELECT * FROM vegetables;').fetchall()
return jsonify(all_fruits, all_cerials, all_vegetables)
@app.errorhandler(404)
def page_not_found(e):
return "<h1>404</h1><p>The resource could not be found.</p>", 404
@app.route('/api/products=fruits', methods=['GET'])
def api_fruits():
set_headers()
conn = sqlite3.connect('products.db')
conn.row_factory = dict_factory
cur = conn.cursor()
all_fruits = cur.execute('SELECT * FROM fruits;').fetchall()
return jsonify(all_fruits)
@app.route('/api/products=cerials', methods=['GET'])
def api_cerials():
set_headers()
conn = sqlite3.connect('products.db')
conn.row_factory = dict_factory
cur = conn.cursor()
all_cerials = cur.execute('SELECT * FROM cerials;').fetchall()
return jsonify(all_cerials)
@app.route('/api/products=vegetables', methods=['GET'])
def api_vegetables():
set_headers()
conn = sqlite3.connect('products.db')
conn.row_factory = dict_factory
cur = conn.cursor()
all_vegetables = cur.execute('SELECT * FROM vegetables;').fetchall()
return jsonify(all_vegetables)
if __name__ == '__main__':
app.run()
"""
API Endpoints
http://127.0.0.1:5000/api/products/all
http://127.0.0.1:5000/api/products=fruits
http://127.0.0.1:5000/api/products=cerials
http://127.0.0.1:5000/api/products=vegetables
"""
| 28.541176
| 110
| 0.679308
|
252e6783537b4a8a94fd4c159e0bd5b0eb89f23f
| 11,507
|
py
|
Python
|
utils/callbacks.py
|
bubbliiiing/mobilenet-yolov4-keras
|
bc52cda271fc1e2dbf4e313bf11daeee6afc215a
|
[
"MIT"
] | 10
|
2022-01-20T12:11:13.000Z
|
2022-03-16T11:33:36.000Z
|
utils/callbacks.py
|
bubbliiiing/mobilenet-yolov4-keras
|
bc52cda271fc1e2dbf4e313bf11daeee6afc215a
|
[
"MIT"
] | 1
|
2022-01-29T08:51:26.000Z
|
2022-01-29T08:51:26.000Z
|
utils/callbacks.py
|
bubbliiiing/mobilenet-yolov4-keras
|
bc52cda271fc1e2dbf4e313bf11daeee6afc215a
|
[
"MIT"
] | 6
|
2021-12-31T08:38:55.000Z
|
2022-03-18T03:32:56.000Z
|
import os
import math
import keras
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import scipy.signal
import shutil
import numpy as np
from keras import backend as K
from PIL import Image
from tqdm import tqdm
from .utils import cvtColor, preprocess_input, resize_image
from .utils_bbox import DecodeBox
from .utils_map import get_coco_map, get_map
class LossHistory(keras.callbacks.Callback):
def __init__(self, log_dir):
self.log_dir = log_dir
self.losses = []
self.val_loss = []
os.makedirs(self.log_dir)
def on_epoch_end(self, epoch, logs={}):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.losses.append(logs.get('loss'))
self.val_loss.append(logs.get('val_loss'))
with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
f.write(str(logs.get('loss')))
f.write("\n")
with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
f.write(str(logs.get('val_loss')))
f.write("\n")
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('A Loss Curve')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))
plt.cla()
plt.close("all")
class ExponentDecayScheduler(keras.callbacks.Callback):
def __init__(self,
decay_rate,
verbose=0):
super(ExponentDecayScheduler, self).__init__()
self.decay_rate = decay_rate
self.verbose = verbose
self.learning_rates = []
def on_epoch_end(self, batch, logs=None):
learning_rate = K.get_value(self.model.optimizer.lr) * self.decay_rate
K.set_value(self.model.optimizer.lr, learning_rate)
if self.verbose > 0:
print('Setting learning rate to %s.' % (learning_rate))
class WarmUpCosineDecayScheduler(keras.callbacks.Callback):
def __init__(self, T_max, eta_min=0, verbose=0):
super(WarmUpCosineDecayScheduler, self).__init__()
self.T_max = T_max
self.eta_min = eta_min
self.verbose = verbose
self.init_lr = 0
self.last_epoch = 0
def on_train_begin(self, batch, logs=None):
self.init_lr = K.get_value(self.model.optimizer.lr)
def on_epoch_end(self, batch, logs=None):
learning_rate = self.eta_min + (self.init_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2
self.last_epoch += 1
K.set_value(self.model.optimizer.lr, learning_rate)
if self.verbose > 0:
print('Setting learning rate to %s.' % (learning_rate))
class ParallelModelCheckpoint(keras.callbacks.ModelCheckpoint):
def __init__(self, model, filepath, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1):
self.single_model = model
super(ParallelModelCheckpoint,self).__init__(filepath, monitor, verbose,save_best_only, save_weights_only,mode, period)
def set_model(self, model):
super(ParallelModelCheckpoint,self).set_model(self.single_model)
class EvalCallback(keras.callbacks.Callback):
def __init__(self, model_body, input_shape, anchors, anchors_mask, class_names, num_classes, val_lines, log_dir,\
map_out_path=".temp_map_out", max_boxes=100, confidence=0.05, nms_iou=0.5, letterbox_image=True, MINOVERLAP=0.5, eval_flag=True, period=1):
super(EvalCallback, self).__init__()
self.input_image_shape = K.placeholder(shape=(2, ))
self.sess = K.get_session()
self.model_body = model_body
self.input_shape = input_shape
self.anchors = anchors
self.anchors_mask = anchors_mask
self.class_names = class_names
self.num_classes = num_classes
self.val_lines = val_lines
self.log_dir = log_dir
self.map_out_path = map_out_path
self.max_boxes = max_boxes
self.confidence = confidence
self.nms_iou = nms_iou
self.letterbox_image = letterbox_image
self.MINOVERLAP = MINOVERLAP
self.eval_flag = eval_flag
self.period = period
#---------------------------------------------------------#
        #   In yolo_eval we post-process the raw predictions:
        #   decoding, non-maximum suppression, confidence thresholding, etc.
#---------------------------------------------------------#
self.boxes, self.scores, self.classes = DecodeBox(
self.model_body.get_output_at(0),
self.anchors,
self.num_classes,
self.input_image_shape,
self.input_shape,
anchor_mask = self.anchors_mask,
max_boxes = self.max_boxes,
confidence = self.confidence,
nms_iou = self.nms_iou,
letterbox_image = self.letterbox_image
)
self.maps = [0]
self.epoches = [0]
if self.eval_flag:
with open(os.path.join(self.log_dir, "epoch_map.txt"), 'a') as f:
f.write(str(0))
f.write("\n")
def get_map_txt(self, image_id, image, class_names, map_out_path):
f = open(os.path.join(map_out_path, "detection-results/"+image_id+".txt"),"w")
#---------------------------------------------------------#
        #   Convert the image to RGB here so grayscale images do not break prediction.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------------#
        #   Add gray padding bars to the image for a distortion-free resize;
        #   a plain resize would also work for inference.
#---------------------------------------------------------#
image_data = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)
#---------------------------------------------------------#
        #   Add the batch_size dimension and normalize the pixel values.
#---------------------------------------------------------#
image_data = np.expand_dims(preprocess_input(np.array(image_data, dtype='float32')), 0)
#---------------------------------------------------------#
        #   Feed the image into the network to get the predictions.
#---------------------------------------------------------#
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.model_body.get_input_at(0): image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0})
top_100 = np.argsort(out_scores)[::-1][:self.max_boxes]
out_boxes = out_boxes[top_100]
out_scores = out_scores[top_100]
out_classes = out_classes[top_100]
for i, c in enumerate(out_classes):
predicted_class = self.class_names[int(c)]
score = str(out_scores[i])
top, left, bottom, right = out_boxes[i]
if predicted_class not in class_names:
continue
f.write("%s %s %s %s %s %s\n" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))
f.close()
return
def on_epoch_end(self, epoch, logs=None):
temp_epoch = epoch + 1
if temp_epoch % self.period == 0 and self.eval_flag:
if not os.path.exists(self.map_out_path):
os.makedirs(self.map_out_path)
if not os.path.exists(os.path.join(self.map_out_path, "ground-truth")):
os.makedirs(os.path.join(self.map_out_path, "ground-truth"))
if not os.path.exists(os.path.join(self.map_out_path, "detection-results")):
os.makedirs(os.path.join(self.map_out_path, "detection-results"))
print("Get map.")
for annotation_line in tqdm(self.val_lines):
line = annotation_line.split()
image_id = os.path.basename(line[0]).split('.')[0]
#------------------------------#
                #   Read the image and convert it to RGB.
#------------------------------#
image = Image.open(line[0])
#------------------------------#
                #   Parse the ground-truth boxes from the annotation line.
#------------------------------#
gt_boxes = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
#------------------------------#
                #   Generate the detection-results txt for this image.
#------------------------------#
self.get_map_txt(image_id, image, self.class_names, self.map_out_path)
#------------------------------#
                #   Write the ground-truth txt for this image.
#------------------------------#
with open(os.path.join(self.map_out_path, "ground-truth/"+image_id+".txt"), "w") as new_f:
for box in gt_boxes:
left, top, right, bottom, obj = box
obj_name = self.class_names[obj]
new_f.write("%s %s %s %s %s\n" % (obj_name, left, top, right, bottom))
print("Calculate Map.")
try:
temp_map = get_coco_map(class_names = self.class_names, path = self.map_out_path)[1]
except:
temp_map = get_map(self.MINOVERLAP, False, path = self.map_out_path)
self.maps.append(temp_map)
self.epoches.append(temp_epoch)
with open(os.path.join(self.log_dir, "epoch_map.txt"), 'a') as f:
f.write(str(temp_map))
f.write("\n")
plt.figure()
plt.plot(self.epoches, self.maps, 'red', linewidth = 2, label='train map')
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Map %s'%str(self.MINOVERLAP))
plt.title('A Map Curve')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_map.png"))
plt.cla()
plt.close("all")
print("Get map done.")
shutil.rmtree(self.map_out_path)
| 42.618519
| 152
| 0.506474
|
2172bd88b1bf63d53e18dc63e445d756331a80c2
| 705
|
py
|
Python
|
src/d2/d2.py
|
nqryn/AoC-2019
|
d21ecafe8ecb34caa2a578bb691304193c9b03ac
|
[
"MIT"
] | null | null | null |
src/d2/d2.py
|
nqryn/AoC-2019
|
d21ecafe8ecb34caa2a578bb691304193c9b03ac
|
[
"MIT"
] | null | null | null |
src/d2/d2.py
|
nqryn/AoC-2019
|
d21ecafe8ecb34caa2a578bb691304193c9b03ac
|
[
"MIT"
] | null | null | null |
def program_assist(intcode):
i = 0
while True:
opcode = intcode[i]
if opcode == 99:
return intcode[0]
elif opcode == 1:
intcode[intcode[i+3]] = intcode[intcode[i+1]] + intcode[intcode[i+2]]
elif opcode == 2:
intcode[intcode[i+3]] = intcode[intcode[i+1]] * intcode[intcode[i+2]]
i += 4
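# Example from the Advent of Code 2019 day 2 description:
# program_assist([1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50]) returns 3500.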
def solve():
expected = 19690720
with open('d2.in', 'r') as fin:
intcode = [int(x) for x in fin.readline().split(',')]
for i in range(100):
intcode[1] = i
for j in range(100):
intcode[2] = j
# Force a copy of the list to be sent, so that we don't modify it
if program_assist(intcode[:]) == expected:
print(i * 100 + j)
return
if __name__ == '__main__':
solve()
| 24.310345
| 72
| 0.611348
|
d2db9df1f1096f10642401560cf08a16356638aa
| 40
|
py
|
Python
|
bulk_sched/__init__.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
bulk_sched/__init__.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | 96
|
2021-08-28T12:37:02.000Z
|
2022-03-23T04:25:12.000Z
|
bulk_sched/__init__.py
|
PrynsTag/oneBarangay
|
6a8d56003d85b8385e91f5c5d81208619023c1ee
|
[
"Apache-2.0"
] | null | null | null |
"""This is init file for bulk_sched."""
| 20
| 39
| 0.675
|
816efc1bc11aab9986f9847b7e8be859410992a9
| 1,262
|
py
|
Python
|
server/apps/comments/views.py
|
ojengwa/base
|
5b5529b307643f37fc34bdda412e2b90c3d444e1
|
[
"MIT"
] | 9
|
2019-03-08T22:12:38.000Z
|
2022-02-28T10:10:58.000Z
|
server/apps/comments/views.py
|
ojengwa/base
|
5b5529b307643f37fc34bdda412e2b90c3d444e1
|
[
"MIT"
] | 4
|
2020-06-05T18:49:03.000Z
|
2021-06-01T22:28:04.000Z
|
server/apps/comments/views.py
|
ojengwa/base
|
5b5529b307643f37fc34bdda412e2b90c3d444e1
|
[
"MIT"
] | 2
|
2020-05-26T10:58:31.000Z
|
2020-12-12T11:45:16.000Z
|
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .models import Comment
from .serializers import CommentSerializer
class CommentViewSet(ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
def create(self, request, *args, **kwargs):
"""Add content_type, object_id and author to the comment."""
self.request.data['content_type'] = kwargs['content_type'].pk
self.request.data['object_id'] = str(kwargs['pk'])
self.request.data['author'] = self.request.user.pk
return super().create(self.request, *args, **kwargs)
def list(self, request, *args, **kwargs):
"""list as in DRF but filter the queryset with kwargs.
this is why we (have to) overwrite the whole thing.
"""
queryset = self.queryset.filter(
content_type=kwargs['content_type']).filter(object_id=kwargs['pk'])
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| 36.057143
| 79
| 0.681458
|
890e8e3d8efc1521d299dd8e55dcfc3ecb3dd29e
| 11,243
|
py
|
Python
|
py2neo/wiring.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
py2neo/wiring.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
py2neo/wiring.py
|
VitalyRomanov/py2neo
|
2d0683cf2ab8b77b0c5bbba4eade0003c68d5905
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Low-level module for network communication.
This module provides a convenience socket wrapper class (:class:`.Wire`)
as well as classes for modelling IP addresses, based on tuples.
"""
from socket import AF_INET, AF_INET6, SHUT_WR
from monotonic import monotonic
from six import raise_from
from py2neo.compat import xstr, BaseRequestHandler
BOLT_PORT_NUMBER = 7687
class Address(tuple):
""" Address of a machine on a network.
"""
@classmethod
def parse(cls, s, default_host=None, default_port=None):
s = xstr(s)
if not isinstance(s, str):
raise TypeError("Address.parse requires a string argument")
if s.startswith("["):
# IPv6
host, _, port = s[1:].rpartition("]")
port = port.lstrip(":")
try:
port = int(port)
except (TypeError, ValueError):
pass
return cls((host or default_host or "localhost",
port or default_port or 0, 0, 0))
else:
# IPv4
host, _, port = s.partition(":")
try:
port = int(port)
except (TypeError, ValueError):
pass
return cls((host or default_host or "localhost",
port or default_port or 0))
def __new__(cls, iterable):
if isinstance(iterable, cls):
return iterable
n_parts = len(iterable)
inst = tuple.__new__(cls, iterable)
if n_parts == 2:
inst.__class__ = IPv4Address
elif n_parts == 4:
inst.__class__ = IPv6Address
else:
raise ValueError("Addresses must consist of either "
"two parts (IPv4) or four parts (IPv6)")
return inst
#: Address family (AF_INET or AF_INET6)
family = None
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, tuple(self))
@property
def host(self):
return self[0]
@property
def port(self):
return self[1]
@property
def port_number(self):
from socket import getservbyname
if self.port == "bolt":
# Special case, just because. The regular /etc/services
# file doesn't contain this, but it can be found in
# /usr/share/nmap/nmap-services if nmap is installed.
return BOLT_PORT_NUMBER
try:
return getservbyname(self.port)
except (OSError, TypeError):
# OSError: service/proto not found
# TypeError: getservbyname() argument 1 must be str, not X
try:
return int(self.port)
except (TypeError, ValueError) as e:
raise type(e)("Unknown port value %r" % self.port)
class IPv4Address(Address):
""" Address subclass, specifically for IPv4 addresses.
"""
family = AF_INET
def __str__(self):
return "{}:{}".format(*self)
class IPv6Address(Address):
""" Address subclass, specifically for IPv6 addresses.
"""
family = AF_INET6
def __str__(self):
return "[{}]:{}".format(*self)
class Wire(object):
""" Buffered socket wrapper for reading and writing bytes.
"""
__closed = False
__broken = False
@classmethod
def open(cls, address, timeout=None, keep_alive=False, on_broken=None):
""" Open a connection to a given network :class:`.Address`.
:param address:
:param timeout:
:param keep_alive:
:param on_broken: callback for when the wire is broken after a
successful connection has first been established (this does
not trigger if the connection never opens successfully)
:returns: :class:`.Wire` object
:raises WireError: if connection fails to open
"""
from socket import socket, SOL_SOCKET, SO_KEEPALIVE
address = Address(address)
s = socket(family=address.family)
if keep_alive:
s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1)
s.settimeout(timeout)
try:
s.connect(address)
except (IOError, OSError) as error:
raise_from(WireError("Cannot connect to %r" % (address,)), error)
return cls(s, on_broken=on_broken)
def __init__(self, s, on_broken=None):
s.settimeout(None) # ensure wrapped socket is in blocking mode
self.__socket = s
self.__active_time = monotonic()
self.__bytes_received = 0
self.__bytes_sent = 0
self.__input = bytearray()
self.__input_len = 0
self.__output = bytearray()
self.__on_broken = on_broken
def secure(self, verify=True, hostname=None):
""" Apply a layer of security onto this connection.
"""
from ssl import SSLContext, SSLError
try:
# noinspection PyUnresolvedReferences
from ssl import PROTOCOL_TLS
except ImportError:
from ssl import PROTOCOL_SSLv23
context = SSLContext(PROTOCOL_SSLv23)
else:
context = SSLContext(PROTOCOL_TLS)
if verify:
from ssl import CERT_REQUIRED
context.verify_mode = CERT_REQUIRED
context.check_hostname = bool(hostname)
else:
from ssl import CERT_NONE
context.verify_mode = CERT_NONE
context.load_default_certs()
try:
self.__socket = context.wrap_socket(self.__socket, server_hostname=hostname)
except (IOError, OSError) as error:
# TODO: add connection failure/diagnostic callback
if error.errno == 0:
raise BrokenWireError("Peer closed connection during TLS handshake; "
"server may not be configured for secure connections")
else:
raise WireError("Unable to establish secure connection with remote peer")
else:
self.__active_time = monotonic()
def read(self, n):
""" Read bytes from the network.
"""
while self.__input_len < n:
required = n - self.__input_len
requested = max(required, 16384)
try:
received = self.__socket.recv(requested)
except (IOError, OSError):
self.__set_broken("Wire broken")
else:
if received:
self.__active_time = monotonic()
new_bytes_received = len(received)
self.__input.extend(received)
self.__input_len += new_bytes_received
self.__bytes_received += new_bytes_received
else:
self.__set_broken("Network read incomplete "
"(received %d of %d bytes)" %
(self.__input_len, n))
data = self.__input[:n]
self.__input[:n] = []
self.__input_len -= n
return data
def peek(self):
""" Return any buffered unread data.
"""
return self.__input
def write(self, b):
""" Write bytes to the output buffer.
"""
self.__output.extend(b)
def send(self, final=False):
""" Send the contents of the output buffer to the network.
"""
if self.__closed:
raise WireError("Closed")
sent = 0
while self.__output:
try:
n = self.__socket.send(self.__output)
except (IOError, OSError):
self.__set_broken("Wire broken")
else:
self.__active_time = monotonic()
self.__bytes_sent += n
self.__output[:n] = []
sent += n
if final:
try:
self.__socket.shutdown(SHUT_WR)
except (IOError, OSError):
self.__set_broken("Wire broken")
return sent
def close(self):
""" Close the connection.
"""
try:
self.__socket.close()
except (IOError, OSError):
self.__set_broken("Wire broken")
else:
self.__closed = True
@property
def closed(self):
""" Flag indicating whether this connection has been closed locally.
"""
return self.__closed
@property
def broken(self):
""" Flag indicating whether this connection has been closed remotely.
"""
return self.__broken
@property
def local_address(self):
""" The local :class:`.Address` to which this connection is bound.
"""
return Address(self.__socket.getsockname())
@property
def remote_address(self):
""" The remote :class:`.Address` to which this connection is bound.
"""
return Address(self.__socket.getpeername())
@property
def bytes_sent(self):
return self.__bytes_sent
@property
def bytes_received(self):
return self.__bytes_received
def __set_broken(self, message):
idle_time = monotonic() - self.__active_time
message += (" after %.01fs idle (%r bytes sent, "
"%r bytes received)" % (idle_time,
self.__bytes_sent,
self.__bytes_received))
if callable(self.__on_broken):
self.__on_broken(message)
self.__broken = True
raise BrokenWireError(message, idle_time=idle_time,
bytes_sent=self.__bytes_sent,
bytes_received=self.__bytes_received)
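# A minimal, illustrative sketch of the Wire read/write cycle. The address and
# payload below are hypothetical; the call only succeeds if something is
# actually listening on that host and port.
def _wire_usage_example():
    wire = Wire.open(("127.0.0.1", 7687), timeout=5, keep_alive=True)
    wire.write(b"\x00\x00")   # buffered locally until send()
    wire.send()               # flush the output buffer to the network
    header = wire.read(2)     # block until exactly two bytes are available
    wire.close()
    return header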
class WireRequestHandler(BaseRequestHandler):
""" Base handler for use with the `socketserver` module that wraps
the request attribute as a :class:`.Wire` object.
"""
__wire = None
@property
def wire(self):
if self.__wire is None:
self.__wire = Wire(self.request)
return self.__wire
class WireError(OSError):
""" Raised when a connection error occurs.
:param idle_time:
:param bytes_sent:
:param bytes_received:
"""
def __init__(self, *args, **kwargs):
super(WireError, self).__init__(*args)
self.idle_time = kwargs.get("idle_time", None)
self.bytes_sent = kwargs.get("bytes_sent", 0)
self.bytes_received = kwargs.get("bytes_received", 0)
class BrokenWireError(WireError):
""" Raised when a connection is broken by the network or remote peer.
"""
| 31.492997 | 92 | 0.578315 |
eaced566b5736ace1dc2f4d8c81fef923af66a92 | 41,608 | py | Python | scraper/views.py | ezequiellagos/uninews | 7054feb63dfe42c4ad6fadf45c66e21ed183ae1b | ["MIT"] | 2 | 2019-06-24T20:41:56.000Z | 2020-11-07T01:10:05.000Z | scraper/views.py | ezequiellagos/uninews | 7054feb63dfe42c4ad6fadf45c66e21ed183ae1b | ["MIT"] | 9 | 2020-02-11T23:30:43.000Z | 2022-03-12T00:58:40.000Z | scraper/views.py | ezequiellagos/uninews | 7054feb63dfe42c4ad6fadf45c66e21ed183ae1b | ["MIT"] | null | null | null |
from django.shortcuts import render
from news.models import Universidad, Noticia
from bs4 import BeautifulSoup
from django.conf import settings
import feedparser, unicodedata, urllib.request, time, re, datetime, time, threading
import ssl
import dateutil.parser
import logging
import unidecode
result = []
# Create your views here.
def scraper(request):
hora = {}
hora["start"] = time.strftime("%H:%M:%S")
hora_inicio = time.time()
if settings.DEBUG == False:
        # Use threads for Production
logging.basicConfig( level=logging.DEBUG, format='[%(levelname)s] - %(threadName)-10s : %(message)s')
universidades = [
{'target':pucv, 'name':'PUCV'},
{'target':ucn, 'name':'UCN'},
{'target':utfsm, 'name':'UTFSM'},
{'target':uv, 'name':'UV'},
{'target':upla, 'name':'UPLA'},
{'target':udec, 'name':'UDEC'},
{'target':utalca, 'name':'UTALCA'},
{'target':ulagos, 'name':'ULAGOS'},
{'target':unap, 'name':'UNAP'},
{'target':ua, 'name':'UA'},
{'target':uda, 'name':'UDA'},
{'target':userena, 'name':'USERENA'},
{'target':uoh, 'name':'UOH'},
{'target':ucm, 'name':'UCM'},
{'target':ubiobio, 'name':'UBIOBIO'},
{'target':ucsc, 'name':'UCSC'},
{'target':ufro, 'name':'UFRO'},
{'target':uct, 'name':'UCT'},
{'target':uach, 'name':'UACH'},
{'target':uaysen, 'name':'UAYSEN'},
{'target':umag, 'name':'UMAG'},
{'target':uta, 'name':'UTA'}
]
        # Spawn one execution thread per university
for universidad in universidades:
threading.Thread(target=universidad['target'], name=universidad['name']).start()
else:
        # This way of running the scrapers is very slow,
        # but the uninews.datoslab.cl/scraper panel only shows error and debug information when this method is used
        # Use for Development only
        #pucv() # Working
        #ucn() # Working
        #utfsm() # Working
        #uv() # Working
        #upla() # Working #Needs review
        #udec() # Working
        #utalca() # Working #Needs review
        #ulagos() # Working
        #ucsc() # Working
        #ubiobio() # Working
        #uda() # Working
        #userena() # Working #Needs review
        # unap() # Working
        #ua() # Working
        # uoh() Could not be scraped
        # ucm() # Working
        # ufro() # Working
        # uct() # Runs on Angular, use Selenium
        # uach()
        # uaysen()
        umag() # Working - review the summary extraction
        # uta() # Working
hora_fin = time.time()
hora["finish"] = time.strftime("%H:%M:%S")
hora["total"] = hora_fin - hora_inicio
result.append({'status':"", 'error_message':'', 'universidad':'', 'titulo':'', 'bajada':'', 'fecha':'', 'link_noticia':'', 'link_recurso':'', 'categoria':''})
return render(request, "scraper/scraper.html", {'result':result, 'hora':hora})
def saveNew(new):
try:
        # Look the article up in the database;
        # if it is not found an exception is raised and the except block runs
n = Noticia.objects.get(titulo=new['titulo'], id_universidad__alias = new['universidad'].alias)
print(new['universidad'].alias + ": " + new['titulo'] + " | Existe")
e = "Existe"
        # If it is found, append a message to be shown on the debug screen
result.append({'status':"exist", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
except Noticia.DoesNotExist as e:
        # If the article is not found, create it
n = Noticia(
titulo=new['titulo'],
titulo_busqueda=formatear_busqueda(new['titulo']),
bajada=new['bajada'],
bajada_busqueda=formatear_busqueda(new['bajada']),
fecha=new['fecha'],
link_noticia=new['link_noticia'],
link_recurso=new['link_recurso'],
id_universidad=new['universidad'],
categoria=new['categoria'],
contador_visitas=0
)
        n.save() # Save the article in the database
print(new['universidad'].alias + ": " + new['titulo'] + " | Insertada")
e = "Insertada"
result.append({'status':"ok", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
def formatear_busqueda(text):
    # Any change here must also be applied to search_fix in the news views
text = unidecode.unidecode(text).lower()
text = text.replace('"', "")
text = text.replace('?', "")
text = text.replace('¿', "")
text = text.replace(':', "")
text = text.replace('#', "")
text = text.replace('.', "")
text = text.replace(',', "")
text = text.replace(';', "")
text = text.replace('(', "")
text = text.replace(')', "")
return text
def formatear_fecha(fecha, universidad):
if universidad == "uv":
fecha = fecha.split()
dia = fecha[0]
mes = fecha[2].lower()
anno = fecha[4]
elif universidad == "upla":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "ufsm":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "ucn":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == "pucv":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[3].lower()
anno = fecha[5]
elif universidad == "udec":
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == "utalca":
fecha = fecha.lower().split()
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
elif universidad == "ulagos":
fecha = fecha.lower().split('/')
dia = fecha[0]
mes = fecha[1]
anno = fecha[2]
elif universidad == "ucsc":
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == "ubiobio":
fecha = fecha.split()
dia = fecha[1]
mes = fecha[2].lower()
anno = fecha[3]
elif universidad == 'uda':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'userena':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'unap':
fecha = fecha.lower().split()
dia = fecha[1]
mes = fecha[3]
anno = fecha[5]
elif universidad == 'ua':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'ucm':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'ufro':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'uta':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
elif universidad == 'umag':
dia = dateutil.parser.parse(fecha).strftime('%d')
mes = dateutil.parser.parse(fecha).strftime('%m')
anno = dateutil.parser.parse(fecha).strftime('%Y')
if mes == "enero" or mes == "jan" or mes == '1':
mes = '01'
elif mes == "febrero" or mes == "feb" or mes == '2':
mes = '02'
elif mes == "marzo" or mes == "mar" or mes == '3':
mes = '03'
elif mes == "abril" or mes == "apr" or mes == '4':
mes = '04'
elif mes == "mayo" or mes == "may" or mes == '5':
mes = '05'
elif mes == "junio" or mes == "jun" or mes == '6':
mes = '06'
elif mes == "julio" or mes == "jul" or mes == '7':
mes = '07'
elif mes == "agosto" or mes == "aug" or mes == '8':
mes = '08'
elif mes == "septiembre" or mes == "sep" or mes == '9':
mes = '09'
elif mes == "octubre" or mes == "oct" or mes == '10':
mes = '10'
elif mes == "noviembre" or mes == "nov" or mes == '11':
mes = '11'
elif mes == "diciembre" or mes == "dec" or mes == '12':
mes = '12'
if dia == "1":
dia = '01'
elif dia == "2":
dia = '02'
elif dia == "3" :
dia = '03'
elif dia == "4":
dia = '04'
elif dia == "5":
dia = '05'
elif dia == "6":
dia = '06'
elif dia == "7":
dia = '07'
elif dia == "8":
dia = '08'
elif dia == "9":
dia = '09'
#fecha = dia + "/" + mes + "/" + anno
fecha = anno + "-" + mes + "-" + dia
return fecha
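# Illustrative conversions (hypothetical inputs) based on the per-university
# formats handled above; every branch normalises to 'YYYY-MM-DD':
#   formatear_fecha('15 enero 2020', 'utalca')            -> '2020-01-15'
#   formatear_fecha('03/09/2021', 'ulagos')               -> '2021-09-03'
#   formatear_fecha('2021-09-03T12:00:00+00:00', 'udec')  -> '2021-09-03'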
# Cleans up each category value
def setCategoria(categoria = ''):
if categoria == '' or categoria == None:
return 'sin-categoria'
else:
categoria = categoria.lower()
categoria = elimina_tildes(categoria)
categoria = categoria.replace(" ", "-")
categoria = categoria.replace("&", "y")
return categoria
def elimina_tildes(s):
return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
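# Illustrative results (hypothetical inputs):
#   elimina_tildes('educación')            -> 'educacion'
#   setCategoria('Gestión Institucional')  -> 'gestion-institucional'
#   setCategoria()                         -> 'sin-categoria'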
# Universidad de Playa Ancha
def upla():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UPLA')
url_rss = "https://www.upla.cl/noticias/feed/" # URL de feed RSS
feed = feedparser.parse( url_rss ) # Se obtiene el XML y se procesa
for item in feed['items']:
try:
titulo = item['title']
bajada = item['summary']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "upla")
            # Get and normalise the category so it can be looked up
categoria_busqueda = setCategoria(item['category'])
if categoria_busqueda == 'gestion-institucional':
categoria_busqueda = 'gestion'
            # Open each category page and collect all of its articles
contents = urllib.request.urlopen("https://www.upla.cl/noticias/category/"+categoria_busqueda).read()
bs = BeautifulSoup(contents, "html.parser")
            # Adjust for categories whose markup differs
if categoria_busqueda == 'coronavirus':
articles = bs.find_all("div", ["timeline-content"])
else:
articles = bs.find_all("article", ["item-list"])
            # Get the title of each article in the category
for article in articles:
if categoria_busqueda == 'coronavirus':
titulo_articulo = article.h2.a.text
else:
titulo_articulo = article.find("a").text
                # If the article title matches the title taken from the XML, grab its image and stop looking
if titulo_articulo == titulo:
imagen = article.find("img")['src']
break
else:
imagen = ''
            # Save the article to the database
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
            # If an error occurs, record it so it can be shown
            # on the debug screen
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Pontificia Universidad Católica de Valparaíso
def pucv():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='PUCV')
nombre_uni = "pucv"
context = ssl._create_unverified_context()
contents = urllib.request.urlopen("https://www.pucv.cl/pucv/site/tax/port/all/taxport_1___1.html", context=context).read()
bs = BeautifulSoup(contents, "html.parser")
articulos = bs.find_all("article")
for articulo in articulos:
try:
link = articulo.a['href']
link = "https://www.pucv.cl" + link.replace("..", "")
fecha = articulo.find("span",{"class":"fecha aright"})
imagen = articulo.img['src']
imagen = "https://pucv.cl" + imagen.replace("..","")
pagina_noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
titulo = bs_noticia.find("h1", { "class" : "titular" }).text
if fecha is None:
fecha = time.strftime("%Y-%m-%d")
else:
fecha = formatear_fecha(fecha.text,nombre_uni)
try:
bajada = bs_noticia.find("p",{ "class" : "bajada" }).text
except Exception as e:
bajada = ''
result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            # The category may not be present on the page
try:
newpage = urllib.request.urlopen(link).read()
bs_cate = BeautifulSoup(newpage, "html.parser")
categoria = bs_cate.find("div",{ "class" : "breadcrumbs" })
categorias = categoria.findAll("a")
category = categorias[2].text
categoria_busqueda = setCategoria(category)
except Exception as e:
categoria_busqueda = 'sin-categoria'
result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
saveNew({'status':"ok", 'error_message':'', 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Católica del Norte
def ucn():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCN')
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
d = feedparser.parse("https://www.noticias.ucn.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
nombre_uni = "ucn"
link = (e.link)
categoria_busqueda = setCategoria((e.category))
fecha = e.published
fecha = formatear_fecha(fecha,nombre_uni)
description = e.description.split("/>")
bajada = description[1]
cuerpo = e['content']
contenido = cuerpo[0].value
imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url").replace("-150x150", "")
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
#Universidad Técnico Federico Santa María
def utfsm():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTFSM')
d = feedparser.parse("https://noticias.usm.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
nombre_uni = "ufsm"
link = (e.link)
categoria_busqueda = setCategoria((e.category))
bajada = (e.description).replace("[…]", "").strip()
fecha = e.published
fecha = formatear_fecha(fecha,nombre_uni)
cuerpo = e['content']
contenido = cuerpo[0].value
try:
imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
except:
imagen = ''
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Valparaíso
def uv():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UV')
contents = urllib.request.urlopen("https://www.uv.cl/pdn/archivo/").read()
bs = BeautifulSoup(contents, "html.parser")
divs = bs.find_all("div", ["item n_caja borde6", "item n_caja borde6 fin"])
for div in divs:
try:
fecha = div.find("div", ["fecha"]).text
fecha = formatear_fecha(fecha, "uv")
link = div.a['href']
link = "https://www.uv.cl/pdn" + link.replace("..", "")
            # Open the article page
pagina_noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
titulo = bs_noticia.find("div", id="n_titulo").text
bajada = bs_noticia.find("div", id="n_bajada").text
try:
imagen = bs_noticia.find("div", id="n_clipex").img['src']
imagen = "https://www.uv.cl" + imagen
except TypeError:
imagen = div.find("img", ["sombra"])['src']
imagen = "https://www.uv.cl/pdn" + imagen.replace("..", "")
categoria_busqueda = setCategoria()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Concepción
def udec():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UDEC')
url_rss = "https://noticias.udec.cl/feed/"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text.strip()
fecha = item['published']
fecha = formatear_fecha(fecha, "udec")
categoria_busqueda = setCategoria(item['category'])
imagen = BeautifulSoup(urllib.request.urlopen(link).read(), "html.parser").find_all('img', {'class': 'attachment-large size-large'})[1]['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Talca
def utalca():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTALCA')
contents = urllib.request.urlopen("https://www.utalca.cl/noticias/").read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find('div', {'class': 'section-news'})
items = items.find_all("div", {"class": "card-news"})
    items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = item.a['href']
titulo = item.find("h5").text
if item.div.p is None:
categoria_busqueda = setCategoria()
else:
categoria_busqueda = setCategoria(item.div.p.text)
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class": "interior-body"}).h6.text
fecha = bs_noticia.find("div", {"class": "interior-body"}).span.text
fecha = formatear_fecha(fecha, 'utalca')
imagen = bs_noticia.find("img", {"class": "attachment-post-thumbnail size-post-thumbnail wp-post-image"})['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Los Lagos
def ulagos():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='ULAGOS')
items = []
categorias = ['campus-osorno', 'campus-pto-montt', 'sede-santiago', 'sede-chiloe']
for categoria in categorias:
contents = urllib.request.urlopen("https://www.ulagos.cl/category/" + categoria + "/").read()
bs = BeautifulSoup(contents, "html.parser")
items.extend(bs.find_all("div", {"class": "ultimas-noticias"}))
for item in items:
try:
link = item.a['href']
titulo = item.find("div", {"class": "overflow_titulo_noticias"}).text.strip()
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class":"title-post"}).span.text.strip()
categoria_busqueda = bs_noticia.find("div", {"class":"category-post"}).a.text.lower().strip()
categoria_busqueda = setCategoria(categoria_busqueda)
fecha = bs_noticia.find("div", {"class":"conten-post-date"}).text.strip()
fecha = formatear_fecha(fecha, "ulagos")
if bs_noticia.find("img", {"class": "img-destacado"}) is None:
imagen = ''
else:
imagen = bs_noticia.find("img", {"class": "img-destacado"})["src"]
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Católica de la Santísima Concepción
def ucsc():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCSC')
contents = urllib.request.urlopen("https://www.ucsc.cl/noticias/").read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("article", {"class": "hentry-news"})
    items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = item.header.h2.a['href']
titulo = item.header.h2.a.text
fecha = item.header.p.time['datetime']
fecha = formatear_fecha(fecha, 'ucsc')
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
bajada = bs_noticia.find("div", {"class": "entry-summary"}).p.text
try:
imagen = bs_noticia.find("article", {"class": "hentry hentry-news"}).header.span.img['src']
except Exception as e:
imagen = ''
categoria_busqueda = bs_noticia.find("a", {"rel": "category tag"})
categoria_busqueda = setCategoria(categoria_busqueda.text)
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad del Bío-Bío
def ubiobio():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UBIOBIO')
d = feedparser.parse("http://noticias.ubiobio.cl/feed/")
for e in d.entries:
try:
titulo = (e.title)
link = (e.link)
categoria_busqueda = setCategoria(e.category)
bajada = (e.description).replace("[…]", "")
bs_bajada = BeautifulSoup(bajada, "html.parser")
bajada = bs_bajada.find("p").text
fecha = e.published
fecha = formatear_fecha(fecha,'ubiobio')
cuerpo = e['content']
contenido = cuerpo[0].value
imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad Arturo Prat
def unap():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UNAP')
url_base = 'https://www.unap.cl'
urls_news = {
'investigacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_13_48__1.html',
'vinculacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_38_39__1.html',
'acreditacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_83_113__1.html',
'casa-central': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_15__1.html',
'sede-victoria': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_17__1.html',
'noticias-arica': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_12__1.html',
'noticias-antofagasta': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_14__1.html',
'noticias-santiago': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_16__1.html'
}
for cat, url in urls_news.items():
contents = urllib.request.urlopen(url).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("div", {"class": "taxport-item"})
        items = list(set(items)) # Remove duplicate items
for item in items:
try:
link = url_base + item.find("div", {"class": "titular"}).a['href'].strip()
titulo = item.find("div", {"class": "titular"}).a.text.strip()
fecha = item.find("div", {"class": "fecha"}).text.strip()
fecha = formatear_fecha(fecha, 'unap')
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
try:
bajada = bs_noticia.find(id='content').find('h2', {'class': 'bajada'}).text.strip()
except Exception:
bajada = bs_noticia.find("div", {"class": "CUERPO"}).find_all('p')
for b in bajada:
b = b.text.strip()
                        if b: # If the summary is not empty, keep it and stop searching
bajada = b
break
try:
imagen = url_base + bs_noticia.find("div", {"class": "CUERPO"}).find("img")['src'].strip()
except Exception:
imagen = ''
categoria_busqueda = setCategoria(cat)
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Antofagasta
def ua():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UA')
url_rss = "http://www.comunicacionesua.cl/feed/"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
bajada = item['description']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ua")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'qode-post-image'}).img['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de Atacama
def uda():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UDA')
url_rss = "http://www.uda.cl/index.php?option=com_content&view=category&layout=blog&id=15&Itemid=253&format=feed&type=atom"
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "uda")
categoria_busqueda = setCategoria(item['category'])
imagen = "http://www.uda.cl/" + BeautifulSoup(item['summary'], "html.parser").find('img')['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de La Serena
# Región de Coquimbo
def userena():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='USERENA')
url_rss = ['http://www.userena.cl/actualidad-uls.feed?type=rss',
'http://www.userena.cl/cultura-y-extension.feed?type=rss',
'http://www.userena.cl/dgae.feed?type=rss']
feeds = []
for url in url_rss:
feeds.append(feedparser.parse( url ))
for feed in feeds:
for item in feed['items']:
try:
titulo = item['title']
bajada = BeautifulSoup(item['summary'], "html.parser").find_all('p')[2].text
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "userena")
categoria_busqueda = setCategoria(item['category'])
imagen = BeautifulSoup(item['summary'], "html.parser").p.img['src']
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de O'Higgins
def uoh():
# https://www.uoh.cl/
# https://www.uoh.cl/#noticias-y-eventos
logging.debug('Lanzado')
# universidad = Universidad.objects.get(alias='UOH')
# contents = urllib.request.urlopen("https://www.uoh.cl/#noticias-y-eventos").read()
logging.debug('Deteniendo')
# Universidad Católica del Maule
def ucm():
# http://portal.ucm.cl/
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCM')
url_rss = "https://portal.ucm.cl/feed" # URL de feed RSS
feed = feedparser.parse( url_rss ) # Se obtiene el XML y se procesa
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ucm")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-image'}).img['src']
bajada = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-paragraph'}).find_all('p')[1].text
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# Universidad de la Frontera
def ufro():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UFRO')
url_rss = 'https://www.ufro.cl/index.php/noticias/12-destacadas?format=feed&type=rss'
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "ufro")
categoria_busqueda = setCategoria(item['category'])
noticia = urllib.request.urlopen(link).read()
imagen = 'https://www.ufro.cl' + BeautifulSoup(noticia, "html.parser").find('td', {'id': 'imagen'}).p.img['src']
bajada = BeautifulSoup(noticia, "html.parser").find('p', {'class': 'bajada'}).text.strip()
if not bajada:
bajada = BeautifulSoup(noticia, "html.parser").find('table', {'class': 'tnoticia'}).tbody.tr.find_all('td')[1].p.text.strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.ufro.cl/
# Universidad Católica de Temuco
def uct():
    # This page is rendered with Angular; Selenium would be needed
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UCT')
url_base = 'https://www.uct.cl/actualidad/?typing=noticias'
# contents = urllib.request.urlopen(url_base).read()
# bs = BeautifulSoup(contents, "html.parser")
# items = bs.find_all("div", {"class": "cardwdetail"})
# print('------------------')
# print( items )
# print('------------------')
logging.debug('Deteniendo')
# https://www.uct.cl/
pass
# Universidad Austral de Chile
def uach():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UACH')
url = ''
logging.debug('Deteniendo')
# https://www.uach.cl/
pass
# Universidad de Aysén
def uaysen():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UAYSEN')
url = ''
logging.debug('Deteniendo')
# https://uaysen.cl/
pass
# Universidad de Magallanes
def umag():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UMAG')
url = 'http://www.umag.cl/vcm/?page_id=459'
contents = urllib.request.urlopen(url).read()
bs = BeautifulSoup(contents, "html.parser")
items = bs.find_all("div", {"class": "not-col11"})
for item in items:
try:
link = item.find('a', {'class': 'link'})['href']
noticia = urllib.request.urlopen(link).read()
bs_noticia = BeautifulSoup(noticia, "html.parser")
titulo = bs_noticia.find('div', {'class': 'post-title'}).h2.a.text.strip()
fecha = bs_noticia.find('span', {'class': 'post-dates'}).text.strip()
fecha = formatear_fecha(fecha, "umag")
categoria_busqueda = setCategoria('')
try:
imagen = bs_noticia.find('div', {'class': 'entry'}).find('a').find('img')['src']
except:
imagen = ''
bajada = bs_noticia.find('div', {'class': 'entry'}).p.text.strip()
if not bajada:
bajada = bs_noticia.find('div', {'class': 'entry'}).find_all('p')[2].text.strip()
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# http://www.umag.cl/
# Universidad de Tarapacá
def uta():
logging.debug('Lanzado')
universidad = Universidad.objects.get(alias='UTA')
url_rss = 'https://www.uta.cl/index.php/feed/'
feed = feedparser.parse( url_rss )
for item in feed['items']:
try:
titulo = item['title']
link = item['link']
fecha = item['published']
fecha = formatear_fecha(fecha, "uta")
try:
categoria_busqueda = setCategoria(item['category'])
except:
categoria_busqueda = setCategoria()
bajada = item['summary'].strip()
noticia = urllib.request.urlopen(link).read()
try:
imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'wp-block-image'}).figure.a.img['src']
except:
try:
imagen = BeautifulSoup(noticia, "html.parser").find('figure', {'class': 'wp-block-image'}).a.img['src']
except:
imagen = ''
saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
except Exception as e:
result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
logging.debug('Deteniendo')
# https://www.uta.cl/
| 45.226087 | 266 | 0.593852 |
cdbfec758d4ad3a29e301f4ced8cac5b8d6dfa38 | 3,098 | py | Python | rlpyt/samplers/parallel/worker.py | BorenTsai/rlpyt | 58637fc3a1949b7dd39719a5bb8a055bfb33a845 | ["MIT"] | null | null | null | rlpyt/samplers/parallel/worker.py | BorenTsai/rlpyt | 58637fc3a1949b7dd39719a5bb8a055bfb33a845 | ["MIT"] | null | null | null | rlpyt/samplers/parallel/worker.py | BorenTsai/rlpyt | 58637fc3a1949b7dd39719a5bb8a055bfb33a845 | ["MIT"] | null | null | null |
import psutil
import time
import torch
from rlpyt.utils.collections import AttrDict
from rlpyt.utils.logging import logger
from rlpyt.utils.seed import set_seed
def initialize_worker(rank, seed=None, cpu=None, torch_threads=None):
log_str = f"Sampler rank {rank} initialized"
cpu = [cpu] if isinstance(cpu, int) else cpu
p = psutil.Process()
try:
if cpu is not None:
p.cpu_affinity(cpu)
cpu_affin = p.cpu_affinity()
except AttributeError:
cpu_affin = "UNAVAILABLE MacOS"
log_str += f", CPU affinity {cpu_affin}"
torch_threads = (1 if torch_threads is None and cpu is not None else
torch_threads) # Default to 1 to avoid possible MKL hang.
if torch_threads is not None:
torch.set_num_threads(torch_threads)
log_str += f", Torch threads {torch.get_num_threads()}"
if seed is not None:
set_seed(seed)
time.sleep(0.3) # (so the printing from set_seed is not intermixed)
log_str += f", Seed {seed}"
logger.log(log_str)
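# Illustrative call with hypothetical values: pin worker rank 0 to CPU core 2,
# use a single torch thread and a fixed seed.
# initialize_worker(rank=0, seed=123, cpu=2, torch_threads=1)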
def sampling_process(common_kwargs, worker_kwargs):
"""Arguments fed from the Sampler class in master process."""
c, w = AttrDict(**common_kwargs), AttrDict(**worker_kwargs)
initialize_worker(w.rank, w.seed, w.cpus, c.torch_threads)
envs = [c.EnvCls(**c.env_kwargs) for _ in range(w.n_envs)]
collector = c.CollectorCls(
rank=w.rank,
envs=envs,
samples_np=w.samples_np,
batch_T=c.batch_T,
TrajInfoCls=c.TrajInfoCls,
agent=c.get("agent", None), # Optional depending on parallel setup.
sync=w.get("sync", None),
step_buffer_np=w.get("step_buffer_np", None),
global_B=c.get("global_B", 1),
env_ranks=w.get("env_ranks", None),
)
agent_inputs, traj_infos = collector.start_envs(c.max_decorrelation_steps)
collector.start_agent()
if c.get("eval_n_envs", 0) > 0:
eval_envs = [c.EnvCls(**c.env_kwargs) for _ in range(c.eval_n_envs)]
eval_collector = c.eval_CollectorCls(
rank=w.rank,
envs=eval_envs,
TrajInfoCls=c.TrajInfoCls,
traj_infos_queue=c.eval_traj_infos_queue,
max_T=c.eval_max_T,
agent=c.get("agent", None),
sync=w.get("sync", None),
step_buffer_np=w.get("eval_step_buffer_np", None),
)
else:
eval_envs = list()
ctrl = c.ctrl
ctrl.barrier_out.wait()
while True:
collector.reset_if_needed(agent_inputs) # Outside barrier?
ctrl.barrier_in.wait()
if ctrl.quit.value:
break
# if ctrl.do_eval.value:
# print("PASS")
# eval_collector.collect_evaluation(ctrl.itr.value) # Traj_infos to queue inside.
else:
agent_inputs, traj_infos, completed_infos = collector.collect_batch(
agent_inputs, traj_infos, ctrl.itr.value)
for info in completed_infos:
c.traj_infos_queue.put(info)
ctrl.barrier_out.wait()
for env in envs + eval_envs:
env.close()
| 34.808989 | 94 | 0.633635 |
0754d7665c155cacd38aa8c369273ff94c19bf65 | 4,009 | py | Python | vaultier/accounts/models.py | dz0ny/Vaultier | e23d86c7576f4785b4e369242d7b5f7125e4d8c6 | ["BSD-3-Clause"] | 30 | 2015-07-13T11:11:23.000Z | 2021-01-25T14:21:18.000Z | vaultier/accounts/models.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | ["BSD-3-Clause"] | null | null | null | vaultier/accounts/models.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | ["BSD-3-Clause"] | 31 | 2015-08-10T12:10:16.000Z | 2020-09-18T09:43:28.000Z |
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
from django.db.models.deletion import CASCADE, PROTECT
from django.db.models.signals import pre_save, post_save
from accounts.business.fields import MemberStatusField
from libs.changes.changes import ChangesMixin
from libs.lowercasefield.lowercasefield import LowerCaseCharField
from .business.managers import UserManager, LostKeyManager, MemberManager, \
TokenManager
import random
from datetime import datetime
class User(ChangesMixin, AbstractBaseUser):
nickname = models.CharField(max_length=255, blank=False, null=False)
public_key = models.CharField(max_length=1024)
email = LowerCaseCharField(max_length=255, unique=True)
is_active = models.BooleanField(default=True)
objects = UserManager()
REQUIRED_FIELDS = []
USERNAME_FIELD = 'email'
class Meta:
db_table = u'vaultier_user'
class Token(ChangesMixin, models.Model):
TOKEN_LENGTH = 64
"""
Length of generated token
"""
token = models.CharField(max_length=TOKEN_LENGTH, unique=True,
db_index=True)
user = models.ForeignKey('accounts.User', on_delete=CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
last_used_at = models.DateTimeField(null=True)
objects = TokenManager()
class Meta:
db_table = u'vaultier_token'
def save(self, *args, **kwargs):
if not self.token:
self.token = self.generate_token()
if not self.last_used_at:
self.last_used_at = datetime.utcnow()
return super(Token, self).save(*args, **kwargs)
def generate_token(self):
chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW' \
'XYZ!"#$%&\()*+,-./:;<=>?@[\]^_`{|}~'
unique = ''.join(random.choice(chars) for i in range(
Token.TOKEN_LENGTH))
return unique
    def __unicode__(self):
        return self.token
class LostKey(models.Model):
objects = LostKeyManager()
hash = models.TextField(null=False)
created_by = models.ForeignKey('accounts.User', on_delete=PROTECT,
related_name='distracted')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=False)
used = models.BooleanField(default=False)
class Meta:
db_table = u'vaultier_lost_key'
class Member(ChangesMixin, models.Model):
node = models.ForeignKey(
'nodes.Node', on_delete=CASCADE, related_name='membership',
default=None, null=True)
user = models.ForeignKey(
'accounts.User', on_delete=CASCADE, null=True,
default=None)
invitation_hash = models.CharField(max_length=64, null=True, unique=True)
invitation_email = LowerCaseCharField(max_length=1024, null=True)
workspace_key = models.CharField(max_length=4096)
status = MemberStatusField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
'accounts.User', on_delete=PROTECT, related_name='members_created')
objects = MemberManager()
class Meta:
db_table = u'vaultier_member'
unique_together = ('user', 'node')
def is_invitation(self):
if self.status == MemberStatusField.STATUS_INVITED:
return True
return False
def save(self, *args, **kwargs):
if not self.invitation_hash:
self.invitation_hash = Member.objects.generate_invitation_hash()
return super(Member, self).save(*args, **kwargs)
def register_signals():
pre_save.connect(LostKey.objects.on_pre_save, sender=LostKey)
post_save.connect(LostKey.objects.send_notification, sender=LostKey)
post_save.connect(
Member.objects.send_transfer_workspace_key_info, sender=Member)
| 33.974576 | 79 | 0.697431 |
3d261d9db57579988fc127d7c4c99545c3c23aea | 5,029 | py | Python | utilities/python/crispr_convert/main.py | knightjdr/prohits-viz-containers | a696e8f2a3c9fca398aa2141f64c6b2003cff8d0 | ["MIT"] | null | null | null | utilities/python/crispr_convert/main.py | knightjdr/prohits-viz-containers | a696e8f2a3c9fca398aa2141f64c6b2003cff8d0 | ["MIT"] | null | null | null | utilities/python/crispr_convert/main.py | knightjdr/prohits-viz-containers | a696e8f2a3c9fca398aa2141f64c6b2003cff8d0 | ["MIT"] | null | null | null |
import argparse
import os
import pandas as pd
'''
Usage:
python3 main.py \
-f folder \
-t bagel
output: [tool]-converted.txt
'''
def convert():
options = parse_args()
files = get_files(options.folder)
df = pd.DataFrame()
if options.tool == 'bagel':
df = merge_and_add_conditions(files)
if options.tool == 'drugz':
df = merge_and_add_conditions(files)
if options.tool == 'mageck':
df = convert_mageck(files)
if options.tool == 'ranks':
df = convert_ranks(files)
df.to_csv(f'{options.tool}-converted.txt', sep='\t', index=False)
def parse_args():
parser = argparse.ArgumentParser(description='Convert CRISPR output files to a single file compatible with ProHits-viz')
parser.add_argument(
'--folder', '-f',
help='Folder containing the files to merge/convert',
required=True,
)
parser.add_argument(
'--tool', '-t',
help='The tool used for CRISPR analysis. Should be one of "bagel", "drugz", "mageck" or "ranks"',
required=True,
)
return parser.parse_args()
def get_files(folder):
files = os.listdir(folder)
return [f'{folder}/{file}' for file in files]
def extract_condition_from_filename(file):
base = os.path.basename(file)
return os.path.splitext(base)[0]
def get_column_names(file):
df = pd.read_csv(file, sep='\t')
return df.columns.values.tolist()
def merge_and_add_conditions(files):
'''
    Files are simply merged together, adding one additional column at the start
specifying the condition (using the filename)
'''
data = []
for file in files:
df = pd.read_csv(file, sep='\t')
condition = extract_condition_from_filename(file)
df['condition'] = condition
data.append(df)
merged = pd.concat(data, axis=0)
merged.reset_index(drop=True, inplace=True)
return (move_condition_column(merged))
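# Illustrative effect (hypothetical files): merging 'control.txt' and 'treated.txt'
# yields one frame whose leading 'condition' column holds 'control' or 'treated',
# taken from the source filenames.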
def move_condition_column(df):
return df[ ['condition'] + [ col for col in df.columns if col != 'condition' ] ]
def convert_mageck(files):
'''
MAGeCK has two output formats. The first from the "test" command, or RRA, just requires
merging of files and adding the condition name. The second from the "mle" command has each
condition with its own set of columns in a single file. For example:
| Gene | sgRNA | HL60|beta | HL60|z | HL60|p-value | HL60|fdr | HL60|wald-p-value | HL60|wald-fdr | KBM7|beta | etc... |
'''
columns = get_column_names(files[0])
# Check for "test" output format
if 'neg|score' in columns:
return merge_and_add_conditions(files)
def extract_condition(column):
return column.split('|')[0]
def filter_condition_columns(column):
return 'beta' in column
data = []
desired_column_names = ['Gene', 'sgRNA', 'beta', 'z', 'p-value', 'fdr', 'wald-p-value', 'wald-fdr']
for file in files:
df = pd.read_csv(file, sep='\t')
columns = get_column_names(file)
conditions = list(map(extract_condition, filter(filter_condition_columns, columns)))
for condition in conditions:
condition_columns = ['Gene', 'sgRNA', f'{condition}|beta', f'{condition}|z', f'{condition}|p-value', f'{condition}|fdr', f'{condition}|wald-p-value', f'{condition}|wald-fdr']
df_partial = df[condition_columns].copy()
df_partial.columns = desired_column_names
df_partial['condition'] = condition
data.append(df_partial)
merged = pd.concat(data, axis=0)
merged.reset_index(drop=True, inplace=True)
return (move_condition_column(merged))
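# Illustrative reshaping for the MAGeCK "mle" format (hypothetical conditions):
# wide columns such as ['Gene', 'sgRNA', 'HL60|beta', 'HL60|z', ..., 'KBM7|beta', ...]
# become long-format rows, one block per condition, with the columns
# ['condition', 'Gene', 'sgRNA', 'beta', 'z', 'p-value', 'fdr', 'wald-p-value', 'wald-fdr'].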
def convert_ranks(files):
'''
RANKS has a single output format, but for v1 it can have two entries for the same gene, one
with a negative (depletion) score and one with a positive score. Sometimes it may have only one score if
only depletion was calculated. v2 produces a single score for each gene encapsulating both
depletion and enrichment.
'''
merged = merge_and_add_conditions(files)
    # Check for v1-formatted data with duplicated rows
if merged.duplicated(['condition','Gene']).any():
df_depletion = merged[merged['RANKS_score'] <= 0]
df_depletion.columns = ['condition', 'Gene', 'depletion_score', 'depletion_p-value', 'depletion_FDR', 'depletion_#_of_sgRNAs_considered']
df_enrichment = merged[merged['RANKS_score'] > 0]
df_enrichment.columns = ['condition', 'Gene', 'enrichment_score', 'enrichment_p-value', 'enrichment_FDR', 'enrichment_#_of_sgRNAs_considered']
df = pd.merge(df_depletion, df_enrichment, how='outer', on=['condition', 'Gene'])
nanFillValues = {
'depletion_score': 0,
'depletion_p-value': 1,
'depletion_FDR': 1,
'depletion_#_of_sgRNAs_considered': 0,
'enrichment_score': 0,
'enrichment_p-value': 1,
'enrichment_FDR': 1,
'enrichment_#_of_sgRNAs_considered': 0,
}
df.fillna(value=nanFillValues, inplace=True)
df[['depletion_#_of_sgRNAs_considered', 'enrichment_#_of_sgRNAs_considered']] = df[['depletion_#_of_sgRNAs_considered', 'enrichment_#_of_sgRNAs_considered']].astype('int64')
        return df
return merged
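# Illustrative v1 handling (hypothetical rows): a gene scored -1.2 and +0.8 under
# the same condition is split into depletion_* and enrichment_* column sets and
# re-joined on ['condition', 'Gene'], with missing halves filled with neutral values.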
if __name__ == "__main__":
convert()
| 33.085526 | 180 | 0.696958 |
0d0f92c6a33f123c48f5df67e85d8f1b6ce64cc9 | 6,162 | py | Python | test/test_remote.py | netconstructor/home-assistant | ce1a5de6070c0350996136bb5ccf571995085b23 | ["MIT"] | null | null | null | test/test_remote.py | netconstructor/home-assistant | ce1a5de6070c0350996136bb5ccf571995085b23 | ["MIT"] | null | null | null | test/test_remote.py | netconstructor/home-assistant | ce1a5de6070c0350996136bb5ccf571995085b23 | ["MIT"] | 1 | 2020-11-13T10:06:21.000Z | 2020-11-13T10:06:21.000Z |
"""
test.remote
~~~~~~~~~~~
Tests Home Assistant remote methods and classes.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
import homeassistant as ha
import homeassistant.remote as remote
import homeassistant.components.http as http
API_PASSWORD = "test1234"
HTTP_BASE_URL = "http://127.0.0.1:{}".format(remote.SERVER_PORT)
HA_HEADERS = {remote.AUTH_HEADER: API_PASSWORD}
hass, slave, master_api = None, None, None
def _url(path=""):
""" Helper method to generate urls. """
return HTTP_BASE_URL + path
def setUpModule(): # pylint: disable=invalid-name
""" Initalizes a Home Assistant server and Slave instance. """
global hass, slave, master_api
hass = ha.HomeAssistant()
hass.bus.listen('test_event', lambda _: _)
hass.states.set('test.test', 'a_state')
http.setup(hass,
{http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD}})
hass.start()
master_api = remote.API("127.0.0.1", API_PASSWORD)
# Start slave
local_api = remote.API("127.0.0.1", API_PASSWORD, 8124)
slave = remote.HomeAssistant(master_api, local_api)
http.setup(slave,
{http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
http.CONF_SERVER_PORT: 8124}})
slave.start()
def tearDownModule(): # pylint: disable=invalid-name
""" Stops the Home Assistant server and slave. """
global hass, slave
hass.stop()
slave.stop()
class TestRemoteMethods(unittest.TestCase):
""" Test the homeassistant.remote module. """
def test_validate_api(self):
""" Test Python API validate_api. """
self.assertEqual(remote.APIStatus.OK, remote.validate_api(master_api))
self.assertEqual(remote.APIStatus.INVALID_PASSWORD,
remote.validate_api(
remote.API("127.0.0.1", API_PASSWORD + "A")))
def test_get_event_listeners(self):
""" Test Python API get_event_listeners. """
local_data = hass.bus.listeners
remote_data = remote.get_event_listeners(master_api)
for event in remote_data:
self.assertEqual(local_data.pop(event["event"]),
event["listener_count"])
self.assertEqual(len(local_data), 0)
def test_fire_event(self):
""" Test Python API fire_event. """
test_value = []
def listener(event): # pylint: disable=unused-argument
""" Helper method that will verify our event got called. """
test_value.append(1)
hass.listen_once_event("test.event_no_data", listener)
remote.fire_event(master_api, "test.event_no_data")
hass._pool.block_till_done()
self.assertEqual(1, len(test_value))
def test_get_state(self):
""" Test Python API get_state. """
self.assertEqual(
hass.states.get('test.test'),
remote.get_state(master_api, 'test.test'))
def test_get_states(self):
""" Test Python API get_state_entity_ids. """
self.assertEqual(
remote.get_states(master_api), hass.states.all())
def test_set_state(self):
""" Test Python API set_state. """
self.assertTrue(remote.set_state(master_api, 'test.test', 'set_test'))
self.assertEqual('set_test', hass.states.get('test.test').state)
def test_is_state(self):
""" Test Python API is_state. """
self.assertTrue(
remote.is_state(master_api, 'test.test',
hass.states.get('test.test').state))
def test_get_services(self):
""" Test Python API get_services. """
local_services = hass.services.services
for serv_domain in remote.get_services(master_api):
local = local_services.pop(serv_domain["domain"])
self.assertEqual(local, serv_domain["services"])
def test_call_service(self):
""" Test Python API call_service. """
test_value = []
def listener(service_call): # pylint: disable=unused-argument
""" Helper method that will verify that our service got called. """
test_value.append(1)
hass.services.register("test_domain", "test_service", listener)
remote.call_service(master_api, "test_domain", "test_service")
hass._pool.block_till_done()
self.assertEqual(1, len(test_value))
class TestRemoteClasses(unittest.TestCase):
""" Test the homeassistant.remote module. """
def test_home_assistant_init(self):
""" Test HomeAssistant init. """
self.assertRaises(
ha.HomeAssistantError, remote.HomeAssistant,
remote.API('127.0.0.1', API_PASSWORD + 'A', 8124))
def test_statemachine_init(self):
""" Tests if remote.StateMachine copies all states on init. """
self.assertEqual(len(hass.states.all()),
len(slave.states.all()))
for state in hass.states.all():
self.assertEqual(
state, slave.states.get(state.entity_id))
def test_statemachine_set(self):
""" Tests if setting the state on a slave is recorded. """
slave.states.set("remote.test", "remote.statemachine test")
# Wait till slave tells master
slave._pool.block_till_done()
# Wait till master gives updated state
hass._pool.block_till_done()
self.assertEqual("remote.statemachine test",
slave.states.get("remote.test").state)
def test_eventbus_fire(self):
""" Test if events fired from the eventbus get fired. """
test_value = []
def listener(event): # pylint: disable=unused-argument
""" Helper method that will verify our event got called. """
test_value.append(1)
slave.listen_once_event("test.event_no_data", listener)
slave.bus.fire("test.event_no_data")
# Wait till slave tells master
slave._pool.block_till_done()
# Wait till master gives updated event
hass._pool.block_till_done()
self.assertEqual(1, len(test_value))
| 30.50495 | 79 | 0.630964 |
b7694603e32ac00ae9781f39a799f7b51463802f | 11,652 | py | Python | gdscript-docs-maker/gdscript_docs_maker/modules/gdscript_objects.py | Gamemap/godot-console | 21eb060a17af2ae32c5f2eda464d76517a1fb7ac | ["MIT"] | 372 | 2020-04-29T07:43:46.000Z | 2022-03-28T21:33:18.000Z | gdscript-docs-maker/gdscript_docs_maker/modules/gdscript_objects.py | Shockblast/godot-console | 400fd0c496ecb9a03c3ab841019d45f017d29002 | ["MIT"] | 48 | 2020-05-13T10:23:26.000Z | 2022-03-09T12:32:00.000Z | gdscript-docs-maker/gdscript_docs_maker/modules/gdscript_objects.py | Shockblast/godot-console | 400fd0c496ecb9a03c3ab841019d45f017d29002 | ["MIT"] | 39 | 2020-05-27T19:21:26.000Z | 2022-03-09T07:09:41.000Z |
"""Converts the json representation of GDScript classes as dictionaries into objects
"""
import itertools
import operator
import re
from dataclasses import dataclass
from enum import Enum
from typing import List, Tuple
from .make_markdown import make_bold, make_code_inline, make_list, surround_with_html
from .utils import build_re_pattern
BUILTIN_VIRTUAL_CALLBACKS = [
"_process",
"_physics_process",
"_input",
"_unhandled_input",
"_gui_input",
"_draw",
"_get_configuration_warning",
"_ready",
"_enter_tree",
"_exit_tree",
"_get",
"_get_property_list",
"_notification",
"_set",
"_to_string",
"_clips_input",
"_get_minimum_size",
"_gui_input",
"_make_custom_tooltip",
]
TYPE_CONSTRUCTOR = "_init"
@dataclass
class Metadata:
"""Container for metadata for Elements"""
tags: List[str]
category: str
def extract_metadata(description: str) -> Tuple[str, Metadata]:
"""Finds metadata keys in the provided description and returns the description
without the corresponding lines, as well as the metadata. In the source text,
Metadata should be of the form key: value, e.g. category: Category Name
"""
tags: List[str] = []
category: str = ""
lines: List[str] = description.split("\n")
description_trimmed: List[str] = []
pattern_tags = build_re_pattern("tags")
pattern_category = build_re_pattern("category")
    for line in lines:
line_stripped: str = line.strip().lower()
match_tags = re.match(pattern_tags, line_stripped)
match_category = re.match(pattern_category, line_stripped)
if match_tags:
tags = match_tags.group(1).split(",")
tags = list(map(lambda t: t.strip(), tags))
elif match_category:
category = match_category.group(1)
else:
description_trimmed.append(line)
metadata: Metadata = Metadata(tags, category)
return "\n".join(description_trimmed), metadata
class FunctionTypes(Enum):
METHOD = 1
VIRTUAL = 2
STATIC = 3
@dataclass
class ProjectInfo:
name: str
description: str
version: str
@staticmethod
def from_dict(data: dict):
return ProjectInfo(data["name"], data["description"], data["version"])
@dataclass
class Element:
"""Base type for all main GDScript symbol types. Contains properties common to
Signals, Functions, Member variables, etc."""
signature: str
name: str
description: str
is_deprecated: str
def __post_init__(self):
_description, self.metadata = extract_metadata(self.description)
self.description = _description.strip("\n")
def get_heading_as_string(self) -> str:
"""Returns an empty string. Virtual method to get a list of strings representing
the element as a markdown heading."""
is_deprecated_strike: str = ""
if self.is_deprecated:
is_deprecated_strike = "~~"
return "{}{}{}{}".format(
is_deprecated_strike,
self.name,
is_deprecated_strike,
" " + surround_with_html("(deprecated)", "small") if self.is_deprecated else ""
)
def get_unique_attributes_as_markdown(self) -> List[str]:
"""Returns an empty list. Virtual method to get a list of strings describing the
unique attributes of this element."""
return []
@staticmethod
def from_dict(data: dict) -> "Element":
return Element(data["signature"], data["name"], data["description"], data["is_deprecated"])
@dataclass
class Signal(Element):
arguments: List[str]
@staticmethod
def from_dict(data: dict) -> "Signal":
return Signal(
data["signature"],
data["name"],
data["description"],
"is_deprecated" in data and data["is_deprecated"],
data["arguments"],
)
@dataclass
class Argument:
"""Container for function arguments."""
name: str
type: str
@dataclass
class Function(Element):
kind: FunctionTypes
return_type: str
arguments: List[Argument]
rpc_mode: int
def __post_init__(self):
super().__post_init__()
self.signature = self.signature.replace("-> null", "-> void", 1)
self.return_type = self.return_type.replace("null", "void", 1)
def summarize(self) -> List[str]:
return [self.return_type, self.signature]
def get_heading_as_string(self) -> str:
"""Returns an empty list. Virtual method to get a list of strings representing
the element as a markdown heading."""
heading: str = super().get_heading_as_string()
if self.kind == FunctionTypes.VIRTUAL:
heading += " " + surround_with_html("(virtual)", "small")
if self.kind == FunctionTypes.STATIC:
heading += " " + surround_with_html("(static)", "small")
return heading
@staticmethod
def from_dict(data: dict) -> "Function":
kind: FunctionTypes = FunctionTypes.METHOD
if data["is_static"]:
kind = FunctionTypes.STATIC
elif data["is_virtual"]:
kind = FunctionTypes.VIRTUAL
return Function(
data["signature"],
data["name"],
data["description"],
"is_deprecated" in data and data["is_deprecated"],
kind,
data["return_type"],
Function._get_arguments(data["arguments"]),
data["rpc_mode"] if "rpc_mode" in data else 0,
)
@staticmethod
def _get_arguments(data: List[dict]) -> List[Argument]:
return [Argument(entry["name"], entry["type"],) for entry in data]
@dataclass
class Enumeration(Element):
"""Represents an enum with its constants"""
values: dict
@staticmethod
def from_dict(data: dict) -> "Enumeration":
return Enumeration(
data["signature"],
data["name"],
data["description"],
"is_deprecated" in data and data["is_deprecated"],
data["value"],
)
@dataclass
class Member(Element):
"""Represents a property or member variable"""
type: str
default_value: str
is_exported: bool
setter: str
getter: str
def summarize(self) -> List[str]:
return [self.type, self.name]
def get_unique_attributes_as_markdown(self) -> List[str]:
setget: List[str] = []
if self.setter and not self.setter.startswith("_"):
setget.append(make_bold("Setter") + ": " + make_code_inline(self.setter))
if self.getter and not self.getter.startswith("_"):
setget.append(make_bold("Getter") + ": " + make_code_inline(self.getter))
setget = make_list(setget)
if len(setget) > 0:
setget.append("")
return setget
@staticmethod
def from_dict(data: dict) -> "Member":
return Member(
data["signature"],
data["name"],
data["description"],
"is_deprecated" in data and data["is_deprecated"],
data["data_type"],
data["default_value"],
data["export"],
data["setter"],
data["getter"],
)
@dataclass
class GDScriptClass:
name: str
extends: str
description: str
path: str
functions: List[Function]
members: List[Member]
signals: List[Signal]
enums: List[Enumeration]
sub_classes: List["GDScriptClass"]
def __post_init__(self):
description, self.metadata = extract_metadata(self.description)
self.description = description.strip("\n ")
elements = self.functions + self.members + self.signals + self.enums
self.symbols = {element.name for element in elements}
@staticmethod
def from_dict(data: dict):
# the extends_class field is a list in json even though it only has one
# class.
extends: str = data["extends_class"][0] if data["extends_class"] else ""
return GDScriptClass(
data["name"],
extends,
data["description"],
data["path"],
_get_functions(data["methods"])
+ _get_functions(data["static_functions"], is_static=True),
_get_members(data["members"]),
_get_signals(data["signals"]),
[
Enumeration.from_dict(entry)
for entry in data["constants"]
if entry["data_type"] == "Dictionary"
and not entry["name"].startswith("_")
],
[GDScriptClass.from_dict(data) for data in data["sub_classes"]],
)
def get_extends_tree(self, classes: "GDScriptClasses") -> List[str]:
"""Returns the list of ancestors for this class, starting from self.extends.
Arguments:
- classes: a GDScriptClasses list of GDScriptClass this object is part
of.
"""
extends: str = self.extends
extends_tree: List[str] = []
while extends != "":
extends_tree.append(extends)
extends = next((cls.extends for cls in classes if cls.name == extends), "")
return extends_tree
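# Illustrative sketch (editor's addition): for a hypothetical chain in which
# "PlayerStateMachine" extends "StateMachine" and "StateMachine" extends "Node",
# calling get_extends_tree(classes) on PlayerStateMachine yields
# ["StateMachine", "Node"]; the walk stops once a parent is no longer found in
# the provided GDScriptClasses list.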
class GDScriptClasses(list):
"""Container for a list of GDScriptClass objects
Provides methods for filtering and grouping GDScript classes"""
def __init__(self, *args):
super(GDScriptClasses, self).__init__(args[0])
self.class_index = {
gdscript_class.name: gdscript_class.symbols for gdscript_class in self
}
def _get_grouped_by(self, attribute: str) -> List[List[GDScriptClass]]:
if not self or attribute not in self[0].__dict__:
return []
groups = []
get_attribute = operator.attrgetter(attribute)
data = sorted(self, key=get_attribute)
for key, group in itertools.groupby(data, get_attribute):
groups.append(list(group))
return groups
def get_grouped_by_category(self) -> List[List[GDScriptClass]]:
"""Returns a list of lists of GDScriptClass objects, grouped by their `category`
attribute"""
return self._get_grouped_by("category")
@staticmethod
def from_dict_list(data: List[dict]):
return GDScriptClasses(
[GDScriptClass.from_dict(entry) for entry in data if "name" in entry]
)
def _get_signals(data: List[dict]) -> List[Signal]:
return [Signal.from_dict(entry) for entry in data]
def _get_functions(data: List[dict], is_static: bool = False) -> List[Function]:
"""Returns a list of valid functions to put in the class reference. Skips
built-in virtual callbacks, except for constructor functions marked for
inclusion, and private methods."""
functions: List[Function] = []
for entry in data:
name: str = entry["name"]
if name in BUILTIN_VIRTUAL_CALLBACKS:
continue
if name == TYPE_CONSTRUCTOR and not entry["arguments"]:
continue
_, metadata = extract_metadata(entry["description"])
is_virtual: bool = "virtual" in metadata.tags and not is_static
is_private: bool = name.startswith("_") and not is_virtual and name != TYPE_CONSTRUCTOR
if is_private:
continue
function_data: dict = entry
function_data["is_virtual"] = is_virtual
function_data["is_static"] = is_static
functions.append(Function.from_dict(function_data))
return functions
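# Illustrative sketch (editor's addition): given methods named "_process",
# "_init" with no arguments, "_init(msg)", "_helper", and "move(delta)",
# _get_functions() above keeps only "_init(msg)" and "move(delta)". Built-in
# virtual callbacks and private helpers are skipped, and the constructor is
# documented only when it declares arguments; methods tagged "virtual" in their
# metadata are the other exception to the leading-underscore rule.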
def _get_members(data: List[dict]) -> List[Member]:
return [
Member.from_dict(entry) for entry in data if not entry["name"].startswith("_")
]
| 29.876923
| 99
| 0.625901
|
c2cd672d6b8c993954f16b1675a7dda26fb79192
| 12,433
|
py
|
Python
|
tests/integration-tests/tests/update/test_update.py
|
agobeaux/aws-parallelcluster
|
ec337c6b8341f9b84616b6bbbe8687a0a5f71126
|
[
"Apache-2.0"
] | null | null | null |
tests/integration-tests/tests/update/test_update.py
|
agobeaux/aws-parallelcluster
|
ec337c6b8341f9b84616b6bbbe8687a0a5f71126
|
[
"Apache-2.0"
] | null | null | null |
tests/integration-tests/tests/update/test_update.py
|
agobeaux/aws-parallelcluster
|
ec337c6b8341f9b84616b6bbbe8687a0a5f71126
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import time
import boto3
import configparser
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from time_utils import minutes
from tests.common.scaling_common import (
get_batch_ce_max_size,
get_batch_ce_min_size,
get_max_asg_capacity,
get_min_asg_capacity,
watch_compute_nodes,
)
from tests.common.schedulers_common import SlurmCommands
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "centos7", "slurm")
@pytest.mark.usefixtures("os", "scheduler", "instance")
def test_update_slurm(region, pcluster_config_reader, clusters_factory, test_datadir, s3_bucket_factory):
# Create S3 bucket for pre/post install scripts
bucket_name = s3_bucket_factory()
bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
bucket.upload_file(str(test_datadir / "preinstall.sh"), "scripts/preinstall.sh")
bucket.upload_file(str(test_datadir / "postinstall.sh"), "scripts/postinstall.sh")
# Create cluster with initial configuration
init_config_file = pcluster_config_reader()
cluster = clusters_factory(init_config_file)
# Command executors
command_executor = RemoteCommandExecutor(cluster)
slurm_commands = SlurmCommands(command_executor)
# Create shared dir for script results
command_executor.run_remote_command("mkdir /shared/script_results")
# Update cluster with new configuration
updated_config_file = pcluster_config_reader(config_file="pcluster.config.update.ini", bucket=bucket_name)
cluster.config_file = str(updated_config_file)
cluster.update()
# Get initial, new and old compute instances references, to be able to execute specific tests in different group of
# instances
# Get initial compute nodes
initial_compute_nodes = slurm_commands.get_compute_nodes()
# Get new compute nodes
new_compute_nodes = _add_compute_nodes(slurm_commands)
# Old compute node instance refs
old_compute_node = initial_compute_nodes[0]
old_compute_instance = _get_instance(region, cluster.cfn_name, old_compute_node)
# New compute node instance refs
new_compute_node = new_compute_nodes[0]
new_compute_instance = _get_instance(region, cluster.cfn_name, new_compute_node)
# Read updated configuration
updated_config = configparser.ConfigParser()
updated_config.read(updated_config_file)
# Check new ASG settings
_check_initial_queue(region, cluster.cfn_name, updated_config.getint("cluster default", "initial_queue_size"))
_check_max_queue(region, cluster.cfn_name, updated_config.getint("cluster default", "max_queue_size"))
# Check new S3 resources
_check_s3_read_resource(region, cluster, updated_config.get("cluster default", "s3_read_resource"))
_check_s3_read_write_resource(region, cluster, updated_config.get("cluster default", "s3_read_write_resource"))
# Check new Additional IAM policies
_check_role_attached_policy(region, cluster, updated_config.get("cluster default", "additional_iam_policies"))
# Check old and new compute instance types
_check_compute_instance_type(old_compute_instance, cluster.config.get("cluster default", "compute_instance_type"))
_check_compute_instance_type(new_compute_instance, updated_config.get("cluster default", "compute_instance_type"))
# Check old and new instance life cycle
_check_ondemand_instance(old_compute_instance)
_check_spot_instance(new_compute_instance)
# Check old and new compute root volume size
_check_compute_root_volume_size(
command_executor,
slurm_commands,
test_datadir,
cluster.config.get("cluster default", "compute_root_volume_size"),
old_compute_node,
)
_check_compute_root_volume_size(
command_executor,
slurm_commands,
test_datadir,
updated_config.get("cluster default", "compute_root_volume_size"),
new_compute_node,
)
# Check old and new extra_json
_check_extra_json(command_executor, slurm_commands, old_compute_node, "test_value_1")
_check_extra_json(command_executor, slurm_commands, new_compute_node, "test_value_2")
# Check pre and post install on new nodes
_check_script(
command_executor,
slurm_commands,
new_compute_node,
"preinstall",
updated_config.get("cluster default", "pre_install_args"),
)
_check_script(
command_executor,
slurm_commands,
new_compute_node,
"postinstall",
updated_config.get("cluster default", "post_install_args"),
)
def _check_max_queue(region, stack_name, queue_size):
asg_max_size = get_max_asg_capacity(region, stack_name)
assert_that(asg_max_size).is_equal_to(queue_size)
def _check_initial_queue(region, stack_name, queue_size):
asg_min_size = get_min_asg_capacity(region, stack_name)
assert_that(asg_min_size).is_equal_to(queue_size)
def _add_compute_nodes(slurm_commands, number_of_nodes=1):
"""
Add new compute nodes to the cluster.
It is required because some changes will be available only on new compute nodes.
    :param slurm_commands: the SlurmCommands executor used to submit jobs
:param number_of_nodes: number of nodes to add
:return an array containing the new compute nodes only
"""
initial_compute_nodes = slurm_commands.get_compute_nodes()
number_of_nodes = len(initial_compute_nodes) + number_of_nodes
# submit a job to perform a scaling up action and have new instances
result = slurm_commands.submit_command("sleep 1", nodes=number_of_nodes)
slurm_commands.assert_job_submitted(result.stdout)
estimated_scaleup_time = 8
watch_compute_nodes(
scheduler_commands=slurm_commands,
max_monitoring_time=minutes(estimated_scaleup_time),
number_of_nodes=number_of_nodes,
)
return [node for node in slurm_commands.get_compute_nodes() if node not in initial_compute_nodes]
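# Usage sketch (editor's addition): new_nodes = _add_compute_nodes(slurm_commands, 2)
# submits a "sleep 1" job wide enough to force a scale-up, waits up to the
# estimated scale-up window for the new instances to join, and returns only the
# node names that were not present before the call.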
def _get_instance(region, stack_name, host, none_expected=False):
hostname = "{0}.{1}.compute.internal".format(host, region)
ec2_resource = boto3.resource("ec2", region_name=region)
instance = next(
iter(
ec2_resource.instances.filter(
Filters=[
{"Name": "tag:Application", "Values": [stack_name]},
{"Name": "private-dns-name", "Values": [hostname]},
]
)
or []
),
None,
)
if not none_expected:
assert_that(instance).is_not_none()
return instance
def _check_compute_instance_type(instance, compute_instance_type):
assert_that(instance.instance_type).is_equal_to(compute_instance_type)
def _check_spot_instance(instance):
assert_that(instance.instance_lifecycle).is_equal_to("spot")
def _check_ondemand_instance(instance):
assert_that(not hasattr(instance, "instance_life_cycle"))
def _check_compute_root_volume_size(command_executor, slurm_commands, test_datadir, compute_root_volume_size, host):
# submit a job to retrieve compute root volume size and save in a file
result = slurm_commands.submit_script(str(test_datadir / "slurm_get_root_volume_size.sh"), host=host)
job_id = slurm_commands.assert_job_submitted(result.stdout)
slurm_commands.wait_job_completed(job_id)
slurm_commands.assert_job_succeeded(job_id)
# read volume size from file
time.sleep(5) # wait a bit to be sure to have the file
result = command_executor.run_remote_command("cat /shared/{0}_root_volume_size.txt".format(host))
assert_that(result.stdout).matches(r"{size}G".format(size=compute_root_volume_size))
def _retrieve_script_output(slurm_commands, script_name, host):
# submit a job to retrieve pre and post install outputs
command = "cp /tmp/{0}_out.txt /shared/script_results/{1}_{0}_out.txt".format(script_name, host)
result = slurm_commands.submit_command(command, host=host)
job_id = slurm_commands.assert_job_submitted(result.stdout)
slurm_commands.wait_job_completed(job_id)
slurm_commands.assert_job_succeeded(job_id)
time.sleep(5) # wait a bit to be sure to have the files
def _check_script(command_executor, slurm_commands, host, script_name, script_arg):
_retrieve_script_output(slurm_commands, script_name, host)
result = command_executor.run_remote_command("cat /shared/script_results/{1}_{0}_out.txt".format(script_name, host))
assert_that(result.stdout).matches(r"{0}-{1}".format(script_name, script_arg))
def _retrieve_extra_json(slurm_commands, host):
# submit a job to retrieve the value of the custom key test_key provided with extra_json
command = "jq .test_key /etc/chef/dna.json > /shared/{0}_extra_json.txt".format(host)
result = slurm_commands.submit_command(command, host=host)
job_id = slurm_commands.assert_job_submitted(result.stdout)
slurm_commands.wait_job_completed(job_id)
slurm_commands.assert_job_succeeded(job_id)
time.sleep(5) # wait a bit to be sure to have the files
def _check_extra_json(command_executor, slurm_commands, host, expected_value):
_retrieve_extra_json(slurm_commands, host)
result = command_executor.run_remote_command("cat /shared/{0}_extra_json.txt".format(host))
assert_that(result.stdout).is_equal_to('"{0}"'.format(expected_value))
def _check_role_inline_policy(region, cluster, policy_name, policy_statement):
iam_client = boto3.client("iam", region_name=region)
root_role = cluster.cfn_resources.get("RootRole")
statement = (
iam_client.get_role_policy(RoleName=root_role, PolicyName=policy_name)
.get("PolicyDocument")
.get("Statement")[0]
.get("Resource")[0]
)
assert_that(statement).is_equal_to(policy_statement)
def _check_s3_read_resource(region, cluster, s3_arn):
_check_role_inline_policy(region, cluster, "S3Read", s3_arn)
def _check_s3_read_write_resource(region, cluster, s3_arn):
_check_role_inline_policy(region, cluster, "S3ReadWrite", s3_arn)
def _check_role_attached_policy(region, cluster, policy_arn):
iam_client = boto3.client("iam", region_name=region)
root_role = cluster.cfn_resources.get("RootRole")
result = iam_client.list_attached_role_policies(RoleName=root_role)
policies = [p["PolicyArn"] for p in result["AttachedPolicies"]]
    assert_that(policies).contains(policy_arn)
@pytest.mark.dimensions("eu-west-1", "c5.xlarge", "alinux2", "awsbatch")
@pytest.mark.usefixtures("os", "scheduler", "instance")
def test_update_awsbatch(region, pcluster_config_reader, clusters_factory, test_datadir, s3_bucket_factory):
# Create cluster with initial configuration
init_config_file = pcluster_config_reader()
cluster = clusters_factory(init_config_file)
# Verify initial configuration
_verify_initialization(region, cluster, cluster.config)
# Update cluster with new configuration
updated_config_file = pcluster_config_reader(config_file="pcluster.config.update.ini")
cluster.config_file = str(updated_config_file)
cluster.update()
# Read updated configuration
updated_config = configparser.ConfigParser()
updated_config.read(updated_config_file)
# verify updated parameters
_verify_initialization(region, cluster, updated_config)
def _verify_initialization(region, cluster, config):
# Verify initial settings
_test_max_vcpus(region, cluster.cfn_name, config.getint("cluster default", "max_vcpus"))
_test_min_vcpus(region, cluster.cfn_name, config.getint("cluster default", "min_vcpus"))
def _test_max_vcpus(region, stack_name, vcpus):
asg_max_size = get_batch_ce_max_size(stack_name, region)
assert_that(asg_max_size).is_equal_to(vcpus)
def _test_min_vcpus(region, stack_name, vcpus):
asg_min_size = get_batch_ce_min_size(stack_name, region)
assert_that(asg_min_size).is_equal_to(vcpus)
| 39.22082
| 120
| 0.754766
|
83afe6e665b52e607801668cf12a0796e6b163fc
| 2,298
|
py
|
Python
|
app/balltracking/pubnubpython/endpoints/presence/here_now.py
|
gdmgent-1718-wot/interactive-wall
|
af7ecff126b1ee9c85c270fe13d1338aa790c34b
|
[
"Apache-2.0"
] | 146
|
2015-01-05T03:14:53.000Z
|
2022-03-16T16:51:52.000Z
|
app/balltracking/pubnubpython/endpoints/presence/here_now.py
|
gdmgent-1718-wot/interactive-wall
|
af7ecff126b1ee9c85c270fe13d1338aa790c34b
|
[
"Apache-2.0"
] | 48
|
2015-01-15T15:27:41.000Z
|
2022-03-21T14:17:05.000Z
|
app/balltracking/pubnubpython/endpoints/presence/here_now.py
|
gdmgent-1718-wot/interactive-wall
|
af7ecff126b1ee9c85c270fe13d1338aa790c34b
|
[
"Apache-2.0"
] | 128
|
2015-01-05T03:40:59.000Z
|
2022-03-02T20:50:58.000Z
|
from pubnub import utils
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.models.consumer.presence import PNHereNowResult
class HereNow(Endpoint):
HERE_NOW_PATH = "/v2/presence/sub-key/%s/channel/%s"
HERE_NOW_GLOBAL_PATH = "/v2/presence/sub-key/%s"
def __init__(self, pubnub):
Endpoint.__init__(self, pubnub)
self._channels = []
self._channel_groups = []
self._include_state = False
self._include_uuids = True
def channels(self, channels):
utils.extend_list(self._channels, channels)
return self
def channel_groups(self, channel_groups):
utils.extend_list(self._channel_groups, channel_groups)
return self
def include_state(self, should_include_state):
self._include_state = should_include_state
return self
def include_uuids(self, include_uuids):
self._include_uuids = include_uuids
return self
def custom_params(self):
params = {}
if len(self._channel_groups) > 0:
params['channel-group'] = utils.join_items_and_encode(self._channel_groups)
if self._include_state:
params['state'] = "1"
if not self._include_uuids:
params['disable_uuids'] = "1"
return params
def build_path(self):
if len(self._channels) == 0 and len(self._channel_groups) == 0:
return HereNow.HERE_NOW_GLOBAL_PATH % self.pubnub.config.subscribe_key
else:
return HereNow.HERE_NOW_PATH % (self.pubnub.config.subscribe_key,
utils.join_channels(self._channels))
def http_method(self):
return HttpMethod.GET
def validate_params(self):
self.validate_subscribe_key()
def is_auth_required(self):
return True
def create_response(self, envelope):
return PNHereNowResult.from_json(envelope, self._channels)
def request_timeout(self):
return self.pubnub.config.non_subscribe_request_timeout
def connect_timeout(self):
return self.pubnub.config.connect_timeout
def operation_type(self):
return PNOperationType.PNHereNowOperation
def name(self):
return "HereNow"
| 29.461538
| 87
| 0.668407
|
753524ffeef07a1219c92ae145ac45d76013ea1e
| 4,423
|
py
|
Python
|
sparse_dot_mkl/sparse_dot.py
|
mynameisvinn/sparse_dot
|
493c0be532a6397e59d5079e8c1cd7cb4321eb1d
|
[
"MIT"
] | null | null | null |
sparse_dot_mkl/sparse_dot.py
|
mynameisvinn/sparse_dot
|
493c0be532a6397e59d5079e8c1cd7cb4321eb1d
|
[
"MIT"
] | null | null | null |
sparse_dot_mkl/sparse_dot.py
|
mynameisvinn/sparse_dot
|
493c0be532a6397e59d5079e8c1cd7cb4321eb1d
|
[
"MIT"
] | null | null | null |
from sparse_dot_mkl._sparse_sparse import _sparse_dot_sparse as _sds
from sparse_dot_mkl._sparse_dense import _sparse_dot_dense as _sdd
from sparse_dot_mkl._dense_dense import _dense_dot_dense as _ddd
from sparse_dot_mkl._sparse_vector import _sparse_dot_vector as _sdv
from sparse_dot_mkl._sparse_qr_solver import sparse_qr_solver as _qrs
from sparse_dot_mkl._mkl_interface import get_version_string, _is_dense_vector
import scipy.sparse as _spsparse
import numpy as _np
def dot_product_mkl(matrix_a, matrix_b, cast=False, copy=True, reorder_output=False, dense=False, debug=False):
"""
    Multiply two matrices together using the Intel Math Kernel Library.
    This currently only supports float32 and float64 data.
:param matrix_a: Sparse matrix A in CSC/CSR format or dense matrix in numpy format
:type matrix_a: scipy.sparse.spmatrix, np.ndarray
:param matrix_b: Sparse matrix B in CSC/CSR format or dense matrix in numpy format
:type matrix_b: scipy.sparse.spmatrix, np.ndarray
    :param cast: Should the data be coerced into float64 if it isn't float32 or float64.
        If set to True and any other dtype is passed, the matrix data will be copied internally before multiplication.
        If set to False and any dtype that isn't float32 or float64 is passed, a ValueError will be raised.
        Defaults to False
:param copy: Deprecated flag to force copy. Removed because the behavior was inconsistent.
:type copy: bool
:param reorder_output: Should the array indices be reordered using MKL
If set to True, the object in C will be ordered and then exported into python
If set to False, the array column indices will not be ordered.
The scipy sparse dot product does not yield ordered column indices so this defaults to False
:type reorder_output: bool
:param dense: Should the matrix multiplication be put into a dense numpy array
This does not require any copy and is memory efficient if the output array density is > 50%
Note that this flag has no effect if one input array is dense; then the output will always be dense
:type dense: bool
:param debug: Should debug and timing messages be printed. Defaults to false.
:type debug: bool
:return: Matrix that is the result of A * B in input-dependent format
:rtype: scipy.sparse.csr_matrix, scipy.sparse.csc_matrix, np.ndarray
"""
dprint = print if debug else lambda *x: x
if get_version_string() is None and debug:
dprint("mkl-service must be installed to get full debug messaging")
elif debug:
dprint(get_version_string())
num_sparse = sum((_spsparse.issparse(matrix_a), _spsparse.issparse(matrix_b)))
num_vectors = sum((_is_dense_vector(matrix_a), _is_dense_vector(matrix_b)))
# SPARSE (DOT) SPARSE #
if num_sparse == 2:
return _sds(matrix_a, matrix_b, cast=cast, reorder_output=reorder_output, dense=dense, dprint=dprint)
# SPARSE (DOT) VECTOR #
elif num_sparse == 1 and num_vectors == 1:
return _sdv(matrix_a, matrix_b, cast=cast, dprint=dprint)
# SPARSE (DOT) DENSE & DENSE (DOT) SPARSE #
elif num_sparse == 1:
return _sdd(matrix_a, matrix_b, cast=cast, dprint=dprint)
# SPECIAL CASE OF VECTOR (DOT) VECTOR #
# THIS IS JUST EASIER THAN GETTING THIS EDGE CONDITION RIGHT IN MKL #
elif num_vectors == 2:
return _np.dot(matrix_a, matrix_b)
# DENSE (DOT) DENSE
else:
return _ddd(matrix_a, matrix_b, cast=cast, dprint=dprint)
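# Usage sketch (editor's addition): the dispatch described in the docstring
# above, exercised on small random matrices. Assumes MKL is available at runtime.
def _example_dot_product_mkl_usage():
    import numpy as np
    import scipy.sparse as sp
    a = sp.random(100, 50, density=0.1, format="csr", dtype=np.float64)
    b = sp.random(50, 20, density=0.1, format="csr", dtype=np.float64)
    c_sparse = dot_product_mkl(a, b)             # sparse @ sparse -> sparse result
    c_dense = dot_product_mkl(a, b, dense=True)  # same product exported as ndarray
    v = np.ones(50, dtype=np.float64)
    a_v = dot_product_mkl(a, v)                  # sparse @ dense vector -> dense vector
    return c_sparse, c_dense, a_v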
def sparse_qr_solve_mkl(matrix_a, matrix_b, cast=False, debug=False):
"""
Solve AX = B for X where A is sparse and B is dense
:param matrix_a: Sparse matrix (solver requires CSR; will convert if cast=True)
    :type matrix_a: scipy.sparse.csr_matrix
:param matrix_b: Dense matrix
:type matrix_b: np.ndarray
:param cast: Should the data be coerced into float64 if it isn't float32 or float64,
and should a CSR matrix be cast to a CSC matrix.
Defaults to False
:type cast: bool
:param debug: Should debug messages be printed. Defaults to false.
:type debug: bool
:return: Dense array X
:rtype: np.ndarray
"""
dprint = print if debug else lambda *x: x
if get_version_string() is None and debug:
dprint("mkl-service must be installed to get full debug messaging")
elif debug:
dprint(get_version_string())
return _qrs(matrix_a, matrix_b, cast=cast, dprint=dprint)
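# Usage sketch (editor's addition): solving a sparse linear system A X = B with
# the QR solver above. A must be CSR (or pass cast=True to convert); the result
# comes back as a dense array.
def _example_sparse_qr_solve_usage():
    import numpy as np
    import scipy.sparse as sp
    a = sp.eye(10, format="csr", dtype=np.float64)   # trivially solvable system
    b = np.arange(10, dtype=np.float64).reshape(-1, 1)
    x = sparse_qr_solve_mkl(a, b)                    # x should equal b here
    return x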
| 44.676768
| 111
| 0.732534
|
b57bb01d564119ab3369ba4b364c1e0644dd4285
| 905
|
py
|
Python
|
daemon/clients/mixin.py
|
arijitdas123student/jina
|
54d916e362bde0391b0af0f12241d531b8453247
|
[
"Apache-2.0"
] | 15,179
|
2020-04-28T10:23:56.000Z
|
2022-03-31T14:35:25.000Z
|
daemon/clients/mixin.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 3,912
|
2020-04-28T13:01:29.000Z
|
2022-03-31T14:36:46.000Z
|
daemon/clients/mixin.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 1,955
|
2020-04-28T10:50:49.000Z
|
2022-03-31T12:28:34.000Z
|
from functools import partialmethod
from jina.helper import run_async
class AsyncToSyncMixin:
"""Mixin to convert `async def`s to `def`"""
def func(self, func_name, *args, **kwargs):
"""convert async method `func_name` to a normal method
:param func_name: name of method in super
:param args: positional args
:param kwargs: keyword args
:return: run func_name from super
"""
f = getattr(super(), func_name, None)
if f:
return run_async(f, any_event_loop=True, *args, **kwargs)
alive = partialmethod(func, 'alive')
status = partialmethod(func, 'status')
get = partialmethod(func, 'get')
list = partialmethod(func, 'list')
arguments = partialmethod(func, 'arguments')
create = partialmethod(func, 'create')
update = partialmethod(func, 'update')
delete = partialmethod(func, 'delete')
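# Illustrative sketch (editor's addition): how the mixin is meant to be combined
# with an async client. The async base below is hypothetical; in the daemon the
# bases are the Async*Client classes, whose coroutines the mixin re-exposes as
# plain blocking methods via jina.helper.run_async.
class _AsyncExampleClient:
    async def status(self):
        return {"ok": True}
class _SyncExampleClient(AsyncToSyncMixin, _AsyncExampleClient):
    """`_SyncExampleClient().status()` now blocks and returns {"ok": True}."""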
| 31.206897
| 69
| 0.647514
|
630774fd4884e4761a3ad8728804111146bc9870
| 1,909
|
py
|
Python
|
mogwai/plotting/precision_length.py
|
joshim5/mogwai
|
917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55
|
[
"BSD-3-Clause"
] | 24
|
2020-11-20T19:10:23.000Z
|
2022-03-13T13:26:56.000Z
|
mogwai/plotting/precision_length.py
|
joshim5/mogwai
|
917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55
|
[
"BSD-3-Clause"
] | 10
|
2020-10-21T21:42:14.000Z
|
2020-11-18T07:57:30.000Z
|
mogwai/plotting/precision_length.py
|
joshim5/mogwai
|
917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55
|
[
"BSD-3-Clause"
] | 7
|
2020-12-27T00:44:18.000Z
|
2021-11-07T05:16:49.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import torch
def plot_precision_vs_length(
pred: torch.Tensor,
meas: torch.Tensor,
thresh: float = 1e-4,
superdiag: int = 6,
):
"""Plot precision versus length for various length cutoffs.
Analogous to a precision-recall curve.
Args:
pred (tensor): Predicted contact scores or probabilities.
meas (tensor): Binary matrix of true contacts.
thresh (float, optional): Threshold at which to call a predicted contact.
superdiag (int, optional): Ignore all true and predicted contacts from diag to superdiag.
"""
# Ignore nearby contacts
eval_idx = np.triu_indices_from(meas, superdiag)
pred_, meas_ = pred[eval_idx], meas[eval_idx]
# Sort by model confidence
sort_idx = pred_.argsort(descending=True)
# want to separate correct from incorrect indices
true_pos = list()
false_pos = list()
length = meas.shape[0]
precision = list()
optimal_precision = list()
num_contacts = len(np.nonzero(meas_))
# Only consider top 2L predictions
for i, idx in enumerate(sort_idx[: (2 * length)]):
# idx is in the flattened array of upper triang. values
# recover the position in the matrix
# Update optimal precision based on number of true contacts
if i <= num_contacts:
optimal_precision.append(1.0)
else:
num_false = i - num_contacts
optimal_precision.append(num_contacts / (num_contacts + num_false))
# Update model precision based on predictions
xy = (eval_idx[0][idx], eval_idx[1][idx])
if meas_[idx] >= thresh:
true_pos.append(xy)
else:
false_pos.append(xy)
precision.append(len(true_pos) / (len(true_pos) + len(false_pos)))
plt.plot(precision, color="b")
plt.plot(optimal_precision, color="k")
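# Usage sketch (editor's addition): plotting the curve for a random example.
# Inputs are an (L x L) score map and a binary contact map of the same shape.
def _example_precision_plot(length: int = 64):
    scores = torch.rand(length, length)
    contacts = (torch.rand(length, length) > 0.95).float()
    plot_precision_vs_length(scores, contacts)
    plt.show()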
| 32.355932
| 97
| 0.654793
|
55e18af81efeee17bfd72edfaabecf8cd0a3292d
| 36,677
|
py
|
Python
|
trove/tests/unittests/guestagent/test_cassandra_manager.py
|
Tesora-Release/tesora-trove
|
042145a573ce08b5d7cb25e1491e391e777a20be
|
[
"Apache-2.0"
] | 2
|
2016-08-27T01:59:08.000Z
|
2018-06-08T10:02:08.000Z
|
trove/tests/unittests/guestagent/test_cassandra_manager.py
|
Tesora-Release/tesora-trove
|
042145a573ce08b5d7cb25e1491e391e777a20be
|
[
"Apache-2.0"
] | null | null | null |
trove/tests/unittests/guestagent/test_cassandra_manager.py
|
Tesora-Release/tesora-trove
|
042145a573ce08b5d7cb25e1491e391e777a20be
|
[
"Apache-2.0"
] | 7
|
2016-04-06T19:03:03.000Z
|
2018-10-12T21:50:51.000Z
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import string
from mock import ANY
from mock import call
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import NonCallableMagicMock
from mock import patch
from oslo_utils import netutils
from testtools import ExpectedException
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.cassandra import (
manager as cass_manager)
from trove.guestagent.datastore.cassandra import (
service as cass_service)
from trove.guestagent.db import models
from trove.guestagent import pkg as pkg
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
class GuestAgentCassandraDBManagerTest(trove_testtools.TestCase):
__MOUNT_POINT = '/var/lib/cassandra'
__N_GAK = '_get_available_keyspaces'
__N_GLU = '_get_listed_users'
__N_BU = '_build_user'
__N_RU = '_rename_user'
__N_AUP = '_alter_user_password'
__N_CAU = 'trove.guestagent.db.models.CassandraUser'
__N_CU = '_create_user'
__N_GFA = '_grant_full_access_on_keyspace'
__N_DU = '_drop_user'
__ACCESS_MODIFIERS = ('ALTER', 'CREATE', 'DROP', 'MODIFY', 'SELECT')
__CREATE_DB_FORMAT = (
"CREATE KEYSPACE \"{}\" WITH REPLICATION = "
"{{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }};"
)
__DROP_DB_FORMAT = "DROP KEYSPACE \"{}\";"
__CREATE_USR_FORMAT = "CREATE USER '{}' WITH PASSWORD %s NOSUPERUSER;"
__ALTER_USR_FORMAT = "ALTER USER '{}' WITH PASSWORD %s;"
__DROP_USR_FORMAT = "DROP USER '{}';"
__GRANT_FORMAT = "GRANT {} ON KEYSPACE \"{}\" TO '{}';"
__REVOKE_FORMAT = "REVOKE ALL PERMISSIONS ON KEYSPACE \"{}\" FROM '{}';"
__LIST_PERMISSIONS_FORMAT = "LIST ALL PERMISSIONS NORECURSIVE;"
__LIST_PERMISSIONS_OF_FORMAT = "LIST ALL PERMISSIONS OF '{}' NORECURSIVE;"
__LIST_DB_FORMAT = "SELECT * FROM system.schema_keyspaces;"
__LIST_USR_FORMAT = "LIST USERS;"
@patch.object(ImportOverrideStrategy, '_initialize_import_directory')
@patch('trove.guestagent.datastore.cassandra.service.LOG')
def setUp(self, *args, **kwargs):
super(GuestAgentCassandraDBManagerTest, self).setUp()
conn_patcher = patch.multiple(cass_service.CassandraConnection,
_connect=DEFAULT,
is_active=Mock(return_value=True))
self.addCleanup(conn_patcher.stop)
conn_patcher.start()
self.real_status = cass_service.CassandraAppStatus.set_status
class FakeInstanceServiceStatus(object):
status = ServiceStatuses.NEW
def save(self):
pass
cass_service.CassandraAppStatus.set_status = MagicMock(
return_value=FakeInstanceServiceStatus())
self.context = trove_testtools.TroveTestContext(self)
self.manager = cass_manager.Manager()
self.manager._app = cass_service.CassandraApp()
self.manager._admin = cass_service.CassandraAdmin(
models.CassandraUser('Test'))
self.admin = self.manager._admin
self.admin._CassandraAdmin__client = MagicMock()
self.conn = self.admin._CassandraAdmin__client
self.pkg = cass_service.packager
self.origin_os_path_exists = os.path.exists
self.origin_format = volume.VolumeDevice.format
self.origin_migrate_data = volume.VolumeDevice.migrate_data
self.origin_mount = volume.VolumeDevice.mount
self.origin_mount_points = volume.VolumeDevice.mount_points
self.origin_stop_db = cass_service.CassandraApp.stop_db
self.origin_start_db = cass_service.CassandraApp.start_db
self.origin_install_db = cass_service.CassandraApp._install_db
self.original_get_ip = netutils.get_my_ipv4
self.orig_make_host_reachable = (
cass_service.CassandraApp.apply_initial_guestagent_configuration)
def tearDown(self):
super(GuestAgentCassandraDBManagerTest, self).tearDown()
cass_service.packager = self.pkg
os.path.exists = self.origin_os_path_exists
volume.VolumeDevice.format = self.origin_format
volume.VolumeDevice.migrate_data = self.origin_migrate_data
volume.VolumeDevice.mount = self.origin_mount
volume.VolumeDevice.mount_points = self.origin_mount_points
cass_service.CassandraApp.stop_db = self.origin_stop_db
cass_service.CassandraApp.start_db = self.origin_start_db
cass_service.CassandraApp._install_db = self.origin_install_db
netutils.get_my_ipv4 = self.original_get_ip
cass_service.CassandraApp.apply_initial_guestagent_configuration = (
self.orig_make_host_reachable)
cass_service.CassandraAppStatus.set_status = self.real_status
def test_update_status(self):
mock_status = MagicMock()
self.manager.app.status = mock_status
self.manager.update_status(self.context)
mock_status.update.assert_any_call()
def test_prepare_pkg(self):
self._prepare_dynamic(['cassandra'])
def test_prepare_no_pkg(self):
self._prepare_dynamic([])
def test_prepare_db_not_installed(self):
self._prepare_dynamic([], is_db_installed=False)
def test_prepare_db_not_installed_no_package(self):
self._prepare_dynamic([],
is_db_installed=True)
@patch.object(backup, 'restore')
def test_prepare_db_restore(self, restore):
backup_info = {'id': 'backup_id',
'instance_id': 'fake-instance-id',
'location': 'fake-location',
'type': 'InnoBackupEx',
'checksum': 'fake-checksum'}
self._prepare_dynamic(['cassandra'], is_db_installed=False,
backup_info=backup_info)
restore.assert_called_once_with(
self.context, backup_info, self.__MOUNT_POINT)
@patch.multiple(operating_system, enable_service_on_boot=DEFAULT,
disable_service_on_boot=DEFAULT)
@patch('trove.guestagent.datastore.cassandra.service.LOG')
def test_superuser_password_reset(
self, _, enable_service_on_boot, disable_service_on_boot):
fake_status = MagicMock()
fake_status.is_running = False
test_app = cass_service.CassandraApp()
test_app.status = fake_status
with patch.multiple(
test_app,
start_db=DEFAULT,
stop_db=DEFAULT,
restart=DEFAULT,
_CassandraApp__disable_remote_access=DEFAULT,
_CassandraApp__enable_remote_access=DEFAULT,
_CassandraApp__disable_authentication=DEFAULT,
_CassandraApp__enable_authentication=DEFAULT,
_reset_user_password_to_default=DEFAULT,
secure=DEFAULT) as calls:
test_app._reset_admin_password()
disable_service_on_boot.assert_called_once_with(
test_app.service_candidates)
calls[
'_CassandraApp__disable_remote_access'
].assert_called_once_with()
calls[
'_CassandraApp__disable_authentication'
].assert_called_once_with()
calls['start_db'].assert_called_once_with(update_db=False,
enable_on_boot=False),
calls[
'_CassandraApp__enable_authentication'
].assert_called_once_with()
pw_reset_mock = calls[
'_reset_user_password_to_default'
]
pw_reset_mock.assert_called_once_with(test_app._ADMIN_USER)
calls['secure'].assert_called_once_with(
update_user=pw_reset_mock.return_value)
calls['restart'].assert_called_once_with()
calls['stop_db'].assert_called_once_with()
calls[
'_CassandraApp__enable_remote_access'
].assert_called_once_with()
enable_service_on_boot.assert_called_once_with(
test_app.service_candidates)
@patch('trove.guestagent.datastore.cassandra.service.LOG')
def test_change_cluster_name(self, _):
fake_status = MagicMock()
fake_status.is_running = True
test_app = cass_service.CassandraApp()
test_app.status = fake_status
with patch.multiple(
test_app,
start_db=DEFAULT,
stop_db=DEFAULT,
restart=DEFAULT,
_update_cluster_name_property=DEFAULT,
_CassandraApp__reset_cluster_name=DEFAULT) as calls:
sample_name = NonCallableMagicMock()
test_app.change_cluster_name(sample_name)
calls['_CassandraApp__reset_cluster_name'].assert_called_once_with(
sample_name)
calls['_update_cluster_name_property'].assert_called_once_with(
sample_name)
calls['restart'].assert_called_once_with()
@patch.object(cass_service, 'CONF', DEFAULT)
@patch('trove.guestagent.datastore.cassandra.service.LOG')
def test_apply_post_restore_updates(self, _, conf_mock):
fake_status = MagicMock()
fake_status.is_running = False
test_app = cass_service.CassandraApp()
test_app.status = fake_status
with patch.multiple(
test_app,
start_db=DEFAULT,
stop_db=DEFAULT,
_update_cluster_name_property=DEFAULT,
_reset_admin_password=DEFAULT,
change_cluster_name=DEFAULT) as calls:
backup_info = {'instance_id': 'old_id'}
conf_mock.guest_id = 'new_id'
test_app._apply_post_restore_updates(backup_info)
calls['_update_cluster_name_property'].assert_called_once_with(
'old_id')
calls['_reset_admin_password'].assert_called_once_with()
calls['start_db'].assert_called_once_with(update_db=False)
calls['change_cluster_name'].assert_called_once_with('new_id')
calls['stop_db'].assert_called_once_with()
def _prepare_dynamic(self, packages,
config_content='MockContent', device_path='/dev/vdb',
is_db_installed=True, backup_info=None,
is_root_enabled=False,
overrides=None):
mock_status = MagicMock()
mock_app = MagicMock()
mock_app.status = mock_status
self.manager._app = mock_app
mock_status.begin_install = MagicMock(return_value=None)
mock_app.install_if_needed = MagicMock(return_value=None)
mock_app.init_storage_structure = MagicMock(return_value=None)
mock_app.write_config = MagicMock(return_value=None)
mock_app.apply_initial_guestagent_configuration = MagicMock(
return_value=None)
mock_app.restart = MagicMock(return_value=None)
mock_app.start_db = MagicMock(return_value=None)
mock_app.stop_db = MagicMock(return_value=None)
mock_app._remove_system_tables = MagicMock(return_value=None)
os.path.exists = MagicMock(return_value=True)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
volume.VolumeDevice.mount_points = MagicMock(return_value=[])
with patch.object(pkg.Package, 'pkg_is_installed',
return_value=is_db_installed):
# invocation
self.manager.prepare(context=self.context, packages=packages,
config_contents=config_content,
databases=None,
memory_mb='2048', users=None,
device_path=device_path,
mount_point=self.__MOUNT_POINT,
backup_info=backup_info,
overrides=None,
cluster_config=None)
# verification/assertion
mock_status.begin_install.assert_any_call()
mock_app.install_if_needed.assert_any_call(packages)
mock_app._remove_system_tables.assert_any_call()
mock_app.init_storage_structure.assert_any_call('/var/lib/cassandra')
mock_app.apply_initial_guestagent_configuration.assert_any_call(
cluster_name=None)
mock_app.start_db.assert_any_call(update_db=False)
mock_app.stop_db.assert_any_call()
if backup_info:
mock_app._apply_post_restore_updates.assert_called_once_with(
backup_info)
def test_keyspace_validation(self):
valid_name = self._get_random_name(32)
db = models.CassandraSchema(valid_name)
self.assertEqual(valid_name, db.name)
with ExpectedException(ValueError):
models.CassandraSchema(self._get_random_name(33))
def test_user_validation(self):
valid_name = self._get_random_name(65535)
usr = models.CassandraUser(valid_name, 'password')
self.assertEqual(valid_name, usr.name)
self.assertEqual('password', usr.password)
with ExpectedException(ValueError):
models.CassandraUser(self._get_random_name(65536))
@classmethod
def _serialize_collection(self, *collection):
return [item.serialize() for item in collection]
@classmethod
    def _get_random_name(self, size, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def test_create_database(self):
db1 = models.CassandraSchema('db1')
db2 = models.CassandraSchema('db2')
db3 = models.CassandraSchema(self._get_random_name(32))
self.manager.create_database(self.context,
self._serialize_collection(db1, db2, db3))
self.conn.execute.assert_has_calls([
call(self.__CREATE_DB_FORMAT, (db1.name,)),
call(self.__CREATE_DB_FORMAT, (db2.name,)),
call(self.__CREATE_DB_FORMAT, (db3.name,))
])
def test_delete_database(self):
db = models.CassandraSchema(self._get_random_name(32))
self.manager.delete_database(self.context, db.serialize())
self.conn.execute.assert_called_once_with(
self.__DROP_DB_FORMAT, (db.name,))
def test_create_user(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2', '')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
self.manager.create_user(self.context,
self._serialize_collection(usr1, usr2, usr3))
self.conn.execute.assert_has_calls([
call(self.__CREATE_USR_FORMAT, (usr1.name,), (usr1.password,)),
call(self.__CREATE_USR_FORMAT, (usr2.name,), (usr2.password,)),
call(self.__CREATE_USR_FORMAT, (usr3.name,), (usr3.password,))
])
def test_delete_user(self):
usr = models.CassandraUser(self._get_random_name(1025), 'password')
self.manager.delete_user(self.context, usr.serialize())
self.conn.execute.assert_called_once_with(
self.__DROP_USR_FORMAT, (usr.name,))
def test_change_passwords(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2', '')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
self.manager.change_passwords(self.context, self._serialize_collection(
usr1, usr2, usr3))
self.conn.execute.assert_has_calls([
call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)),
call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)),
call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,))
])
def test_alter_user_password(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2', '')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
self.admin.alter_user_password(usr1)
self.admin.alter_user_password(usr2)
self.admin.alter_user_password(usr3)
self.conn.execute.assert_has_calls([
call(self.__ALTER_USR_FORMAT, (usr1.name,), (usr1.password,)),
call(self.__ALTER_USR_FORMAT, (usr2.name,), (usr2.password,)),
call(self.__ALTER_USR_FORMAT, (usr3.name,), (usr3.password,))
])
def test_grant_access(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr1', 'password')
db1 = models.CassandraSchema('db1')
db2 = models.CassandraSchema('db2')
db3 = models.CassandraSchema('db3')
self.manager.grant_access(self.context, usr1.name, None, [db1.name,
db2.name])
self.manager.grant_access(self.context, usr2.name, None, [db3.name])
expected = []
for modifier in self.__ACCESS_MODIFIERS:
expected.append(call(self.__GRANT_FORMAT,
(modifier, db1.name, usr1.name)))
expected.append(call(self.__GRANT_FORMAT,
(modifier, db3.name, usr2.name)))
self.conn.execute.assert_has_calls(
expected,
any_order=True)
def test_revoke_access(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr1', 'password')
db1 = models.CassandraSchema('db1')
db2 = models.CassandraSchema('db2')
self.manager.revoke_access(self.context, usr1.name, None, db1.name)
self.manager.revoke_access(self.context, usr2.name, None, db2.name)
self.conn.execute.assert_has_calls([
call(self.__REVOKE_FORMAT, (db1.name, usr1.name)),
call(self.__REVOKE_FORMAT, (db2.name, usr2.name))
])
def test_get_available_keyspaces(self):
self.manager.list_databases(self.context)
self.conn.list_keyspaces.assert_called_once_with()
def test_list_databases(self):
db1 = models.CassandraSchema('db1')
db2 = models.CassandraSchema('db2')
db3 = models.CassandraSchema(self._get_random_name(32))
with patch.object(self.admin, self.__N_GAK, return_value={db1, db2,
db3}):
found = self.manager.list_databases(self.context)
self.assertEqual(2, len(found))
self.assertEqual(3, len(found[0]))
self.assertEqual(None, found[1])
self.assertIn(db1.serialize(), found[0])
self.assertIn(db2.serialize(), found[0])
self.assertIn(db3.serialize(), found[0])
with patch.object(self.admin, self.__N_GAK, return_value=set()):
found = self.manager.list_databases(self.context)
self.assertEqual(([], None), found)
def test_get_acl(self):
r0 = NonCallableMagicMock(username='user1', resource='<all keyspaces>',
permission='SELECT')
r1 = NonCallableMagicMock(username='user2', resource='<keyspace ks1>',
permission='SELECT')
r2 = NonCallableMagicMock(username='user2', resource='<keyspace ks2>',
permission='SELECT')
r3 = NonCallableMagicMock(username='user2', resource='<keyspace ks2>',
permission='ALTER')
r4 = NonCallableMagicMock(username='user3', resource='<table ks2.t1>',
permission='SELECT')
r5 = NonCallableMagicMock(username='user3', resource='',
permission='ALTER')
r6 = NonCallableMagicMock(username='user3', resource='<keyspace ks2>',
permission='')
r7 = NonCallableMagicMock(username='user3', resource='',
permission='')
r8 = NonCallableMagicMock(username='user3', resource='<keyspace ks1>',
permission='DELETE')
r9 = NonCallableMagicMock(username='user4', resource='<all keyspaces>',
permission='UPDATE')
r10 = NonCallableMagicMock(username='user4', resource='<keyspace ks1>',
permission='DELETE')
available_ks = {models.CassandraSchema('ks1'),
models.CassandraSchema('ks2'),
models.CassandraSchema('ks3')}
mock_result_set = [r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r9, r9, r10]
execute_mock = MagicMock(return_value=mock_result_set)
mock_client = MagicMock(execute=execute_mock)
with patch.object(self.admin,
self.__N_GAK, return_value=available_ks) as gak_mock:
acl = self.admin._get_acl(mock_client)
execute_mock.assert_called_once_with(
self.__LIST_PERMISSIONS_FORMAT)
gak_mock.assert_called_once_with(mock_client)
self.assertEqual({'user1': {'ks1': {'SELECT'},
'ks2': {'SELECT'},
'ks3': {'SELECT'}},
'user2': {'ks1': {'SELECT'},
'ks2': {'SELECT', 'ALTER'}},
'user3': {'ks1': {'DELETE'}},
'user4': {'ks1': {'UPDATE', 'DELETE'},
'ks2': {'UPDATE'},
'ks3': {'UPDATE'}}
},
acl)
mock_result_set = [r1, r2, r3]
execute_mock = MagicMock(return_value=mock_result_set)
mock_client = MagicMock(execute=execute_mock)
with patch.object(self.admin,
self.__N_GAK, return_value=available_ks) as gak_mock:
acl = self.admin._get_acl(mock_client, username='user2')
execute_mock.assert_called_once_with(
self.__LIST_PERMISSIONS_OF_FORMAT.format('user2'))
gak_mock.assert_not_called()
self.assertEqual({'user2': {'ks1': {'SELECT'},
'ks2': {'SELECT', 'ALTER'}}}, acl)
mock_result_set = []
execute_mock = MagicMock(return_value=mock_result_set)
mock_client = MagicMock(execute=execute_mock)
with patch.object(self.admin,
self.__N_GAK, return_value=available_ks) as gak_mock:
acl = self.admin._get_acl(mock_client, username='nonexisting')
execute_mock.assert_called_once_with(
self.__LIST_PERMISSIONS_OF_FORMAT.format('nonexisting'))
gak_mock.assert_not_called()
self.assertEqual({}, acl)
def test_get_listed_users(self):
usr1 = models.CassandraUser(self._get_random_name(1025))
usr2 = models.CassandraUser(self._get_random_name(1025))
usr3 = models.CassandraUser(self._get_random_name(1025))
db1 = models.CassandraSchema('db1')
db2 = models.CassandraSchema('db2')
usr1.databases.append(db1.serialize())
usr3.databases.append(db2.serialize())
rv_1 = NonCallableMagicMock()
rv_1.configure_mock(name=usr1.name, super=False)
rv_2 = NonCallableMagicMock()
rv_2.configure_mock(name=usr2.name, super=False)
rv_3 = NonCallableMagicMock()
rv_3.configure_mock(name=usr3.name, super=True)
with patch.object(self.conn, 'execute', return_value=iter(
[rv_1, rv_2, rv_3])):
with patch.object(self.admin, '_get_acl',
return_value={usr1.name: {db1.name: {'SELECT'},
db2.name: {}},
usr3.name: {db2.name: {'SELECT'}}}
):
usrs = self.manager.list_users(self.context)
self.conn.execute.assert_has_calls([
call(self.__LIST_USR_FORMAT),
], any_order=True)
self.assertIn(usr1.serialize(), usrs[0])
self.assertIn(usr2.serialize(), usrs[0])
self.assertIn(usr3.serialize(), usrs[0])
def test_list_access(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
db1 = models.CassandraSchema('db1').serialize()
db2 = models.CassandraSchema('db2').serialize()
usr2.databases.append(db1)
usr3.databases.append(db1)
usr3.databases.append(db2)
with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2,
usr3}):
usr1_dbs = self.manager.list_access(self.context, usr1.name, None)
usr2_dbs = self.manager.list_access(self.context, usr2.name, None)
usr3_dbs = self.manager.list_access(self.context, usr3.name, None)
self.assertEqual([], usr1_dbs)
self.assertEqual([db1], usr2_dbs)
self.assertEqual([db1, db2], usr3_dbs)
with patch.object(self.admin, self.__N_GLU, return_value=set()):
with ExpectedException(exception.UserNotFound):
self.manager.list_access(self.context, usr3.name, None)
def test_list_users(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2,
usr3}):
found = self.manager.list_users(self.context)
self.assertEqual(2, len(found))
self.assertEqual(3, len(found[0]))
self.assertEqual(None, found[1])
self.assertIn(usr1.serialize(), found[0])
self.assertIn(usr2.serialize(), found[0])
self.assertIn(usr3.serialize(), found[0])
with patch.object(self.admin, self.__N_GLU, return_value=set()):
self.assertEqual(([], None), self.manager.list_users(self.context))
def test_get_user(self):
usr1 = models.CassandraUser('usr1')
usr2 = models.CassandraUser('usr2')
usr3 = models.CassandraUser(self._get_random_name(1025), 'password')
with patch.object(self.admin, self.__N_GLU, return_value={usr1, usr2,
usr3}):
found = self.manager.get_user(self.context, usr2.name, None)
self.assertEqual(usr2.serialize(), found)
with patch.object(self.admin, self.__N_GLU, return_value=set()):
self.assertIsNone(
self.manager.get_user(self.context, usr2.name, None))
@patch.object(cass_service.CassandraAdmin, '_deserialize_keyspace',
side_effect=lambda p1: p1)
def test_rename_user(self, ks_deserializer):
usr = models.CassandraUser('usr')
db1 = models.CassandraSchema('db1').serialize()
db2 = models.CassandraSchema('db2').serialize()
usr.databases.append(db1)
usr.databases.append(db2)
new_user = models.CassandraUser('new_user')
with patch(self.__N_CAU, return_value=new_user):
with patch.object(self.admin, self.__N_BU, return_value=usr):
with patch.object(self.admin, self.__N_CU) as create:
with patch.object(self.admin, self.__N_GFA) as grant:
with patch.object(self.admin, self.__N_DU) as drop:
usr_attrs = {'name': 'user', 'password': 'trove'}
self.manager.update_attributes(self.context,
usr.name, None,
usr_attrs)
create.assert_called_once_with(ANY, new_user)
grant.assert_has_calls([call(ANY, db1, ANY),
call(ANY, db2, ANY)])
drop.assert_called_once_with(ANY, usr)
def test_update_attributes(self):
usr = models.CassandraUser('usr', 'pwd')
with patch.object(self.admin, self.__N_BU, return_value=usr):
usr_attrs = {'name': usr.name, 'password': usr.password}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
self.assertEqual(0, rename.call_count)
self.assertEqual(0, alter.call_count)
usr_attrs = {'name': 'user', 'password': 'password'}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
rename.assert_called_once_with(ANY, usr, usr_attrs['name'],
usr_attrs['password'])
self.assertEqual(0, alter.call_count)
usr_attrs = {'name': 'user', 'password': usr.password}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
rename.assert_called_once_with(ANY, usr, usr_attrs['name'],
usr_attrs['password'])
self.assertEqual(0, alter.call_count)
usr_attrs = {'name': 'user'}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
with ExpectedException(
exception.UnprocessableEntity, "Updating username "
"requires specifying a password as well."):
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
self.assertEqual(0, rename.call_count)
self.assertEqual(0, alter.call_count)
usr_attrs = {'name': usr.name, 'password': 'password'}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
alter.assert_called_once_with(ANY, usr)
self.assertEqual(0, rename.call_count)
usr_attrs = {'password': usr.password}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
self.assertEqual(0, rename.call_count)
self.assertEqual(0, alter.call_count)
usr_attrs = {'password': 'trove'}
with patch.object(self.admin, self.__N_RU) as rename:
with patch.object(self.admin, self.__N_AUP) as alter:
self.manager.update_attributes(self.context, usr.name,
None, usr_attrs)
alter.assert_called_once_with(ANY, usr)
self.assertEqual(0, rename.call_count)
def test_update_overrides(self):
cfg_mgr_mock = MagicMock()
self.manager._app.configuration_manager = cfg_mgr_mock
overrides = NonCallableMagicMock()
self.manager.update_overrides(Mock(), overrides)
cfg_mgr_mock.apply_user_override.assert_called_once_with(overrides)
cfg_mgr_mock.remove_user_override.assert_not_called()
def test_remove_overrides(self):
cfg_mgr_mock = MagicMock()
self.manager._app.configuration_manager = cfg_mgr_mock
self.manager.update_overrides(Mock(), {}, remove=True)
cfg_mgr_mock.remove_user_override.assert_called_once_with()
cfg_mgr_mock.apply_user_override.assert_not_called()
def test_apply_overrides(self):
self.assertIsNone(
self.manager.apply_overrides(Mock(), NonCallableMagicMock()))
def test_enable_root(self):
with patch.object(self.manager._app, 'is_root_enabled',
return_value=False):
with patch.object(cass_service.CassandraAdmin,
'_create_superuser') as create_mock:
self.manager.enable_root(self.context)
create_mock.assert_called_once_with(ANY)
with patch.object(self.manager._app, 'is_root_enabled',
return_value=True):
with patch.object(cass_service.CassandraAdmin,
'alter_user_password') as alter_mock:
self.manager.enable_root(self.context)
alter_mock.assert_called_once_with(ANY)
def test_is_root_enabled(self):
trove_admin = Mock()
trove_admin.configure_mock(name=self.manager._app._ADMIN_USER)
other_admin = Mock()
other_admin.configure_mock(name='someuser')
with patch.object(cass_service.CassandraAdmin,
'list_superusers', return_value=[]):
self.assertFalse(self.manager.is_root_enabled(self.context))
with patch.object(cass_service.CassandraAdmin,
'list_superusers', return_value=[trove_admin]):
self.assertFalse(self.manager.is_root_enabled(self.context))
with patch.object(cass_service.CassandraAdmin,
'list_superusers', return_value=[other_admin]):
self.assertTrue(self.manager.is_root_enabled(self.context))
with patch.object(cass_service.CassandraAdmin,
'list_superusers',
return_value=[trove_admin, other_admin]):
self.assertTrue(self.manager.is_root_enabled(self.context))
def test_guest_log_enable(self):
self._assert_guest_log_enable(False, 'INFO')
self._assert_guest_log_enable(True, 'OFF')
def _assert_guest_log_enable(self, disable, expected_level):
with patch.multiple(
self.manager._app,
logback_conf_manager=DEFAULT,
_run_nodetool_command=DEFAULT
) as app_mocks:
self.assertFalse(self.manager.guest_log_enable(
Mock(), Mock(), disable))
(app_mocks['logback_conf_manager'].apply_system_override.
assert_called_once_with(
{'configuration': {'root': {'@level': expected_level}}}))
app_mocks['_run_nodetool_command'].assert_called_once_with(
'setlogginglevel', 'root', expected_level)
| 46.367889
| 79
| 0.608747
|
188668b8537e18c1475337ca8b44b327a776daf8
| 6,499
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/netmiko/juniper/juniper_ssh.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/netmiko/juniper/juniper_ssh.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/netmiko/juniper/juniper_ssh.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import re
from netmiko.ssh_connection import BaseSSHConnection
from netmiko.netmiko_globals import MAX_BUFFER
import time
class JuniperSSH(BaseSSHConnection):
'''
Implement methods for interacting with Juniper Networks devices.
Subclass of SSHConnection. Disables `enable()` and `check_enable_mode()`
methods. Overrides several methods for Juniper-specific compatibility.
'''
def session_preparation(self):
"""
Prepare the session after the connection has been established.
Disable paging (the '--more--' prompts).
Set the base prompt for interaction ('>').
"""
self.enter_cli_mode()
self.set_base_prompt()
self.disable_paging(command="set cli screen-length 0\n")
def enter_cli_mode(self):
"""Check if at shell prompt root@.*% shell prompt and go into CLI."""
count = 0
cur_prompt = ''
while count < 50:
self.remote_conn.sendall("\n")
time.sleep(.1)
if self.remote_conn.recv_ready():
cur_prompt = self.remote_conn.recv(MAX_BUFFER).decode('utf-8', 'ignore')
if re.search(r'root@.*%', cur_prompt):
self.remote_conn.sendall("cli\n")
time.sleep(.3)
self.clear_buffer()
break
elif '>' in cur_prompt or '#' in cur_prompt:
break
count += 1
def check_enable_mode(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def enable(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def exit_enable_mode(self, *args, **kwargs):
"""No enable mode on Juniper."""
pass
def check_config_mode(self, check_string=']'):
"""Checks if the device is in configuration mode or not."""
return super(JuniperSSH, self).check_config_mode(check_string=check_string)
def config_mode(self, config_command='configure'):
"""Enter configuration mode."""
return super(JuniperSSH, self).config_mode(config_command=config_command)
def exit_config_mode(self, exit_config='exit configuration-mode'):
"""Exit configuration mode."""
output = ""
if self.check_config_mode():
output = self.send_command(exit_config, strip_prompt=False, strip_command=False)
if 'Exit with uncommitted changes?' in output:
output += self.send_command('yes', strip_prompt=False, strip_command=False)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def commit(self, confirm=False, confirm_delay=None, check=False, comment='',
and_quit=False, delay_factor=.1):
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
Automatically enters configuration mode
default:
command_string = commit
        check and (confirm or confirm_delay or comment):
Exception
confirm_delay and no confirm:
Exception
confirm:
confirm_delay option
comment option
command_string = commit confirmed or commit confirmed <confirm_delay>
check:
command_string = commit check
"""
delay_factor = self.select_delay_factor(delay_factor)
if check and (confirm or confirm_delay or comment):
raise ValueError("Invalid arguments supplied with commit check")
if confirm_delay and not confirm:
raise ValueError("Invalid arguments supplied to commit method both confirm and check")
# Select proper command string based on arguments provided
command_string = 'commit'
commit_marker = 'commit complete'
if check:
command_string = 'commit check'
commit_marker = 'configuration check succeeds'
elif confirm:
if confirm_delay:
command_string = 'commit confirmed ' + str(confirm_delay)
else:
command_string = 'commit confirmed'
commit_marker = 'commit confirmed will be automatically rolled back in'
# wrap the comment in quotes
if comment:
if '"' in comment:
raise ValueError("Invalid comment contains double quote")
comment = '"{0}"'.format(comment)
command_string += ' comment ' + comment
if and_quit:
command_string += ' and-quit'
# Enter config mode (if necessary)
output = self.config_mode()
# and_quit will get out of config mode on commit
if and_quit:
prompt = self.base_prompt
output += self.send_command_expect(command_string, expect_string=prompt,
strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
else:
output += self.send_command_expect(command_string, strip_prompt=False,
strip_command=False, delay_factor=delay_factor)
if commit_marker not in output:
raise ValueError("Commit failed with the following errors:\n\n{0}"
.format(output))
return output
def strip_prompt(self, *args, **kwargs):
"""Strip the trailing router prompt from the output."""
a_string = super(JuniperSSH, self).strip_prompt(*args, **kwargs)
return self.strip_context_items(a_string)
@staticmethod
def strip_context_items(a_string):
"""Strip Juniper-specific output.
Juniper will also put a configuration context:
[edit]
and various chassis contexts:
{master:0}, {backup:1}
This method removes those lines.
"""
strings_to_strip = [
r'\[edit.*\]',
r'\{master:.*\}',
r'\{backup:.*\}',
r'\{line.*\}',
r'\{primary.*\}',
r'\{secondary.*\}',
]
response_list = a_string.split('\n')
last_line = response_list[-1]
for pattern in strings_to_strip:
if re.search(pattern, last_line):
return "\n".join(response_list[:-1])
return a_string
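# A minimal usage sketch, assuming BaseSSHConnection accepts ip/username/password
# keyword arguments (this varies between netmiko versions) and that a reachable
# Juniper device sits at the placeholder address below. Only methods defined or
# inherited above (config_mode, send_command, commit) are exercised.
if __name__ == '__main__':
    conn = JuniperSSH(ip='192.0.2.1', username='admin', password='secret')
    conn.config_mode()                                    # sends 'configure'
    conn.send_command('set system host-name lab-router')  # stage a change
    print(conn.commit(check=True))                        # 'commit check' only validates
    print(conn.commit(comment='hostname change', and_quit=True))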
| 35.513661
| 98
| 0.595784
|
a70a4181b4620c9f81921b7889af2fda2d0a91ed
| 6,785
|
py
|
Python
|
src/core/testenv.py
|
adamatousek/hsExprTest
|
af38af5f9601b37ef830d65d32c15e0563495c0f
|
[
"BSD-2-Clause"
] | null | null | null |
src/core/testenv.py
|
adamatousek/hsExprTest
|
af38af5f9601b37ef830d65d32c15e0563495c0f
|
[
"BSD-2-Clause"
] | null | null | null |
src/core/testenv.py
|
adamatousek/hsExprTest
|
af38af5f9601b37ef830d65d32c15e0563495c0f
|
[
"BSD-2-Clause"
] | null | null | null |
import tempfile
import config
import copy
import os.path
import aiofiles # type: ignore
import posix1e # type: ignore
import pwd
import os
import sys
import asyncio
import subprocess
import contextlib
import json
from typing import Tuple, List, Optional
import cgroup
class PointEntry:
def __init__(self, points : int, out_of : int, comment : str, **kvargs):
self.points = points
self.out_of = out_of
self.comment = comment
class RunResult:
def __init__(self, res : bool, stdout : str, stderr : str,
points : List[PointEntry]):
self.result = res
self.stdout = stdout
self.stderr = stderr
self.points = points
class TestEnvironment(object):
def __init__(self, question : Optional[str], answer : str,
course : config.Course, slots : cgroup.SlotManager):
self.question = question
self.answer = answer
self.course = course
self.slotmgr = slots
self.tmpdirHandle = tempfile.TemporaryDirectory(prefix="exprtest.")
async def __aenter__(self):
self.tmpdir = self.tmpdirHandle.__enter__()
if self.course.isolation:
user = f"rc-{self.course.name}"
uid = pwd.getpwnam(user).pw_uid
acl = posix1e.ACL(file=self.tmpdir)
e = acl.append()
# add entry for test user to ensure it can access test files
e.tag_type = posix1e.ACL_USER
e.qualifier = uid
e.permset.clear()
e.permset.read = True
e.permset.write = True
e.permset.execute = True
acl.calc_mask()
acl.applyto(self.tmpdir)
# add another default entry for checker to ensure we can delete
            # everything
ec = acl.append()
ec.tag_type = posix1e.ACL_USER
ec.qualifier = os.geteuid()
ec.permset.clear()
ec.permset.read = True
ec.permset.write = True
ec.permset.execute = True
acl.calc_mask()
acl.applyto(self.tmpdir, posix1e.ACL_TYPE_DEFAULT)
ext = ""
self.qfile : Optional[str] = None
if self.question is not None:
ext = os.path.splitext(self.question)[1]
self.qfile = os.path.join(self.tmpdir, f"question{ext}")
self.afile = os.path.join(self.tmpdir, f"answer{ext}")
if self.question is not None:
async with aiofiles.open(self.question) as src:
async with aiofiles.open(self.qfile, "w") as tgt:
contents = await src.read()
await tgt.write(contents)
async with aiofiles.open(self.afile, "w") as ans:
await ans.write(self.answer)
return self
@staticmethod
async def get_points_pipe(estack : contextlib.ExitStack)\
-> Tuple[asyncio.StreamReader, int]:
rfd, wfd = os.pipe()
ro = open(rfd, 'rb', buffering=0)
loop = asyncio.get_running_loop()
reader = asyncio.StreamReader(loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.connect_read_pipe(lambda: protocol, ro)
estack.callback(lambda t: t.close(), transport)
return reader, wfd
async def run(self, *options, hint : bool)\
-> RunResult:
with self.slotmgr.get() as slot:
args = []
if self.course.isolation:
args.extend(["sudo", "-n", "-u", f"rc-{self.course.name}"])
args.extend(self.course.checker.split(' '))
if self.qfile is not None:
args.append(self.qfile)
args.extend([self.afile, f"-I{self.course.qdir}"])
args.extend([f"-o{opt}" for opt in options if opt is not None])
if hint:
args.append("--hint")
pass_fds : List[int] = []
points_read : Optional[asyncio.StreamReader] = None
points_wfd : Optional[int] = None
points : List[PointEntry] = []
env = copy.deepcopy(os.environ)
for var in self.course.path_append:
if var not in env:
env[var] = self.course.qdir
else:
env[var] = f"{env[var]}:{self.course.qdir}"
with contextlib.ExitStack() as estack:
if self.course.extended:
points_read, points_wfd = await self.get_points_pipe(estack)
args.append(f"-p{points_wfd}")
pass_fds = [points_wfd]
print("+ " + " ".join(args), file=sys.stderr, flush=True)
preexec = None
if self.slotmgr.available() and self.slotmgr.cg is not None:
preexec = lambda: self.slotmgr.cg.register_me(slot)
proc = await asyncio.create_subprocess_exec(
*args,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.tmpdir,
start_new_session=True,
pass_fds=pass_fds,
preexec_fn=preexec,
env=env)
if self.course.extended:
assert points_read is not None
assert points_wfd is not None
os.close(points_wfd)
(raw_stdout, raw_stderr), raw_points = await asyncio.gather(
proc.communicate(), points_read.read())
point_lines = raw_points.decode("utf8").splitlines()
points = [PointEntry(**json.loads(x))
for x in point_lines]
else:
raw_stdout, raw_stderr = await proc.communicate()
stdout = raw_stdout.decode("utf8")
stderr = raw_stderr.decode("utf8")
                # The documentation says the return code for signal termination
                # should be negative, but it can also come back as > 127
                # (128 + signal number), so normalize that case here.
rc = proc.returncode
if rc > 127:
rc = -(rc - 128)
if rc < 0:
stdout += f"\n\nKILLED WITH SIGNAL {-rc}"
return RunResult(proc.returncode == 0, stdout, stderr, points)
async def __aexit__(self, type, value, traceback):
self.tmpdirHandle.__exit__(type, value, traceback)
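# A minimal driving sketch, assuming a configured config.Course and
# cgroup.SlotManager are available (their construction lives outside this
# module); it only exercises the async context manager and run() defined above.
async def _example_run(course: config.Course, slots: cgroup.SlotManager) -> None:
    async with TestEnvironment(question=None, answer="main = print 42",
                               course=course, slots=slots) as env:
        result = await env.run("extra-option", hint=False)
        print(result.result, result.stdout, sep="\n")
        for entry in result.points:
            print(f"{entry.points}/{entry.out_of}: {entry.comment}")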
# vim: colorcolumn=80 expandtab sw=4 ts=4
| 38.551136
| 83
| 0.531909
|
8c53f1f34059d2351e8ee330de28ca6c6290d923
| 453
|
py
|
Python
|
plotly/validators/histogram2dcontour/marker/_colorsrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/histogram2dcontour/marker/_colorsrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/histogram2dcontour/marker/_colorsrc.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='colorsrc',
parent_name='histogram2dcontour.marker',
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='none',
role='info',
**kwargs
)
| 23.842105
| 67
| 0.604857
|
b19ff01779378100db41b4f84c713750cc497a92
| 5,624
|
py
|
Python
|
downstream/finetune/dataset/video_dataset.py
|
KuangHaofei/SeCo-Sequence-Contrastive-Learning
|
f7de4008011ae78ee02fef921d3d4620ab164d2b
|
[
"MIT"
] | null | null | null |
downstream/finetune/dataset/video_dataset.py
|
KuangHaofei/SeCo-Sequence-Contrastive-Learning
|
f7de4008011ae78ee02fef921d3d4620ab164d2b
|
[
"MIT"
] | null | null | null |
downstream/finetune/dataset/video_dataset.py
|
KuangHaofei/SeCo-Sequence-Contrastive-Learning
|
f7de4008011ae78ee02fef921d3d4620ab164d2b
|
[
"MIT"
] | null | null | null |
import torch.utils.data
import os
import random
import torch
import lmdb
import io
from PIL import Image
class VideoDataset(torch.utils.data.Dataset):
def __init__(self, list_file, transform, root_path, clip_length=1, num_steps=1, num_segments=1, num_channels=3,
format="LMDB"):
super(VideoDataset, self).__init__()
self.list_file = list_file
self.transform = transform
self.root_path = root_path
self.clip_length = clip_length
self.num_steps = num_steps
self.num_segments = num_segments
self.num_channels = num_channels
self.format = format
self.samples = self._load_list(list_file)
def _load_list(self, list_root):
with open(list_root, 'r') as f:
samples = f.readlines()
return samples
# def _parse_rgb_lmdb(self, video_path, offsets):
# """Return the clip buffer sample from video lmdb."""
# lmdb_env = lmdb.open(os.path.join(self.root_path, video_path), readonly=True)
#
# with lmdb_env.begin() as lmdb_txn:
# image_list = []
# for offset in offsets:
# for frame_id in range(offset + 1, offset + self.num_steps * self.clip_length + 1, self.num_steps):
# bio = io.BytesIO(lmdb_txn.get('image_{:05d}.jpg'.format(frame_id).encode()))
# image = Image.open(bio).convert('RGB')
# image_list.append(image)
# lmdb_env.close()
# return image_list
def _parse_rgb_lmdb(self, video_path, offsets):
video_path = os.path.join(self.root_path, video_path)
image_list = []
for offset in offsets:
for frame_id in range(offset + 1, offset + self.num_steps * self.clip_length + 1, self.num_steps):
frame_path = os.path.join(video_path, 'img_{:05d}.jpg'.format(frame_id))
image = Image.open(frame_path).convert('RGB')
image_list.append(image)
return image_list
def __len__(self):
return len(self.samples)
def __getitem__(self, item):
raise NotImplementedError
class VideoTrainDataset(VideoDataset):
def _parse_sample_str(self, sample, video_idx):
ss = sample.split(' ')
video_path = sample[:-len(ss[-1]) - 1 - len(ss[-2]) - 1]
duration = int(ss[-2])
label = int(ss[-1][:-1])
# sample frames offsets
offsets = []
length_ext = self.clip_length * self.num_steps
ave_duration = duration // self.num_segments
if ave_duration >= length_ext:
for i in range(self.num_segments):
offsets.append(random.randint(0, ave_duration - length_ext) + i * ave_duration)
else:
if duration >= length_ext:
float_ave_duration = float(duration - length_ext) / float(self.num_segments)
for i in range(self.num_segments):
offsets.append(random.randint(0, int(float_ave_duration)) + int(i * float_ave_duration))
else:
print('{},duration={}, length_ext={}'.format(video_path, duration, length_ext))
raise NotImplementedError
return video_path, offsets, label
class VideoTestDataset(VideoDataset):
def __init__(self, list_file, num_clips, transform, root_path, clip_length=1, num_steps=1, num_segments=1,
num_channels=3, format="LMDB"):
super(VideoTestDataset, self).__init__(list_file, transform, root_path, clip_length, num_steps, num_segments,
num_channels, format)
self.num_clips = num_clips
def __len__(self):
return len(self.samples) * self.num_clips
def _parse_sample_str(self, sample, video_idx, clip_idx):
ss = sample.split(' ')
video_path = sample[:-len(ss[-1]) - 1 - len(ss[-2]) - 1]
duration = int(ss[-2])
label = int(ss[-1][:-1])
# sample frames offsets
offsets = []
length_ext = self.clip_length * self.num_steps
ave_duration = duration // self.num_segments
if ave_duration >= length_ext:
for i in range(self.num_segments):
offsets.append(int(float(ave_duration - length_ext) * clip_idx / self.num_clips) + i * ave_duration)
else:
if duration >= length_ext:
float_ave_duration = float(duration - length_ext) / float(self.num_segments)
for i in range(self.num_segments):
offsets.append(
int(float_ave_duration * clip_idx / self.num_clips) + int(i * float_ave_duration))
else:
raise NotImplementedError
return video_path, offsets, label
class VideoRGBTrainDataset(VideoTrainDataset):
def __getitem__(self, item):
video_path, offsets, label = self._parse_sample_str(self.samples[item], item)
image_list = self._parse_rgb_lmdb(video_path, offsets)
trans_image_list = self.transform(image_list)
return trans_image_list, label, item
class VideoRGBTestDataset(VideoTestDataset):
def __getitem__(self, item):
item_in = item % self.num_clips
item_out = item // self.num_clips
video_path, offsets, label = self._parse_sample_str(self.samples[item_out], item_out, item_in)
        if not os.path.exists(video_path):
raise FileNotFoundError(video_path)
image_list = self._parse_rgb_lmdb(video_path, offsets)
trans_image_list = self.transform(image_list)
return trans_image_list, label
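# A minimal loading sketch, assuming torchvision is installed and that the
# transform receives the list of PIL frames produced by _parse_rgb_lmdb; the
# list file and frame directory below are placeholders, not real paths.
if __name__ == '__main__':
    import torchvision.transforms as T

    def _stack_frames(frames):
        # Convert each PIL frame to a tensor and stack along the time axis.
        to_tensor = T.ToTensor()
        return torch.stack([to_tensor(frame) for frame in frames], dim=0)

    train_set = VideoRGBTrainDataset('train_list.txt', transform=_stack_frames,
                                     root_path='frames/', clip_length=8,
                                     num_steps=2, num_segments=1)
    loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True)
    clips, labels, indices = next(iter(loader))
    print(clips.shape, labels)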
| 39.328671
| 117
| 0.621977
|
8f3fd35fee404e4606b1dc56654c317d04514e6c
| 2,432
|
py
|
Python
|
cogs/help.py
|
MothmanHasLowIQ/Fanfiction-Finder
|
b7bb06dc86429e91b77ec567f63d93abb68275cc
|
[
"MIT"
] | 3
|
2021-06-26T09:37:00.000Z
|
2021-11-14T20:02:59.000Z
|
cogs/help.py
|
MothmanHasLowIQ/Fanfiction-Finder
|
b7bb06dc86429e91b77ec567f63d93abb68275cc
|
[
"MIT"
] | 1
|
2022-03-22T17:16:04.000Z
|
2022-03-22T17:16:04.000Z
|
cogs/help.py
|
MothmanHasLowIQ/Fanfiction-Finder
|
b7bb06dc86429e91b77ec567f63d93abb68275cc
|
[
"MIT"
] | 5
|
2021-06-04T09:14:21.000Z
|
2022-01-29T20:39:03.000Z
|
from discord.ext.commands import command, Cog
import discord
import re
from utils.embed_pages import get_embed
class Help(Cog):
def __init__(self, client):
self.client = client
@command()
async def help(self, ctx):
try:
embed_pg, page_limit = get_embed(0)
message = await ctx.send(embed=embed_pg)
await message.add_reaction('⏮')
await message.add_reaction('◀')
await message.add_reaction('▶')
await message.add_reaction('⏭')
def check(reaction, user):
return user == ctx.author
page = 0
reaction = None
while True:
if str(reaction) == '⏮':
page = 0
embed_pg, page_limit = get_embed(page)
await message.edit(embed=embed_pg)
elif str(reaction) == '◀':
if page > 0:
page -= 1
embed_pg, page_limit = get_embed(page)
await message.edit(embed=embed_pg)
elif str(reaction) == '▶':
if page < page_limit:
page += 1
embed_pg, page_limit = get_embed(page)
await message.edit(embed=embed_pg)
elif str(reaction) == '⏭':
page = page_limit-1
embed_pg, page_limit = get_embed(page)
await message.edit(embed=embed_pg)
reaction, user = await self.client.wait_for('reaction_add', timeout=30.0, check=check)
await message.remove_reaction(reaction, user)
except Exception as error:
if re.search("Missing Permissions", str(error), re.IGNORECASE):
embed = discord.Embed(
description="The bot is not allowed to send messages in that channel. Ask one of the server admins to use the `,allow` command in that channel to enable it."
)
await ctx.author.send(embed=embed)
elif re.search("TimeoutError", str(error), re.IGNORECASE):
pass # ignore Timeout errors
finally:
try:
await message.clear_reactions()
except UnboundLocalError:
pass # ignore this
def setup(client):
client.add_cog(Help(client))
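# A minimal wiring sketch, assuming discord.py 1.x (where load_extension() is
# synchronous); the prefix below is a guess based on the ",allow" command
# mentioned in the help text, and BOT_TOKEN is a placeholder.
if __name__ == '__main__':
    from discord.ext import commands
    bot = commands.Bot(command_prefix=',')
    bot.remove_command('help')        # drop the built-in help so this cog's command is used
    bot.load_extension('cogs.help')
    bot.run('BOT_TOKEN')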
| 35.246377
| 177
| 0.520148
|
f434524c16dfc2999738c1ddfab8c21bd8c15b38
| 4,799
|
py
|
Python
|
.history/src/data/data_20191021130729.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191021130729.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191021130729.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], age_bins = None, drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(
self,
raw_data,
adult_age_threshold_min=13,
age_bins = None,
Xy_age_estimate=None,
drop_columns=None,
):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if age_bins is None:
age_bins = [0,10,20,30, 40, 50, 60, np.inf]
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
        self.drop_columns = drop_columns
        self.raw = raw_data
self.adult_age_threshold_min = adult_age_threshold_min
self.Xy_age_estimate = Xy_age_estimate
self.age_bins = age_bins
self.Xy = self.raw.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
self.extract_cabin_number()
self.extract_cabin_prefix()
self.estimate_age()
self.calc_age_bins()
self.calc_is_child()
self.calc_is_travelling_alone()
def calc_is_travelling_alone(self):
self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
def calc_is_child(self):
self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
def extract_cabin_number(self):
self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")
def extract_cabin_prefix(self):
self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def calc_age_bins(self):
        self.Xy['age_bin'] = pd.cut(self.Xy.age, bins=self.age_bins)
    def clean(self):
        """Drop the "unnecessary" feature columns from the transformed dataframe."""
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)
def estimate_age(self, groupby_columns=["sex", "title"]):
"""[summary]
Keyword Arguments:
groupby {list} -- [description] (default: {['sex','title']})
"""
if self.Xy_age_estimate is None:
Xy_age_estimate = (
self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
)
Xy_age_estimate = Xy_age_estimate.rename(
columns={"age_known": "age_estimate"}
)
out_df = self.Xy.reset_index().merge(Xy_age_estimate, on=groupby_columns)
out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
self.Xy = out_df
self.Xy_age_estimate = Xy_age_estimate
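# A minimal pipeline sketch, assuming a local copy of the Kaggle Titanic
# training CSV; the path below is a placeholder.
if __name__ == '__main__':
    raw = ExtractData('data/raw/train.csv')
    transformed = TransformData(raw, adult_age_threshold_min=13)
    print(transformed.Xy[['title', 'age', 'age_bin', 'is_child',
                          'is_travelling_alone']].head())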
| 29.807453
| 114
| 0.559492
|
4aa04c03c9e5d22d9b129b697f71b213d9fc37ac
| 48,880
|
py
|
Python
|
iot_api_client/api/things_v1_api.py
|
akash73/iot-client-py
|
5335dbaa816fb2d26097f0403d3d51796ebd9d99
|
[
"Apache-2.0"
] | 13
|
2020-01-19T10:54:35.000Z
|
2022-02-27T22:43:21.000Z
|
iot_api_client/api/things_v1_api.py
|
akash73/iot-client-py
|
5335dbaa816fb2d26097f0403d3d51796ebd9d99
|
[
"Apache-2.0"
] | 10
|
2019-11-26T04:39:32.000Z
|
2021-03-25T07:46:39.000Z
|
iot_api_client/api/things_v1_api.py
|
akash73/iot-client-py
|
5335dbaa816fb2d26097f0403d3d51796ebd9d99
|
[
"Apache-2.0"
] | 10
|
2020-01-19T10:54:42.000Z
|
2021-12-09T05:46:20.000Z
|
# coding: utf-8
"""
Iot API
Collection of all public API endpoints. # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from iot_api_client.api_client import ApiClient
from iot_api_client.exceptions import (
ApiTypeError,
ApiValueError
)
class ThingsV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def things_v1_create(self, create_things_v1_payload, **kwargs): # noqa: E501
"""create things_v1 # noqa: E501
Creates a new thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_create(create_things_v1_payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param CreateThingsV1Payload create_things_v1_payload: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_create_with_http_info(create_things_v1_payload, **kwargs) # noqa: E501
def things_v1_create_with_http_info(self, create_things_v1_payload, **kwargs): # noqa: E501
"""create things_v1 # noqa: E501
Creates a new thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_create_with_http_info(create_things_v1_payload, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param CreateThingsV1Payload create_things_v1_payload: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['create_things_v1_payload', 'force'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_create" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'create_things_v1_payload' is set
if ('create_things_v1_payload' not in local_var_params or
local_var_params['create_things_v1_payload'] is None):
raise ApiValueError("Missing the required parameter `create_things_v1_payload` when calling `things_v1_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'force' in local_var_params:
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'create_things_v1_payload' in local_var_params:
body_params = local_var_params['create_things_v1_payload']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json', 'application/vnd.goa.error+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_create_sketch(self, id, thing_sketch, **kwargs): # noqa: E501
"""createSketch things_v1 # noqa: E501
Creates a new sketch thing associated to the thing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_create_sketch(id, thing_sketch, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param ThingSketch thing_sketch: ThingSketchPayload describes a sketch of a thing (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_create_sketch_with_http_info(id, thing_sketch, **kwargs) # noqa: E501
def things_v1_create_sketch_with_http_info(self, id, thing_sketch, **kwargs): # noqa: E501
"""createSketch things_v1 # noqa: E501
Creates a new sketch thing associated to the thing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_create_sketch_with_http_info(id, thing_sketch, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param ThingSketch thing_sketch: ThingSketchPayload describes a sketch of a thing (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'thing_sketch'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_create_sketch" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_create_sketch`") # noqa: E501
# verify the required parameter 'thing_sketch' is set
if ('thing_sketch' not in local_var_params or
local_var_params['thing_sketch'] is None):
raise ApiValueError("Missing the required parameter `thing_sketch` when calling `things_v1_create_sketch`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'thing_sketch' in local_var_params:
body_params = local_var_params['thing_sketch']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json', 'application/vnd.goa.error+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}/sketch', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_delete(self, id, **kwargs): # noqa: E501
"""delete things_v1 # noqa: E501
Removes a thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool force: If true, hard delete the thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_delete_with_http_info(id, **kwargs) # noqa: E501
def things_v1_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""delete things_v1 # noqa: E501
Removes a thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool force: If true, hard delete the thing
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'force'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_delete" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'force' in local_var_params:
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_delete_sketch(self, id, **kwargs): # noqa: E501
"""deleteSketch things_v1 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_delete_sketch(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_delete_sketch_with_http_info(id, **kwargs) # noqa: E501
def things_v1_delete_sketch_with_http_info(self, id, **kwargs): # noqa: E501
"""deleteSketch things_v1 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_delete_sketch_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_delete_sketch" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_delete_sketch`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}/sketch', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_layout(self, id, **kwargs): # noqa: E501
"""layout things_v1 # noqa: E501
Returns the thing requested by the user, without last values data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_layout(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThinglayout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_layout_with_http_info(id, **kwargs) # noqa: E501
def things_v1_layout_with_http_info(self, id, **kwargs): # noqa: E501
"""layout things_v1 # noqa: E501
Returns the thing requested by the user, without last values data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_layout_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThinglayout, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'show_deleted'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_layout" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_layout`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'show_deleted' in local_var_params:
query_params.append(('show_deleted', local_var_params['show_deleted'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thinglayout+json', 'application/vnd.goa.error+json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}/layout', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThinglayout', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_list(self, **kwargs): # noqa: E501
"""list things_v1 # noqa: E501
Returns the list of things associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_list(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool across_user_ids: If true, returns all the things
:param str device_id: The id of the device you want to filter
:param bool show_deleted: If true, shows the soft deleted things
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[ArduinoThing]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_list_with_http_info(**kwargs) # noqa: E501
def things_v1_list_with_http_info(self, **kwargs): # noqa: E501
"""list things_v1 # noqa: E501
Returns the list of things associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool across_user_ids: If true, returns all the things
:param str device_id: The id of the device you want to filter
:param bool show_deleted: If true, shows the soft deleted things
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[ArduinoThing], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['across_user_ids', 'device_id', 'show_deleted'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_list" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'across_user_ids' in local_var_params:
query_params.append(('across_user_ids', local_var_params['across_user_ids'])) # noqa: E501
if 'device_id' in local_var_params:
query_params.append(('device_id', local_var_params['device_id'])) # noqa: E501
if 'show_deleted' in local_var_params:
query_params.append(('show_deleted', local_var_params['show_deleted'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json; type=collection']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ArduinoThing]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_show(self, id, **kwargs): # noqa: E501
"""show things_v1 # noqa: E501
Returns the thing requested by the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_show(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_show_with_http_info(id, **kwargs) # noqa: E501
def things_v1_show_with_http_info(self, id, **kwargs): # noqa: E501
"""show things_v1 # noqa: E501
Returns the thing requested by the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_show_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param bool show_deleted: If true, shows the soft deleted thing
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'show_deleted'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_show" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_show`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'show_deleted' in local_var_params:
query_params.append(('show_deleted', local_var_params['show_deleted'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json', 'application/vnd.goa.error+json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_update(self, id, thing, **kwargs): # noqa: E501
"""update things_v1 # noqa: E501
Updates a thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_update(id, thing, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param Thing thing: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_update_with_http_info(id, thing, **kwargs) # noqa: E501
def things_v1_update_with_http_info(self, id, thing, **kwargs): # noqa: E501
"""update things_v1 # noqa: E501
Updates a thing associated to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_update_with_http_info(id, thing, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param Thing thing: ThingPayload describes a thing (required)
:param bool force: If true, detach device from the other thing, and attach to this thing
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'thing', 'force'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_update" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_update`") # noqa: E501
# verify the required parameter 'thing' is set
if ('thing' not in local_var_params or
local_var_params['thing'] is None):
raise ApiValueError("Missing the required parameter `thing` when calling `things_v1_update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'force' in local_var_params:
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'thing' in local_var_params:
body_params = local_var_params['thing']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json', 'application/vnd.goa.error+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def things_v1_update_sketch(self, id, sketch_id, **kwargs): # noqa: E501
"""updateSketch things_v1 # noqa: E501
Update an existing thing sketch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_update_sketch(id, sketch_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param str sketch_id: The id of the sketch (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ArduinoThing
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.things_v1_update_sketch_with_http_info(id, sketch_id, **kwargs) # noqa: E501
def things_v1_update_sketch_with_http_info(self, id, sketch_id, **kwargs): # noqa: E501
"""updateSketch things_v1 # noqa: E501
Update an existing thing sketch # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.things_v1_update_sketch_with_http_info(id, sketch_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the thing (required)
:param str sketch_id: The id of the sketch (required)
        :param _return_http_data_only: return the response data only, without
                                       the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ArduinoThing, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'sketch_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method things_v1_update_sketch" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in local_var_params or
local_var_params['id'] is None):
raise ApiValueError("Missing the required parameter `id` when calling `things_v1_update_sketch`") # noqa: E501
# verify the required parameter 'sketch_id' is set
if ('sketch_id' not in local_var_params or
local_var_params['sketch_id'] is None):
raise ApiValueError("Missing the required parameter `sketch_id` when calling `things_v1_update_sketch`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'sketch_id' in local_var_params:
path_params['sketchId'] = local_var_params['sketch_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/vnd.arduino.thing+json', 'application/vnd.goa.error+json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/things/{id}/sketch/{sketchId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArduinoThing', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
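A hedged usage sketch for the generated client methods above, showing both the synchronous call and the async_req=True pattern that the docstrings describe. The package and class names (iot_api_client, Configuration, ApiClient, ThingsV1Api) and the token field are assumptions for illustration, not taken from this file.

from iot_api_client import Configuration, ApiClient, ThingsV1Api  # assumed names

config = Configuration()
config.access_token = "YOUR_OAUTH2_TOKEN"   # matches the 'oauth2' auth setting above
api = ThingsV1Api(ApiClient(config))

# Synchronous call: returns an ArduinoThing directly
thing = api.things_v1_show("THING_ID")

# Asynchronous call: returns a thread-like handle; .get() blocks for the result
thread = api.things_v1_show("THING_ID", async_req=True)
thing = thread.get()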
| 45.93985
| 138
| 0.606976
|
01f8a7c691e96edb5d476d1cebc6eb27e149e7a4
| 1,834
|
py
|
Python
|
ivi/chroma/chroma62006p3008.py
|
elopezga/ErrorRate
|
4f2be815f1e88ea832c8e7f0ddd6f922a0b9cdc1
|
[
"MIT"
] | 3
|
2016-04-28T10:08:12.000Z
|
2021-07-28T22:45:23.000Z
|
ivi/chroma/chroma62006p3008.py
|
elopezga/ErrorRate
|
4f2be815f1e88ea832c8e7f0ddd6f922a0b9cdc1
|
[
"MIT"
] | null | null | null |
ivi/chroma/chroma62006p3008.py
|
elopezga/ErrorRate
|
4f2be815f1e88ea832c8e7f0ddd6f922a0b9cdc1
|
[
"MIT"
] | 2
|
2017-07-22T09:13:41.000Z
|
2020-02-06T18:46:36.000Z
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .chroma62000p import *
class chroma62006p3008(chroma62000p):
"Chroma ATE 62006P-300-8 series IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '62006P-300-8')
super(chroma62006p3008, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P300V': (300.0, 8.0)
},
'ovp_max': 330.0,
'ocp_max': 8.8,
'voltage_max': 300.0,
'current_max': 8.0
}
]
self._init_outputs()
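A minimal, hedged example of bringing this driver up with python-ivi. The resource string and the dcpwr-style output attributes (voltage_level, current_limit, enabled) follow the usual python-ivi power-supply interface inherited from chroma62000p; treat them as assumptions rather than a verified API for this exact model.

import ivi

# Connect over VISA/VXI-11; the address is illustrative only
psu = ivi.chroma.chroma62006p3008("TCPIP0::192.168.1.50::INSTR")

out = psu.outputs[0]          # single output, per _output_count above
out.voltage_level = 48.0      # within the 300 V / 8 A spec declared above
out.current_limit = 2.0
out.enabled = True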
| 33.345455
| 77
| 0.672301
|
eb1eb4070d905dda3a4952fb04f7bb724bc3afce
| 514
|
py
|
Python
|
Data Science With Python/16-statistical-thinking-in-python-(part-2)/2-bootstrap-confidence=intervals/confidence-interval-on-the-rate-of-no-hitters.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | 3
|
2019-05-12T04:49:24.000Z
|
2020-05-06T00:40:28.000Z
|
Data Science With Python/16-statistical-thinking-in-python-(part-2)/2-bootstrap-confidence=intervals/confidence-interval-on-the-rate-of-no-hitters.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | null | null | null |
Data Science With Python/16-statistical-thinking-in-python-(part-2)/2-bootstrap-confidence=intervals/confidence-interval-on-the-rate-of-no-hitters.py
|
aimanahmedmoin1997/DataCamp
|
c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d
|
[
"MIT"
] | 7
|
2018-11-06T17:43:31.000Z
|
2020-11-07T21:08:16.000Z
|
# Note: draw_bs_reps() and nohitter_times are assumed to be preloaded by the
# exercise environment; a compatible sketch of draw_bs_reps() follows the plot code.
import numpy as np
import matplotlib.pyplot as plt
# Draw bootstrap replicates of the mean no-hitter time (equal to tau): bs_replicates
bs_replicates = draw_bs_reps(nohitter_times, np.mean, size=10000)
# Compute the 95% confidence interval: conf_int
conf_int = np.percentile(bs_replicates, [2.5, 97.5])
# Print the confidence interval
print('95% confidence interval =', conf_int, 'games')
# Plot the histogram of the replicates
_ = plt.hist(bs_replicates, bins=50, density=True)  # 'normed' was removed in Matplotlib 3.x; 'density' is the replacement
_ = plt.xlabel(r'$\tau$ (games)')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
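As noted above, the exercise assumes draw_bs_reps() is preloaded. Below is a minimal sketch of a compatible bootstrap helper; the name and signature mirror the call above, but the body is an illustration, not the course's official implementation.

def draw_bs_reps(data, func, size=1):
    """Draw `size` bootstrap replicates of statistic `func` applied to `data`."""
    reps = np.empty(size)
    for i in range(size):
        # Resample with replacement, then apply the summary statistic
        reps[i] = func(np.random.choice(data, size=len(data)))
    return reps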
| 30.235294
| 84
| 0.737354
|
6d4c97fd2f8a537d9195a4fffa77d31cad62e60a
| 1,746
|
py
|
Python
|
booking/urls.py
|
studentisgss/booking
|
e0e28f42cf2a466688b4ea3787eb28dbc0980cac
|
[
"MIT"
] | 7
|
2015-12-11T19:18:39.000Z
|
2020-10-30T12:50:19.000Z
|
booking/urls.py
|
studentisgss/booking
|
e0e28f42cf2a466688b4ea3787eb28dbc0980cac
|
[
"MIT"
] | 119
|
2015-11-03T22:21:09.000Z
|
2021-03-17T21:36:49.000Z
|
booking/urls.py
|
studentisgss/booking
|
e0e28f42cf2a466688b4ea3787eb28dbc0980cac
|
[
"MIT"
] | null | null | null |
"""booking URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, handler404, handler500
from django.contrib import admin
from activities import urls as activities_urls
from authentication import urls as auth_urls
from base import urls as base_urls
from events import urls as events_urls
from news import urls as news_urls
from rooms import urls as rooms_urls
from brochure import urls as brochure_urls
from attendances import urls as attendances_url
urlpatterns = [
# Admin site
url(r'^admin/', admin.site.urls),
url(r'^activities/', include(activities_urls, namespace="activities")),
url(r'^events/', include(events_urls, namespace="events")),
url(r'^news/', include(news_urls, namespace="news")),
url(r'^rooms/', include(rooms_urls, namespace="rooms")),
url(r'^auth/', include(auth_urls, namespace="auth")),
url(r'^brochure/', include(brochure_urls, namespace="brochure")),
url(r'^attendances/', include(attendances_url, namespace="attendances")),
# If none of the above urls matches, then
url(r'', include(base_urls)),
]
handler404 = "base.views.page_not_found"
handler500 = "base.views.server_error"
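For reference, a hedged sketch of what one of the included app URLconfs (e.g. the catch-all base app) could look like; the view names are hypothetical. Note that on newer Django versions, include(..., namespace=...) also requires app_name to be set in the included module (or a 2-tuple to be passed).

# base/urls.py (hypothetical sketch)
from django.conf.urls import url
from base import views

app_name = "base"

urlpatterns = [
    url(r'^$', views.home, name='home'),
]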
| 38.8
| 77
| 0.727377
|
5b003d1e411e43ca58a3bc271c3c3d159726fd5e
| 8,018
|
py
|
Python
|
tests/hazmat/primitives/test_ed25519.py
|
dvaerum/cryptography
|
63dfc57fca688d0f8d0515001f249c317d5e54dc
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 8
|
2015-01-29T19:16:40.000Z
|
2021-01-08T05:55:03.000Z
|
tests/hazmat/primitives/test_ed25519.py
|
dvaerum/cryptography
|
63dfc57fca688d0f8d0515001f249c317d5e54dc
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 12
|
2021-01-05T06:46:37.000Z
|
2022-03-30T19:06:26.000Z
|
tests/hazmat/primitives/test_ed25519.py
|
dvaerum/cryptography
|
63dfc57fca688d0f8d0515001f249c317d5e54dc
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2015-11-06T01:47:01.000Z
|
2021-12-01T00:22:52.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.exceptions import InvalidSignature, _Reasons
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from ...utils import load_vectors_from_file, raises_unsupported_algorithm
def load_ed25519_vectors(vector_data):
"""
djb's ed25519 vectors are structured as a colon delimited array:
0: secret key (32 bytes) + public key (32 bytes)
1: public key (32 bytes)
2: message (0+ bytes)
3: signature + message (64+ bytes)
"""
data = []
for line in vector_data:
secret_key, public_key, message, signature, _ = line.split(":")
secret_key = secret_key[0:64]
signature = signature[0:128]
data.append(
{
"secret_key": secret_key,
"public_key": public_key,
"message": message,
"signature": signature,
}
)
return data
@pytest.mark.supported(
only_if=lambda backend: not backend.ed25519_supported(),
skip_message="Requires OpenSSL without Ed25519 support",
)
def test_ed25519_unsupported(backend):
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
Ed25519PublicKey.from_public_bytes(b"0" * 32)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
Ed25519PrivateKey.from_private_bytes(b"0" * 32)
with raises_unsupported_algorithm(
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
Ed25519PrivateKey.generate()
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
class TestEd25519Signing(object):
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("asymmetric", "Ed25519", "sign.input"),
load_ed25519_vectors,
),
)
def test_sign_verify_input(self, vector, backend):
sk = binascii.unhexlify(vector["secret_key"])
pk = binascii.unhexlify(vector["public_key"])
message = binascii.unhexlify(vector["message"])
signature = binascii.unhexlify(vector["signature"])
private_key = Ed25519PrivateKey.from_private_bytes(sk)
computed_sig = private_key.sign(message)
assert computed_sig == signature
public_key = private_key.public_key()
assert (
public_key.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
== pk
)
public_key.verify(signature, message)
def test_invalid_signature(self, backend):
key = Ed25519PrivateKey.generate()
signature = key.sign(b"test data")
with pytest.raises(InvalidSignature):
key.public_key().verify(signature, b"wrong data")
with pytest.raises(InvalidSignature):
key.public_key().verify(b"0" * 64, b"test data")
def test_generate(self, backend):
key = Ed25519PrivateKey.generate()
assert key
assert key.public_key()
def test_load_public_bytes(self, backend):
public_key = Ed25519PrivateKey.generate().public_key()
public_bytes = public_key.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
public_key2 = Ed25519PublicKey.from_public_bytes(public_bytes)
assert public_bytes == public_key2.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
def test_invalid_type_public_bytes(self, backend):
with pytest.raises(TypeError):
Ed25519PublicKey.from_public_bytes(object())
def test_invalid_type_private_bytes(self, backend):
with pytest.raises(TypeError):
Ed25519PrivateKey.from_private_bytes(object())
def test_invalid_length_from_public_bytes(self, backend):
with pytest.raises(ValueError):
Ed25519PublicKey.from_public_bytes(b"a" * 31)
with pytest.raises(ValueError):
Ed25519PublicKey.from_public_bytes(b"a" * 33)
def test_invalid_length_from_private_bytes(self, backend):
with pytest.raises(ValueError):
Ed25519PrivateKey.from_private_bytes(b"a" * 31)
with pytest.raises(ValueError):
Ed25519PrivateKey.from_private_bytes(b"a" * 33)
def test_invalid_private_bytes(self, backend):
key = Ed25519PrivateKey.generate()
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
None,
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.PKCS8,
None,
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
def test_invalid_public_bytes(self, backend):
key = Ed25519PrivateKey.generate().public_key()
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.PKCS1
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.Raw
)
@pytest.mark.parametrize(
("encoding", "fmt", "encryption", "passwd", "load_func"),
[
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_der_private_key,
),
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_der_private_key,
),
],
)
def test_round_trip_private_serialization(
self, encoding, fmt, encryption, passwd, load_func, backend
):
key = Ed25519PrivateKey.generate()
serialized = key.private_bytes(encoding, fmt, encryption)
loaded_key = load_func(serialized, passwd, backend)
assert isinstance(loaded_key, Ed25519PrivateKey)
def test_buffer_protocol(self, backend):
private_bytes = os.urandom(32)
key = Ed25519PrivateKey.from_private_bytes(bytearray(private_bytes))
assert (
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
== private_bytes
)
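Outside of the test harness, the API under test boils down to a few calls; a minimal sign/verify example with the cryptography package:

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
message = b"hello ed25519"
signature = private_key.sign(message)

public_key = private_key.public_key()
public_key.verify(signature, message)          # returns None on success

try:
    public_key.verify(signature, b"tampered")  # wrong message
except InvalidSignature:
    print("signature rejected as expected")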
| 34.412017
| 79
| 0.628336
|
74ba319c5289d5da4465bc8f51045779605c2178
| 9,930
|
py
|
Python
|
Mac/Lib/lib-scriptpackages/StdSuites/QuickDraw_Graphics_Suite.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | 3
|
2022-01-30T20:08:24.000Z
|
2022-02-12T08:51:12.000Z
|
Mac/Lib/lib-scriptpackages/StdSuites/QuickDraw_Graphics_Suite.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Mac/Lib/lib-scriptpackages/StdSuites/QuickDraw_Graphics_Suite.py
|
SaadBazaz/ChinesePython
|
800955539dda912d4a1621bcf5a700aaaddc012f
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
"""Suite QuickDraw Graphics Suite: A set of basic classes for graphics
Level 1, version 1
Generated from Macintosh HD:Systeemmap:Extensies:AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'qdrw'
class QuickDraw_Graphics_Suite_Events:
pass
class arc(aetools.ComponentItem):
"""arc - An arc """
want = 'carc'
class arc_angle(aetools.NProperty):
"""arc angle - the angle of the arc in degrees """
which = 'parc'
want = 'fixd'
class bounds(aetools.NProperty):
"""bounds - the smallest rectangle that contains the entire arc """
which = 'pbnd'
want = 'qdrt'
class definition_rect(aetools.NProperty):
"""definition rect - the rectangle that contains the circle or oval used to define the arc """
which = 'pdrt'
want = 'qdrt'
class fill_color(aetools.NProperty):
"""fill color - the fill color """
which = 'flcl'
want = 'cRGB'
class fill_pattern(aetools.NProperty):
"""fill pattern - the fill pattern """
which = 'flpt'
want = 'cpix'
class pen_color(aetools.NProperty):
"""pen color - the pen color """
which = 'ppcl'
want = 'cRGB'
class pen_pattern(aetools.NProperty):
"""pen pattern - the pen pattern """
which = 'pppa'
want = 'cpix'
class pen_width(aetools.NProperty):
"""pen width - the pen width """
which = 'ppwd'
want = 'shor'
class start_angle(aetools.NProperty):
"""start angle - the angle that defines the start of the arc, in degrees """
which = 'pang'
want = 'fixd'
class transfer_mode(aetools.NProperty):
"""transfer mode - the transfer mode """
which = 'pptm'
want = 'tran'
arcs = arc
class drawing_area(aetools.ComponentItem):
"""drawing area - Container for graphics and supporting information """
want = 'cdrw'
class background_color(aetools.NProperty):
"""background color - the color used to fill in unoccupied areas """
which = 'pbcl'
want = 'cRGB'
class background_pattern(aetools.NProperty):
"""background pattern - the pattern used to fill in unoccupied areas """
which = 'pbpt'
want = 'cpix'
class color_table(aetools.NProperty):
"""color table - the color table """
which = 'cltb'
want = 'clrt'
class ordering(aetools.NProperty):
"""ordering - the ordered list of graphic objects in the drawing area """
which = 'gobs'
want = 'obj '
class name(aetools.NProperty):
"""name - the name """
which = 'pnam'
want = 'itxt'
class default_location(aetools.NProperty):
"""default location - the default location of each new graphic object """
which = 'pnel'
want = 'QDpt'
class pixel_depth(aetools.NProperty):
"""pixel depth - the number of bits per pixel """
which = 'pdpt'
want = 'shor'
class writing_code(aetools.NProperty):
"""writing code - the script system and language of text objects in the drawing area """
which = 'psct'
want = 'intl'
class text_color(aetools.NProperty):
"""text color - the default color for text objects """
which = 'ptxc'
want = 'cRGB'
class default_font(aetools.NProperty):
"""default font - the name of the default font for text objects """
which = 'ptxf'
want = 'itxt'
class default_size(aetools.NProperty):
"""default size - the default size for text objects """
which = 'ptps'
want = 'fixd'
class style(aetools.NProperty):
"""style - the default text style for text objects """
which = 'txst'
want = 'tsty'
class update_on_change(aetools.NProperty):
"""update on change - Redraw after each change? """
which = 'pupd'
want = 'bool'
drawing_areas = drawing_area
class graphic_line(aetools.ComponentItem):
"""graphic line - A graphic line """
want = 'glin'
class start_point(aetools.NProperty):
"""start point - the starting point of the line """
which = 'pstp'
want = 'QDpt'
class end_point(aetools.NProperty):
"""end point - the ending point of the line """
which = 'pend'
want = 'QDpt'
class dash_style(aetools.NProperty):
"""dash style - the dash style """
which = 'pdst'
want = 'tdas'
class arrow_style(aetools.NProperty):
"""arrow style - the arrow style """
which = 'arro'
want = 'arro'
graphic_lines = graphic_line
class graphic_object(aetools.ComponentItem):
"""graphic object - A graphic object """
want = 'cgob'
graphic_objects = graphic_object
class graphic_shape(aetools.ComponentItem):
"""graphic shape - A graphic shape """
want = 'cgsh'
graphic_shapes = graphic_shape
class graphic_text(aetools.ComponentItem):
"""graphic text - A series of characters within a drawing area """
want = 'cgtx'
class color(aetools.NProperty):
"""color - the color of the first character """
which = 'colr'
want = 'cRGB'
class font(aetools.NProperty):
"""font - the name of the font of the first character """
which = 'font'
want = 'ctxt'
class size(aetools.NProperty):
"""size - the size in points of the first character """
which = 'ptsz'
want = 'fixd'
class uniform_styles(aetools.NProperty):
"""uniform styles - the text styles that are uniform throughout the text """
which = 'ustl'
want = 'tsty'
class graphic_group(aetools.ComponentItem):
"""graphic group - Group of graphics """
want = 'cpic'
graphic_groups = graphic_group
class oval(aetools.ComponentItem):
"""oval - An oval """
want = 'covl'
ovals = oval
class pixel(aetools.ComponentItem):
"""pixel - A pixel """
want = 'cpxl'
# repeated property color the color
pixels = pixel
class pixel_map(aetools.ComponentItem):
"""pixel map - A pixel map """
want = 'cpix'
pixel_maps = pixel_map
class polygon(aetools.ComponentItem):
"""polygon - A polygon """
want = 'cpgn'
class point_list(aetools.NProperty):
"""point list - the list of points that define the polygon """
which = 'ptlt'
want = 'QDpt'
polygons = polygon
class rectangle(aetools.ComponentItem):
"""rectangle - A rectangle """
want = 'crec'
rectangles = rectangle
class rounded_rectangle(aetools.ComponentItem):
"""rounded rectangle - A rounded rectangle """
want = 'crrc'
class corner_curve_height(aetools.NProperty):
"""corner curve height - the height of the oval used to define the shape of the rounded corners """
which = 'pchd'
want = 'shor'
class corner_curve_width(aetools.NProperty):
"""corner curve width - the width of the oval used to define the shape of the rounded corners """
which = 'pcwd'
want = 'shor'
rounded_rectangles = rounded_rectangle
arc._propdict = {
'arc_angle' : arc_angle,
'bounds' : bounds,
'definition_rect' : definition_rect,
'fill_color' : fill_color,
'fill_pattern' : fill_pattern,
'pen_color' : pen_color,
'pen_pattern' : pen_pattern,
'pen_width' : pen_width,
'start_angle' : start_angle,
'transfer_mode' : transfer_mode,
}
arc._elemdict = {
}
drawing_area._propdict = {
'background_color' : background_color,
'background_pattern' : background_pattern,
'color_table' : color_table,
'ordering' : ordering,
'name' : name,
'default_location' : default_location,
'pixel_depth' : pixel_depth,
'writing_code' : writing_code,
'text_color' : text_color,
'default_font' : default_font,
'default_size' : default_size,
'style' : style,
'update_on_change' : update_on_change,
}
drawing_area._elemdict = {
}
graphic_line._propdict = {
'start_point' : start_point,
'end_point' : end_point,
'dash_style' : dash_style,
'arrow_style' : arrow_style,
}
graphic_line._elemdict = {
}
graphic_object._propdict = {
}
graphic_object._elemdict = {
}
graphic_shape._propdict = {
}
graphic_shape._elemdict = {
}
graphic_text._propdict = {
'color' : color,
'font' : font,
'size' : size,
'uniform_styles' : uniform_styles,
}
graphic_text._elemdict = {
}
graphic_group._propdict = {
}
graphic_group._elemdict = {
}
oval._propdict = {
}
oval._elemdict = {
}
pixel._propdict = {
'color' : color,
}
pixel._elemdict = {
}
pixel_map._propdict = {
}
pixel_map._elemdict = {
}
polygon._propdict = {
'point_list' : point_list,
}
polygon._elemdict = {
}
rectangle._propdict = {
}
rectangle._elemdict = {
}
rounded_rectangle._propdict = {
'corner_curve_height' : corner_curve_height,
'corner_curve_width' : corner_curve_width,
}
rounded_rectangle._elemdict = {
}
_Enum_tran = {
'copy_pixels' : 'cpy ', #
'not_copy_pixels' : 'ncpy', #
'or_pixels' : 'or ', #
'not_or_pixels' : 'ntor', #
'bic_pixels' : 'bic ', #
'not_bic_pixels' : 'nbic', #
'xor_pixels' : 'xor ', #
'not_xor_pixels' : 'nxor', #
'add_over_pixels' : 'addo', #
'add_pin_pixels' : 'addp', #
'sub_over_pixels' : 'subo', #
'sub_pin_pixels' : 'subp', #
'ad_max_pixels' : 'admx', #
'ad_min_pixels' : 'admn', #
'blend_pixels' : 'blnd', #
}
_Enum_arro = {
'no_arrow' : 'arno', # No arrow on line
'arrow_at_start' : 'arst', # Arrow at start of line
'arrow_at_end' : 'aren', # Arrow at end of line
'arrow_at_both_ends' : 'arbo', # Arrow at both the start and the end of the line
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'crec' : rectangle,
'cpix' : pixel_map,
'carc' : arc,
'cgsh' : graphic_shape,
'cpxl' : pixel,
'crrc' : rounded_rectangle,
'cpgn' : polygon,
'cdrw' : drawing_area,
'cgob' : graphic_object,
'glin' : graphic_line,
'cgtx' : graphic_text,
'covl' : oval,
'cpic' : graphic_group,
}
_propdeclarations = {
'pend' : end_point,
'pupd' : update_on_change,
'pstp' : start_point,
'pdrt' : definition_rect,
'pnam' : name,
'pbcl' : background_color,
'pptm' : transfer_mode,
'pnel' : default_location,
'pdpt' : pixel_depth,
'gobs' : ordering,
'ustl' : uniform_styles,
'ptlt' : point_list,
'pdst' : dash_style,
'psct' : writing_code,
'txst' : style,
'font' : font,
'pchd' : corner_curve_height,
'arro' : arrow_style,
'ppwd' : pen_width,
'ptps' : default_size,
'ppcl' : pen_color,
'ptxf' : default_font,
'pcwd' : corner_curve_width,
'ptxc' : text_color,
'cltb' : color_table,
'pppa' : pen_pattern,
'pang' : start_angle,
'flpt' : fill_pattern,
'colr' : color,
'pbnd' : bounds,
'ptsz' : size,
'parc' : arc_angle,
'flcl' : fill_color,
'pbpt' : background_pattern,
}
_compdeclarations = {
}
_enumdeclarations = {
'tran' : _Enum_tran,
'arro' : _Enum_arro,
}
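The declaration tables at the bottom are what aetools uses to turn AppleEvent four-character codes back into the classes, properties and enumerations defined above. A quick illustration using only the dictionaries in this module:

# Map 4-char AppleEvent codes back to the Python declarations above
assert _classdeclarations['crrc'] is rounded_rectangle
assert _propdeclarations['pchd'] is corner_curve_height
assert _enumdeclarations['arro']['arrow_at_end'] == 'aren'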
| 24.458128
| 100
| 0.693958
|
43d87902fd4622536b4111b8bde9de7e9702dda8
| 2,806
|
py
|
Python
|
numba/core/options.py
|
ashrielbrian/numba
|
2558320dc4ae9d2ecea423f8ab24813d7461bb84
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 10
|
2019-04-27T08:45:34.000Z
|
2021-06-21T22:08:54.000Z
|
numba/core/options.py
|
ashrielbrian/numba
|
2558320dc4ae9d2ecea423f8ab24813d7461bb84
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 108
|
2020-08-17T22:38:26.000Z
|
2021-12-06T09:44:14.000Z
|
numba/core/options.py
|
ashrielbrian/numba
|
2558320dc4ae9d2ecea423f8ab24813d7461bb84
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 10
|
2019-07-20T02:16:10.000Z
|
2021-06-22T06:56:23.000Z
|
"""
Target Options
"""
import operator
from numba.core import config, utils
from numba.core.targetconfig import TargetConfig, Option
class TargetOptions:
"""Target options maps user options from decorators to the
``numba.core.compiler.Flags`` used by lowering and target context.
"""
class Mapping:
def __init__(self, flag_name, apply=lambda x: x):
self.flag_name = flag_name
self.apply = apply
def finalize(self, flags, options):
"""Subclasses can override this method to make target specific
customizations of default flags.
Parameters
----------
flags : Flags
options : dict
"""
pass
@classmethod
def parse_as_flags(cls, flags, options):
"""Parse target options defined in ``options`` and set ``flags``
accordingly.
Parameters
----------
flags : Flags
options : dict
"""
opt = cls()
opt._apply(flags, options)
opt.finalize(flags, options)
return flags
def _apply(self, flags, options):
# Find all Mapping instances in the class
mappings = {}
cls = type(self)
for k in dir(cls):
v = getattr(cls, k)
if isinstance(v, cls.Mapping):
mappings[k] = v
used = set()
for k, mapping in mappings.items():
if k in options:
v = mapping.apply(options[k])
setattr(flags, mapping.flag_name, v)
used.add(k)
unused = set(options) - used
if unused:
# Unread options?
m = (f"Unrecognized options: {unused}. "
f"Known options are {mappings.keys()}")
raise KeyError(m)
_mapping = TargetOptions.Mapping
class DefaultOptions:
"""Defines how user-level target options are mapped to the target flags.
"""
nopython = _mapping("enable_pyobject", operator.not_)
forceobj = _mapping("force_pyobject")
looplift = _mapping("enable_looplift")
_nrt = _mapping("nrt")
debug = _mapping("debuginfo")
boundscheck = _mapping("boundscheck")
nogil = _mapping("release_gil")
no_rewrites = _mapping("no_rewrites")
no_cpython_wrapper = _mapping("no_cpython_wrapper")
no_cfunc_wrapper = _mapping("no_cfunc_wrapper")
parallel = _mapping("auto_parallel")
fastmath = _mapping("fastmath")
error_model = _mapping("error_model")
inline = _mapping("inline")
def include_default_options(*args):
"""Returns a mixin class with a subset of the options
Parameters
----------
*args : str
Option names to include.
"""
glbs = {k: getattr(DefaultOptions, k) for k in args}
return type("OptionMixins", (), glbs)
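A hedged sketch of how these pieces compose: a target-options subclass mixes in a subset of the default options, can add its own Mapping entries, and parse_as_flags() then copies the recognised user options onto a flags object. The SimpleFlags stand-in and the extra option are illustrative only; real targets use numba.core.compiler.Flags.

class SimpleFlags:
    """Stand-in for numba.core.compiler.Flags; it just accepts attributes."""
    pass

class MyTargetOptions(TargetOptions, include_default_options("nopython", "fastmath")):
    # Hypothetical extra user option mapped onto a flag named "my_flag"
    my_option = TargetOptions.Mapping("my_flag")

flags = MyTargetOptions.parse_as_flags(
    SimpleFlags(), {"nopython": True, "fastmath": True, "my_option": 3}
)
# nopython maps to enable_pyobject via operator.not_, so:
# flags.enable_pyobject == False, flags.fastmath == True, flags.my_flag == 3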
| 26.980769
| 76
| 0.596579
|
87b67befbe06b91307c3cc00002585cd3b70e2a3
| 749
|
py
|
Python
|
my/src/Flask.py
|
qq57694878/pychonws
|
cfad3d94e251db35c0f3485bc7231cc9de999913
|
[
"Apache-2.0"
] | 21
|
2020-02-04T03:18:09.000Z
|
2021-06-04T03:27:09.000Z
|
1-learn-python/app.py
|
forzhr/hydrus
|
1dfd88eb99bf738eb4446920631ae8a0fd869210
|
[
"MIT"
] | null | null | null |
1-learn-python/app.py
|
forzhr/hydrus
|
1dfd88eb99bf738eb4446920631ae8a0fd869210
|
[
"MIT"
] | 12
|
2019-11-05T03:37:06.000Z
|
2021-04-27T12:04:59.000Z
|
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return '<h1>Home</h1>'
@app.route('/signin', methods=['GET'])
def signin_form():
return '''<form action="/signin" method="post">
<p><input name="username"></p>
<p><input name="password" type="password"></p>
<p><button type="submit">Sign In</button></p>
</form>'''
@app.route('/signin', methods=['POST'])
def signin():
    # Read the form fields from the request object:
    if request.form['username'] == 'admin' and request.form['password'] == 'password':
return '<h3>Hello, admin!</h3>'
return '<h3>Bad username or password.</h3>'
if __name__ == '__main__':
app.run()
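The routes above can be exercised without starting a server by using Flask's built-in test client:

with app.test_client() as client:
    ok = client.post('/signin', data={'username': 'admin', 'password': 'password'})
    assert b'Hello, admin!' in ok.data

    bad = client.post('/signin', data={'username': 'admin', 'password': 'wrong'})
    assert b'Bad username or password.' in bad.data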
| 28.807692
| 82
| 0.58478
|
1e300130b17f65c250ecf4ffec444038f4e9ece7
| 4,814
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/t_systems_mms/icinga_director/plugins/modules/icinga_timeperiod.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/t_systems_mms/icinga_director/plugins/modules/icinga_timeperiod.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/t_systems_mms/icinga_director/plugins/modules/icinga_timeperiod.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 T-Systems Multimedia Solutions GmbH
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: icinga_timeperiod
short_description: Manage timeperiods in Icinga2
description:
- Add or remove a timeperiod to Icinga2 through the director API.
author: Sebastian Gumprich (@rndmh3ro)
extends_documentation_fragment:
- ansible.builtin.url
- t_systems_mms.icinga_director.common_options
version_added: '1.0.0'
notes:
- This module supports check mode.
options:
state:
description:
- Apply feature state.
choices: [ "present", "absent" ]
default: present
type: str
object_name:
description:
- Name of the time period.
aliases: ['name']
required: true
type: str
display_name:
description:
- Alternative name for this timeperiod.
type: str
imports:
description:
- Importable templates, add as many as you want.
- Please note that order matters when importing properties from multiple templates - last one wins.
type: list
elements: str
ranges:
description:
- A dict of days and timeperiods.
type: dict
append:
description:
- Do not overwrite the whole object but instead append the defined properties.
- Note - Appending to existing vars, imports or any other list/dict is not possible. You have to overwrite the complete list/dict.
- Note - Variables that are set by default will also be applied, even if not set.
type: bool
choices: [True, False]
version_added: '1.25.0'
"""
EXAMPLES = """
- name: Create timeperiod
t_systems_mms.icinga_director.icinga_timeperiod:
state: present
url: "{{ icinga_url }}"
url_username: "{{ icinga_user }}"
url_password: "{{ icinga_pass }}"
object_name: '24/7'
imports:
- "timeperiod_template"
ranges:
monday: "00:00-23:59"
tuesday: "00:00-23:59"
wednesday: "00:00-23:59"
thursday: "00:00-23:59"
friday: "00:00-23:59"
saturday: "00:00-23:59"
sunday: "00:00-23:59"
- name: Update timeperiod
t_systems_mms.icinga_director.icinga_timeperiod:
state: present
url: "{{ icinga_url }}"
url_username: "{{ icinga_user }}"
url_password: "{{ icinga_pass }}"
object_name: '24/7'
display_name: '24/7'
append: true
"""
RETURN = r""" # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible_collections.t_systems_mms.icinga_director.plugins.module_utils.icinga import (
Icinga2APIObject,
)
# ===========================================
# Module execution.
#
def main():
# use the predefined argument spec for url
argument_spec = url_argument_spec()
# add our own arguments
argument_spec.update(
state=dict(default="present", choices=["absent", "present"]),
url=dict(required=True),
append=dict(type="bool", choices=[True, False]),
object_name=dict(required=True, aliases=["name"]),
display_name=dict(required=False),
imports=dict(type="list", elements="str", default=[], required=False),
ranges=dict(type="dict", required=False),
)
# Define the main module
module = AnsibleModule(
argument_spec=argument_spec, supports_check_mode=True
)
data_keys = [
"object_name",
"display_name",
"imports",
"ranges",
]
data = {}
if module.params["append"]:
for k in data_keys:
if module.params[k]:
data[k] = module.params[k]
else:
for k in data_keys:
data[k] = module.params[k]
data["object_type"] = "object"
icinga_object = Icinga2APIObject(
module=module, path="/timeperiod", data=data
)
changed, diff = icinga_object.update(module.params["state"])
module.exit_json(
changed=changed,
diff=diff,
)
# import module snippets
if __name__ == "__main__":
main()
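For quick local testing during development, an AnsibleModule-based file like this one can be fed a JSON arguments file directly (the standard ANSIBLE_MODULE_ARGS pattern from the Ansible developer guide). The URL and credentials below are placeholders; this is a sketch, not part of the collection.

import json, subprocess, tempfile

module_args = {"ANSIBLE_MODULE_ARGS": {
    "state": "present",
    "url": "https://icinga.example.com",   # placeholder
    "url_username": "admin",
    "url_password": "secret",
    "object_name": "24/7",
    "ranges": {"monday": "00:00-23:59"},
}}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(module_args, fh)

# Runs the module standalone; it prints a JSON result and exits.
subprocess.run(["python", "icinga_timeperiod.py", fh.name])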
| 28.654762
| 136
| 0.664936
|
84477b672fbe81d7c1293cb41259193d1e7c6170
| 9,099
|
py
|
Python
|
kaznet/apps/ona/tasks.py
|
onaio/kaznet-web
|
53da428d5e62e40c5d38d84c3f3ee06071dd779b
|
[
"Apache-2.0"
] | 1
|
2019-10-03T12:18:15.000Z
|
2019-10-03T12:18:15.000Z
|
kaznet/apps/ona/tasks.py
|
onaio/kaznet-web
|
53da428d5e62e40c5d38d84c3f3ee06071dd779b
|
[
"Apache-2.0"
] | 63
|
2018-10-31T12:57:36.000Z
|
2020-04-27T20:18:22.000Z
|
kaznet/apps/ona/tasks.py
|
onaio/kaznet-web
|
53da428d5e62e40c5d38d84c3f3ee06071dd779b
|
[
"Apache-2.0"
] | 3
|
2019-08-05T09:10:03.000Z
|
2020-01-05T21:24:49.000Z
|
"""
Celery tasks module for Ona app
"""
from datetime import timedelta
from time import sleep
from urllib.parse import urljoin
from celery import task as celery_task
from celery.utils.log import get_task_logger
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.urls import reverse
from django.utils import timezone
from kaznet.apps.main.models import Task
from kaznet.apps.ona.api import (create_filtered_data_sets,
create_form_webhook, fetch_missing_instances,
get_and_process_xforms, get_projects,
process_projects, sync_deleted_instances,
sync_deleted_projects, sync_deleted_xforms,
sync_updated_instances,
update_user_profile_metadata)
from kaznet.apps.ona.models import XForm, Instance
from kaznet.apps.ona.utils import check_if_users_can_submit_to_form
from kaznet.apps.ona.api import sync_submission_review
from kaznet.apps.ona.api import convert_kaznet_to_ona_submission_status
logger = get_task_logger(__name__) # pylint: disable=invalid-name
@celery_task(name="task_fetch_projects") # pylint: disable=not-callable
def task_fetch_projects(username: str):
"""
Fetches and processes projects from Onadata
"""
# get the projects from Onadata's API
projects = get_projects(username=username)
# save the projects locally
process_projects(projects)
# go through each project and process its forms
for project in projects:
project_forms = project.get('forms')
project_id = project.get('projectid')
if project_forms and project_id:
task_process_project_xforms.delay(
forms=project_forms, project_id=int(project_id))
sleep(0.1) # to avoid overload on onadata API
# pylint: disable=not-callable
@celery_task(name="task_process_project_xforms")
def task_process_project_xforms(forms: list, project_id: int):
"""
    Simply processes the XForms contained in the project response
from Onadata
"""
get_and_process_xforms(forms_data=forms, project_id=project_id)
# pylint: disable=not-callable
@celery_task(name="task_fetch_form_missing_instances")
def task_fetch_form_missing_instances(xform_id: int):
"""
Gets and processes instances from Onadata's API
"""
try:
xform = XForm.objects.get(id=xform_id)
except XForm.DoesNotExist: # pylint: disable=no-member
pass
else:
fetch_missing_instances(form_id=xform.ona_pk)
# pylint: disable=not-callable
@celery_task(name="task_sync_form_updated_instances")
def task_sync_form_updated_instances(xform_id: int):
"""
Checks for updated instances for a form and then updates them
"""
try:
xform = XForm.objects.get(id=xform_id)
except XForm.DoesNotExist: # pylint: disable=no-member
pass
else:
sync_updated_instances(form_id=xform.ona_pk)
# pylint: disable=not-callable
@celery_task(name="task_sync_updated_instances")
def task_sync_updated_instances():
"""
Checks for updated instances for all forms and then updates them
"""
xforms = XForm.objects.filter(deleted_at=None)
for xform in xforms:
if xform.has_task:
task = xform.task
if task is not None and task.status == Task.ACTIVE:
task_sync_form_updated_instances.delay(xform_id=xform.id)
# pylint: disable=not-callable
@celery_task(name="task_fetch_missing_instances")
def task_fetch_missing_instances():
"""
Gets and processes instances for all known XForms
"""
forms = XForm.objects.filter(deleted_at=None)
for form in forms:
if form.has_task:
the_task = form.task
if the_task is not None and the_task.status == Task.ACTIVE:
task_fetch_form_missing_instances.delay(xform_id=form.id)
@celery_task(name="task_process_user_profiles") # pylint: disable=not-callable
def task_process_user_profiles():
"""
Process the User Model Objects and Updates All Objects that need
Updating
"""
time = timezone.now() - timedelta(minutes=30)
user_list = User.objects.filter(last_login__gt=time)
for user in user_list:
task_update_user_profile.delay(
ona_username=user.userprofile.ona_username)
@celery_task(name="task_update_user_profile") # pylint: disable=not-callable
def task_update_user_profile(ona_username: str):
"""
Updates Userprofile metadata
"""
update_user_profile_metadata(ona_username)
# pylint: disable=not-callable
@celery_task(name="task_auto_create_filtered_data_sets")
def task_auto_create_filtered_data_sets(
form_id: int, project_id: int, form_title: str):
"""
    Creates filtered data sets for an Ona form
"""
create_filtered_data_sets(
form_id=form_id, project_id=project_id, form_title=form_title)
@celery_task(name="task_task_create_form_webhook")
def task_create_form_webhook(form_id: int):
"""
Creates an Onadata webhook for the form
"""
current_site = Site.objects.get_current()
service_url = urljoin(current_site.domain, reverse('webhook'))
create_form_webhook(
form_id=form_id,
service_url=service_url
)
# pylint: disable=not-callable
@celery_task(name="task_sync_form_deleted_instances")
def task_sync_form_deleted_instances(xform_id: int):
"""
Checks for deleted instances for a form and then syncs them
"""
try:
the_xform = XForm.objects.get(id=xform_id)
except XForm.DoesNotExist: # pylint: disable=no-member
pass
else:
result = sync_deleted_instances(form_id=the_xform.ona_pk)
# pylint: disable=logging-fstring-interpolation
logger.info(f'Synced & Deleted instances: {result}')
# pylint: disable=not-callable
@celery_task(name="task_sync_deleted_instances")
def task_sync_deleted_instances():
"""
Checks for deleted instances for all forms and then syncs them
"""
xforms = XForm.objects.filter(deleted_at=None)
for xform in xforms:
task_sync_form_deleted_instances.delay(xform_id=xform.id)
# pylint: disable=not-callable
@celery_task(name="task_sync_deleted_xforms")
def task_sync_deleted_xforms(username: str):
"""
checks for deleted xforms and syncs them
"""
result = sync_deleted_xforms(username=username)
# pylint: disable=logging-fstring-interpolation
logger.info(f'Synced & Deleted forms: {result}')
# pylint: disable=not-callable
@celery_task(name="task_sync_deleted_projects")
def task_sync_deleted_projects(usernames: list):
"""
checks for deleted projects and syncs them
"""
result = sync_deleted_projects(usernames=usernames)
# pylint: disable=logging-fstring-interpolation
logger.info(f'Synced & Deleted forms: {result}')
# pylint: disable=not-callable
@celery_task(name="task_check_if_users_can_submit_to_form")
def task_check_if_users_can_submit_to_form(xform_id):
"""
Check if users can submit to the form
"""
try:
xform = XForm.objects.get(pk=xform_id)
except XForm.DoesNotExist: # pylint: disable=no-member
pass
else:
check_if_users_can_submit_to_form(xform=xform)
# pylint: disable=not-callable
@celery_task(name="task_sync_xform_can_submit_checks")
def task_sync_xform_can_submit_checks():
"""
Checks if forms are configured correctly to allow users to make submissions
"""
xforms = XForm.objects.filter(deleted_at=None)
for xform in xforms:
task_check_if_users_can_submit_to_form.delay(xform_id=xform.id)
# pylint: disable=not-callable
@celery_task(name="task_sync_submission_review")
def task_sync_submission_review(instance_id: int,
kaznet_review_status: str, comment: str):
"""
Sync auto review of submission with its review on onadata
"""
ona_review_status = convert_kaznet_to_ona_submission_status(
kaznet_review_status)
sync_submission_review(instance_id, ona_review_status, comment)
# pylint: disable=not-callable
@celery_task(name="task_sync_outdated_submission_reviews")
def task_sync_outdated_submission_reviews():
"""
Sync outdated submission reviews that did not
sync with onadata when they were created
"""
# query all instances from db and iterate through,
# calling sync_submission_review for each if
# synced_with_ona_data is not set to True
all_instances = Instance.objects.filter(json__synced_with_ona_data=False)
all_instances = list(all_instances) + list(
Instance.objects.filter(json__synced_with_ona_data=None))
for instance in all_instances:
status = instance.json.get('status')
comment = instance.json.get('comment')
if status is not None and comment is not None:
task_sync_submission_review.delay(instance.id,
status,
comment)
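A hedged sketch of how the periodic sync tasks above could be wired into Celery beat from the Django settings. The task names match the name= arguments declared in this module; the schedule keys and intervals are illustrative, not the project's actual configuration.

from celery.schedules import crontab

CELERY_BEAT_SCHEDULE = {
    "sync-updated-instances": {
        "task": "task_sync_updated_instances",
        "schedule": crontab(minute="*/15"),
    },
    "fetch-missing-instances": {
        "task": "task_fetch_missing_instances",
        "schedule": crontab(minute="*/30"),
    },
    "sync-outdated-submission-reviews": {
        "task": "task_sync_outdated_submission_reviews",
        "schedule": crontab(hour="*/6", minute=0),
    },
}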
| 34.078652
| 79
| 0.711287
|