# Matchmaker Dense Retrieval
# -------------------------------
# Conduct 3 phases of dense retrieval: encoding, indexing, search
# Works in batch mode only; not intended as a production search engine
#
# - Needs a trained model (via train.py)
# - Measures efficiency & effectiveness on 1 collection + multiple query sets (start a new experiment for another collection)
# - Allows starting a new experiment from each of the 3 steps via the following modes:
#
# mode config-requirement
#
# 1) encode+index+search -> trained_model folder path
# 2) index+search -> continue_folder folder path pointing to an experiment started with 1)
# 3) search -> continue_folder folder path pointing to an experiment started with 2)
#
# - We can run many hyperparameter studies starting from each step, or just run through a full pass once (see the example invocation below)
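#
# Example invocation (a hedged sketch: the script name, run name, and config paths are
# placeholders, not files guaranteed to ship with the repository):
#
#   python matchmaker/dense_retrieval.py encode+index+search \
#       --run-name msmarco-dense-run1 \
#       --config config/dense_retrieval.yaml config/models/my_model.yaml
#
# A follow-up study can reuse the encoded collection by switching the mode and pointing
# continue_folder at the run folder produced above, e.g. via --config-overwrites:
#
#   python matchmaker/dense_retrieval.py index+search \
#       --run-name msmarco-hnsw-study \
#       --config config/dense_retrieval.yaml \
#       --config-overwrites "continue_folder: <path to the encode+index+search run folder>"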
import argparse
import copy
import os
import glob
from timeit import default_timer
import sys
sys.path.append(os.getcwd())
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # needed because of the scann library
from transformers import logging
logging.set_verbosity_warning()
import torch
import numpy
import random
from allennlp.nn.util import move_to_device
from matchmaker.models.all import get_model, get_word_embedder, build_model
from matchmaker.modules.indexing_heads import *
from matchmaker.utils.utils import *
from matchmaker.utils.config import *
from matchmaker.utils.input_pipeline import allennlp_single_sequence_loader
from matchmaker.utils.performance_monitor import *
from matchmaker.eval import *
from matchmaker.utils.core_metrics import *
from matchmaker.retrieval.faiss_indices import *
from rich.console import Console
from rich.live import Live
console = Console()
MODE_ALL = "encode+index+search"
MODE_START_INDEX = "index+search"
MODE_START_SEARCH = "search"
if __name__ == "__main__":
#
# config & mode selection
# -------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('mode', help='One of: '+MODE_ALL+', '+MODE_START_INDEX+', '+MODE_START_SEARCH)
parser.add_argument('--run-name', action='store', dest='run_name',
help='run name, used for the run folder (no spaces, special characters)', required=True)
parser.add_argument('--config', nargs='+', action='store', dest='config_file',
help='config file with all hyper-params & paths', required=True)
parser.add_argument('--config-overwrites', action='store', dest='config_overwrites',
help='overwrite config values format; (non-)whitespace important! -> key1: value1,key2: value2', required=False)
args = parser.parse_args()
config = get_config(args.config_file, args.config_overwrites)
run_folder = prepare_experiment(args, config)
if args.mode == MODE_ALL:
encode_config = config
index_config = config
model_config = get_config_single(config["trained_model"])
print_hello({**model_config, **config}, run_folder, "[Dense Retrieval] Encode & Index & Search",
show_settings=["Model","Trained Checkpoint","Index","Collection Batch Size","Query Batch Size","Use ONNX Runtime"])
elif args.mode == MODE_START_INDEX:
if "continue_folder" not in config: raise Exception("continue_folder must be set in config")
encode_folder = config["continue_folder"]
encode_config = get_config_single(encode_folder)
model_config = get_config_single(encode_config["trained_model"])
index_config = config
print_hello({**model_config, **config,**{"trained_model":encode_config["trained_model"]}}, run_folder, "[Dense Retrieval] Index & Search",
show_settings=["Model","Trained Checkpoint","Index","Query Batch Size","Use ONNX Runtime"])
elif args.mode == MODE_START_SEARCH:
if "continue_folder" not in config: raise Exception("continue_folder must be set in config")
index_folder = config["continue_folder"]
index_config = get_config_single(index_folder)
encode_folder = index_config["continue_folder"] if "continue_folder" in index_config else index_folder
encode_config= get_config_single(encode_folder)
model_config = get_config_single(encode_config["trained_model"])
print_hello({**model_config, **config,**{"trained_model":encode_config["trained_model"]}}, run_folder, "[Dense Retrieval] Search",
show_settings=["Model","Trained Checkpoint","Index","Query Batch Size","Use ONNX Runtime"])
else:
raise Exception("mode not supported")
use_onnx = config["onnx_use_inference"]
if use_onnx: # prevent errors if onnx is not properly installed (see readme for setup instructions)
import onnxruntime
from matchmaker.utils.onnx_helper import *
logger = get_logger_to_file(run_folder, "main")
logger.info("Running: %s", str(sys.argv))
torch.manual_seed(model_config["random_seed"])
numpy.random.seed(model_config["random_seed"])
random.seed(model_config["random_seed"])
logger.info("Torch seed: %i ",torch.initial_seed())
# hardcode gpu usage
cuda_device = 0 # main cuda device
perf_monitor = PerformanceMonitor.get()
perf_monitor.start_block("startup")
#
# create and load model instance
# -------------------------------
word_embedder, padding_idx = get_word_embedder(model_config)
model, encoder_type = get_model(model_config,word_embedder,padding_idx)
model = build_model(model,encoder_type,word_embedder,model_config)
if model_config.get("model_checkpoint_from_huggingface",False):
model.from_pretrained(encode_config["trained_model"])
console.log("[Startup]","Trained model loaded from huggingface")
else:
model_path = os.path.join(encode_config["trained_model"], "best-model.pytorch-state-dict")
load_result = model.load_state_dict(torch.load(model_path),strict=False)
logger.info('Warmstart init model from: %s', model_path)
logger.info(load_result)
console.log("[Startup]","Trained model loaded locally; result:",load_result)
#
# setup heads wrapping the model for indexing & searching
#
if args.mode == MODE_ALL:
model_indexer = CollectionIndexerHead(model, use_fp16=False if use_onnx else model_config["use_fp16"]).cuda()
model_indexer.eval()
if use_onnx:
console.log("[Startup]","Using ONNX, converting & optimizing indexer ... ")
convert_and_optimize(model_indexer, os.path.join(run_folder,"indexer-model.onnx"),model_config["use_fp16"])
del model_indexer
onnx_indexer = onnxruntime.InferenceSession(os.path.join(run_folder,"indexer-model.onnx"),providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
model_searcher = QuerySearcherHead(model, use_fp16=False if use_onnx else model_config["use_fp16"]).cuda()
model_searcher.eval()
if use_onnx:
console.log("[Startup]","Using ONNX, converting & optimizing searcher ... ")
convert_and_optimize(model_searcher, os.path.join(run_folder,"searcher-model.onnx"),model_config["use_fp16"])
del model_searcher
logger.info('Model %s total parameters: %s', model_config["model"], sum(p.numel() for p in model.parameters() if p.requires_grad))
logger.info('Network: %s', model)
del model
#
# setup multi-gpu; todo: restrict DataParallel wrapping to forward-only calls
#
if torch.cuda.device_count() > 1 and not use_onnx:
console.log("Let's use", torch.cuda.device_count(), "GPUs!")
if args.mode == MODE_ALL:
model_indexer = torch.nn.DataParallel(model_indexer)
model_indexer.eval()
if config["query_batch_size"] > 1:
model_searcher = torch.nn.DataParallel(model_searcher)
model_searcher.eval()
else:
console.log("[Startup] Info: Using single GPU for search, as batch_size is set to 1")
elif torch.cuda.device_count() > 1 and use_onnx:
console.log("[Startup] Info: ONNX currently only uses 1 GPU")
perf_monitor.set_gpu_info(torch.cuda.device_count(),torch.cuda.get_device_name())
perf_monitor.stop_block("startup")
try:
#
# 1) Encode
# -------------------------------
token_base_size = config["token_block_size"]
token_dimensions = config["token_dim"]
if args.mode == MODE_ALL:
console.log("[Encoding]","Encoding collection from: ",config["collection_tsv"])
doc_infos = {}
seq_ids = []
token_base_number=0
token_base = np.memmap(os.path.join(run_folder,"token_reps_"+str(token_base_number)+".npy"), dtype=numpy.dtype(config["token_dtype"]),
mode="w+", shape=(token_base_size,token_dimensions))
current_ids = np.ndarray(shape=(token_base_size), dtype = 'int64')
id_mapping = []
token_insert_index=0
storage = []
storage_filled_to_index = []
input_loader = allennlp_single_sequence_loader(model_config,config, config["collection_tsv"], sequence_type="doc")
perf_monitor.start_block("encode")
start_time = default_timer()
#import pprofile
#prof = pprofile.Profile()
with torch.no_grad(), Live("[bold magenta] Loading...",console=console,auto_refresh=False) as status:#,prof():
batch_number=0
sequence_number=0
for batch in input_loader:
if use_onnx:
output = onnx_indexer.run(None, {'input_ids': batch["seq_tokens"]['input_ids'].numpy(),
'attention_mask': batch["seq_tokens"]['attention_mask'].to(torch.int64).numpy()})[0]
else:
batch = move_to_device(copy.deepcopy(batch), cuda_device)
output = model_indexer.forward(batch["seq_tokens"])
output = output.cpu().numpy() # get the output back to the cpu - in one piece
# compare ONNX Runtime and PyTorch results
#np.testing.assert_allclose(output, ort_outs[0], rtol=1e-03, atol=1e-05)
for sample_i, seq_id in enumerate(batch["seq_id"]): # operate on cpu memory
# assuming either 1 vec or 1-n
# if 1-n remove 0 vectors as padding (colbert,tk,tkl models)
current_reps = output[sample_i]
dim_count = len(current_reps.shape)
if dim_count == 2:
current_reps = current_reps[np.abs(current_reps).sum(-1) > 0,:] # slightly risky, but an all-zero vector should only ever come from padding
vec_count = 1 if dim_count == 1 else current_reps.shape[0]
if token_insert_index + vec_count > token_base_size:
storage.append(token_base[:token_insert_index])
id_mapping.append(current_ids[:token_insert_index])
current_ids = np.ndarray(shape=(token_base_size), dtype = 'int64')
storage_filled_to_index.append(token_insert_index)
token_base_number+=1
token_insert_index=0
token_base = np.memmap(os.path.join(run_folder,"token_reps_"+str(token_base_number)+".npy"), dtype=numpy.dtype(config["token_dtype"]),
mode="w+", shape=(token_base_size,token_dimensions))
start_index = token_insert_index
token_insert_index = token_insert_index + vec_count
token_base[start_index:token_insert_index] = current_reps
current_ids[start_index:token_insert_index] = len(seq_ids)
doc_infos[seq_id] = (token_base_number,start_index,token_insert_index)
seq_ids.append(seq_id)
batch_number += 1
sequence_number += len(batch["seq_id"])
if batch_number % 10 == 0: status.update("[bold magenta] Progress ... Batch No.: "+str(batch_number)+" | Sequence No.: "+str(sequence_number) +" | Seq. / second: " + \
"{:.2f}".format(sequence_number/(default_timer() - start_time)), refresh=True)
#prof.print_stats()
# save last token reps
storage.append(token_base[:token_insert_index])
id_mapping.append(current_ids[:token_insert_index])
storage_filled_to_index.append(token_insert_index)
saveCompressed(os.path.join(run_folder,"doc_infos.npz"),doc_infos=doc_infos,id_mapping=id_mapping,
seq_ids=seq_ids,storage_filled_to_index=storage_filled_to_index)
if not use_onnx:
perf_monitor.log_unique_value("encoding_gpu_mem",str(torch.cuda.memory_allocated()/float(1e9)) + " GB")
perf_monitor.log_unique_value("encoding_gpu_mem_max",str(torch.cuda.max_memory_allocated()/float(1e9)) + " GB")
perf_monitor.log_unique_value("encoded_size_on_disk",str(sum(os.path.getsize(f) for f in glob.glob(os.path.join(run_folder,"token_reps_*")))/float(1e9)) + " GB")
perf_monitor.stop_block("encode",len(seq_ids))
#
# skip encoding
#
else:
console.log("[Encoding]","Skipping encoding; loading collection vectors & info from: ",encode_folder)
dfs = numpy.load(os.path.join(encode_folder,"doc_infos.npz"),allow_pickle=True)
doc_infos=dfs.get("doc_infos")[()]
id_mapping=dfs.get("id_mapping")[()]
seq_ids=dfs.get("seq_ids")[()]
storage_filled_to_index=dfs.get("storage_filled_to_index")[()]
storage = []
for f in range(0,len(glob.glob(os.path.join(encode_folder,"token_reps_*")))):
storage.append(np.memmap(os.path.join(encode_folder,"token_reps_"+str(f)+".npy"), dtype=numpy.dtype(encode_config["token_dtype"]),
mode="r", shape=(token_base_size,token_dimensions))[:storage_filled_to_index[f]])
#
# 2) Nearest neighbor indexing
# -------------------------
if index_config["faiss_index_type"] == "ondisk_sharding":
indexer = FaissShardedOnDiskIdIndexer(index_config)
elif index_config["faiss_index_type"] == "full":
indexer = FaissIdIndexer(index_config)
elif index_config["faiss_index_type"] == "ivf":
indexer = FaissIVFIndexer(index_config) # todo
elif index_config["faiss_index_type"] == "hnsw":
indexer = FaissHNSWIndexer(index_config)
elif index_config["faiss_index_type"] == "scann":
from matchmaker.retrieval.scann_index import ScaNNIndexer # import here, because it only works on linux
indexer = ScaNNIndexer(index_config)
else:
raise Exception("faiss_index_type not supported")
# we don't save the full index, but rebuild it every time (just loading the vectors basically)
if args.mode != MODE_START_SEARCH or index_config["faiss_index_type"] == "full":
perf_monitor.start_block("indexing")
indexer.prepare(storage)
indexer.index(id_mapping, storage)
if index_config["faiss_index_type"] == "scann": # scann needs a folder (already created)
index_path = os.path.join(run_folder,"scann-index")
os.makedirs(index_path)
indexer.save(index_path)
index_size = sum(os.path.getsize(f) for f in glob.glob(os.path.join(run_folder,"scann-index","*")))
perf_monitor.log_unique_value("index_size_on_disk",str(index_size/float(1e9)) + " GB")
perf_monitor.log_unique_value("index_size_on_disk_per_entry",str(index_size/len(seq_ids)) + " bytes")
elif index_config["faiss_index_type"] != "full":
indexer.save(os.path.join(run_folder,"faiss.index"))
perf_monitor.log_unique_value("faiss_index_size_on_disk",str(os.path.getsize(os.path.join(run_folder,"faiss.index"))/float(1e9)) + " GB")
perf_monitor.log_unique_value("faiss_index_size_on_disk_per_entry",str(os.path.getsize(os.path.join(run_folder,"faiss.index"))/len(seq_ids)) + " bytes")
perf_monitor.stop_block("indexing")
else:
console.log("[Indexing]","loading existing index from: ",index_folder)
if index_config["faiss_index_type"] == "scann":
indexer.load(os.path.join(index_folder,"scann-index"))
else:
indexer.load(os.path.join(index_folder,"faiss.index"),config)
#
# 3) Search
# -------------------------
if use_onnx:
if args.mode == MODE_ALL: del onnx_indexer
onnx_searcher = onnxruntime.InferenceSession(os.path.join(run_folder,"searcher-model.onnx"),providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
perf_monitor.start_block("search_total")
for test_name,test_config in config["query_sets"].items():
console.log("[Search]","Start retrieval for:", test_config["queries_tsv"])
input_loader = allennlp_single_sequence_loader(model_config,config,test_config["queries_tsv"], sequence_type="query", force_exact_batch_size=True)
validation_results = defaultdict(list)
times_query_encode = []
times_faiss = []
times_aggregate = []
with torch.no_grad(), Live("[bold magenta] Loading...",console=console,auto_refresh=False) as status:
i=0
for batch_orig in input_loader:
batch_size = len(batch_orig["seq_id"])
perf_monitor.start_block("search_query_encode")
if use_onnx:
output = onnx_searcher.run(None, {'input_ids': batch_orig["seq_tokens"]['input_ids'].numpy(),
'attention_mask': batch_orig["seq_tokens"]['attention_mask'].to(torch.int64).numpy()})[0]
else:
batch = move_to_device(copy.deepcopy(batch_orig), cuda_device)
output = model_searcher.forward(batch["seq_tokens"],search_type="encode")
output = output.cpu().numpy() # get the output back to the cpu - in one piece
perf_monitor.stop_block("search_query_encode",batch_size)
perf_monitor.start_block("search_nn_lookup")
res_scores, ids = indexer.search(output,test_config.get("index_hit_top_n",test_config["top_n"]))
perf_monitor.stop_block("search_nn_lookup",batch_size)
perf_monitor.start_block("search_aggregation")
for sample_i, query_id in enumerate(batch_orig["seq_id"]): # operate on cpu memory
if model_config["model"] == "ColBERT":
current_ids_set = set(current_ids.flatten())
for t, s_idx in enumerate(current_ids_set):
#st_3b1 = time.time()
doc_inf = doc_infos[seq_ids[s_idx]]
doc_reps = storage[doc_inf[0]][doc_inf[1]:doc_inf[2],:]
doc_reps = torch.from_numpy(doc_reps).unsqueeze(0).float()
#st_3b_close.append(time.time()-st_3b1)
#st_3b1 = time.time()
output_scores = model_searcher.forward(curr_q, document_enc=doc_reps, search_type="aggregate", use_fp16=model_config["use_fp16"])
#output_scores = output_scores.cpu().numpy()
validation_results[query_id].append((seq_ids[s_idx], float(output_scores)))
if model_config['model'] == 'maxP->bert_dot':
current_scores = res_scores[sample_i]
current_ids = ids[sample_i]
unique_ids = set()
for t, s_idx in enumerate(current_ids):
if s_idx in unique_ids:
continue
unique_ids.add(s_idx)
validation_results[query_id].append((seq_ids[s_idx], float(current_scores[t])))
if len(validation_results[query_id]) == test_config["top_n"]:
break
if len(validation_results[query_id]) < test_config["top_n"]:
logger.info("too few results for: ",query_id,"got:",len(validation_results[query_id]))
else: # simple max. inner product aggregation
# get collection seq ids
current_scores = res_scores[sample_i]
current_ids = ids[sample_i]
collection_ids = []
for t, s_idx in enumerate(current_ids):
validation_results[query_id].append((seq_ids[s_idx], float(current_scores[t])))
perf_monitor.stop_block("search_aggregation",batch_size)
i+=1
if i % 10 == 0: status.update("[bold magenta] Progress ... Batch No.: "+str(i) +" Query No.: "+str(len(batch_orig["seq_id"])*i), refresh=True)
save_sorted_results(validation_results, os.path.join(run_folder,test_name+"-output.txt"),until_rank=test_config["top_n"])
if "qrels" in test_config:
ranked_results = unrolled_to_ranked_result(validation_results)
metrics = calculate_metrics_plain(ranked_results,load_qrels(test_config["qrels"]),test_config["binarization_point"])
metric_file_path = os.path.join(run_folder, test_name+"-metrics.csv")
save_fullmetrics_oneN(metric_file_path, metrics, -1, -1)
console.log("[Search] Results for",test_name)
print_metric_summary(metrics)
perf_monitor.stop_block("search_total")
perf_monitor.print_summary()
perf_monitor.save_summary(os.path.join(run_folder,"efficiency_metrics.json"))
except KeyboardInterrupt:
logger.info('-' * 20)
logger.info('Manual Stop!')
console.log("Manual Stop! Bye :)")
except Exception as e:
logger.info('-' * 20)
logger.exception('[dense_retrieval] Got exception: ')
logger.info('Exiting from dense retrieval early')
console.log("[red]Exception! ",str(e))
console.print_exception()
exit(1)
finally:
# cleanup the onnx model, this will come back to bite me some day, but for now let's save storage space!
if os.path.exists(os.path.join(run_folder,"indexer-model.onnx")):
os.remove(os.path.join(run_folder,"indexer-model.onnx"))
if os.path.exists(os.path.join(run_folder,"searcher-model.onnx")):
os.remove(os.path.join(run_folder,"searcher-model.onnx"))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: matrix_calc.py
"""
Created on Thu May 7 20:56:54 2020
@author: Neo(liuniu@smail.nju.edu.cn)
Some code for calculating the various matrices and arrays needed in the LSQ process.
The normal equation is written as
A * x = b
where A is the normal matrix, x the vector consisting of the unknowns,
and b the right-hand-side array.
"""
import sys
import os
import numpy as np
from numpy import pi, concatenate
# My progs
# from .vsh_expension import real_vec_sph_harm_proj
from vsh_expension_201128 import real_vec_sph_harm_proj, vec_sph_harm_proj
# ----------------------------- FUNCTIONS -----------------------------
def cov_mat_calc(dra_err, ddc_err, ra_dc_cov=None, ra_dc_cor=None):
"""Calculate the covariance matrix.
Parameters
----------
dra_err/ddc_err : array of float
formal uncertainty of dRA(*cos(dc_rad))/dDE
ra_dc_cov : array of float
covariance between dRA and dDE, default is None
ra_dc_cor : array of float
correlation coefficient between dRA and dDE, default is None
Returns
----------
cov_mat : matrix
Covariance matrix used in the least squares fitting.
"""
if len(ddc_err) != len(dra_err):
print("The length of dra_err and ddc_err should be equal")
sys.exit()
# TO-DO
# Check the length of correlation array
# else:
# if ra_dc_cov is None:
# Covariance matrix.
# err = np.vstack((dra_err, ddc_err)).T.flatten()
err = concatenate((dra_err, ddc_err))
cov_mat = np.diag(err**2)
# Take the correlation into consideration.
# Assume at most one of ra_dc_cov and ra_dc_cor is given
if ra_dc_cov is None and ra_dc_cor is None:
return cov_mat
elif ra_dc_cor is not None:
ra_dc_cov = ra_dc_cor * dra_err * ddc_err
N = len(dra_err)
for i, covi in enumerate(ra_dc_cov):
cov_mat[i, i+N] = covi
cov_mat[i+N, i] = covi
return cov_mat
def cov_to_cor(cov_mat):
"""Convert covariance matrix to sigma and correlation coefficient matrix
"""
# Formal uncertainty
sig = np.sqrt(cov_mat.diagonal())
# Correlation coefficient.
cor_mat = np.array([cov_mat[i, j] / sig[i] / sig[j]
for j in range(len(sig))
for i in range(len(sig))])
cor_mat.resize((len(sig), len(sig)))
return sig, cor_mat
def cor_to_cov(sig, cor_mat):
"""Convert correlation coefficient matrix to sigma and covariance matrix
"""
# Covariance
cov_mat = np.array([cor_mat[i, j] * sig[i] * sig[j]
for j in range(len(sig))
for i in range(len(sig))])
cov_mat.resize((len(sig), len(sig)))
return cov_mat
def wgt_mat_calc(dra_err, ddc_err, ra_dc_cov=None, ra_dc_cor=None):
"""Calculate the weight matrix.
Parameters
----------
dra_err/ddc_err : array of float
formal uncertainty of dRA(*cos(dc_rad))/dDE
ra_dc_cov : array of float
covariance between dRA and dDE, default is None
ra_dc_cor : array of float
correlation coefficient between dRA and dDE, default is None
Returns
----------
wgt_matrix : matrix
weighted matrix used in the least squares fitting.
"""
# Calculate the covariance matrix
cov_mat = cov_mat_calc(dra_err, ddc_err, ra_dc_cov, ra_dc_cor)
# Inverse it to obtain weight matrix.
wgt_mat = np.linalg.inv(cov_mat)
return wgt_mat
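# Commented worked example (a minimal sketch, not part of the original module): two sources
# with formal errors of 1.0 and an RA-Dec correlation coefficient of 0.5 give a 4x4
# covariance matrix with 0.5 on the (i, i+N) off-diagonals; the weight matrix is its inverse.
#
#   dra_err = np.array([1.0, 1.0])
#   ddc_err = np.array([1.0, 1.0])
#   ra_dc_cor = np.array([0.5, 0.5])
#   cov_mat = cov_mat_calc(dra_err, ddc_err, ra_dc_cor=ra_dc_cor)
#   wgt_mat = wgt_mat_calc(dra_err, ddc_err, ra_dc_cor=ra_dc_cor)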
def jac_mat_l_calc(T_ra_mat, T_dc_mat, l, fit_type="full"):
"""Calculate the Jacobian matrix of lth degree
Parameters
----------
T_ra_mat/T_dc_mat : array of float
projection of the T-type vector spherical harmonics on the RA/Dec. directions,
as returned by vec_sph_harm_proj
l : int
degree of the harmonics
fit_type : string
flag to determine which parameters to be fitted
"full" for T- and S-vectors both
"T" for T-vectors only
"S" for S-vectors only
Returns
----------
jac_mat : array of float
Jacobian matrix of shape (2*M, 4*l+2)
"""
# Number of source
M = T_ra_mat.shape[2]
# Usually begins with the first order
Tl0_ra, Tl0_dc = real_vec_sph_harm_proj(l, 0, T_ra_mat, T_dc_mat)
# Note the relation between Tlm and Slm
# S10_ra, S10_dc = -Tl0_dc, Tl0_ra
# jac_mat_ra = concatenate(
# (Tl0_ra.reshape(M, 1), Sl0_ra.reshape(M, 1)), axis=1)
jac_mat_ra = concatenate(
(Tl0_ra.reshape(M, 1), -Tl0_dc.reshape(M, 1)), axis=1)
jac_mat_dc = concatenate(
(Tl0_dc.reshape(M, 1), Tl0_ra.reshape(M, 1)), axis=1)
for m in range(1, l+1):
Tlmr_ra, Tlmr_dc, Tlmi_ra, Tlmi_dc = real_vec_sph_harm_proj(
l, m, T_ra_mat, T_dc_mat)
# Just to show the relation
# Slmr_ra, Slmi_ra = -Tlmr_dc, -Tlmr_dc
# Slmr_dc, Slmr_dc = Tlmr_ra, Tlmr_ra
# Concatenate the new array and the existing Jacobian matrix
jac_mat_ra = concatenate(
(jac_mat_ra, Tlmr_ra.reshape(M, 1), -Tlmr_dc.reshape(M, 1),
Tlmi_ra.reshape(M, 1), -Tlmi_dc.reshape(M, 1)), axis=1)
jac_mat_dc = concatenate(
(jac_mat_dc, Tlmr_dc.reshape(M, 1), Tlmr_ra.reshape(M, 1),
Tlmi_dc.reshape(M, 1), Tlmi_ra.reshape(M, 1)), axis=1)
# Treat (ra, dc) as two observations(dependent or independent)
# Combine the Jacobian matrix projection on RA and Decl.
jac_mat = concatenate((jac_mat_ra, jac_mat_dc), axis=0)
# Check the shape of the matrix
if jac_mat.shape != (2*M, 4*l+2):
print("Shape of Jocabian matrix at l={} is ({},{}) "
"rather than ({},{})".format(l, jac_mat.shape[0], jac_mat.shape[1],
2*M, 4*l+2))
sys.exit()
return jac_mat
def jac_mat_calc(ra_rad, dc_rad, l_max, fit_type="full"):
"""Calculate the Jacobian matrix
Parameters
----------
ra_rad/dc_rad : array (m,) of float
Right ascension/Declination in radian
l_max : int
maximum degree
fit_type : string
flag to determine which parameters to be fitted
"full" for T- and S-vectors both
"T" for T-vectors only
"S" for S-vectors only
Returns
----------
jac_mat : array of float
Jacobian matrix of shape (2*M, N) (assuming N unknowns to determine)
"""
# Calculate all the VSH terms at one time
T_ra_mat, T_dc_mat = vec_sph_harm_proj(l_max, ra_rad, dc_rad)
# Usually begins with the first degree
jac_mat = jac_mat_l_calc(T_ra_mat, T_dc_mat, 1, fit_type)
for l in range(2, l_max+1):
new_mat = jac_mat_l_calc(T_ra_mat, T_dc_mat, l, fit_type)
jac_mat = concatenate((jac_mat, new_mat), axis=1)
# Check the shape of the Jacobian matrix
M = len(ra_rad)
N = 2 * l_max * (l_max+2)
if jac_mat.shape != (2*M, N):
print("Shape of Jocabian matrix is ({},{}) "
"rather than ({},{})".format(jac_mat.shape[0], jac_mat.shape[1],
2*M, N))
sys.exit()
return jac_mat
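# Worked shape check (added for clarity): each degree l contributes 4*l+2 real coefficients
# (2l+1 for the T-vectors plus 2l+1 for the S-vectors), so for l_max = 2 the number of
# unknowns is N = 2 * l_max * (l_max + 2) = 6 + 10 = 16 and jac_mat has shape (2*M, 16).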
def nor_mat_calc(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad,
ra_dc_cov=None, ra_dc_cor=None, l_max=1, fit_type="full", suffix=""):
"""Cacluate the normal and right-hand-side matrix for LSQ analysis.
Parameters
----------
dra_err/ddc_err : array of float
formal uncertainty of dRA(*cos(dc_rad))/dDE in dex
ra_rad/dc_rad : array of float
Right ascension/Declination in radian
ra_dc_cov : array of float
covariance between dRA and dDE, default is None
ra_dc_cor : array of float
correlation coefficient between dRA and dDE, default is None
l_max : int
maximum degree
fit_type : string
flag to determine which parameters to be fitted
"full" for T- and S-vectors both
"T" for T-vectors only
"S" for S-vectors only
suffix : string
suffix for output matrix file
Returns
----------
nor_mat : array of float
normal matrix
rhs_mat : array of float
right-hand-side matrix
"""
# Jacobian matrix
jac_mat = jac_mat_calc(ra_rad, dc_rad, l_max, fit_type)
# Weighted matrix
wgt_mat = wgt_mat_calc(dra_err, ddc_err, ra_dc_cov, ra_dc_cor)
# Jac_mat_T * Wgt_mat
mul_mat = np.dot(np.transpose(jac_mat), wgt_mat)
# Calculate normal matrix A
nor_mat = np.dot(mul_mat, jac_mat)
# Calculate right-hand-side matrix b
res_mat = concatenate((dra, ddc), axis=0)
rhs_mat = np.dot(mul_mat, res_mat)
# Save matrices for later use
# np.save("jac_mat_{:s}.npy".format(suffix), jac_mat)
return nor_mat, rhs_mat
def predict_mat_calc(pmt, suffix):
"""Calculate the predicted value
Parameters
----------
pmt : array
estimate of unknowns
suffix : string
suffix for finding corresponding Jacobian matrix
Returns
-------
dra_pre : array
predicted offset in RA
ddc_pre : array
predicted offset in Declination
"""
jac_mat = np.load("jac_mat_{:s}.npy".format(suffix))
dra_ddc = np.dot(jac_mat, pmt)
num_sou = int(len(dra_ddc)/2)
dra_pre, ddc_pre = dra_ddc[:num_sou], dra_ddc[num_sou:]
return dra_pre, ddc_pre
def nor_eq_sol(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cov=None,
ra_dc_cor=None, l_max=1, fit_type="full", num_iter=None):
"""The 1st degree of VSH function: glide and rotation.
Parameters
----------
dra/ddc : array of float
R.A.(*cos(Dec.))/Dec. differences in dex
dra_err/ddc_err : array of float
formal uncertainty of dra(*cos(dc_rad))/ddc in dex
ra_rad/dc_rad : array of float
Right ascension/Declination in radian
ra_dc_cov/ra_dc_cor : array of float
covariance/correlation coefficient between dra and ddc in dex^2, default is None
fit_type : string
flag to determine which parameters to be fitted
"full" for T- and S-vectors both
"T" for T-vectors only
"S" for S-vectors only
Returns
----------
pmt : array of float
estimation of (d1, d2, d3, r1, r2, r3) in dex
sig : array of float
uncertainty of x in dex
cor_mat : matrix
matrix of correlation coefficient.
"""
# Maximum number of sources processed at a time
# According to my tests, 100 is a good choice
if num_iter is None:
num_iter = 100
div = dra.size // num_iter
rem = dra.size % num_iter
suffix_array = []
A, b = 0, 0
if rem:
suffix_array.append("{:05d}".format(0))
if ra_dc_cov is not None:
A, b = nor_mat_calc(dra[: rem], ddc[: rem], dra_err[: rem], ddc_err[: rem],
ra_rad[: rem], dc_rad[: rem], ra_dc_cov=ra_dc_cov[: rem],
l_max=l_max, fit_type=fit_type, suffix=suffix_array[0])
elif ra_dc_cor is not None:
A, b = nor_mat_calc(dra[: rem], ddc[: rem], dra_err[: rem], ddc_err[: rem],
ra_rad[: rem], dc_rad[: rem], ra_dc_cor=ra_dc_cor[: rem],
l_max=l_max, fit_type=fit_type, suffix=suffix_array[0])
else:
A, b = nor_mat_calc(dra[: rem], ddc[: rem], dra_err[: rem], ddc_err[: rem],
ra_rad[: rem], dc_rad[: rem], l_max=l_max, fit_type=fit_type,
suffix=suffix_array[0])
for i in range(div):
sta = rem + i * num_iter
end = sta + num_iter
suffix_array.append("{:05d}".format(i+1))
if ra_dc_cov is not None:
An, bn = nor_mat_calc(dra[sta: end], ddc[sta: end], dra_err[sta: end],
ddc_err[sta: end], ra_rad[sta: end], dc_rad[sta: end],
ra_dc_cov=ra_dc_cov[sta: end], l_max=l_max, fit_type=fit_type,
suffix=suffix_array[-1])
elif ra_dc_cor is not None:
An, bn = nor_mat_calc(dra[sta: end], ddc[sta: end], dra_err[sta: end],
ddc_err[sta: end], ra_rad[sta: end], dc_rad[sta: end],
ra_dc_cor=ra_dc_cor[sta: end], l_max=l_max, fit_type=fit_type,
suffix=suffix_array[-1])
else:
An, bn = nor_mat_calc(dra[sta: end], ddc[sta: end], dra_err[sta: end],
ddc_err[sta: end], ra_rad[sta: end], dc_rad[sta: end],
l_max=l_max, fit_type=fit_type,
suffix=suffix_array[-1])
A = A + An
b = b + bn
# Solve the equations.
pmt = np.linalg.solve(A, b)
# Covariance.
cov_mat = np.linalg.inv(A)
# Formal uncertainty and correlation coefficient
sig, cor_mat = cov_to_cor(cov_mat)
# # Calculate residuals
# dra_pre, ddc_pre = predict_mat_calc(pmt, suffix_array[0])
# for i in range(1, len(suffix_array)):
# dra_prei, ddc_prei = predict_mat_calc(pmt, suffix_array[i])
# dra_pre = concatenate((dra_pre, dra_prei))
# ddc_pre = concatenate((ddc_pre, ddc_prei))
#
# dra1, ddc1 = dra - dra_pre, ddc - ddc_pre
#
# # Delete Jacobian matrix file
# for suffix in suffix_array:
# os.system("rm jac_mat_{:s}.npy".format(suffix))
# Return the result.
# return pmt, sig, cor_mat, dra1, ddc1
return pmt, sig, cor_mat
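# Commented usage sketch (synthetic numbers; assumes vec_sph_harm_proj is importable from
# vsh_expension_201128): fit a degree-1 (glide + rotation) model to 200 fake offsets.
#
#   np.random.seed(0)
#   num_sou = 200
#   ra_rad = np.random.uniform(0, 2 * pi, num_sou)
#   dc_rad = np.random.uniform(-pi / 2, pi / 2, num_sou)
#   dra = np.random.normal(0, 0.1, num_sou)
#   ddc = np.random.normal(0, 0.1, num_sou)
#   dra_err = np.full(num_sou, 0.1)
#   ddc_err = np.full(num_sou, 0.1)
#   pmt, sig, cor_mat = nor_eq_sol(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, l_max=1)
#   # pmt holds the 6 estimated coefficients, sig their formal uncertainties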
# --------------------------------- END --------------------------------
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Partial implementation for framework import file processing."""
load(
"@build_bazel_apple_support//lib:apple_support.bzl",
"apple_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleFrameworkImportInfo",
)
load(
"@build_bazel_rules_apple//apple/internal:bitcode_support.bzl",
"bitcode_support",
)
load(
"@build_bazel_rules_apple//apple/internal:codesigning_support.bzl",
"codesigning_support",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal/utils:bundle_paths.bzl",
"bundle_paths",
)
load(
"@build_bazel_rules_apple//apple/internal:intermediates.bzl",
"intermediates",
)
load(
"@bazel_skylib//lib:partial.bzl",
"partial",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
def _framework_import_partial_impl(
*,
actions,
apple_toolchain_info,
label_name,
platform_prerequisites,
provisioning_profile,
rule_descriptor,
targets,
targets_to_avoid):
"""Implementation for the framework import file processing partial."""
transitive_sets = [
x[AppleFrameworkImportInfo].framework_imports
for x in targets
if AppleFrameworkImportInfo in x
]
files_to_bundle = depset(transitive = transitive_sets).to_list()
if targets_to_avoid:
avoid_transitive_sets = [
x[AppleFrameworkImportInfo].framework_imports
for x in targets_to_avoid
if AppleFrameworkImportInfo in x
]
if avoid_transitive_sets:
avoid_files = depset(transitive = avoid_transitive_sets).to_list()
# Remove any files present in the targets to avoid from framework files that need to be
# bundled.
files_to_bundle = [x for x in files_to_bundle if x not in avoid_files]
# Collect the architectures that we are using for the build.
build_archs_found = depset(transitive = [
x[AppleFrameworkImportInfo].build_archs
for x in targets
if AppleFrameworkImportInfo in x
]).to_list()
# Start assembling our partial's outputs.
bundle_zips = []
signed_frameworks_list = []
# Separating our files by framework path, to better address what should be passed in.
framework_binaries_by_framework = dict()
files_by_framework = dict()
for file in files_to_bundle:
framework_path = bundle_paths.farthest_parent(file.short_path, "framework")
# Use the framework path's basename to distinguish groups of files.
framework_basename = paths.basename(framework_path)
if not files_by_framework.get(framework_basename):
files_by_framework[framework_basename] = []
if not framework_binaries_by_framework.get(framework_basename):
framework_binaries_by_framework[framework_basename] = []
# Check if this file is a binary to slice and code sign.
framework_relative_path = paths.relativize(file.short_path, framework_path)
parent_dir = framework_basename
framework_relative_dir = paths.dirname(framework_relative_path).strip("/")
if framework_relative_dir:
parent_dir = paths.join(parent_dir, framework_relative_dir)
if paths.replace_extension(parent_dir, "") == file.basename:
framework_binaries_by_framework[framework_basename].append(file)
continue
# Treat the rest as files to copy into the bundle.
files_by_framework[framework_basename].append(file)
for framework_basename in files_by_framework.keys():
# Create a temporary path for intermediate files and the anticipated zip output.
temp_path = paths.join("_imported_frameworks/", framework_basename)
framework_zip = intermediates.file(
actions,
label_name,
temp_path + ".zip",
)
temp_framework_bundle_path = paths.split_extension(framework_zip.path)[0]
# Pass through all binaries, files, and relevant info as args.
args = actions.args()
for framework_binary in framework_binaries_by_framework[framework_basename]:
args.add("--framework_binary", framework_binary.path)
for build_arch in build_archs_found:
args.add("--slice", build_arch)
if bitcode_support.bitcode_mode_string(platform_prerequisites.apple_fragment) == "none":
args.add("--strip_bitcode")
args.add("--output_zip", framework_zip.path)
args.add("--temp_path", temp_framework_bundle_path)
for file in files_by_framework[framework_basename]:
args.add("--framework_file", file.path)
codesign_args = codesigning_support.codesigning_args(
entitlements = None,
full_archive_path = temp_framework_bundle_path,
is_framework = True,
platform_prerequisites = platform_prerequisites,
provisioning_profile = provisioning_profile,
rule_descriptor = rule_descriptor,
)
args.add_all(codesign_args)
resolved_codesigningtool = apple_toolchain_info.resolved_codesigningtool
resolved_imported_dynamic_framework_processor = apple_toolchain_info.resolved_imported_dynamic_framework_processor
# Inputs of action are all the framework files, plus binaries needed for identifying the
# current build's preferred architecture, plus a generated list of those binaries to prune
# their dependencies so that future changes to the app/extension/framework binaries do not
# force this action to re-run on incremental builds, plus the top-level target's
# provisioning profile if the current build targets real devices.
input_files = files_by_framework[framework_basename] + framework_binaries_by_framework[framework_basename]
execution_requirements = {}
if provisioning_profile:
input_files.append(provisioning_profile)
execution_requirements = {"no-sandbox": "1"}
if platform_prerequisites.platform.is_device:
# Added so that the output of this action is not cached
# remotely, in case multiple developers sign the same artifact
# with different identities.
execution_requirements["no-remote"] = "1"
transitive_inputs = [
resolved_imported_dynamic_framework_processor.inputs,
resolved_codesigningtool.inputs,
]
apple_support.run(
actions = actions,
apple_fragment = platform_prerequisites.apple_fragment,
arguments = [args],
executable = resolved_imported_dynamic_framework_processor.executable,
execution_requirements = execution_requirements,
inputs = depset(input_files, transitive = transitive_inputs),
input_manifests = resolved_imported_dynamic_framework_processor.input_manifests +
resolved_codesigningtool.input_manifests,
mnemonic = "ImportedDynamicFrameworkProcessor",
outputs = [framework_zip],
tools = [resolved_codesigningtool.executable],
xcode_config = platform_prerequisites.xcode_version_config,
xcode_path_wrapper = platform_prerequisites.xcode_path_wrapper,
)
bundle_zips.append(
(processor.location.framework, None, depset([framework_zip])),
)
signed_frameworks_list.append(framework_basename)
return struct(
bundle_zips = bundle_zips,
signed_frameworks = depset(signed_frameworks_list),
)
def framework_import_partial(
*,
actions,
apple_toolchain_info,
label_name,
platform_prerequisites,
provisioning_profile,
rule_descriptor,
targets,
targets_to_avoid = []):
"""Constructor for the framework import file processing partial.
This partial propagates framework import file bundle locations. The files are collected through
the framework_import_aspect aspect.
Args:
actions: The actions provider from `ctx.actions`.
apple_toolchain_info: `struct` of tools from the shared Apple toolchain.
label_name: Name of the target being built.
platform_prerequisites: Struct containing information on the platform being targeted.
provisioning_profile: File for the provisioning profile.
rule_descriptor: A rule descriptor for platform and product types from the rule context.
targets: The list of targets through which to collect the framework import files.
targets_to_avoid: The list of targets that may already be bundling some of the frameworks,
to be used when deduplicating frameworks already bundled.
Returns:
A partial that returns the bundle location of the framework import files.
"""
return partial.make(
_framework_import_partial_impl,
actions = actions,
apple_toolchain_info = apple_toolchain_info,
label_name = label_name,
platform_prerequisites = platform_prerequisites,
provisioning_profile = provisioning_profile,
rule_descriptor = rule_descriptor,
targets = targets,
targets_to_avoid = targets_to_avoid,
)
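# Hypothetical call-site sketch (comment only; the attribute names `deps` and `frameworks`
# below are illustrative, not necessarily the actual rules_apple rule attributes):
#
#   partials_list = [
#       framework_import_partial(
#           actions = ctx.actions,
#           apple_toolchain_info = apple_toolchain_info,
#           label_name = ctx.label.name,
#           platform_prerequisites = platform_prerequisites,
#           provisioning_profile = provisioning_profile,
#           rule_descriptor = rule_descriptor,
#           targets = ctx.attr.deps,
#           targets_to_avoid = ctx.attr.frameworks,
#       ),
#   ]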
|
from PIL import Image, ImageDraw
# limit on the number of frames written to the gif (first frame + appended frames)
s=24
# ticker codes of the stocks to include in the animation
st_list=['4005','4553','6758','7201','7309','8316','8411','9201','9202','9432','9437','9613','9983']
#st_list = ['AAL','AAPL','AMZN','FB','GOOG','ZM']
images = []
for i in st_list:
# load and resize the chart images for the two trading dates
st_name = i + '_trend_2020-06-11_'
im = Image.open('./stock/stock0/{}.png'.format(st_name))
im = im.resize(size=(2588, 1600), resample=Image.NEAREST)
images.append(im)
st_name = i + '_trend_2020-06-12_'
im = Image.open('./stock/stock0/{}.png'.format(st_name))
im = im.resize(size=(2588, 1600), resample=Image.NEAREST)
images.append(im)
# assemble the collected frames into an animated gif
images[0].save('./stock/stock0/zm_T26.gif', save_all=True, append_images=images[1:s], duration=100*10, loop=0)
|
from typing import Optional, Callable
from model_spaces.core.hyperpriors import Hyperpriors
class Covariance:
rnd_code_max_digit = 10
rnd_code_maximum_length = 16
rnd_code_truncation = 1e6
def __init__(self,
name: str,
is_base: bool,
rnd_code,
function_handle: Callable,
priors):
self.name = name # string to represent the model
self.is_base = is_base # boolean to define if it is a base covariance
self.rnd_code = rnd_code # random numbers to figure out if two models are the same
self.function_handle = function_handle # covariance function handle
self.priors = priors # covariance prior function handle
@classmethod
def mask(cls, o1, dimension: int):
pass
def __add__(self, other):
pass
def __mul__(self, other):
pass
def __eq__(self, other):
pass
def get_hyperparameters_sample(self):
pass
def is_base_kernel(self):
pass
@staticmethod
def str2covariance(covariance_name: str,
hyperpriors: Optional[Hyperpriors] = None,
**kwargs):
pass
|
# -*- coding: utf-8 -*-
from functools import reduce
from itertools import product
from operator import mul
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import numpy as np
from africanus.util.requirements import requires_optional
from africanus.rime.predict import (PREDICT_DOCS, predict_checks,
predict_vis as np_predict_vis)
from africanus.rime.wsclean_predict import (
WSCLEAN_PREDICT_DOCS,
wsclean_predict_impl as wsclean_predict_body)
from africanus.model.wsclean.spec_model import spectra as wsclean_spectra
try:
import dask.array as da
from dask.base import tokenize
import dask.blockwise as db
from dask.utils import funcname
from dask.highlevelgraph import HighLevelGraph
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
def _ind_map(arg, ind, out_ind, dim_map, dim_blocks):
# Yield name as first tuple element
yield arg
for j in ind:
try:
dim_idx = dim_map[j]
except KeyError:
# The blockid is not in the output key.
# Assume (and check for a single blockid)
try:
n_blocks = dim_blocks[j]  # renamed to avoid shadowing the dask.blockwise import (db)
except KeyError:
raise ValueError("%s not in block mapping" % j)
else:
if n_blocks != 1:
raise ValueError("Dimension %s must be a single block" % j)
yield 0
else:
# Extract blockid for this index from the output key
yield out_ind[dim_idx]
class LinearReduction(Mapping):
def __init__(
self,
func,
output_indices,
indices,
numblocks,
feed_index=0,
axis=None,
):
self.func = func
self.output_indices = tuple(output_indices)
self.indices = tuple((name, tuple(ind) if ind is not None else ind)
for name, ind in indices)
self.numblocks = numblocks
if axis is None:
raise ValueError("axis not set")
if axis in self.output_indices:
raise ValueError("axis in output_indices")
self.feed_index = feed_index
self.axis = axis
token = tokenize(self.func,
self.output_indices,
self.indices,
self.numblocks,
self.feed_index,
self.axis)
self.func_name = funcname(self.func)
self.name = "-".join((self.func_name, token))
@property
def _dict(self):
if hasattr(self, "_cached_dict"):
return self._cached_dict
else:
# Reduction axis
ax = self.axis
feed_index = self.feed_index
# Number of blocks for each dimension, derived from the input
dim_blocks = db.broadcast_dimensions(self.indices, self.numblocks)
last_block = dim_blocks[ax] - 1
out_dims = (ax,) + self.output_indices
dim_map = {k: i for i, k in enumerate(out_dims)}
dsk = {}
int_name = "-".join((self.func_name,
"intermediate",
tokenize(self.name)))
# Iterate over the output keys creating associated task
for out_ind in product(*[range(dim_blocks[d]) for d in out_dims]):
task = [self.func]
for i, (arg, ind) in enumerate(self.indices):
if i == feed_index:
# First reduction block, feed in None
if out_ind[0] == 0:
task.append(None)
# Otherwise feed in the result of the last operation
else:
task.append((int_name,) +
# Index last reduction block
# always in first axis
(out_ind[0] - 1,) +
out_ind[1:])
elif ind is None:
# Literal arg, embed
task.append(arg)
else:
# Derive input key from output key indices
task.append(tuple(_ind_map(arg, ind, out_ind,
dim_map, dim_blocks)))
# Final block
if out_ind[0] == last_block:
dsk[(self.name,) + out_ind[1:]] = tuple(task)
# Intermediate block
else:
dsk[(int_name,) + out_ind] = tuple(task)
self._cached_dict = dsk
return self._cached_dict
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return reduce(mul, self._out_numblocks().values(), 1)
def _out_numblocks(self):
d = {}
indices = {k: v for k, v in self.indices if v is not None}
for k, v in self.numblocks.items():
for a, b in zip(indices[k], v):
d[a] = max(d.get(a, 0), b)
return {k: v for k, v in d.items() if k in self.output_indices}
def linear_reduction(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
predict_check_tup, out_dtype):
(have_ddes1, have_coh, have_ddes2,
have_dies1, have_bvis, have_dies2) = predict_check_tup
have_ddes = have_ddes1 and have_ddes2
if have_ddes:
cdims = tuple("corr-%d" % i for i in range(len(dde1_jones.shape[4:])))
elif have_coh:
cdims = tuple("corr-%d" % i for i in range(len(source_coh.shape[3:])))
else:
raise ValueError("need ddes or source coherencies")
args = [(time_index, ("row",)),
(antenna1, ("row",)),
(antenna2, ("row",)),
(dde1_jones, ("source", "row", "ant", "chan") + cdims),
(source_coh, ("source", "row", "chan") + cdims),
(dde2_jones, ("source", "row", "ant", "chan") + cdims),
(None, None),
(None, None),
(None, None)]
name_args = [(None, None) if a is None else
(a.name, i) if isinstance(a, da.Array) else
(a, i) for a, i in args]
numblocks = {a.name: a.numblocks
for a, i in args
if a is not None}
lr = LinearReduction(np_predict_vis, ("row", "chan") + cdims,
name_args,
numblocks=numblocks,
feed_index=7,
axis='source')
graph = HighLevelGraph.from_collections(lr.name, lr,
[a for a, i in args
if a is not None])
chunk_map = {d: arg.chunks[i] for arg, ind in args
if arg is not None and ind is not None
for i, d in enumerate(ind)}
chunk_map['row'] = time_index.chunks[0] # Override
chunks = tuple(chunk_map[d] for d in ('row', 'chan') + cdims)
return da.Array(graph, lr.name, chunks, dtype=out_dtype)
def _predict_coh_wrapper(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
base_vis,
reduce_single_source=False):
if reduce_single_source:
# All these arrays contract over a single 'source' chunk
dde1_jones = dde1_jones[0] if dde1_jones else None
source_coh = source_coh[0] if source_coh else None
dde2_jones = dde2_jones[0] if dde2_jones else None
vis = np_predict_vis(time_index, antenna1, antenna2,
# dde1_jones contracts over a single 'ant' chunk
dde1_jones[0] if dde1_jones else None,
source_coh,
# dde2_jones contracts over a single 'ant' chunk
dde2_jones[0] if dde2_jones else None,
None,
base_vis,
None)
if reduce_single_source:
return vis
return vis[None, ...]
def _predict_dies_wrapper(time_index, antenna1, antenna2,
die1_jones, base_vis, die2_jones):
return np_predict_vis(time_index, antenna1, antenna2,
None,
None,
None,
# die1_jones loses the 'ant' dim
die1_jones[0] if die1_jones else None,
base_vis,
# die2_jones loses the 'ant' dim
die2_jones[0] if die2_jones else None)
def parallel_reduction(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
predict_check_tup, out_dtype):
""" Does a standard dask tree reduction over source coherencies """
(have_ddes1, have_coh, have_ddes2,
have_dies1, have_bvis, have_dies2) = predict_check_tup
have_ddes = have_ddes1 and have_ddes2
if have_ddes:
cdims = tuple("corr-%d" % i for i in range(len(dde1_jones.shape[4:])))
elif have_coh:
cdims = tuple("corr-%d" % i for i in range(len(source_coh.shape[3:])))
else:
raise ValueError("need ddes or source coherencies")
ajones_dims = ("src", "row", "ant", "chan") + cdims
src_coh_dims = ("src", "row", "chan") + cdims
coherencies = da.blockwise(
_predict_coh_wrapper, src_coh_dims,
time_index, ("row",),
antenna1, ("row",),
antenna2, ("row",),
dde1_jones, None if dde1_jones is None else ajones_dims,
source_coh, None if source_coh is None else src_coh_dims,
dde2_jones, None if dde2_jones is None else ajones_dims,
None, None,
# time+row dimension chunks are equivalent but differently sized
align_arrays=False,
# Force row dimension to take row chunking scheme,
# instead of time chunking scheme
adjust_chunks={'row': time_index.chunks[0]},
meta=np.empty((0,)*len(src_coh_dims), dtype=out_dtype),
dtype=out_dtype)
return coherencies.sum(axis=0)
def apply_dies(time_index, antenna1, antenna2,
die1_jones, base_vis, die2_jones,
predict_check_tup, out_dtype):
""" Apply any Direction-Independent Effects and Base Visibilities """
# Now apply any Direction Independent Effect Terms
(have_ddes1, have_coh, have_ddes2,
have_dies1, have_bvis, have_dies2) = predict_check_tup
have_dies = have_dies1 and have_dies2
# Generate strings for the correlation dimensions
# This also has the effect of checking that we have all valid inputs
if have_dies:
cdims = tuple("corr-%d" % i for i in range(len(die1_jones.shape[3:])))
elif have_bvis:
cdims = tuple("corr-%d" % i for i in range(len(base_vis.shape[2:])))
else:
raise ValueError("Missing both antenna and baseline jones terms")
# In the case of predict_vis, the "row" and "time" dimensions
# are intimately related -- a contiguous series of rows
# are related to a contiguous series of timesteps.
# This means that the number of chunks of these
# two dimensions must match even though the chunk sizes may not.
# blockwise insists on matching chunk sizes.
# For this reason, we use the lower level blockwise and
# substitute "row" for "time" in arrays such as dde1_jones
# and die1_jones.
gjones_dims = ("row", "ant", "chan") + cdims
vis_dims = ("row", "chan") + cdims
return da.blockwise(
_predict_dies_wrapper, vis_dims,
time_index, ("row",),
antenna1, ("row",),
antenna2, ("row",),
die1_jones, None if die1_jones is None else gjones_dims,
base_vis, None if base_vis is None else vis_dims,
die2_jones, None if die2_jones is None else gjones_dims,
# time+row dimension chunks are equivalent but differently sized
align_arrays=False,
# Force row dimension to take row chunking scheme,
# instead of time chunking scheme
adjust_chunks={'row': time_index.chunks[0]},
meta=np.empty((0,)*len(vis_dims), dtype=out_dtype),
dtype=out_dtype)
@requires_optional('dask.array', opt_import_error)
def predict_vis(time_index, antenna1, antenna2,
dde1_jones=None, source_coh=None, dde2_jones=None,
die1_jones=None, base_vis=None, die2_jones=None,
streams=None):
predict_check_tup = predict_checks(time_index, antenna1, antenna2,
dde1_jones, source_coh, dde2_jones,
die1_jones, base_vis, die2_jones)
(have_ddes1, have_coh, have_ddes2,
have_dies1, have_bvis, have_dies2) = predict_check_tup
have_ddes = have_ddes1 and have_ddes2
if have_ddes:
if dde1_jones.shape[2] != dde1_jones.chunks[2][0]:
raise ValueError("Subdivision of antenna dimension into "
"multiple chunks is not supported.")
if dde2_jones.shape[2] != dde2_jones.chunks[2][0]:
raise ValueError("Subdivision of antenna dimension into "
"multiple chunks is not supported.")
if dde1_jones.chunks != dde2_jones.chunks:
raise ValueError("dde1_jones.chunks != dde2_jones.chunks")
if len(dde1_jones.chunks[1]) != len(time_index.chunks[0]):
raise ValueError("Number of row chunks (%s) does not equal "
"number of time chunks (%s)." %
(time_index.chunks[0], dde1_jones.chunks[1]))
have_dies = have_dies1 and have_dies2
if have_dies:
if die1_jones.shape[1] != die1_jones.chunks[1][0]:
raise ValueError("Subdivision of antenna dimension into "
"multiple chunks is not supported.")
if die2_jones.shape[1] != die2_jones.chunks[1][0]:
raise ValueError("Subdivision of antenna dimension into "
"multiple chunks is not supported.")
if die1_jones.chunks != die2_jones.chunks:
raise ValueError("die1_jones.chunks != die2_jones.chunks")
if len(die1_jones.chunks[0]) != len(time_index.chunks[0]):
raise ValueError("Number of row chunks (%s) does not equal "
"number of time chunks (%s)." %
(time_index.chunks[0], die1_jones.chunks[0]))
# Infer the output dtype
dtype_arrays = [dde1_jones, source_coh, dde2_jones, die1_jones, die2_jones]
out_dtype = np.result_type(*(np.dtype(a.dtype.name)
for a in dtype_arrays
if a is not None))
# Apply direction dependent effects
if have_coh or have_ddes:
# We create separate graphs for computing coherencies and applying
# the gains because coherencies are chunked over source which
# must be summed and added to the (possibly present) base visibilities
if streams is True:
sum_coherencies = linear_reduction(time_index,
antenna1,
antenna2,
dde1_jones,
source_coh,
dde2_jones,
predict_check_tup,
out_dtype)
else:
sum_coherencies = parallel_reduction(time_index,
antenna1,
antenna2,
dde1_jones,
source_coh,
dde2_jones,
predict_check_tup,
out_dtype)
else:
assert have_dies or have_bvis
sum_coherencies = None
# No more effects to apply, return at this point
if not have_dies and not have_bvis:
return sum_coherencies
# Add coherencies to the base visibilities
if sum_coherencies is not None:
if not have_bvis:
# Set base_vis = summed coherencies
base_vis = sum_coherencies
predict_check_tup = (have_ddes1, have_coh, have_ddes2,
have_dies1, True, have_dies2)
else:
base_vis += sum_coherencies
# Apply direction independent effects
return apply_dies(time_index, antenna1, antenna2,
die1_jones, base_vis, die2_jones,
predict_check_tup, out_dtype)
def wsclean_spectrum_wrapper(flux, coeffs, log_poly, ref_freq, frequency):
return wsclean_spectra(flux, coeffs[0], log_poly, ref_freq, frequency)
def wsclean_body_wrapper(uvw, lm, source_type, gauss_shape,
frequency, spectrum, dtype_):
return wsclean_predict_body(uvw[0], lm[0], source_type,
gauss_shape[0], frequency, spectrum,
dtype_)[None, :]
@requires_optional('dask.array', opt_import_error)
def wsclean_predict(uvw, lm, source_type, flux, coeffs,
log_poly, ref_freq, gauss_shape, frequency):
spectrum_dtype = np.result_type(*(a.dtype for a in (flux, coeffs,
log_poly, ref_freq,
frequency)))
spectrum = da.blockwise(wsclean_spectrum_wrapper, ("source", "chan"),
flux, ("source",),
coeffs, ("source", "comp"),
log_poly, ("source",),
ref_freq, ("source",),
frequency, ("chan",),
dtype=spectrum_dtype)
out_dtype = np.result_type(uvw.dtype, lm.dtype, frequency.dtype,
spectrum.dtype, np.complex64)
vis = da.blockwise(wsclean_body_wrapper, ("source", "row", "chan", "corr"),
uvw, ("row", "uvw"),
lm, ("source", "lm"),
source_type, ("source",),
gauss_shape, ("source", "gauss"),
frequency, ("chan",),
spectrum, ("source", "chan"),
out_dtype, None,
adjust_chunks={"source": 1},
new_axes={"corr": 1},
dtype=out_dtype)
return vis.sum(axis=0)
EXTRA_DASK_ARGS = """
streams : {False, True}
If ``True`` the coherencies are serially summed in a linear chain.
If ``False``, dask uses a tree style reduction algorithm.
"""
EXTRA_DASK_NOTES = """
* The ``ant`` dimension should only contain a single chunk equal
to the number of antenna. Since each ``row`` can contain
any antenna, random access must be preserved along this dimension.
* The chunks in the ``row`` and ``time`` dimension **must** align.
This subtle point **must be understood otherwise
invalid results will be produced** by the chunking scheme.
In the example below
we have four unique time indices :code:`[0,1,2,3]`, and
four unique antenna :code:`[0,1,2,3]` indexing :code:`10` rows.
.. code-block:: python
# Row indices into the time/antenna indexed arrays
time_idx = np.asarray([0,0,1,1,2,2,2,2,3,3])
ant1 = np.asarray( [0,0,0,0,1,1,1,2,2,3]
ant2 = np.asarray( [0,1,2,3,1,2,3,2,3,3])
A reasonable chunking scheme for the
``row`` and ``time`` dimension would be :code:`(4,4,2)`
and :code:`(2,1,1)` respectively.
Another way of explaining this is that the first
four rows contain two unique timesteps, the second four
rows contain one unique timestep and the last two rows
contain one unique timestep.
Some rules of thumb:
1. The number chunks in ``row`` and ``time`` must match
although the individual chunk sizes need not.
2. Unique timesteps should not be split across row chunks.
3. For a Measurement Set whose rows are ordered on the
``TIME`` column, the following is a good way of obtaining
the row chunking strategy:
.. code-block:: python
import numpy as np
import pyrap.tables as pt
ms = pt.table("data.ms")
times = ms.getcol("TIME")
unique_times, chunks = np.unique(times, return_counts=True)
4. Use :func:`~africanus.util.shapes.aggregate_chunks`
to aggregate multiple ``row`` and ``time``
chunks into chunks large enough such that functions operating
on the resulting data can drop the GIL and spend time
processing the data. Expanding the previous example:
.. code-block:: python
# Aggregate row
utimes = unique_times.size
# Single chunk for each unique time
time_chunks = (1,)*utimes
# Aggregate row chunks into chunks <= 10000
aggregate_chunks((chunks, time_chunks), (10000, utimes))
"""
try:
predict_vis.__doc__ = PREDICT_DOCS.substitute(
array_type=":class:`dask.array.Array`",
get_time_index=":code:`time.map_blocks("
"lambda a: np.unique(a, "
"return_inverse=True)[1])`",
extra_args=EXTRA_DASK_ARGS,
extra_notes=EXTRA_DASK_NOTES)
except AttributeError:
pass
wsclean_predict.__doc__ = WSCLEAN_PREDICT_DOCS.substitute(
array_type=":class:`dask.array.Array`")
|
from JobBrowserBFF.TestBase import TestBase
UPSTREAM_SERVICE = 'ee2'
ENV = 'ci'
USER_CLASS = 'user'
class JobBrowserBFFTest(TestBase):
def test_get_job_types_happy(self):
try:
self.set_config('upstream-service', UPSTREAM_SERVICE)
impl, context = self.impl_for(ENV, USER_CLASS)
result = impl.get_job_types(context)
self.assertIsInstance(result, dict)
self.assertIn('job_types', result)
job_types = result.get('job_types')
self.assertIsInstance(job_types, list)
self.assertEqual(len(job_types), 3)
job_type = job_types[0]
self.assertEqual(job_type['code'], 'narrative')
except Exception as ex:
self.assert_no_exception(ex)
|
import Tkinter
class MyFrame(Tkinter.Frame):
def __init__(self, parent, *args, **kwargs):
        Tkinter.Frame.__init__(self, parent, *args, **kwargs)
def main():
root = Tkinter.Tk()
MyFrame(root)
root.mainloop()
if __name__ == '__main__':
main()
|
import pytest
from django.core.management import call_command
from company.models import Company, CompanyCaseStudy
from supplier.models import Supplier
@pytest.mark.django_db
def test_load_test_fixture():
try:
call_command('loaddata', 'test_fixtures/load_tests.json')
except Exception:
raise AssertionError("Load test fixtures are broken")
assert Company.objects.all().count() == 25
assert CompanyCaseStudy.objects.all().count() == 1
assert Supplier.objects.all().count() == 25
|
# Thanks to CatUserBot
# Ported By @VckyouuBitch
# FROM Geez - Projects <https://github.com/Vckyou/GeezProjects>
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.types import (
MessageEntityMentionName,
)
from userbot import bot, BOTLOG, BOTLOG_CHATID, CMD_HELP
from userbot.events import geezbot_cmd
from userbot import CUSTOM_CMD as geez
async def get_full_user(event):
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.sender_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit("`ID người dùng là bắt buộc`")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except Exception as err:
return await event.edit("Có gì đó sai", str(err))
return user_obj, extra
async def get_user_sender_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj
@bot.on(geezbot_cmd(outgoing=True, pattern=r"gban(?: |$)(.*)"))
async def gspider(userbot):
lol = userbot
sender = await lol.get_sender()
me = await lol.client.get_me()
if not sender.id == me.id:
friday = await lol.reply("Gbanning người dùng..")
else:
friday = await lol.edit("Chờ xử lý.....")
me = await userbot.client.get_me()
await friday.edit(f"**Đã cấm người dùng khỏi toàn bộ nhóm tôi làm admin..**")
my_mention = "[{}](tg://user?id={})".format(me.first_name, me.id)
f"@{me.username}" if me.username else my_mention
await userbot.get_chat()
a = b = 0
if userbot.is_private:
user = userbot.chat
reason = userbot.pattern_match.group(1)
else:
userbot.chat.title
try:
user, reason = await get_full_user(userbot)
except BaseException:
pass
try:
if not reason:
reason = "Private"
except BaseException:
return await friday.edit(f"**Có một lỗi!!**")
if user:
if user.id == 1920635554:
return await friday.edit(
f"**Cha của bạn đã không dạy bạn? Rằng bạn không thể cấm người sáng tạo của bạn🖕**"
)
try:
from userbot.modules.sql_helper.gmute_sql import gmute
except BaseException:
pass
try:
await userbot.client(BlockRequest(user))
except BaseException:
pass
testuserbot = [
d.entity.id
for d in await userbot.client.get_dialogs()
if (d.is_group or d.is_channel)
]
for i in testuserbot:
try:
await userbot.client.edit_permissions(i, user, view_messages=False)
a += 1
await friday.edit(f"**GBANNED // Tổng số cuộc trò chuyện bị ảnh hưởng **: `{a}`")
except BaseException:
b += 1
else:
await friday.edit(f"**Trả lời người dùng!!**")
try:
if gmute(user.id) is False:
return await friday.edit(f"**Lỗi! Người dùng đã bị cấm.**")
except BaseException:
pass
return await friday.edit(
f"**Đã cấm [{user.first_name}](tg://user?id={user.id}) khỏi {a} nhóm**"
)
if BOTLOG:
await userbot.client.send_message(
BOTLOG_CHATID,
"#GMUTE\n"
f"USER: [{user.first_name}](tg://user?id={user.id})\n"
f"CHAT: {userbot.chat.title}(`{userbot.chat_id}`)",
)
@bot.on(geezbot_cmd(outgoing=True, pattern=r"ungban(?: |$)(.*)"))
async def gspider(userbot):
lol = userbot
sender = await lol.get_sender()
me = await lol.client.get_me()
if not sender.id == me.id:
friday = await lol.reply("`Chờ để tôi xử lý`")
else:
friday = await lol.edit("Chỉ một giây thôi ")
me = await userbot.client.get_me()
await friday.edit(f"Đang cố gắng Bỏ cấm người dùng!")
my_mention = "[{}](tg://user?id={})".format(me.first_name, me.id)
f"@{me.username}" if me.username else my_mention
await userbot.get_chat()
a = b = 0
if userbot.is_private:
user = userbot.chat
reason = userbot.pattern_match.group(1)
else:
userbot.chat.title
try:
user, reason = await get_full_user(userbot)
except BaseException:
pass
try:
if not reason:
reason = "Private"
except BaseException:
return await friday.edit("Có một lỗi !!")
if user:
if user.id == 1920635554:
return await friday.edit("**Bạn không thể cấm anh ta ... kết quả là bạn không thể cấm anh ta ... Anh ta là Người tạo ra tôi!**")
try:
from userbot.modules.sql_helper.gmute_sql import ungmute
except BaseException:
pass
try:
await userbot.client(UnblockRequest(user))
except BaseException:
pass
testuserbot = [
d.entity.id
for d in await userbot.client.get_dialogs()
if (d.is_group or d.is_channel)
]
for i in testuserbot:
try:
await userbot.client.edit_permissions(i, user, send_messages=True)
a += 1
await friday.edit(f"**UNGBANNING // TRÒ CHUYỆN CÓ ẢNH HƯỞNG - {a} **")
except BaseException:
b += 1
else:
await friday.edit("**Trả lời người dùng !!**")
try:
if ungmute(user.id) is False:
return await friday.edit("**Lỗi! Người dùng có thể đã được bỏ cấm.**")
except BaseException:
pass
return await friday.edit(
f"**UNGBANNED // USER - [{user.first_name}](tg://user?id={user.id}) CHATS : {a} **"
)
CMD_HELP.update({
"gban": f"\
`{geez}gban reason`\
\nUsage: Globally ban the user from every group where your bot is an admin and you are SUDO.\
\n\n`{geez}ungban reason`\
\nUsage: Globally unban the user from every group where your bot is an admin and you are SUDO"
})
|
"""empty message
Revision ID: f683429df188
Revises: 1f6af6d0cad3, 1b767d526d05
Create Date: 2020-11-29 02:09:29.262379
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f683429df188'
down_revision = ('1f6af6d0cad3', '1b767d526d05')
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
|
import abc
from appium import webdriver
from core.task import Task
from utils.tools import AppiumTools
class Processor(metaclass=abc.ABCMeta):
def __init__(self, serial, _session: webdriver.Remote):
self._serial = serial
self._session = _session
self._appium_tools = AppiumTools(self._serial, self._session)
@abc.abstractmethod
def run(self, task: Task):
pass
def log(self, message):
print("{}: {}".format(self._serial, message))
def swipe(self):
pass
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management import call_command
def main():
settings.configure(
INSTALLED_APPS = (
'django.contrib.contenttypes',
'uturn',
),
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3'
}
},
ROOT_URLCONF='uturn.tests.urls',
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
),
)
call_command('test', 'uturn')
if __name__ == '__main__':
main()
|
import csv
from scipy import ndimage
from matplotlib import pyplot as plt
import yaml
from PIL import Image
import numpy as np
from numba import njit
import os
import ReferenceModification.LibFunctions as lib
from ReferenceModification.Simulator.map_utils import *
from ReferenceModification.Simulator.BaseSimulatorClasses import BaseSim
class ForestMap:
def __init__(self, map_name) -> None:
self.map_name = map_name
# map info
self.resolution = None
self.n_obs = None
self.map_height = None
self.map_width = None
self.forest_length = None
self.forest_width = None
self.start_pose = None
self.obs_size = None
self.obstacle_buffer = None
self.end_y = None
self.end_goal = None
self.origin = np.zeros(3) # for ScanSimulator
self.dt_img = None
self.map_img = None
self.ref_pts = None # std wpts that aren't expanded
self.ss_normal = None # not expanded
self.diffs = None
self.l2s = None
self.load_map()
self.load_center_pts()
def load_map(self):
file_name = 'maps/' + self.map_name + '.yaml'
with open(file_name) as file:
documents = yaml.full_load(file)
yaml_file = dict(documents.items())
try:
self.resolution = yaml_file['resolution']
self.n_obs = yaml_file['n_obs']
self.obs_size = yaml_file['obs_size']
self.start_pose = np.array(yaml_file['start_pose'])
self.forest_length = yaml_file['forest_length']
self.forest_width = yaml_file['forest_width']
self.obstacle_buffer = yaml_file['obstacle_buffer']
self.end_y = yaml_file['end_y']
except Exception as e:
print(e)
raise IOError("Problem loading map yaml file")
self.end_goal = np.array([self.start_pose[0], self.end_y])
self.map_height = int(self.forest_length / self.resolution)
self.map_width = int(self.forest_width / self.resolution)
self.map_img = np.zeros((self.map_width, self.map_height))
self.set_dt()
def add_obstacles(self):
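        # Scatter n_obs square obstacles between the start pose and end_y, keeping
        # obstacle_buffer clear at the start; x positions are uniform across the forest
        # width, y positions are jittered inside evenly spaced boxes along its length.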
self.map_img = np.zeros((self.map_width, self.map_height))
y_length = (self.end_y - self.obstacle_buffer*2 - self.start_pose[1] - self.obs_size)
box_factor = 1.4
y_box = y_length / (self.n_obs * box_factor)
rands = np.random.random((self.n_obs, 2))
xs = rands[:, 0] * (self.forest_width-self.obs_size)
ys = rands[:, 1] * y_box
y_start = self.start_pose[1] + self.obstacle_buffer
y_pts = [y_start + y_box * box_factor * i for i in range(self.n_obs)]
ys = ys + y_pts
obs_locations = np.concatenate([xs[:, None], ys[:, None]], axis=-1)
obs_size_px = int(self.obs_size/self.resolution)
for location in obs_locations:
x, y = self.xy_to_row_column(location)
# print(f"Obstacle: ({location}): {x}, {y}")
self.map_img[x:x+obs_size_px, y:y+obs_size_px] = 1
def set_dt(self):
img = np.ones_like(self.map_img) - self.map_img
img[0, :] = 0 #TODO: move this to the original map img that I make
img[-1, :] = 0
img[:, 0] = 0
img[:, -1] = 0
self.dt_img = ndimage.distance_transform_edt(img) * self.resolution
self.dt_img = np.array(self.dt_img).T
return self.dt_img
def check_plan_location(self, pt):
return check_scan_location(pt, self.origin, self.resolution, self.map_width, self.map_height, self.dt_img, 0.2)
def check_scan_location(self, pt):
return check_scan_location(pt, self.origin, self.resolution, self.map_width, self.map_height, self.dt_img, 0.1)
def convert_positions(self, pts):
return convert_positions(pts, self.origin, self.resolution)
def xy_to_row_column(self, pt):
return xy_to_row_column(pt, self.origin, self.resolution)
def render_map(self, figure_n=1, wait=False):
#TODO: draw the track boundaries nicely
f = plt.figure(figure_n)
plt.clf()
plt.xlim([0, self.map_width])
plt.ylim([0, self.map_height])
plt.imshow(self.map_img.T, origin='lower')
xs = np.linspace(0, self.map_width, 10)
ys = np.ones_like(xs) * self.end_y / self.resolution
plt.plot(xs, ys, '--')
x, y = self.xy_to_row_column(self.start_pose[0:2])
plt.plot(x, y, '*', markersize=14)
plt.pause(0.0001)
if wait:
plt.show()
pass
def convert_positions(self, pts):
xs, ys = [], []
for pt in pts:
x, y = self.xy_to_row_column(pt)
xs.append(x)
ys.append(y)
return np.array(xs), np.array(ys)
def render_wpts(self, wpts):
plt.figure(4)
xs, ys = self.convert_positions(wpts)
plt.plot(xs, ys, '--', linewidth=2)
# plt.plot(xs, ys, '+', markersize=12)
plt.pause(0.0001)
def render_aim_pts(self, pts):
plt.figure(4)
xs, ys = self.convert_positions(pts)
# plt.plot(xs, ys, '--', linewidth=2)
plt.plot(xs, ys, 'x', markersize=10)
plt.pause(0.0001)
def load_center_pts(self):
track = []
filename = 'maps/' + self.map_name + "_opti.csv"
with open(filename, 'r') as csvfile:
csvFile = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)
for lines in csvFile:
track.append(lines)
track = np.array(track)
print(f"Track Loaded: {filename}")
self.ref_pts = track[:, 1:3]
self.ss_normal = track[:, 0]
# self.expand_wpts()
# print(self.ref_pts)
self.diffs = self.ref_pts[1:,:] - self.ref_pts[:-1,:]
self.l2s = self.diffs[:,0]**2 + self.diffs[:,1]**2
def expand_wpts(self):
n = 5 # number of pts per orig pt
dz = 1 / n
o_line = self.wpts
new_line = []
for i in range(len(self.wpts)-1):
dd = lib.sub_locations(o_line[i+1], o_line[i])
for j in range(n):
pt = lib.add_locations(o_line[i], dd, dz*j)
new_line.append(pt)
self.wpts = np.array(new_line)
class ForestSim(BaseSim):
"""
Simulator for Race Tracks
Data members:
map_name: name of the map to be used. Forest yaml file which stores the parameters for the forest. No image is required.
"""
def __init__(self, map_name, sim_conf=None):
"""
Init function
Args:
map_name: name of forest map to use.
sim_conf: config file for simulation
"""
if sim_conf is None:
# path = os.path.dirname(__file__)
# sim_conf = lib.load_conf(path, "std_config")
sim_conf = lib.load_conf("std_config")
env_map = ForestMap(map_name)
BaseSim.__init__(self, env_map, self.check_done_forest, sim_conf
)
def check_done_forest(self):
"""
Checks if the episode in the forest is complete
Returns:
done (bool): a flag if the ep is done
"""
self.reward = 0 # normal
# check if finished lap
dx = self.state[0] - self.env_map.start_pose[0]
dx_lim = self.env_map.forest_width * 0.5
if dx < dx_lim and self.state[1] > self.env_map.end_y:
self.done = True
self.reward = 1
self.done_reason = f"Lap complete"
# check crash
elif self.env_map.check_scan_location(self.state[0:2]):
self.done = True
self.reward = -1
self.done_reason = f"Crash obstacle: [{self.state[0]:.2f}, {self.state[1]:.2f}]"
# check steps
elif self.steps > self.max_steps:
self.done = True
self.reward = -1
self.done_reason = f"Max steps"
# check orientation
elif abs(self.state[2]) > 0.66*np.pi:
self.done = True
self.done_reason = f"Vehicle turned around"
self.reward = -1
return self.done
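# Usage sketch (a minimal assumption of how this simulator is driven; 'forest' is a
# hypothetical map name and requires maps/forest.yaml, maps/forest_opti.csv and a
# std_config file to exist):
#
#   sim = ForestSim("forest")
#   sim.env_map.add_obstacles()
#   sim.env_map.render_map(wait=True)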
|
import logging
import time
import discord
import discord_components as dc
import music_bot.embeds as embeds
from music_bot.search import SearchHandler
class CommandHandler:
def __init__(self, client, prefix):
self.client = client
self.prefix = prefix
self.commands = [
CommandType("help", self.help, "list available commands"),
CommandType("join", self.join_command, "connects to authors voice channel"),
CommandType("leave", self.leave, "disconnects from current voice channel"),
CommandType("search", self.search, "searches on youtube for the query after the command, "
"and lets you select out of 8 results"),
CommandType("play", self.play, "plays query after command from youtube, first search result, "
"you have to be in a voice channel, or resumes if there ist no query"),
CommandType("next", self.next, "play next song in queue"),
CommandType("pause", self.resume_pause, "pause current song"),
CommandType("resume", self.resume_pause, "resume current song"),
CommandType("stop", self.stop_command, "stop current song, and clears queue"),
CommandType("volume up", self.volume_up, "global playback volume up"),
CommandType("volume down", self.volume_down, "global playback volume down"),
]
self.guilds_voice_settings = []
self.active_searches = []
self.search_handler = SearchHandler()
@staticmethod
async def switch_message_interaction(content=None, embed=None, delete_after=None, message=None, interaction=None):
if message:
await message.channel.send(
content=content,
embed=embed,
delete_after=delete_after
)
elif interaction:
await interaction.respond(
content=content,
embed=embed,
# delete_after=delete_after
)
else:
logging.warning("need message or interaction in switch_message_interaction")
async def check_author_voice(self, author, message=None, interaction=None) -> bool:
if author.voice:
return True
else:
await self.switch_message_interaction(
embed=embeds.simple_message("ERROR",
f"author not in any voice channel",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
return False
async def get_current_voice(self, voice_channel) -> discord.VoiceClient:
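        # Reuse an existing voice client for this guild and channel; otherwise connect to the given channel.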
result_list_guild = list(filter(lambda voice_client: voice_client.guild.name == voice_channel.guild.name,
self.client.voice_clients))
if len(result_list_guild) == 0:
return await self.join(voice_channel)
result_list_channel = list(filter(lambda voice_client: voice_client.channel == voice_channel,
result_list_guild))
if len(result_list_channel) == 0:
return await self.join(voice_channel)
return result_list_channel[0]
async def command(self, message):
command = message.content.split(" ")[0].replace(self.prefix, "")
result_list = list(filter(lambda command_type: command_type.command == command, self.commands))
if len(result_list) == 1:
result = result_list[0]
await result.function(message)
await message.delete()
elif len(result_list) == 0:
await message.channel.send(
embed=embeds.simple_message("ERROR",
f"**Unknown Command:** \"{command}\"",
self.client.user),
delete_after=10
)
else:
await message.channel.send(
embed=embeds.simple_message("ERROR",
f"**Multiple Commands matched:** \"{command}\", {result_list}",
self.client.user),
delete_after=10
)
logging.warning(f"found multiple commands: {result_list}")
async def help(self, message):
answer = "__**available commands**__:\n"
for command in self.commands:
answer += f"> **{self.prefix}{command.command}** :: {command.description}\n"
await message.channel.send(embed=embeds.simple_message("Help", answer, self.client.user), delete_after=120)
return
async def join_command(self, message):
if await self.check_author_voice(message.author, message, None):
await self.join(message.author.voice.channel, message, None)
async def join(self, voice_channel, message=None, interaction=None) -> discord.VoiceClient:
voice_client = await voice_channel.connect()
await self.switch_message_interaction(
embed=embeds.simple_message("Joined",
f"Joined {voice_channel.name}",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
settings = list(filter(lambda settings_element: settings_element.guild_id == voice_channel.guild.id,
self.guilds_voice_settings))
if len(settings) == 0:
self.guilds_voice_settings.append(GuildVoiceSettings(voice_channel.guild.id, voice_client.session_id))
else:
settings[0].voice_id = voice_client.session_id
return voice_client
async def leave(self, message):
if await self.check_author_voice(message.author, message, None):
current_voice_client = await self.get_current_voice(message.author.voice.channel)
await message.channel.send(
embed=embeds.simple_message("Disconnected",
f"Disconnected from {current_voice_client.channel.name}",
self.client.user),
delete_after=10
)
settings = list(filter(lambda settings_element: settings_element.guild_id == current_voice_client.guild.id,
self.guilds_voice_settings))
settings[0].voice_id = None
await current_voice_client.disconnect()
return
async def search(self, message):
if await self.check_author_voice(message.author, message, None):
search_query = message.content.replace(f"{self.prefix}search ", "")
await message.channel.send(embed=embeds.simple_message("Searching",
"Searching, just a moment",
self.client.user),
delete_after=5,
)
search_results = self.search_handler.youtube_search(search_query)
custom_id = f"song_search_{int(time.time())}"
send_message = await message.channel.send(
embed=embeds.search_results_message("Search",
f"Search for: {search_query}",
search_results,
self.client.user),
components=[
dc.Select(
placeholder="Select Search result",
options=[
dc.SelectOption(label=search_result.title,
value=search_result.url,
description=f"({search_result.url}) "
f"{search_result.duration}")
for search_result in search_results
],
custom_id=custom_id,
)
],
)
self.active_searches.append(ActiveSearchType(custom_id, message, send_message, search_results))
async def play(self, message, search_result=None, current_queue_element=None):
if current_queue_element:
search_result = current_queue_element.search_result
active_voice_client = current_queue_element.voice_client
else:
if not search_result:
search_query = message.content.replace(f"{self.prefix}search", ""). \
replace(f"{self.prefix}play", "").strip()
if len(search_query) == 0:
await self.resume_pause(message)
return
search_result = self.search_handler.simple_search(search_query)
if await self.check_author_voice(message.author, message, None):
active_voice_client = await self.get_current_voice(message.author.voice.channel)
else:
return
settings = list(filter(lambda settings_element: settings_element.voice_id == active_voice_client.session_id,
self.guilds_voice_settings))[0]
volume = settings.volume
queue_index = settings.queue_index
queue = settings.queue
message_send_return = None
info_message_send_return = None
if not active_voice_client.is_playing():
if not message:
channel = current_queue_element.channel
else:
channel = message.channel
if len(queue) == 0:
queued_after = 0
else:
queued_after = len(queue) - (queue_index + 1)
message_send = channel.send(
embed=embeds.search_results_message(
"Playing",
f"Songs in queue after: {queued_after}",
[search_result],
self.client.user),
components=[
dc.Button(label="play/pause",
custom_id=f"play_pause_button_{active_voice_client.channel.id}"),
dc.Button(label="next",
custom_id=f"next_button_{active_voice_client.channel.id}"),
dc.Button(label="volume up",
custom_id=f"volume_up_button_{active_voice_client.channel.id}"),
dc.Button(label="volume down",
custom_id=f"volume_down_button_{active_voice_client.channel.id}"),
dc.Button(label="stop",
custom_id=f"stop_button_{active_voice_client.channel.id}"),
],
)
source = discord.FFmpegPCMAudio(search_result.play_url)
active_voice_client.play(discord.PCMVolumeTransformer(source, volume=volume / 100))
message_send_return = await message_send
if len(queue) - 1 >= queue_index:
index = self.guilds_voice_settings.index(settings)
self.guilds_voice_settings[index].queue.message = message_send_return
else:
info_message_send_return = await message.channel.send(
embed=embeds.search_results_message(
f"Queued {search_result.title}",
f"Songs in queue after: {len(queue) - queue_index}",
[search_result],
self.client.user),
)
queue[queue_index - 1].info_message = info_message_send_return
if not current_queue_element:
logging.info(f"added {search_result.title} to queue")
index = self.guilds_voice_settings.index(settings)
self.guilds_voice_settings[index].queue.append(
QueueType(search_result, message.channel, message_send_return,
info_message_send_return, active_voice_client))
if len(queue) == 1:
await self.client.before_check_playing_loop(active_voice_client)
return
async def next(self, interaction=None):
settings = list(filter(lambda settings_element: settings_element.guild_id == interaction.guild.id,
self.guilds_voice_settings))[0]
queue_index = settings.queue_index
queue = settings.queue
if len(queue) - 1 > queue_index:
queue_element = queue[queue_index]
queue_element.voice_client.stop()
if queue_element.message is not None:
try:
await queue_element.message.delete()
except discord.errors.NotFound:
pass
if queue_element.info_message is not None:
try:
await queue_element.info_message.delete()
except discord.errors.NotFound:
pass
queue_index += 1
if queue[queue_index].info_message is not None:
try:
await queue[queue_index].info_message.delete()
except discord.errors.NotFound:
pass
await self.play(None, None, queue[queue_index])
if interaction:
await interaction.respond(
embed=embeds.simple_message(
f"Next",
f"",
self.client.user),
)
else:
queue_element = queue[queue_index]
await self.switch_message_interaction(
embed=embeds.simple_message(
f"ERROR",
f"No more Songs in Queue",
self.client.user),
delete_after=10,
message=queue_element,
interaction=interaction
)
async def resume_pause(self, message, interaction=None):
if not await self.check_author_voice(message.author, message, interaction):
return
else:
active_voice_client = await self.get_current_voice(message.author.voice.channel)
if active_voice_client.is_paused():
active_voice_client.resume()
await self.switch_message_interaction(
embed=embeds.simple_message("Resumed",
"",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
elif active_voice_client.is_playing():
active_voice_client.pause()
await self.switch_message_interaction(
embed=embeds.simple_message("Paused",
"",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
else:
await self.switch_message_interaction(
embed=embeds.simple_message("ERROR",
"Nothing to resume or pause",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
async def stop_command(self, message, interaction=None):
if not await self.check_author_voice(message.author if message else interaction.author, message, interaction):
return
else:
active_voice_client = await self.get_current_voice(message.author.voice.channel
if message else interaction.author.voice.channel)
await self.stop(active_voice_client, message, interaction)
async def stop(self, voice_client, message=None, interaction=None):
if voice_client.is_playing() or voice_client.is_paused():
settings = list(filter(lambda settings_element: settings_element.voice_id == voice_client.session_id,
self.guilds_voice_settings))[0]
queue_index = settings.queue_index
queue = settings.queue
self.client.check_playing_loop.stop()
queue_element = queue[queue_index]
queue_element.voice_client.stop()
if queue_element.message is not None:
try:
await queue_element.message.delete()
except discord.errors.NotFound:
pass
if queue_element.info_message is not None:
try:
await queue_element.info_message.delete()
except discord.errors.NotFound:
pass
index = self.guilds_voice_settings.index(settings)
self.guilds_voice_settings[index].queue = []
self.guilds_voice_settings[index].queue_index = 0
await self.switch_message_interaction(
embed=embeds.simple_message("Stopped",
"",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
else:
await self.switch_message_interaction(
embed=embeds.simple_message("ERROR",
"Nothing to stop",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
async def volume_set(self, message, interaction=None, value=0, status_message="Volume set"):
if message:
if not await self.check_author_voice(message.author, message, None):
return
else:
active_voice_client = await self.get_current_voice(message.author.voice.channel)
else: # NOTE interaction
if not await self.check_author_voice(interaction.author, None, interaction):
return
else:
active_voice_client = await self.get_current_voice(interaction.author.voice.channel)
settings = list(filter(lambda settings_element: settings_element.voice_id == active_voice_client.session_id,
self.guilds_voice_settings))[0]
index = self.guilds_voice_settings.index(settings)
self.guilds_voice_settings[index].volume += value
volume = settings.volume
active_voice_client.source.volume = volume / 100
await self.switch_message_interaction(
embed=embeds.simple_message(status_message,
f"Volume: {int(volume)}%",
self.client.user),
delete_after=10,
message=message,
interaction=interaction
)
async def volume_up(self, message, interaction=None):
await self.volume_set(message, interaction, 10, "Volume up")
return
async def volume_down(self, message, interaction=None):
await self.volume_set(message, interaction, -10, "Volume down")
return
class CommandType:
def __init__(self, command, function, description):
self.command = command
self.function = function
self.description = description
class ActiveSearchType:
def __init__(self, custom_id, message, send_message, search_elements):
self.id = custom_id
self.message = message
self.send_message = send_message
self.search_elements = search_elements
class QueueType:
def __init__(self, search_result, channel, message, info_message, voice_client):
self.search_result = search_result
self.channel = channel
self.message = message
self.info_message = info_message
self.voice_client = voice_client
class GuildVoiceSettings:
def __init__(self, guild_id, voice_id):
self.guild_id = guild_id
self.voice_id = voice_id
self.volume = 30
self.queue_index = 0
self.queue = []
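# Wiring sketch (a minimal assumption of how CommandHandler is driven; the real bot
# client used with this module is not shown here and may differ):
#
#   client = discord.Client()
#   handler = CommandHandler(client, prefix="!")
#
#   @client.event
#   async def on_message(message):
#       if message.content.startswith(handler.prefix):
#           await handler.command(message)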
|
'''
Created on Aug 20, 2009
@author: Christopher
'''
def get_birthdays(bdays, now):
import datetime
today = datetime.date.fromtimestamp(now)
new = {}
for uid, d_ in bdays.items():
bd = d_.get('birthday_date')
if bd is not None:
mdy = [None]*3
vals = [int(i) for i in bd.split('/')]
mdy[:len(vals)] = vals
m, d, y = mdy
bd_date_this = datetime.date(today.year, m, d)
bd_date_next = datetime.date(today.year + 1, m, d)
keep = False
if -1 < (bd_date_this-today).days <= 7:
keep = True
bd_date = bd_date_this
elif -1 < (bd_date_next-today).days <= 7:
keep = True
bd_date = bd_date_next
if keep:
new[uid] = dict(d_)
new[uid]['bday'] = bd_date
if y is not None:
born_on = datetime.date(y,m,d)
#I figure leap-year shouldn't matter, who d'you know that has been around the sun 1460+ times?
new[uid]['age'] = int(((bd_date - born_on).days + 1) / 365)
return sorted(new.items(), key=lambda foo: foo[1]['bday'])
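# Usage sketch (hypothetical data; 'birthday_date' is "M/D" or "M/D/YYYY" as parsed above):
#
#   import time
#   bdays = {"100": {"name": "Ada", "birthday_date": "12/10/1985"}}
#   upcoming = get_birthdays(bdays, time.time())
#   # -> sorted list of (uid, info) pairs with info['bday'] and, when a year is given, info['age']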
|
import os
import logging
import time
import datetime
from easy_thumbnails.models import Source
from django.core.files.storage import default_storage
from django.core.management.base import BaseCommand, CommandError
from django.db.models import F,Max,OuterRef,Subquery
from django.utils.timezone import now
from ik.models.members import Member
from ik.models.chat import ChatMember, Message, Chat
from ik.utils.email import can_send_email_to, send_email_template
from ik.constants import *
logger = logging.getLogger("cc")
class Command(BaseCommand):
help = "Send unread message notify email to members"
def send_to_member(self, member, unread_chats):
if can_send_email_to(member):
self.stdout.write(self.style.SUCCESS('Send to member %s' % member))
total_unread = 0
for unr in unread_chats:
total_unread += unr["unread_count"]
ctx = {
"member": member,
"unread_chats": unread_chats,
"total_unread": total_unread
}
send_email_template("unread_messages.html", ctx, member.email, "cc_unread",
from_email="unread@chatcube.org", member=member)
member.settings.set_value("unread_sent", int(time.time()))
def add_arguments(self, parser):
parser.add_argument('--testmember', type=str, help="EMAIL of member")
def handle(self, *args, **options):
testmember = options['testmember']
if testmember:
member = Member.objects.get(email=testmember)
unread_msgs = [{
'unread_count': 12,
"is_group": 1,
"chat_title": "Test Group Tile",
"last_message": Message.objects.filter(type=MESSAGE_TYPE_TEXT).first()
},
{
'unread_count': 12,
"is_group": 0,
"chat_title": "Test Chat Tile",
"last_message": Message.objects.filter(type=MESSAGE_TYPE_TEXT).first()
}]
self.send_to_member(member, unread_msgs)
return
latest_msg = Message.objects.filter(chat=OuterRef('chat')).order_by("-id")
chatmembers = ChatMember.objects.filter(last_seen_message_id__lt=Subquery(latest_msg.values('id')[:1]))\
.select_related('member', 'chat', 'last_seen_message')\
.order_by('member')
prev_cm = None
unread_chats = []
is_old_unread_messages_exists = False
unread_age = -1
sent_already = False
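        # chatmembers is ordered by member: accumulate unread chats per member and send one
        # digest email each time the member changes; the block after the loop flushes the last member.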
for cm in chatmembers:
if prev_cm != cm.member:
if prev_cm:
if unread_age > 0:
if is_old_unread_messages_exists and not sent_already and len(unread_chats) > 0:
self.send_to_member(prev_cm, unread_chats)
else:
self.stdout.write('Member %s, skip as no old unseen messages' % prev_cm)
else:
self.stdout.write('Member %s, skip as set unread age to -1' % prev_cm)
notification_settings = cm.member.notification_settings()
unread_age = notification_settings.get("unread_age", 48)
sent_date = cm.member.settings.get_value("unread_sent", 0)
sent_already = (((sent_date + unread_age * 3600) - 3600) > int(time.time()))
is_old_unread_messages_exists = False
unread_chats = []
prev_cm = cm.member
if not is_old_unread_messages_exists and unread_age > 0 and not sent_already:
unread_date = now() - datetime.timedelta(seconds=unread_age*3600)
is_old_unread_messages_exists = Message.objects.filter(chat=cm.chat, sendtime__lt=unread_date, id__gt=cm.last_seen_message.id)\
.exclude(author=cm.member).exists()
if not sent_already and unread_age > 0:
party_chatmembers = cm.chat.get_party_chatmembers(cm.member, limit=4)
cnt = cm.chat.get_unread_count(cm)
if cnt > 0:
unread_chats.append({"unread_count": cnt,
"is_group": cm.chat.is_group,
"chat_title": cm.chat.get_title(party_chatmembers),
"last_message": cm.chat.get_last_message(cm.member)
})
if unread_chats:
if unread_age > 0:
if is_old_unread_messages_exists:
self.send_to_member(cm.member, unread_chats)
else:
self.stdout.write('Member %s, skip as no old unseen messages' % cm.member)
else:
self.stdout.write('Member %s, skip as set unread age to -1' % cm.member)
|
import komand
from .schema import BulkIndicatorDownloadInput, BulkIndicatorDownloadOutput
# Custom imports below
import datetime
class BulkIndicatorDownload(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="bulk_indicator_download",
description="Retrieve ThreatConnect Bulk Indicator Download",
input=BulkIndicatorDownloadInput(),
output=BulkIndicatorDownloadOutput(),
)
def run(self, params={}):
indicators = self.connection.threat_connect.bulk_indicators()
indicator_obj_list = []
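        # Build a bulk indicator filter; only parameters that were actually supplied are applied as post-filters.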
filter1 = indicators.add_filter()
filter1.add_owner(params.get("owner"))
if params.get("confidence"):
filter1.add_pf_confidence(params.get("confidence"))
if params.get("attribute"):
filter1.add_pf_attribute(params.get("attribute"))
if params.get("date_added"):
filter1.add_pf_date_added(params.get("date_added"))
if params.get("last_modified"):
filter1.add_pf_last_modified(params.get("last_modified"))
if params.get("rating"):
filter1.add_pf_rating(params.get("rating"))
if params.get("tag"):
filter1.add_pf_tag(params.get("tag"))
if params.get("threat_assess_confidence"):
filter1.add_pf_threat_assess_confidence(params.get("threat_assess_confidence"))
if params.get("threat_assess_rating"):
filter1.add_pf_threat_assess_rating(params.get("threat_assess_rating"))
if params.get("type"):
            filter1.add_pf_type(params.get("type"))
# Retrieve Indicators and Apply Filters
try:
indicators.retrieve()
except Exception as e:
raise e
# Iterate Through Results
for indicator in indicators:
indicator_obj = {
"id": indicator.id,
"owner_name": (indicator.owner_name or ""),
"date_added": (
                    datetime.datetime.strptime(indicator.date_added, "%Y-%m-%dT%H:%M:%SZ").isoformat() or ""
),
"last_modified": (
                    datetime.datetime.strptime(indicator.last_modified, "%Y-%m-%dT%H:%M:%SZ").isoformat() or ""
),
"rating": (indicator.rating or ""),
"threat_assess_rating": (str(indicator.threat_assess_rating) or ""),
"confidence": (indicator.confidence or ""),
"threat_assess_confidence": (str(indicator.threat_assess_confidence) or ""),
"type": (indicator.type or ""),
"weblink": indicator.weblink,
}
indicator_obj_list.append(indicator_obj)
return {"bulk_indicators": indicator_obj_list}
def test(self):
owners = self.connection.threat_connect.owners()
owner = ""
try:
owners.retrieve()
except Exception as e:
raise e
for owner in owners:
owner = owner.name
return {"Owner Name": owner}
|
from typing import Optional, Any, Union, Sequence, TextIO
import re
import warnings
from netmiko.base_connection import DELAY_FACTOR_DEPR_SIMPLE_MSG
from netmiko.cisco_base_connection import CiscoBaseConnection, CiscoFileTransfer
class CiscoXrBase(CiscoBaseConnection):
def establish_connection(self, width: int = 511, height: int = 511) -> None:
"""Establish SSH connection to the network device"""
super().establish_connection(width=width, height=height)
def session_preparation(self) -> None:
"""Prepare the session after the connection has been established."""
# IOS-XR has an issue where it echoes the command even though it hasn't returned the prompt
self._test_channel_read(pattern=r"[>#]")
cmd = "terminal width 511"
self.set_terminal_width(command=cmd, pattern=cmd)
self.disable_paging()
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
def send_config_set( # type: ignore
self,
config_commands: Union[str, Sequence[str], TextIO, None] = None,
exit_config_mode: bool = False,
**kwargs: Any,
) -> str:
"""IOS-XR requires you not exit from configuration mode."""
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def commit(
self,
confirm: bool = False,
confirm_delay: Optional[int] = None,
comment: str = "",
label: str = "",
read_timeout: float = 120.0,
delay_factor: Optional[float] = None,
) -> str:
"""
Commit the candidate configuration.
default (no options):
command_string = commit
confirm and confirm_delay:
command_string = commit confirmed <confirm_delay>
label (which is a label name):
command_string = commit label <label>
comment:
command_string = commit comment <comment>
delay_factor: Deprecated in Netmiko 4.x. Will be eliminated in Netmiko 5.
supported combinations
label and confirm:
command_string = commit label <label> confirmed <confirm_delay>
label and comment:
command_string = commit label <label> comment <comment>
All other combinations will result in an exception.
failed commit message:
% Failed to commit one or more configuration items during a pseudo-atomic operation. All
changes made have been reverted. Please issue 'show configuration failed [inheritance]'
from this session to view the errors
message XR shows if other commits occurred:
One or more commits have occurred from other configuration sessions since this session
started or since the last commit was made from this session. You can use the 'show
configuration commit changes' command to browse the changes.
Exit of configuration mode with pending changes will cause the changes to be discarded and
an exception to be generated.
"""
if delay_factor is not None:
warnings.warn(DELAY_FACTOR_DEPR_SIMPLE_MSG, DeprecationWarning)
if confirm and not confirm_delay:
raise ValueError("Invalid arguments supplied to XR commit")
if confirm_delay and not confirm:
raise ValueError("Invalid arguments supplied to XR commit")
if comment and confirm:
raise ValueError("Invalid arguments supplied to XR commit")
label = str(label)
error_marker = "Failed to"
alt_error_marker = "One or more commits have occurred from other"
# Select proper command string based on arguments provided
if label:
if comment:
command_string = f"commit label {label} comment {comment}"
elif confirm:
command_string = "commit label {} confirmed {}".format(
label, str(confirm_delay)
)
else:
command_string = f"commit label {label}"
elif confirm:
command_string = f"commit confirmed {str(confirm_delay)}"
elif comment:
command_string = f"commit comment {comment}"
else:
command_string = "commit"
# Enter config mode (if necessary)
output = self.config_mode()
# IOS-XR might do this:
# This could be a few minutes if your config is large. Confirm? [y/n][confirm]
new_data = self.send_command(
command_string,
expect_string=r"(#|onfirm)",
strip_prompt=False,
strip_command=False,
read_timeout=read_timeout,
)
assert isinstance(new_data, str)
if "onfirm" in new_data:
output += new_data
new_data = self.send_command(
"y",
expect_string=r"#",
strip_prompt=False,
strip_command=False,
read_timeout=read_timeout,
)
assert isinstance(new_data, str)
output += new_data
if error_marker in output:
raise ValueError(f"Commit failed with the following errors:\n\n{output}")
if alt_error_marker in output:
# Other commits occurred, don't proceed with commit
new_data = self.send_command_timing(
"no", strip_prompt=False, strip_command=False
)
assert isinstance(new_data, str)
output += new_data
raise ValueError(f"Commit failed with the following errors:\n\n{output}")
return output
def check_config_mode(
self, check_string: str = ")#", pattern: str = r"[#\$]"
) -> bool:
"""Checks if the device is in configuration mode or not.
IOS-XR, unfortunately, does this:
RP/0/RSP0/CPU0:BNG(admin)#
"""
self.write_channel(self.RETURN)
output = self.read_until_pattern(pattern=pattern)
# Strip out (admin) so we don't get a false positive with (admin)#
# (admin-config)# would still match.
output = output.replace("(admin)", "")
return check_string in output
def exit_config_mode(self, exit_config: str = "end", pattern: str = "") -> str:
"""Exit configuration mode."""
output = ""
if self.check_config_mode():
self.write_channel(self.normalize_cmd(exit_config))
# Make sure you read until you detect the command echo (avoid getting out of sync)
if self.global_cmd_verify is not False:
output += self.read_until_pattern(
pattern=re.escape(exit_config.strip())
)
# Read until we detect either an Uncommitted change or the end prompt
if not re.search(r"(Uncommitted|#$)", output):
output += self.read_until_pattern(pattern=r"(Uncommitted|#$)")
if "Uncommitted changes found" in output:
self.write_channel(self.normalize_cmd("no\n"))
output += self.read_until_pattern(pattern=r"[>#]")
if not re.search(pattern, output, flags=re.M):
output += self.read_until_pattern(pattern=pattern)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def save_config(self, *args: Any, **kwargs: Any) -> str:
"""Not Implemented (use commit() method)"""
raise NotImplementedError
class CiscoXrSSH(CiscoXrBase):
"""Cisco XR SSH driver."""
pass
class CiscoXrTelnet(CiscoXrBase):
"""Cisco XR Telnet driver."""
pass
class CiscoXrFileTransfer(CiscoFileTransfer):
"""Cisco IOS-XR SCP File Transfer driver."""
@staticmethod
def process_md5(md5_output: str, pattern: str = r"^([a-fA-F0-9]+)$") -> str:
"""
IOS-XR defaults with timestamps enabled
# show md5 file /bootflash:/boot/grub/grub.cfg
Sat Mar 3 17:49:03.596 UTC
c84843f0030efd44b01343fdb8c2e801
"""
match = re.search(pattern, md5_output, flags=re.M)
if match:
return match.group(1)
else:
raise ValueError(f"Invalid output from MD5 command: {md5_output}")
def remote_md5(
self, base_cmd: str = "show md5 file", remote_file: Optional[str] = None
) -> str:
"""
IOS-XR for MD5 requires this extra leading /
show md5 file /bootflash:/boot/grub/grub.cfg
"""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
# IOS-XR requires both the leading slash and the slash between file-system and file here
remote_md5_cmd = f"{base_cmd} /{self.file_system}/{remote_file}"
dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, read_timeout=300)
assert isinstance(dest_md5, str)
dest_md5 = self.process_md5(dest_md5)
return dest_md5
def enable_scp(self, cmd: str = "") -> None:
raise NotImplementedError
def disable_scp(self, cmd: str = "") -> None:
raise NotImplementedError
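# Usage sketch (assumes netmiko's ConnectHandler and a reachable IOS-XR device; the
# host and credentials below are placeholders):
#
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type="cisco_xr", host="192.0.2.1",
#                         username="admin", password="secret")
#   conn.send_config_set(["interface Loopback0", "description test"])
#   conn.commit(label="lo0-desc", comment="add Loopback0 description")  # confirm+comment together would raise
#   conn.exit_config_mode()
#   conn.disconnect()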
|
import mbuild as mb
from os.path import dirname, join
def _get_library_dir():
""" Returns the absolute path to the test compound library. """
compound_library = join(dirname(__file__), "test_cpds")
return compound_library
def load_solvent():
file_path = join(_get_library_dir(), "test_solvent.mol2")
compound = mb.load(file_path)
return compound
def load_lipid():
file_path = join(_get_library_dir(), "test_lipid.mol2")
compound = mb.load(file_path)
return compound
|
# one way to optimise this could be to use
# some other implementation to reverse other than using array[::-1]
# 26ms
class Solution:
def reverse(self, x: int) -> int:
if(x>=0):
result = int(str(x)[::-1])
else:
b = str(x)[1:]
result = int("-" + str(int(b[::-1])))
        if result >= 2147483647 or result <= -2147483648:
return 0
else:
return result
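# Example: Solution().reverse(-123) -> -321; Solution().reverse(1534236469) -> 0 (overflow)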
## alt SOLUTION
## 36 ms
## can be improved by removing range
# class Solution:
# def reverse(self, x: int) -> int:
# rem = 0
# quo=x
# if(quo<0):
# quo=quo*(-1)
# while (quo > 9):
# rem = rem*10 +quo % 10
# quo=quo//10
# if(quo<10):
# rem = rem * 10 + quo % 10
# if(x<0):
# rem=rem*(-1)
# if(rem not in range(-2**31, (2**31))):
# return 0
# else:
# return rem
|
class Student:
def __init__(self, id, name, testscore):
self.id = id
self.name = name
        self.testscore = testscore
def display(self):
print(self.id, self.name, self.testscore)
# Go to pickledump.py
|
"""
@author: krakowiakpawel9@gmail.com
@site: e-smartdata.org
"""
import cv2
print(cv2.__version__)
image = cv2.imread(filename=r'01_basics/images/bear.jpg')
cv2.imshow(winname='image', mat=image)
cv2.waitKey(delay=0)
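# waitKey(delay=0) blocks until any key is pressed; cv2.destroyAllWindows() could be
# called afterwards to close the preview window (not part of the original script).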
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-08 07:44
from __future__ import unicode_literals
import assets.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assets', '0017_auto_20170105_1958'),
]
operations = [
migrations.AddField(
model_name='component',
name='thumbnailer_name',
field=models.CharField(blank=True, choices=[(b'assets.thumbnailers.ImageThumbnailer', b'assets.thumbnailers.ImageThumbnailer')], default=None, max_length=64, null=True),
),
migrations.AlterField(
model_name='application',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=assets.models.get_logo_path),
),
migrations.AlterField(
model_name='asset',
name='data',
field=models.FileField(upload_to=assets.models.get_data_path, verbose_name='data file'),
),
migrations.AlterField(
model_name='asset',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=assets.models.get_thumbnail_path, verbose_name='thumbnail'),
),
]
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import polyaxon_sdk
from marshmallow import ValidationError, fields, validates_schema
from polyaxon.containers.names import INIT_CONTAINER
from polyaxon.k8s import k8s_schemas
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
from polyaxon.schemas.types import (
ArtifactsTypeSchema,
DockerfileTypeSchema,
GitTypeSchema,
)
class InitSchema(BaseCamelSchema):
artifacts = fields.Nested(ArtifactsTypeSchema, allow_none=True)
git = fields.Nested(GitTypeSchema, allow_none=True)
dockerfile = fields.Nested(DockerfileTypeSchema, allow_none=True)
connection = fields.Str(allow_none=True)
path = fields.Str(allow_none=True)
container = SwaggerField(
cls=k8s_schemas.V1Container,
defaults={"name": INIT_CONTAINER.format(random.randint(1, 100))},
allow_none=True,
)
@staticmethod
def schema_config():
return V1Init
@validates_schema
def validate_init(self, data, **kwargs):
artifacts = data.get("artifacts")
git = data.get("git")
dockerfile = data.get("dockerfile")
connection = data.get("connection")
schemas = 0
if artifacts:
schemas += 1
if git:
schemas += 1
if dockerfile:
schemas += 1
if schemas > 1:
raise ValidationError("One of artifacts, git, or dockerfile can be set")
if not connection and git and not git.url:
raise ValidationError(
"git field without a valid url requires a connection is required to be passed."
)
class V1Init(BaseConfig, polyaxon_sdk.V1Init):
IDENTIFIER = "init"
SCHEMA = InitSchema
REDUCED_ATTRIBUTES = [
"artifacts",
"git",
"dockerfile",
"connection",
"path",
"container",
]
def has_connection(self):
return any([self.connection, self.git, self.dockerfile, self.artifacts])
|
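# Minimum swaps to turn an array-represented binary tree into a BST: take the inorder
# traversal (which a BST would yield in sorted order), then count the swaps needed to
# sort that traversal.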
def inOrder(a, n, i):
global v
if i >= n:
return
inOrder(a, n, 2* i + 1)
v.append(a[i])
inOrder(a, n, 2* i +2)
def minSwaps():
global v
t = [[0, 0] for i in range(len(v))]
ans = -2
for i in range(len(v)):
t[i][0], t[i][1] = v[i], i
t, i = sorted(t), 0
while i < len(t):
if i == t[i][1] :
i += 1
continue
else:
t[i][0], t[t[i][1]][0] = t[t[i][1]][0], t[i][0]
t[i][1], t[t[i][1]][1] = t[t[i][1]][1], t[i][1]
if (i == t[i][1]):
i -= 1
i += 1
ans += 1
return ans
if __name__ == '__main__':
v = []
a = [ 5, 6, 7, 8, 9, 10, 11 ]
n = len(a)
inOrder(a, n, 0)
print(minSwaps())
|
aluno = {}
aluno['nome'] = str(input("What is the student's name? ")).capitalize().strip()
aluno['nota'] = float(input("What is the student's average grade? "))
print(f'\nThe student\'s name is {aluno["nome"]}')
print(f'The student\'s average grade is {aluno["nota"]}')
if aluno['nota'] <= 6.9:
    print(f'{aluno["nome"]} is in recovery')
else:
    print(f'{aluno["nome"]} passed by average')
|
import logging
from common.document_parser.ref_utils import make_dict
logger = logging.getLogger(__name__)
ref_regex = make_dict()
def check(check_str, ref_type, exp_result):
count = 0
matches = ref_regex[ref_type][1].findall(check_str)
for match in matches:
num_match = ref_regex[ref_type][0].search(match[0])
if not num_match:
continue
ref = (str(ref_type) + " " + str(num_match[0])).strip()
count += 1
return count==exp_result
def test_dod():
check_str= "reference DoD 4160.28-M DoD 7000.14-R DoDD 5134.12 DoDI 4140.01 DoDI 3110.06 DoD"
ref_type = "DoD"
assert check(check_str, ref_type, 2)
def test_dodd():
check_str= "reference DoD 4160.28-M DoD 7000.14-R DoDD 5134.12 DoDI 4140.01 DoDI 3110.06 DoD Directive 5134.12 DoDD"
ref_type = "DoDD"
assert check(check_str, ref_type, 2)
def test_dodi():
check_str= "reference DoD Instruction 3110.06 DoD 4160.28-M DoD 7000.14-R DoDD 5134.12 DoDI 4140.01 DoDI 3110.06 DoDI"
ref_type = "DoDI"
assert check(check_str, ref_type, 3)
def test_dodm():
check_str= "reference DoD 4160.28-M DoD Manual 4140.01 DoDD 5134.12 DoDI 4140.01 DoDM 4100.39 DoDM"
ref_type = "DoDM"
assert check(check_str, ref_type, 2)
def test_dtm():
check_str= "reference DTM-07-024 DoD Manual 4140.01 DTM 04-021 DoDI 4140.01 DoDM 4100.39 DTM"
ref_type = "DTM"
assert check(check_str, ref_type, 2)
def test_ai():
check_str= "reference Administrative Instruction 102 AI DoDD 5134.12 AI 86"
ref_type = "AI"
assert check(check_str, ref_type, 2)
def test_title():
check_str= "reference Title 10 Title bla bla 12 Title 41"
ref_type = "Title"
assert check(check_str, ref_type, 2)
def test_icd():
check_str= "reference ICPG 704.4 ICPM 2006-700-8 ICD 501 ICPG 710.1 Intelligence Community Directive 204 ICD"
ref_type = "ICD"
assert check(check_str, ref_type, 2)
def test_icpg():
check_str= "reference ICPG 704.4 ICPM 2006-700-8 ICD 501 ICPG 710.1 Intelligence Community Directive 204 ICPG"
ref_type = "ICPG"
assert check(check_str, ref_type, 2)
def test_icpm():
check_str= "reference ICPG 704.4 ICPM 2006-700-8 ICD 501 ICPG 710.1 Intelligence Community Directive 204 ICPM"
ref_type = "ICPM"
assert check(check_str, ref_type, 1)
def test_cjcsi():
check_str= "reference CJCSI 1001.01 CJCSI 1100.01D DoDI 4140.01 CJCSI 12312321 CJCSM 3150.05D DoDM"
ref_type = "CJCSI"
assert check(check_str, ref_type, 2)
def test_cjcsm():
check_str= "reference CJCSM 3105.01 CJCSI 1001.01 CJCSI 1100.01D CJCSM 3150.05D CJCSM"
ref_type = "CJCSM"
assert check(check_str, ref_type, 2)
def test_cjcsg():
check_str= "reference CJCSM 3105.01 CJCS GDE 3401D CJCSI 1100.01D CJCS GDE 5260 CJCSM"
ref_type = "CJCSG"
assert check(check_str, ref_type, 2)
def test_cjcsn():
check_str= "reference CJCSN 3112 CJCSI 1001.01 CJCSN 3130.01 CJCSM 3150.05D CJCSN"
ref_type = "CJCSN"
assert check(check_str, ref_type, 2)
def test_jp():
check_str= "reference DoD 4160.28-M JP 1-02 DoDD 5134.12 JP 4140.01 JP 3-12 DoDM 4100.39 JP"
ref_type = "JP"
assert check(check_str, ref_type, 2)
def test_dcid():
check_str= "reference DCID 6/1 DoD DCID 1893 DoDD 5134.12 DoDI 4140.01 DCID 7/6 DCID"
ref_type = "DCID"
assert check(check_str, ref_type, 2)
def test_eo():
check_str= "reference Executive Order 12996 DoD Executive Order 4140.01 Executive Order 13340 "
ref_type = "EO"
assert check(check_str, ref_type, 2)
def test_ar():
check_str= "AR 1-1 AR 1-15 AR 1-202 AR 10-89 AR 11-2 Army Regulations 11-18 AR 25-400-2 AR 380-67 AR 380-381 AR 381-47 AR 381-141 Army Regulation 525-21 Army Regulations (AR) 600-8-3 AR 600-8-10 AR 600-8-101 AR 600-9 AR 601-210"
ref_type = "AR"
assert check(check_str, ref_type, 17)
def test_ago():
check_str= "AGO 1958-27 AGO 2020 - 31 ARMY general orders (AGO) 2001- 18 ARMY general order 2000- 07 "
ref_type = "AGO"
assert check(check_str, ref_type, 4)
def test_adp():
check_str= "ADP 1 ADP 3 -0 Army Doctrine Publication 7-0 ADP 1-01"
ref_type = "ADP"
assert check(check_str, ref_type, 4)
def test_pam():
check_str= "PAM 600-8-101 DA Pamphlet 5-11 PAM 40-507 "
ref_type = "PAM"
assert check(check_str, ref_type, 3)
def test_atp():
check_str= "ATP 1-0.1 ATP 1-20 ATP 2-22.9-2 Army Techniques Publication 1-05.03 "
ref_type = "ATP"
assert check(check_str, ref_type, 4)
def test_army_dir():
check_str= "army DIR 2020-08 army directive 2019 - 27 army dir"
ref_type = "ARMY"
assert check(check_str, ref_type, 2)
def test_tc():
check_str= "TC 2-91.5A (TC) 3-4 Training circular 3-34.500 TC"
ref_type = "TC"
assert check(check_str, ref_type, 3)
def test_stp():
check_str= "STP 6-13B24-SM -TG STP 3-CIED - SM-TG STP 6-13II-MQS STP 10-92L14-SM-TG STP 1AB-1948 "
ref_type = "STP"
assert check(check_str, ref_type, 4)
def test_tb():
check_str= "TB 8-6500-MPL TB 8-6515-001-35 TB 38-750-2 TB MED 1 TB MED 284 TB MED 750-1 TB 420-1 TB 420-33 TB ENG 146 TB ENG 62"
ref_type = "TB"
assert check(check_str, ref_type, 10)
def test_da_memo():
check_str= "DA MEMO 600-8-22 DA MEMO 5-5, DA Memorandum 25-53 da memo"
ref_type = "DA"
assert check(check_str, ref_type, 3)
def test_fm():
check_str= "FM 3-01.13 FM 3-13 Field Manual 1-0 FM 3-55.93 FM 3-90-1 FM 101-51-3-CD FM 7-100.1"
ref_type = "FM"
assert check(check_str, ref_type, 7)
def test_gta():
check_str= "GTA 03-04-001A GTA 90-01-028 Graphic Training aid 43-01-103 "
ref_type = "GTA"
assert check(check_str, ref_type, 3)
def test_hqda_policy():
check_str= "HQDA POLICY NOTICE 1-1 HQDA POLICY NOTICE 600-4 "
ref_type = "HQDA"
assert check(check_str, ref_type, 2)
def test_cta():
check_str= "CTA 8-100 CTA 50-909 Common Table of Allowances 50-970 "
ref_type = "CTA"
assert check(check_str, ref_type, 3)
def test_attp():
check_str= "reference ATTP 3-06.11 ATTP 4140.01 "
ref_type = "ATTP"
assert check(check_str, ref_type, 1)
def test_tm():
check_str= "TM 43-0001-26-2 TM 5-3895-332-23P TM 5-3820-255-12&P TM 3-11.42 TM 3-34.48-2 TM 1-5895-308-SUM TM 1-1680-377-13&P-4"
ref_type = "TM"
assert check(check_str, ref_type, 7)
def test_afi():
check_str = "AFI 1-1 AFI 11-2E-3V3 AFI10-2611-O AFI 13-101 AFI 17-2CDAV3"
ref_type = "AFI"
assert check(check_str, ref_type, 5)
def test_cfetp():
check_str = "CFETP 15WXC1 CFETP 1N2X1X-CC2 CFETP 3E4X1WG"
ref_type = "CFETP"
assert check(check_str, ref_type, 3)
def test_afman():
check_str = "AFMAN 11-2AEV3ADDENDA-A Air Force Manual 11-2C-32BV2 AFMAN10-1004 AFMAN11-2KC-10V3_ADDENDA-A"
ref_type = "AFMAN"
assert check(check_str, ref_type, 4)
def test_qtp():
check_str = "QTP 24-3-HAZMAT QTP 43AX-1 (QTP) 24-3-D549"
ref_type = "QTP"
assert check(check_str, ref_type, 3)
def test_afpd():
check_str = "AFPD 1 AFPD 4 AFPD 10-10 AFPD 91-1"
ref_type = "AFPD"
assert check(check_str, ref_type, 3)
def test_afttp():
check_str = "Air Force Tactics, Techniques, and Procedures (AFTTP) 3-42.32 AFTTP3-4.6_AS AFTTP 3-32.33V1"
ref_type = "AFTTP"
assert check(check_str, ref_type, 3)
def test_afva():
check_str = "AFVA 10-241 AFVA 51-1"
ref_type = "AFVA"
assert check(check_str, ref_type, 2)
def test_afh():
check_str = "AFH 10-222V1 AFH 1 AFH32-7084"
ref_type = "AFH"
assert check(check_str, ref_type, 3)
def test_hafmd():
check_str = "HAFMD 1-2 HAFMD 1-24 Addendum B"
ref_type = "HAFMD"
assert check(check_str, ref_type, 2)
def test_afpam():
check_str = "AFPAM 36-2801V1 AFPAM ( I ) 24-237"
ref_type = "AFPAM"
assert check(check_str, ref_type, 2)
def test_afmd():
check_str = "AFMD 1 AFMD 28"
ref_type = "AFMD"
assert check(check_str, ref_type, 2)
def test_afm():
check_str = "AFM 19-10"
ref_type = "AFM"
assert check(check_str, ref_type, 1)
def test_HOI():
check_str = "HOI 10-1 HOI 36-28"
ref_type = "HOI"
assert check(check_str, ref_type, 2)
def test_afjqs():
check_str = "AFJQS 5J0X1-2 AFJQS 2XXXX"
ref_type = "AFJQS"
assert check(check_str, ref_type, 2)
def test_afji():
check_str = "AFJI 10-411 Air Force Joint Instruction (AFJI) 32-9006"
ref_type = "AFJI"
assert check(check_str, ref_type, 2)
def test_afgm():
check_str = "AFGM 2020-36-04 AFGM 2020-63-148-01"
ref_type = "AFGM"
assert check(check_str, ref_type, 2)
def test_dafi():
check_str = "DAFI 33-360 DAFI 90-2002 DAFI 48-107V1"
ref_type = "DAFI"
assert check(check_str, ref_type, 3)
def test_af():
check_str = "AF 100 AF form 1005"
ref_type = "AF"
assert check(check_str, ref_type, 2)
def test_sf():
check_str = "SF 87 SF 708"
ref_type = "SF"
assert check(check_str, ref_type, 2)
def test_afpm():
check_str = "AFPM 2019-36-02"
ref_type = "AFPM"
assert check(check_str, ref_type, 1)
def test_afjman():
check_str = "AFJMAN 23-209"
ref_type = "AFJMAN"
assert check(check_str, ref_type, 1)
def test_jta():
check_str = "JTA 08-02 JTA 74-1"
ref_type = "JTA"
assert check(check_str, ref_type, 2)
def test_dafpd():
check_str = "DAFPD 10-36 DAFPD 90-1"
ref_type = "DAFPD"
assert check(check_str, ref_type, 2)
def test_mco():
check_str = "MCO 4200.34 MCO P12000.11A MCO 7220R.39"
ref_type = "MCO"
assert check(check_str, ref_type, 3)
def test_mcbul():
check_str = "MCBUL 1300 MCBUL 10120"
ref_type = "MCBUL"
assert check(check_str, ref_type, 2)
def test_navmc():
check_str = "NAVMC 4500.36B NAVMC 2915"
ref_type = "NAVMC"
assert check(check_str, ref_type, 2)
def test_navmcdir():
check_str = "NAVMC DIR 1650.48 NAVMC Directive 5100.8"
ref_type = "NAVMC DIR"
assert check(check_str, ref_type, 2)
def test_mcrp():
check_str = "MCRP 1-10.1 MCRP 3-40B.5 MCRP 4-11.3M"
ref_type = "MCRP"
assert check(check_str, ref_type, 3)
def test_mcwp():
check_str = "MCWP 3-15.7 MCWP 11-10"
ref_type = "MCWP"
assert check(check_str, ref_type, 2)
def test_mctp():
check_str = "MCTP 12-10A MCTP 3-20G"
ref_type = "MCTP"
assert check(check_str, ref_type, 2)
def test_mcip():
check_str = "MCIP 3-03DI MCIP 3-03.1i MCIP 3-40G.21"
ref_type = "MCIP"
assert check(check_str, ref_type, 3)
def test_mcdp():
check_str = "MCDP 1-1 MCDP 7"
ref_type = "MCDP"
assert check(check_str, ref_type, 2)
def test_fmfrp():
check_str = "FMFRP 12-109-II FMFRP 0-53"
ref_type = "FMFRP"
assert check(check_str, ref_type, 2)
def test_fmfm():
check_str = "FMFM 6-1"
ref_type = "FMFM"
assert check(check_str, ref_type, 1)
def test_irm():
check_str = "IRM-2300-05B IRM 5236-06A IRM-5231-03"
ref_type = "IRM"
assert check(check_str, ref_type, 3)
def test_secnavinst():
check_str = "SECNAV Instruction 1640.9C SECNAVINST 5210.60"
ref_type = "SECNAVINST"
assert check(check_str, ref_type, 2)
def test_secnav():
check_str = "SECNAV M-1650.1 SECNAV M-5210.2"
ref_type = "SECNAV"
assert check(check_str, ref_type, 2)
def test_navsup():
check_str = "NAVSUP P-486 NAVSUP Publication 727"
ref_type = "NAVSUP"
assert check(check_str, ref_type, 2)
def test_jaginst():
check_str = "JAGINST 5800.7F JAG INSTRUCTION 1440.1E"
ref_type = "JAGINST"
assert check(check_str, ref_type, 2)
def test_ombm():
check_str = "M-00-02 M-07-16 m 18 19"
ref_type = "OMBM"
assert check(check_str, ref_type, 2)
|
from setuptools import setup
from pypandoc import convert_file
#: Converts the Markdown README to the reStructuredText format that PyPI expects.
long_description = convert_file('README.md', 'rst')
setup(name='obstruction',
description='Declarative parser for remapping object schemas and data',
long_description=long_description,
version='0.1.1',
url='https://github.com/chrisinajar/py-obstruction',
author='Chris Vickery',
author_email='chrisinajar@gmail.com',
license='MIT',
# classifiers=[
# 'Development Status :: 4 - Beta',
# 'Intended Audience :: System Administrators',
# 'License :: OSI Approved :: Apache Software License',
# 'Programming Language :: Python :: 3'
# ],
packages=['obstruction'],
install_requires=[
'six>=1.10',
'dot-prop>=0.2.0',
'map-object>=1.1.1'
]
)
|
class MissingArgumentException(Exception):
def __init__(self, message):
self.message = message
def __repr__(self):
return f'<MissingArgumentException "{self.message}">'
def __str__(self):
return self.message
class MisMatchException(Exception):
def __init__(self, message):
self.message = message
def __repr__(self):
return f'<MisMatchException "{self.message}">'
def __str__(self):
return self.message
class DatabaseSafetyException(Exception):
def __init__(self, message):
self.message = message
def __repr__(self):
return f'<DatabaseSafetyException "{self.message}">'
def __str__(self):
return self.message
class SQLTypeException(Exception):
def __init__(self, message):
self.message = message
def __repr__(self):
return f'<SQLTypeException "{self.message}">'
def __str__(self):
return self.message
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .models import ApiResourceLocation
from ..customer_intelligence.v4_0.models.models import CustomerIntelligenceEvent
from .models import ImproperException
from ..location.v4_0.models.models import ResourceAreaInfo
from .models import SystemException
from .models import VssJsonCollectionWrapperBase
from .models import VssJsonCollectionWrapper
from .models import WrappedException
__all__ = [
'ApiResourceLocation',
'CustomerIntelligenceEvent',
'ImproperException',
'ResourceAreaInfo',
'SystemException',
'VssJsonCollectionWrapperBase',
'VssJsonCollectionWrapper',
'WrappedException'
]
|
from cisconetworkswitch import *
|
# Generated by Django 2.1.7 on 2019-02-21 11:21
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0019_auto_20181227_1534'),
]
operations = [
migrations.AlterField(
model_name='historicalpaper',
name='change_request_of',
field=models.ForeignKey(blank=True, db_constraint=False, null=True,
on_delete=django.db.models.deletion.DO_NOTHING, related_name='+',
to='mainapp.Paper'),
),
]
|
# Read two space-separated words and print them capitalised: first letter upper-case,
# the rest lower-case, keeping only cased letters after the first position (same
# filter as the original). The original dropped the first letter whenever it was not
# lower-case; that is fixed here.
a, b = input().split()

def capitalise(word):
    rest = "".join(ch.lower() for ch in word[1:] if ch.isupper() or ch.islower())
    return word[0].upper() + rest

print(capitalise(a), capitalise(b))
|
"""
Given an array of intervals where intervals[i] = [starti, endi],
merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Example 1:
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
Example 2:
Input: intervals = [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.
Constraints:
1 <= intervals.length <= 104
intervals[i].length == 2
0 <= starti <= endi <= 104
"""
# V0
# https://github.com/labuladong/fucking-algorithm/blob/master/%E7%AE%97%E6%B3%95%E6%80%9D%E7%BB%B4%E7%B3%BB%E5%88%97/%E5%8C%BA%E9%97%B4%E8%B0%83%E5%BA%A6%E9%97%AE%E9%A2%98%E4%B9%8B%E5%8C%BA%E9%97%B4%E5%90%88%E5%B9%B6.md
class Solution:
# @param intervals, a list of Interval
# @return a list of Interval
def merge(self, intervals):
intervals = sorted(intervals, key=lambda x: x.start)
result = []
for interval in intervals:
if len(result) == 0 or result[-1].end < interval.start:
result.append(interval)
else:
result[-1].end = max(result[-1].end, interval.end)
return result
# V1
# https://www.jiuzhang.com/solution/merge-intervals/#tag-highlight-lang-python
"""
Definition of Interval.
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
"""
class Solution:
# @param intervals, a list of Interval
# @return a list of Interval
def merge(self, intervals):
intervals = sorted(intervals, key=lambda x: x.start)
result = []
for interval in intervals:
if len(result) == 0 or result[-1].end < interval.start:
result.append(interval)
else:
result[-1].end = max(result[-1].end, interval.end)
return result
# V1'
# https://www.cnblogs.com/zuoyuan/p/3782028.html
class Solution:
# @param intervals, a list of Interval
# @return a list of Interval
def merge(self, intervals):
intervals.sort(key = lambda x:x.start)
length=len(intervals)
res=[]
for i in range(length):
if res==[]:
res.append(intervals[i])
else:
size=len(res)
if res[size-1].start<=intervals[i].start<=res[size-1].end:
res[size-1].end=max(intervals[i].end, res[size-1].end)
else:
res.append(intervals[i])
return res
# V1''
# https://www.cnblogs.com/loadofleaf/p/5084209.html
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
intervals.sort(key = lambda x:x.start)
length = len(intervals)
res = []
if length == 0:
return res
res.append(intervals[0])
for i in range(1,length):
size = len(res)
if res[size - 1].start <= intervals[i].start <= res[size - 1].end:
res[size - 1].end = max(intervals[i].end, res[size - 1].end)
else:
res.append(intervals[i])
return res
# V2
|
from .txt_style import txtDataset
from .registry import DATASETS
import numpy as np
import cv2
import os
import math
import time
# import torch
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
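# Added illustration (not part of the original pipeline): how gaussian_radius and
# draw_umich_gaussian combine when building a class heatmap, as prepare_train_img
# does below for every ground-truth box. Kept as a comment so it is not executed on import.
#
#   hm = np.zeros((1, 32, 32), dtype=np.float32)        # one class, 32x32 output map
#   r = max(0, int(gaussian_radius((10, 12))))          # box of height 10, width 12
#   draw_umich_gaussian(hm[0], np.array([16, 16]), r)   # centre at (x=16, y=16)
#   # hm[0, 16, 16] == 1.0, with values decaying away from the centre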
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
@DATASETS.register_module
class Ctdet_txt(txtDataset):
# # for Visdrone
# CLASSES = ['__background__', "aeroplane", "bicycle", "bird", "boat",
# "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
# "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
# "train", "tvmonitor"]
# # for coco
# CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
# 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
# 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
# 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
# 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
# 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
# 'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
# 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
# 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
# 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
# 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
# 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
# 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
# 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
def __init__(self, **kwargs):
super(Ctdet_txt, self).__init__(**kwargs)
# self.use_coco = use_coco
# print(kwargs)
def _get_border(self, border, size): # 128, width (800-1000?)
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def prepare_train_img(self, index):
self.max_objs = 128
self.num_classes = 10
# if self.use_coco:
# self.max_objs = 128
# self.num_classes = 80
# _valid_ids = [
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
# 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
# 24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
# 37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
# 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
# 58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
# 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
# 82, 84, 85, 86, 87, 88, 89, 90]
# cat_ids = {v: i for i, v in enumerate(_valid_ids)}
# else:
# self.max_objs = 50
# self.num_classes = 21
# cat_ids = {v: i for i, v in enumerate(np.arange(1, 21, dtype=np.int32))}
# import pdb; pdb.set_trace()
img_info = self.img_infos[index]
img_id = img_info['id']
file_name = img_info['filename']
img_path = os.path.join(self.img_prefix, file_name)
# id=img_id, filename=filename, width=width, height=height)
# file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
# img_path =os.path.join(self.img_prefix, file_name)
# ann_ids = self.coco.getAnnIds(imgIds=[img_id])
# anns = self.coco.loadAnns(ann_ids)
anns = self.get_ann_info(index)
# dict(
# bboxes=bboxes.astype(np.float32),
# labels=labels.astype(np.int64),
# bboxes_ignore=bboxes_ignore.astype(np.float32),
# labels_ignore=labels_ignore.astype(np.int64))
# bboxes = anns["bboxes"].tolist() # (xmin, ymin, xmax, ymax) numpy (, 4)
# labels = anns["labels"].tolist()
bboxes = anns["bboxes"]
labels = anns["labels"]
num_objs = min(bboxes.shape[0], self.max_objs)
img = cv2.imread(img_path) # h, w, c
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
# if self.opt.keep_res:
# input_h = (height | self.size_divisor) + 1
# input_w = (width | self.size_divisor) + 1
# input_h = height
# input_w = width
# s = np.array([input_w, input_h], dtype=np.float32)
# else:
s = max(img.shape[0], img.shape[1]) * 1.0
input_h, input_w = self.img_scales[0][1], self.img_scales[0][0]
# flipped = False
# if self.split == 'train':
# if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(256, img.shape[1])
h_border = self._get_border(256, img.shape[0])
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
# else:
# sf = 0.4
# cf = 0.1
# c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
# c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
# s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
# if np.random.random() < self.opt.flip:
# flipped = True
# img = img[:, ::-1, :]
# c[0] = width - c[0] - 1
trans_input = get_affine_transform(
c, s, 0, [input_w, input_h])
# meta = {}
# meta['c'] = c
# meta['s'] = s
inp = cv2.warpAffine(img, trans_input,
(input_w, input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
inp = (inp - self.img_norm_cfg['mean']) / self.img_norm_cfg['std']
inp = inp.transpose(2, 0, 1)
output_h = input_h // 4
output_w = input_w // 4
# print(output_h, output_w)
# meta['out_height'] = output_h
# meta['out_width'] = output_w
trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
hm = np.zeros((self.num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
for k in range(num_objs):
# ann = anns[k]
            # bbox = self._coco_box_to_bbox(ann['bbox'])  # network input: x1, y1, x2, y2
# cls_id = int(cat_ids[ann['category_id']])
bbox = bboxes[k] # (4), numpy
cls_id = int(labels[k]) - 1
# tranform bounding box to output size
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
print("h, w", h, w)
if h > 0 and w > 0:
# populate hm based on gd and ct
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
print(ct)
ct_int = ct.astype(np.int32)
print(ct_int)
draw_umich_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
ret = {'img': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'img_meta':[]}
return ret
def prepare_test_img(self, index):
self.max_objs = 128
self.num_classes = 10
img_info = self.img_infos[index]
img_id = img_info['id']
file_name = img_info['filename']
img_path = os.path.join(self.img_prefix, file_name)
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
input_h, input_w = self.img_scales[0][1], self.img_scales[0][0]
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(img, trans_input,
(input_w, input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
inp = (inp - self.img_norm_cfg['mean']) / self.img_norm_cfg['std']
inp = inp.transpose(2, 0, 1) # reshape(1, 3, inp_height, inp_width)
output_h = input_h // 4
output_w = input_w // 4
meta = {'c': c, 's': s,
'out_height': output_h,
'out_width': output_w}
ret = {"img": inp, "img_meta": meta}
return ret
|
from conceptnet5.formats.msgpack_stream import read_msgpack_stream
from conceptnet5.uri import uri_prefixes
from conceptnet5.relations import SYMMETRIC_RELATIONS
from ordered_set import OrderedSet
import json
def write_row(outfile, items):
print('\t'.join(sanitize(str(x)) for x in items), file=outfile)
def write_ordered_set(filename, oset):
with open(filename, 'w', encoding='utf-8') as outfile:
for i, item in enumerate(oset):
print('%d\t%s' % (i, sanitize(item)), file=outfile)
def write_relations(filename, oset):
with open(filename, 'w', encoding='utf-8') as outfile:
for i, rel in enumerate(oset):
directed_str = 't'
if rel in SYMMETRIC_RELATIONS:
directed_str = 'f'
print('%d\t%s\t%s' % (i, sanitize(rel), directed_str), file=outfile)
def sanitize(text):
return text.replace('\n', '').replace('\t', '').replace('\\', '\\\\')
def assertions_to_sql_csv(msgpack_filename, output_dir):
output_nodes = output_dir + '/nodes.csv'
output_edges = output_dir + '/edges.csv'
output_relations = output_dir + '/relations.csv'
output_sources = output_dir + '/sources.csv'
output_edge_sources = output_dir + '/edge_sources.csv'
output_node_prefixes = output_dir + '/node_prefixes.csv'
output_features = output_dir + '/edge_features.csv'
node_list = OrderedSet()
source_list = OrderedSet()
assertion_list = OrderedSet()
relation_list = OrderedSet()
seen_prefixes = set()
edge_file = open(output_edges, 'w', encoding='utf-8')
edge_source_file = open(output_edge_sources, 'w', encoding='utf-8')
node_prefix_file = open(output_node_prefixes, 'w', encoding='utf-8')
feature_file = open(output_features, 'w', encoding='utf-8')
for assertion in read_msgpack_stream(msgpack_filename):
if assertion['uri'] in assertion_list:
continue
assertion_idx = assertion_list.add(assertion['uri'])
rel_idx = relation_list.add(assertion['rel'])
start_idx = node_list.add(assertion['start'])
end_idx = node_list.add(assertion['end'])
source_indices = []
sources = assertion['sources']
for source in sources:
for sourceval in sorted(source.values()):
source_idx = source_list.add(sourceval)
source_indices.append(source_idx)
jsondata = json.dumps(assertion, ensure_ascii=False, sort_keys=True)
weight = assertion['weight']
write_row(
edge_file,
[assertion_idx, assertion['uri'],
rel_idx, start_idx, end_idx,
weight, jsondata]
)
for node in (assertion['start'], assertion['end'], assertion['dataset']):
write_prefixes(node_prefix_file, seen_prefixes, node_list, node)
for source_idx in sorted(set(source_indices)):
write_row(edge_source_file, [assertion_idx, source_idx])
if assertion['rel'] in SYMMETRIC_RELATIONS:
features = [(0, start_idx), (0, end_idx)]
else:
features = [(1, start_idx), (-1, end_idx)]
for direction, node_idx in features:
write_row(feature_file, [rel_idx, direction, node_idx, assertion_idx])
edge_file.close()
edge_source_file.close()
node_prefix_file.close()
write_ordered_set(output_nodes, node_list)
write_ordered_set(output_sources, source_list)
write_relations(output_relations, relation_list)
def write_prefixes(prefix_file, seen_prefixes, node_list, node):
for prefix in uri_prefixes(node):
if (node, prefix) not in seen_prefixes:
seen_prefixes.add((node, prefix))
node_idx = node_list.add(node)
prefix_idx = node_list.add(prefix)
write_row(prefix_file, [node_idx, prefix_idx])
def load_sql_csv(connection, input_dir):
for (filename, tablename) in [
(input_dir + '/relations.csv', 'relations'),
(input_dir + '/nodes.csv', 'nodes'),
(input_dir + '/edges.csv', 'edges'),
(input_dir + '/sources.csv', 'sources'),
(input_dir + '/edge_sources.csv', 'edge_sources'),
(input_dir + '/node_prefixes.csv', 'node_prefixes'),
(input_dir + '/edge_features.csv', 'edge_features')
]:
print(filename)
cursor = connection.cursor()
with open(filename, 'rb') as file:
cursor.execute("COPY %s FROM STDIN" % tablename, stream=file)
cursor.close()
connection.commit()
|
#!/usr/bin/env python
#
# Copyright (c) 2013, 2015 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Load output of LTL2DSTAR into a NetworkX DiGraph
The official website of LTL2DSTAR is http://ltl2dstar.de/ , where a
definition of its output format can be found. Use "-" in place of
FILE to read from stdin.
* Parsing is done by hand; consider changing to use pyparsing. N.B.,
the current implementation is lax about the input file, i.e., it
accepts valid LTL2DSTAR output along with variants.
* The Automaton class is a very light derivative of networkx.DiGraph;
in other words, if you prefer to only work with a DiGraph, it is
easy to modify the existing code to do so. Expect the name to
change later, especially if (when) it becomes integrated into TuLiP.
* readdstar.py is a commandline utility. It expects to be given the
name of a file from which to read (previously recorded) output of
LTL2DSTAR. Alternatively, use "-" to read from stdin. E.g., if
ltl2dstar and ltl2ba are in the current directory, try
$ echo 'U a b' | ltl2dstar --ltl2nba=spin:ltl2ba - -|./readdstar.py -
* Each edge (transition) is labeled with two things:
- "formula" : a disjunctive normal form expression for when the edge
should be taken; and
- "subsets of AP": a list of sets of atomic propositions.
E.g., if AP = {p, q}, and (1,3) is an edge in the Automaton object A,
then
A.edge[1][3]["subsets of AP"] = [set([]), set(['p'])]
means that the transition (1,3) should be taken (assuming current
execution has led to state 1) if none of the atomic propositions
are true (i.e., !p & !q holds), or precisely "p" is true (i.e., p &
!q holds).
SCL; 2013, 2015.
"""
from __future__ import print_function
import sys
import networkx as nx
try:
input = raw_input
except NameError:
pass
class AcceptancePair(object):
def __init__(self, L=None, U=None):
if L is None:
self.L = set()
else:
self.L = set(L)
if U is None:
self.U = set()
else:
self.U = set(U)
class Automaton(nx.DiGraph):
def __init__(self, aut_type=None):
nx.DiGraph.__init__(self)
self.aut_type = aut_type
def __str__(self):
output = "Type: "
if self.aut_type == "DRA":
output += "deterministic Rabin\n"
elif self.aut_type == "DSA":
output += "deterministic Streett\n"
output += "AP = "+str(self.ap)+"\n"
output += "Transitions:"+"\n"
output += "\n".join(["\t("+str(u)+", "+str(v)+") :\n\t\tformula: "+str(d["formula"])+"\n\t\tsubsets of AP: "+str(d["subsets of AP"]) for (u,v,d) in self.edges_iter(data=True)])+"\n"
output += "Acceptance Pairs (each line is of the form (L, U)):"+"\n"
output += "\n".join(["\t("+str(Fi.L)+", "+str(Fi.U)+")" for Fi in self.F])
return output
def gen_apformula(AP, intrep):
"""Generate conjunction formula
>>> gen_apformula(AP=("p", "q"), intrep=2)
'!p & q'
"""
return " & ".join([AP[i] if ((intrep >> i) & 1) != 0 else "!"+AP[i] for i in range(len(AP))])
def gen_apsubset(AP, intrep):
"""Generate set of atomic propositions corresponding to integer
>>> gen_apsubset(AP=("p", "q"), intrep=2)
set(['q'])
"""
return set([AP[i] for i in range(len(AP)) if ((intrep >> i) & 1) != 0])
def readdstar(getline):
"""Construct automaton from LTL2DSTAR output.
getline is any method that can yield successive lines of output
from LTL2DSTAR. E.g., a file could be opened and then its
readline() method passed to readdstar.
"""
A = None
aut_type = None
comments = []
last_state = -1 # -1 indicates unset
try:
while True:
line = getline()
if len(line) == 0:
break # file.readline() returns empty string at EOF
parts = line.split()
if len(parts) == 0:
continue # Ignore blank lines
if not parts[0].endswith(":") and len(parts) == 3: # id line
aut_type = parts[0]
version = parts[1]
edge_type = parts[2]
A = Automaton(aut_type=aut_type)
elif parts[0] == "Comment:":
comments.append(" ".join(parts[1:]))
elif parts[0] == "States:":
num_states = int(parts[1])
elif parts[0] == "Acceptance-Pairs:":
num_pairs = int(parts[1])
A.F = [None for i in range(num_pairs)]
elif parts[0] == "Start:":
A.start_state = int(parts[1])
elif parts[0] == "AP:":
ap_len = int(parts[1])
A.ap = tuple([prop.strip("\"").rstrip("\"") for prop in parts[2:]])
assert ap_len == len(A.ap)
elif parts[0] == "State:":
last_state = int(parts[1])
apsubset_counter = 0
A.add_node(last_state)
elif parts[0] == "Acc-Sig:":
for accsig in parts[1:]:
accsig_index = int(accsig[1:])
if A.F[accsig_index] is None:
A.F[accsig_index] = AcceptancePair()
if accsig[0] == "+":
A.F[accsig_index].L.add(last_state)
elif accsig[0] == "-":
A.F[accsig_index].U.add(last_state)
elif last_state >= 0 and parts[0] != "---":
to_state = int(parts[0])
if not A.has_edge(last_state, to_state):
A.add_edge(last_state, to_state)
A.edge[last_state][to_state]["formula"] = "("+gen_apformula(A.ap, apsubset_counter)+")"
A.edge[last_state][to_state]["subsets of AP"] = [gen_apsubset(A.ap, apsubset_counter)]
else:
A.edge[last_state][to_state]["formula"] += " | ("+gen_apformula(A.ap, apsubset_counter)+")"
A.edge[last_state][to_state]["subsets of AP"].append(gen_apsubset(A.ap, apsubset_counter))
apsubset_counter += 1
except EOFError:
pass # raw_input() throws this at end-of-file
return A
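# Added usage sketch, mirroring the docstring above; "saved.dstar" is a hypothetical
# file holding previously recorded LTL2DSTAR output:
#
#   with open("saved.dstar", "r") as f:
#       A = readdstar(f.readline)
#   print(A.aut_type, A.start_state, len(A.F))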
if __name__ == "__main__":
if len(sys.argv) < 2 or "-h" in sys.argv:
print("Usage: "+sys.argv[0]+" FILE")
exit(1)
if sys.argv[1] == "-": # Read from stdin
getline = input
else:
f = open(sys.argv[1], "r")
getline = f.readline
print(readdstar(getline) )
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import time
import paramiko
import logging
import getpass
from contextlib import contextmanager
import infra.path
import infra.remote
from glob import glob
from loguru import logger as LOG
DBG = os.getenv("DBG", "cgdb")
class CCFRemoteClient(object):
BIN = "cchost"
DEPS = []
LINES_RESULT_FROM_END = 6
def __init__(
self,
name,
host,
bin_path,
node_host,
node_port,
workspace,
label,
iterations,
config,
command_args,
remote_class,
):
"""
Creates a ccf client on a remote host.
"""
self.host = host
self.name = name
self.BIN = infra.path.build_bin_path(bin_path)
# strip out the config from the path
self.DEPS = glob("*.pem") + [config]
client_command_args = list(command_args)
if "--verify" in client_command_args:
# append verify file to the files to be copied
# and fix the path in the argument list
v_index = client_command_args.index("--verify")
verify_path = client_command_args[v_index + 1]
self.DEPS += [verify_path]
client_command_args[v_index + 1] = os.path.basename(verify_path)
cmd = [
self.BIN,
"--host={}".format(node_host),
"--port={}".format(node_port),
"--transactions={}".format(iterations),
"--config={}".format(os.path.basename(config)),
] + client_command_args
self.remote = remote_class(
name, host, [self.BIN], self.DEPS, cmd, workspace, label
)
def setup(self):
self.remote.setup()
LOG.success(f"Remote client {self.name} setup")
def start(self):
self.remote.start()
def restart(self):
self.remote.restart()
def node_cmd(self):
return self.remote._cmd()
def debug_node_cmd(self):
return self.remote._dbg()
def stop(self):
try:
self.remote.stop()
remote_files = self.remote.list_files()
remote_csvs = [f for f in remote_files if f.endswith(".csv")]
for csv in remote_csvs:
remote_file_dst = f"{self.name}_{csv}"
self.remote.get(csv, 1, remote_file_dst)
if csv == "perf_summary.csv":
with open("perf_summary.csv", "a") as l:
with open(remote_file_dst, "r") as r:
for line in r.readlines():
l.write(line)
except Exception:
LOG.exception("Failed to shut down {} cleanly".format(self.name))
def wait(self):
try:
self.remote.wait_for_stdout_line(line="Global commit", timeout=5)
except Exception:
LOG.exception("Failed to wait on client {}".format(self.name))
raise
def print_result(self):
self.remote.print_result(self.LINES_RESULT_FROM_END)
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2018, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
import mock
import unittest
from uuid import uuid4
def _run(coro):
return asyncio.run(coro)
class AsyncMock(mock.Mock):
def __call__(self, *args, **kwargs):
sup = super(AsyncMock, self)
async def coro():
return sup.__call__(*args, **kwargs)
return coro()
def __await__(self):
return self().__await__()
class testConsumerKeyRequest(unittest.TestCase):
def test_add_rules(self):
# Prepare
import asyncovh
m_client = AsyncMock()
ck = asyncovh.ConsumerKeyRequest(m_client)
# Test: No-op
self.assertEqual([], ck._access_rules)
ck._access_rules = []
# Test: allow one
ck.add_rule("GET", '/me')
self.assertEqual([
{'method': 'GET', 'path': '/me'},
], ck._access_rules)
ck._access_rules = []
# Test: allow safe methods on domain
ck.add_rules(asyncovh.API_READ_WRITE_SAFE, '/domains/test.com')
self.assertEqual([
{'method': 'GET', 'path': '/domains/test.com'},
{'method': 'POST', 'path': '/domains/test.com'},
{'method': 'PUT', 'path': '/domains/test.com'},
], ck._access_rules)
ck._access_rules = []
# Test: allow all sms, strips suffix
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/sms/*')
self.assertEqual([
{'method': 'GET', 'path': '/sms'},
{'method': 'POST', 'path': '/sms'},
{'method': 'PUT', 'path': '/sms'},
{'method': 'DELETE', 'path': '/sms'},
{'method': 'GET', 'path': '/sms/*'},
{'method': 'POST', 'path': '/sms/*'},
{'method': 'PUT', 'path': '/sms/*'},
{'method': 'DELETE', 'path': '/sms/*'},
], ck._access_rules)
ck._access_rules = []
# Test: allow all, does not insert the empty rule
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/')
self.assertEqual([
{'method': 'GET', 'path': '/*'},
{'method': 'POST', 'path': '/*'},
{'method': 'PUT', 'path': '/*'},
{'method': 'DELETE', 'path': '/*'},
], ck._access_rules)
ck._access_rules = []
# Test launch request
ck.add_recursive_rules(asyncovh.API_READ_WRITE, '/')
self.assertEqual(m_client.request_consumerkey.return_value, _run(ck.request()))
m_client.request_consumerkey.assert_called_once_with(ck._access_rules, None)
|
import json
import re
with open("../data/labels_dict.json") as labelFile:
labels_dict = json.load(labelFile)
def getGoldAnswers(goldanswer_string):
goldanswerList = goldanswer_string.split(";")
goldanswers = []
for ga in goldanswerList:
if "/" in ga:
ga = ga.rsplit("/", 1)[1]
goldanswers.append(ga)
return goldanswers
# return True if the given string is a full YYYY-MM-DD Wikidata-style timestamp
def is_timestamp(timestamp):
    pattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')
    return bool(pattern.match(timestamp))
def convertTimestamp(timestamp):
yearPattern = re.compile('^[0-9][0-9][0-9][0-9]-00-00T00:00:00Z')
monthPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-00T00:00:00Z')
dayPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')
timesplits = timestamp.split("-")
year = timesplits[0]
if yearPattern.match(timestamp):
return year
month = convertMonth(timesplits[1])
if monthPattern.match(timestamp):
return month + " " + year
elif dayPattern.match(timestamp):
day = timesplits[2].rsplit("T")[0]
return day + " " + month + " " +year
return timestamp
# convert the given two-digit month number to its English name
def convertMonth(month):
return{
"01": "january",
"02": "february",
"03": "march",
"04": "april",
"05": "may",
"06": "june",
"07": "july",
"08": "august",
"09": "september",
"10": "october",
"11": "november",
"12": "december"
}[month]
def getLabel(entity):
label = ""
if entity.startswith("Q") or entity.startswith("P"):
        # for predicates like P10-23, strip the trailing counter
if "-" in entity:
e = entity.split("-") [0]
else:
e = entity
if e in labels_dict.keys():
label = labels_dict[e]
else:
if is_timestamp(entity):
label = convertTimestamp(entity)
elif entity.startswith("+"):
label = entity.split("+")[1]
else:
label = entity
return label
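# Added self-check sketch (not part of the original script). It exercises only the
# pure helpers above and assumes ../data/labels_dict.json exists so the module-level
# json.load at import time succeeds.
if __name__ == "__main__":
    assert convertTimestamp("1998-00-00T00:00:00Z") == "1998"
    assert convertTimestamp("1998-05-00T00:00:00Z") == "may 1998"
    assert convertTimestamp("1998-05-17T00:00:00Z") == "17 may 1998"
    assert getGoldAnswers("Q42;http://www.wikidata.org/entity/Q5") == ["Q42", "Q5"]
    assert is_timestamp("1998-05-17T00:00:00Z") and not is_timestamp("Q42")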
|
import pytest
import rumps
from src.app_functions.exceptions.login_failed import LoginFailed
from src.app_functions.menu.change_credentials import change_credentials
@pytest.fixture(name="basic_app")
def create_app():
"""Creates a basic app object with some variables to pass to functions
Returns:
rumps.App: Basic app
"""
app = rumps.App("TestApp")
return app
def test_functions_called_correctly_success(mocker, basic_app):
"""Check functionality if login succeeds"""
mock_function1 = mocker.patch("src.app_functions.menu.change_credentials.input_credentials")
mock_function2 = mocker.patch("src.app_functions.menu.change_credentials.duolingo_login")
mock_function3 = mocker.patch("src.app_functions.menu.change_credentials.update_menu")
change_credentials(basic_app)
mock_function1.assert_called_once_with()
mock_function2.assert_called_once_with(basic_app)
mock_function3.assert_called_once_with(basic_app)
assert basic_app.logged_in is True
def test_functions_called_correctly_failure(mocker, basic_app):
"""Check functionality if login fails"""
mock_function1 = mocker.patch("src.app_functions.menu.change_credentials.input_credentials")
mock_function2 = mocker.patch(
"src.app_functions.menu.change_credentials.duolingo_login", side_effect=LoginFailed
)
mock_function3 = mocker.patch("src.app_functions.menu.change_credentials.update_menu")
change_credentials(basic_app)
mock_function1.assert_called_once_with()
mock_function2.assert_called_once_with(basic_app)
mock_function3.assert_called_once_with(basic_app)
assert basic_app.logged_in is False
|
# -*- coding: utf-8 -*-
# pylint: disable=broad-except, bare-except
from .const import LOGGER, SHELLY_TYPES
from .base import Base
class Device(Base):
def __init__(self, block):
super(Device, self).__init__()
self.block = block
self.id = block.id
self.unit_id = block.id
self.type = block.type
self.ip_addr = block.ip_addr
self.is_device = True
self.is_sensor = False
self.sub_name = None
self.state_values = None
#self.sensor_values = None
self.state = None
self.device_type = None
self.device_sub_type = None #Used to make sensors unique
self.lazy_load = False
self.device_nr = None
self.master_unit = False
self.ext_sensor = None
def friendly_name(self):
try:
if self.block.parent.cloud:
device_id = self.id.lower().split('-')
name = None
#add_nr = False
idx = int(device_id[1]) if len(device_id) > 1 else 0
name = self.block.parent.cloud.get_device_name(device_id[0],
idx,
self.ext_sensor)
# if len(device_id) > 1 and int(device_id[1]) > 1:
# cloud_id = device_id[0] + '_' + str(int(device_id[1])-1)
# name = self.block.parent.cloud.get_device_name(cloud_id,
# self.ext_sensor)
# if not name:
# add_nr = True
# if not name:
# name = \
# self.block.parent.cloud.get_device_name(device_id[0],
# self.ext_sensor)
# if add_nr:
# name += " - " + device_id[1]
if name:
return name
except Exception as ex:
LOGGER.debug("Error look up name, %s", ex)
name = self.type_name() + ' - ' + self.id
#if self.device_nr:
# name += ' - ' + str(self.device_nr)
return name
def room_name(self):
if self.block.parent.cloud:
device_id = self.id.lower().split('-')
room = None
if len(device_id) > 1 and int(device_id[1]) > 1:
room = self.block.parent.cloud.get_room_name(
device_id[0] + "_" + device_id[1])
if room is None:
room = self.block.parent.cloud.get_room_name(device_id[0])
return room
def type_name(self):
"""Friendly type name"""
try:
name = SHELLY_TYPES[self.type]['name']
except:
name = self.type
if self.sub_name is not None:
name = name + " (" + self.sub_name + ")"
return name
def _send_command(self, url):
self.block.http_get(url)
self.block.update_status_interval = None #Force update
def available(self):
return self.block.available()
@property
def protocols(self):
return self.block.protocols
def _update(self, src, new_state=None, new_state_values=None):
LOGGER.debug(
"Update id:%s state:%s stateValue:%s",
self.id, new_state, new_state_values)
self._set_state(new_state, src)
if new_state_values is not None:
if self.state_values != new_state_values:
self.state_values = new_state_values
self.need_update = True
#if new_values is not None:
# self.sensor_values = new_values
# self.need_update = True
#if info_values is not None:
# self.info_values = info_values
# self.need_update = True
if self.lazy_load:
self.block.parent.callback_add_device(self)
self.raise_updated()
def update_status_information(self, _status):
"""Update the status information."""
def fw_version(self):
return self.block.fw_version()
def close(self):
self.cb_updated = []
def _reload_block(self):
self.block.reload = True
def loop(self):
pass
|
#!/usr/bin/python3
while True :
password = input("Make up a secure password: ")
print("You entered: ", password)
# initialize
has_capital = False
has_digit = False
# check length
gt_6 = len(password) >= 6
if gt_6 is True :
print("Password length check passed")
# check alphanum exist
for ch in password :
if ch in "0123456789" :
has_digit = True
if ch in "ABCDEFGHIJKLMNOPQRSTUVWYZ" :
has_capital = True
if has_digit is True :
print("Password number check passed")
if has_capital is True :
print("Password capital check passed")
# tell the user they failed to meet requirements
if not gt_6 :
print("Password length should be greater than or equal to 6 characters")
if not has_capital :
print("Password must have at least one capital letter")
if not has_digit :
print("Password must have at least one number")
# conditions to ask for a new password
if not gt_6 or not has_capital or not has_digit:
continue
break
print("Great password!")
|
# -*- coding: utf-8 -*-
#from django.contrib import admin
from django.contrib.admin import site, ModelAdmin, TabularInline, StackedInline
#from django.contrib.auth.models import User
from workflow.models import *
from workflow.forms import StepAdminForm
class StepInline(StackedInline):
model = Step
form = StepAdminForm
extra = 1
ordering = ['sequence']
classes = ('collapse-open',)
allow_add = True
class DynamicFieldProcessInline(TabularInline):
model = DynamicFieldProcess
extra = 1
#exclude = ['name','initial','max_length']
fields = ['label', 'fieldtype', 'required', 'sequence']
allow_add = True
class ProcessAdmin(ModelAdmin):
list_display = ('name','id','desc')
    actions_on_top = True
inlines = [ DynamicFieldProcessInline, StepInline, ]
site.register(Process, ProcessAdmin)
class DynamicFieldStepInline(TabularInline):
model = DynamicFieldStep
extra = 1
#exclude = ['name','initial','max_length']
fields = ['label', 'fieldtype', 'required', 'sequence']
allow_add = True
class StepAdmin(ModelAdmin):
    actions_on_top = True
list_display = ('id','name','process','sequence')
list_filter = ('process','managers')
list_display_links = ('name','process')
ordering = ['sequence']
inlines = [ DynamicFieldStepInline, ]
site.register(Step, StepAdmin)
class WorkAdmin(ModelAdmin):
list_display = ('id','current_step','name','owner')
list_display_links = ('name',)
search_fields = ['name','desc']
    actions_on_top = True
list_filter = ('current_step','owner')
site.register(Work, WorkAdmin)
site.register(Attachment)
# User profile
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
site.unregister(User)
class UserProfileInline(TabularInline):
model = UserProfile
class UserProfileAdmin(UserAdmin):
inlines = [UserProfileInline]
site.register(User, UserProfileAdmin)
|
import pygame as pg
from random import randint, uniform
import pytweening as tween
vec = pg.math.Vector2
# fade in/out
# grow/shrink
class Particle(pg.sprite.Sprite):
def __init__(self, image, pos, vel, life, gravity, groups):
pg.sprite.Sprite.__init__(self, *groups)
self.image = image.copy()
self.pos = vec(pos)
self.vel = vel
self.life = life
self.gravity = gravity
self.rect = self.image.get_rect()
self.rect.center = self.pos
def update(self, dt):
self.vel += self.gravity * dt
self.pos += self.vel * dt
self.rect.center = self.pos
self.life -= dt
if self.life <= 0:
self.kill()
class Emitter:
    # NOTE: unfinished particle-pool sketch. Particle() requires image/pos/vel/life/
    # gravity/groups arguments, so this pool cannot be filled as written; the demo
    # below spawns particles directly instead.
    def __init__(self, count):
        self.particle_pool = []
        for i in range(count):
            self.particle_pool.append(Particle())
if __name__ == '__main__':
pg.init()
screen = pg.display.set_mode((800, 600))
clock = pg.time.Clock()
spawn = vec(400, 300)
all_sprites = pg.sprite.Group()
size = 8
circ = pg.Surface((size, size)).convert_alpha()
circ.fill((0, 0, 0, 0))
pg.draw.circle(circ, (255, 255, 0), (size // 2, size // 2), size // 2)
grav = vec(0, 200)
running = True
while running:
pg.display.set_caption("{:.2f}".format(clock.get_fps()))
dt = clock.tick(60) / 1000
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
running = False
if event.type == pg.KEYDOWN and event.key == pg.K_1:
for i in range(100):
Particle(circ, spawn, vec(randint(300, 400), 0).rotate(uniform(-110, -70)), uniform(1, 5), grav, [all_sprites])
if event.type == pg.MOUSEBUTTONDOWN:
Particle(circ, spawn, vec(randint(50, 200), 0).rotate(uniform(0, 360)), 2, grav, [all_sprites])
all_sprites.update(dt)
keys = pg.key.get_pressed()
if keys[pg.K_s]:
for i in range(10):
Particle(circ, spawn + vec(0, 250), vec(randint(250, 350), 0).rotate(uniform(-110, -70)), uniform(1, 4), grav, [all_sprites])
screen.fill((0, 0, 0))
all_sprites.draw(screen)
pg.display.flip()
pg.quit()
|
from operator import mod
from medical_seg.networks.layers.image_transformer import PoolFormer
from medical_seg.networks.layers.multi_attention import MultiAttentionTransformer
from medical_seg.networks.nets.co_unet import BasicUNet
from medical_seg.networks.nets.basic_unet_encoder import BasicUNetEncoder
import torch.nn as nn
import torch
import torch.nn.functional as F
# from medical_seg.networks.layers.spatial_image_transformer import SpatialTransformerLayer
from medical_seg.networks.layers.fusion_transformer import FusionSelfCrossTrans
from typing import Sequence, Union
from medical_seg.networks.nets.co_unet import UpCat
from medical_seg.networks.layers.cpc import ImageCPC
from einops import rearrange
from medical_seg.networks.nets.basic_pool_former import BasicPoolFormerEncoder
class CNN(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
super().__init__()
self.net = nn.Sequential(
nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding),
nn.LeakyReLU(),
nn.BatchNorm3d(num_features=out_channels),
nn.Conv3d(out_channels, out_channels, kernel_size, stride, padding),
nn.LeakyReLU(),
nn.BatchNorm3d(num_features=out_channels),
)
def forward(self, x):
return self.net(x)
class ModalitySelfAttention(nn.Module):
def __init__(self, model_num, hidden_size):
super().__init__()
self.q = nn.Linear(in_features=hidden_size, out_features=hidden_size)
self.k = nn.Linear(in_features=hidden_size, out_features=hidden_size)
self.v = nn.Linear(in_features=hidden_size, out_features=hidden_size)
self.out_conv = CNN(in_channels=model_num*hidden_size, out_channels=hidden_size)
def forward(self, x, model_num):
## x: (batch, modal, feature, d, w, h)
x = rearrange(x, "b m f d w h -> b d w h m f", m=model_num)
q_out = self.q(x)
k_out = self.k(x)
v_out = self.v(x)
attention_score = torch.einsum("b d w h m f, b d w h f n -> b d w h m n", q_out, k_out.transpose(-1, -2))
modality_att_out = torch.einsum("b d w h m n, b d w h n f -> b d w h m f", attention_score, v_out)
modality_att_out = rearrange(modality_att_out, "b d w h m f -> b (m f) d w h")
modality_att_out = self.out_conv(modality_att_out)
return modality_att_out
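# Added shape sketch (not part of the original code): ModalitySelfAttention attends
# across the modality axis at every voxel, then fuses the modalities back into a
# single feature map. Kept as a comment so nothing runs on import.
#
#   attn = ModalitySelfAttention(model_num=2, hidden_size=16)
#   x = torch.randn(1, 2, 16, 4, 8, 8)   # (batch, modality, feature, d, w, h)
#   out = attn(x, model_num=2)           # -> shape (1, 16, 4, 8, 8)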
class MCNNEncoder(nn.Module):
def __init__(self, model_num, fea=[16, 16, 32, 64, 128, 16],
pool_size=[(2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)]):
super().__init__()
self.model_num = model_num
self.unets = nn.ModuleList([])
for i in range(model_num):
unet = BasicUNetEncoder(dimensions=3, in_channels=1,
features=fea, pool_size=pool_size)
self.unets.append(unet)
def forward(self, x):
encoder_out = []
x = x.unsqueeze(dim=2)
for i in range(self.model_num):
encoder_out.append(self.unets[i](x[:, i]))
return encoder_out
class PoolFormerEncoders(nn.Module):
def __init__(self, model_num, fea=[16, 16, 32, 64, 128, 16],
pool_size=[(2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)]):
super().__init__()
self.model_num = model_num
self.encoders = nn.ModuleList([])
for i in range(model_num):
encoder = BasicPoolFormerEncoder(dimensions=3,
in_channels=1,
pool_size=pool_size,
features=fea)
self.encoders.append(encoder)
def forward(self, x):
encoder_out = []
x = x.unsqueeze(dim=2)
for i in range(self.model_num):
encoder_out.append(self.encoders[i](x[:, i]))
return encoder_out
class PoolFormerUpcat(nn.Module):
def __init__(self, in_channels, out_channels, up_size=(2, 2, 2), num_layers=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.up_size = up_size
up_ = up_size[0] * up_size[1] * up_size[2]
self.patch_expand = nn.Conv3d(in_channels, in_channels*up_, 1, 1, 0)
self.pool_former = PoolFormer(in_channels+out_channels, out_channels, patch_size=(1, 1, 1), mlp_size=out_channels, num_layers=num_layers)
def forward(self, x, x_cat):
b, c, d, w, h = x.shape
x = self.patch_expand(x)
x = rearrange(x, "b (p1 p2 p3 c) d w h -> b c (d p1) (w p2) (h p3)", c = c, p1=self.up_size[0], p2=self.up_size[1], p3=self.up_size[2])
x = torch.cat([x, x_cat], dim=1)
x = self.pool_former(x)
return x
class SCAFNet(nn.Module):
def __init__(self, model_num, out_channels, image_size,
act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
norm = ("GROUP", {"num_groups": 8, "affine": False}),
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
fea = [16, 16, 32, 64, 128, 16],
window_size=(2, 4, 4),
pool_size=[(2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)],
patch_size=(1, 1, 1),
self_num_layer=2,
token_mixer_size=32,
encoder="cnn",
corss_attention=True,
modality_gate=True):
super().__init__()
self.out_channels = out_channels
self.model_num = model_num
self.pool_size = pool_size
self.cross_attention = corss_attention
self.modality_gate = modality_gate
pool_size_all = [1, 1, 1]
for p in pool_size:
pool_size_all = [pool_size_all[i] * p[i] for i in range(len(p))]
new_image_size = [image_size[i] // pool_size_all[i] for i in range(3)]
if encoder == "cnn":
print("use cnn encoder")
self.multicnn_encoder = MCNNEncoder(model_num=model_num, fea=fea, pool_size=pool_size)
elif encoder == "poolformer":
print("use poolformer encoder")
self.multicnn_encoder = PoolFormerEncoders(model_num=model_num, fea=fea, pool_size=pool_size)
        else :
            raise ValueError("unknown encoder type: {}".format(encoder))
if self.cross_attention:
print("use cross attention fusion module")
self.cross_fusion_trans = FusionSelfCrossTrans(model_num=model_num,
in_channels=fea[4],
hidden_size=fea[4],
patch_size=patch_size,
img_size=new_image_size,
mlp_size=2*fea[4], self_num_layer=self_num_layer,
window_size=window_size, token_mixer_size=token_mixer_size)
else :
            # not using cross attention
print("no cross attention fusion module")
self.fusion_conv_5 = CNN(model_num*fea[4], fea[4], 3, 1)
if self.modality_gate:
print("use modality gate module")
self.gate_layer = nn.Conv3d(fea[4], 2, 1, 1, 0)
else :
print("no modality gate module")
self.fusion_conv_1 = CNN(model_num*fea[0], fea[0], 3, 1)
self.fusion_conv_2 = CNN(model_num*fea[1], fea[1], 3, 1)
self.fusion_conv_3 = CNN(model_num*fea[2], fea[2], 3, 1)
self.fusion_conv_4 = CNN(model_num*fea[3], fea[3], 3, 1)
# self.upcat_4 = PoolFormerUpcat(fea[4], fea[3], up_size=pool_size[3], num_layers=2)
# self.upcat_3 = PoolFormerUpcat(fea[3], fea[2], up_size=pool_size[2], num_layers=2)
# self.upcat_2 = PoolFormerUpcat(fea[2], fea[1], up_size=pool_size[1], num_layers=2)
# self.upcat_1 = PoolFormerUpcat(fea[1], fea[5], up_size=pool_size[0], num_layers=2)
self.upcat_4 = UpCat(3, fea[4], fea[3], fea[3], act, norm, dropout, upsample, pool_size=pool_size[3])
self.upcat_3 = UpCat(3, fea[3], fea[2], fea[2], act, norm, dropout, upsample, pool_size=pool_size[2])
self.upcat_2 = UpCat(3, fea[2], fea[1], fea[1], act, norm, dropout, upsample, pool_size=pool_size[1])
self.upcat_1 = UpCat(3, fea[1], fea[0], fea[5], act, norm, dropout, upsample, halves=False, pool_size=pool_size[0])
self.final_conv = nn.Conv3d(fea[5], out_channels, 1, 1)
def forward(self, x):
        assert x.shape[1] == self.model_num, "number of input modalities does not match model_num"
encoder_x = self.multicnn_encoder(x)
encoder_1 = torch.stack([encoder_x[i][4] for i in range(self.model_num)], dim=1)
encoder_2 = torch.stack([encoder_x[i][3] for i in range(self.model_num)], dim=1)
encoder_3 = torch.stack([encoder_x[i][2] for i in range(self.model_num)], dim=1)
encoder_4 = torch.stack([encoder_x[i][1] for i in range(self.model_num)], dim=1)
encoder_5 = torch.stack([encoder_x[i][0] for i in range(self.model_num)], dim=1)
if self.cross_attention:
fusion_out = self.cross_fusion_trans(encoder_5)
encoder_5 = rearrange(encoder_5, "b n c d w h -> b (n c) d w h")
fusion_out_cnn = self.fusion_conv_5(encoder_5)
fusion_out = fusion_out + fusion_out_cnn
else :
            # not using cross attention
encoder_5 = rearrange(encoder_5, "b n c d w h -> b (n c) d w h")
fusion_out = self.fusion_conv_5(encoder_5)
if self.modality_gate:
            # apply the modality gate
fusion_out_tmp = self.gate_layer(fusion_out)
fusion_out_2 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_tmp, scale_factor=self.pool_size[3], mode="trilinear"))
fusion_out_4 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_2, scale_factor=self.pool_size[2], mode="trilinear"))
fusion_out_8 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_4, scale_factor=self.pool_size[1], mode="trilinear"))
fusion_out_16 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_8, scale_factor=self.pool_size[0], mode="trilinear"))
            # gate the per-modality encoder features at each scale
encoder_1 = rearrange(encoder_1 * fusion_out_16.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_2 = rearrange(encoder_2 * fusion_out_8.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_3 = rearrange(encoder_3 * fusion_out_4.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_4 = rearrange(encoder_4 * fusion_out_2.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
else :
            # no gating of the encoder features
encoder_1 = rearrange(encoder_1 , "b n c d w h -> b (n c) d w h")
encoder_2 = rearrange(encoder_2 , "b n c d w h -> b (n c) d w h")
encoder_3 = rearrange(encoder_3 , "b n c d w h -> b (n c) d w h")
encoder_4 = rearrange(encoder_4 , "b n c d w h -> b (n c) d w h")
encoder_1_cnn = self.fusion_conv_1(encoder_1)
encoder_2_cnn = self.fusion_conv_2(encoder_2)
encoder_3_cnn = self.fusion_conv_3(encoder_3)
encoder_4_cnn = self.fusion_conv_4(encoder_4)
u4 = self.upcat_4(fusion_out, encoder_4_cnn)
u3 = self.upcat_3(u4, encoder_3_cnn)
u2 = self.upcat_2(u3, encoder_2_cnn)
u1 = self.upcat_1(u2, encoder_1_cnn)
logits = self.final_conv(u1)
return logits
class SCAFNetNoCross(nn.Module):
def __init__(self, model_num, out_channels, image_size,
act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
norm = ("GROUP", {"num_groups": 8, "affine": False}),
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
fea = [16, 16, 32, 64, 128, 16],
window_size=(2, 4, 4),
pool_size=[(2, 2, 2), (2, 2, 2), (2, 2, 2), (2, 2, 2)],
patch_size=(1, 1, 1),
self_num_layer=2,
token_mixer_size=32,
encoder="cnn",
corss_attention=True,
modality_gate=True):
super().__init__()
self.out_channels = out_channels
self.model_num = model_num
self.pool_size = pool_size
self.cross_attention = corss_attention
self.modality_gate = modality_gate
pool_size_all = [1, 1, 1]
for p in pool_size:
pool_size_all = [pool_size_all[i] * p[i] for i in range(len(p))]
new_image_size = [image_size[i] // pool_size_all[i] for i in range(3)]
if encoder == "cnn":
print("use cnn encoder")
self.multicnn_encoder = MCNNEncoder(model_num=model_num, fea=fea, pool_size=pool_size)
elif encoder == "poolformer":
print("use poolformer encoder")
self.multicnn_encoder = PoolFormerEncoders(model_num=model_num, fea=fea, pool_size=pool_size)
        else :
            raise ValueError("unknown encoder type: {}".format(encoder))
if self.cross_attention:
print("use cross attention fusion module")
self.cross_fusion_trans = MultiAttentionTransformer(
in_channels=model_num*fea[4],
out_channels=fea[4],
patch_size=(1,1,1),
img_size=new_image_size,
mlp_size=2*fea[4],
window_size=window_size,
)
# self.cross_fusion_trans = FusionSelfCrossTrans(model_num=model_num,
# in_channels=fea[4],
# hidden_size=fea[4],
# patch_size=patch_size,
# img_size=new_image_size,
# mlp_size=2*fea[4], self_num_layer=self_num_layer,
# window_size=window_size, token_mixer_size=token_mixer_size)
else :
            # no cross-attention
print("no cross attention fusion module")
self.fusion_conv_5 = CNN(model_num*fea[4], fea[4], 3, 1)
if self.modality_gate:
print("use modality gate module")
self.gate_layer = nn.Conv3d(fea[4], 2, 1, 1, 0)
else :
print("no modality gate module")
self.fusion_conv_1 = CNN(model_num*fea[0], fea[0], 3, 1)
self.fusion_conv_2 = CNN(model_num*fea[1], fea[1], 3, 1)
self.fusion_conv_3 = CNN(model_num*fea[2], fea[2], 3, 1)
self.fusion_conv_4 = CNN(model_num*fea[3], fea[3], 3, 1)
# self.upcat_4 = PoolFormerUpcat(fea[4], fea[3], up_size=pool_size[3], num_layers=2)
# self.upcat_3 = PoolFormerUpcat(fea[3], fea[2], up_size=pool_size[2], num_layers=2)
# self.upcat_2 = PoolFormerUpcat(fea[2], fea[1], up_size=pool_size[1], num_layers=2)
# self.upcat_1 = PoolFormerUpcat(fea[1], fea[5], up_size=pool_size[0], num_layers=2)
self.upcat_4 = UpCat(3, fea[4], fea[3], fea[3], act, norm, dropout, upsample, pool_size=pool_size[3])
self.upcat_3 = UpCat(3, fea[3], fea[2], fea[2], act, norm, dropout, upsample, pool_size=pool_size[2])
self.upcat_2 = UpCat(3, fea[2], fea[1], fea[1], act, norm, dropout, upsample, pool_size=pool_size[1])
self.upcat_1 = UpCat(3, fea[1], fea[0], fea[5], act, norm, dropout, upsample, halves=False, pool_size=pool_size[0])
self.final_conv = nn.Conv3d(fea[5], out_channels, 1, 1)
def forward(self, x):
        assert x.shape[1] == self.model_num, "number of input modalities does not match model_num"
encoder_x = self.multicnn_encoder(x)
encoder_1 = torch.stack([encoder_x[i][4] for i in range(self.model_num)], dim=1)
encoder_2 = torch.stack([encoder_x[i][3] for i in range(self.model_num)], dim=1)
encoder_3 = torch.stack([encoder_x[i][2] for i in range(self.model_num)], dim=1)
encoder_4 = torch.stack([encoder_x[i][1] for i in range(self.model_num)], dim=1)
encoder_5 = torch.stack([encoder_x[i][0] for i in range(self.model_num)], dim=1)
if self.cross_attention:
encoder_5 = rearrange(encoder_5, "b n c d w h -> b (n c) d w h")
fusion_out = self.cross_fusion_trans(encoder_5)
fusion_out_cnn = self.fusion_conv_5(encoder_5)
fusion_out = fusion_out + fusion_out_cnn
else :
            # no cross-attention; fuse the stacked modalities with a plain conv
encoder_5 = rearrange(encoder_5, "b n c d w h -> b (n c) d w h")
fusion_out = self.fusion_conv_5(encoder_5)
if self.modality_gate:
            # use the modality gate
fusion_out_tmp = self.gate_layer(fusion_out)
fusion_out_2 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_tmp, scale_factor=self.pool_size[3], mode="trilinear"))
fusion_out_4 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_2, scale_factor=self.pool_size[2], mode="trilinear"))
fusion_out_8 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_4, scale_factor=self.pool_size[1], mode="trilinear"))
fusion_out_16 = torch.sigmoid(torch.nn.functional.interpolate(fusion_out_8, scale_factor=self.pool_size[0], mode="trilinear"))
            # gate (re-weight) the per-modality encoder features
encoder_1 = rearrange(encoder_1 * fusion_out_16.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_2 = rearrange(encoder_2 * fusion_out_8.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_3 = rearrange(encoder_3 * fusion_out_4.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
encoder_4 = rearrange(encoder_4 * fusion_out_2.unsqueeze(dim=2), "b n c d w h -> b (n c) d w h")
else :
            # no gating
encoder_1 = rearrange(encoder_1 , "b n c d w h -> b (n c) d w h")
encoder_2 = rearrange(encoder_2 , "b n c d w h -> b (n c) d w h")
encoder_3 = rearrange(encoder_3 , "b n c d w h -> b (n c) d w h")
encoder_4 = rearrange(encoder_4 , "b n c d w h -> b (n c) d w h")
encoder_1_cnn = self.fusion_conv_1(encoder_1)
encoder_2_cnn = self.fusion_conv_2(encoder_2)
encoder_3_cnn = self.fusion_conv_3(encoder_3)
encoder_4_cnn = self.fusion_conv_4(encoder_4)
u4 = self.upcat_4(fusion_out, encoder_4_cnn)
u3 = self.upcat_3(u4, encoder_3_cnn)
u2 = self.upcat_2(u3, encoder_2_cnn)
u1 = self.upcat_1(u2, encoder_1_cnn)
logits = self.final_conv(u1)
return logits
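# Hedged usage sketch (assumptions: one single-channel 3D volume per modality stacked
# on dim 1, matching the model_num assert in forward(), and an image_size divisible by
# the product of the default pool_size, i.e. 16 per axis):
# model = SCAFNetNoCross(model_num=4, out_channels=3, image_size=(32, 64, 64))
# x = torch.randn(1, 4, 32, 64, 64)   # (batch, modalities, D, H, W) -- hypothetical shapes
# with torch.no_grad():
#     logits = model(x)               # expected: (1, 3, 32, 64, 64) segmentation logits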
|
import os
import pickle
from typing import Union
from datetime import date as dt
import cv2
import face_recognition
from src.models import StudentModel, AttendanceModel
from src.settings import (
DATASET_PATH,
HAAR_CASCADE_PATH,
DLIB_MODEL, DLIB_TOLERANCE, ENCODINGS_FILE
)
class CliAppUtils:
app_title = "Attendance System"
def __init__(self, input_video: Union[int, str]):
self.input_video = input_video
def check(self):
# store input video stream capture in cap variable
cap = cv2.VideoCapture(self.input_video)
while cap.isOpened():
# capture frame-by-frame
ret, frame = cap.read()
            if not ret:  # no frame returned (end of stream or camera error)
                break
# display the resulting frame
cv2.imshow(f"Checking Camera - {self.app_title}", frame)
k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
if k == 27:
break
# when everything is done
cap.release()
cv2.destroyAllWindows()
@classmethod
def save_n_create(cls, name: str) -> str:
student = StudentModel(name=name)
# save student to database
student.save_to_db()
# create a directory for <id> of the student
id_path = f"{DATASET_PATH}{os.sep}{student.id}"
if not os.path.exists(id_path):
os.makedirs(id_path)
return id_path
# Capture Image function definition
def detect_n_capture(self):
name = input("Enter Student's Name: ")
id_path = self.save_n_create(name)
# store input video stream in cap variable
cap = cv2.VideoCapture(self.input_video)
face_classifier = cv2.CascadeClassifier(HAAR_CASCADE_PATH)
increment_num = 0
# loop over the frames from the video stream
while True:
# capture frame-by-frame
ret, img = cap.read()
if not ret: # video is not detected
break
# detect faces using haar cascade detector
faces = face_classifier.detectMultiScale(img, 1.0485258, 6)
for(x, y, w, h) in faces:
increment_num += 1
                # save the captured face region in the <id> folder under static/images/dataset
                cv2.imwrite(
                    f"{id_path}{os.sep}{increment_num}.jpg",
                    img[y:y + h, x:x + w]
                )
# draw the bounding box of the face along with the associated
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# display the resulting frame
cv2.imshow(f"Capturing Face - {self.app_title}", img)
# wait for 100 milliseconds
k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
if k == 27:
break
            elif increment_num >= 15:  # stop after 15 face samples
break
# when everything is done
cap.release()
cv2.destroyAllWindows()
def recognize_n_attendance(self):
print("[INFO] loading encodings...")
        with open(ENCODINGS_FILE, "rb") as encodings_file:
            data = pickle.loads(encodings_file.read())
# print(len(data['encodings']) == len(data['ids']))
print("[INFO] starting video stream...")
# store input video stream in cap variable
cap = cv2.VideoCapture(self.input_video)
        # create a dictionary of known students to avoid repeated database queries
known_students = {}
# loop over the frames from the video stream
while True:
# grab the frame from the video stream
            ret, img = cap.read()
            if not ret:  # no frame returned (end of stream or camera error)
                break
# convert the input frame from BGR to RGB then resize it to have
# a width of 750px (to speedup processing)
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# rgb = imutils.resize(img, width=750)
r = img.shape[1] / float(rgb.shape[1])
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input frame, then compute
# the facial embeddings for each face
boxes = face_recognition.face_locations(rgb, model=DLIB_MODEL)
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known encodings
matches = face_recognition.compare_faces(data["encodings"], encoding, DLIB_TOLERANCE)
# name to be displayed on video
display_name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialize a
# dictionary to count the total number of times each face
# was matched
matched_indexes = [i for (i, b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for
# each recognized face
for matched_index in matched_indexes:
_id = data["ids"][matched_index]
counts[_id] = counts.get(_id, 0) + 1
# determine the recognized face with the largest number
# of votes (note: in the event of an unlikely tie Python
# will select first entry in the dictionary)
_id = max(counts, key=counts.get)
if _id:
if _id in known_students.keys():
# find matched student in the known_students by id
student = known_students[_id]
else:
# find matched student in the database by id
student = StudentModel.find_by_id(_id)
known_students[_id] = student
# if student's attendance is not marked
if not AttendanceModel.is_marked(dt.today(), student):
# then mark student's attendance
student_attendance = AttendanceModel(student=student)
# commit changes to database
student_attendance.save_to_db()
# update displayed name to student's name
display_name = student.name
# append the name to be displayed in names list
names.append(display_name)
# loop over the recognized faces
for ((top, right, bottom, left), display_name) in zip(boxes, names):
if display_name == "Unknown":
continue
# rescale the face coordinates
top = int(top * r)
right = int(right * r)
bottom = int(bottom * r)
left = int(left * r)
top_left = (left, top)
bottom_right = (right, bottom)
# draw the predicted face name on the image
cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(img, display_name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# display the output frames to the screen
cv2.imshow(f"Recognizing Faces - {self.app_title}", img)
k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting from the loop
if k == 27:
break
# do a bit of cleanup
cap.release()
cv2.destroyAllWindows()
print("Attendance Successful!")
|
from peepshow.pager.pager import Pager
class TooManyInitialIterations(StopIteration): pass
class PagedCache:
def __init__(self, content, str_func=str):
self.iterator = content.__iter__()
self.index = 0
self.offset = 0
self.cache = []
self.str_func = str_func
def iterate(self, times):
for i in range(times):
next(self.iterator)
self.offset += 1
self.index += 1
def __iter__(self):
while True:
try:
obj = next(self.iterator)
except StopIteration:
break
line = self.str_func(obj)
self.cache.append(obj)
yield f'[{self.index:>5}] {line}'
self.index += 1
def clear_cache(self):
self.offset += len(self.cache)
self.cache[:] = []
def __getitem__(self, index):
cache_index = index - self.offset
        if not (0 <= cache_index < len(self.cache)):
raise IndexError("You can use only indices visible on the screen.")
return self.cache[cache_index]
def recall_cache(self):
def content_gen():
for index, entry in enumerate(self.cache):
                line = self.str_func(entry)
index_ = index + self.offset
yield f'[{index_:>5}] {line}'
p = Pager(numeric=True)
p.page(content_gen())
def page(content, str_func=str, offset=0):
cache = PagedCache(content, str_func)
try:
cache.iterate(offset)
except StopIteration as ex:
raise TooManyInitialIterations(f'Only {cache.index} iterations possible.') from ex
pager = Pager( (PagedCache.clear_cache, cache), numeric=True )
pager.page(cache)
return cache
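# Hedged usage sketch: page an arbitrary (possibly infinite) iterable through the
# peepshow Pager; the generator below is purely illustrative.
if __name__ == '__main__':
    squares = (n * n for n in range(1000))
    shown = page(squares, str_func=str)
    # entries currently cached (i.e. visible on screen) can be recalled by their
    # displayed index, e.g. shown[shown.offset] for the first visible one.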
|
"""Module of infoset database functions.
Handles queries that span multiple tables.
"""
# Python standard libraries
import copy
from collections import defaultdict
# PIP libraries
from sqlalchemy import and_
# Infoset libraries
from infoset.utils import general
from infoset.db import db
from infoset.db.db_orm import Datapoint, Device, Agent, DeviceAgent, AgentName
def datapoint_summary_list():
"""Get summary datapoint information as a list of dicts.
Args:
None
Returns:
return_value: List of dicts. Each dict keyed by table column name
"""
# Return
return_value = _datapoint_summary(aslist=True)
return return_value
def datapoint_summary():
"""Get summary datapoint information as a dict.
Args:
None
Returns:
return_value: Dict keyed by idx_datapoint.
Subkeys are by table column name
"""
# Return
return_value = _datapoint_summary(aslist=False)
return return_value
def _datapoint_summary(aslist=False):
"""Get summary datapoint information.
Args:
None
Returns:
return_value: Dict keyed by idx_datapoint OR list of dicts
"""
# Initialize key variables
data = defaultdict(lambda: defaultdict(dict))
data_list = []
return_value = None
# Establish a database session
database = db.Database()
session = database.session()
# Get result of query
rows = session.query(
Datapoint.idx_datapoint,
Datapoint.agent_label,
Datapoint.agent_source,
DeviceAgent.idx_deviceagent,
AgentName.name,
Agent.id_agent,
Device.devicename).filter(
and_(
Datapoint.idx_deviceagent == DeviceAgent.idx_deviceagent,
Agent.idx_agent == DeviceAgent.idx_agent,
Agent.idx_agentname == AgentName.idx_agentname,
Device.idx_device == DeviceAgent.idx_device)
)
# Process query results
for row in rows:
idx_datapoint = row.idx_datapoint
data_dict = {}
data_dict['agent'] = general.decode(row.name)
data_dict['agent_label'] = general.decode(row.agent_label)
data_dict['agent_source'] = general.decode(row.agent_source)
data_dict['id_agent'] = general.decode(row.id_agent)
data_dict['devicename'] = general.decode(row.devicename)
data_dict['idx_deviceagent'] = row.idx_deviceagent
# Assign values to data structures dependent on 'aslist' value
if aslist is True:
data_dict['idx_datapoint'] = idx_datapoint
data_list.append(data_dict)
else:
data[idx_datapoint] = data_dict
# Return the session to the database pool after processing
database.close()
# Assign values to data structures dependent on 'aslist' value
if aslist is True:
return_value = copy.deepcopy(data_list)
else:
return_value = copy.deepcopy(data)
# Return
return return_value
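# Hedged usage sketch (assumes a configured infoset database): both helpers return the
# same joined Datapoint/DeviceAgent/Agent/Device summary, either keyed by idx_datapoint
# or flattened into a list of dicts.
if __name__ == '__main__':
    for entry in datapoint_summary_list():
        print(entry['idx_datapoint'], entry['devicename'], entry['agent_label'])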
|
from motor_control import roboclaw
reading = roboclaw.readmainbattery()
voltage = reading / 10.0
print("Battery Level: %.1f V" % voltage)
if voltage < 10.0:
    print("WARNING: Battery low, change soon!")
|
from .rickydl import *
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
import unittest
from freeflow.core.tests.dag import DagTest
from freeflow.core.tests.sensor.externaltask import SensorExternalTaskTest
# Optional tests
try:
from freeflow.core.tests.operator.bigquery import OperatorBigqueryTest
except ImportError:
pass
from freeflow.core.dag_loader import get_dag_files
test_classes = [DagTest,
SensorExternalTaskTest]
# Optional tests
try:
test_classes += [OperatorBigqueryTest]
except Exception:
pass
dag_files = []
def run():
global dag_files
dag_files = get_dag_files()
test_loader = unittest.TestLoader()
suites = []
for test_class in test_classes:
suite = test_loader.loadTestsFromTestCase(test_class)
suites.append(suite)
test_suites = unittest.TestSuite(suites)
test_runner = unittest.TextTestRunner()
result = test_runner.run(test_suites)
if len(result.failures) > 0 or len(result.errors) > 0:
raise SystemExit(1)
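# Hedged usage sketch: freeflow normally calls run() from its CLI, but the module can
# also be executed directly:
# if __name__ == '__main__':
#     run()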
|
import numpy as np
import numpy.random as npr
import pandas as pd
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.preprocessing import StandardScaler
from scipy.special import logsumexp, softmax, log_softmax, logit
from scipy.sparse import diags
import torch
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
from tqdm import tqdm
class SoftmaxRegression(BaseEstimator, RegressorMixin):
def __init__(
self,
alpha=0,
lr=0.01,
max_iters=100,
verbose=False,
random_state=0
):
self.alpha = alpha
self.max_iters = max_iters
self.verbose = verbose
self.coef_ = None
self.intercept_ = None
self.scaler_ = None
self.lr = lr
self.random_state = random_state
self.model = None
def fit(self, X, y, sample_weight=None):
X = np.array(X)
y = np.array(y)
w = sample_weight
self.scaler_ = StandardScaler()
self.scaler_.fit(X)
X = self.scaler_.transform(X)
N, D = X.shape
K = y.shape[1]
self.classes_ = np.arange(K)
torch.manual_seed(self.random_state)
device = 'cpu'
X = torch.tensor(X,dtype=torch.float32,device=device)
Y = torch.tensor(y,dtype=torch.float32,device=device)
self.model = nn.Linear(D,K).to(device=device)
self.model.train()
opt = Adam(self.model.parameters(),lr=self.lr)
iterator = range(self.max_iters)
if self.verbose:
iterator = tqdm(iterator,position=0)
for t in iterator:
Y_hat = self.model(X)
loss = F.cross_entropy(Y_hat, Y, reduction='sum') + self.alpha * self.model.weight.square().sum()
loss.backward()
opt.step()
opt.zero_grad()
        self.model = self.model.cpu()
        self.model.requires_grad_(False)
        self.model.eval()
        return self
def predict_proba(self, X):
X = self.scaler_.transform(np.array(X))
X = torch.tensor(X,dtype=torch.float32)
Y_hat = self.model(X)
Y_hat = Y_hat.cpu().numpy()
Y_hat = softmax(Y_hat, axis=1)
return Y_hat
def predict(self, X):
Y_hat = self.predict_proba(X)
Y_hat = np.argmax(Y_hat, axis=1)
return Y_hat
def score(self, X, y, sample_weight=None):
X = self.scaler_.transform(np.array(X))
X = torch.tensor(X,dtype=torch.float32)
Y = torch.tensor(y,dtype=torch.float32)
Y_hat = self.model(X)
loss = F.cross_entropy(Y_hat, Y, reduction='sum').item()
return loss
# # one-vs-rest platt scaling -- found this was irreproducible
# from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import cross_val_predict, KFold
# def platt_scaling(scores, labels):
# lm = LogisticRegression(penalty='none',random_state=0)
# idx = list(set(scores.index)&set(labels.index))
# rng = npr.RandomState(0)
# rng.shuffle(idx)
# scores = {region:cross_val_predict(
# lm,
# scores.loc[idx],
# labels[region][idx],
# cv=5,
# method='predict_proba'
# )[:,1] for region in labels.columns}
# scores = pd.DataFrame(
# scores,
# index=idx,
# columns=labels.columns
# )
# scores /= scores.sum(1).values[:,None]
# return scores
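# Hedged usage sketch on synthetic data (assumes a PyTorch version whose
# F.cross_entropy accepts probability targets): y is a one-hot label matrix, matching
# the K = y.shape[1] convention used in fit().
if __name__ == "__main__":
    rng = npr.RandomState(0)
    X_demo = rng.randn(200, 5)
    labels = rng.randint(0, 3, size=200)
    y_demo = np.eye(3)[labels]                    # one-hot targets, shape (200, 3)
    clf = SoftmaxRegression(alpha=0.1, lr=0.05, max_iters=200)
    clf.fit(X_demo, y_demo)
    print("train accuracy:", (clf.predict(X_demo) == labels).mean())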
|
from librosa.core import stft
from librosa import filters
from librosa import to_mono
import numpy as np
import scipy
def medium_time_power_calculation(power_stft_signal, M=2):
medium_time_power = np.zeros_like(power_stft_signal)
power_stft_signal = np.pad(power_stft_signal, [(M, M), (0, 0)], 'constant')
for i in range(medium_time_power.shape[0]):
medium_time_power[i, :] = sum([1 / (2 * M + 1) *
power_stft_signal[i + k - M, :]
for k in range(2 * M + 1)])
return medium_time_power
def asymmetric_lawpass_filtering(rectified_signal, lm_a=0.999, lm_b=0.5):
floor_level = np.zeros_like(rectified_signal)
floor_level[0, ] = 0.9 * rectified_signal[0, ]
for m in range(floor_level.shape[0]):
floor_level[m, ] = np.where(rectified_signal[m, ] >=
floor_level[m - 1, ],
lm_a * floor_level[m - 1, ] +
(1 - lm_a) * rectified_signal[m, ],
lm_b * floor_level[m - 1, ] +
(1 - lm_b) * rectified_signal[m, ])
return floor_level
def halfwave_rectification(subtracted_lower_envelope, th=0):
return np.where(subtracted_lower_envelope < th,
np.zeros_like(subtracted_lower_envelope),
subtracted_lower_envelope)
def temporal_masking(rectified_signal, lam_t=0.85, myu_t=0.2):
temporal_masked_signal = np.zeros_like(rectified_signal)
online_peak_power = np.zeros_like(rectified_signal)
temporal_masked_signal[0, ] = rectified_signal[0, ]
for m in range(rectified_signal.shape[0]):
online_peak_power[m, :] = np.where(
lam_t * online_peak_power[m - 1, :] >= rectified_signal[m, ],
lam_t * online_peak_power[m - 1, :],
rectified_signal[m, :])
temporal_masked_signal[m, :] = np.where(
rectified_signal[m, :] >= lam_t * online_peak_power[m - 1, :],
rectified_signal[m, :],
myu_t * online_peak_power[m - 1, :])
return temporal_masked_signal
def after_temporal_masking(temporal_masked_signal, floor_level):
return np.where(temporal_masked_signal > floor_level,
temporal_masked_signal, floor_level)
def switch_excitation_or_non_excitation(temporal_masked_signal,
floor_level, lower_envelope,
medium_time_power, c=2):
return np.where(medium_time_power >= c * lower_envelope,
temporal_masked_signal, floor_level)
def weight_smoothing(final_output, medium_time_power, N=4, L=40):
spectral_weight_smoothing = np.zeros_like(final_output)
for l in range(final_output.shape[1]):
l_1 = max(l - N, 1)
l_2 = min(l + N, L)
spectral_weight_smoothing[:, l] = sum(
[1 / (l_2 - l_1 + 1) * (final_output[:, k] / np.where(
medium_time_power[:, k] > 0.0001,
medium_time_power[:, k],
0.0001)) for k in range(l_1, l_2)])
return spectral_weight_smoothing
def time_frequency_normalization(power_stft_signal,
spectral_weight_smoothing):
return power_stft_signal * spectral_weight_smoothing
def mean_power_normalization(transfer_function,
final_output, lam_myu=0.999, L=80, k=1):
myu = np.zeros(shape=(transfer_function.shape[0]))
myu[0] = 0.0001
normalized_power = np.zeros_like(transfer_function)
for m in range(1, transfer_function.shape[0]):
myu[m] = lam_myu * myu[m - 1] + \
(1 - lam_myu) / L * \
sum([transfer_function[m, s] for s in range(0, L - 1)])
for m in range(final_output.shape[0]):
normalized_power[m, :] = k * transfer_function[m, :] / myu[m]
return normalized_power
def power_function_nonlinearity(normalized_power, n=15):
return normalized_power ** (1 / n)
def pncc(audio_wave, n_fft=512, sr=16000, winlen=0.020, winstep=0.010,
n_mels=40, n_pncc=13, weight_N=4, power=2):
pre_emphasis_signal = scipy.signal.lfilter([1.0, -0.97], 1, audio_wave)
mono_wave = to_mono(pre_emphasis_signal)
stft_pre_emphasis_signal = np.abs(stft(mono_wave,
n_fft=n_fft,
hop_length=int(sr * winstep),
win_length=int(sr * winlen),
window=np.ones(int(sr * winlen)),
center=False)) ** power
mel_filter = np.abs(filters.mel(sr, n_fft=n_fft, n_mels=n_mels)) ** power
power_stft_signal = np.dot(stft_pre_emphasis_signal.T, mel_filter.T)
medium_time_power = medium_time_power_calculation(power_stft_signal)
lower_envelope = asymmetric_lawpass_filtering(
medium_time_power, 0.999, 0.5)
subtracted_lower_envelope = medium_time_power - lower_envelope
rectified_signal = halfwave_rectification(subtracted_lower_envelope)
floor_level = asymmetric_lawpass_filtering(rectified_signal)
temporal_masked_signal = temporal_masking(rectified_signal)
temporal_masked_signal = after_temporal_masking(
temporal_masked_signal, floor_level)
final_output = switch_excitation_or_non_excitation(
temporal_masked_signal, floor_level, lower_envelope,
medium_time_power)
spectral_weight_smoothing = weight_smoothing(
final_output, medium_time_power, weight_N, L=n_mels)
transfer_function = time_frequency_normalization(
power_stft_signal=power_stft_signal,
spectral_weight_smoothing=spectral_weight_smoothing)
    normalized_power = mean_power_normalization(
        transfer_function, final_output, L=n_mels)
power_law_nonlinearity = power_function_nonlinearity(normalized_power)
return power_law_nonlinearity
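# Hedged usage sketch: the audio path below is hypothetical; pncc() returns the
# power-law nonlinearity output with shape (frames, n_mels). Note the n_pncc argument
# is currently unused because no DCT step is applied in this implementation.
if __name__ == "__main__":
    import librosa
    audio_wave, sr = librosa.load("example.wav", sr=16000)   # hypothetical file
    features = pncc(audio_wave, sr=sr)
    print(features.shape)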
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_api
'''
:author: madkote
:contact: madkote(at)bluewin.ch
Test
----
Unit tests for Flask app
Usage
-----
>>> python test_api.py
'''
import json
import unittest
import api
import settings
VERSION = (0, 1, 0)
__all__ = []
__author__ = 'madkote <madkote(at)bluewin.ch>'
__version__ = '.'.join(str(x) for x in VERSION)
# =============================================================================
# TESTS
# =============================================================================
class Test(unittest.TestCase):
def setUp(self):
api.app.config['TESTING'] = True
self.client = api.app.test_client()
def tearDown(self):
api.app.config['TESTING'] = False
def test_data(self):
route = settings.API_URL + '/data'
r = self.client.get(route)
res = json.loads(r.data.decode())
# these data may vary
res.pop('version')
res.pop('host')
res.pop('node')
res.pop('system')
exp = {'data': 'some data'}
self.assertTrue(res == exp,
'Route >%s should return %s, but got %s' %
(route, exp, res))
if __name__ == "__main__":
unittest.main()
|
from lib.button import Button
# places button objects in an array
class BUTTON_CONTROLLER:
def __init__(self, pins, leds, keys):
self.buttons = []
self.function_state = [
False
]
for index, pin in enumerate(pins, start=0):
self.buttons.append(Button(pin, leds, index, self.function_state, keys[index]))
# checks all buttons in button array to see if they are pressed
def check(self):
for button in self.buttons:
button.update()
|
input = """301"""
input = int(input)
# input = 3
buffer = [0]
def spinlock(s, n, q):
n = n + 1
n = n % len(s)
s = [q] + s[n:] + s[:n]
return s
for i in range(1, 2017 + 1):
buffer = spinlock(buffer, input, i)
print(buffer[1])
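# Equivalent sketch (assuming the same puzzle semantics) using collections.deque:
# rotating left by `input` and appending keeps the just-inserted value at the tail,
# so the value that follows it is d[0] -- the same answer as buffer[1] above.
# from collections import deque
# d = deque([0])
# for i in range(1, 2017 + 1):
#     d.rotate(-input)
#     d.append(i)
# print(d[0])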
|
import ipcalc
import codecs
import yaml
#this function writes the beginning of the VagrantFile
def BeginVagrantFile(f):
# print("writing the beginning of the vagrant file")
f.write("# -*- mode: ruby -*- \n# vi: set ft=ruby :\n\n")
f.write("#All Vagrant configuration is done below. The 2 in Vagrant.configure\n#configures the configuration version we support older styles for\n#backwards compatibility. Please don't change it unless you know what\n#you're doing.\n")
f.write("Vagrant.configure(\"2\") do |config|\n")
f.write("config.vm.box_check_update = true\n")
f.write("config.vm.provider \"virtualbox\" do |vb|\n")
f.write("vb.customize [\"modifyvm\", :id, \"--usb\", \"on\"]\n")
f.write("vb.customize [\"modifyvm\", :id, \"--usbehci\", \"off\"]\n")
f.write("vb.customize [\"modifyvm\", :id, \"--nicpromisc2\", \"allow-all\"]\n")
f.write("vb.customize [\"modifyvm\", :id, \"--nicpromisc3\", \"allow-all\"]\n")
f.write("vb.customize [\"modifyvm\", :id, \"--nicpromisc4\", \"allow-all\"]\n")
f.write("vb.customize [\"modifyvm\", :id, \"--nicpromisc5\", \"allow-all\"]\n")
f.write("vb.cpus = 1\n")
f.write("end\n")
#this function writes a new PC host into the Vagrantfile
def writeHost(f,Host):
# print("adding an host to the vagrant file")
    #extract each attribute from the tuple
Id = Host[1]["Id"]
Name = Host[1]["Name"]
Ram = Host[1]["Ram"]
Os = Host[1]["Os"]
Ip = Host[1]["Network"][0]["Ip"]
Netmask = Host[1]["Network"][0]["Netmask"]
Interface = Host[1]["Network"][0]["Interface"]
IpNoSub = Ip.split("/")[0]
Network = ipcalc.Network(Ip)
IpNet = Network.network()
#there must be a more efficient way to calculate this, this one is too trivial
for x in Network:
Gateway = str(x)
f.write("config.vm.define \"" + Name + "\" do |" + Name + "|\n")
f.write(Name + ".vm.box = \"" + Os + "\"\n")
f.write(Name + ".vm.hostname = \"" + Name + "\"\n")
    if Id == 4:
f.write(Name + ".vm.network \"private_network\", ip: \"" + IpNoSub +"\", netmask: \"" + Netmask + "\", virtualbox__intnet: \"broadcast_router-south-1\", auto_config: true\n")
    if Id == 5:
f.write(Name + ".vm.network \"private_network\", ip: \"" + IpNoSub +"\", netmask: \"" + Netmask + "\", virtualbox__intnet: \"broadcast_router-south-2\", auto_config: true\n")
    if Id == 6:
f.write(Name + ".vm.network \"private_network\", ip: \"" + IpNoSub +"\", netmask: \"" + Netmask + "\", virtualbox__intnet: \"broadcast_router-south-3\", auto_config: true\n")
f.write(Name + ".vm.provision \"shell\", run: \"always\", inline: <<-SHELL\n")
f.write("echo \"Static Routig configuration Started for " + Name + "\"\n")
f.write("sudo sysctl -w net.ipv4.ip_forward=1\n")
f.write("sudo route add -net " + str(IpNet) + " netmask " + Netmask + " gw " + Gateway + " dev " + Interface + "\n")
f.write("echo \"Configuration END\"\n")
f.write("echo \"" + Name + " is ready to Use\"\n")
f.write("SHELL\n")
f.write(Name + ".vm.provider \"virtualbox\" do |vb|\n")
f.write("vb.memory = " + Ram + "\n")
f.write("end\n")
f.write("end\n")
#this function writes a new Router into the Vagrantfile
def writeRouter(f,Router):
# print("adding a router to the vagrant file")
    #extract each attribute from the tuple
Id = Router[1]["Id"]
Name = Router[1]["Name"]
Ram = Router[1]["Ram"]
Os = Router[1]["Os"]
Ip1 = Router[1]["Network"][0]["Ip"]
Netmask1 = Router[1]["Network"][0]["Netmask"]
Interface1 = Router[1]["Network"][0]["Interface"]
IpNoSub1 = Ip1.split("/")[0]
NetmaskAbbr1 = Ip1.split("/")[1]
Ip2 = Router[1]["Network"][1]["Ip"]
Netmask2 = Router[1]["Network"][1]["Netmask"]
Interface2 = Router[1]["Network"][1]["Interface"]
IpNoSub2 = Ip2.split("/")[0]
NetmaskAbbr2 = Ip2.split("/")[1]
Ip3 = Router[1]["Network"][2]["Ip"]
Netmask3 = Router[1]["Network"][2]["Netmask"]
Interface3 = Router[1]["Network"][2]["Interface"]
IpNoSub3 = Ip3.split("/")[0]
NetmaskAbbr3 = Ip3.split("/")[1]
Network1 = ipcalc.Network(Ip1)
IpNet1 = Network1.network()
for x in Network1:
Gateway1 = str(x)
Network2 = ipcalc.Network(Ip2)
IpNet2 = Network2.network()
for x in Network2:
Gateway2 = str(x)
Network3 = ipcalc.Network(Ip3)
IpNet3 = Network3.network()
for x in Network3:
Gateway3 = str(x)
f.write("config.vm.define \""+ Name +"\" do |" + Name + "|\n")
f.write(Name + ".vm.box = \"" + Os + "\"\n")
f.write(Name + ".vm.hostname = \""+ Name +"\"\n")
    if Id == 1:
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-south-1\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-1\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-3\", auto_config: false\n")
    if Id == 2:
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-south-2\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-2\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-1\", auto_config: false\n")
    if Id == 3:
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-south-3\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-3\", auto_config: false\n")
f.write(Name + ".vm.network \"private_network\", virtualbox__intnet: \"broadcast_router-inter-2\", auto_config: false\n")
f.write(Name + ".vm.provision \"shell\", inline: <<-SHELL\n")
f.write("echo \" Quagga "+ Name +" start installing\"\n")
f.write("#sudo sysctl -w net.ipv4.ip_forward=1\n")
f.write("sudo apt-get update\n")
f.write("sudo apt-get install quagga quagga-doc traceroute\n")
f.write("sudo cp /usr/share/doc/quagga/examples/zebra.conf.sample /etc/quagga/zebra.conf\n")
f.write("sudo cp /usr/share/doc/quagga/examples/ospfd.conf.sample /etc/quagga/ospfd.conf\n")
f.write("sudo chown quagga.quaggavty /etc/quagga/*.conf\n")
f.write("sudo /etc/init.d/quagga start\n")
f.write("sudo sed -i s'/zebra=no/zebra=yes/' /etc/quagga/daemons\n")
f.write("sudo sed -i s'/ospfd=no/ospfd=yes/' /etc/quagga/daemons\n")
f.write("sudo echo 'VTYSH_PAGER=more' >>/etc/environment\n")
f.write("sudo echo 'export VTYSH_PAGER=more' >>/etc/bash.bashrc\n")
f.write("sudo /etc/init.d/quagga restart\n")
f.write("echo \"Routing Protocol ospf Configuration Started\"\n")
f.write("sudo vtysh -c '\n")
f.write("configure terminal\n")
f.write("router ospf\n")
f.write("network " + str(IpNet1) + "/" + NetmaskAbbr1 + " area 0.0.0.0\n")
f.write("network " + str(IpNet2) + "/" + NetmaskAbbr2 + " area 0.0.0.0\n")
f.write("network " + str(IpNet3) + "/" + NetmaskAbbr3 + " area 0.0.0.0\n")
f.write("exit\n")
f.write("interface " + Interface1 + "\n")
f.write("ip address " + IpNoSub1 + "/" + NetmaskAbbr1 + "\n")
f.write("exit\n")
f.write("interface " + Interface2 + "\n")
f.write("ip address " + IpNoSub2 + "/" + NetmaskAbbr2 + "\n")
f.write("exit\n")
f.write("interface " + Interface3 + "\n")
f.write("ip address " + IpNoSub3 + "/" + NetmaskAbbr3 + "\n")
f.write("do write\n")
f.write("exit\n")
f.write("exit\n")
f.write("ip forwarding\n")
f.write("exit'\n")
f.write("echo \"Configuration END\"\n")
f.write("echo \"" + Name + " is ready to Use\"\n")
f.write("SHELL\n")
f.write("# " + Name + ".vm.provision \"shell\", path: \"common.sh\"\n")
f.write(Name + ".vm.provider \"virtualbox\" do |vb|\n")
f.write("vb.memory = " + Ram + "\n")
f.write("end\n")
f.write("end\n")
#the following is a fake graph used for testing,
#so the input does not have to be typed on the command line every time
host1 = (4,{
"Id" : 4,
"Name":"host1",
"Type": "Host",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.1.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
}]
})
host2 = (5,{
"Id" : 5,
"Name":"host2",
"Type": "Host",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.2.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
}]
})
host3 = (6,{
"Id" : 6,
"Name":"host3",
"Type": "Host",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.3.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
}]
})
rout1 = (1,{
"Id" : 1,
"Name":"router1",
"Type": "Router",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.1.254/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
},{
"Ip": "192.168.100.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth2"
},{
"Ip": "192.168.101.2/24",
"Netmask": "255.255.255.0",
"Interface" : "eth3"
}]
})
rout2 = (2,{
"Id" : 2,
"Name":"router2",
"Type": "Router",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.2.254/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
},{
"Ip": "192.168.100.2/24",
"Netmask": "255.255.255.0",
"Interface" : "eth2"
},{
"Ip": "192.168.102.2/24",
"Netmask": "255.255.255.0",
"Interface" : "eth3"
}]
})
rout3 = (3,{
"Id" : 3,
"Name":"ruoter3",
"Type": "Router",
"Ram": "1024",
"Os": "bento/ubuntu-16.04",
"Network" : [{
"Ip": "192.168.3.254/24",
"Netmask": "255.255.255.0",
"Interface" : "eth1"
},{
"Ip": "192.168.101.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth2"
},{
"Ip": "192.168.102.1/24",
"Netmask": "255.255.255.0",
"Interface" : "eth3"
}]
})
MyNet = [host1,host2,host3,rout1,rout2,rout3]
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
def remap(newList):
print("-------------------")
for item in newList:
print("Looking at device " + str(item))
print("the TYPE is " + item["type"])
if item["type"] == "router" :
for device in MyNet:
if device[1]["Id"] is item["id"]:
print("remap of device " + str(device[1]["Id"]) + " to device " + str(item["id"]))
device[1]["Name"] = item["label"]
device[1]["Ram"] = item["ram"]
device[1]["Os"] = item["vm_image"]
device[1]["Network"][0]["Ip"] = item["network_interfaces"][0]["ip_address"]
device[1]["Network"][0]["Netmask"] = item["network_interfaces"][0]["netmask"]
device[1]["Network"][0]["Interface"] = item["network_interfaces"][0]["name_interface"]
device[1]["Network"][1]["Ip"] = item["network_interfaces"][1]["ip_address"]
device[1]["Network"][1]["Netmask"] = item["network_interfaces"][1]["netmask"]
device[1]["Network"][1]["Interface"] = item["network_interfaces"][1]["name_interface"]
device[1]["Network"][2]["Ip"] = item["network_interfaces"][2]["ip_address"]
device[1]["Network"][2]["Netmask"] = item["network_interfaces"][2]["netmask"]
device[1]["Network"][2]["Interface"] = item["network_interfaces"][2]["name_interface"]
for item in newList:
if item["type"] == "host" :
for device in MyNet:
if device[1]["Id"] is item["id"]:
print("remap of device " + str(device[1]["Id"]) + " to device " + str(item["id"]))
device[1]["Name"] = item["label"]
device[1]["Ram"] = item["ram"]
device[1]["Os"] = item["vm_image"]
device[1]["Network"][0]["Ip"] = item["network_interfaces"][0]["ip_address"]
device[1]["Network"][0]["Netmask"] = item["network_interfaces"][0]["netmask"]
device[1]["Network"][0]["Interface"] = item["network_interfaces"][0]["name_interface"]
return MyNet
def main():
VagrantFile = open("VagrantfileOSPF", "w")
#read the data structure from input
#Network = G.nodes.data():
file = codecs.open("NetworkGraphs/Template/OSPF_Routing_Template.html", "r", "utf-8")
html = file.read()
if "nodes = new vis.DataSet(" in html:
listOfDevice = find_between(html, "nodes = new vis.DataSet(" , ")")
print(listOfDevice)
        listOfDevice = yaml.safe_load(listOfDevice)
newNet = remap(listOfDevice)
Network = newNet
    #first, let's write the beginning of the VagrantFile
BeginVagrantFile(VagrantFile)
#second, let's write each device with his feature
#this topology has 3 hosts and 3 routers
#call the respective function to "populate" the vagrant file
for device in Network:
typeOfDevice = device[1]["Type"]
#print("the device is a " + typeOfDevice)
if typeOfDevice is "Router":
writeRouter(VagrantFile,device)
for device in Network:
typeOfDevice = device[1]["Type"]
#print("the device is a " + typeOfDevice)
if typeOfDevice is "Host":
writeHost(VagrantFile,device)
VagrantFile.write("end\n")
VagrantFile.close()
main()
|
from pm4py.evaluation.generalization.versions import token_based as generalization_token_based
from pm4py.evaluation.precision.versions import etconformance_token as precision_token_based
from pm4py.evaluation.simplicity.versions import arc_degree as simplicity_arc_degree
from pm4py.evaluation.replay_fitness.versions import token_replay as fitness_token_based
from pm4py.algo.tokenreplay import factory as token_replay
from pm4py import log as log_lib
PARAM_ACTIVITY_KEY = 'activity_key'
PARAM_FITNESS_WEIGHT = 'fitness_weight'
PARAM_PRECISION_WEIGHT = 'precision_weight'
PARAM_SIMPLICITY_WEIGHT = 'simplicity_weight'
PARAM_GENERALIZATION_WEIGHT = 'generalization_weight'
PARAMETERS = [PARAM_ACTIVITY_KEY, PARAM_FITNESS_WEIGHT, PARAM_PRECISION_WEIGHT, PARAM_SIMPLICITY_WEIGHT, PARAM_GENERALIZATION_WEIGHT]
def apply_token_replay(log, net, initial_marking, final_marking, parameters=None):
"""
Calculates all metrics based on token-based replay and returns a unified dictionary
Parameters
-----------
log
Trace log
net
Petri net
initial_marking
Initial marking
final_marking
Final marking
parameters
Parameters
Returns
-----------
dictionary
        Dictionary containing fitness, precision, generalization and simplicity, along with the weighted average of these metrics
"""
if parameters is None:
parameters = {}
activity_key = parameters[PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY
fitness_weight = parameters[PARAM_FITNESS_WEIGHT] if PARAM_FITNESS_WEIGHT in parameters else 0.25
precision_weight = parameters[PARAM_PRECISION_WEIGHT] if PARAM_PRECISION_WEIGHT in parameters else 0.25
simplicity_weight = parameters[PARAM_SIMPLICITY_WEIGHT] if PARAM_SIMPLICITY_WEIGHT in parameters else 0.25
generalization_weight = parameters[PARAM_GENERALIZATION_WEIGHT] if PARAM_GENERALIZATION_WEIGHT in parameters else 0.25
sum_of_weights = (fitness_weight + precision_weight + simplicity_weight + generalization_weight)
fitness_weight = fitness_weight / sum_of_weights
precision_weight = precision_weight / sum_of_weights
simplicity_weight = simplicity_weight / sum_of_weights
generalization_weight = generalization_weight / sum_of_weights
[traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] =\
token_replay.apply(log, net, initial_marking, final_marking, activity_key=activity_key)
parameters = {}
parameters["activity_key"] = activity_key
fitness = fitness_token_based.get_fitness(traceIsFit, traceFitnessValue)
precision = precision_token_based.apply(log, net, initial_marking, final_marking, parameters=parameters)
generalization = generalization_token_based.get_generalization(net, activatedTransitions)
simplicity = simplicity_arc_degree.apply(net)
dictionary = {}
dictionary["fitness"] = fitness
dictionary["precision"] = precision
dictionary["generalization"] = generalization
dictionary["simplicity"] = simplicity
metricsAverageWeight = fitness_weight * fitness["averageFitness"] + precision_weight * precision\
+ generalization_weight * generalization + simplicity_weight * simplicity
dictionary["metricsAverageWeight"] = metricsAverageWeight
return dictionary
TOKEN_BASED = "token_based"
VERSIONS = {TOKEN_BASED: apply_token_replay}
def apply(log, net, initial_marking, final_marking, parameters=None, variant="token_based"):
return VERSIONS[variant](log, net, initial_marking, final_marking, parameters=parameters)
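# Hedged usage sketch: log, net, initial_marking and final_marking are assumed to come
# from the usual pm4py importers / discovery algorithms; the weights below simply
# emphasize fitness over the other three dimensions before they are re-normalized.
# evaluation = apply(log, net, initial_marking, final_marking,
#                    parameters={PARAM_FITNESS_WEIGHT: 0.4,
#                                PARAM_PRECISION_WEIGHT: 0.3,
#                                PARAM_SIMPLICITY_WEIGHT: 0.15,
#                                PARAM_GENERALIZATION_WEIGHT: 0.15})
# print(evaluation["metricsAverageWeight"])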
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the RPC Framework Core's implementation of the Base interface."""
import logging
import random
import time
import unittest
from grpc.framework.core import implementations
from grpc.framework.interfaces.base import utilities
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.base import test_cases
from tests.unit.framework.interfaces.base import test_interfaces
class _Implementation(test_interfaces.Implementation):
def __init__(self):
self._invocation_initial_metadata = object()
self._service_initial_metadata = object()
self._invocation_terminal_metadata = object()
self._service_terminal_metadata = object()
def instantiate(self, serializations, servicer):
invocation = implementations.invocation_end_link()
service = implementations.service_end_link(
servicer, test_constants.DEFAULT_TIMEOUT,
test_constants.MAXIMUM_TIMEOUT)
invocation.join_link(service)
service.join_link(invocation)
return invocation, service, None
def destantiate(self, memo):
pass
def invocation_initial_metadata(self):
return self._invocation_initial_metadata
def service_initial_metadata(self):
return self._service_initial_metadata
def invocation_completion(self):
return utilities.completion(self._invocation_terminal_metadata, None, None)
def service_completion(self):
return utilities.completion(self._service_terminal_metadata, None, None)
def metadata_transmitted(self, original_metadata, transmitted_metadata):
return transmitted_metadata is original_metadata
def completion_transmitted(self, original_completion, transmitted_completion):
return (
(original_completion.terminal_metadata is
transmitted_completion.terminal_metadata) and
original_completion.code is transmitted_completion.code and
original_completion.message is transmitted_completion.message
)
def load_tests(loader, tests, pattern):
return unittest.TestSuite(
tests=tuple(
loader.loadTestsFromTestCase(test_case_class)
for test_case_class in test_cases.test_cases(_Implementation())))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
import tensorflow as tf
from methods.dpf import DPF
from utils.data_utils import load_data, noisyfy_data, make_batch_iterator, remove_state
from utils.exp_utils import get_default_hyperparams
def train_dpf(task='nav01', data_path='../data/100s', model_path='../models/tmp', plot=False):
# load training data and add noise
train_data = load_data(data_path=data_path, filename=task + '_train')
noisy_train_data = noisyfy_data(train_data)
# reset tensorflow graph
tf.compat.v1.reset_default_graph()
# instantiate method
hyperparams = get_default_hyperparams()
method = DPF(**hyperparams['global'])
with tf.compat.v1.Session() as session:
# train method and save result in model_path
method.fit(session, noisy_train_data, model_path, **hyperparams['train'], plot_task=task, plot=plot)
def test_dpf(task='nav01', data_path='../data/100s', model_path='../models/tmp'):
# load test data
test_data = load_data(data_path=data_path, filename=task + '_test')
noisy_test_data = noisyfy_data(test_data)
test_batch_iterator = make_batch_iterator(noisy_test_data, seq_len=50)
# reset tensorflow graph
tf.compat.v1.reset_default_graph()
# instantiate method
hyperparams = get_default_hyperparams()
method = DPF(**hyperparams['global'])
    with tf.compat.v1.Session() as session:
# load method and apply to new data
method.load(session, model_path)
for i in range(10):
test_batch = next(test_batch_iterator)
test_batch_input = remove_state(test_batch, provide_initial_state=False)
result = method.predict(session, test_batch_input, **hyperparams['test'])
if __name__ == '__main__':
train_dpf(plot=True)
test_dpf()
|
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
from collections import deque
class Solution(object):
def __init__(self):
self.queue = deque()
def read(self, buf, n):
"""
:type buf: Destination buffer (List[str])
:type n: Maximum number of characters to read (int)
:rtype: The number of characters read (int)
"""
idx = 0
while True:
buf4 = [''] * 4
count = read4(buf4)
            self.queue.extend(buf4[:count])  # only enqueue the characters actually read
count = min(len(self.queue), n - idx)
if not count:
return idx
for i in range(count):
buf[idx] = self.queue.popleft()
idx += 1
return idx
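# Hedged local-testing sketch: read4 is normally provided by the judge, so the stub
# below simulates it over an in-memory character list just to exercise Solution.read.
if __name__ == "__main__":
    _source = list("abcdefghij")
    def read4(buf4):
        n = 0
        while n < 4 and _source:
            buf4[n] = _source.pop(0)
            n += 1
        return n
    sol = Solution()
    out = [''] * 10
    print(sol.read(out, 6), ''.join(out[:6]))   # expected: 6 abcdef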
|
faq = """
***FREQUENTLY ASKED QUESTIONS***
**Can I ping or dm John ?**
Yes, feel free to ping or dm john, his response time depends on how busy he is, but he is a teddy bear, so you will get a response sooner or later
**Can I ping moderators role/admins ?**
Yes, but when it is really urgent, otherwise just ping a moderator or admin who is online or active in chat at that time.
**Where can I get a list of CTFs?**
Use the website `ctftime.org`, or use the `NullCTF bot`
**Can you please help me get my (INSTA|FB|STEAM...) account back ?**
No, reach out to the support team of the respective service/social network
**Can I send a DM to anyone in the server ?**
No, please ask that person if he/she is okay with DM's, before sending a DM
**Is there a place where JHD resources are stored?**
We have a repo in which the JHD_Bot dumps all the resources from the `#resources` channel.
link - https://github.com/Fumenoid/JHD_Resources
**Is Shiptoasting channel a NSFW channel ?**
NO!! That channel is only for tech memes and SFW image posting; we have marked it NSFW because some memes contain explicit language.
**How did you guys build the JHD_bot ? resources ?**
Really thankful to a guy named `jared` for his youtube tutorials & Discord, and to `Drillenissen` and `tsu_katana` too; it's an indirect shoutout to them, they don't know about it : P
also `Nullpxl's bot - NullCTF`, I have read its source code like thrice, it's super cool !
"""
channels = """
***Channels and their descriptions***
**INFO/NEWS**
`#welcome`
Welcome channel, for new members to verify themselves.
`#roles`
Pick up optional swag roles here from a reaction menu.
`#suggestions`
Channel where suggestions appear as polls! Cast your votes!
**Hangout channels**
`#general`
Channel For Rational Conversation / Conversation about Infosec / Cyber Security / Computer Programming / Computers
`#shiptoasting`
Channel for memes and other such nonsense. Strictly no actual NSFW content. If you have to question if it's allowed, it's not.
`#jh-memes`
Memes about the man, the myth, the legend, John Hammond
`#bot commands`
Come one come all! This is the one and only place to use the bots! Please use all bot commands here unless otherwise specified.
**Sources**
`#resources`
Drop your resources for CTFs, hacking, cyber security, program, networking, etc. here! Nothing that you shouldn't share please.
`#github`
Share GitHub projects here!
`#wargames-list`
List of ongoing wargames and archived CTFs.
`#ctf-writeups`
Channel to post writeups for past ctfs.
`#community-content`
Channel for the community to share their content related to cyber security: be it a blog post, videos or any other helpful resources.
"""
channels2 = """
***Channels and their descriptions***
**CTF/Wargames**
`#capture-the-flag`
A channel for discussing anything CTF that doesn't have a specific channel. PLEASE keep spoilers to DMs!
`#over-the-wire`
Channel for OverTheWire challenges. Please keep spoilers in DMs
`#tryhackme`
Channel for discussion of Try Hack Me http://tryhackme.com
`#hackthebox`
Request for help in DM, asking questions for HackTheBox, please do not post spoilers
`#blueteamlabs`
For discussion of the Blue Team Labs
`#archived_ctfs`
Archives of channels for past ctf competitions
`#pwn-training`
Challenges to practise your pwn skills on
**Hacking/Security**
`#beginners`
Channel for those new to hacking/security. Please be respectful with your questions/responses and DO NOT put down others for not having a level of knowledge similar to yours. This is a NEWB FRIENDLY ZONE
`#news`
Feel free to link news stories relevant to hacking, cybersecurity, ctfs, tech, etc. here.
`#web-hacking`
Browsers, web apps, and all that jazz.
`#osint-privacy`
Open Source Intelligence (OSINT) and privacy discussion and so forth. Do NOT dox anyone!
`#pwning-and-reversing`
Discussion that relates to binary exploitation and reverse engineering.
`#blue-ir-forensics`
Blue teaming, incident response and forensics discussion all goes here.
`#wireless-hacking`
Discussion about Wifi, RFID, forms of wireless hacking.
`#cryptography`
Discussion of crypto and associated maths-y stuff.
`#mobile-security`
Meddling with phones, tablets and portable devices can go here
`#hardware-hacking`
Stuff about attacking vulnerable physical hardware can go here
**Development**
`#programming`
Channel for anything programming related questions.
`#webdev`
Channel for anything webdev related questions.
`#certifications`
Discuss the certifications you have or want here. What they are. Which ones are useful. How to get them. etc. Do NOT ask for copyrighted content. Obtain it from legal sources.
`#katana-pwncat`
Discuss and coordinate work on Katana Modules/Pwncat here.
"""
url = """
**https://github.com/Fumenoid/JHDBot**
"""
|
from runs_plot import *
import sys
normal_run_data = "../../../experiments/runs/normal_run_data.csv"
normal_plot_path = "../plots/normal/"
thm10_run_data = "../../../experiments/runs/thm10_run_data.csv"
thm10_plot_path = "../plots/thm10/"
v_is_g_run_data = "../../../experiments/runs/v_is_g_run_data.csv"
v_is_g_plot_path = "../plots/v_is_g/"
ratio_plot_path = "../plots/ratio/"
accuracy_plot_path = "../plots/accuracy/"
def run_normal():
print("Run Normal")
init_clr9()
for t in range(T_MIN, T_MAX+1):
create_histogram(t, LB_DELTA_START, "LB, ", normal_run_data, normal_plot_path, condition_all_lb)
create_histogram(t, LB_DELTA_START, "LB (> 0), ", normal_run_data, normal_plot_path, condition_greater_than_zero)
create_histogram(t, UB_DELTA_START, "UB, ", normal_run_data, normal_plot_path, condition_all_ub)
create_histogram(t, UB_DELTA_START, "UB (g=v), ", normal_run_data, normal_plot_path, condition_g_equals_v)
def run_thm_10():
print("Run THM10")
init_theorem10()
for t in range(T_MIN, T_MAX+1):
create_histogram(t, LB_DELTA_START, "LB, ", thm10_run_data, thm10_plot_path, condition_all_lb)
create_histogram(t, LB_DELTA_START, "LB (> 0), ", thm10_run_data, thm10_plot_path, condition_greater_than_zero)
create_histogram(t, UB_DELTA_START, "UB, ", thm10_run_data, thm10_plot_path, condition_all_ub)
create_histogram(t, UB_DELTA_START, "UB (g=v), ", thm10_run_data, thm10_plot_path, condition_g_equals_v)
def run_v_is_g():
print("Run v is g")
init_clr9()
for t in range(T_MIN, T_MAX+1):
create_histogram(t, LB_DELTA_START, "LB, ", v_is_g_run_data, v_is_g_plot_path, condition_all_lb)
create_histogram(t, LB_DELTA_START, "LB (> 0), ", v_is_g_run_data, v_is_g_plot_path, condition_greater_than_zero)
create_histogram(t, UB_DELTA_START, "UB, ", v_is_g_run_data, v_is_g_plot_path, condition_all_ub)
create_histogram(t, UB_DELTA_START, "UB (g=v), ", v_is_g_run_data, v_is_g_plot_path, condition_g_equals_v)
def ratio_all():
ratio("All Data (Normalized)", normal_run_data, ratio_plot_path, 0, True, ratio_condition_all)
ratio("All Data And v is Gen (Normalized)", v_is_g_run_data, ratio_plot_path, 0, True, ratio_condition_v_equals_g_all)
for v in range(2, 10):
ratio("v = {} (Normalized)".format(v), normal_run_data, ratio_plot_path, v, True, ratio_condition_one_v)
ratio("v = {} And v is Gen(Normalized)".format(v), v_is_g_run_data, ratio_plot_path, v, True, ratio_condition_v_equals_g_one_v)
def run_accuracy():
print("Run Accuracy")
init_clr9()
#LOWER BOUND
create_accuracy2(LB_DELTA_START, normal_run_data, normal_run_data, accuracy_plot_path,
"Runs Lower Bound Accuracy", "All trials", r'$\rho(b,t)$', condition_all_lb, condition_greater_than_zero)
create_accuracy2(LB_DELTA_START, normal_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Lower Bound Accuracy (g is v)", "All trials", r"$g = v$", condition_all_lb, condition_all_lb, "green")
create_accuracy2(LB_DELTA_START, normal_run_data, normal_run_data, accuracy_plot_path,
"Runs Lower Bound Accuracy all vs binary", "All trials", "Binary", condition_all_lb, condition_binary_lb)
create_accuracy2(LB_DELTA_START, v_is_g_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Lower Bound Accuracy all (g is v) vs binary (g is v)", r"All ($g = v$)", r"Binary ($g = v$)", condition_all_lb, condition_binary_lb)
create_accuracy2(LB_DELTA_START, normal_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Lower Bound Accuracy binary vs binary where v is g", "Binary", r"binary ($v = g$)", condition_binary_lb, condition_binary_lb, "green")
#UPPER BOUND
create_accuracy2(UB_DELTA_START, normal_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Upper Bound Accuracy", "All trials", r"$g = v$", condition_all_ub, condition_g_equals_v, "green")
create_accuracy2(UB_DELTA_START, normal_run_data, normal_run_data, accuracy_plot_path,
"Runs Upper Bound Accuracy all s binary", "All trials", "Binary", condition_all_ub, condition_binary_ub)
create_accuracy2(UB_DELTA_START, v_is_g_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Upper Bound Accuracy all (g is v) s binary (g is v)", r"All ($g = v$)", r"Binary ($g = v$)", condition_all_ub, condition_binary_ub)
create_accuracy2(UB_DELTA_START, normal_run_data, v_is_g_run_data, accuracy_plot_path,
"Runs Upper Bound Accuracy binary vs binary where v is g", "Binary", r"Binary ($v = g$)", condition_binary_ub, condition_binary_ub, "green")
functions = {
'normal': run_normal,
'thm10': run_thm_10,
'v_is_g': run_v_is_g,
'ratio': ratio_all,
'accuracy': run_accuracy,
}
if __name__ == '__main__':
func = functions[sys.argv[1]]
func()
sys.exit(0)
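# Hedged usage note: invoke as e.g. `python <this script> normal` (or thm10, v_is_g,
# ratio, accuracy); the CSV inputs and plot output directories are the relative paths
# defined at the top of this file.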
|
__author__ = 'amannhold'
|
__author__ = 's7a'
# All imports
from parser import Parser
from breaker import Breaker
import re
# The Syntactic simplification class
class SyntacticSimplifier:
# Constructor for the Syntactic Simplifier
def __init__(self):
self.parser = Parser()
self.breaker = Breaker()
# Simplify content
def simplify(self, content, plot_tree=False, detailed=False):
results = []
        sentences = re.split(r'\.|!|\?', content)
for sentence in sentences:
sentence += "."
if sentence == ".":
continue
parse_trees = self.parser.parse(sentence, plot_tree)
for parse_tree in parse_trees:
broken_string = self.breaker.break_tree(parse_tree, detailed)
results.append({
"tree": str(parse_tree),
"broken_string": broken_string
})
return results
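# Hedged usage sketch (assumes Parser/Breaker and any external parser they wrap are
# already configured for this project):
# simplifier = SyntacticSimplifier()
# for result in simplifier.simplify("The cat, which was hungry, sat on the mat."):
#     print(result["broken_string"])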
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSubscriptionResult',
'AwaitableGetSubscriptionResult',
'get_subscription',
]
@pulumi.output_type
class GetSubscriptionResult:
"""
Subscription details.
"""
def __init__(__self__, allow_tracing=None, created_date=None, display_name=None, end_date=None, expiration_date=None, id=None, name=None, notification_date=None, owner_id=None, primary_key=None, scope=None, secondary_key=None, start_date=None, state=None, state_comment=None, type=None):
if allow_tracing and not isinstance(allow_tracing, bool):
raise TypeError("Expected argument 'allow_tracing' to be a bool")
pulumi.set(__self__, "allow_tracing", allow_tracing)
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if end_date and not isinstance(end_date, str):
raise TypeError("Expected argument 'end_date' to be a str")
pulumi.set(__self__, "end_date", end_date)
if expiration_date and not isinstance(expiration_date, str):
raise TypeError("Expected argument 'expiration_date' to be a str")
pulumi.set(__self__, "expiration_date", expiration_date)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notification_date and not isinstance(notification_date, str):
raise TypeError("Expected argument 'notification_date' to be a str")
pulumi.set(__self__, "notification_date", notification_date)
if owner_id and not isinstance(owner_id, str):
raise TypeError("Expected argument 'owner_id' to be a str")
pulumi.set(__self__, "owner_id", owner_id)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
if start_date and not isinstance(start_date, str):
raise TypeError("Expected argument 'start_date' to be a str")
pulumi.set(__self__, "start_date", start_date)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if state_comment and not isinstance(state_comment, str):
raise TypeError("Expected argument 'state_comment' to be a str")
pulumi.set(__self__, "state_comment", state_comment)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="allowTracing")
def allow_tracing(self) -> Optional[bool]:
"""
Determines whether tracing is enabled
"""
return pulumi.get(self, "allow_tracing")
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
Subscription creation date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The name of the subscription, or null if the subscription has no name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="endDate")
def end_date(self) -> Optional[str]:
"""
Date when subscription was cancelled or expired. The setting is for audit purposes only and the subscription is not automatically cancelled. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "end_date")
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> Optional[str]:
"""
Subscription expiration date. The setting is for audit purposes only and the subscription is not automatically expired. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationDate")
def notification_date(self) -> Optional[str]:
"""
Upcoming subscription expiration notification date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "notification_date")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[str]:
"""
The user resource identifier of the subscription owner. The value is a valid relative URL in the format of /users/{userId} where {userId} is a user identifier.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
Subscription primary key.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter
def scope(self) -> str:
"""
Scope like /products/{productId} or /apis or /apis/{apiId}.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
Subscription secondary key.
"""
return pulumi.get(self, "secondary_key")
@property
@pulumi.getter(name="startDate")
def start_date(self) -> Optional[str]:
"""
Subscription activation date. The setting is for audit purposes only and the subscription is not automatically activated. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "start_date")
@property
@pulumi.getter
def state(self) -> str:
"""
Subscription state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateComment")
def state_comment(self) -> Optional[str]:
"""
Optional subscription comment added by an administrator.
"""
return pulumi.get(self, "state_comment")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSubscriptionResult(GetSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubscriptionResult(
allow_tracing=self.allow_tracing,
created_date=self.created_date,
display_name=self.display_name,
end_date=self.end_date,
expiration_date=self.expiration_date,
id=self.id,
name=self.name,
notification_date=self.notification_date,
owner_id=self.owner_id,
primary_key=self.primary_key,
scope=self.scope,
secondary_key=self.secondary_key,
start_date=self.start_date,
state=self.state,
state_comment=self.state_comment,
type=self.type)
def get_subscription(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
sid: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult:
"""
Subscription details.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['sid'] = sid
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20180601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value
return AwaitableGetSubscriptionResult(
allow_tracing=__ret__.allow_tracing,
created_date=__ret__.created_date,
display_name=__ret__.display_name,
end_date=__ret__.end_date,
expiration_date=__ret__.expiration_date,
id=__ret__.id,
name=__ret__.name,
notification_date=__ret__.notification_date,
owner_id=__ret__.owner_id,
primary_key=__ret__.primary_key,
scope=__ret__.scope,
secondary_key=__ret__.secondary_key,
start_date=__ret__.start_date,
state=__ret__.state,
state_comment=__ret__.state_comment,
type=__ret__.type)
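# A hypothetical usage sketch for the generated lookup above; the resource group,
# service name, and sid values are placeholders, not real Azure resources, and the
# function is only meaningful inside a Pulumi program.
def _example_lookup():
    result = get_subscription(resource_group_name="my-resource-group",
                              service_name="my-apim-service",
                              sid="my-subscription-sid")
    pulumi.export("subscriptionState", result.state)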
|
# import os
# import pythoncom
#
# from django.core.mail import send_mail
# from django.db.models.signals import post_init, post_save
# from django.dispatch import receiver
# from docx.shared import Mm
# from docxtpl import DocxTemplate, InlineImage
#
# from contactApp.models import Resume
# from docx2pdf import convert
#
#
# # Trigger
# @receiver(post_init, sender=Resume)
# def before_save_resume(sender, instance, **kwargs):
#     """Trigger: post_init fires before the admin clicks "Save"; post_save fires after the admin clicks "Save".
#     In @receiver, the first argument is the signal type and the second is the model class being watched."""
#     instance.__original_status = instance.status  # record the status before the click
#
#
# @receiver(post_save, sender=Resume)
# def post_save_resume(sender, instance, **kwargs):
#     """Trigger: runs after the admin marks the interview result as "passed"; the first @receiver argument means it fires after the admin clicks "Save"."""
#     send_status = 0
#     # print(instance.__original_status)  # status before the click
#     # print(instance.status)  # status after the click
#     email = instance.email  # the applicant's email address
#     EMAIL_FROM = 'z1915270314@163.com'  # sender's address
#     if instance.__original_status == 1 and instance.status == 2:
#         email_title = 'Hengda Technology Co., Ltd. first-round interview result'
#         email_body = 'Congratulations on passing our first-round interview. Please come to the company this Saturday for the second interview!'
# send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
#
#         # generate the Word document dynamically
#         template_path = os.getcwd() + "/media/contact/recruit.docx"  # template file path
#         print(template_path)
#         # load the template
#         template = DocxTemplate(template_path)
#         # get the current field values from the instance
# name = instance.name
# personID = instance.personID
# sex = instance.sex
# email = instance.email
# birth = instance.birth
# edu = instance.edu
# school = instance.school
# major = instance.major
# position = instance.position
# experience = instance.experience
# photo = instance.photo
#
# context = {
# 'name': name,
# 'personID': personID,
# 'sex': sex,
# 'email': email,
# 'birth': birth,
# 'edu': edu,
# 'school': school,
# 'major': major,
# 'position': position,
# 'experience': experience,
# 'photo': InlineImage(template, photo, width=Mm(30), height=Mm(40)),
# }
# template.render(context)
#         # path where the generated file is stored
# filename = "%s/media/contact/recruit/%s_%d.docx" % (os.getcwd(), instance.name, instance.id)
# template.save(filename)
#
#         # call CoInitialize before creating the pdf document
#         pythoncom.CoInitialize()
#         # convert the Word file to pdf
#         if os.path.exists(filename):  # check that the Word file exists
#             pdf_filename = "%s/media/contact/recruit/%s_%d.pdf" % (os.getcwd(), instance.name, instance.id)
#             convert(filename, pdf_filename)  # convert Word to pdf
#         else:
#             print("Word file does not exist")
#
# elif instance.__original_status == 1 and instance.status == 3:
#         email_title = 'Hengda Technology Co., Ltd. first-round interview result'
#         email_body = 'Unfortunately, you did not pass our first-round interview. Thank you for your interest!'
# send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
# print(send_status)
|
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
import h5py
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn import decomposition
from sklearn.preprocessing import scale
from skimage.transform import rescale
import readMnist
#temp = mnist.read('train', '/home/crob/Downloads/mnist/')
f = h5py.File('mnist_train.h5','r')
mnist_train = f['data'][:]
mnist_train_labels = f['labels'][:]
f.close()
f = h5py.File('mnist_test.h5','r')
mnist_test = f['data'][:]
mnist_test_labels = f['labels'][:]
f.close()
#im = np.ndarray
#for img in temp:
# im.append(img)
np.random.seed(69)
digits = load_digits()
data = scale(digits.data)
#n_samples, n_features = data.shape
n_samples = len(mnist_train)
n_features = np.size(mnist_train[1])
#n_digits = len(np.unique(digits.target))
n_digits = 10
#labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
# Insert code for PCA
n_comp=5
pca = decomposition.PCA(n_components=n_comp)
out = pca.fit(mnist_train)
xtrain = pca.transform(mnist_train)
b = pca.components_.reshape((n_comp, 28, 28))
print(np.shape(b))
# estimator = KMeans(init='k-means++', n_clusters=n_digits, n_init=10) #name="k-means++", data=data)
# t0 = time()
# estimator.fit(temp)
# print('Time to complete %i seconds' % (time() - t0))
# print(estimator.labels_)
# print(len(estimator.cluster_centers_))
# a = estimator.cluster_centers_[1,:]
# b = np.reshape(estimator.cluster_centers_,(n_digits,28,28))
# #cent = np.reshape(a,(28,28))
# #imgplot = plt.imshow(cent)
# np.shape(b)
for i in range(len(b)):
    plt.subplot(5, len(b) // 5, i + 1)
    plt.tick_params(
        axis='both',
        which='both',
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labelleft=False)
imgp=plt.imshow(b[i,:,:])
plt.set_cmap('gray')
plt.suptitle('%i PCA Components of MNIST Dataset' % len(b))
plt.show()
example = mnist_test[range(5), :]
ex = pca.inverse_transform(pca.transform(example))
print(np.shape(ex))
example = ex[0].reshape(28, 28)
print(np.shape(example))
for i in range(len(b)):
    plt.subplot(5, len(b) // 5, i + 1)
    plt.tick_params(
        axis='both',
        which='both',
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labelleft=False)
c = np.dot(b[i,:,:],example)
imgp=plt.imshow(c)
plt.set_cmap('gray')
plt.suptitle('%i PCA Component Projections of an MNIST Test Digit' % len(b))
plt.show()
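# A small follow-up check on the PCA fit above: how much of the variance the
# first n_comp components capture (the printed values depend entirely on the
# loaded data; this is only a sanity check, not part of the original analysis).
print("explained variance ratios:", pca.explained_variance_ratio_)
print("total explained variance: %.3f" % pca.explained_variance_ratio_.sum())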
|
from nq.filters import Filter
from nq.flags import Flags
class recurse_until:
pass
def _common_iterable(obj):
if isinstance(obj, dict):
return obj
else:
return (index for index, value in enumerate(obj))
def _is_iterable(obj):
return isinstance(obj, (list, tuple, set, dict))
def logger(enabled, depth):
def log_wrapper(message, *args, **kwargs):
if enabled:
print(" " * depth * 2, "[nq]", message, *args, **kwargs)
return log_wrapper
def nested_query(iterable, *path_parts, **kwargs):
results = []
if not _is_iterable(iterable):
return results
depth = kwargs["depth"] if "depth" in kwargs else 0
verbose = kwargs["verbose"] if "verbose" in kwargs else False
log = logger(verbose, depth)
flags = []
if "flags" in kwargs:
flags = kwargs["flags"]
if not isinstance(flags, list):
flags = [flags]
if len(path_parts) > 0:
path_part = path_parts[0]
log("Searching with path part", path_part, "on iterable", iterable)
if isinstance(path_part, Filter):
for key in _common_iterable(iterable):
log("Applying filter", path_part, "on item", iterable[key])
if path_part.check_filter(key, iterable[key], depth):
if len(path_parts) == 1:
results.append(iterable[key])
else:
results += nested_query(
iterable[key],
*path_parts[1::],
depth=depth + 1,
verbose=verbose
)
elif path_part is recurse_until:
if len(path_parts) > 1:
test_next_part = nested_query(
iterable, *path_parts[1::], depth=depth + 1, verbose=verbose
)
if type(iterable) in (list, dict):
for key in _common_iterable(iterable):
if len(path_parts) == 1:
results.append(iterable[key])
else:
results += nested_query(
iterable[key],
*path_parts,
depth=depth + 1,
verbose=verbose
)
if test_next_part:
results += test_next_part
else:
found_item = False
try:
iterable[path_part]
found_item = True
except Exception:
pass
if found_item:
if len(path_parts) == 1:
results.append(iterable[path_part])
elif type(iterable[path_part]) in (tuple, dict, list):
results += nested_query(
iterable[path_part],
*path_parts[1::],
depth=depth + 1,
verbose=verbose
)
log("Found results", results)
if depth == 0 and len(results) == 1:
return results[0]
else:
if Flags.FIRST_RESULT in flags:
return results[0]
elif Flags.LAST_RESULT in flags:
return results[-1]
else:
return results
else:
return iterable
class Tree:
def __init__(self, data):
self.data = data
def query(self, *path_parts, **kwargs):
return nested_query(self.data, *path_parts, **kwargs)
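# A minimal usage sketch of nested_query/Tree on plain nested data; the sample
# dictionary below is hypothetical.
if __name__ == "__main__":
    data = {"servers": [{"name": "alpha", "port": 8080}, {"name": "beta", "port": 9090}]}
    tree = Tree(data)
    print(tree.query("servers", 0, "name"))          # -> "alpha"
    print(nested_query(data, "servers", 1, "port"))  # -> 9090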
|
# Generated by Django 2.0.9 on 2018-10-28 09:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_department'),
]
operations = [
migrations.AlterField(
model_name='user',
name='department',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.Department'),
),
]
|
n = int(input('Enter a number to see its multiplication table: '))
for x in range(1,11):
print('{} x {} = {}'.format(n, x, n*x))
|
__all__ = [
'gbufferedimage',
'gcolor',
'gevents',
'gmath',
'gobjects',
'gwindow',
]
|
import argparse
import json
import matplotlib.pyplot as plt
from pathlib import Path
import joblib
import numpy as np
import rlkit.torch.pytorch_util as ptu
from rlkit.core import logger
from rlkit.torch.core import PyTorchModule
from rlkit.torch.mpc.collocation.collocation_mpc_controller import (
SlsqpCMC,
GradientCMC,
StateGCMC,
LBfgsBCMC,
LBfgsBStateOnlyCMC,
TdmToImplicitModel,
)
class TdmPolicyToTimeInvariantGoalReachingPolicy(PyTorchModule):
def __init__(self, tdm_policy, env, num_steps_left):
super().__init__()
self.tdm_policy = tdm_policy
self.env = env
self.num_steps_left = num_steps_left
def forward(self, states, next_states):
num_steps_left = ptu.np_to_var(
self.num_steps_left * np.ones((states.shape[0], 1))
)
goals = self.env.convert_obs_to_goals(next_states)
return self.tdm_policy(
observations=states,
goals=goals,
num_steps_left=num_steps_left,
)[0]
class TrueModelToImplicitModel(PyTorchModule):
def __init__(self, env):
super().__init__()
self.env = env
def forward(self, states, actions, next_states):
state = ptu.get_numpy(states[0])
action = ptu.get_numpy(actions[0])
next_state = ptu.get_numpy(next_states[0])
true_next_state = self.env.true_model(state, action)
return -((next_state - true_next_state)**2).sum()
fig, (ax1, ax2) = plt.subplots(1, 2)
def debug(env, obs, agent_info):
best_obs_seq = agent_info['best_obs_seq']
best_action_seq = agent_info['best_action_seq']
real_obs_seq = env.wrapped_env.true_states(
obs, best_action_seq
)
ax1.clear()
env.wrapped_env.plot_trajectory(
ax1,
np.array(best_obs_seq),
np.array(best_action_seq),
goal=env.wrapped_env._target_position,
)
ax1.set_title("imagined")
ax2.clear()
env.wrapped_env.plot_trajectory(
ax2,
np.array(real_obs_seq),
np.array(best_action_seq),
goal=env.wrapped_env._target_position,
)
ax2.set_title("real")
plt.draw()
plt.pause(0.001)
def rollout(env, agent, max_path_length=np.inf, animated=False, tick=False):
observations = []
actions = []
rewards = []
terminals = []
agent_infos = []
env_infos = []
o = env.reset()
agent.reset()
next_o = None
path_length = 0
if animated:
env.render()
while path_length < max_path_length:
a, agent_info = agent.get_action(o)
debug(env, o, agent_info)
next_o, r, d, env_info = env.step(a)
observations.append(o)
rewards.append(r)
terminals.append(d)
actions.append(a)
agent_infos.append(agent_info)
env_infos.append(env_info)
path_length += 1
if d:
break
o = next_o
if animated:
env.render()
if tick:
import ipdb; ipdb.set_trace()
actions = np.array(actions)
if len(actions.shape) == 1:
actions = np.expand_dims(actions, 1)
observations = np.array(observations)
if len(observations.shape) == 1:
observations = np.expand_dims(observations, 1)
next_o = np.array([next_o])
next_observations = np.vstack(
(
observations[1:, :],
np.expand_dims(next_o, 0)
)
)
return dict(
observations=observations,
actions=actions,
rewards=np.array(rewards).reshape(-1, 1),
next_observations=next_observations,
terminals=np.array(terminals).reshape(-1, 1),
agent_infos=agent_infos,
env_infos=env_infos,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=100,
help='Max length of rollout')
parser.add_argument('--ph', type=int, default=3,
help='planning horizon')
parser.add_argument('--nrolls', type=int, default=1,
help='Number of rollout per eval')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--lm', type=float, default=1,
help='Lagrange Multiplier (before division by reward scale)')
parser.add_argument('--hide', action='store_true')
parser.add_argument('--pause', action='store_true')
parser.add_argument('--tick', action='store_true')
parser.add_argument('--justsim', action='store_true')
parser.add_argument('--npath', type=int, default=100)
parser.add_argument('--tau', type=int, default=0)
parser.add_argument('--opt', type=str, default='lbfgs')
args = parser.parse_args()
if args.pause:
import ipdb; ipdb.set_trace()
variant_path = Path(args.file).parents[0] / 'variant.json'
variant = json.load(variant_path.open())
if 'sac_tdm_kwargs' in variant:
reward_scale = variant['sac_tdm_kwargs']['base_kwargs']['reward_scale']
else:
reward_scale = variant['td3_tdm_kwargs']['base_kwargs']['reward_scale']
data = joblib.load(args.file)
env = data['env']
qf = data['qf']
vf = data['vf']
policy = data['policy']
# ptu.set_gpu_mode(True)
# qf.to(ptu.device)
implicit_model = TdmToImplicitModel(
env,
qf,
tau=args.tau,
)
# implicit_model = TrueModelToImplicitModel(env)
lagrange_multiplier = args.lm / reward_scale
# lagrange_multiplier = 10
planning_horizon = args.ph
goal_slice = env.ob_to_goal_slice
multitask_goal_slice = slice(None)
optimizer = args.opt
print("Optimizer choice: ", optimizer)
print("lagrange multiplier: ", lagrange_multiplier)
print("goal slice: ", goal_slice)
print("multitask goal slice: ", multitask_goal_slice)
if optimizer == 'slsqp':
policy = SlsqpCMC(
implicit_model,
env,
goal_slice=goal_slice,
multitask_goal_slice=multitask_goal_slice,
planning_horizon=planning_horizon,
# use_implicit_model_gradient=True,
solver_kwargs={
'ftol': 1e-2,
'maxiter': 100,
},
)
elif optimizer == 'gradient':
policy = GradientCMC(
implicit_model,
env,
goal_slice=goal_slice,
multitask_goal_slice=multitask_goal_slice,
planning_horizon=planning_horizon,
lagrange_multiplier=lagrange_multiplier,
num_grad_steps=100,
num_particles=128,
warm_start=False,
)
elif optimizer == 'state':
policy = StateGCMC(
implicit_model,
env,
goal_slice=goal_slice,
multitask_goal_slice=multitask_goal_slice,
planning_horizon=planning_horizon,
lagrange_multiplier=lagrange_multiplier,
num_grad_steps=100,
num_particles=128,
warm_start=False,
)
elif optimizer == 'lbfgs':
policy = LBfgsBCMC(
implicit_model,
env,
tdm_policy=data['trained_policy'],
goal_slice=goal_slice,
multitask_goal_slice=multitask_goal_slice,
lagrange_multipler=lagrange_multiplier,
planning_horizon=planning_horizon,
replan_every_time_step=True,
only_use_terminal_env_loss=True,
# only_use_terminal_env_loss=False,
solver_kwargs={
'factr': 1e12,
},
)
elif optimizer == 'slbfgs':
policy = LBfgsBStateOnlyCMC(
vf,
data['trained_policy'],
env,
goal_slice=goal_slice,
multitask_goal_slice=multitask_goal_slice,
lagrange_multipler=lagrange_multiplier,
planning_horizon=planning_horizon,
replan_every_time_step=True,
only_use_terminal_env_loss=False,
solver_kwargs={
'factr': 1e10,
},
)
else:
raise ValueError("Unknown optimizer type: {}".format(optimizer))
paths = []
while True:
env.set_goal(env.sample_goal_for_rollout())
paths.append(rollout(
env,
policy,
max_path_length=args.H,
animated=not args.hide,
tick=args.tick,
))
env.log_diagnostics(paths)
logger.dump_tabular()
|
# TODOs:
# - use TensorArray as training buffers instead of FIFOQueue
# - recombine without blowing up numbers (should fit in 64bit word)
# - gradient computation + SGD
# - compare performance if native type is float64 instead of int64
# - performance on GPU
# - better cache strategy?
# - does it make sense to cache additions, subtractions, etc as well?
# - make truncation optional; should work even with cached results
# - lazy mods
# - sigmoid() is showing some unused substructures in TensorBoard; why?
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from .tensor.int100 import Int100Tensor
from .protocol.protocol import get_protocol
# Idea is to simulate five different players on different devices.
# Hopefully Tensorflow can take care of (optimising?) networking like this.
#
# 64 bit CRT
# - 5 components for modulus ~120 bits (encoding 16.32)
#
# BITPRECISION_INTEGRAL = 16
# BITPRECISION_FRACTIONAL = 30
# INT_TYPE = tf.int64
# FLOAT_TYPE = tf.float64
# TRUNCATION_GAP = 20
# m = [89702869, 78489023, 69973811, 70736797, 79637461]
# M = 2775323292128270996149412858586749843569 # == prod(m)
# lambdas = [
# 875825745388370376486957843033325692983,
# 2472444909335399274510299476273538963924,
# 394981838173825602426191615931186905822,
# 2769522470404025199908727961712750149119,
# 1813194913083192535116061678809447818860
# ]
# *** NOTE ***
# keeping mod operations in-lined here for simplicity;
# we should do them lazily
# TODO[Morten] how to support this one with new abstractions?
def concat(ys):
# FIXME[Morten] add support for PrivateTensors as well
def helper(tensors):
# as an example, assume shape is [[(1000,2); 10]; 3]
tensors = tf.concat(tensors, axis=1)
# now shape is (10,3000,2)
tensors = tf.split(tensors, 10, axis=0)
# now shape is [(1,3000,2); 10]
tensors = [ tf.reshape(tensor, tensor.shape[1:]) for tensor in tensors ]
# now shape is [(3000,2); 10]
return tensors
with tf.name_scope('concat'):
y0s, y1s = zip(*[ y.unmasked.unwrapped for y in ys ])
bs, b0s, b1s, beta_on_0s, beta_on_1s = zip(*[ y.unwrapped for y in ys ])
with tf.device(get_protocol().crypto_producer.device_name):
b = helper(bs)
with tf.device(get_protocol().server_0.device_name):
y0 = helper(y0s)
b0 = helper(b0s)
beta_on_0 = helper(beta_on_0s)
with tf.device(get_protocol().server_1.device_name):
y1 = helper(y1s)
b1 = helper(b1s)
beta_on_1 = helper(beta_on_1s)
y = PrivateTensor(y0, y1)
y_masked = MaskedPrivateTensor(y, b, b0, b1, beta_on_0, beta_on_1)
return y_masked
# TODO[Morten] how to support this one with new abstractions?
def split(y, num_splits):
assert isinstance(y, MaskedPrivateTensor)
# FIXME[Morten] add support for PrivateTensors as well
y0, y1 = y.unmasked.unwrapped
b, b0, b1, beta_on_0, beta_on_1 = y.unwrapped
def helper(tensors):
        # FIXME[Morten] all this reshaping seems to incur a big hit on (at least) graph building
# as an example, assume shape is [(3000,2); 10]
tensors = tf.stack(tensors)
# now shape is (10,3000,2)
tensors = tf.split(tensors, num_splits, axis=1)
# now shape is [(10,30,2); 100] if num_splits == 100
tensors = [
[ tf.reshape(xi, xi.shape[1:]) for xi in tf.split(tensor, 10, axis=0) ]
for tensor in tensors
]
# now shape is [[(30,2); 10]; 100]
return tensors
with tf.name_scope('split'):
with tf.device(get_protocol().crypto_producer.device_name):
bs = helper(b)
with tf.device(get_protocol().server_0.device_name):
y0s = helper(y0)
b0s = helper(b0)
beta_on_0s = helper(beta_on_0)
with tf.device(get_protocol().server_1.device_name):
y1s = helper(y1)
b1s = helper(b1)
beta_on_1s = helper(beta_on_1)
tensors = []
for y0, y1, b, b0, b1, beta_on_0, beta_on_1 in zip(y0s, y1s, bs, b0s, b1s, beta_on_0s, beta_on_1s):
y = PrivateTensor(y0,y1)
y_masked = MaskedPrivateTensor(y, b, b0, b1, beta_on_0, beta_on_1)
tensors.append(y_masked)
return tensors
def scale(x, k, apply_encoding=None):
assert type(x) in [PrivateTensor], type(x)
assert type(k) in [int, float], type(k)
x0, x1 = x.unwrapped
assert type(x0) in [Int100Tensor], type(x0)
assert type(x1) in [Int100Tensor], type(x1)
if apply_encoding is None:
# determine automatically
apply_encoding = type(k) is float
c = np.array([k])
if apply_encoding: c = encode(c)
with tf.name_scope('scale'):
with tf.device(get_protocol().server_0.device_name):
y0 = x0 * c
with tf.device(get_protocol().server_1.device_name):
y1 = x1 * c
y = PrivateTensor(y0, y1)
if apply_encoding:
y = truncate(y)
return y
def local_mask(x):
assert isinstance(x, Tensor), type(x)
with tf.name_scope('local_mask'):
x0, x1 = share(x.unwrapped)
a = sample(x.shape)
a0, a1 = share(a)
alpha = crt_sub(x.unwrapped, a)
return MaskedPrivateTensor(PrivateTensor(x0, x1), a, a0, a1, alpha, alpha)
global_cache_updators = []
def cache(x, initializers=None, updators=None):
if updators is None:
updators = global_cache_updators
# TODO[Morten] use `initializers`
node_key = ('cache', x)
cached = _nodes.get(node_key, None)
if cached is None:
if isinstance(x, PrivateTensor):
x0, x1 = x.unwrapped
with tf.name_scope('cache'):
with tf.device(get_protocol().server_0.device_name):
cached_x0 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(x0, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_x0, x0) ])
with tf.device(get_protocol().server_1.device_name):
cached_x1 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(x1, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_x1, x1) ])
# TODO[Morten] wrap PrivateTensor around var.read_value() instead to ensure updated values?
cached = PrivateTensor(cached_x0, cached_x1)
_nodes[node_key] = cached
elif isinstance(x, MaskedPrivateTensor):
a, a0, a1, alpha_on_0, alpha_on_1 = x.unwrapped
cached_x = cache(x.unmasked, initializers, updators)
with tf.name_scope('cache'):
with tf.device(get_protocol().crypto_producer.device_name):
cached_a = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(a, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_a, a) ])
with tf.device(get_protocol().server_0.device_name):
cached_a0 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(a0, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_a0, a0) ])
cached_alpha_on_0 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(alpha_on_0, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_alpha_on_0, alpha_on_0) ])
with tf.device(get_protocol().server_1.device_name):
cached_a1 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(a1, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_a1, a1) ])
cached_alpha_on_1 = [ tf.Variable(tf.random_uniform(shape=vi.shape, maxval=mi, dtype=INT_TYPE), dtype=INT_TYPE) for vi, mi in zip(alpha_on_1, m) ]
updators.append([ tf.assign(var, val) for var, val in zip(cached_alpha_on_1, alpha_on_1) ])
# TODO[Morten] wrap MaskedPrivateTensor around var.read_value() instead to ensure updated values?
cached = MaskedPrivateTensor(
cached_x,
cached_a,
cached_a0,
cached_a1,
cached_alpha_on_0,
cached_alpha_on_1
)
_nodes[node_key] = cached
else:
raise AssertionError("'x' not of supported type")
return cached
def encode_input(vars_and_values):
if not isinstance(vars_and_values, list):
vars_and_values = [vars_and_values]
result = dict()
for input_x, X in vars_and_values:
result.update( (input_xi, Xi) for input_xi, Xi in zip(input_x, decompose(encode(X))) )
return result
def decode_output(value):
return decode(recombine(value))
def pyfunc_hack(func, x, shape=None):
""" Essentially calls `tf.py_func(func, [x])` but supports
returning (lists of) ints as needed by e.g. encoding. """
# TODO[Morten]
# tf.py_func currently has some limitations that mean we can't
# directly apply it to e.g. encoding and decomposition;
# concretely, it doesn't allow passing of ints.
# this hack gets around that limitation by converting to
# floats when exiting pyfunc, as well as some packing to
# account for working with lists of tensors.
# patch function to apply packing before returning
def patched_func(x):
y = func(x)
y = pyfunc_hack_preexit(y)
return y
# applied just before exiting tf.py_func
def pyfunc_hack_preexit(x):
assert type(x) in [tuple, list]
for xi in x: assert type(xi) == np.ndarray
# convert all values to floats; at least for small
# ints this should give correct results
x = [ xi.astype(float) for xi in x ]
# pack list into single tensor
x = np.array(x)
return x
# applied just after exiting tf.py_func
def pyfunc_hack_postexit(x, component_shape, num_components=10):
assert type(x) == tf.Tensor
# unpack tensor into list
x = [
tf.reshape(xi, component_shape)
for xi in tf.split(x, num_components)
]
# convert to ints
x = [ tf.cast(xi, dtype=tf.int32) for xi in x ]
return x
y = tf.py_func(patched_func, [x], tf.double)
y = pyfunc_hack_postexit(y, shape or x.shape)
return y
def encode_and_decompose(x, shape=None):
func = lambda x: decompose(encode(x))
return pyfunc_hack(func, x, shape=shape)
def recombine_and_decode(x):
return decode(recombine(x))
|
class Solution:
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
if x < 10:
return True
temp = x
rem = 0
while temp > 0:
rem = rem*10 + temp % 10
temp = temp//10
if rem == x:
return True
else:
return False
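# Quick sanity checks for the digit-reversal approach; the test values below are
# illustrative only.
if __name__ == "__main__":
    solution = Solution()
    assert solution.isPalindrome(121) is True
    assert solution.isPalindrome(-121) is False
    assert solution.isPalindrome(10) is False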
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
OPTIONS_SCHEMA = {
"title": "Cinder Protection Options",
"type": "object",
"properties": {
"backup_name": {
"type": "string",
"title": "Backup Name",
"description": "The name of the backup.",
"default": None
},
"backup_mode": {
"type": "string",
"title": "Backup Mode",
"description": "The backup mode.",
"enum": ["full", "incremental"],
"default": "full"
},
"container": {
"type": "string",
"title": "Container",
"description": "The container which been chosen.",
"default": None
},
"description": {
"type": "string",
"title": "Description",
"description": "The description of the volume.",
"default": None
},
"force": {
"type": "boolean",
"title": "Force",
"description": "Whether to backup, even if the volume "
"is attached",
"default": False
}
},
"required": ["backup_name", "backup_mode", "container", "force"]
}
RESTORE_SCHEMA = {
"title": "Cinder Protection Restore",
"type": "object",
"properties": {
"volume_id": {
"type": "string",
"title": "Volume ID",
"description": "The target volume ID to restore to.",
"default": None
},
"restore_name": {
"type": "string",
"title": "Restore Name",
"description": "The name of the restored volume.",
"default": None
},
"restore_description": {
"type": "string",
"title": "Restore Description",
"description": "The description of the restored volume.",
"default": None
}
}
}
SAVED_INFO_SCHEMA = {
"title": "Cinder Protection Saved Info",
"type": "object",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "The name for this backup."
},
"is_incremental": {
"type": "boolean",
"title": "Is Incremental",
"description":
"The type of the backup, "
"True is incremental and False is full."
},
"status": {
"type": "string",
"title": "Status",
"description": "The backup status, such as available.",
"enum": ['creating', 'available',
'deleting', 'error',
'restoring', 'error_restoring'],
},
"progress": {
"type": "number",
"title": "Progress",
"description":
"The current operation progress for this backup.",
"constraint": {'min': 0, 'max': 1},
},
"fail_reason": {
"type": "string",
"title": "Fail Reason",
"description":
"The reason for the failure status of the backup."
},
"size": {
"type": "integer",
"title": "Size",
"description": "The size of the backup, in GB."
},
"volume_id": {
"type": "string",
"title": "Volume ID",
"description":
("The ID of the volume "
"from which the backup was created.")
},
},
"required": ["name", "status", "progress", "fail_reason",
"size", "volume_id"]
}
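# A minimal validation sketch using the jsonschema package (an assumed extra
# dependency, not imported elsewhere in this module); the sample options dict is
# hypothetical.
def _example_validate():
    import jsonschema
    sample_options = {
        "backup_name": "nightly-volume-backup",
        "backup_mode": "incremental",
        "container": "backups",
        "force": False,
    }
    jsonschema.validate(instance=sample_options, schema=OPTIONS_SCHEMA)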
|
from optimus.engines.base.dataframe.constants import BaseConstants
class DataFrameBaseConstants(BaseConstants):
pass
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _linearform
else:
import _linearform
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _linearform.SWIG_PyInstanceMethod_New
_swig_new_static_method = _linearform.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._ser.coefficient
import mfem._ser.globals
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.matrix
import mfem._ser.vector
import mfem._ser.operators
import mfem._ser.intrules
import mfem._ser.sparsemat
import mfem._ser.densemat
import mfem._ser.eltrans
import mfem._ser.fe
import mfem._ser.geom
import mfem._ser.mesh
import mfem._ser.sort_pairs
import mfem._ser.ncmesh
import mfem._ser.gridfunc
import mfem._ser.fespace
import mfem._ser.fe_coll
import mfem._ser.lininteg
import mfem._ser.handle
import mfem._ser.restriction
import mfem._ser.element
import mfem._ser.table
import mfem._ser.hash
import mfem._ser.bilininteg
import mfem._ser.nonlininteg
import mfem._ser.vertex
import mfem._ser.vtk
class LinearForm(mfem._ser.vector.Vector):
r"""Proxy of C++ mfem::LinearForm class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(LinearForm self, FiniteElementSpace f) -> LinearForm
__init__(LinearForm self, FiniteElementSpace f, LinearForm lf) -> LinearForm
__init__(LinearForm self) -> LinearForm
__init__(LinearForm self, FiniteElementSpace f, double * data) -> LinearForm
"""
_linearform.LinearForm_swiginit(self, _linearform.new_LinearForm(*args))
def GetFES(self):
r"""GetFES(LinearForm self) -> FiniteElementSpace"""
import warnings
warnings.warn("mfem::LinearForm::GetFES() is deprecated",
DeprecationWarning,)
return _linearform.LinearForm_GetFES(self)
def FESpace(self, *args):
r"""
FESpace(LinearForm self) -> FiniteElementSpace
FESpace(LinearForm self) -> FiniteElementSpace
"""
return _linearform.LinearForm_FESpace(self, *args)
FESpace = _swig_new_instance_method(_linearform.LinearForm_FESpace)
def AddDomainIntegrator(self, lfi):
r"""AddDomainIntegrator(LinearForm self, LinearFormIntegrator lfi)"""
if not hasattr(self, "_integrators"): self._integrators = []
self._integrators.append(lfi)
lfi.thisown=0
return _linearform.LinearForm_AddDomainIntegrator(self, lfi)
def AddBoundaryIntegrator(self, *args):
r"""
AddBoundaryIntegrator(LinearForm self, LinearFormIntegrator lfi)
AddBoundaryIntegrator(LinearForm self, LinearFormIntegrator lfi, intArray bdr_attr_marker)
"""
if not hasattr(self, "_integrators"): self._integrators = []
lfi = args[0]
self._integrators.append(lfi)
lfi.thisown=0
return _linearform.LinearForm_AddBoundaryIntegrator(self, *args)
def AddBdrFaceIntegrator(self, *args):
r"""
AddBdrFaceIntegrator(LinearForm self, LinearFormIntegrator lfi)
AddBdrFaceIntegrator(LinearForm self, LinearFormIntegrator lfi, intArray bdr_attr_marker)
"""
if not hasattr(self, "_integrators"): self._integrators = []
lfi = args[0]
self._integrators.append(lfi)
lfi.thisown=0
return _linearform.LinearForm_AddBdrFaceIntegrator(self, *args)
def GetDLFI(self):
r"""GetDLFI(LinearForm self) -> mfem::Array< mfem::LinearFormIntegrator * > *"""
return _linearform.LinearForm_GetDLFI(self)
GetDLFI = _swig_new_instance_method(_linearform.LinearForm_GetDLFI)
def GetDLFI_Delta(self):
r"""GetDLFI_Delta(LinearForm self) -> mfem::Array< mfem::DeltaLFIntegrator * > *"""
return _linearform.LinearForm_GetDLFI_Delta(self)
GetDLFI_Delta = _swig_new_instance_method(_linearform.LinearForm_GetDLFI_Delta)
def GetBLFI(self):
r"""GetBLFI(LinearForm self) -> mfem::Array< mfem::LinearFormIntegrator * > *"""
return _linearform.LinearForm_GetBLFI(self)
GetBLFI = _swig_new_instance_method(_linearform.LinearForm_GetBLFI)
def GetFLFI(self):
r"""GetFLFI(LinearForm self) -> mfem::Array< mfem::LinearFormIntegrator * > *"""
return _linearform.LinearForm_GetFLFI(self)
GetFLFI = _swig_new_instance_method(_linearform.LinearForm_GetFLFI)
def GetFLFI_Marker(self):
r"""GetFLFI_Marker(LinearForm self) -> mfem::Array< mfem::Array< int > * > *"""
return _linearform.LinearForm_GetFLFI_Marker(self)
GetFLFI_Marker = _swig_new_instance_method(_linearform.LinearForm_GetFLFI_Marker)
def Assemble(self):
r"""Assemble(LinearForm self)"""
return _linearform.LinearForm_Assemble(self)
Assemble = _swig_new_instance_method(_linearform.LinearForm_Assemble)
def AssembleDelta(self):
r"""AssembleDelta(LinearForm self)"""
return _linearform.LinearForm_AssembleDelta(self)
AssembleDelta = _swig_new_instance_method(_linearform.LinearForm_AssembleDelta)
def Update(self, *args):
r"""
Update(LinearForm self)
Update(LinearForm self, FiniteElementSpace f)
Update(LinearForm self, FiniteElementSpace f, Vector v, int v_offset)
"""
return _linearform.LinearForm_Update(self, *args)
Update = _swig_new_instance_method(_linearform.LinearForm_Update)
def MakeRef(self, f, v, v_offset):
r"""MakeRef(LinearForm self, FiniteElementSpace f, Vector v, int v_offset)"""
return _linearform.LinearForm_MakeRef(self, f, v, v_offset)
MakeRef = _swig_new_instance_method(_linearform.LinearForm_MakeRef)
def __call__(self, gf):
r"""__call__(LinearForm self, GridFunction gf) -> double"""
return _linearform.LinearForm___call__(self, gf)
__call__ = _swig_new_instance_method(_linearform.LinearForm___call__)
__swig_destroy__ = _linearform.delete_LinearForm
# Register LinearForm in _linearform:
_linearform.LinearForm_swigregister(LinearForm)
|
def Reverse(input):
total = ""
for char in input:
total = char + total
return total
print(Reverse("Reverse"))
|
#!/bin/env python
# -*- coding: utf-8 -*-
import collections
import csv
import click
import numpy as np
import psycopg2
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from postgis import register
def connectDB(db, dbuser, dbpass, line, spacing):
# Connect to the PostgreSQL Database
dbConnection = psycopg2.connect("dbname='{0}' user='{1}' password='{2}'".format(db, dbuser, dbpass))
register(dbConnection)
# Create a DB cursor and basic view for the script
dbCursor = dbConnection.cursor()
# DB Line View
dbLineViewSQL = """
CREATE OR REPLACE VIEW LinhaInterpolada AS
SELECT (ST_DumpPoints(ST_LineInterpolatePoints(wkb_geometry, {0}))).path[1],
(ST_DumpPoints(ST_LineInterpolatePoints(wkb_geometry, {0}))).geom
FROM linha{1};
""".format(spacing, line)
dbCursor.execute(dbLineViewSQL)
# DB MultiPoint Function
dbMultiFunctionSQL = """
CREATE OR REPLACE FUNCTION ST_AsMultiPoint(geometry) RETURNS geometry AS
'SELECT ST_Union((d).geom) FROM ST_DumpPoints(ST_LineInterpolatePoints($1, {0})) AS d;'
LANGUAGE sql IMMUTABLE STRICT COST 10;
""".format(spacing)
dbCursor.execute(dbMultiFunctionSQL)
dbConnection.commit()
return (dbConnection, dbCursor)
def buildStopsFromFile(stopsFileName, line, dbCursor):
"""Build a dictionary containing bus stops information from a given file
This function reads stopsFileName and creates a dictionary to hold information for each bus stop.
Each bus stop is indexed by a given id (from column id in the file), and contains the following data:
* the bus stop id
* its position (latitude and longitude)
* and travelled distance
    :param stopsFileName: path to the CSV file describing the bus stops
    :param line: the bus line number, used to compute each stop's travelled distance
    :param dbCursor: a cursor to the database
    :return: a dictionary containing bus stops information
"""
stopsFile = open(stopsFileName)
stopsReader = csv.DictReader(stopsFile)
stops = dict()
for aStop in stopsReader:
index = int(aStop["id"])
stops[index] = {}
stops[index]["term"] = bool(int(aStop["term"]))
stops[index]["id"] = index
stops[index]["lat"] = float(aStop["lat"])
stops[index]["lng"] = float(aStop["lng"])
stops[index]["dist"] = int(aStop["dist"])
travDist = getTravDistance(aStop["lat"], aStop["lng"], line, dbCursor)
print(aStop["term"], index, float(aStop["lat"]), float(aStop["lng"]), int(aStop["dist"]), int(travDist), sep=",")
return stops
def getTravDistance(lat, lng, busLine, dbCursor):
""" Returns the total travelled distance to position (lat, lng) in the given bus line
:param lat: the bus's latitude position
:param lng: the bus's longitude position
:param busLine: the bus line
:param dbCursor: a cursor to the database
:return: total distanced travelled in meters to the position specified
"""
# Map matching to discover closest point (projected)
closestPointSQL = """
SELECT ST_ClosestPoint(ST_AsMultiPoint(linha.wkb_geometry), pt.geometry) AS ponto
FROM ST_GeomFromText('POINT({0} {1})', 4326) AS pt,
linha{2} AS linha
""".format(lng, lat, busLine)
dbCursor.execute(closestPointSQL)
closestPoint = dbCursor.fetchone()
# Get the index of map matched point
pointIndexSQL = """
SELECT path, geom
FROM LinhaInterpolada
WHERE geom = %s
"""
dbCursor.execute(pointIndexSQL, (closestPoint))
pointIndex = dbCursor.fetchone()
# Get Travelled Distance
distanceSQL = """
SELECT ST_Length(ST_MakeLine(linhainterpolada.geom), true)
FROM LinhaInterpolada AS linhainterpolada
WHERE path <= %s
"""
dbCursor.execute(distanceSQL, [pointIndex[0]])
distance = dbCursor.fetchone()[0]
return distance
@click.command()
@click.option("--line", default=400, help="Bus Line")
@click.option("--stops", default="data/400-pontos.csv", help="File containing Bus Stops")
@click.option("--spacing", default=0.00025, help="Interpolation Spacing")
@click.option("--db", default="highway", help="PostGreSQL Database")
@click.option("--dbuser", default="ufg", help="PostGreSQL User")
@click.option("--dbpass", default="ufgufg", help="PostGreSQL Password")
@click.option("--output", default="output.csv", help="Output file")
def main(line, stops, spacing, db, dbuser, dbpass, output):
# Create DB connection and get a cursor
dbConnection, dbCursor = connectDB(db, dbuser, dbpass, line, spacing)
# Parse Bus Stops
busStops = buildStopsFromFile(stops, line, dbCursor)
# Output processed headway
# writeOutput(busStops, output)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from setuptools import find_packages, setup
setup(
name="labgraph_viz",
version="1.0.0",
description="Some useful visualizations for labgraph",
packages=find_packages(),
python_requires=">=3.6, <3.7",
install_requires=[
"dataclasses==0.6",
"labgraph>=1.0.2",
"matplotlib==3.1.1",
"numpy==1.16.4",
"PyQt5-sip==12.8.1",
"pyqtgraph==0.11.1",
],
)
|
import numpy as np
import cv2
import os
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
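# A small self-check with illustrative values: map the unit square onto a
# 640x480 frame and transform its far corner with homotrans.
if __name__ == "__main__":
    M = rect2rect_mtx((0, 0, 1, 1), (0, 0, 640, 480))
    print(homotrans(M, 1.0, 1.0))  # expected: (640.0, 480.0)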
|
import logging
import os
from osgeo import gdal
import matplotlib.pyplot as plt
from sentinelloader import Sentinel2Loader
logging.basicConfig(level=logging.DEBUG)
sl = Sentinel2Loader('/notebooks/data/output/sentinelcache',
os.environ['COPERNICUS_USER'], os.environ['COPERNICUS_PASSWORD'],
apiUrl='https://scihub.copernicus.eu/apihub/', showProgressbars=True)
# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]
area = [(-47.873796, -16.044801), (-47.933796, -16.044801),
(-47.933796, -15.924801), (-47.873796, -15.924801)]
# area = [(-51.15, -14),(-52.1,-14),(-52.1,-14.25),(-51.15,-14.25),(-51.15,-14)]# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]
# area = [(-44.8, -15),(-46.2,-15),(-46.2,-15.2),(-44.8,-15.2)]# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]
# area = [(-50.45, -15.25),(-50.65, -15.25),(-50.65, -15.45),(-50.65, -15.45),(-49.5, -16.5)]
# area = [(-44.8, -15),(-45.3,-15),(-45.3,-15.2),(-44.8,-15.2)]# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]
# geoTiffs = sl.getProductBandTiles(area, 'TCI', '60m', dateReference='2019-01-01', dateToleranceDays=20, cloudCoverage=(0,40))
# geoTiff = sl.cropRegion(area, geoTiffs)
# ds = gdal.Open(geoTiff).ReadAsArray()
# plt.figure(figsize=(44,44))
# plt.imshow(ds[0])
# plt.show()
# os.remove(geoTiff)
geoTiffs = sl.getRegionHistory(
area, 'TCI', '60m', '2019-01-06', '2019-01-30', daysStep=5)
for geoTiff in geoTiffs:
ds = gdal.Open(geoTiff).ReadAsArray()
plt.figure(figsize=(5, 5))
plt.imshow(ds[0])
plt.show()
os.remove(geoTiff)
# img = plt.imread(geoTiff)
# plt.imshow(img[:, :, 0], cmap=plt.cm.coolwarm)
# sl.getContents(area, 'TCI', '10m', dateReference='2019-01-01', dateToleranceDays=20, cloudCoverage=(0,40), cloudless=False, cloudlessDays=20)
# area = [(-44.8, -15),(-45.1,-15),(-45.1,-15.2),(-44.8,-15.2)]# area = [(-51.15, -14),(-51.8,-14),(-51.8,-14.25),(-51.15,-14.25),(-51.15,-14)]
# sl.getContents(area, 'TCI', '60m', dateReference='now', dateToleranceDays=20, cloudCoverage=(0,40), cloudless=False, cloudlessDays=20)
|
from secrets.plugins.base_plugin import BasePlugin
import re
from secrets.plugins.blacklist import Blacklist
class PasswordPatternPlugin(BasePlugin):
BLACKLIST = [r"[A-Z0-9]{8}-([A-Z0-9]{4}-){3}[A-Z0-9]{12}", # Guid
"RFC[0-9]{3}", "ES201[56]", "Office365", "System32", "log4net",
"VS20[0-9]{2}", "word2vec", "Graphics2D", r"[CS]A\d{4}", "Base64",
"AES256CBC", "ISO27001", r"\d+GB", r"\d+IOPS", "python3",
r"\d{2,4}(x\d{2,4})px", r"\d{3,4}px", r"\d{2,4}x\d{2,4}",
r"\d{1,2}(secs?|mins?|hours?|days?|weeks?|months?)", r"\d{1,2}min\d{1,2}s?",
r"ConsoleApp\d", r"\d{1,4}(k|m)bps", r"\d{1,4}ms", "KB\d+"]
def __init__(self):
self.password_regex = re.compile(r"(?:^|\s)(\w*(?:\d+[a-zA-Z]|[a-zA-Z]+\d)\w*!?)(?:$|\s)")
self.blacklist = Blacklist(self.BLACKLIST)
def find_secrets(self, line: str):
for m in self.password_regex.findall(line):
if not self.blacklist.matches(m):
yield m
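# A minimal usage sketch (assumes the package's BasePlugin and Blacklist are
# importable); the sample line below is hypothetical.
if __name__ == "__main__":
    plugin = PasswordPatternPlugin()
    for candidate in plugin.find_secrets("deploy uses Passw0rd! for the admin account"):
        print("possible secret:", candidate)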
|
# coding=utf-8
import sys
def fix_environment_stuff(module, base):
# restore builtins
module.__builtins__ = [x for x in base.__class__.__base__.__subclasses__() if x.__name__ == 'catch_warnings'][0]()._module.__builtins__
# patch getfilesystemencoding for NVIDIA Shield
getfilesystemencoding_orig = sys.getfilesystemencoding
def getfilesystemencoding():
return getfilesystemencoding_orig() or "utf-8"
sys.getfilesystemencoding = getfilesystemencoding
|
## set up logging
import logging, os
logging.basicConfig(level=os.environ.get("LOGLEVEL","INFO"))
log = logging.getLogger(__name__)
import requests
class Downloader:
"""A class for downloading USDA WASDE data files
***
Attributes
----------
Methods
-------
"""
def __init__(self):
pass
def __repr__(self):
return "<Instance of wasdeparser.downloader.Downloader>"
def pull(self,date:str,format="TEXT") -> str:
"""Downloads a WASDE file
***
Parameters
----------
date:str
Release date of desired file,
formatted as "YYYY-MM-DD"
format:str
Desired file format. One of
TEXT or EXCEL
"""
log.warning("Downloading not implemented")
pass
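# A hypothetical sketch of what pull() could do once implemented: fetch the
# report for the given release date over HTTP and save it locally. The endpoint,
# URL template, and filename scheme below are assumptions, not the real USDA
# locations.
def _example_pull(date: str, format: str = "TEXT") -> str:
    base_url = "https://example.invalid/wasde"  # placeholder endpoint
    extension = "txt" if format == "TEXT" else "xls"
    url = f"{base_url}/wasde-{date}.{extension}"
    log.info("downloading %s", url)
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    out_path = f"wasde-{date}.{extension}"
    with open(out_path, "wb") as fh:
        fh.write(response.content)
    return out_path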
|
import awwards
from awwards.models import Profile
from django.shortcuts import render,redirect,get_object_or_404
from awwards.models import Profile, Project
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import Project, Rate
from .serializer import ProjectSerializer,ProfileSerializer
from awwards import serializer
from .forms import ProjectForm,UploadProjectForm,RateForm,ProfileForm
from django.http.response import Http404, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/accounts/login/')
def home(request):
# projects = Project.display_projects()
# profile = Profile.objects.all()
if request.method == 'POST':
form = ProjectForm(request.POST)
if form.is_valid():
project = form.save()
project.user = request.user
project.save()
else:
form = ProjectForm()
    try:
        project = Project.display_projects()
    except Project.DoesNotExist:
        project = None
return render(request,'index.html',{"form":form,"project":project})
@login_required(login_url='/accounts/login/')
def profile(request):
if request.method=='POST':
form=ProfileForm(request.POST)
if form.is_valid():
profile=form.save(commit=False)
profile.user=request.user
profile.save()
else:
form=ProfileForm()
try:
profiles=Profile.objects.all()
except Profile.DoesNotExist:
profiles = None
return render(request, 'profile.html', {'profiles':profiles, 'form':form })
class ProjectList(APIView):
def get(self,request,format=None):
all_projects = Project.objects.all()
serializers = ProjectSerializer(all_projects,many=True)
return Response(serializers.data)
class ProfileList(APIView):
def get(self,request,format=None):
all_profiles = Profile.objects.all()
serializers = ProfileSerializer(all_profiles,many=True)
return Response(serializers.data)
def uploadproject(request):
current_user = request.user
try:
profile = Profile.objects.get(user = current_user)
except Profile.DoesNotExist:
raise Http404()
if request.method == "POST":
form = UploadProjectForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.profile = profile
image.save()
return redirect('home')
else:
form = UploadProjectForm()
return render(request, 'project/upload_project.html', {"form": form})
def search_projects(request):
if "project" in request.GET and request.GET["project"]:
searched_projects = request.GET.get("project")
projects = Project.search(searched_projects)
message = f"{searched_projects}"
return render(request,'search.html',{"message":message,"projects":projects})
else:
message = "Try again"
return render(request,'search.html',{"message":message,})
def rate(request, project):
projects=Project.objects.get(pk=project)
rate=Rate.objects.filter(project=projects).all()
if request.method=='POST':
form=RateForm(request.POST)
if form.is_valid():
rating=form.save(commit=False)
rating.user=request.user
rating.project=projects
rating.save()
project_ratings=Rate.objects.filter(project=project)
design_ratings=[r.design for r in project_ratings]
design_average=sum(design_ratings) /len(design_ratings)
content_ratings=[c.content for c in project_ratings]
content_average=sum(content_ratings) /len(content_ratings)
usability_ratings=[u.usability for u in project_ratings]
usability_average=sum(usability_ratings) /len(usability_ratings)
score=(design_average + content_average + usability_average)/3
rating.design_average=round(design_average, 2)
rating.usability_average=round(usability_average, 2)
rating.content_average=round(content_average, 2)
rating.score=round(score, 2)
rating.save()
return HttpResponseRedirect(request.path_info)
else:
form=RateForm()
parameters={
'project':project,
'rating_form':form,
'id':project,
'rate':rate
}
return render(request, 'project/rate.html', parameters )
|
import pygame
class Avatar:
"""Just a joke."""
def __init__(self, screen):
self.screen = screen
self.image = pygame.image.load('images/avatar.png')
self.rect = self.image.get_rect()
self.screen_rect = self.screen.get_rect()
self.rect.center = self.screen_rect.center
def blitme(self):
self.screen.blit(self.image, self.rect)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
from otcextensions.sdk import sdk_resource
class BackupTask(sdk_resource.Resource):
"""Cloud Backup"""
resources_key = "tasks"
base_path = "/backuppolicy/%(policy_id)s/backuptasks"
# capabilities
allow_list = True
_query_mapping = resource.QueryParameters(
"sort_dir", "sort_key", "status",
"limit", "marker", "offset", "status",
id="job_id")
#: Properties
#: Task job id
id = resource.Body("job_id", alternate_id=True)
    #: Name of the backup created by this task
backup_name = resource.Body("backup_name")
#: Resource ID (volume-id for example)
resource_id = resource.Body("resource_id")
#: Resource Type (volume for example)
resource_type = resource.Body("resource_type")
    #: Task status, valid values include: ``RUNNING``, ``EXECUTE_TIMEOUT``,
    #: ``WAITING``, ``EXECUTE_FAIL``, ``EXECUTE_SUCCESS``
status = resource.Body("status")
#: task created at
created_at = resource.Body("created_at")
#: task finished at
finished_at = resource.Body("finished_at")
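# Rough offline sketch (no API call): building the resource from a sample body
# the way SDK unit tests commonly do; every value below is made up.
if __name__ == '__main__':
    sample = BackupTask(**{
        'job_id': '0781095c-b8ab-4ce5-99f3-4c5f6ff75319',
        'backup_name': 'autobk_volume_01',
        'resource_id': 'f47a4ab1-11e6-4d41-97b6-9dad41514eb8',
        'resource_type': 'volume',
        'status': 'EXECUTE_SUCCESS',
    })
    print(sample.id, sample.backup_name, sample.status)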
|
#!/usr/bin/env python
"""
processing GoldCLIP illumina datasets
only one of the PE reads
## type1: goldclip_version_1
read1: {NNN} - {bc} - {NN} - <insert>
## type2: goldclip_version_2
read1: {N10} - <insert> - A{barcode}
read2: {barcode}A - <insert> - {N10}
## type3: eCLIP
read1: {barcode} - <insert> - {N10}
read2: {N10} - <insert> - {barcode}
"""
__author__ = "Ming Wang"
__email__ = "wangm08@hotmail.com"
__date__ = "2018-04-01"
__version__ = "0.1"
from goldclip.goldcliplib.demx import *
class Demx:
def __init__(self, *args, **kwargs):
self.kwargs = kwargs
def run(self):
"""run demx"""
r1 = self.kwargs['fq1']
r2 = self.kwargs['fq2']
barcode = self.kwargs['bc_file']
path_out = self.kwargs['out']
n_left = self.kwargs['n_left']
n_right = self.kwargs['n_right']
is_bioawk = self.kwargs['bioawk']
p7 = self.kwargs['p7']
p7_and_bc = self.kwargs['p7_and_bc']
mm = self.kwargs['n_mismatch']
cut = self.kwargs['cut']
# demx p7, then barcode
read1 = r1.name
barcode_file = barcode.name
assert is_path(path_out)
if p7_and_bc: # demx both p7 and barcode
if r2:
logging.info('demx P7 and barcode, PE reads')
read2 = r2.name
tmp = p7_bc_demx_pe(read1, read2, barcode_file, path_out,
n_left, n_right, cut=cut, mm=mm)
else:
logging.info('demx P7 and barcode, SE reads')
tmp = p7_bc_demx_se(read1, barcode_file, path_out, n_left, n_right,
cut=cut, mm=mm)
elif p7: # require demx p7, in fastq-comment-field
if r2:
logging.info('demx P7, PE reads')
read2 = r2.name
tmp = p7_demx_pe(read1, read2, barcode_file, path_out, mm)
else:
logging.info('demx P7, SE reads')
tmp = p7_demx_se(read1, barcode_file, path_out, mm)
else: # only barcode
if r2:
logging.info('demx barcode, PE reads')
read2 = r2.name
tmp = bc_demx_pe(read1, read2, barcode_file, path_out, n_left,
n_right, cut=cut, mm=mm)
else:
if is_bioawk:
logging.info('demx barcode, SE reads - bioawk')
tmp = demx_se_bioawk(read1, barcode_file, path_out, n_left,
n_right)
else:
logging.info('demx barcode, SE reads')
tmp = bc_demx_se(read1, barcode_file, path_out, n_left,
n_right, cut=cut, mm=mm)
        logging.info('demx finished!')
## EOF
|
import requests
from planet.api.dispatch import _get_user_agent
ITEM_TYPE_URL = 'https://api.planet.com/data/v1/item-types/'
ASSET_TYPE_URL = 'https://api.planet.com/data/v1/asset-types/'
_item_types = None
_asset_types = None
# Default values here are used as a fallback
# In case the API fails to respond or takes too long.
DEFAULT_ITEM_TYPES = [
"PSScene4Band", "PSScene3Band", "REScene", "SkySatScene",
"REOrthoTile", "Sentinel2L1C", "PSOrthoTile", "Landsat8L1G"]
DEFAULT_ASSET_TYPES = [
"analytic", "analytic_b1", "analytic_b10", "analytic_b11", "analytic_b12",
"analytic_b2", "analytic_b3", "analytic_b4", "analytic_b5", "analytic_b6",
"analytic_b7", "analytic_b8", "analytic_b8a", "analytic_b9",
"analytic_bqa", "analytic_dn", "analytic_dn_xml", "analytic_ms",
"analytic_xml", "basic_analytic", "basic_analytic_b1",
"basic_analytic_b1_nitf", "basic_analytic_b2", "basic_analytic_b2_nitf",
"basic_analytic_b3", "basic_analytic_b3_nitf", "basic_analytic_b4",
"basic_analytic_b4_nitf", "basic_analytic_b5", "basic_analytic_b5_nitf",
"basic_analytic_dn", "basic_analytic_dn_nitf", "basic_analytic_dn_rpc",
"basic_analytic_dn_rpc_nitf", "basic_analytic_dn_xml",
"basic_analytic_dn_xml_nitf", "basic_analytic_nitf", "basic_analytic_rpc",
"basic_analytic_rpc_nitf", "basic_analytic_sci", "basic_analytic_xml",
"basic_analytic_xml_nitf", "basic_panchromatic_dn",
"basic_panchromatic_dn_rpc", "basic_udm", "browse", "metadata_aux",
"metadata_txt", "udm", "visual", "visual_xml"
]
def _get_json_or_raise(url, timeout=0.7):
headers = {'User-Agent': _get_user_agent()}
resp = requests.get(url, timeout=timeout, headers=headers)
resp.raise_for_status()
return resp.json()
def get_item_types():
global _item_types
if _item_types is None:
try:
data = _get_json_or_raise(ITEM_TYPE_URL)
_item_types = [it['id'] for it in data['item_types']]
        except Exception:
_item_types = DEFAULT_ITEM_TYPES
return _item_types
def get_asset_types():
global _asset_types
if _asset_types is None:
try:
data = _get_json_or_raise(ASSET_TYPE_URL)
_asset_types = [a['id'] for a in data['asset_types']]
        except Exception:
_asset_types = DEFAULT_ASSET_TYPES
return _asset_types
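# Example usage: both helpers cache their first result and quietly fall back to
# the DEFAULT_* lists above if the Planet API is unreachable or too slow.
if __name__ == '__main__':
    print(get_item_types())
    print(get_asset_types())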
|
from typing import Any, Dict
from typing_extensions import TypedDict
import auth
from api.socket.constants import GAME_NS
from app import app, sio
from models import Label, LabelSelection, PlayerRoom, User
from state.game import game_state
from utils import logger
class LabelVisibilityMessage(TypedDict):
uuid: str
visible: bool
@sio.on("Label.Add", namespace=GAME_NS)
@auth.login_required(app, sio)
async def add(sid: str, data: Dict[str, Any]):
pr: PlayerRoom = game_state.get(sid)
    label = Label.get_or_none(uuid=data["uuid"])
if label is not None:
logger.warn(
f"{pr.player.name} tried to add a label with an id that already exists."
)
return
if data["user"] != pr.player.name:
logger.warn(f"{pr.player.name} tried to add a label for someone else.")
return
data["user"] = User.by_name(data["user"])
label = Label.create(**data)
for psid in game_state.get_sids(skip_sid=sid, room=pr.room):
if game_state.get_user(psid) == pr.player or label.visible:
await sio.emit("Label.Add", label.as_dict(), room=psid, namespace=GAME_NS)
@sio.on("Label.Delete", namespace=GAME_NS)
@auth.login_required(app, sio)
async def delete(sid: str, data: Dict[str, Any]):
pr: PlayerRoom = game_state.get(sid)
label = Label.get_or_none(uuid=data)
if label is None:
logger.warn(f"{pr.player.name} tried to delete a non-existing label.")
return
if label.user != pr.player:
logger.warn(f"{pr.player.name} tried to delete another user's label.")
return
label.delete_instance(True)
    for psid in game_state.get_sids(skip_sid=sid, room=pr.room):
        await sio.emit(
            "Label.Delete",
            {"user": pr.player.name, "uuid": data},
            room=psid,
            namespace=GAME_NS,
        )
@sio.on("Label.Visibility.Set", namespace=GAME_NS)
@auth.login_required(app, sio)
async def set_visibility(sid: str, data: LabelVisibilityMessage):
pr: PlayerRoom = game_state.get(sid)
label = Label.get_or_none(uuid=data["uuid"])
if label is None:
logger.warn(f"{pr.player.name} tried to change a non-existing label.")
return
if label.user != pr.player:
logger.warn(f"{pr.player.name} tried to change another user's label.")
return
label.visible = data["visible"]
label.save()
for psid in game_state.get_sids(skip_sid=sid, room=pr.room):
if game_state.get_user(psid) == pr.player:
await sio.emit(
"Label.Visibility.Set",
{"user": label.pr.player.name, **data},
room=psid,
namespace=GAME_NS,
)
else:
if data["visible"]:
await sio.emit(
"Label.Add", label.as_dict(), room=psid, namespace=GAME_NS
)
else:
await sio.emit(
"Label.Delete",
{"uuid": label.uuid, "user": label.user.name},
room=psid,
namespace=GAME_NS,
)
@sio.on("Labels.Filter.Add", namespace=GAME_NS)
@auth.login_required(app, sio)
async def add_filter(sid: str, uuid: str):
pr: PlayerRoom = game_state.get(sid)
label = Label.get_or_none(uuid=uuid)
LabelSelection.create(label=label, user=pr.player, room=pr.room)
for psid in game_state.get_sids(skip_sid=sid, room=pr.room):
if game_state.get_user(psid) == pr.player:
await sio.emit("Labels.Filter.Add", uuid, room=psid, namespace=GAME_NS)
@sio.on("Labels.Filter.Remove", namespace=GAME_NS)
@auth.login_required(app, sio)
async def remove_filter(sid: str, uuid: str):
pr: PlayerRoom = game_state.get(sid)
label = Label.get_or_none(uuid=uuid)
ls = LabelSelection.get_or_none(label=label, room=pr.room, user=pr.player)
if ls:
ls.delete_instance(True)
for psid in game_state.get_sids(skip_sid=sid, room=pr.room):
if game_state.get_user(psid) == pr.player:
await sio.emit("Labels.Filter.Remove", uuid, room=psid, namespace=GAME_NS)
|
import os
import entrypoints
import numpy as np
def test_edf_ingestor(tmp_path):
from fabio.edfimage import EdfImage
# test data
data = np.random.random((1000, 1000))
# write data to test edf
edf_path = os.path.join(tmp_path, "test.edf")
print("edf_path:", edf_path)
EdfImage(data).write(edf_path)
# get edf ingestor
edf_ingestor = entrypoints.get_single("databroker.ingestors", "application/x-edf").load()
# load data into catalog
document = list(edf_ingestor([edf_path]))
uid = document[0][1]["uid"]
# TODO: actually do some assertions in here (assert the keys are correct in the descriptor)
# (assert data is image) ...
|
#!/usr/bin/env python
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 08/11/2008
# Description: Test accessibility of form widget
# Use the formframe.py wrapper script
# Test the samples/winforms/form.py script
##############################################################################
# The docstring below is used in the generated log file
"""
Test accessibility of form widget
"""
# imports
import sys
import os
from strongwind import *
from form import *
from helpers import *
from sys import argv
from os import path
app_path = None
try:
app_path = argv[1]
except IndexError:
pass #expected
# open the form sample application
try:
app = launchForm(app_path)
except IOError, msg:
print "ERROR: %s" % msg
exit(2)
sleep(config.SHORT_DELAY)
# make sure we got the app back
if app is None:
exit(4)
# just an alias to make things shorter
fFrame = app.formFrame
###########################
# check form's AtkAccessible
###########################
#check main form's states with 'active' state
statesCheck(fFrame, "Form", add_states=["active"])
###########################
# check ExtraForm(SWF.MessageBox)'s AtkAccessible
###########################
#click button1 to make the extra message widget appear
fFrame.click(fFrame.button1)
sleep(config.SHORT_DELAY)
#message = fFrame.app.findFrame("Message Form")
extra_form_1 = fFrame.app.findDialog("Message Form")
statesCheck(extra_form_1, "Form", add_states=["active", "modal"], invalid_states=["resizable"])
#check main form's states without 'active'
statesCheck(fFrame, "Form")
# click frame, but MessageBox window should remain active
## BUG575635: winforms.form's bug on 64bit platform
#fFrame.mouseClick()
#check main form's states again without 'active'
#statesCheck(fFrame, "Form")
# make sure that the message widget's states stay the same
#statesCheck(extra_form_1, "Form", add_states=["active", "modal"], invalid_states=["resizable"])
#close message form widget, main form regains 'active' state again
extra_form_1.altF4(assertClosed=False)
statesCheck(fFrame, "Form", add_states=["active"])
###########################
# check ExtraForm(Frame)'s AtkAccessible
###########################
#click button2 to make the extra empty form widget appear
fFrame.click(fFrame.button2)
sleep(config.SHORT_DELAY)
extra_form_2 = fFrame.app.findFrame("Extra Form")
#check extra form widget's states with 'active' state
statesCheck(extra_form_2, "Form", add_states=["active"])
#check main form's states without 'active'
statesCheck(fFrame, "Form")
# click main frame, which should become active
fFrame.mouseClick()
statesCheck(fFrame, "Form", add_states=["active"])
#check extra form's states without 'active'
statesCheck(extra_form_2, "Form")
#close extra form widget, main form regains 'active' state again
extra_form_2.mouseClick()
extra_form_2.altF4()
statesCheck(fFrame, "Form", add_states=["active"])
###########################
# check ExtraForm(Dialog)'s AtkAccessible
###########################
#click button3 to make the extra empty form widget appear
fFrame.click(fFrame.button3)
sleep(config.SHORT_DELAY)
extra_form_3 = fFrame.app.findDialog("Extra Form")
#check extra form widget's states with 'active' state
statesCheck(extra_form_3, "Form", add_states=["active", "modal"])
#check main form's states without 'active'
statesCheck(fFrame, "Form")
# click frame, but MessageBox window should remain active
fFrame.mouseClick()
# check main form's states again which is without 'active'
statesCheck(fFrame, "Form")
# make sure that the message widget's states stay the same
statesCheck(extra_form_3, "Form", add_states=["active", "modal"])
# close the extra dialog widget, main form regains 'active' state again
extra_form_3.altF4()
statesCheck(fFrame, "Form", add_states=["active"])
#close main form window
fFrame.quit()
print "INFO: Log written to: %s" % config.OUTPUT_DIR
|
version_info = (5, 6, 0)
__version__ = '.'.join(str(v) for v in version_info)
|
"""
Joe Tacheron
difficulty: 5%
runtime: 8m22s
answer: 128088830547982
***
719 Number Splitting
We define an S-number to be a natural number, n, that is a perfect square and its square root can be obtained by splitting the decimal representation of n into 2 or more numbers then adding the numbers.
For example, 81 is an S-number because sqrt(81) = 8 + 1.
6724 is an S-number: sqrt(6724) = 6 + 72 + 4.
Further we define T(N) to be the sum of all S numbers n<=N. You are given T(10^4)=41333.
Find T(10^12).
"""
def split_sums(n, mem={}):
if n < 10**6:
try:
return mem[n]
except KeyError:
pass
ss = [n]
if n >= 10:
m = 10
while m < n and m <= 10**6: # any higher and the square would be > n
r = n % m
l = split_sums(n // m)
for s in l:
ss.append(r + s)
m *= 10
if n < 10**6:
mem[n] = ss
return ss
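# Sanity checks against the examples in the problem statement:
# sqrt(81) = 9 = 8 + 1 and sqrt(6724) = 82 = 6 + 72 + 4.
assert 9 in split_sums(81)
assert 82 in split_sums(6724)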
def solve(N):
squares = {x:x**2 for x in range(2, int(N**0.5)+1)} # 1 is not an S number by definition
ans = 0
for x,x2 in squares.items():
if any(s==x for s in split_sums(x2)):
ans += x2
return ans
assert solve(10**4) == 41333
print(solve(10**12))
|
def _setMatrix(mat,d,r,lis,fill,cmat,fmat,uniform,seed,null):
# =============================================================================
#Handle arguments
if isinstance(d,int):
mat._setDim(d)
if not isinstance(lis,(str,list)):
lis = []
#Empty list given
if len(lis)==0:
if fill == None:
fill = null if mat._dfMat else uniform
mat._Matrix__fill = fill
elif isinstance(fill,str):
if mat.dtype.__name__ != "dataframe":
raise TypeError("Can't fill matrix with strings if dtype isn't set to dataframe")
isMethod = bool(type(fill).__name__ in ["method","function","builtin_function_or_method"])
d0,d1 = d
if lis in [None,"",{}]:
lis = []
if not isinstance(lis,(list,str,dict)):
raise TypeError("'data' parameter only accepts lists,strings and dictionaries")
#Dictionary given
if isinstance(lis,dict):
from ..C_funcs.linalg import Ctranspose
names,values = list(lis.keys()),list(lis.values())
if len(values) == 0:
raise ValueError("No data found")
if not all([1 if isinstance(val,list) else 0 for val in values]):
raise IndexError("Dictionary's values should be lists")
col_length = len(values[0])
if col_length == 0:
raise IndexError("Can't use empty lists as columns")
if not all([1 if len(val)==col_length else 0 for val in values[1:]]):
raise IndexError("Dictionary's values should be same length lists")
transposed = Ctranspose(len(names),col_length,values)
mat._matrix = transposed
mat._Matrix__dim=mat._declareDim()
mat._Matrix__features = names
return None
#Set new range
if r==None:
r=mat.initRange
else:
        mat._Matrix__initRange=r
# =============================================================================
#Save the seed for reproduction
if mat.seed==None and len(lis)==0 and isMethod:
randseed = int(uniform(-2**24,2**24))
mat._Matrix__seed = randseed
elif isMethod and len(lis)==0:
seed(mat.seed)
else:
mat._Matrix__seed=None
# =============================================================================
#Set the new matrix
#Matrix from given string
if isinstance(lis,str):
mat._matrix=mat._listify(lis)
if mat.dim == [0,0]:
mat._Matrix__dim=mat._declareDim()
#Matrix from a list or other filling methods
else:
if len(lis)>0:
from ..validations.validate import consistentlist
#List of lists
if consistentlist(lis,list,lisname="rows"):
if mat.dim == [0,0]:
                    mat._matrix = [row[:] for row in lis]
mat._Matrix__dim=mat._declareDim()
else:
d0,d1 = mat.dim
from ..validations.validate import exactdimension
exactdimension(lis,d0,d1,throw=True)
mat._matrix = [row[:] for row in lis]
#List of mixed values
else:
if mat.dim != [0,0]:
d0,d1 = mat.dim
                    assert d0*d1 == len(lis), f"Given list can't be used as a {d0}x{d1} Matrix"
mat._matrix=[]
for j in range(0,len(lis),d1):
mat._matrix.append(lis[j:j+d1])
else:
mat._matrix = [lis]
mat._Matrix__dim=mat._declareDim()
# =============================================================================
#Same range for all columns
elif len(lis)==0 and (isinstance(r,list) or isinstance(r,tuple)):
if isinstance(fill,(str,int,float,complex)):
from ..C_funcs.randgen import getfill
mat._matrix=getfill(d0,d1,fill)
return None
elif isMethod:
if fill.__name__=="uniform":
m,n=max(r),min(r)
if cmat:
mat._matrix=[[complex(uniform(n,m),uniform(n,m)) for a in range(d1)] for b in range(d0)]
elif fmat:
if r==[0,1]:
from ..C_funcs.zerone import pyfill
mat._matrix=pyfill(d0,d1,mat.seed)
else:
from ..C_funcs.randgen import getuni
mat._matrix=getuni(d0,d1,n,m,mat.seed)
else:
if r==[0,1]:
from ..C_funcs.randgen import igetrand
mat._matrix=igetrand(d0,d1,mat.seed)
else:
from ..C_funcs.randgen import igetuni
mat._matrix=igetuni(d0,d1,n-1,m+1,mat.seed)
elif fill.__name__ in ["gauss","betavariate","gammavariate","lognormvariate"]:
m,s=r[0],r[1]
if cmat:
mat._matrix=[[complex(fill(m,s),fill(m,s)) for a in range(d1)] for b in range(d0)]
elif fmat:
mat._matrix=[[fill(m,s) for a in range(d1)] for b in range(d0)]
else:
mat._matrix=[[round(fill(m,s)) for a in range(d1)] for b in range(d0)]
elif fill.__name__=="triangular":
n,m,o = r[0],r[1],r[2]
if cmat:
mat._matrix=[[complex(fill(n,m,o),fill(n,m,o)) for a in range(d1)] for b in range(d0)]
elif fmat:
mat._matrix=[[fill(n,m,o) for a in range(d1)] for b in range(d0)]
else:
mat._matrix=[[round(fill(n,m,o)) for a in range(d1)] for b in range(d0)]
elif fill.__name__=="expovariate":
lmb = r[0]
if cmat:
mat._matrix=[[complex(fill(lmb),fill(lmb)) for a in range(d1)] for b in range(d0)]
elif fmat:
mat._matrix=[[fill(lmb) for a in range(d1)] for b in range(d0)]
else:
mat._matrix=[[round(fill(lmb)) for a in range(d1)] for b in range(d0)]
else:
if cmat:
mat._matrix=[[complex(fill(*r),fill(*r)) for a in range(d1)] for b in range(d0)]
elif fmat or mat._dfMat:
mat._matrix=[[fill(*r) for a in range(d1)] for b in range(d0)]
else:
mat._matrix=[[round(fill(*r)) for a in range(d1)] for b in range(d0)]
        #Range has no effect after this point
elif type(fill) == list:
if len(fill)!=d1:
raise ValueError(f"Given list {fill} should have {d1} values")
else:
                mat._matrix = [fill[:] for _ in range(d0)]
elif type(fill) == range:
l = list(fill)
if len(l)!=d1:
raise ValueError(f"Given range {fill} should have {d1} values")
else:
                mat._matrix = [l[:] for _ in range(d0)]
else:
from ..C_funcs.randgen import getfill
mat._matrix=getfill(d0,d1,fill)
# =============================================================================
#Different ranges over individual columns
elif len(lis)==0 and isinstance(r,dict):
try:
assert len([i for i in r.keys()])==mat.dim[1]
vs=[len(i) for i in r.values()]
assert vs.count(vs[0])==len(vs)
feats=[i for i in r.keys()]
mat._Matrix__features=feats
except Exception as err:
print(err)
else:
lis=list(r.values())
if isinstance(fill,(str,int,float,complex)):
from ..C_funcs.randgen import getfill
mat._matrix=getfill(d0,d1,fill)
return None
elif isMethod:
if fill.__name__=="uniform":
if cmat:
temp=[[complex(uniform(min(lis[i]),max(lis[i])),uniform(min(lis[i]),max(lis[i]))) for _ in range(d0)] for i in range(d1)]
elif fmat:
temp=[[uniform(min(lis[i]),max(lis[i])) for _ in range(d0)] for i in range(d1)]
else:
temp=[[round(uniform(min(lis[i]),max(lis[i])+1))//1 for _ in range(d0)] for i in range(d1)]
elif fill.__name__ in ["gauss","betavariate","gammavariate","lognormvariate"]:
if cmat:
temp=[[complex(fill(lis[i][0],lis[i][1]),fill(lis[i][0],lis[i][1])) for _ in range(d0)] for i in range(d1)]
elif fmat:
temp=[[fill(lis[i][0],lis[i][1]) for _ in range(d0)] for i in range(d1)]
else:
temp=[[round(fill(lis[i][0],lis[i][1]+1))//1 for _ in range(d0)] for i in range(d1)]
elif fill.__name__=="triangular":
if cmat:
temp=[[complex(fill(lis[i][0],lis[i][1],lis[i][2]),fill(lis[i][0],lis[i][1],lis[i][2])) for _ in range(d0)] for i in range(d1)]
elif fmat:
temp=[[fill(lis[i][0],lis[i][1],lis[i][2]) for _ in range(d0)] for i in range(d1)]
else:
temp=[[round(fill(lis[i][0],lis[i][1]+1,lis[i][2]))//1 for _ in range(d0)] for i in range(d1)]
elif fill.__name__=="expovariate":
if cmat:
temp=[[complex(fill(lis[i][0]),fill(lis[i][0])) for _ in range(d0)] for i in range(d1)]
elif fmat:
temp=[[fill(lis[i][0]) for _ in range(d0)] for i in range(d1)]
else:
temp=[[round(fill(lis[i][0]))//1 for _ in range(d0)] for i in range(d1)]
else:
if cmat:
temp = [[complex(fill(*r[b]),fill(*r[b])) for a in range(d0)] for b in range(d1)]
elif fmat or mat._dfMat:
temp = [[fill(*r[b]) for a in range(d0)] for b in range(d1)]
else:
temp = [[round(fill(*r[b])) for a in range(d0)] for b in range(d1)]
            #Range has no effect after this point
elif type(fill) == list:
if len(fill)!=d1:
raise ValueError(f"Given list {fill} should have {d1} values")
else:
                    mat._matrix = [fill[:] for _ in range(d0)]
return None
elif type(fill) == range:
l = list(fill)
if len(l)!=d1:
raise ValueError(f"Given range {fill} should have {d1} values")
else:
                    mat._matrix = [l[:] for _ in range(d0)]
return None
else:
from ..C_funcs.randgen import getfill
temp = getfill(d1,d0,fill)
from ..C_funcs.linalg import Ctranspose
mat._matrix = Ctranspose(d1,d0,temp)
else:
return None
|
'''
A 71.03711
C 103.00919
D 115.02694
E 129.04259
F 147.06841
G 57.02146
H 137.05891
I 113.08406
K 128.09496
L 113.08406
M 131.04049
N 114.04293
P 97.05276
Q 128.05858
R 156.10111
S 87.03203
T 101.04768
V 99.06841
W 186.07931
Y 163.06333
'''
if __name__ == '__main__':
data = 'TSNTICYSADNACDFDMIVSPMNMVHGQLTVAVCEPQQTGFLGNKQFKRIVGFVMFDSCIALYVENTRACTRLRSHIQQMDEACVTMKLQNFLGETIWLAMYKKWQSGFSYHNSTSYTVWLLWTKTIPMLCDFCFNVGTYEDKIKYDKENTLYYVQEFRIDYMDIWHCHNKVGATMTAENNWVCQHAMDSQFFCHDTNYRCQVPFMGCYTDCIVRTIARQLFVLDWICKRDLMKIFQVIGQVLGIKDQFYCTHAEEHTDIEAIARIMEKHFWIFWMNISMGRSNPFPVLFSGAGFSFQHWWIMYYWGKSQTLGWASAWPTLHKEMNFWKMLESTQKNNCCRDSVYRGFGKGGPLHDCTVNTWIFVSIVDSNTQAVRMNPVMNSCCFDETWYQLYKNSHHHCTWYKQLCGFAKPAPMHMRVILIVFSSMSFRKDCHPGPYWEHPLFIHKIRKKRSAWRYTYIEQVPRYTHKWHMKSCWKNCHYCLQQTYWTWTKFTIDKINNTYQRHETAQCVWPCFFTWVADNERRSMQNWGTANYIVACVHMFCGAPNFAWGPCYGTVDMGGEVFTKQKAVTKCASSHTNSQSVMKASRSPYRGMLWQMHCVGYFKTIQYIDTNYDLWCVDVIQCSNLTEFHYIYECRDQMHSANCHHQNWTGRDDHCSLKNKLTIDHIDILQVEFRWWQVIDEEEVTMVCRPWVLFFWCETHRNSEQWFIVNCRMHSYPHDVTIDVMANKHVIELDDMAYALPKHDSWGHTDCQKMSEGENSCLVLLESARWQSRQYGTAHDEQLLTDAFEYDMNAGEHKCTMYYSYKNSHNTFFMTFFLKVFDERENWSPHVEGHNQGEKDRFPVKAHTDVVFC'
mass_table = {
'A': 71.03711,
'C': 103.00919,
'D': 115.02694,
'E': 129.04259,
'F': 147.06841,
'G': 57.02146,
'H': 137.05891,
'I': 113.08406,
'K': 128.09496,
'L': 113.08406,
'M': 131.04049,
'N': 114.04293,
'P': 97.05276,
'Q': 128.05858,
'R': 156.10111,
'S': 87.03203,
'T': 101.04768,
'V': 99.06841,
'W': 186.07931,
'Y': 163.06333
}
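    # Quick sanity check of the table: the tripeptide "ACD" weighs
    # 71.03711 + 103.00919 + 115.02694 = 289.07324 Da.
    assert abs(sum(mass_table[c] for c in 'ACD') - 289.07324) < 1e-6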
total = 0.0
for c in data.upper():
total += mass_table[c]
    print(round(total, 3))
|