content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _create_dhcp_entries_for_many_instances(instances, ip_or_network):
    """Assign an IP and create DHCP entries for each of several instances."""
    for inst in instances:
        # No MAC is provided through the form when one IP targets many
        # instances, so fall back to the first non-management ethernet.
        eth_id = (
            _get_non_mgmt_ethernets(inst)
            .values_list('id', flat=True)
            .first()
        )  # TODO: is first the best choice here?
        yield _create_dhcp_entries_for_single_instance(
            inst, ip_or_network, eth_id
        )
def test_psf_estimation(psf_data, true_psf_file, kernel=None, metric='mean'):
    """Test PSF Estimation

    Measure the quality of estimated PSFs against the ground-truth PSFs
    stored in ``true_psf_file``.

    Parameters
    ----------
    psf_data : np.ndarray
        Estimated PSFs, 3D array
    true_psf_file : str
        True PSFs file name
    kernel : int, optional
        Standard deviation of Gaussian kernel
    metric : str {mean, median}, optional
        Metric for averaging results (default is 'mean')

    Returns
    -------
    np.ndarray pixel errors, np.ndarray ellipticity errors

    Raises
    ------
    ValueError
        If the number of true PSF images does not match the number of
        estimated PSF images

    """
    true_psf = read_file(true_psf_file)

    if psf_data.shape != true_psf.shape:
        raise ValueError('The number of true PSF images must match the number '
                         'estimated PSF images.')

    return test_images(psf_data, true_psf, kernel, metric)
def EulerBack(V_m0,n_0,m_0,h_0,T,opcion,t1,t2,t3,t4,I1,I2,h_res=0.01):
    """Integrate the membrane-potential model with the backward (implicit) Euler scheme.

    :param V_m0: initial membrane potential
    :param n_0: initial probability of gate n
    :param m_0: initial probability of gate m
    :param h_0: initial probability of gate h
    :param T: user-supplied temperature
    :param opcion: * 1: fixed current. * 2: variable current.
    :param t1: [mS] start of time interval 1.
    :param t2: [mS] end of time interval 1.
    :param t3: [mS] start of time interval 2.
    :param t4: [mS] end of time interval 2.
    :param I1: [mV] current intensity over time interval 1.
    :param I2: [mV] current intensity over time interval 2.
    :param h_res: [mS] time step used to build the range. Default = 0.01 [mS]
    :return: tuple (t, Vm_EulerBack) -> t: simulation time axis.
             Vm_EulerBack: membrane potential at each simulated time t.
    """
    phi_val = phi(T)  # temperature factor (Φ)
    # Build the simulation time axis and the injected-current array.
    t, I = tiempo_y_corriente(opcion,t1,t2,t3,t4,I1,I2,h_res)
    # Allocate the solution vectors for Vm(t), n(t), m(t) and h(t).
    Vm_EulerBack, n_EulerBack, m_EulerBack, h_EulerBack = creacionArreglos(
        V_m0, n_0, m_0, h_0, t)
    # Every implicit step is solved with fsolve on FAux_EulerBack, seeding
    # the root search with the previous state.
    for k in range(1, len(t)):
        guess = np.array([Vm_EulerBack[k - 1], n_EulerBack[k - 1],
                          m_EulerBack[k - 1], h_EulerBack[k - 1]])
        roots = opt.fsolve(FAux_EulerBack, guess,
                           (I[k], Vm_EulerBack[k - 1], n_EulerBack[k - 1],
                            m_EulerBack[k - 1], h_EulerBack[k - 1],
                            phi_val, h_res))
        # Unpack each component of the solved state into its series.
        Vm_EulerBack[k] = roots[0]
        n_EulerBack[k] = roots[1]
        m_EulerBack[k] = roots[2]
        h_EulerBack[k] = roots[3]
    return t, Vm_EulerBack
def wiggle(shape, scope, offset, seed=0):
    """Shift points/contours/paths by a random amount.

    Returns None when no shape is given or the scope is unknown.
    """
    if shape is None:
        return None
    dispatch = {
        "points": wiggle_points,
        "contours": wiggle_contours,
        "paths": wiggle_paths,
    }
    handler = dispatch.get(scope)
    return None if handler is None else handler(shape, offset, seed)
def parseAndDisplay(line, indentLevel):
    """Print ``line`` at the proper depth and return the updated indent level.

    Lines beginning with "starting " open a level (printed at the old depth);
    lines beginning with "ending " close one (printed at the new, shallower
    depth); everything else is printed at the current depth.
    """
    if line.startswith("starting "):
        printArgumentLine(indentLevel, line)
        return indentLevel + 1
    if line.startswith("ending "):
        indentLevel -= 1
        printArgumentLine(indentLevel, line)
        return indentLevel
    printLine(indentLevel, line)
    return indentLevel
def insert_question(question):
    """
    Insert a single question document into the ``questions`` collection.

    @param: question - JSON object containing question data to be inserted
    """
    result = db.questions.insert_one(question)
    return result
def chk_slv(slave, **kwargs):
    """Function: chk_slv

    Description: Compare the slave's read file/position against the
        executed file/position and warn when they differ. GTID info is
        printed when GTID mode is on; in pre-MySQL 5.6 this will be NULL.

    Arguments:
        (input) slave -> Slave instance.

    """
    global PRT_TEMPLATE

    mst_file, relay_file, read_pos, exec_pos = slave.get_log_info()
    name = slave.get_name()

    # Nothing to report when master info matches the relay info.
    if mst_file == relay_file and read_pos == exec_pos:
        return

    print(PRT_TEMPLATE.format(name))
    print("Warning: Slave might be lagging in execution of log.")
    print("\tRead Log:\t{0}".format(mst_file))
    print("\tRead Pos:\t{0}".format(read_pos))
    if slave.gtid_mode:
        print("\tRetrieved GTID:\t{0}".format(slave.retrieved_gtid))
    print("\tExec Log:\t{0}".format(relay_file))
    print("\tExec Pos:\t{0}".format(exec_pos))
    if slave.gtid_mode:
        print("\tExecuted GTID:\t{0}".format(slave.exe_gtid))
def sequence_vectorize(train_texts, val_texts):
    """Vectorizes texts as sequence vectors.

    1 text = 1 sequence vector with fixed length.

    # Arguments
        train_texts: list, training text strings.
        val_texts: list, validation text strings.

    # Returns
        x_train, x_val, word_index: vectorized training and validation
            texts and word index dictionary.
    """
    # The vocabulary is learned from the training split only.
    tokenizer = text.Tokenizer(num_words=TOP_K)
    tokenizer.fit_on_texts(train_texts)

    x_train = tokenizer.texts_to_sequences(train_texts)
    x_val = tokenizer.texts_to_sequences(val_texts)

    # Pad/truncate to the longest training sequence, capped at
    # MAX_SEQUENCE_LENGTH; both padding and truncation act at the start.
    max_length = min(len(max(x_train, key=len)), MAX_SEQUENCE_LENGTH)
    x_train = sequence.pad_sequences(x_train, maxlen=max_length)
    x_val = sequence.pad_sequences(x_val, maxlen=max_length)
    return x_train, x_val, tokenizer.word_index
def fix_wcs_full(img, coords='radec.coo', iters=1):
    """
    Try to improve the WCS solution of a final stacked image.

    Parameters
    ----------
    img : str
        Name of image
    coords : str
        Name of coordinate file
    iters : int
        Number of desired iterations of ``msccmatch``. It is still being
        tested, but one might be all that is necessary, especially if using the
        Gaia catalog.

    Note
    ----
    This function is set up to use the files in the ``illcor`` directory. The
    following are the parameters used by ``msccmatch``.

    - verbose='yes'
    - usebpm='no'
    - nsearch=250
    - search=30
    - rsearch=0.2
    - cfrac=.5
    - csig=0.1
    - nfit=5
    - rms=1.0
    - maxshif=5.0
    - fitgeom="general"
    - update='yes'
    - interac='yes'
    - fit='no',
    - accept='yes'
    - Stdout=1
    """
    iraf.mscred(_doprint=0)
    iraf.unlearn(iraf.mscred.msccmatch)
    # otaext = {'33':'[1]','34':'[2]','44':'[3]','43':'[4]','42':'[5]','32':'[6]','22':'[7]','23':'[8]','24':'[9]'}
    for i in range(iters):
        fix = iraf.msccmatch(input=img,
                             coords=coords,
                             usebpm='no',
                             verbose='yes',
                             nsearch=250,
                             search=30,
                             rsearch=0.2,
                             cfrac=.5,
                             csig=0.1,
                             nfit=5,
                             rms=1.0,
                             maxshif=5.0,
                             fitgeom="general",
                             update='yes',
                             interac='yes',
                             fit='no',
                             accept='yes',
                             Stdout=1)
        # BUG FIX: the original call passed two positional arguments to
        # tqdm.write() -- the second landed in tqdm.write's ``file``
        # parameter -- and dereferenced ``img.f`` on a plain string.
        # Build a single message instead.
        tqdm.write('fixing WCS for ' + img + ', iter = ' + repr(i))
        # Echo the last few lines of msccmatch output (the fit summary).
        for line in fix[-6:-1]:
            tqdm.write(line)
def parse_preferences(file, preferences):
    """Parse a preferences file into the preferences dictionary.

    Lines starting with '!' or '#' and blank lines are ignored. Every other
    line is expected to look like ``key: value``; the value is validated via
    ``check()`` and stored with ``add_preference()``.

    :param file: path of the preferences file to read
    :param preferences: dictionary the preferences are collected into
    :return: the preferences dictionary
    """
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(file, "r") as handle:
        for line in handle:
            # all lower case
            line = line.lower()
            # ignore blank lines and comment lines (blank check first so
            # indexing line[0] is always safe)
            if not line.split() or line[0] == "!" or line[0] == "#":
                continue
            # Split on the FIRST ':' only, so values may themselves
            # contain colons (the original split broke on them).
            key, _, value = line.partition(":")
            key = key.strip()
            value = check(key, value.strip())
            add_preference(key, value)
    return preferences
def func_hex2str(*args):
    """Hex -> string: decode the bytes from ``func_hex2byte(*args)`` as UTF-8."""
    return func_hex2byte(*args).decode('utf-8')
def setup_indexes(db_name, collection):
    """
    Ensure that indexes in eduid_am.attributes collection are correctly setup.
    To update an index add a new item in indexes and remove the previous version.
    """
    # Format: 'index-name': {'key': [('key', 1)], 'param1': True, 'param2': False}
    # http://docs.mongodb.org/manual/reference/method/db.collection.ensureIndex/
    indexes = {
        'mail-index-v2': {'key': [('mail', 1)], 'unique': True, 'sparse': True},
        'eppn-index-v1': {'key': [('eduPersonPrincipalName', 1)], 'unique': True},
        'norEduPersonNIN-index-v2': {'key': [('norEduPersonNIN', 1)], 'unique': True, 'sparse': True},
        'mobile-index-v1': {'key': [('mobile.mobile', 1), ('mobile.verified', 1)]},
        'mailAliases-index-v1': {'key': [('mailAliases.email', 1), ('mailAliases.verified', 1)]},
    }
    mongo_uri = celery.conf.get('MONGO_URI')
    userdb = UserDB(mongo_uri, db_name=db_name, collection=collection)
    userdb.setup_indexes(indexes)
def book_link_retrieval(soup_object):
    """Extract book titles and links from a parsed search-results page.

    For every matching book anchor: print the title and link, fetch the
    book's own page to read the author's name, and hand the triple to
    ``book_meta_data``.

    soup_object : Soup object having parsed HTML for the user query
    """
    # Removed unused locals from the original (`global sp, dir_link` and
    # `i, retries = 0, 3` were never read).
    # NOTE(review): the outer loop re-scans every <h3> once per <td>, so the
    # same book may be processed multiple times -- confirm before changing.
    for _ in soup_object.find_all("td"):
        for td in soup_object.find_all("h3"):
            for ts in td.find_all("a"):
                title = ts.get_text()
            for ts in td.find_all('a', attrs={'href': re.compile("^/book/")}):
                ref = ts.get('href')
                book_link = "https://b-ok.asia" + ref
                print("Title of the book: " + title + "\n")
                print("Book link: " + book_link)
                print("=" * 40)
                # Fetch the book page to read the author's name.
                data = requests.request('get', book_link)  # any website
                s = BeautifulSoup(data.text, 'html.parser')
                author = s.find('a', {'class': 'color1'}).get_text()
                # calling bookMetaData for fetching book details
                book_meta_data(book_link, title, author)
def warm_since():
    """Return the date when the current warm version of the fn started.

    The value is the mtime of the warm file; None is returned (implicitly)
    when the fn is not warm.
    """
    if is_warm() != 'warm':
        return None
    return os.path.getmtime(warm_file())
def test_opt_level():
    """Test equivalence of nodes that pre-process to the same text."""
    # Default build (no options) versus an opt_level=3 build of the same IR.
    unoptimized = llvm.BuildProgramGraph(SIMPLE_IR)
    options = program_graph_options_pb2.ProgramGraphOptions(
        opt_level=0,
    )
    options.opt_level = 3
    optimized = llvm.BuildProgramGraph(SIMPLE_IR, options)
    # Optimization should shrink the graph.
    assert len(optimized.node) < len(unoptimized.node)
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
def triage(routes: List[Route]) -> Route:
    """
    This function will be used to determine which route to use

    Scores each route by its number of literal (non-regex-group) path
    segments and returns the route with the highest count.  Note: this pops
    ``stored_route`` from each route dict.
    """
    static_counts = {}
    for idx, candidate in enumerate(routes):
        stored_route: StoredRoute = candidate.pop("stored_route")
        segments = [seg for seg in stored_route["path"].split("/") if seg]
        literal = [seg for seg in segments
                   if not seg.startswith("(?P") and not seg.endswith(")")]
        static_counts[idx] = len(literal)
    by_count = {count: routes[idx] for idx, count in static_counts.items()}
    return by_count[max(by_count)]
def eval(**args):
    """
    Evaluate selected model

    NOTE(review): this name shadows the builtin ``eval``; it is kept because
    external callers import it by name.

    Args:
        seed       (Int): Integer indicating set seed for random state
        save_dir   (String): Top level directory to generate results folder
        model      (String): Name of selected model
        dataset    (String): Name of selected dataset
        exp        (String): Name of experiment
        load_type  (String): Keyword indicator to evaluate the testing or validation set
        pretrained (Int/String): Int/String indicating loading of random, pretrained or saved weights

    Return:
        None
    """
    print("Experimental Setup: ")
    pprint.PrettyPrinter(indent=4).pprint(args)

    # Timestamped results layout: <save_dir>/<model>/<dataset>_<exp>_<date>/
    d = datetime.datetime.today()
    date = d.strftime('%Y%m%d-%H%M%S')
    result_dir = os.path.join(args['save_dir'], args['model'], '_'.join((args['dataset'],args['exp'],date)))
    log_dir = os.path.join(result_dir, 'logs')
    save_dir = os.path.join(result_dir, 'checkpoints')
    run_id = args['exp']

    use_wandb = args.get('use_wandb', False)

    if not args['debug']:
        if use_wandb:
            wandb.init(project=args['dataset'], name=args['exp'], config=args, tags=args['tags'])
            #Replace result dir with wandb unique id, much easier to find checkpoints
            run_id = wandb.run.id
        if run_id:
            result_dir = os.path.join(args['save_dir'], args['model'], '_'.join((args['dataset'], run_id)))
            log_dir = os.path.join(result_dir, 'logs')
            save_dir = os.path.join(result_dir, 'checkpoints')
        os.makedirs(result_dir, exist_ok=True)
        os.makedirs(log_dir, exist_ok=True)
        os.makedirs(save_dir, exist_ok=True)

        # Save copy of config file
        with open(os.path.join(result_dir, 'config.yaml'),'w') as outfile:
            yaml.dump(args, outfile, default_flow_style=False)

    # Tensorboard Element
    writer = SummaryWriter(log_dir)

    # Check if GPU is available (CUDA)
    num_gpus = args['num_gpus']
    device = torch.device("cuda:0" if num_gpus > 0 and torch.cuda.is_available() else "cpu")
    print('Using {}'.format(device.type))

    # Load Network
    model = create_model_object(**args).to(device)
    model_obj = model

    if device.type == 'cuda' and num_gpus > 1:
        device_ids = list(range(num_gpus)) #number of GPUs specified
        model = nn.DataParallel(model, device_ids=device_ids)
        model_obj = model.module #Model from DataParallel object has to be accessed through module
        print('GPUs Device IDs: {}'.format(device_ids))

    # Load Data
    loader = data_loader(**args, model_obj=model_obj)

    if args['load_type'] == 'train_val':
        eval_loader = loader['valid']
    elif args['load_type'] == 'train':
        eval_loader = loader['train']
    elif args['load_type'] == 'test':
        eval_loader = loader['test']
    else:
        sys.exit('load_type must be valid or test for eval, exiting')

    # Restore saved weights when a checkpoint path is given.
    if isinstance(args['pretrained'], str):
        ckpt = load_checkpoint(args['pretrained'])
        ckpt_keys = list(ckpt.keys())
        if ckpt_keys[0].startswith('module.'): #if checkpoint weights are from DataParallel object
            # Strip the 'module.' prefix so a non-parallel model can load them.
            for key in ckpt_keys:
                ckpt[key[7:]] = ckpt.pop(key)
        model_obj.load_state_dict(ckpt, strict=False)

    # Training Setup
    params = [p for p in model.parameters() if p.requires_grad]

    acc_metric = Metrics(**args, result_dir=result_dir, ndata=len(eval_loader.dataset), logger=wandb if use_wandb else None, run_id=run_id)
    acc = 0.0

    # Setup Model To Evaluate
    model.eval()

    # Per-video state carried across batches: previous frames' heatmaps are
    # used as priors for the next frame's prediction.
    seq_data = {'vid_id':-1}
    with torch.no_grad():
        for step, data in enumerate(eval_loader):
            x_input = data['data']
            annotations = data['annots']
            B,_,T,H,W = x_input.shape #Expect: B,3,T,384,288 (T here is not time, it's all of the objects detected on that frame)

            outputs = []
            feats = []
            uniq_frame_ids = list(set([a.item() for a in annotations['frame_ids']]))
            for b in range(B):
                vid_id = annotations['vid_id'][b]
                frame_id = annotations['frame_ids'][b].item()
                curr_crop = annotations['input_crop'][b]
                frame_size = annotations['raw_frame_size'][b]
                heatmap_size = args['heatmap_size']
                num_joints = args['num_joints']
                #print('frame id: {}'.format(frame_id))

                # Reset cached priors whenever a new video begins.
                if seq_data['vid_id'] != vid_id:
                    print('New vid id: {}'.format(vid_id))
                    seq_data = {} #No need to save between sequences
                    seq_data['vid_id'] = vid_id
                    seq_data['prev_heatmap'] = {}
                    last_frame_id = None
                    flag_new_frame = False
                    torch.cuda.empty_cache()

                output = []
                image_crops = x_input[b].permute(1,0,2,3).to(device) #from (B,3,T,H,W) to (T,3,H,W), operate on each crops as mini-batch
                if args['model'] == 'FlowTrack':
                    # FlowTrack path: single forward pass, no heatmap priors.
                    out = model.forward(image_crops.unsqueeze(2)).squeeze(1)
                    if isinstance(out, tuple):
                        heatmap = out[0]
                        vis_feat = out[1]
                    else:
                        heatmap = out
                else:
                    # Prior-conditioned path: gather previous frame's heatmaps
                    # (if any) and feed them alongside the current crop.
                    prev_heatmap = []
                    if not last_frame_id is None and last_frame_id != frame_id:
                        flag_new_frame = True
                    if last_frame_id is None or not flag_new_frame: #First frame of video
                        prev_hms = []
                    elif last_frame_id != frame_id: #New frame of video
                        prev_hms = seq_data['prev_heatmap'][last_frame_id]
                        #Delete other prev_heatmaps
                        for key in list(seq_data['prev_heatmap'].keys()):
                            if key not in [frame_id, last_frame_id]:
                                del seq_data['prev_heatmap'][key]

                    #############################
                    # NOTE(review): 'stride' in annotations distinguishes the
                    # affine-transform (presumably human-pose) path from the
                    # crop/pad (hand-pose) path -- confirm against the loader.
                    if len(prev_hms) > 0 and 'stride' in annotations:
                        batch, joints, height, width = prev_hms.shape
                        input_height, input_width = args['final_shape']
                        prev_hms = kornia.warp_affine(prev_hms.to(device), annotations['trans'][b].repeat(batch,1,1).float().to(device),\
                                dsize=(input_height, input_width), align_corners=True)
                        prev_heatmap = F.interpolate(prev_hms, size=(heatmap_size[1], heatmap_size[0]))

                        #No point in adding prior if it's all zeros
                        keep_prior = torch.max(prev_heatmap.view(batch,-1), dim=-1)[0] != 0
                        prev_heatmap = prev_heatmap[keep_prior]
                        #torch.cuda.empty_cache() #clear memory of deleted tensors
                    else:
                        for hm_ in prev_hms:
                            # Mean of per-joint peak responses as a confidence score.
                            scr = torch.mean(torch.max(hm_.contiguous().view(num_joints, -1), dim=-1)[0],dim=0)
                            if scr < args['min_gauss_peak_eval']:
                                prev_heatmap.append(torch.zeros(num_joints, heatmap_size[1], heatmap_size[0]).to(device))
                            else:
                                pl,pt,pr,pb = annotations['padding'][b,0].tolist() #current image padding
                                x1,y1,x2,y2 = curr_crop[0].int().tolist() #current image hand crop
                                #add current image padding
                                pad_tensor = nn.ConstantPad2d((pl,pr,pt,pb), 0.0) #pad_left, pad_right, pad_top, pad_bot
                                hm_ = pad_tensor(hm_) #current hand crop w/ padding (only right and bottom padding need to be added)
                                #temp1 = hm_.clone()
                                hm_ = hm_[:,int(y1):int(y2),int(x1):int(x2)] #current crop position
                                #temp2 = hm_.clone()
                                hm_ = F.interpolate(hm_[:,None], size=heatmap_size)[:,0] #resized to heatmap size

                                #No point in adding prior if it's all zeros
                                if torch.max(hm_) > 0:
                                    prev_heatmap.append(hm_)
                        if len(prev_heatmap) > 0:
                            prev_heatmap = torch.stack(prev_heatmap).to(device)
                    #############################

                    # Repeat the crop once per usable prior; the best-scoring
                    # prediction is selected after the forward pass.
                    num_priors = 1
                    if len(prev_heatmap) > 0:
                        '''
                        import matplotlib.pyplot as plt
                        for p_idx in range(len(prev_heatmap)):
                            plt.subplot(3,3,p_idx+1)
                            plt.imshow(torch.max(prev_heatmap[p_idx], dim=0)[0].cpu().numpy())
                            plt.title('Prior {}'.format(p_idx))
                        plt.show()
                        '''
                        num_priors = prev_heatmap.shape[0]
                        image_crops = image_crops.repeat(num_priors,1,1,1)
                    else:
                        prev_heatmap = None

                    out = model.forward_one(image_crops, prev_heatmap, {'frame_id': frame_id, 'batch_num':b})
                    if isinstance(out, tuple):
                        heatmap = out[0]
                        vis_feat = out[1]
                    else:
                        heatmap = out

                    #If multiple priors, pick prediction with highest average confidence
                    if num_priors > 1:
                        #max_conf_idx = torch.argmax(torch.mean(heatmap, dim=[1,2,3]))
                        _heatmap = heatmap.view(num_priors, heatmap.shape[1], -1)
                        max_conf_idx = torch.argmax(torch.mean(torch.max(_heatmap, dim=-1)[0],dim=1))
                        heatmap = heatmap[None,max_conf_idx]
                        if isinstance(out, tuple):
                            vis_feat = vis_feat[None, max_conf_idx]

                    ####Un-project output heatmap onto full frame before storing
                    #makes it easier to use for the next frame's bbox crop
                    x1,y1,x2,y2 = curr_crop[0].int().tolist()
                    img_width, img_height = frame_size
                    crop_h = y2 - y1
                    crop_w = x2 - x1
                    if 'stride' in annotations: #Human pose
                        input_height, input_width = args['final_shape']
                        hm_ = F.interpolate(heatmap, size=(input_height, input_width)) #resize from heatmap to input dimensions
                        hm_ = kornia.warp_affine(hm_.cpu(), annotations['inv_trans'][b].float(), dsize=(int(img_width), int(img_height)), align_corners=True) #inverse transform to full image
                    else: #Hand pose
                        pl,pt,pr,pb = annotations['padding'][b,0].tolist()
                        hm_ = F.interpolate(heatmap, size=(crop_h,crop_w))
                        pad_tensor = nn.ConstantPad2d((x1, max(0, ((img_width+pl+pr)-x2)), y1, max(0, ((img_height+pt+pb)-y2))), 0.0) #pad_left, pad_right, pad_top, pad_bot
                        hm_ = pad_tensor(hm_) #prior hand crop reprojected onto full frame

                        _pb = (img_height+pt) if not pb else -pb #check if non-zero, and adjust for array slicing
                        _pr = (img_width+pl) if not pr else -pr
                        hm_ = hm_[:,:,pt:_pb,pl:_pr] #prior hand crop w/o padding
                    ########

                    # Accumulate the full-frame heatmap(s) for this frame so
                    # the next frame of the same video can use them as priors.
                    if frame_id in seq_data['prev_heatmap']:
                        seq_data['prev_heatmap'][frame_id] = torch.cat((seq_data['prev_heatmap'][frame_id], hm_))
                    else:
                        seq_data['prev_heatmap'][frame_id] = hm_

                    last_frame_id = frame_id

                outputs.append(heatmap) #append (T,D,H_,W_)
                if isinstance(out, tuple):
                    feats.append(vis_feat)

            outputs = torch.stack(outputs)
            if len(feats) > 0:
                feats = torch.stack(feats)
                outputs = {'outputs':outputs, 'feat':feats}

                if args['save_feat']:
                    # Persist per-video visual features alongside GT keypoints
                    # and ids, appending to an existing .pkl when present.
                    feats = outputs['feat'].cpu().data
                    gt_key_pts = annotations['key_pts']
                    obj_ids = annotations['obj_ids']
                    track_ids = annotations['track_ids']
                    vid_id = annotations['vid_id']
                    load_type = annotations['load_type'][0]

                    feat_dir = os.path.join(args['save_feat_dir'], args['model']+'-'+args['exp'], load_type)
                    os.makedirs(feat_dir, exist_ok=True)
                    for vid in set(vid_id):
                        idx = [i for i, item in enumerate(vid_id) if item == vid]
                        feat = feats[idx]
                        key_pts = gt_key_pts[idx]
                        track = track_ids[idx]
                        oid = obj_ids[idx]

                        filename = os.path.join(feat_dir,vid+'.pkl')
                        if os.path.exists(filename):
                            vid_data = torch.load(filename)

                            vid_data['feat'] = torch.cat((vid_data['feat'], feat))
                            vid_data['gt_key_pts'] = torch.cat((vid_data['gt_key_pts'], key_pts))
                            vid_data['track_id'] = torch.cat((vid_data['track_id'], track))
                            vid_data['object_ids'] = torch.cat((vid_data['object_ids'], oid))
                        else:
                            vid_data = {'feat':feat, 'gt_key_pts':key_pts, 'track_id':track, 'object_ids':oid}

                        torch.save(vid_data, filename)

                outputs = outputs['outputs']

            acc = acc_metric.get_accuracy(outputs, annotations)

            if step % 100 == 0:
                print('Step: {}/{} | {} acc: {:.4f}'.format(step, len(eval_loader), args['load_type'], acc))

    print('Accuracy of the network on the {} set: {:.3f} %\n'.format(args['load_type'], 100.*acc))

    if not args['debug']:
        writer.add_scalar(args['dataset']+'/'+args['model']+'/'+args['load_type']+'_accuracy', 100.*acc)

    # Close Tensorboard Element
    writer.close()
def submit_production(production_yaml, dry_run=False, error_if_not_available=False):
    """
    Interprets a production_yaml file and submits the respective nights for processing
    within the defined production.

    Args:
        production_yaml, str. Pathname of the yaml file that defines the production.
        dry_run, bool. Default is False. Should the jobs written to the processing table actually be submitted
            for processing.
        error_if_not_available, bool. Default is False. Raise an error if the required exposure table doesn't
            exist, otherwise prints an error and returns.

    Returns:
        None.
    """
    if not os.path.exists(production_yaml):
        raise IOError(f"Prod Yaml file doesn't exist: {production_yaml} not found. Exiting.")
    # Use a context manager so the config file handle is closed promptly
    # (the original left it open).
    with open(production_yaml, 'rb') as yaml_file:
        conf = yaml.safe_load(yaml_file)
    specprod = str(conf['name']).lower()
    specprod = verify_variable_with_environment(var=specprod, var_name='specprod', env_name='SPECPROD')

    # Optional settings with their defaults; the literal string 'none'
    # means "no reservation".
    if 'reservation' in conf:
        reservation = str(conf['reservation'])
        if reservation.lower() == 'none':
            reservation = None
    else:
        reservation = None
    queue = conf['queue'] if 'queue' in conf else 'realtime'
    overwrite_existing = conf['OVERWRITEEXISTING'] if 'OVERWRITEEXISTING' in conf else False

    print(f'Using queue: {queue}')
    if reservation is not None:
        print(f'Using reservation: {reservation}')
    if overwrite_existing:
        print("Ignoring the fact that files exists and submitting those nights anyway")

    all_nights = get_all_nights()
    non_survey_nights = []
    for night in all_nights:
        survey = assign_survey(night, conf)
        if survey is None:
            non_survey_nights.append(night)
            continue
        elif survey in conf['ProcessData'] and conf['ProcessData'][survey] is False:
            print(f'Asked not to process survey: {survey}, Not processing night={night}.', '\n\n\n')
            continue
        elif survey in conf['SkipNights'] and night in conf['SkipNights'][survey]:
            print(f'Asked to skip night={night} (in survey: {survey}). Skipping.', '\n\n\n')
            continue
        else:
            print(f'Processing {survey} night: {night}')
            submit_night(night, proc_obstypes=None, dry_run=dry_run, queue=queue, reservation=reservation,
                         overwrite_existing=overwrite_existing, error_if_not_available=error_if_not_available)
            print(f"Completed {night}. Sleeping for 30s")
            # Throttle submissions so the scheduler isn't flooded.
            time.sleep(30)

    print("Skipped the following nights that were not assigned to a survey:")
    print(non_survey_nights, '\n\n\n')
    print("All nights submitted")
def upsert_object(data, cursor=None):
    """
    Upsert an object in the repository.

    Inserts a row into ``objects`` or, on a (pid_id, namespace) conflict,
    updates the existing row.  The cursor is returned with the row id
    available via RETURNING.
    """
    cursor = check_cursor(cursor)
    data = _set_object_defaults(data, cursor)

    # Fully parameterized SQL; all values come from the ``data`` mapping.
    upsert_sql = '''
        INSERT INTO objects (pid_id, namespace, state, owner, label, versioned,
                             log, created, modified)
        VALUES (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s, %(label)s,
                %(versioned)s, %(log)s, %(created)s, %(modified)s)
        ON CONFLICT (pid_id, namespace) DO UPDATE
            SET (pid_id, namespace, state, owner, label, versioned, log,
                 modified) = (%(pid_id)s, %(namespace)s, %(state)s, %(owner)s,
                 %(label)s, %(versioned)s, %(log)s, %(modified)s)
        RETURNING id
    '''
    cursor.execute(upsert_sql, data)
    logger.info("Upserted into namespace: %s with PID ID: %s.",
                data['namespace'], data['pid_id'])

    return cursor
def sparsity_line(M, tol=1.0e-3, device='cpu'):
    """Get the line (row) sparsity of M, in percent.

    Attributes:
        M: Tensor - the matrix.
        tol: Scalar, optional - threshold below which entries count as zero.
        device: device, cpu or gpu
    Returns:
        sparsity: Scalar (%) - fraction of rows whose sum is zero after
            thresholding, times 100.
    """
    if type(M) is not torch.Tensor:
        M = torch.as_tensor(M, device=device)
    # Zero-out near-zero entries, then sum each row.
    thresholded = torch.where(torch.abs(M) < tol, torch.zeros_like(M), M)
    row_sums = torch.sum(thresholded, 1)
    n_active_rows = len(row_sums.nonzero())
    return (1.0 - n_active_rows / thresholded.shape[0]) * 100
def geospace(lat0, lon0, length, dx, strike):
    """Return a series of evenly spaced points (geographic coordinates) along a strike line."""
    n_points = length // dx + 1
    # Each local index is converted to lat/lon along the strike direction.
    points = [convert_local_idx_to_geo(i, lat0, lon0, length, dx, strike)
              for i in range(n_points)]
    return np.array(points)
def linearOutcomePrediction(zs, params_pred, scope=None):
    """
    Predict outcomes linearly from latent representations Z.

    zs : batch of z-vectors (encoder states, a matrix).
    params_pred : (coefficients, bias) of the linear readout.

    Returns ``zs @ coefficients + bias``.
    """
    with s2s.variable_scope.variable_scope(scope or "outcomepred", reuse=True):
        weights, bias = params_pred
        predictions = tf.add(tf.matmul(zs, weights), bias)
    return predictions
def _check_flag_value(flag_value):
"""
Search for a given flag in a given blockette for the current record.
This is a utility function for set_flags_in_fixed_headers and is not
designed to be called by someone else.
This function checks for valid entries for a flag. A flag can be either
* ``bool`` value to be always True or False for all the records
* ``datetime`` or ``UTCDateTime`` value to add a single 'INSTANT' datation
(see below)
* ``dict`` to allow complex flag datation
** The dict keys may be the keyword INSTANT to mark arbitrarly short
duration flags, or the keyword DURATION to mark events that span across
time.
** The dict values are:
*** for the INSTANT value, a single UTCDateTime or datetime object, or a
list of these datation objects
*** for the DURATION value, either a list of
[start1, end1, start2, end2, ...] or a list of tuples
[(start1, end1), (start2, end2), ...]
This function then returns all datation events as a list of tuples
[(start1, end1), ...] to ease the work of _convert_flags_to_raw_byte. Bool
values are unchanged, instant events become a tuple
(event_date, event_date).
If the flag value is incorrect, a ValueError is raised with a (hopefully)
explicit enough message.
:type flag_value: bool or dict
:param flag_value: the flag value to check.
:return: corrected value of the flag.
:raises: If the flag is not the one expected, a ``ValueError`` is raised
"""
if isinstance(flag_value, bool):
# bool allowed
corrected_flag = flag_value
elif isinstance(flag_value, datetime) or \
isinstance(flag_value, UTCDateTime):
# A single instant value is allowed
utc_val = UTCDateTime(flag_value)
corrected_flag = [(utc_val, utc_val)]
elif isinstance(flag_value, collections.Mapping):
# dict allowed if it has the right format
corrected_flag = []
for flag_key in flag_value:
if flag_key == "INSTANT":
# Expected: list of UTCDateTime
inst_values = flag_value[flag_key]
if isinstance(inst_values, datetime) or \
isinstance(inst_values, UTCDateTime):
# Single value : ensure it's UTCDateTime and store it
utc_val = UTCDateTime(inst_values)
corrected_flag.append((utc_val, utc_val))
elif isinstance(inst_values, collections.Sequence):
# Several instant values : check their types
# and add each of them
for value in inst_values:
if isinstance(value, datetime) or \
isinstance(value, UTCDateTime):
utc_val = UTCDateTime(value)
corrected_flag.append((utc_val, utc_val))
else:
msg = "Unexpected type for flag duration " +\
"'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
else:
msg = "Unexpected type for flag duration 'INSTANT' %s"
raise ValueError(msg % str(type(inst_values)))
elif flag_key == "DURATION":
# Expecting either a list of tuples (start, end) or
# a list of (start1, end1, start1, end1)
dur_values = flag_value[flag_key]
if isinstance(dur_values, collections.Sequence):
if len(dur_values) != 0:
# Check first item
if isinstance(dur_values[0], datetime) or \
isinstance(dur_values[0], UTCDateTime):
# List of [start1, end1, start2, end2, etc]
# Check len
if len(dur_values) % 2 != 0:
msg = "Expected even length of duration " +\
"values, got %s"
raise ValueError(msg % len(dur_values))
# Add values
duration_iter = iter(dur_values)
for value in duration_iter:
start = value
end = dur_values[dur_values.index(value) + 1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg % str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
# Check duration validity
start = UTCDateTime(start)
end = UTCDateTime(end)
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end of " +\
"duration after its start"
raise ValueError(msg)
next(duration_iter)
elif isinstance(dur_values[0], collections.Sequence):
# List of tuples (start, end)
for value in dur_values:
if not isinstance(value, collections.Sequence):
msg = "Incorrect type %s for flag duration"
raise ValueError(msg % str(type(value)))
elif len(value) != 2:
msg = "Incorrect len %s for flag duration"
raise ValueError(msg % len(value))
else:
start = value[0]
end = value[1]
# Check start type
if not isinstance(start, datetime) and \
not isinstance(start, UTCDateTime):
msg = "Incorrect type for duration " +\
"start %s"
raise ValueError(msg %
str(type(start)))
# Check end type
if not isinstance(end, datetime) and \
not isinstance(end, UTCDateTime):
msg = "Incorrect type for duration " +\
"end %s"
raise ValueError(msg % str(type(end)))
if start <= end:
corrected_flag.append((start, end))
else:
msg = "Flag datation: expected end " +\
"of duration after its start"
raise ValueError(msg)
# Else: len(dur_values) == 0, empty duration list:
# do nothing
else:
msg = "Incorrect DURATION value: expected a list of " +\
"tuples (start, end), got %s"
raise ValueError(msg % str(type(dur_values)))
else:
msg = "Invalid key %s for flag value. One of " +\
"'INSTANT', 'DURATION' is expected."
raise ValueError(msg % flag_key)
else:
msg = "Invalid type %s for flag value. Allowed values " +\
"are bool or dict"
raise ValueError(msg % str(type(flag_value)))
return corrected_flag | 5,326,123 |
def logout():
    """ Logout a user by clearing their session keys and redirecting home. """
    for key in ('user_id', 'player_id'):
        session.pop(key, None)
    return redirect(url_for('index'))
def test_request_response_has_status_code():
    """A RequestResponseStub exposes the expected status_code, defaulting to 200."""
    assert str(RequestResponseStub(status_code=301).status_code) == '301'
    assert str(RequestResponseStub().status_code) == '200'
def get_available_gpus():
    """Return the names of all locally available GPU devices."""
    devices = device_lib.list_local_devices()
    return [dev.name for dev in devices if dev.device_type == "GPU"]
def download_config(impdb: str, branch: str) -> None:
    """Download the config from Github into the temp directory.

    :param impdb: name of the database whose config tables are fetched
    :param branch: git branch of the config repository to download from
    """
    # NOTE: this local name shadows the popular ``requests`` library name.
    requests = Request(META_URL.format(branch, impdb))
    response = requests.get()
    meta = json.loads(response.read())
    tables = meta["tables"]
    # Remember which commit the branch pointed at before downloading tables.
    sha = get_git_branch_hash(branch)
    # In case we push a new config version to github when the user is downloading
    while True:
        configs = {"_meta": meta}
        for table in tables:
            requests = Request(TABLE_URL.format(branch, impdb, table))
            response = requests.get()
            config = json.loads(response.read())
            configs[table] = config
        # Retry until the branch head is unchanged across the whole download.
        # NOTE(review): ``meta`` itself is fetched once outside the loop, so a
        # concurrent push could leave a stale ``_meta`` — confirm intent.
        sha_check = get_git_branch_hash(branch)
        if sha_check == sha:
            break
        sha = sha_check
    # Replace any previously cached copy atomically-ish: remove, recreate, write.
    path = config_directory()
    if (path / branch / impdb).exists():
        rmtree(path / branch / impdb)
    (path / branch / impdb).mkdir(parents=True)
    for fname, val in configs.items():
        with (path / branch / impdb / f"{fname}.json").open("w") as f:
            jdump(val, f)
    # Record the commit hash the cached config corresponds to.
    with (path / branch / impdb / "_hash").open("w") as f:
        f.write(sha)
def runNoReLUSRModel(projectdir=projectdir,outdir=projectdir+'data/results/MAIN/srModelPredictionAccuracies/SRActFlowNoReLU/',
                     nhidden=10,computeFC=False,ncomponents=500,nproc=10):
    """
    No ReLU model - remove nonlinearity in hidden area (see thresh=None)
    """
    #### Don't change these parameters -- override
    nhidden = 10
    thresh = None  # Don't include threshold
    ####
    pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)  # Make sure directory exists
    Model = SRModels.Model(projectdir=projectdir,n_hiddenregions=nhidden,randomize=False)
    #### Only compute FC if it has not been computed before -- computationally intensive process
    if computeFC:
        Model.computeGroupFC(n_components=ncomponents,nproc=nproc)
    #### Load real activations onto model
    Model.loadRealMotorResponseActivations()
    #### Load group FC weights as model weights
    Model.loadModelFC()
    #### Run SRActFlow simulations on entire group
    actflow_rh, actflow_lh = Model.simulateGroupActFlow(thresh=thresh,nproc=nproc)
    #### Evaluate how well actflow performs relative to actual activations:
    # for each hemisphere run a real decoding and a permutation-null decoding.
    nbootstraps = 1000
    hemispheres = (('RH', actflow_rh, Model.data_task_rh),
                   ('LH', actflow_lh, Model.data_task_lh))
    for hemi, predicted, actual in hemispheres:
        print('Run decoding on %s actflow predictions...' % hemi)
        Model.actflowDecoding(predicted, actual, outdir + '%s_decoding.txt' % hemi,
                              nbootstraps=nbootstraps, nproc=nproc, null=False, verbose=True)
        print('Run null (permutation) decoding on %s actflow predictions...' % hemi)
        Model.actflowDecoding(predicted, actual, outdir + '%s_null_decoding.txt' % hemi,
                              nbootstraps=nbootstraps, nproc=nproc, null=True, verbose=True)
def configure_ident_username(keystone):
    """Request credentials for the 'manila' service user from the
    Identity Service.
    """
    keystone.request_credentials('manila')
def tgamma(x):
    """'tgamma' function taking into account the uncertainties.

    Objects carrying a ``__tgamma__`` method (uncertainty-aware values)
    delegate to it; plain numbers fall back to ``math.gamma``.
    """
    custom = getattr(x, '__tgamma__', None)
    return custom() if custom else math.gamma(x)
def detect_peaks_by_channel(traces, peak_sign, abs_threholds, n_shifts):
    """Detect peaks using the 'by channel' method.

    A sample is a peak when it exceeds its channel threshold and is a local
    extremum within +/- n_shifts samples. Returns (sample_indices,
    channel_indices) in the coordinates of the full ``traces`` array.
    """
    center = traces[n_shifts:-n_shifts, :]
    n_center = center.shape[0]

    def _local_extrema_mask(sign):
        # Build the boolean mask for one polarity ('pos' or 'neg').
        if sign == 'pos':
            mask = center > abs_threholds[None, :]
            for shift in range(n_shifts):
                mask &= center > traces[shift:shift + n_center, :]
                mask &= center >= traces[n_shifts + shift + 1:n_shifts + shift + 1 + n_center, :]
        else:
            mask = center < -abs_threholds[None, :]
            for shift in range(n_shifts):
                mask &= center < traces[shift:shift + n_center, :]
                mask &= center <= traces[n_shifts + shift + 1:n_shifts + shift + 1 + n_center, :]
        return mask

    if peak_sign == 'pos':
        peak_mask = _local_extrema_mask('pos')
    elif peak_sign == 'neg':
        peak_mask = _local_extrema_mask('neg')
    else:  # 'both'
        peak_mask = _local_extrema_mask('pos') | _local_extrema_mask('neg')

    # find peaks and shift indices back into full-trace coordinates
    peak_sample_ind, peak_chan_ind = np.nonzero(peak_mask)
    peak_sample_ind += n_shifts
    return peak_sample_ind, peak_chan_ind
def multi_gauss_psf_kernel(psf_parameters, BINSZ=0.02, NEW_BINSZ=0.02, **kwargs):
    """Create multi-Gauss PSF kernel.

    The three Gaussian components are specified via their amplitude at the
    center and their FWHM, under keys ``psf1`` .. ``psf3``.

    Parameters
    ----------
    psf_parameters : dict
        PSF parameters (``psf1``..``psf3``, each ``dict(ampl=..., fwhm=...)``)
    BINSZ : float (0.02)
        Pixel size used for the given parameters in deg.
    NEW_BINSZ : float (0.02)
        New pixel size in deg. Used to change the resolution of the PSF.

    Returns
    -------
    psf_kernel : `astropy.convolution.Kernel2D`
        Normalized PSF kernel

    Examples
    --------
    >>> psf_pars = dict()
    >>> psf_pars['psf1'] = dict(ampl=1, fwhm=2.5)
    >>> psf_pars['psf2'] = dict(ampl=0.06, fwhm=11.14)
    >>> psf_pars['psf3'] = dict(ampl=0.47, fwhm=5.16)
    >>> psf_kernel = multi_gauss_psf_kernel(psf_pars, x_size=51)
    """
    kernel = None
    for component in (1, 2, 3):
        pars = psf_parameters["psf{}".format(component)]
        # Convert FWHM to sigma, rescaled to the requested pixel size.
        sigma = gaussian_fwhm_to_sigma * pars["fwhm"] * BINSZ / NEW_BINSZ
        # Convert the central amplitude to an integrated amplitude.
        ampl = 2 * np.pi * sigma ** 2 * pars["ampl"]
        term = float(ampl) * Gaussian2DKernel(sigma, **kwargs)
        kernel = term if kernel is None else kernel + term
    kernel.normalize()
    return kernel
def show_stress(off_screen=None):
    """Load the beam result file and plot the 1st bending mode's
    node-averaged X stress."""
    beam_result = pymapdl_reader.read_binary(rstfile)
    print('Displaying node averaged stress in x direction for Mode 6')
    beam_result.plot_nodal_stress(5, 'x', off_screen=off_screen, n_colors=9)
def Wavefunction(  # type: ignore # pylint: disable=function-redefined
        param: List[List[int]],
        broken: Optional[Union[List[str], str]] = None) -> 'Wavefunction':
    """Build a wavefunction through the fqe namespace.

    Thin convenience wrapper around ``wavefunction.Wavefunction``.

    Args:
        param (List[List[int]]): parameters for the sectors
        broken (Union[List[str], str]): symmetry to be broken

    Returns:
        (Wavefunction): a wavefunction object meeting the criteria laid out
            in the calling arguments
    """
    return wavefunction.Wavefunction(param, broken=broken)
def extractFiles(comment):
    """Find all files in a comment.

    @param comment: The C{unicode} comment text.
    @return: A C{list} of file values from the comment, with no duplicates,
        in the order they appear in the comment.
    """
    matches = findall(FILE_REGEX, comment)
    return uniqueList(matches)
def showModelsStatic(ptcode,codes, vols, ss, mm, vs, showVol, clim, isoTh, clim2,
                     clim2D, drawMesh=True, meshDisplacement=True, drawModelLines=True,
                     showvol2D=False, showAxis=False, drawVessel=False, vesselType=1,
                     meshColor=None, **kwargs):
    """ show one to four models in multipanel figure.
    Input: arrays of codes, vols, ssdfs; params from show_models_static
    Output: axes, colorbars
    """
    # init fig
    f = vv.figure(1); vv.clf()
    # f.position = 0.00, 22.00, 1920.00, 1018.00
    mw = 5
    if drawMesh == True:
        lc = 'w'
        meshColor = meshColor
    else:
        lc = 'g'
    # create subplots
    # NOTE(review): for a tuple, ``codes == (codes[0], codes[1])`` is True
    # exactly when len(codes) == 2 (and similarly below); a list input always
    # falls through to the final else (single subplot) — confirm intent.
    if isinstance(codes, str): # if 1 ctcode, otherwise tuple of strings
        a1 = vv.subplot(111)
        axes = [a1]
    elif codes == (codes[0],codes[1]):
        a1 = vv.subplot(121)
        a2 = vv.subplot(122)
        axes = [a1,a2]
    elif codes == (codes[0],codes[1], codes[2]):
        a1 = vv.subplot(131)
        a2 = vv.subplot(132)
        a3 = vv.subplot(133)
        axes = [a1,a2,a3]
    elif codes == (codes[0],codes[1], codes[2], codes[3]):
        a1 = vv.subplot(141)
        a2 = vv.subplot(142)
        a3 = vv.subplot(143)
        a4 = vv.subplot(144)
        axes = [a1,a2,a3,a4]
    elif codes == (codes[0],codes[1], codes[2], codes[3], codes[4]):
        a1 = vv.subplot(151)
        a2 = vv.subplot(152)
        a3 = vv.subplot(153)
        a4 = vv.subplot(154)
        a5 = vv.subplot(155)
        axes = [a1,a2,a3,a4,a5]
    else:
        a1 = vv.subplot(111)
        axes = [a1]
    # per-panel volume rendering, labels and (optionally) model line drawing
    for i, ax in enumerate(axes):
        ax.MakeCurrent()
        vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
        vv.title('Model for LSPEAS %s - %s' % (ptcode[7:], codes[i]))
        t = show_ctvolume(vols[i], ss[i].model, axis=ax, showVol=showVol, clim=clim, isoTh=isoTh, **kwargs)
        label = pick3d(ax, vols[i])
        if drawModelLines == True:
            ss[i].model.Draw(mc='b', mw = mw, lc=lc)
    if showvol2D:
        for i, ax in enumerate(axes):
            t2 = vv.volshow2(vols[i], clim=clim2D, axes=ax)
    cbars = [] # colorbars
    if drawMesh:
        for i, ax in enumerate(axes):
            m = vv.mesh(mm[i], axes=ax)
            if meshDisplacement:
                # color-code the mesh by displacement and add a colorbar
                m.clim = clim2
                m.colormap = vv.CM_JET #todo: use colormap Viridis or Magma as JET is not linear (https://bids.github.io/colormap/)
                cb = vv.colorbar(ax)
                cbars.append(cb)
            elif meshColor is not None:
                if len(meshColor) == 1:
                    m.faceColor = meshColor[0] # (0,1,0,1)
                else:
                    m.faceColor = meshColor[i]
            else:
                m.faceColor = 'g'
    if drawVessel:
        for i, ax in enumerate(axes):
            v = showVesselMesh(vs[i], ax, type=vesselType)
    # common axis cosmetics for every panel
    for ax in axes:
        ax.axis.axisColor = 1,1,1
        ax.bgcolor = 25/255,25/255,112/255 # midnightblue
        # http://cloford.com/resources/colours/500col.htm
        ax.daspect = 1, 1, -1 # z-axis flipped
        ax.axis.visible = showAxis
    # set colorbar position
    for cbar in cbars:
        p1 = cbar.position
        cbar.position = (p1[0], 20, p1[2], 0.98) # x,y,w,h
    # bind rotate view and view presets [1,2,3,4,5]
    f = vv.gcf()
    f.eventKeyDown.Bind(lambda event: _utils_GUI.RotateView(event,axes,axishandling=False) )
    f.eventKeyDown.Bind(lambda event: _utils_GUI.ViewPresets(event,axes) )
    return axes, cbars
def rint_compute(input_x):
    """Round ``input_x`` to the nearest integer and cast the result back to
    the input dtype (rint compute implementation)."""
    rounded = akg.lang.cce.round(input_x)
    return akg.lang.cce.cast_to(rounded, input_x.dtype)
def _solequal(sol1, sol2, prec):
    """
    Compare two solutions component-wise with a given precision.
    Return True if they are equal.
    """
    equal = True
    for component_1, component_2 in zip(sol1, sol2):
        scalar_1 = np.ndim(component_1) == 0
        scalar_2 = np.ndim(component_2) == 0
        # A scalar can never match an array component.
        if scalar_1 != scalar_2:
            return False
        if not scalar_1:
            equal &= _dist(component_1, component_2) < prec
    return equal
def printFactor(f):
    """Print a factor's probability table on screen.

    argument
    `f`, a factor to print on screen
    """
    # Each row is the variable assignment (tuple key) plus its probability.
    rows = [list(key) + [prob] for key, prob in f['table'].items()]
    # Header is the factor's domain plus a 'Pr' probability column.
    headers = list(f['dom']) + ['Pr']
    print(tabulate(rows, headers=headers, tablefmt='orgtbl'))
def get_template_filepath(filename, basepath="templates"):
    """
    Resolve the directory holding the config templates, relative to where
    this module is stored.

    :param filename: (str) Template file name used to build the lookup path
    :param basepath: (str) Base directory to search for templates. Default: /templates
    :return: (str) Directory containing the template, if it exists
    :raises: (SystemExit) If the directory doesn't exist
    """
    module_dir = os.path.dirname(__file__)
    candidate = os.path.join(module_dir, basepath, filename)
    path = os.path.dirname(os.path.abspath(candidate))
    # Guard clause: bail out when the resolved directory is missing.
    if not os.path.isdir(path):
        raise SystemExit(f"Could not find template files in: {path}, bailing...")
    return path
def clip_rows(data, ord=2, L=1):
    """
    Rescale individual rows so that the norm of any row is at most L.
    Rows already within the bound are left untouched.
    """
    max_norm = get_max_norm(data, ord=ord)
    print("For order {0}, max norm is {1}".format(ord, max_norm))
    clipped = data.copy()
    n_modified = 0
    for row_idx, row in enumerate(data):
        row_norm = get_norm(row, ord)
        if row_norm > L:
            n_modified += 1
            clipped[row_idx] = L * clipped[row_idx] / row_norm
    print("For order {0}, final max norm is {1}"
          .format(ord, get_max_norm(clipped, ord=ord)))
    print("Had to modify {0} rows ({1}% of total)"
          .format(n_modified, 100 * n_modified / data.shape[0]))
    return clipped
def extrapolate_to_zero_linear(pattern):
    """
    Extrapolates a pattern to (0, 0) using a straight line through the
    pattern's left-most point.

    :param pattern: input Pattern
    :return: extrapolated Pattern (includes the original one)
    """
    x, y = pattern.data
    spacing = x[1] - x[0]
    # Fill the gap between 0 and min(x) on the same grid spacing.
    prefix_x = np.sort(np.arange(min(x), 0, -spacing))
    prefix_y = y[0] / x[0] * prefix_x
    return Pattern(np.concatenate((prefix_x, x)),
                   np.concatenate((prefix_y, y)))
def findKthSmallest(self, nums, k):
    """
    Quickselect for the element at sorted index ``k`` (0-based).

    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    def _partition(lo, hi, pivot_idx):
        # Lomuto-style partition around nums[pivot_idx]; returns the pivot's
        # final sorted position.
        pivot_val = nums[pivot_idx]
        nums[pivot_idx], nums[hi] = nums[hi], nums[pivot_idx]
        boundary = lo
        for j in range(lo, hi):
            if nums[j] < pivot_val:
                nums[boundary], nums[j] = nums[j], nums[boundary]
                boundary += 1
        nums[hi], nums[boundary] = nums[boundary], nums[hi]
        return boundary

    # Iterative quickselect (the original tail recursion unrolled).
    lo, hi = 0, len(nums) - 1
    while True:
        if lo == hi:
            return nums[lo]
        pivot_idx = _partition(lo, hi, random.randint(lo, hi))
        if pivot_idx == k:
            return nums[k]
        if k < pivot_idx:
            hi = pivot_idx - 1
        else:
            lo = pivot_idx + 1
def create_server(zkclient, server_id, parent_id):
    """Creates server definition in Zookeeper."""
    node_path = z.path.server(server_id)
    host_acl = zkutils.make_host_acl(server_id, 'rwcd')
    zkutils.ensure_exists(zkclient, node_path, acl=[host_acl])
    # zkutils.get return dict/tuple if need_metadata is true.
    #
    # pylint: disable=R0204
    data = zkutils.get(zkclient, node_path)
    if parent_id:
        if data:
            data['parent'] = parent_id
        else:
            data = {'parent': parent_id}
    _LOGGER.info('Creating server node %s with data %r and ACL %r',
                 node_path, data, host_acl)
    # Only fire a placement event when the node content actually changed.
    if zkutils.put(zkclient, node_path, data,
                   acl=[host_acl], check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def racetrack_AP_RR_TF(
    wavelength,
    sw_angle=90,
    radius=12,
    couplerLength=4.5,
    gap=0.2,
    width=0.5,
    thickness=0.2,
    widthCoupler=0.5,
    loss=[0.99],
    coupling=[0],
):
    """This particular transfer function assumes that the coupling sides of the
    ring resonator are straight, and the other two sides are curved. Therefore,
    the roundtrip length of the RR is 2*pi*radius + 2*couplerLength. This model
    also includes loss. (??? Need Verification on last line)
    We assume that the round parts of the ring have negligble coupling compared to
    the straight sections.

    Parameters
    -----------
    wavelength : ndarray (N,)
        Wavelength points to evaluate
    radius : float
        Radius of the sides in microns
    couplerLength : float
        Length of the coupling region in microns
    gap : float
        Gap in the coupler region in microns
    width : float
        Width of the waveguides in microns
    thickness : float
        Thickness of the waveguides in microns

    Returns
    -------
    E : ndarray
        Complex array of size (N,)
    alpha : ndarray
        Array of size (N,)
    t : ndarray
        Array of size (N,)
    alpha_s : ndarray
        Array of size (N,)
    phi : ndarray
        Array of size (N,)
    """
    # NOTE(review): ``loss`` and ``coupling`` use mutable list defaults; they
    # are only read here, but consider ``None`` sentinels — TODO confirm.
    # Sanitize the input
    wavelength = np.squeeze(wavelength)
    # N = wavelength.shape[0]
    # calculate coupling
    cTE0, cTE1 = evWGcoupler(
        wavelength=wavelength,
        width=widthCoupler,
        thickness=thickness,
        sw_angle=sw_angle,
        gap=gap,
    )
    n1 = np.squeeze(cTE0)  # Get the first mode of the coupler region
    n2 = np.squeeze(cTE1)  # Get the second mode of the coupler region
    Beta1 = 2 * np.pi * n1 / wavelength
    Beta2 = 2 * np.pi * n2 / wavelength
    x = 0.5 * (np.exp(1j * Beta1 * couplerLength) + np.exp(1j * Beta2 * couplerLength))
    y = 0.5 * (
        np.exp(1j * Beta1 * couplerLength)
        + np.exp(1j * Beta2 * couplerLength - 1j * np.pi)
    )
    alpha_c = np.sqrt(np.abs(x) ** 2 + np.abs(y) ** 2)
    t_c = x
    # k_c = y
    # Construct the coupling polynomial
    # couplingPoly = np.poly1d(coupling)
    # r = np.abs(x) - couplingPoly(wavelength)
    # k = np.abs(y)
    # calculate bent waveguide
    TE0_B = np.squeeze(
        bentWaveguide(
            wavelength=wavelength,
            width=width,
            thickness=thickness,
            sw_angle=sw_angle,
            radius=radius,
        )
    )
    # calculate straight waveguide
    TE0 = np.squeeze(
        straightWaveguide(
            wavelength=wavelength, width=width, thickness=thickness, sw_angle=sw_angle
        )
    )
    # Calculate round trip length
    # L = 2 * np.pi * radius + 2 * couplerLength
    # calculate total loss
    # alpha = np.squeeze(np.exp(- np.imag(TE0) * 2*couplerLength - np.imag(TE0_B)*2*np.pi*radius - lossPoly(wavelength)*L))
    alpha_t = np.exp(
        -np.imag(TE0) * 2 * couplerLength - np.imag(TE0_B) * 2 * np.pi * radius
    )
    alpha_m = np.squeeze(alpha_c * alpha_t)
    offset = np.mean(alpha_m)
    # NOTE(review): ``lossTemp`` is computed but never used below —
    # ``np.poly1d`` is built from ``loss`` directly; confirm which is intended.
    lossTemp = loss.copy()
    lossTemp[-1] = loss[-1] - (1 - offset)
    lossPoly = np.poly1d(loss)
    alpha = lossPoly(wavelength)
    alpha_s = alpha - alpha_m
    # calculate phase shifts
    phi_c = np.unwrap(np.angle(t_c))
    BetaStraight = np.unwrap(2 * np.pi * np.real(TE0) / wavelength)
    BetaBent = np.unwrap(2 * np.pi * np.real(TE0_B) / wavelength)
    phi_r = np.squeeze(BetaStraight * couplerLength + BetaBent * 2 * np.pi * radius)
    phi = np.unwrap(phi_r + phi_c)
    t = np.abs(t_c) / alpha_c
    ## Cascade final coupler
    # E = np.exp(1j*(np.pi+phi)) * (alpha - r*np.exp(-1j*phi))/(1-r*alpha*np.exp(1j*phi))
    E = (
        (t - alpha * np.exp(1j * phi))
        / (1 - alpha * t * np.exp(1j * phi))
        * (t_c / np.conj(t_c))
        * alpha_c
        * np.exp(-1j * phi_c)
    )
    # Output final s matrix
    return E, alpha, t, alpha_s, phi
def test_pp_default_2d(
    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_2d, data_regression, num_regression
):
    """Test a default `pp.x` calculation producing a 2D data set."""
    entry_point = 'quantumespresso.pp'
    node = generate_calc_job_node(entry_point, fixture_localhost, 'default_2d', generate_inputs_2d)
    parser = generate_parser(entry_point)
    results, calcfunction = parser.parse_from_node(node, store_provenance=False)
    assert calcfunction.is_finished, calcfunction.exception
    assert calcfunction.is_finished_ok, calcfunction.exit_message
    assert 'output_parameters' in results
    assert 'output_data' in results
    output_data = results['output_data']
    assert len(output_data.get_arraynames()) == 4
    # Numeric arrays are pinned with an exact-match tolerance.
    num_regression.check(
        {
            'data_array': output_data.get_array('data').flatten(),
            'coords_array': output_data.get_array('xy_coordinates').flatten(),
        },
        default_tolerance=dict(atol=0, rtol=1e-18))
    data_regression.check({
        'parameters': results['output_parameters'].get_dict(),
        'data_units': output_data.get_array('data_units').tolist(),
        'coords_units': output_data.get_array('xy_coordinates_units').tolist()
    })
def reset_config():
    """
    Fixture body run for every test in the project: restore the default
    configuration so settings never leak between tests.
    """
    # Re-initialise with no config source, then clear any test-time overrides.
    conf.config_init(None)
    conf.testing_overrides = None
def _get_event_data(tr, tt_model, phase, acc_type, depth_unit="km"):
    """
    Update a sac trace to a obspy trace and update trace header,
    and calculate theoretical traveltime of a specific model and phase

    :param tr: trace read from a sac-formatted file
    :param tt_model: name of the TauP travel-time model
    :param phase: seismic phase to compute the arrival for
    :param acc_type: value stored verbatim in the output header's "type"
    :param depth_unit: unused; depth > 1000 is treated as metres (see note)
    :return: the input trace with an updated stats header, or None when the
        requested phase has no arrival at this distance/depth

    .. Note::
        The input trace should be read from sac-formatted files.
        depth_unit is not used. if depth>1000 then unit should be meter,
        since no events deeper than 700 km on the earth.
    """
    model = TauPyModel(model=tt_model)
    event_longitude = tr.stats.sac.evlo
    event_latitude = tr.stats.sac.evla
    event_depth = tr.stats.sac.evdp
    try:
        event_magnitude = tr.stats.sac.mag
    except:
        # NOTE(review): bare except with a placeholder magnitude of 6.66 when
        # the sac header lacks ``mag`` — consider ``except AttributeError``.
        event_magnitude = 6.66
    # if depth_unit == "m":
    #     event_depth /= 1000.0
    # in this case, the depth_unit is considered to be m.
    if event_depth > 1000:
        event_depth /= 1000
    station_longitude = tr.stats.sac.stlo
    station_latitude = tr.stats.sac.stla
    station_elevation = tr.stats.sac.stel
    try:
        component_azimuth = tr.stats.sac.cmpaz
        component_inclination = tr.stats.sac.cmpinc
    except:
        # Fall back to nominal ZNE orientation when cmpaz/cmpinc are missing.
        # print(tr.stats)
        if tr.stats.channel[-1] == "Z":
            component_azimuth = 0
            component_inclination = 0
        elif tr.stats.channel[-1] == "N":
            component_azimuth = 0
            component_inclination = 90
        elif tr.stats.channel[-1] == "E":
            component_azimuth = 90
            component_inclination = 90
        else:
            # NOTE(review): ``os._exit(0)`` kills the whole process with a
            # success code on a bad channel — raising would be safer; confirm.
            print("component is not ZNE. ", tr.stats.channel)
            os._exit(0)
    event_time = _get_sac_origin(tr)
    # Distance/azimuth on the WGS84 ellipsoid, then converted to degrees.
    distance, azimuth, back_azimuth = gps2dist_azimuth(lat1=event_latitude, lon1=event_longitude,
                                                       lat2=station_latitude, lon2=station_longitude,
                                                       a=6378137.0, f=0.0033528106647474805)
    distance = kilometers2degrees(kilometer=distance / 1000.0)
    # travel time, slowness, inclinations
    arrivals = model.get_travel_times(source_depth_in_km=event_depth,
                                      distance_in_degree=distance,
                                      phase_list=[phase])
    if len(arrivals) < 1:
        return None
    arr = arrivals[0]
    onset = event_time + arr.time
    phase = phase
    inclination = arr.incident_angle
    slowness = arr.ray_param
    # pierce points
    # pp_latitude
    # pp_longitude
    # pp_depth
    # ray paths
    # arrivals = model.get_travel_times(source_depth_in_km=event_depth,
    #                                   distance_in_degree=distance,
    #                                   phase_list=[phase])
    header = {"model": tt_model, "type": acc_type,
              "event_latitude": event_latitude, "event_longitude": event_longitude, "event_depth": event_depth,
              "event_time": event_time, "event_magnitude": event_magnitude,
              "station_latitude": station_latitude, "station_longitude": station_longitude,
              "station_elevation": station_elevation,
              "component_azimuth": component_azimuth, "component_inclination":component_inclination,
              "onset": onset, "phase": phase, "inclination": inclination, "slowness": slowness,
              "distance": distance, "azimuth": azimuth, "back_azimuth": back_azimuth
              }
    tr.stats.update(header)
    return tr
def fetch_ccgp_bid_info(keyword, pages, filename, **kwargs):
    """Fetch and store bidding information."""
    bid_info = BidInfo()
    if os.path.isfile(filename):
        bid_info.load(filename)
    # Fetched documents live in a directory named after the data file.
    directory = os.path.splitext(filename)[0]
    if not os.path.exists(directory):
        os.makedirs(directory)
    begin_page = bid_info.next_page
    if begin_page > 1:
        print("Already fetched {} pages.".format(begin_page - 1))
        print("Begin fetching from page {}.\n".format(begin_page))
    for pageno in range(begin_page, begin_page + pages):
        sys.stderr.write("Page {}:\n".format(pageno))
        page_html = fetch_search_page(keyword, pageno, **kwargs)
        bid_list = parse_search_page(page_html)
        fetch_and_store_bids(bid_list, directory)
        bid_info.extend_page(bid_list)
        sys.stderr.write("Fetched {} docs at page {}. ".format(len(bid_list), pageno))
        sys.stderr.write("Total bids: {}.\n".format(
            len(bid_info.entries)))
        # Save data after every parsing every pageno
        bid_info.save(filename)
def cmd_pool(args):
    """
    Sub-command: "pool", down-sample image cube along the spatial dimension.
    """
    # Refuse to overwrite an existing file unless --clobber was given.
    if not args.clobber and os.path.exists(args.outfile):
        raise OSError("output file already exists: %s" % args.outfile)
    datacube = FITSCube(args.infile)
    print("Data cube unit: %s" % datacube.unit)
    print("Image/slice size: %dx%d" % (datacube.width, datacube.height))
    print("Number of slices: %d" % datacube.nslice)
    print("Pooling image cube ...")
    print("block size: %d, method: %s" % (args.blocksize, args.method))
    # Resolve the aggregation function (e.g. "mean", "max") from numpy.
    datacube.pool(blocksize=args.blocksize, func=getattr(np, args.method))
    print("Pooled image/slice size: %dx%d" % (datacube.width, datacube.height))
    print("Saving pooled FITS cube ...")
    datacube.write(args.outfile, clobber=args.clobber)
    print("Pooled FITS cube wrote to: %s" % args.outfile)
def consume_chunks(generator: Union[PandasTextFileReader, Iterator], progress: bool = True, total: int = None):
    """Collect the output of chained filters into a single pandas DataFrame.

    :param generator: iterator to be transformed into a dataframe
    :param progress: whether to show progress
    :param total: total number of chunks the input is divided in
    """
    frames = []
    iterable = tqdm(generator, total=total) if progress else generator
    for chunk in iterable:
        if isinstance(chunk, pd.DataFrame):
            frames.append(chunk)
        else:
            # Nested chunk containers are flattened recursively.
            frames.extend(_consume_deeper_chunks(chunk))
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, axis=0)
def lang_not_found(s):
    """Fallback marker applied when the language files aren't found."""
    return "".join((s, "⚙"))
def _get_corr_matrix(corr, rho):
    """Preprocessing of correlation matrix ``corr`` or
    correlation values ``rho``.

    Given either ``corr`` or ``rho`` (each may be an array,
    callable or process instance), returns the corresponding,
    possibly time-dependent correlation matrix,
    with a ``shape`` attribute set to
    its shape (may be set to None if attempts to
    retrieve shape information fail).

    If ``corr`` is not None, ``rho`` is ignored.
    If both are None, returns None.
    """
    # exit if no correlations specified
    if corr is None and rho is None:
        return None
    elif corr is not None:
        # if present, corr overrides rho
        corr = _variable_param_setup(corr)
        cshape = _get_param_shape(corr)
        if cshape is not None:
            # Validate: square matrix, optionally with a trailing paths axis of 1.
            if len(cshape) not in (2, 3) or cshape[0] != cshape[1] or \
               (len(cshape) == 3 and cshape[2] != 1):
                raise ValueError(
                    "the correlation matrix ``corr`` should be square, "
                    "possibly with a trailing 1-dimensional axis matching "
                    "the paths axis, not an array with shape {}"
                    .format(cshape))
    else:
        # corr is None: build correlation matrix from rho,
        # either statically or dynamically
        rho = _variable_param_setup(rho)
        rho_shape = _get_param_shape(rho)
        if rho_shape is not None:
            # Validate: rho must be a scalar or a vector (trailing paths axis of 1 OK).
            if len(rho_shape) > 2 or \
               (len(rho_shape) == 2 and rho_shape[1] != 1):
                raise ValueError(
                    "correlation ``rho`` should be a vector, "
                    "possibly with a trailing 1-dimensional axis matching "
                    "the paths axis, not an array with shape {}"
                    .format(rho.shape))
        if callable(rho):
            # Time-dependent rho: wrap it so corr(t) yields the matrix at time t.
            def corr(t):
                return _const_rho_to_corr(rho(t))
            # Each rho value expands into a 2x2 block, hence the doubling.
            corr.shape = None if rho_shape is None else \
                (2, 2) if rho_shape == () else \
                (2*rho_shape[0], 2*rho_shape[0])
        else:
            corr = _const_rho_to_corr(rho)
    return corr
def init_lqr(hyperparams):
    """
    Return initial gains for a time-varying linear Gaussian controller
    that tries to hold the initial position.

    :param hyperparams: dict overriding the INIT_LG_LQR defaults; must supply
        at least x0 (initial state), dX/dU (state/action dims), dt and T.
    :return: a LinearGaussianPolicy holding the LQR gains and noise terms.
    """
    config = copy.deepcopy(INIT_LG_LQR)
    config.update(hyperparams)
    x0, dX, dU = config['x0'], config['dX'], config['dU']
    dt, T = config['dt'], config['T']
    #TODO: Use packing instead of assuming which indices are the joint
    # angles.
    # Notation notes:
    # L = loss, Q = q-function (dX+dU dimensional),
    # V = value function (dX dimensional), F = dynamics
    # Vectors are lower-case, matrices are upper case.
    # Derivatives: x = state, u = action, t = state+action (trajectory).
    # The time index is denoted by _t after the above.
    # Ex. Ltt_t = Loss, 2nd derivative (w.r.t. trajectory),
    # indexed by time t.
    # Constants.
    idx_x = slice(dX)  # Slices out state.
    idx_u = slice(dX, dX+dU)  # Slices out actions.
    if len(config['init_acc']) == 0:
        config['init_acc'] = np.zeros(dU)
    if len(config['init_gains']) == 0:
        config['init_gains'] = np.ones(dU)
    # Set up simple linear dynamics model.
    Fd, fc = guess_dynamics(config['init_gains'], config['init_acc'],
                            dX, dU, dt)
    # Setup a cost function based on stiffness.
    # Ltt = (dX+dU) by (dX+dU) - Hessian of loss with respect to
    # trajectory at a single timestep.
    Ltt = np.diag(np.hstack([
        config['stiffness'] * np.ones(dU),
        config['stiffness'] * config['stiffness_vel'] * np.ones(dU),
        np.zeros(dX - dU*2), np.ones(dU)
    ]))
    Ltt = Ltt / config['init_var']  # Cost function - quadratic term.
    lt = -Ltt.dot(np.r_[x0, np.zeros(dU)])  # Cost function - linear term.
    # Perform dynamic programming.
    K = np.zeros((T, dU, dX))  # Controller gains matrix.
    k = np.zeros((T, dU))  # Controller bias term.
    PSig = np.zeros((T, dU, dU))  # Covariance of noise.
    cholPSig = np.zeros((T, dU, dU))  # Cholesky decomposition.
    invPSig = np.zeros((T, dU, dU))  # Inverse of covariance.
    vx_t = np.zeros(dX)  # Vx = dV/dX. Derivative of value function.
    Vxx_t = np.zeros((dX, dX))  # Vxx = ddV/dXdX.
    #TODO: A lot of this code is repeated with traj_opt_lqr_python.py
    # backward pass.
    # Backward pass of the LQR recursion, from t = T-1 down to 0.
    for t in range(T - 1, -1, -1):
        # Compute Q function at this step.
        if t == (T - 1):
            # The final step's cost is up-weighted by final_weight.
            Ltt_t = config['final_weight'] * Ltt
            lt_t = config['final_weight'] * lt
        else:
            Ltt_t = Ltt
            lt_t = lt
        # Qtt = (dX+dU) by (dX+dU) 2nd Derivative of Q-function with
        # respect to trajectory (dX+dU).
        Qtt_t = Ltt_t + Fd.T.dot(Vxx_t).dot(Fd)
        # Qt = (dX+dU) 1st Derivative of Q-function with respect to
        # trajectory (dX+dU).
        qt_t = lt_t + Fd.T.dot(vx_t + Vxx_t.dot(fc))
        # Compute preceding value function.
        # Solve via Cholesky of the action-action block rather than inverting.
        U = sp.linalg.cholesky(Qtt_t[idx_u, idx_u])
        L = U.T
        invPSig[t, :, :] = Qtt_t[idx_u, idx_u]
        PSig[t, :, :] = sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, np.eye(dU), lower=True)
        )
        cholPSig[t, :, :] = sp.linalg.cholesky(PSig[t, :, :])
        K[t, :, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, Qtt_t[idx_u, idx_x], lower=True)
        )
        k[t, :] = -sp.linalg.solve_triangular(
            U, sp.linalg.solve_triangular(L, qt_t[idx_u], lower=True)
        )
        Vxx_t = Qtt_t[idx_x, idx_x] + Qtt_t[idx_x, idx_u].dot(K[t, :, :])
        vx_t = qt_t[idx_x] + Qtt_t[idx_x, idx_u].dot(k[t, :])
        # Symmetrize to counter numerical drift.
        Vxx_t = 0.5 * (Vxx_t + Vxx_t.T)
    return LinearGaussianPolicy(K, k, PSig, cholPSig, invPSig)
def sequence_exact_match(true_seq, pred_seq):
    """
    Boolean return value indicates whether or not seqs are exact match
    (identical start and end after whitespace stripping).
    """
    stripped_true = strip_whitespace(true_seq)
    stripped_pred = strip_whitespace(pred_seq)
    return (stripped_pred["start"], stripped_pred["end"]) == \
           (stripped_true["start"], stripped_true["end"])
def same_variable(a, b):
    """
    Tell whether the two arguments are in fact the very same object (``True``)
    or different objects (``False``), even when they hold equal content.

    @param a any object
    @param b any object
    @return ``True`` or ``False``

    Background (condensed English translation of the original FAQ):
    immutable types (``int``, ``float``, ``str``, ``tuple``, ``frozenset``,
    ``bytes``, ``None``) cannot be modified in place, so operations on them
    implicitly create copies. Mutable types (``list``, ``dict``, ``set``,
    ``bytearray``, class instances) can be shared: after ``b = a`` both names
    designate the same memory, and an in-place change through one name is
    visible through the other::

        a = [2, 3]
        b = a
        a += [4, 5]
        print(a == b)   # --> True, both are [2, 3, 4, 5]

    whereas with a ``tuple`` the ``+=`` creates a new object and the names
    diverge. To obtain an independent copy of a mutable object, request one
    explicitly with ``list(a)``, ``copy.copy`` (shallow) or ``copy.deepcopy``
    (recursive, also copies nested containers). See the CPython documentation
    on immutable sequence types and the ``copy`` module for details.
    """
    # Two names denote the same variable exactly when they are one object.
    return a is b
def _check_legal_sampling_frequency_and_duration(sampling_frequency, duration):
    """Validate that sampling_frequency * duration is (close to) an integer.

    By convention, the product of sampling_frequency and duration must be an
    integer (the number of samples); this check tolerates floating-point
    noise up to ``_TOL`` decimal places.

    Parameters
    ==========
    sampling_frequency: float
    duration: float

    Raises
    ======
    IllegalDurationAndSamplingFrequencyException
        If the product is further than 10**(-_TOL) from the nearest integer.
    """
    num = sampling_frequency * duration
    # Distance from the nearest integer, compared against the module tolerance.
    if np.abs(num - np.round(num)) > 10**(-_TOL):
        raise IllegalDurationAndSamplingFrequencyException(
            # Fixed: the original concatenation read "...a numberup to...".
            '\nYour sampling frequency and duration must multiply to a number '
            'up to (tol = {}) decimals close to an integer number. '
            '\nBut sampling_frequency={} and duration={} multiply to {}'.format(
                _TOL, sampling_frequency, duration, num
            )
        )
def inform_report_issue(exception_msg):
    """
    Tell the user how to report an issue on the pwndbg GitHub tracker.
    The use of `memoize` makes it report only once per exception message.
    """
    head = (
        "If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\n"
        "(Please don't forget to search if it hasn't been reported before)\n"
        "To generate the report and open a browser, you may run "
    )
    text = (
        message.notice(head)
        + message.hint("`bugreport --run-browser`")
        + message.notice("\nPS: Pull requests are welcome")
    )
    print(text)
def format_level_2_memory(memory, header=None):
    """Format an experiment result memory object for measurement level 2.

    Args:
        memory (list): Memory from experiment with `meas_level==2` and `memory==True`.
        header (dict): Experiment header dictionary containing useful
            information for postprocessing.

    Returns:
        list[str]: List of bitstrings.
    """
    return [format_counts_memory(shot_memory, header) for shot_memory in memory]
def convert_objects_to_polar(rendering_items):
    """Return *rendering_items* with every Objects block converted to polar form."""
    from .objectbased.conversion import to_polar

    converted = apply_to_object_blocks(rendering_items, to_polar)
    return list(converted)
def test_getMonthFromIntToString():
    """
    getMonth should map the month numbers 1..12 to their
    abbreviated month names.
    """
    abbreviations = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
                     'Sep', 'Oct', 'Nov', 'Dec']
    for month_number, expected in enumerate(abbreviations, start=1):
        assert events.getMonth(month_number) == expected
def compile(file_address):
    """Tokenize the source file at *file_address* and print the results.

    NOTE: shadows the builtin ``compile`` — kept for caller compatibility.

    Args:
        file_address (str): Address of the file given on the command line.
    """
    scanner = Scanner()
    # Context manager closes the handle even on errors; the original
    # open(...).read() leaked the file object.
    with open(file_address, 'r') as source_file:
        data = source_file.read()
    tokens, symbol_table = scanner.get_tokens_symbol_table(data)
    pprint(tokens)
    print(symbol_table)
def find_in_module(var_name: str, module, i: int = 0) -> Tuple[str, ast.AST]:
    """Locate the code that assigned a value to *var_name* inside *module*.

    :param var_name: Name of the variable to look for.
    :param module: Module whose source is searched.
    :returns: Tuple of (source code segment, corresponding ast node).
    """
    module_source = sourceget.get_module_source(module)
    return find_in_source(var_name, module_source, i=i)
def empty(shape,
          dtype="f8",
          order="C",
          device=None,
          usm_type="device",
          sycl_queue=None):
    """Create a `dpnp_array` backed by an uninitialized USM allocation."""
    usm_buffer = dpt.empty(
        shape,
        dtype=dtype,
        order=order,
        device=device,
        usm_type=usm_type,
        sycl_queue=sycl_queue,
    )
    return dpnp_array(usm_buffer.shape, buffer=usm_buffer, order=order)
def test_exception_attribute_in_extra():
    """
    In an except block, passing an exception attribute into logging as a value of extra dict is ok.
    """
    source = dedent("""\
        import logging
        class CustomException(Exception):
            def __init__(self, custom_arg):
                self.custom_arg = custom_arg
        try:
            pass
        except CustomException as e:
            logging.error('Custom exception has occurred: {custom_arg}', extra=dict(custom_arg=e.custom_arg))
    """)
    visitor = LoggingVisitor()
    visitor.visit(parse(source))
    assert_that(visitor.violations, is_(empty()))
def solve2(input_data):
    """use scipy.ndimage

    Label connected regions ("basins") in the parsed grid and return the
    product of the three largest region sizes.
    """
    data_array = np.array(parse(input_data))
    # boundaries of objects must be 0 for scipy label
    # convert 0 in data to -1 and 9 to 0
    data_array[data_array == 0] = -1
    data_array[data_array == 9] = 0
    labels, _ = label(data_array)
    # counts[0] is the background (label 0, the former 9-valued boundary cells).
    _, counts = np.unique(labels, return_counts=True)
    # NOTE: counts[1:] is a numpy *view*, so .sort() reorders the underlying
    # array in place while leaving the background count fixed at index 0.
    counts[1:].sort()
    # The three largest region sizes now sit at the end of the array
    # (assumes at least three labelled regions besides the background).
    return counts[-3:].prod()
def yolox_semi_warm_cos_lr(
    lr,
    min_lr_ratio,
    warmup_lr_start,
    total_iters,
    normal_iters,
    no_aug_iters,
    warmup_total_iters,
    semi_iters,
    iters_per_epoch,
    iters_per_epoch_semi,
    iters,
):
    """Cosine learning-rate schedule with a quadratic warm up.

    Three phases: quadratic warm up to *lr*, a cosine decay down to
    ``lr * min_lr_ratio``, and a final constant minimum. During the
    semi-supervised phase the cosine position is rescaled by the ratio of
    iterations per epoch.
    """
    min_lr = lr * min_lr_ratio
    if iters <= warmup_total_iters:
        # Quadratic ramp from warmup_lr_start up to lr.
        progress = iters / float(warmup_total_iters)
        return (lr - warmup_lr_start) * progress ** 2 + warmup_lr_start
    if iters >= normal_iters + semi_iters:
        # Past the semi-supervised phase: stay at the floor.
        return min_lr
    cosine_span = total_iters - warmup_total_iters - no_aug_iters
    if iters <= normal_iters:
        phase = iters - warmup_total_iters
    else:
        # Semi phase: rescale the extra iterations by the epoch-length ratio.
        phase = (
            normal_iters
            - warmup_total_iters
            + (iters - normal_iters) * iters_per_epoch * 1.0 / iters_per_epoch_semi
        )
    return min_lr + 0.5 * (lr - min_lr) * (1.0 + math.cos(math.pi * phase / cosine_span))
def command_hash(user: UserMessages, m: MicroPythonRepl, args):
    """\
    Get hash of files on the targets file system.
    """
    # NOTE(review): expand_pattern(connect_repl(args.PATH, m)) — presumably
    # connect_repl yields remote Path-like objects for args.PATH; confirm the
    # argument order against the definitions of expand_pattern/connect_repl.
    for path in expand_pattern(connect_repl(args.PATH, m)):
        if path.is_dir():
            if args.recursive:
                # walk() yields (dirpath, dirnames, filenames); assumes the
                # filename entries are remote Path objects exposing .sha256(),
                # not plain strings — TODO confirm.
                for dirpath, dirnames, filenames in walk(path):
                    for file_path in filenames:
                        print_hash(user, file_path, file_path.sha256())
            # Directories are silently skipped when --recursive is not given.
        else:
            print_hash(user, path, path.sha256())
def builder(obj, dep, denominator=None):
    """Decorator factory that registers the decorated function as a Builder
    task for *obj* (the function mutates *obj* instead of returning a value).
    """
    def decorate(build_func):
        tasks.append(Builder(build_func, obj, dep, denominator))
        return build_func
    return decorate
def regex_split(input_string=None, regex=None, strip_whitespace=None, **kwargs):
    """
    Use a regular expression to split an input_string into multiple items.

    Args:
        input_string (CEF type: *): The input string to split.
        regex: The regular expression to use to split the string. Reserved regular expression characters should be escaped with a backslash, so '\\.' will match '.' and '\\\\' will match '\\'.
        strip_whitespace: Either True or False to indicate whether or not to remove whitespace before and after each item. Defaults to True.

    Returns a JSON-serializable object that implements the configured data paths:
        *.item (CEF type: *): A list of items created by splitting the input string.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import re

    import phantom.rules as phantom

    # strip_whitespace defaults to True; any value other than the string
    # "True" (case-insensitive) disables it. Fixed: identity check `is None`
    # instead of `== None`.
    strip_whitespace = strip_whitespace is None or strip_whitespace.lower() == 'true'

    # Undo the double escaping applied by the playbook editor.
    regex = regex.replace('\\\\', '\\')
    results = re.split(regex, input_string)
    if strip_whitespace:
        results = [result.strip() for result in results]
    phantom.debug("the input string {} was split into {}".format(input_string, results))

    outputs = [{'item': result} for result in results]

    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
def configure_for_tests():
    """Configure logging for the test suite (root logger at INFO)."""
    root_logger = logging.getLogger()
    set_logger(root_logger)
    logging.basicConfig()
    _logger.setLevel(logging.INFO)
def on_segment(p, r, q, epsilon):
    """
    Given three colinear points p, q, r and a tolerance epsilon, decide
    whether point q lies on the line segment pr.
    """
    # Taken from http://stackoverflow.com/questions/328107/how-can-you-determine-a-point-is-between-two-other-points-on-a-line-segment
    dx = r.x - p.x
    dy = r.y - p.y
    # q must not deviate from the p->r line by more than epsilon.
    cross = (q.y - p.y) * dx - (q.x - p.x) * dy
    if abs(cross) > epsilon:
        return False
    # The projection of pq onto pr must fall within [0, |pr|^2].
    dot = (q.x - p.x) * dx + (q.y - p.y) * dy
    if dot < 0:
        return False
    return dot <= dx * dx + dy * dy
def groupby_times(df, kind, unit=None):
    """Group a time-indexed frame by a calendar frequency.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame with `pandas.TimedeltaIndex` as index.
    kind : {'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'secondly', 'all'}
        How to group `df`. 'all' puts all rows into a single group.
    unit : str (optional)
        Unit forwarded to `since_last`.

    Returns
    -------
    Grouped

    Raises
    ------
    NotImplementedError
        If `kind` is not one of the supported values.
    """
    key_dict = {
        'monthly': 'M',
        'weekly': 'w',
        'daily': 'd',
        'hourly': 'h',
        'minutely': 'm',
        'secondly': 's',
        'all': None,
    }
    if kind not in key_dict:
        # Fixed: the original message ("key must be something else") did not
        # tell the caller which values are valid.
        raise NotImplementedError(
            'kind must be one of {}'.format(sorted(key_dict)))
    freq = key_dict[kind]
    # 'all' maps to freq=None -> no grouping key (single group), matching the
    # original tmp_since_last behaviour.
    group_key = since_last(df.index, freq, unit) if freq else None
    return df.groupby(group_key)
def pid_to_path(pid):
    """Return the full path of the executable of a process given its pid."""
    ps_output = execute("ps -o command " + pid)
    command = get_command(ps_output)
    whereis_output = execute("whereis " + command)
    path = get_path(whereis_output)
    # Fall back to the bare command name when whereis found nothing.
    return command if path == "" else path
def get_script_histogram(utext):
    """Return a map from script to [character count, set of chars], excluding
    some common whitespace and inherited characters. utext is a unicode string."""
    skip_codepoints = {0x00, 0x0A, 0x0D, 0x20, 0xA0, 0xFEFF}
    histogram = {}
    for ch in utext:
        if ord(ch) in skip_codepoints:
            continue
        script = unicode_data.script(ch)
        if script == "Zinh":  # inherited characters carry no script of their own
            continue
        entry = histogram.get(script)
        if entry is None:
            histogram[script] = [1, {ch}]
        else:
            entry[0] += 1
            entry[1].add(ch)
    return histogram
def ja_nein_vielleicht(*args):
    """
    Without arguments, build a yes/no/maybe choice list; with one argument,
    return the display value for that choice (True/False/None).
    """
    labels = {
        True: "Vermutlich ja",
        False: "Vermutlich nein",
        None: "Kann ich noch nicht sagen",
    }
    if args:
        return labels[args[0]]
    return [{choice: labels[choice]} for choice in (True, False, None)]
def to_sql(query, df, ini_path=PATH.INI_FILE):
    """Execute an SQL query that inserts the rows of *df* into the DB.

    :param str query: query to execute
    :param :class:`pandas.DataFrame` df: data to insert
    :param str ini_path: ini file path, default: ``PATH.INI_FILE``
    """
    rows = [tuple(row) for row in df.values]
    DBHandler(ini2dict(ini_path, 'DB')).to_sql(query, rows)
def ebic(covariance, precision, n_samples, n_features, gamma=0):
    """
    Extended Bayesian Information Criteria for model selection.
    When using path mode, use this as an alternative to cross-validation for
    finding lambda.
    See:
        "Extended Bayesian Information Criteria for Gaussian Graphical Models"
        R. Foygel and M. Drton, NIPS 2010
    Parameters
    ----------
    covariance : 2D ndarray (n_features, n_features)
        Maximum Likelihood Estimator of covariance (sample covariance)
    precision : 2D ndarray (n_features, n_features)
        The precision matrix of the model to be tested
    n_samples :  int
        Number of examples.
    n_features : int
        Dimension of an example.
    gamma : (float) \in (0, 1)
        Choice of gamma=0 leads to classical BIC
        Positive gamma leads to stronger penalization of large graphs.
    Returns
    -------
    ebic score (float). Caller should minimize this score.
    """
    # Scaled Gaussian log-likelihood term: -tr(S * Theta) + log det(Theta).
    l_theta = -np.sum(covariance * precision) + fast_logdet(precision)
    l_theta *= n_features / 2.
    # if something goes wrong with fast_logdet, return large value
    if np.isinf(l_theta) or np.isnan(l_theta):
        return 1e10
    # Count nonzero entries, then derive the number of off-diagonal edges.
    mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps
    precision_nnz = (np.sum(mask) - n_features) / 2.0  # lower off diagonal tri
    return (
        -2.0 * l_theta
        + precision_nnz * np.log(n_samples)
        + 4.0 * precision_nnz * np.log(n_features) * gamma
    )
def register_function(name, func, tagging_func, example_args, params_index,
                      precedence):
    """Registers a function as a pattern in the graph matcher registry.

    The graph matcher needs to trace at least once the full function, which means
    you need to provide it with dummy arguments. The shapes of the arguments do
    not matter, as the graph matcher ignores their values, however the rank does.
    Especially if there is some broadcasting happening you should register with
    every possible broadcast pattern. As a general advice avoid using a shape to
    be 1, unless you want the pattern to specifically match that, as some
    operations, like squeeze for example, can have special behaviour then.

    Args:
      name: The name of the pattern that is being registered to.
      func: The function that performs the computation.
      tagging_func: Function that correctly creates the tag.
      example_args: Example arguments that can be inputted into `func`.
      params_index: Specifies at which index of the `example_args` are considered
        a parameter.
      precedence: This specifies what precedence the graph matcher is going to
        assign to the provided pattern. The graph matcher will go from lowest to
        highest precedence, randomly breaking ties, when matching. Note that the
        pattern that matches a parameter with the lowest precedence will get
        registered and no other will. Specifically useful when there is a pattern
        for a layer with and without bias, in which case the with bias
        registration always should go with lower precedence.
    """
    # This is required because we can not use Jax before InitGoogle() runs
    def register():
        # Concretize the example arguments; only their ranks/shapes matter
        # for pattern matching, not the values.
        jnp_args = jax.tree_map(jnp.asarray, example_args)
        graph = function_to_jax_graph(
            func, jnp_args, params_index=params_index, tagging_func=tagging_func)
        if NAME_TO_JAX_GRAPH.get(name) is None:
            NAME_TO_JAX_GRAPH[name] = (precedence, [])
        # All graphs registered under the same name must share one precedence.
        assert precedence == NAME_TO_JAX_GRAPH[name][0]
        NAME_TO_JAX_GRAPH[name][1].append(graph)
    # Registration is deferred; presumably an init hook later executes every
    # callback queued in DEFERRED_REGISTRATIONS — TODO confirm.
    DEFERRED_REGISTRATIONS.append(register)
def _process_cli_plugin(bases, attrdict) -> dict:
    """Process a CLI plugin, generate its hook functions, and return a new
    attrdict with all attributes set correctly.

    :param bases: The base classes of the plugin class under construction.
    :param attrdict: The class attribute dict of the plugin class.
    :returns: A copy of ``attrdict`` augmented with generated hooks.
    :raises exceptions.PlugError: If the class mixes Command and
        CommandExtension, or a CommandExtension lacks ``__settings__``.
    """
    attrdict_copy = dict(attrdict)  # copy to avoid mutating original
    if cli.Command in bases and cli.CommandExtension in bases:
        raise exceptions.PlugError(
            "A plugin cannot be both a Command and a CommandExtension"
        )
    if cli.Command in bases:
        # Commands get default settings when none are declared.
        settings = attrdict_copy.get("__settings__", cli.command_settings())
        attrdict_copy["__settings__"] = settings
        _check_base_parsers(settings.base_parsers or [], attrdict_copy)
    elif cli.CommandExtension in bases:
        # Extensions must declare which commands they extend.
        if "__settings__" not in attrdict_copy:
            raise exceptions.PlugError(
                "CommandExtension must have a '__settings__' attribute"
            )
    # Generated hook implementations are attached under their own names.
    handle_processed_args = _generate_handle_processed_args_func()
    attrdict_copy[handle_processed_args.__name__] = handle_processed_args
    attrdict_copy["attach_options"] = _attach_options
    configurable_argnames = list(_get_configurable_arguments(attrdict))
    if configurable_argnames:
        # Only plugins with configurable arguments get this accessor; note it
        # re-derives the argnames from the *class* dict at call time.
        def get_configurable_args(self) -> ConfigurableArguments:
            return ConfigurableArguments(
                config_section_name=self.__settings__.config_section_name
                or self.__plugin_name__,
                argnames=list(
                    _get_configurable_arguments(self.__class__.__dict__)
                ),
            )
        attrdict_copy[get_configurable_args.__name__] = get_configurable_args
    return attrdict_copy
def get_group(yaml_dict):
    """
    Return the attributes of the (first) light group.

    :param yaml_dict: parsed YAML mapping with a top-level "groups" mapping
    :return: the attribute dict of the first group
    """
    first_group_name = list(yaml_dict["groups"])[0]
    attributes = yaml_dict["groups"][first_group_name]
    # A well-formed group carries an 'id'; warn (but still return) otherwise.
    if 'id' not in attributes:
        print("Error, expected to find an 'id' attribute in the group object")
    return attributes
def close_db(error):
    """Close the per-request database connection, if one was opened."""
    _missing = object()
    db = getattr(g, 'db', _missing)
    if db is not _missing:
        db.close()
def plot_time(
    monitors,
    labels,
    savefile,
    title="Average computation time per epoch",
    ylabel="Seconds",
    log=False,
    directory=DEFAULT_DIRECTORY,
):
    """Plots the computation time required for each step as a horizontal bar
    plot

    :param monitors: a list of monitor sets: [(training, evaluation, inference)]
    :param labels: a list of strings for the label of each monitor
    :param savefile: name of the file to save. If none, then will not save
    :param title: title of the figure
    :param ylabel: label for the y-axis
    :param log: whether to plot a log-plot. Can also be set to "symlog"
    :param directory: directory to save the file in. Defaults to the results dir
    :returns: the figure
    """
    clean_labels = _correct_and_clean_labels(labels)
    # Per monitor set, compute [mean total training time per epoch,
    # mean total projection time per epoch].
    # NOTE(review): evaluation_monitor is unpacked but never used — confirm
    # this is intentional.
    all_times = np.array(
        [
            [
                np.mean(
                    [
                        np.sum(epoch["total"])
                        for epoch in training_monitor.timing
                    ]
                ),
                np.mean(
                    [
                        np.sum([iteration["total"] for iteration in epoch])
                        for epoch in projection_monitor.timing
                    ]
                ),
            ]
            for (
                training_monitor,
                evaluation_monitor,
                projection_monitor,
            ) in monitors
        ]
    )
    # Using the recipe for a grouped bar plot
    fig = plt.figure()
    # set width of bars
    bar_width = 1.0 / (1.0 + all_times.shape[1])
    colors = list()
    for i, times in enumerate(all_times):
        positions = bar_width * np.arange(len(times)) + i
        for j, (position, time, label) in enumerate(
            zip(positions, times, ["Training", "Projection"])
        ):
            # Only the first group creates legend entries; later groups reuse
            # the colors recorded from the first group's bars.
            if i == 0:
                line2d = plt.bar(position, time, width=bar_width, label=label)
                colors.append(line2d[0].get_facecolor())
            else:
                plt.bar(position, time, width=bar_width, color=colors[j])
    # Add ticks on the middle of the group bars
    xs = (
        np.arange(len(all_times))
        + 0.5 * all_times.shape[1] * bar_width
        - 0.5 * bar_width
    )
    plt.xticks(xs, clean_labels)
    plt.legend()
    # possibly make log plot
    if log:
        if log == "symlog":
            plt.yscale("symlog")
        else:
            plt.yscale("log")
    plt.ylabel(ylabel)
    plt.title(title)
    plt.tight_layout()
    if savefile is not None:
        filepath = f"{directory}/{savefile}.png"
        print(f"Saving timing plot to {filepath}")
        plt.savefig(filepath, dpi=300)
    return fig
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
            list is over individual gradients. The inner list is over the
            gradient calculation for each tower.

    Returns:
        List of pairs of (gradient, variable) where the gradient has been
        averaged across all towers.
    """
    averaged = []
    # zip(*tower_grads) regroups the pairs per variable:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for grads_and_vars in zip(*tower_grads):
        # Stack the per-tower gradients along a new leading 'tower' axis,
        # then average over that axis.
        stacked = tf.concat(
            [tf.expand_dims(g, 0) for g, _ in grads_and_vars], 0
        )
        mean_grad = tf.reduce_mean(stacked, 0)
        # The Variables are shared across towers, so the first tower's
        # pointer is as good as any.
        averaged.append((mean_grad, grads_and_vars[0][1]))
    return averaged
def recommend(model):
    """
    Generate n recommendations.
    :param model: recommendation model
    :return: tuple(recommendations made by model, recommendations made by primitive model, recall, coverage)
    """
    n = 10
    hit = 0  # used for recall calculation
    total_recommendations = 0
    all_recommendations = []  # used for coverage calculation
    recommendations = {}
    primitive_recommendations = {}
    # NOTE(review): relies on module-level globals X_test_prepared,
    # primitive_model and ratings_cleaned_df — confirm they are initialised
    # before calling this. user_profile indexing (1 = profile, 0 = key,
    # 2 = held-out control item) is inferred; verify against X_test_prepared.
    for user_id, user_profile in X_test_prepared.iterrows():  # iterate over test users, user_profile is a tuple
        prediction = model.predictItemByUser(user_profile[1], user_profile[0], n)
        primitive_prediction = primitive_model.predictItemByUser(None, user_profile[0], n, ratings_cleaned_df)
        # primitive_predictions = primitive_model.test()
        if prediction is None or prediction.ndim == 0:
            continue
        if user_profile[2] in prediction:  # if prediction contains control item increase hit counter
            hit += 1
        recommendations[user_id] = prediction
        primitive_recommendations[user_id] = primitive_prediction
        all_recommendations.extend(list(prediction))
        total_recommendations += 1
    if total_recommendations > 0:
        recall = hit / total_recommendations
    else:
        recall = 0
    # Coverage: fraction of catalogue items that appear in any recommendation.
    coverage = np.unique(all_recommendations).shape[0] / model.train_data.shape[1]
    return recommendations, primitive_recommendations, recall, coverage
def GetTrace(idp_name, package_name, version, launcher_activity, proxy_port, \
        change_account=True, with_access_token=True, revoke_access_token=True, reset=False, \
        uiconfig='uiaction.json', user='Eve1', port='4723', system_port=8200, tracefile='eveA.trace', \
        emulator_name=None, snapshot_tag=None):
    """
    Prepare network trace for further testing

    Logs the test account into the identity provider (unless restoring from
    an emulator snapshot), then retries the capture of a single network
    trace up to 5 times via get_single_trace.

    NOTE(review): rawTrace is never reassigned, so this function always
    returns None — confirm callers rely only on the side effect (the written
    tracefile).
    """
    running_logger.debug("Recording tracefile %s", tracefile)
    # init
    rawTrace = None
    idpPackageName = None
    idpActivityName = None
    if idp_name == 'sina':
        idpPackageName = 'com.sina.weibo'
        idpActivityName = 'com.sina.weibo.SplashActivity'
    elif idp_name == 'wechat':
        idpPackageName = 'com.tencent.mm'
        idpActivityName = 'com.tencent.mm.ui.LauncherUI'
    # change account for twice
    # Snapshot restores already contain a logged-in account, so skip login.
    if change_account and (emulator_name is None or snapshot_tag is None):
        mitmdump = launch_mitmdump(proxy_port)
        uictrl = UI_controller(port, system_port=system_port, package_name=idpPackageName, activity_name=idpActivityName, emulator_name=emulator_name)
        running_logger.debug('Try to change account')
        # Up to 3 login attempts; the for/else raises when all fail.
        for _ in range(3):
            try:
                if uictrl.idp_login(user, idp_name):
                    break
            except Exception as e:
                running_logger.warn(e)
                continue
        else:
            mitmdump.terminate()
            raise Exception("Unable to login idp")
        mitmdump.terminate()
    # try to get trace for 5 times
    for _ in range(5):
        try:
            if get_single_trace(proxy_port, tracefile, port, system_port, uiconfig, package_name, launcher_activity, emulator_name, snapshot_tag, idp_name, reset, user, with_access_token, version, revoke_access_token):
                break
        except AssertionError:
            # Raised when a UI status change takes too long; retry.
            running_logger.warn('Wait too long for status change')
            continue
        except Exception as e:
            running_logger.exception(e)
            continue
    else:
        raise Exception("Cannot get network trace file in package: {}, trace file: {}".format(package_name, tracefile))
    return rawTrace
def drop_nondominant_term(latex_dict: dict) -> str:
    """
    Placeholder validation for the "drop a non-dominant term" step.

    given
    x = \\langle\\psi_{\\alpha}| \\hat{A} |\\psi_{\\beta}\\rangle
    return
    x = \\langle\\psi_{\\alpha}| a_{\\beta} |\\psi_{\\beta} \\rangle

    NOTE: not implemented yet — always reports that no check was performed.

    >>> latex_dict = {}
    >>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> latex_dict['feed'] = [parse_latex('')]
    >>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
    >>> drop_nondominant_term(latex_dict)
    'no check performed'
    """
    logger.info("[trace]")
    return "no check performed"
def exampleCallback(**kwargs):
    """
    Example HID event callback: prints the event details.

    Expected kwargs:
        action     -- event trigger
        hidDevice  -- trigger device (must provide getName())
        buttonId   -- trigger button ID
        eventTypes -- all registered event types
        eventCount -- number of times the event occurred
    TODO: Add timers
    """
    device_name = kwargs["hidDevice"].getName()
    print(kwargs["action"], ":", device_name, ":Sample callback", kwargs)
def template_introduce():
    """
    This function constructs three image carousels for self introduction.
    Check also: faq_bot/model/data.py

    reference
    - `Common Message Property <https://developers.worksmobile.com/kr/document/100500805?lang=en>`_

    :return: image carousels type message content.
    """
    # _() marks strings for i18n; the plain English literals passed alongside
    # are the default (untranslated) fallbacks.
    fmt = _("See FAQs")
    action0 = make_i18n_message_action("query_leave", "query", "See FAQs", fmt,
                                       "See FAQs", fmt)
    action1 = make_i18n_message_action("query_welfare", "query", "See FAQs",
                                       fmt, "See FAQs", fmt)
    action2 = make_i18n_message_action("query_security", "query", "See FAQs",
                                       fmt, "See FAQs", fmt)
    # One carousel element per FAQ category: HR/Leave, Welfare, Security.
    fmt_title0 = _("HR/Leave")
    fmt_subtitle0 = _("See FAQs about HR and leave.")
    element0 = make_i18n_list_template_element("query", "HR/Leave",
                                               "See FAQs about HR and leave.",
                                               image=CAROUSEL["leave"][0],
                                               action=action0,
                                               fmt_title=fmt_title0,
                                               fmt_subtitle=fmt_subtitle0)
    fmt_title1 = _("Welfare/Work support")
    fmt_subtitle1 = _("See FAQs about welfare and work support.")
    element1 = make_i18n_list_template_element("query", "Welfare/Work support",
                                               "See FAQs about welfare "
                                               "and work support.",
                                               image=CAROUSEL["welfare"][0],
                                               action=action1,
                                               fmt_title=fmt_title1,
                                               fmt_subtitle=fmt_subtitle1)
    fmt_title2 = _("Security")
    fmt_subtitle2 = _("See FAQs about security.")
    element2 = make_i18n_list_template_element("query", "Security",
                                               "See FAQs about security.",
                                               image=CAROUSEL["security"][0],
                                               action=action2,
                                               fmt_title = fmt_title2,
                                               fmt_subtitle = fmt_subtitle2)
    return make_list_template([element0, element1, element2])
def assign_distance_to_mesh_vertex(vkey, weight, target_LOW, target_HIGH):
    """
    Compute the 'get_distance' value for the single vertex *vkey*.

    Parameters
    ----------
    vkey: int
        The vertex key.
    weight: float
        Weighting of the distances from the lower and upper target, 0 to 1.
    target_LOW: :class: 'compas_slicer.pre_processing.CompoundTarget'
        The lower compound target (required if target_HIGH is absent).
    target_HIGH: :class: 'compas_slicer.pre_processing.CompoundTarget'
        The upper compound target (optional).
    """
    if target_LOW and target_HIGH:
        # Both targets available: interpolate between their distance fields.
        return get_weighted_distance(vkey, weight, target_LOW, target_HIGH)
    if target_LOW:
        # Single target: offset its distance field by a fraction of its range.
        return target_LOW.get_distance(vkey) - weight * target_LOW.get_max_dist()
    raise ValueError('You need to provide at least one target')
def _get_user_by_email_or_username(request):
    """
    Finds a user object in the database based on the given request, ignores all fields except for email and username.

    Raises AuthFailedError when the POST data is missing the credentials.
    NOTE(review): on USER_MODEL.DoesNotExist this logs a warning and falls
    through, implicitly returning None — confirm callers handle None.
    """
    if 'email_or_username' not in request.POST or 'password' not in request.POST:
        raise AuthFailedError(_('There was an error receiving your login information. Please email us.'))
    email_or_username = request.POST.get('email_or_username', None)
    try:
        # A single lookup matches either the username or the email column.
        return USER_MODEL.objects.get(
            Q(username=email_or_username) | Q(email=email_or_username)
        )
    except USER_MODEL.DoesNotExist:
        # Log a short hash instead of the raw identifier to avoid leaking PII.
        digest = hashlib.shake_128(email_or_username.encode('utf-8')).hexdigest(16)  # pylint: disable=too-many-function-args
        AUDIT_LOG.warning(f"Login failed - Unknown user username/email {digest}")
def test__apply_with_missing_info(test_teardown):
    """
    Failure scenario: an application missing URL, position and company
    must be rejected with an explanatory status.
    """
    blank = ""
    result = apply_external("someid456", blank, blank, blank,
                            resume, date_posted, deadline, comment)
    assert result['status'] == "You must provide a job URL, position and company."
def timer(save=False, precision=3):
    """Decorator factory that logs a function's wall-clock run time.

    Args:
        save: when True, also attach a per-function file handler so the
            timing is persisted to disk.
        precision: number of decimal places in the logged duration.
    """
    def decorator(function):
        @wraps(function)
        def inner(*args, **kwargs):
            start = default_timer()
            value = function(*args, **kwargs)
            end = default_timer()
            if save:
                _logger.addHandler(file_handler(function.__name__))
            # Single log statement instead of the duplicated branch bodies.
            _logger.debug(f"[{function.__name__}] {round(end - start, precision)}s")
            return value
        return inner
    return decorator
def get_minsize_assignment(N, min_comm_size):
    """Create a membership vector where each community contains at least
    ``min_comm_size`` nodes.

    Parameters
    ----------
    N : int
        Desired length of the membership vector.
    min_comm_size : int
        Minimum number of nodes each community should have.

    Returns
    -------
    np.array
        Membership vector.
    """
    num_comms = int(N / min_comm_size)
    membership = -np.ones(N, dtype='int')  # -1 marks "not yet assigned"
    # Reserve min_comm_size random nodes for each community in turn.
    for comm in range(num_comms):
        unassigned = np.flatnonzero(membership == -1)
        chosen = np.random.choice(unassigned, min_comm_size, replace=False)
        membership[chosen] = comm
    # Spread any leftover nodes uniformly over the communities.
    membership[membership == -1] = np.random.randint(
        num_comms, size=np.sum(membership == -1))
    return membership
def _combine_plots(
    p1, p2, combine_rules=None,
    sort_plot=False, sort_key=lambda x_y: x_y[0]
):
    """Combine two plots into one, following the given combine_rules to
    determine how to merge the constants

    :param p1: 1st plot to combine
    :param p2: 2nd plot to combine
    :param combine_rules: list of combine rules, which define how constants
        in const_list and const_dict are merged. See definition above.
    :param sort_plot: if true, sort the resulting plot according to the
        sort_key. Default is to sort by x value.
    :param sort_key: function that, when given a plot point, returns a
        comparable item, by which the plot is sorted.
    :return: combined plot
    """
    # Combine x arrays with each other and y arrays with each other; only
    # points of p2 whose x value is not already in p1 are appended.
    x1, y1 = p1[0:2]
    x2, y2 = list(), list()
    for x2i, y2i in zip(*p2[0:2]):
        if x2i not in x1:
            x2.append(x2i)
            y2.append(y2i)
    x = np.concatenate((x1, np.array(x2)))
    y = np.concatenate((y1, np.array(y2)))
    # Sort plot
    if sort_plot:
        next_x, next_y = list(), list()
        for xi, yi in sorted(zip(x, y), key=sort_key):
            next_x.append(xi)
            next_y.append(yi)
        x = np.array(next_x)
        y = np.array(next_y)
    # Combine constant lists: keep a constant only when both plots agree.
    const_list = list()
    for c1, c2 in zip(p1[2], p2[2]):
        if c1 is not None and c2 is not None and _const_equals(c1, c2):
            const_list.append(c1)
        else:
            const_list.append(None)
    const_dict = dict()
    # Combine constant dicts. Fixed: d1.keys() + d2.keys() is Python-2-only
    # (dict views cannot be concatenated in Python 3); union the key sets.
    d1, d2 = p1[3], p2[3]
    for k in set(d1) | set(d2):
        if k in d1 and k in d2:
            v1, v2 = d1[k], d2[k]
            if v1 is not None and v2 is not None and _const_equals(v1, v2):
                const_dict[k] = d1[k]
            else:
                const_dict[k] = None
        else:
            const_dict[k] = None
    # Other combine rules
    p = x, y, const_list, const_dict
    if combine_rules is not None:
        for rule in combine_rules:
            p = rule(p, p1, p2)
    return p
def shape_of(array, *, strict=False):
    """
    Return the shape of *array*: the size of each nesting level, derived by
    walking down the first element of every level.

    With ``strict=True``, additionally verify that the nesting is uniform
    and raise ``ValueError`` (naming the offending index path) when not.
    """
    shape = []
    node = array
    # Collect candidate sizes by descending through the first elements.
    while isinstance(node, (tuple, list)):
        shape.append(len(node))
        if not node:
            break
        node = node[0]
    if strict:
        pending = deque(
            (str(i), child)
            for i, child in enumerate(array)
        )
        # Each deeper level must consist solely of sequences of the same size.
        for expected in shape[1:]:
            for _ in range(len(pending)):
                path, child = pending.popleft()
                if not isinstance(child, (tuple, list)):
                    raise ValueError(
                        f"array is not uniform: "
                        f"not isinstance(array[{path}], (tuple, list)) ({child})"
                    )
                if len(child) != expected:
                    raise ValueError(
                        f"array is not uniform: "
                        f"len(array[{path}]) ({child}) != {expected}"
                    )
                pending.extend(
                    (path + f", {i}", grandchild)
                    for i, grandchild in enumerate(child)
                )
        # Whatever remains must be scalars, not deeper sequences.
        for _ in range(len(pending)):
            path, child = pending.popleft()
            if isinstance(child, (tuple, list)):
                raise ValueError(
                    f"array is not uniform: "
                    f"isinstance(array[{path}], (tuple, list)) ({child})"
                )
    return tuple(shape)
def ingest(study_dir, log_level):
    """
    Ingest a study into the Kids First OMOP db
    \b
    Arguments:
    \b
        study_dir - Path to study directory containing extract_configs dir
    """
    from common import etl

    # Prepare the output directory and configure logging.
    output_dir = _setup_output_and_logging(study_dir, log_level)
    # Load the study-specific transform module from its transform.py.
    transform_path = os.path.join(os.path.abspath(study_dir), 'transform.py')
    study_transform = import_module_from_file(transform_path)
    # Run etl
    etl.run(study_dir, output_dir, study_transform.transform)
def return_state_dict(network):
    """
    Build a checkpoint-ready dict with CPU copies of the feat_model and
    classifier state dicts.
    """
    def _cpu_state(module):
        # Move every tensor to the CPU so the checkpoint is device-agnostic.
        return {name: tensor.cpu() for name, tensor in module.state_dict().items()}

    return {
        "feat_model": _cpu_state(network["feat_model"]),
        "classifier": _cpu_state(network["classifier"]),
    }
def assert_allclose(
    actual: Tuple[float, float],
    desired: Tuple[numpy.float64, numpy.float64],
    err_msg: Literal["(-1.0,) d ['double']"],
):
    """
    Auto-generated API-usage stub; the body is intentionally empty.

    usage.scipy: 1
    """
    ...
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.