| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def _read_one_cml(cml_g,
cml_id_list=None,
t_start=None,
t_stop=None,
column_names_to_read=None,
read_all_data=False):
"""
Parameters
----------
cml_g
cml_id_list
t_start
t_stop
column_names_to_read
read_all_data
Returns
-------
"""
metadata = _read_cml_metadata(cml_g)
if cml_id_list is not None:
if metadata['cml_id'] not in cml_id_list:
return None
cml_ch_list = []
for cml_ch_name, cml_ch_g in list(cml_g.items()):
if 'channel_' in cml_ch_name:
cml_ch_list.append(
_read_cml_channel(
cml_ch_g=cml_ch_g,
t_start=t_start,
t_stop=t_stop,
column_names_to_read=column_names_to_read,
read_all_data=read_all_data))
# TODO: Handle `auxiliary_N` and `product_N` cml_g-subgroups
return Comlink(channels=cml_ch_list, metadata=metadata)
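# Usage sketch (hedged): assumes the CML groups live in an HDF5 file opened with
# h5py and that the module-level helpers (_read_cml_metadata, _read_cml_channel,
# Comlink) are available; the file name and IDs below are purely illustrative.
import h5py

with h5py.File("cml_data.h5", "r") as h5f:
    comlinks = []
    for _, cml_g in h5f.items():
        cml = _read_one_cml(cml_g, cml_id_list=["cml_1", "cml_2"], read_all_data=True)
        if cml is not None:
            comlinks.append(cml)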
|
952b40329fc75b0a37f210a734a6aa4a0f3b79f8
| 3,639,900
|
import os
def get_commands():
"""
returns a dictionary with all the az cli commands, keyed by the path to the command
inside each dictionary entry is another dictionary of verbs for that command
with the command object (from cli core module) being stored in that
"""
# using Microsoft VSCode tooling module to load the az cli command table
tooling.initialize()
commands = tooling.load_command_table()
command_dict = {} # initialize empty dict for our return
# iterate through the all the commands
for command_name in commands:
command = commands[command_name]
#print(command_name) # get the name of the command in format "az ..."
command_list = command_name.split(" ") # split apart each command segment
command_list = [pythonize_name(name) for name in command_list] # pythonize the names
command_verb = command_list.pop() # remove the last command which is the action verb
command_path = os.path.join(Constants.COMMAND_ROOT, *command_list) # build path of commands
# add command path to dictionary if not already there
if command_path not in command_dict:
command_dict[command_path]={}
# add the command object to the dictionary using the path and verb as keys
command_dict[command_path][command_verb] = command
return command_dict
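# Usage sketch (hedged): the exact command objects come from the az CLI core
# module, so only the structure of the returned dictionary is exercised here.
commands_by_path = get_commands()
for path, verbs in sorted(commands_by_path.items()):
    print(path, "->", ", ".join(sorted(verbs)))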
|
6e0e77db9f508851d3aabca5571a10cc8736a42f
| 3,639,901
|
def calculate_keypoints(img, method, single_channel, graphics=False):
"""
Gray or single channel input
https://pysource.com/2018/03/21/feature-detection-sift-surf-obr-opencv-3-4-with-python-3-tutorial-25/
"""
if single_channel=='gray':
img_single_channel = single_channel_gray(img)
elif single_channel=='laplacian':
img_single_channel = compute_laplac(img)
elif single_channel=='color':
img_single_channel = clahe(img)
elif single_channel=='HSV':
img_single_channel = HSV(img)
elif single_channel=='hog':
img_single_channel = hog(img)
elif single_channel=='mixed':
img_single_channel = mixed(img)
print(img_single_channel.shape, type(img_single_channel), img_single_channel.dtype)
if method=='sift':
# SIFT
sift = cv2.SIFT_create(edgeThreshold = 21, sigma = 1.2) #edgeThreshold = 21, sigma = 1.2 #SIFT (Scale-Invariant Feature Transform)
keypoints_sift, descriptors_sift = sift.detectAndCompute(img_single_channel, None)
img_sift = cv2.drawKeypoints(img_single_channel, keypoints_sift, None, color=(0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        if graphics:
plt.figure(), plt.imshow(img_sift), plt.title("SIFT"), plt.show()
return keypoints_sift, descriptors_sift
elif method=='orb':
# ORB
orb = cv2.ORB_create(nfeatures=3000)
keypoints_orb, descriptors_orb = orb.detectAndCompute(img_single_channel, None)
img_orb = cv2.drawKeypoints(img_single_channel, keypoints_orb, None, color=(0, 255, 0), flags=0)
        if graphics:
plt.figure(), plt.imshow(img_orb), plt.title("ORB"), plt.show()
return keypoints_orb, descriptors_orb
elif method=='fast':
# FAST
fast = cv2.FastFeatureDetector_create() #FAST algorithm for corner detection
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
keypoints_fast = fast.detect(img_single_channel, None)
keypoints_brief, descriptors_brief = brief.compute(img_single_channel, keypoints_fast)
print(len(keypoints_fast), len(keypoints_brief))
        if graphics:
img_fast = cv2.drawKeypoints(img_single_channel, keypoints_fast, None, color=(255, 0, 0))
img_brief = cv2.drawKeypoints(img_single_channel, keypoints_brief, None, color=(255, 0, 0))
plt.figure(), plt.imshow(img_fast), plt.title("Detected FAST keypoints"), plt.show()
plt.figure(), plt.imshow(img_brief), plt.title("Detected BRIEF keypoints"), plt.show()
return keypoints_brief, descriptors_brief
elif method=='star':
# STAR-BRIEF
star = cv2.xfeatures2d.StarDetector_create() ## only feature
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create() # only descript, NO feature
keypoints_star = star.detect(img_single_channel, None)
keypoints_brief, descriptors_brief = brief.compute(img_single_channel, keypoints_star)
print(len(keypoints_star), len(keypoints_brief))
        if graphics:
img_star = cv2.drawKeypoints(img_single_channel, keypoints_star, None, color=(255, 0, 0))
img_brief = cv2.drawKeypoints(img_single_channel, keypoints_brief, None, color=(255, 0, 0))
plt.figure(), plt.imshow(img_star), plt.title("Detected STAR keypoints"), plt.show()
plt.figure(), plt.imshow(img_brief), plt.title("Detected BRIEF keypoints"), plt.show()
return keypoints_brief, descriptors_brief
return 0
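# Usage sketch (hedged): assumes cv2/matplotlib are imported as in the function
# body and that the single-channel helpers (single_channel_gray, ...) exist;
# the image path is illustrative.
img = cv2.imread("example.jpg")
keypoints, descriptors = calculate_keypoints(img, method='sift', single_channel='gray')
print(len(keypoints), "SIFT keypoints detected")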
|
18fa71aa824f7ce4ab5468832dbc020e4fc6519d
| 3,639,902
|
def plot_pos_neg(
train_data: pd.DataFrame,
train_target: pd.DataFrame,
col1: str = 'v5',
col2: str = 'v6'
) -> None:
"""
Make hexbin plot for training transaction data
:param train_data: pd.DataFrame, features dataframe
:param train_target: pd.DataFrame, target dataframe
:param col1: str, name of first column for hexbin plot
:param col2: str, name of second column for hexbin plot
:return: None
"""
pos_df = pd.DataFrame(train_data[train_target.values == 1], columns=train_data.columns)
neg_df = pd.DataFrame(train_data[train_target.values == 0], columns=train_data.columns)
    sns.jointplot(x=pos_df[col1], y=pos_df[col2], kind='hex', xlim=(-5, 5), ylim=(-5, 5))
    plt.suptitle('Positive distribution')
    sns.jointplot(x=neg_df[col1], y=neg_df[col2], kind='hex', xlim=(-5, 5), ylim=(-5, 5))
    _ = plt.suptitle('Negative distribution')
return None
|
2a5c27a638d43eb64192b28345edcbafd592825a
| 3,639,903
|
def raffle_form(request, prize_id):
"""Supply the raffle form."""
_ = request
prize = get_object_or_404(RafflePrize, pk=prize_id)
challenge = challenge_mgr.get_challenge()
try:
template = NoticeTemplate.objects.get(notice_type='raffle-winner-receipt')
except NoticeTemplate.DoesNotExist:
return render_to_response('view_prizes/form.txt', {
'raffle': True,
'prize': prize,
'round': prize.round,
'competition_name': challenge.name,
}, context_instance=RequestContext(request), mimetype='text/plain')
message = template.render({
'raffle': True,
'prize': prize,
'round': prize.round,
'competition_name': challenge.name,
})
    return HttpResponse(message, content_type='text/html')
|
c84f6a1824ad0d6991306cb543fdaee439b2d183
| 3,639,904
|
def is_rldh_label(label):
"""Tests a binary string against the definition of R-LDH label
As defined by RFC5890_
Reserved LDH labels, known as "tagged domain names" in some
other contexts, have the property that they contain "--" in the
third and fourth characters but which otherwise conform to LDH
label rules.
Non-Reserved LDH labels are the set of valid LDH labels that do
not have "--" in the third and fourth positions.
Therefore you can test for a NR-LDH label simply by using the *not*
operator."""
return is_ldh_label(label) and label[2:4] == b'--'
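# Illustrative check (hedged): assumes is_ldh_label from the same module accepts
# these byte strings. IDNA A-labels carry the "xn--" ACE prefix, so "--" sits in
# the third and fourth positions and they are classified as R-LDH labels.
print(is_rldh_label(b'xn--bcher-kva'))  # expected: True  (Reserved LDH label)
print(is_rldh_label(b'example'))        # expected: False (Non-Reserved LDH label)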
|
c44fef221381abaf0fa88115962ab9946c266090
| 3,639,905
|
def offence_memory_patterns(obs, player_x, player_y):
""" group of memory patterns for environments in which player's team has the ball """
def environment_fits(obs, player_x, player_y):
""" environment fits constraints """
        # the player has the ball
if obs["ball_owned_player"] == obs["active"] and obs["ball_owned_team"] == 0:
return True
return False
def get_memory_patterns(obs, player_x, player_y):
""" get list of memory patterns """
memory_patterns = [
far_from_goal_shot,
far_from_goal_high_pass,
bad_angle_high_pass,
close_to_goalkeeper_shot,
go_through_opponents,
idle
]
return memory_patterns
return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
|
18643be138781ae7025e6f021437fd1ac2038566
| 3,639,906
|
import psutil
import os
def get_mem() -> int:
"""Return memory used by CombSpecSearcher - note this is actually the
memory usage of the process that the instance of CombSpecSearcher was
invoked."""
return int(psutil.Process(os.getpid()).memory_info().rss)
|
e2f3a5e4f954ad9a294df1747835cad7684486b0
| 3,639,907
|
import os
import yaml
def load_parameters(directory_name):
"""
Loads the .yml file parameters to a dictionary.
"""
    root = os.getcwd()
    parameter_file_name = os.path.join(root, directory_name)
    with open(parameter_file_name, 'r') as parameter_file:
        parameters = yaml.load(parameter_file, Loader=yaml.FullLoader)
    return parameters
|
793efa00af16851b78fd0f4277b24d03db76fe2c
| 3,639,908
|
def aggregated_lineplot_new(df_agg,countries,fill_between=('min','max'),save=False,fig=None,ax=None,clrs='default'):
"""
    Creates an aggregated lineplot for multiple countries
    Arguments:
        *df_agg* (DataFrame) : contains the aggregated results, either relative (df_rel) or absolute (df_abs)
        *countries* (list) : list of strings with names of countries to plot
        *fill_between* (tuple) : indicates which percentiles to fill between
        *save* (Boolean) : should the file be saved in the folder config['paths']['output_images']
Returns:
fig,ax
"""
#assert fill_between in cols.
if 'AoI relative combinations' in df_agg.columns: #INDICATES THAT THESE ARE RELATIVE RESULTS
grouper = 'AoI relative combinations'
xlabel = "% of combinations of micro-floods (AoI's) of the maximum number of micro-floods per country"
relative = True #need for plotting
elif 'AoI combinations' in df_agg.columns: #ABSOLUTE RESULTS
grouper = 'AoI combinations'
xlabel = "Number of combinations of micro-floods (AoI's)"
relative = False
if clrs == 'default':
clrs = ['darkblue', 'red', 'green', 'purple', 'orange', 'skyblue']
    if fig is None and ax is None:  # if no figure and no axes are provided
fig, ax = plt.subplots(figsize=(8, 6))
lines = df_agg
for cntry, cl in zip(countries, clrs):
c = cntry.capitalize()
ax.plot(lines.loc[lines['country'] == c, grouper], lines.loc[lines['country'] == c, 'mean'],
color=cl, label=c)
ax.fill_between(lines.loc[lines['country'] == c, grouper], lines.loc[lines['country'] == c, fill_between[0]],
lines.loc[lines['country'] == c, fill_between[1]], alpha=0.3, edgecolor=cl, facecolor=cl, linewidth=0)
ax.legend()
ax.set_ylabel("% optimal routes disrupted")
ax.set_xlabel(xlabel)
#Todo: add function to link country names with official codes NUTS0
if save: #TODO REPLACE ALL INSTANCES OF THIS PART OF CODE WITH A SPECIAL FUNCTION
save_figs = load_config(config_file)['paths']['output_images'] / 'aggregate_line'
if not save_figs.exists(): save_figs.mkdir()
filename = "aggregateline_{}_{}.png".format('-'.join(countries),fill_between[0] + '-' + fill_between[1])
if relative: filename = "aggregateline_{}_{}_relative.png".format(\
'-'.join(countries),fill_between[0] + '-' + fill_between[1])
fig.savefig(save_figs / filename)
return fig,ax
|
0f6214298bb23e0c87d23458092b2b15302e2689
| 3,639,909
|
from collections import OrderedDict
def _stat_categories():
"""
Returns a `collections.OrderedDict` of all statistical categories
available for play-by-play data.
"""
cats = OrderedDict()
for row in nfldb.category.categories:
cat_type = Enums.category_scope[row[2]]
cats[row[3]] = Category(row[3], row[0], cat_type, row[1], row[4])
return cats
|
fd4df74e8b3c2f94d41f407c88a3be997913adb4
| 3,639,910
|
from typing import List
import random
import math
def rsafactor(d: int, e: int, N: int) -> List[int]:
"""
This function returns the factors of N, where p*q=N
Return: [p, q]
We call N the RSA modulus, e the encryption exponent, and d the decryption exponent.
The pair (N, e) is the public key. As its name suggests, it is public and is used to
encrypt messages.
The pair (N, d) is the secret key or private key and is known only to the recipient
of encrypted messages.
>>> rsafactor(3, 16971, 25777)
[149, 173]
>>> rsafactor(7331, 11, 27233)
[113, 241]
>>> rsafactor(4021, 13, 17711)
[89, 199]
"""
k = d * e - 1
p = 0
q = 0
while p == 0:
g = random.randint(2, N - 1)
t = k
while True:
if t % 2 == 0:
t = t // 2
                x = pow(g, t, N)  # modular exponentiation; equivalent to (g ** t) % N but much faster
y = math.gcd(x - 1, N)
if x > 1 and y > 1:
p = y
q = N // y
break # find the correct factors
else:
break # t is not divisible by 2, break and choose another g
return sorted([p, q])
|
21e655bc3f5b098da0d437a305baf89c70cebd56
| 3,639,911
|
def integrate_prob_current(psi, n0, n1, h):
"""
Numerically integrate the probability current, which is
Im{psi d/dx psi^*} over the given spatial interval.
"""
psi_diff = get_imag_grad(psi, h)
curr = get_prob_current(psi, psi_diff)
res = np.zeros(psi.shape[0])
with progressbar.ProgressBar(max_value=int(psi.shape[0])) as bar:
for i in range(0, psi.shape[0]):
            res[i] = np.trapz(curr[i, n0:n1], dx=h)
bar.update(i)
print("Finished calculating the integrated prob. current!")
return res
|
bda6efda2f61d21139011f579398c5363ae53872
| 3,639,912
|
import base64
def getFile(path):
"""
指定一个文件的路径,放回该文件的信息。
:param path: 文件路径
:return: PHP-> base64 code
"""
code = """
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
$path = '%s';
$hanlder = fopen($path, 'rb');
$res = fread($hanlder, filesize($path));
fclose($hanlder);
echo $res;
"""% path
return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
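# Usage sketch (hedged): the target path is illustrative; decoding the payload
# shows the PHP snippet that would be sent.
payload = getFile("/etc/hostname")
print(base64.b64decode(payload).decode("UTF-8"))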
|
e44e3f90e5febee54d2f5de48e35f0b83acf9842
| 3,639,913
|
def rgc(tmpdir):
""" Provide an RGC instance; avoid disk read/write and stay in memory. """
return RGC(entries={CFG_GENOMES_KEY: dict(CONF_DATA),
CFG_FOLDER_KEY: tmpdir.strpath,
CFG_SERVER_KEY: "http://staging.refgenomes.databio.org/"})
|
1b79c9f4d5e07e13a23fe5ff88acf4e022481b42
| 3,639,914
|
import sys
import os
def execlog(command): # logs commands and control errors
"""
controling the command executions using os.system, and logging the commands
if an error raise when trying to execute a command, stops the script and writting the
rest of commands to the log file after a 'Skipping from here' note.
"""
global skipping
try:
log = open(cmd_logfile,'a')
except IOError:
sys.exit("Could not fined "+cmd_logfile)
else:
log.write(command+"\n")
log.close()
if not skipping:
cmd_strerror = os.strerror(os.system(command))
        if cmd_strerror != 'Success':
            message(cmd_strerror)
            message("Failed at " + stage)
if not stage == "* Unwrapping":
message("############## Skipping from here ##############")
log = open(cmd_logfile,'a')
log.write("############## Skipping from here ##############\n")
log.close()
skipping = 1
else:
return "unwfaild"
|
cea497df52838cbeb22fe9d29f72234f4e0076d5
| 3,639,915
|
import time
def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,
fluxbias_q1, amp,
sim_step,
verbose: bool=True):
"""
Calculates the propagator and the quantities of interest from the propagator (either unitary or superoperator)
Args:
tlist (array): times in s, describes the x component of the
trajectory to simulate (not actually used, we just use sim_step)
sim_step(float): time between one point and another of amp
        c_ops (list of Qobj): time (in)dependent jump operators
amp(array): amplitude in voltage describes the y-component of the trajectory to simulate
fluxlutman,noise_parameters_CZ: instruments containing various parameters
Returns
phi_cond (float): conditional phase (deg)
L1 (float): leakage
L2 (float): seepage
avgatefid_pc (float): average gate fidelity in full space, phase corrected
avgatefid_compsubspace_pc (float): average gate fidelity only in the computational subspace, phase corrected
avgatefid_compsubspace (float): average gate fidelity only in the computational subspace, not phase corrected,
but taking into account the rotating frame of the two qutrits as qubits
phase_q0 / q1 (float): single qubit phases in the rotating frame at the end of the pulse
"""
H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude
# NOTE: parameters of H_0 could be not exactly e.g. the bare frequencies
# We change the basis from the standard basis to the basis of eigenvectors of H_0
# The columns of S are the eigenvectors of H_0, appropriately ordered
if noise_parameters_CZ.dressed_compsub():
S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])
else:
S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S
H_0_diag = S.dag()*H_0*S
#w_q0 = fluxlutman.q_freq_01()
w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)
#w_q1 = fluxlutman.q_freq_10()
w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)
# H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1,
# alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)
w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()
# Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))
w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \
- np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2
# with sigma up to circa 1e-3 \mu\Phi_0 the second order is irrelevant
correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)
t0 = time.time()
exp_L_total=1
for i in range(len(amp)):
H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H
H=S.dag()*H*S
if c_ops != []:
c_ops_temp=[]
for c in range(len(c_ops)):
if isinstance(c_ops[c],list):
c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis
else:
c_ops_temp.append(c_ops[c])
liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()
else:
liouville_exp_t=(-1j*H*sim_step).expm()
exp_L_total=liouville_exp_t*exp_L_total
t1 = time.time()
#print('\n alternative propagator',t1-t0)
U_final = exp_L_total
#U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)
phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
phi_cond = phases[-1]
L1 = leakage_from_superoperator(U_final)
population_02_state = calc_population_02_state(U_final)
L2 = seepage_from_superoperator(U_final)
avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)
avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta
#print('avgatefid_compsubspace',avgatefid_compsubspace)
#H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1,
# alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)
#U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state
t = tlist[-1]+sim_step
U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)
### Script to check that we are correctly removing the single qubit phases in the rotating frame
# cz_length = fluxlutman.cz_length()
# U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()
# phases_check = phases_from_superoperator(U_check)
# print(phases_check)
avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)
# NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity
### Script to check that leakage and phi_cond are not affected by the phase correction, as it should be
# L1_bis = leakage_from_superoperator(U_final_new)
# phi_cond_bis = phases_from_superoperator(U_final_new)[-1]
# print('leakage',L1-L1_bis)
# print('phi_cond',phi_cond-phi_cond_bis)
phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
phase_q0 = (phases[1]-phases[0]) % 360
phase_q1 = (phases[2]-phases[0]) % 360
# We now correct only for the phase of qubit left (q1), in the rotating frame
avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)
return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,
'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,
'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,
'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,
'U_final_new': U_final_new}
|
d81737e78a9e58ee160f80c58f70f73250f47844
| 3,639,916
|
def __polyline():
"""Read polyline in from package data.
:return:
"""
polyline_filename = resource_filename(
'cad', join(join('data', 'dxf'), 'polyline.dxf'))
with open(polyline_filename, 'r') as polyline_file:
return polyline_file.read()
|
667a8a31decd074b1a8599bcde24caaedfd88d02
| 3,639,917
|
import random
import math
def create_identity_split(all_chain_sequences, cutoff, split_size,
min_fam_in_split):
"""
Create a split while retaining diversity specified by min_fam_in_split.
Returns split and removes any pdbs in this split from the remaining dataset
"""
dataset_size = len(all_chain_sequences)
tmp = {x: y for (x, y) in all_chain_sequences}
assert len(tmp) == len(all_chain_sequences)
all_chain_sequences = tmp
# Get structure tuple.
split, used = set(), set()
to_use = set(all_chain_sequences.keys())
while len(split) < split_size:
# Get random structure tuple and random chain_sequence.
        rstuple = random.choice(tuple(to_use))  # random.sample on a set is deprecated in newer Python
rcs = all_chain_sequences[rstuple]
found = seq.find_similar(rcs, 'blast_db', cutoff, dataset_size)
# Get structure tuples.
found = set([seq.fasta_name_to_tuple(x)[0] for x in found])
# ensure that at least min_fam_in_split families in each split
max_fam_size = int(math.ceil(split_size / min_fam_in_split))
split = split.union(list(found)[:max_fam_size])
to_use = to_use.difference(found)
used = used.union(found)
selected_chain_sequences = \
[(s, cs) for s, cs in all_chain_sequences.items() if s in split]
remaining_chain_sequences = \
[(s, cs) for s, cs in all_chain_sequences.items() if s in to_use]
return selected_chain_sequences, remaining_chain_sequences
|
2be670c382d93437d22a931314eade6ed8332436
| 3,639,918
|
def get_SNR(raw, fmin=1, fmax=55, seconds=3, freq=[8, 13]):
"""Compute power spectrum and calculate 1/f-corrected SNR in one band.
Parameters
----------
raw : instance of Raw
Raw instance containing traces for which to compute SNR
fmin : float
minimum frequency that is used for fitting spectral model.
fmax : float
maximum frequency that is used for fitting spectral model.
seconds: float
Window length in seconds, converts to FFT points for PSD calculation.
freq : list | [8, 13]
SNR in that frequency window is computed.
Returns
-------
SNR : array, 1-D
Contains SNR (1/f-corrected, for a chosen frequency) for each channel.
"""
SNR = np.zeros((len(raw.ch_names),))
n_fft = int(seconds * raw.info["sfreq"])
psd, freqs = mne.time_frequency.psd_welch(
raw, fmin=fmin, fmax=fmax, n_fft=n_fft
)
fm = fooof.FOOOFGroup()
fm.fit(freqs, psd)
for pick in range(len(raw.ch_names)):
psd_corr = 10 * np.log10(psd[pick]) - 10 * fm.get_fooof(pick)._ap_fit
idx = np.where((freqs > freq[0]) & (freqs < freq[1]))[0]
idx_max = np.argmax(psd_corr[idx])
SNR[pick] = psd_corr[idx][idx_max]
return SNR
|
8536e0856c3e82f31a99d6d783befd9455054e7d
| 3,639,919
|
def get_all_child_wmes(self):
""" Returns a list of (attr, val) tuples representing all wmes rooted at this identifier
val will either be an Identifier or a string, depending on its type """
wmes = []
for index in range(self.GetNumberChildren()):
wme = self.GetChild(index)
if wme.IsIdentifier():
wmes.append( (wme.GetAttribute(), wme.ConvertToIdentifier()) )
else:
wmes.append( (wme.GetAttribute(), wme.GetValueAsString()) )
return wmes
|
fb66aef96ca5fd5a61a34a86052ab9014d5db8a4
| 3,639,920
|
from pathlib import Path
from tqdm import tqdm
def scan_image_directory(path):
"""Scan directory of FITS files to create basic stats.
Creates CSV file ready to be read by pandas and print-out of the stats if
less than 100 entries.
Parameters
----------
path : str, pathlib.Path
Returns
-------
pd.DataFrame
DataFrame containing the collected stats
"""
input_dir = Path(path)
directory_file = input_dir / "directory.csv"
files = list(input_dir.glob("*.fits"))
print(len(files), "images found.")
# fields = [
# "name",
# "date",
# "time",
# "filter",
# "exposure",
# "hasSlit",
# "isValid",
# "angle",
# "radius",
# "area",
# "centerY",
# "centerX",
# "sublon",
# "sublat",
# "isFull",
# "group",
# ]
bucket = []
print("Scanning directory...")
for fitspath in tqdm(files):
_, head = pf.getdata(fitspath, header=True)
line = {}
line["name"] = fitspath.name
line["date"] = head["DATE_OBS"]
line["time"] = head["TIME_OBS"]
line["filter"] = head["GFLT"]
line["exposure"] = head["ELAPTIME"]
line["hasSlit"] = head["SLIT"] != "Mirror"
line["isValid"] = head["ELAPTIME"] == 0.482500 and head["GFLT"] == "contK"
line["naxis1"] = head["NAXIS1"]
line["naxis2"] = head["NAXIS2"]
bucket.append(line)
df = pd.DataFrame(bucket)
df.to_csv(directory_file, index=False)
print("Metadata CSV generated at", directory_file)
return df
|
aaffe891d9c5879973d18d28eff4a18954f0a348
| 3,639,921
|
def load_mat(filename):
"""
Reads a OpenCV Mat from the given filename
"""
return read_mat(open(filename, 'rb'))
|
73faf2d2890a859681abdd6043bb4975516465fb
| 3,639,922
|
def connected_components(image, threshold, min_area, max_area, max_features, invert=False):
"""
Detect features using connected-component labeling.
Arguments:
image (float array): The image data. \n
threshold (float): The threshold value. \n
...
Returns:
features (pandas DataFrame): A pandas DataFrame with the detected features. \n
image_out (2D array): The output image.
"""
features = pd.DataFrame()
threshold_image = (image > threshold).astype(int) # threshold image
if invert:
threshold_image = 1 - threshold_image
label_image = skimage.measure.label(threshold_image)
regions = skimage.measure.regionprops(label_image = label_image, intensity_image = image) # http://scikit-image.org/docs/dev/api/skimage.measure.html
j = 0
for region in regions:
# Area filter first
if region.area < min_area or region.area > max_area: # Do not add feature
continue
        if j >= max_features:  # Do not add features beyond the maximum count
            continue
        j += 1  # count accepted features so that max_features actually takes effect
features = features.append([{'y': region.centroid[0],
'x': region.centroid[1],
'y_weighted': region.weighted_centroid[0],
'x_weighted': region.weighted_centroid[1],
'orientation': region.orientation,
'minor_axis_length': region.minor_axis_length,
'major_axis_length': region.major_axis_length,
'eccentricity': region.eccentricity,
'area': region.area,
'equivalent_diameter': region.equivalent_diameter,
'filled_area': region.filled_area,
'max_intensity': region.max_intensity,
'mean_intensity': region.mean_intensity,}])
return features, threshold_image
|
541e2e8213681a064b605053033fc9aea095ad69
| 3,639,923
|
import os
def reduce_scan(row, params, **kwargs):
"""
Reduce scan-mode grism data
.. warning::
This function is not yet implemented. It will raise an exception.
Parameters
----------
row : abscal.common.exposure_data_table.AbscalDataTable
Single-row table of the exposure to be extracted.
params : dict
Dictionary of parameters to use for the reduction
arg_list : namespace
Namespace of command-line arguments.
Returns
-------
row : abscal.common.exposure_data_table.AbscalDataTable
Updated single-row table of the exposure
"""
raise NotImplementedError("Scan mode is not yet available.")
default_values = get_defaults('abscal.common.args')
base_defaults = default_values | get_defaults(kwargs.get('module_name', __name__))
verbose = arg_list.verbose
show_plots = arg_list.plots
bkg_flat_order = arg_list.bkg_flat_order
file = os.path.join(row["path"], row["filename"])
with fits.open(file) as inf:
image = inf['SCI'].data
filter = row['filter']
xsize, ysize = image.shape[1], image.shape[0]
err = inf['ERR'].data
time = inf['TIME'].data
dq = inf['DQ'].data
return input_table
|
9a9e19f8a5a48d62181208562a4ecff526b41638
| 3,639,924
|
import os
import sys
import importlib
def module(spec):
""" Returns the module at :spec:
@see Issue #2
:param spec: to load.
:type spec: str
"""
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
return importlib.import_module(spec)
|
33928f92dddeee5fa8822e2a592d9a957867b5d9
| 3,639,925
|
from typing import Tuple
def transform_digits_to_string(labels: Tuple[str], coefficients,
offset: Fraction) -> str:
"""Form a string from digits.
Arguments
---------
labels: the tuple of lablels (ex.: ('x', 'y', 'z') or ('a', 'b', 'c')))
coefficients: the parameters in front of label (ex.: (1.0, 0.5, 0.0))
offset: the number (ex.: 2/3)
Output
------
string
Example
-------
>>> transform_digits_to_string(('x', 'y', 'z'), (1.0, 0.5, 0.0), 0.6666667)
x+1/2y+2/3
"""
l_res = []
for _coefficient, _label in zip(coefficients, labels):
_name = transform_fraction_with_label_to_string(_coefficient, _label)
if _name == "":
pass
elif _name.startswith("-"):
l_res.append(_name)
elif l_res == []:
l_res.append(_name)
else:
l_res.append(f"+{_name:}")
_name = str(Fraction(offset).limit_denominator(10))
if _name == "0":
if l_res == []:
l_res.append(_name)
elif ((l_res == []) | (_name.startswith("-"))):
l_res.append(_name)
else:
l_res.append(f"+{_name:}")
return "".join(l_res)
|
e9486bd7cd1749a4a33e1ca8d29637468dd0d54b
| 3,639,926
|
def match_peaks_with_mz_info_in_spectra(spec_a, spec_b, ms2_ppm=None, ms2_da=None):
"""
Match two spectra, find common peaks. If both ms2_ppm and ms2_da is defined, ms2_da will be used.
:return: list. Each element in the list is a list contain three elements:
m/z from spec 1; intensity from spec 1; m/z from spec 2; intensity from spec 2.
"""
a = 0
b = 0
spec_merged = []
peak_b_mz = 0.
peak_b_int = 0.
while a < spec_a.shape[0] and b < spec_b.shape[0]:
mass_delta_ppm = (spec_a[a, 0] - spec_b[b, 0]) / spec_a[a, 0] * 1e6
if ms2_da is not None:
ms2_ppm = ms2_da / spec_a[a, 0] * 1e6
if mass_delta_ppm < -ms2_ppm:
# Peak only existed in spec a.
spec_merged.append([spec_a[a, 0], spec_a[a, 1], peak_b_mz, peak_b_int])
peak_b_mz = 0.
peak_b_int = 0.
a += 1
elif mass_delta_ppm > ms2_ppm:
# Peak only existed in spec b.
spec_merged.append([0., 0., spec_b[b, 0], spec_b[b, 1]])
b += 1
else:
# Peak existed in both spec.
peak_b_mz = ((peak_b_mz * peak_b_int) + (spec_b[b, 0] * spec_b[b, 1])) / (peak_b_int + spec_b[b, 1])
peak_b_int += spec_b[b, 1]
b += 1
if peak_b_int > 0.:
spec_merged.append([spec_a[a, 0], spec_a[a, 1], peak_b_mz, peak_b_int])
peak_b_mz = 0.
peak_b_int = 0.
a += 1
if b < spec_b.shape[0]:
spec_merged += [[0., 0., x[0], x[1]] for x in spec_b[b:]]
if a < spec_a.shape[0]:
spec_merged += [[x[0], x[1], 0., 0.] for x in spec_a[a:]]
if spec_merged:
spec_merged = np.array(spec_merged, dtype=np.float64)
else:
spec_merged = np.array([[0., 0., 0., 0.]], dtype=np.float64)
return spec_merged
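# Usage sketch (hedged): two tiny synthetic spectra, rows of [m/z, intensity],
# sorted by m/z; the 0.01 Da tolerance is illustrative. Assumes numpy as np.
import numpy as np

spec_a = np.array([[100.000, 1.0], [150.002, 0.5]])
spec_b = np.array([[100.004, 0.8], [200.000, 0.3]])
merged = match_peaks_with_mz_info_in_spectra(spec_a, spec_b, ms2_da=0.01)
print(merged)  # rows of [mz_a, int_a, mz_b, int_b]; zeros mark unmatched peaks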
|
253b51abdc9469a411d5ed969d2091929ba20cd6
| 3,639,927
|
from typing import Callable
from typing import List
from typing import Type
from typing import Optional
def make_recsim_env(
recsim_user_model_creator: Callable[[EnvContext], AbstractUserModel],
recsim_document_sampler_creator: Callable[[EnvContext], AbstractDocumentSampler],
reward_aggregator: Callable[[List[AbstractResponse]], float],
) -> Type[gym.Env]:
"""Creates a RLlib-ready gym.Env class given RecSim user and doc models.
See https://github.com/google-research/recsim for more information on how to
build the required components from scratch in python using RecSim.
Args:
recsim_user_model_creator: A callable taking an EnvContext and returning
a RecSim AbstractUserModel instance to use.
recsim_document_sampler_creator: A callable taking an EnvContext and
returning a RecSim AbstractDocumentSampler
to use. This will include a AbstractDocument as well.
reward_aggregator: Callable taking a list of RecSim
AbstractResponse instances and returning a float (aggregated
reward).
Returns:
An RLlib-ready gym.Env class to use inside a Trainer.
"""
class _RecSimEnv(gym.Wrapper):
def __init__(self, config: Optional[EnvContext] = None):
# Override with default values, in case they are not set by the user.
default_config = {
"num_candidates": 10,
"slate_size": 2,
"resample_documents": True,
"seed": 0,
"convert_to_discrete_action_space": False,
"wrap_for_bandits": False,
}
if config is None or isinstance(config, dict):
config = EnvContext(config or default_config, worker_index=0)
config.set_defaults(default_config)
# Create the RecSim user model instance.
recsim_user_model = recsim_user_model_creator(config)
# Create the RecSim document sampler instance.
recsim_document_sampler = recsim_document_sampler_creator(config)
# Create a raw RecSim environment (not yet a gym.Env!).
raw_recsim_env = environment.SingleUserEnvironment(
recsim_user_model,
recsim_document_sampler,
config["num_candidates"],
config["slate_size"],
resample_documents=config["resample_documents"],
)
# Convert raw RecSim env to a gym.Env.
gym_env = recsim_gym.RecSimGymEnv(raw_recsim_env, reward_aggregator)
# Fix observation space and - if necessary - convert to discrete
# action space (from multi-discrete).
env = recsim_gym_wrapper(
gym_env,
config["convert_to_discrete_action_space"],
config["wrap_for_bandits"],
)
# Call the super (Wrapper constructor) passing it the created env.
super().__init__(env=env)
return _RecSimEnv
|
8056be88f9eb24b4ad29aea9c1a518efb07b26a4
| 3,639,928
|
def unflatten(dictionary, delim='.'):
"""Breadth first turn flattened dictionary into a nested one.
Arguments
---------
dictionary : dict
The dictionary to traverse and linearize.
delim : str, default='.'
The delimiter used to indicate nested keys.
"""
out = defaultdict(dict)
    # try to maintain the current order of the dictionary
for key, value in dictionary.items():
key, sep, sub_key = key.partition(delim)
if sep:
out[key][sub_key] = value
else:
out[key] = value
for k, v in out.items():
if isinstance(v, dict):
out[k] = unflatten(v, delim)
return dict(out)
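# Usage sketch (hedged): assumes defaultdict is imported from collections in the
# surrounding module.
flat = {"model.lr": 0.01, "model.depth": 3, "seed": 42}
print(unflatten(flat))
# expected: {'model': {'lr': 0.01, 'depth': 3}, 'seed': 42}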
|
f052363b8d71afa8bec609497db8646c26107b54
| 3,639,929
|
def read_array(dtype, data):
"""Reads a formatted string and outputs an array.
The format is as for standard python arrays, which is
[array[0], array[1], ... , array[n]]. Note the use of comma separators, and
the use of square brackets.
Args:
data: The string to be read in.
dtype: The data type of the elements of the target array.
Raises:
ValueError: Raised if the input data is not of the correct format.
Returns:
An array of data type dtype.
"""
rlist = read_list(data)
for i in range(len(rlist)):
rlist[i] = read_type(dtype,rlist[i])
return np.array(rlist, dtype)
|
cc272b0e71ddec3200075fe5089cdaf2d6eeea29
| 3,639,930
|
def in_ipynb():
"""
Taken from Adam Ginsburg's SO answer here:
http://stackoverflow.com/a/24937408/4118756
"""
try:
cfg = get_ipython().config
if cfg['IPKernelApp']['parent_appname'] == 'ipython-notebook':
return True
else:
return False
except NameError:
return False
|
04c9aece820248b3b1e69eaf2b6721e555557162
| 3,639,931
|
import logging
def load_bioschemas_jsonld_from_html(url, config):
"""
Load Bioschemas JSON-LD from a webpage.
:param url:
:param config:
:return: array of extracted jsonld
"""
try:
extractor = bioschemas.extractors.ExtractorFromHtml(config)
filt = bioschemas.filters.BioschemasFilter(config)
jsonlds = extractor.extract_jsonld_from_url(url)
jsonlds = filt.filter(jsonlds)
        logging.info('Got %d jsonld sections', len(jsonlds))
return jsonlds
except Exception as e:
logging.exception('Ignoring failure')
|
6b3838260c39023b44423d5bcbc81f91a0113a95
| 3,639,932
|
import collections
def pformat(dictionary, function):
"""Recursively print dictionaries and lists with %.3f precision."""
if isinstance(dictionary, dict):
return type(dictionary)((key, pformat(value, function)) for key, value in dictionary.items())
    # bytes and str are also Containers, but we do not want to recurse into them,
    # so they are handled (returned unchanged) before the generic Container case.
    if isinstance(dictionary, bytes) or isinstance(dictionary, str):
        return dictionary
    if isinstance(dictionary, collections.abc.Container):
return type(dictionary)(pformat(value, function) for value in dictionary)
if isinstance(dictionary, float):
return function(dictionary)
return dictionary
|
d509e8871c6749be61d7b987e5fa67cd3e824232
| 3,639,933
|
def _tonal_unmodulo(x):
"""
>>> _tonal_unmodulo((0,10,0))
(0, -2, 0)
>>> _tonal_unmodulo((6,0,0))
(6, 12, 0)
>>> _tonal_unmodulo((2, 0))
(2, 0)
"""
d = x[0]
c = x[1]
base_c = MS[d].c
# Example: Cb --- base=0 c=11 c-base=11 11 - 12 = -1
if c - base_c > 6:
c = c - C_LEN
# Example: B# --- base=11 c=0 c-base=-11 c+C_LEN =12
if c - base_c < -6:
c = c + C_LEN
try:
return (d, c, x[2])
    except IndexError:  # x has no third component
return (d, c)
|
50ae6b1eea4a281b32d07f0661837748b066af8d
| 3,639,934
|
def get_ncopy(path, aboutlink = False):
"""Returns an ncopy attribute value (it is a requested count of
replicas). It calls gfs_getxattr_cached."""
(n, cc) = getxattr(path, GFARM_EA_NCOPY, aboutlink)
    if n is not None:
return (int(n), cc)
else:
return (None, cc)
|
82c212d1d6aa68b49cde7ec170a47fdb09d1dd46
| 3,639,935
|
def has_three_or_more_vowels(string):
"""Check if string has three or more vowels."""
return sum(string.count(vowel) for vowel in 'aeiou') >= 3
|
8b0b683ebe51b18bdc5d6f200b41794a4cb3a510
| 3,639,936
|
def lbfgs_inverse_hessian_factors(S, Z, alpha):
"""
Calculates factors for inverse hessian factored representation.
It implements algorithm of figure 7 in:
Pathfinder: Parallel quasi-newton variational inference, Lu Zhang et al., arXiv:2108.03782
"""
J = S.shape[1]
StZ = S.T @ Z
R = jnp.triu(StZ)
eta = jnp.diag(StZ)
beta = jnp.hstack([jnp.diag(alpha) @ Z, S])
minvR = -jnp.linalg.inv(R)
alphaZ = jnp.diag(jnp.sqrt(alpha)) @ Z
block_dd = minvR.T @ (alphaZ.T @ alphaZ + jnp.diag(eta)) @ minvR
gamma = jnp.block([[jnp.zeros((J, J)), minvR],
[minvR.T, block_dd]])
return beta, gamma
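# Usage sketch (hedged): random history of J = 5 (position, gradient) difference
# pairs in a 10-dimensional problem; only the factor shapes are checked.
import jax
import jax.numpy as jnp

key_s, key_z = jax.random.split(jax.random.PRNGKey(0))
S = jax.random.normal(key_s, (10, 5))
Z = jax.random.normal(key_z, (10, 5))
alpha = jnp.ones(10)  # diagonal of the initial inverse-Hessian estimate
beta, gamma = lbfgs_inverse_hessian_factors(S, Z, alpha)
print(beta.shape, gamma.shape)  # expected: (10, 10) and (10, 10)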
|
1cc279a3fd97d8d1987be532bb3bc3c06a76bea7
| 3,639,937
|
from typing import List
from typing import Dict
from typing import Any
def get_geojson_observations(properties: List[str] = None, **kwargs) -> Dict[str, Any]:
""" Get all observation results combined into a GeoJSON ``FeatureCollection``.
By default this includes some basic observation properties as GeoJSON ``Feature`` properties.
The ``properties`` argument can be used to override these defaults.
Example:
>>> get_geojson_observations(observation_id=16227955, properties=["photo_url"])
{"type": "FeatureCollection",
"features": [{
"type": "Feature",
"geometry": {"type": "Point", "coordinates": [4.360086, 50.646894]},
"properties": {
"photo_url": "https://static.inaturalist.org/photos/24355315/square.jpeg?1536150659"
}
}
]
}
Args:
properties: Properties from observation results to include as GeoJSON properties
kwargs: Arguments for :py:func:`.get_observations`
Returns:
A ``FeatureCollection`` containing observation results as ``Feature`` dicts.
"""
kwargs["mappable"] = True
observations = get_all_observations(kwargs)
return as_geojson_feature_collection(
(flatten_nested_params(obs) for obs in observations),
properties=properties if properties is not None else DEFAULT_OBSERVATION_ATTRS,
)
|
b87f3bcff5ea022ef6509c3b29491dd0f3c665be
| 3,639,938
|
from math import pi
from scipy import signal
def createFilter(fc, Q, fs):
"""
Returns digital BPF with given specs
:param fc: BPF center frequency (Hz)
:param Q: BPF Q (Hz/Hz)
:param fs: sampling rate (Samp/sec)
:returns: digital implementation of BPF
"""
wc = 2*pi*fc
num = [wc/Q, 0]
den = [1, wc/Q, wc**2]
dig_tf = signal.bilinear(num, den, fs)
return dig_tf
|
9152d9f89781e1151db481cd88a736c2035b9fbd
| 3,639,939
|
def create_uno_struct(cTypeName: str):
"""Create a UNO struct and return it.
Similar to the function of the same name in OOo Basic.
Returns:
object: uno struct
"""
oCoreReflection = get_core_reflection()
# Get the IDL class for the type name
oXIdlClass = oCoreReflection.forName(cTypeName)
# Create the struct.
oReturnValue, oStruct = oXIdlClass.createObject(None)
return oStruct
|
acdb7dfaedc75d25e0592b7edf4928779835e5f4
| 3,639,940
|
import pkg_resources
def get_dir():
"""Return the location of resources for report"""
return pkg_resources.resource_filename('naarad.resources',None)
|
e9f450e3f46f65fed9fc831aaa37661477ad3d14
| 3,639,941
|
import torch
import torch.nn.functional as F
def SoftCrossEntropyLoss(input, target):
"""
Calculate the CrossEntropyLoss with soft targets
:param input: prediction logicts
:param target: target probabilities
"""
total_loss = torch.tensor(0.0)
for i in range(input.size(1)):
cls_idx = torch.full((input.size(0),), i, dtype=torch.long)
        loss = F.cross_entropy(input, cls_idx, reduction='none')
total_loss += target[:, i].dot(loss)
return total_loss / input.shape[0]
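# Usage sketch (hedged): random logits and soft targets for a batch of 4
# examples over 3 classes; target rows sum to one.
logits = torch.randn(4, 3)
targets = torch.softmax(torch.randn(4, 3), dim=1)
print(SoftCrossEntropyLoss(logits, targets))  # scalar tensor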
|
e760fb7a8c85cc32e18bf5c1b5882c0a0682d211
| 3,639,942
|
def composite_layer(inputs, mask, hparams):
"""Composite layer."""
x = inputs
# Applies ravanbakhsh on top of each other.
if hparams.composite_layer_type == "ravanbakhsh":
        for layer in range(hparams.layers_per_layer):
with tf.variable_scope(".%d" % layer):
x = common_layers.ravanbakhsh_set_layer(
hparams.hidden_size,
x,
mask=mask,
dropout=0.0)
# Transforms elements to get a context, and then uses this in a final layer.
elif hparams.composite_layer_type == "reembedding":
# Transform elements n times and then pool.
        for layer in range(hparams.layers_per_layer):
with tf.variable_scope(".%d" % layer):
x = common_layers.linear_set_layer(
hparams.hidden_size,
x,
dropout=0.0)
context = common_layers.global_pool_1d(x, mask=mask)
# Final layer.
x = common_layers.linear_set_layer(
hparams.hidden_size,
x,
context=context,
dropout=0.0)
return x
|
f5cc3981b103330eef9d2cf47ede7278eef00bdc
| 3,639,943
|
def edit_expense(expense_id, budget_id, date_incurred, description, amount, payee_id):
"""
Changes the details of the given expense.
"""
query = sqlalchemy.text("""
UPDATE budget_expenses
SET
budget_id = (:budget_id),
date_incurred = (:date_incurred),
description = (:description),
cost = (:amount),
payee_id = (:payee_id)
WHERE
expense_id = (:expense_id)
""")
rp = flask.g.db.execute(
query,
expense_id=expense_id,
budget_id=budget_id,
date_incurred=date_incurred,
description=description,
amount=amount,
payee_id=payee_id
)
return rp.rowcount != 0
|
83a1e591e71efa9a5f8382c934f9090a737a9d5c
| 3,639,944
|
def get_mnist_iterator(batch_size, input_shape, num_parts=1, part_index=0):
"""Returns training and validation iterators for MNIST dataset
"""
get_mnist_ubyte()
    flat = len(input_shape) != 3
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
shuffle=True,
flat=flat,
num_parts=num_parts,
part_index=part_index)
val_dataiter = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
flat=flat,
num_parts=num_parts,
part_index=part_index)
return (train_dataiter, val_dataiter)
|
4a042595b30aa2801221607d5605dc41eac7acf4
| 3,639,945
|
def dataset():
"""Get data frame for test purposes."""
return pd.DataFrame(
data=[['alice', 26], ['bob', 34], ['claire', 19]],
index=[0, 2, 1],
columns=['Name', 'Age']
)
|
53d023b57f5abdd1226f2dfe22ce1853e405c71f
| 3,639,946
|
def get_consumer_key():
    """This is entirely questionable. See settings.py"""
    consumer_key = None
    loc = "%s/consumer_key.txt" % settings.TWITTER_CONSUMER_URL
    try:
        url = urllib.request.urlopen(loc)  # urllib2.urlopen under Python 2
        consumer_key = url.read().rstrip()
    except (urllib.error.HTTPError, IOError) as e:
        print("Unable to obtain consumer_key from %s: %s" % (loc, e))
    return consumer_key
|
77ac3ce96660c32ce9dc43afa0116b7a6e1e1bc2
| 3,639,947
|
from typing import Tuple
def disconnect() -> Tuple[str, int]:
"""Deletes the DroneServerThread with a given id.
Iterates over all the drones in the shared list and deletes the one with a
matching drone_id. If none are found returns an error.
Request:
drone_id (str): UUID of the drone.
Response:
Tuple[str, int]: Response status.
200, "OK" - Drone disconnected created successfully.
400, "Bad Request" - Incorrect drone_id.
"""
# Check if the json is correct and making a variable
    if 'uuid' not in request.json or request.json["uuid"] == "":
return "Bad Request", 400
drone_id = request.json["uuid"]
# Iterates through the array and checks elements
drones_lock, drone_ts = common_variables.get_drone_ts()
drones_lock.acquire()
for drone in drone_ts:
if drone_id == drone.drone_id:
ports_lock, ports_assigned = common_variables.get_ports_assigned()
ports_assigned.remove(drone.ports[0])
ports_assigned.remove(drone.ports[1])
ports_assigned.remove(drone.ports[2])
drone_ts.remove(drone)
drones_lock.release()
return "OK", 200
drones_lock.release()
return "Bad Request", 400
|
c69192ccdc73c27089952d3a27c3ff79dfb932a5
| 3,639,948
|
import torch
def get_graph_feature(x, k=20, idx=None, x_coord=None):
"""
Args:
x: (B, d, N)
"""
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
if idx is None:
if x_coord is None: # dynamic knn graph
idx = knn(x, k=k)
else: # fixed knn graph with input point coordinates
idx = knn(x_coord, k=k)
if k is None: k = idx.size(-1)
_, num_dims, _ = x.size()
feature = gather(x, idx)
x = x.transpose(2, 1).contiguous()
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous() # (B, d, N, K)
return feature
|
e895a1663fb716846af0976a3203045509591a6e
| 3,639,949
|
def get_markers(
image_array: np.ndarray,
evened_selem_size: int = 4,
markers_contrast_times: float = 15,
markers_sd: float = 0.25,
) -> np.ndarray:
"""Finds the highest and lowest grey scale values for image flooding."""
selem = smo.disk(evened_selem_size)
evened = sfi.rank.mean_bilateral(
inc_contrast(image_array, contrast_times=markers_contrast_times), selem
)
# Markers defined by highest and lowest grey levels set as markers
high = np.max(evened)
low = np.min(evened)
std = np.std(evened)
neatarray = np.array(image_array)
markers: np.ndarray = np.zeros_like(neatarray)
# Level reduced/decreased by 1/4 SD
markers[evened < low + (markers_sd * std)] = 3
markers[evened > high - (markers_sd * std)] = 2
return markers
|
865d2f5170b85a54902aabdfaee61199359e7d90
| 3,639,950
|
def pd_bigdata_read_csv(file, **pd_read_csv_params):
"""
读取速度提升不明显
但是内存占用显著下降
"""
reader = pd.read_csv(file, **pd_read_csv_params, iterator=True)
loop = True
try:
chunk_size = pd_read_csv_params['chunksize']
except:
chunk_size = 1000000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunk_size)
chunks.append(chunk)
except StopIteration:
loop = False
print('[Info]: Iteration is stopped.')
df = pd.concat(chunks, ignore_index=True, axis=0)
return df
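# Usage sketch (hedged): the file name, chunk size and column names are
# illustrative; assumes pandas is imported as pd in the surrounding module.
df = pd_bigdata_read_csv("big_table.csv", chunksize=500_000, usecols=["id", "value"])
print(len(df))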
|
0350e543bc10da5165b97b18c83d6f848cbbc503
| 3,639,951
|
import numpy
def PCA(Y_name, input_dim):
"""
Principal component analysis: maximum likelihood solution by SVD
Adapted from GPy.util.linalg
Arguments
---------
    :param Y_name: path to a CSV file containing the NxD data array
:param input_dim: int, dimension of projection
Returns
-------
:rval X: - Nxinput_dim np.array of dimensionality reduced data
W - input_dimxD mapping from X to Y
"""
    Y = numpy.genfromtxt(Y_name, delimiter=',')
    Z = numpy.linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
    [X, W] = [Z[0][:, 0:input_dim], numpy.dot(numpy.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
    v = X.std(axis=0)
    X /= v
    W *= v
return X
|
0d49a1c8470cba2d6d56a4ce191449b3106e8a93
| 3,639,952
|
import os
import sh
def data_cache_path(page, page_id_field='slug'):
"""
Get (and make) local data cache path for data
:param page:
:return:
"""
path = os.path.join(CACHE_ROOT, '.cache', 'data', *os.path.split(getattr(page, page_id_field)))
if not os.path.exists(path):
sh.mkdir('-p', path)
return path
|
6e96637afab0daaa3e77cd664fa099fd1404dcdf
| 3,639,953
|
import collections
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections.abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError('{} should be of length 1, {} or {} but was {}'.format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
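# Illustrative calls (hedged): this is how scalar or per-dimension strides get
# normalized to a full batch+channel-length list; the values are arbitrary.
print(_get_sequence(2, 2, channel_index=3, name="strides"))       # [1, 2, 2, 1]
print(_get_sequence([1, 3], 2, channel_index=1, name="strides"))  # [1, 1, 1, 3]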
|
e2ac408cf299f186bb74fa4b1decc885b1229f9d
| 3,639,954
|
def make_linear(input_dim, output_dim, bias=True, std=0.02):
"""
Parameters
----------
input_dim: int
output_dim: int
bias: bool
std: float
Returns
-------
torch.nn.modules.linear.Linear
"""
linear = nn.Linear(input_dim, output_dim, bias)
init.normal_(linear.weight, std=std)
if bias:
init.zeros_(linear.bias)
return linear
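# Usage sketch (hedged): assumes torch.nn as nn and torch.nn.init as init are
# imported, as the function itself requires.
layer = make_linear(128, 64)
print(layer.weight.std().item())      # roughly 0.02
print(layer.bias.abs().max().item())  # 0.0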
|
57361cadbf3121501da65c3f2f37e61404bc26e3
| 3,639,955
|
def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov):
"""
Log likelihood for centered conditional matrix-variate normal density.
Consider the following partitioned matrix-normal density:
.. math::
\\begin{bmatrix}
\\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\
\\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix}
\\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes
\\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\
\\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i}
\\end{bmatrix}\\right)
Then we can write the conditional:
.. math::
\\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\
\\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\
\\Sigma_{k}^{-1} \\Sigma_{k j}\\right)
This function efficiently computes the conditionals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_conditional`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance (:math:`\\Sigma_{i}` in the notation above).
col_cov: CovBase
Column covariance (:math:`\\Sigma_{j}` in the notation above).
cond: tf.Tensor
Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}`
in the notation above).
cond_cov: CovBase
Covariance of conditioning variable (:math:`\\Sigma_{k}` in the
notation above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_row = row_cov.solve(x)
logdet_row = row_cov.logdet
solve_col, logdet_col = solve_det_conditional(
tf.transpose(a=x), col_cov, tf.transpose(a=cond), cond_cov
)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
|
0970ba5a2f67a6156a6077dbd05e2d1cca331476
| 3,639,956
|
import os
def get_map_folderpath(detectionID):
"""
Make sure map directory exists and return folder location for maps to be
saved to.
"""
homedir = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists('map'):
os.makedirs('map')
detection_folder = 'map/'+str(detectionID)
if not os.path.exists(detection_folder):
os.makedirs(detection_folder)
map_dirpath = os.path.join(homedir, detection_folder)
    return map_dirpath
|
3fd3f0bae5d8152b9b46f4f99dc79e14b5318e76
| 3,639,957
|
def get_next_by_date(name, regexp):
"""Get the next page by page publishing date"""
p = Page.get(Page.name == name)
query = (Page.select(Page.name, Page.title)
.where(Page.pubtime > p.pubtime)
.order_by(Page.pubtime.asc())
.dicts())
    for p in filter(lambda x: regexp.match(x['name']), query):
return p
|
16e956508c1ccbdf444e84ad769848124449ab84
| 3,639,958
|
import sys
def relative_performance(r_df, combinations, optimal_combinations, ref_method='indp', ref_jt='nan', ref_at='nan',
ref_vt='nan', cost_type='Total', deaggregate=False):
"""
    This function computes the relative performance, relative cost, and universal
    relative measure :cite:`Talebiyan2019c` based on results from JC and INDP.
Parameters
----------
r_df : dict
Dictionary that contains complete results by JC and INDP collected by
:func:`read_results`.
combinations : dict
All combinations of magnitude, sample, judgment type, resource allocation type
involved in the JC (or any other decentralized results) collected by
:func:`generate_combinations`.
optimal_combinations : dict
All combinations of magnitude, sample, judgment type, resource allocation type
involved in the INDP (or any other optimal results) collected by :func:`generate_combinations`.
    ref_method : str, optional
        Reference method to compute the relative measure in comparison to. The default is 'indp'.
    ref_jt : str, optional
        Reference judgment type to compute the relative measure in comparison to. It is used only
        when the reference method is JC. The default is 'nan'.
    ref_at : str, optional
        Reference resource allocation type to compute the relative measure in comparison to.
        It is used only when the reference method is JC. The default is 'nan'.
    ref_vt : str, optional
        Reference valuation type to compute the relative measure in comparison to.
        It is used only when the reference method is JC, and the reference resource
        allocation type is Auction. The default is 'nan'.
    cost_type : str, optional
        Cost type for which the relative measure is computed. The default is 'Total'.
    deaggregate : bool, optional
        Should the deaggregated results (for separate layers) be computed. The default is False.
Returns
-------
lambda_df : dict
Dictionary that contains the relative measures.
"""
columns = ['Magnitude', 'cost_type', 'decision_type', 'judgment_type', 'auction_type',
'valuation_type', 'no_resources', 'sample',
'Area_TC', 'Area_P', 'lambda_tc', 'lambda_p', 'lambda_U', 'layer']
T = len(r_df['t'].unique())
lambda_df = pd.DataFrame(columns=columns, dtype=int)
# Computing reference area for lambda
# Check if the method in optimal combination is the reference method #!!!
print('\nRef area calculation\n', end='')
for idx, x in enumerate(optimal_combinations):
if x[4] == ref_method:
rows = r_df[(r_df['Magnitude'] == x[0]) & (r_df['decision_type'] == ref_method) &
(r_df['sample'] == x[1]) & (r_df['auction_type'] == ref_at) &
(r_df['valuation_type'] == ref_vt) & (r_df['no_resources'] == x[3]) &
(r_df['judgment_type'] == ref_jt)]
if not rows.empty:
row_all = rows[(rows['layer'] == 'nan')]
area_tc = trapz_int(y=list(row_all[(row_all['cost_type'] == cost_type)].cost[:T]),
x=list(row_all[row_all['cost_type'] == cost_type].t[:T]))
area_p = -trapz_int(y=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].cost[:T]),
x=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].t[:T]))
values = [x[0], cost_type, x[4], ref_jt, ref_at, ref_vt, x[3], x[1],
area_tc, area_p, 'nan', 'nan', 'nan', 'nan']
lambda_df = lambda_df.append(dict(zip(columns, values)), ignore_index=True)
if deaggregate:
for l in range(x[2]):
row_lyr = rows[(rows['layer'] == l + 1)]
area_tc = trapz_int(y=list(row_lyr[row_lyr['cost_type'] == cost_type].cost[:T]),
x=list(row_lyr[row_lyr['cost_type'] == cost_type].t[:T]))
area_p = -trapz_int(y=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].cost[:T]),
x=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].t[:T]))
values = [x[0], cost_type, x[4], ref_jt, ref_at, ref_vt, x[3], x[1],
area_tc, area_p, 'nan', 'nan', 'nan', l + 1]
lambda_df = lambda_df.append(dict(zip(columns, values)), ignore_index=True)
if idx % (len(optimal_combinations) / 10 + 1) == 0:
update_progress(idx + 1, len(optimal_combinations))
update_progress(len(optimal_combinations), len(optimal_combinations))
# Computing areas and lambdas
print('\nLambda calculation\n', end='')
for idx, x in enumerate(combinations + optimal_combinations):
if x[4] != ref_method:
# Check if reference area exists
cond = ((lambda_df['Magnitude'] == x[0]) & (lambda_df['decision_type'] == ref_method) &
(lambda_df['auction_type'] == ref_at) & (lambda_df['valuation_type'] == ref_vt) &
(lambda_df['cost_type'] == cost_type) & (lambda_df['sample'] == x[1]) &
(lambda_df['no_resources'] == x[3]) & (lambda_df['judgment_type'] == ref_jt))
if not cond.any():
sys.exit('Error:Reference type is not here! for %s,%s, m %d, resource %d' \
% (x[4], x[5], x[0], x[3]))
rows = r_df[(r_df['Magnitude'] == x[0]) & (r_df['decision_type'] == x[4]) &
(r_df['judgment_type'] == x[5]) & (r_df['auction_type'] == x[6]) &
(r_df['valuation_type'] == x[7]) & (r_df['sample'] == x[1]) &
(r_df['no_resources'] == x[3])]
if not rows.empty:
row_all = rows[(rows['layer'] == 'nan')]
ref_area_tc = lambda_df.loc[cond & (lambda_df['layer'] == 'nan'), 'Area_TC']
ref_area_P = lambda_df.loc[cond & (lambda_df['layer'] == 'nan'), 'Area_P']
area_tc = trapz_int(y=list(row_all[row_all['cost_type'] == cost_type].cost[:T]),
x=list(row_all[row_all['cost_type'] == cost_type].t[:T]))
area_p = -trapz_int(y=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].cost[:T]),
x=list(row_all[row_all['cost_type'] == 'Under Supply Perc'].t[:T]))
lambda_tc, lambda_p = compute_lambdas(float(ref_area_tc), float(ref_area_P), area_tc, area_p)
values = [x[0], cost_type, x[4], x[5], x[6], x[7], x[3], x[1], area_tc,
area_p, lambda_tc, lambda_p, (lambda_tc + lambda_p) / 2, 'nan']
lambda_df = lambda_df.append(dict(zip(columns, values)), ignore_index=True)
if deaggregate:
for l in range(x[2]):
row_lyr = rows[(rows['layer'] == l + 1)]
ref_area_tc = lambda_df.loc[cond & (lambda_df['layer'] == l + 1), 'Area_TC']
ref_area_P = lambda_df.loc[cond & (lambda_df['layer'] == l + 1), 'Area_P']
area_tc = trapz_int(y=list(row_lyr[row_lyr['cost_type'] == cost_type].cost[:T]),
x=list(row_lyr[row_lyr['cost_type'] == cost_type].t[:T]))
area_p = -trapz_int(y=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].cost[:T]),
x=list(row_lyr[row_lyr['cost_type'] == 'Under Supply Perc'].t[:T]))
lambda_tc, lambda_p = compute_lambdas(float(ref_area_tc), float(ref_area_P),
area_tc, area_p)
values = [x[0], cost_type, x[4], x[5], x[6], x[7], x[3], x[1], area_tc,
area_p, lambda_tc, lambda_p, (lambda_tc + lambda_p) / 2, l + 1]
lambda_df = lambda_df.append(dict(zip(columns, values)), ignore_index=True)
else:
sys.exit('Error: No entry for %s %s %s m %d|resource %d, ...' \
% (x[4], x[5], x[6], x[0], x[3]))
if idx % (len(combinations + optimal_combinations) / 10 + 1) == 0:
update_progress(idx + 1, len(combinations + optimal_combinations))
    update_progress(len(combinations + optimal_combinations), len(combinations + optimal_combinations))
return lambda_df
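
A minimal sketch of the area-based relative measure computed above, assuming `compute_lambdas` returns the relative gap between a method's area and the reference area (the actual helper is not shown here); all data below is illustrative.

import numpy as np

t = np.arange(10)                         # time steps
ref_cost = np.linspace(100, 10, 10)       # reference (e.g. optimal) total cost curve
cost = np.linspace(100, 25, 10)           # evaluated method's total cost curve

ref_area_tc = np.trapz(ref_cost, t)       # area under the reference curve (Area_TC of the reference row)
area_tc = np.trapz(cost, t)               # area under the evaluated curve

# assumed form of compute_lambdas: relative difference w.r.t. the reference area
lambda_tc = (ref_area_tc - area_tc) / ref_area_tc
print(lambda_tc)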
|
95a9ed19cf989a426dccf351deb8fd631eaefd98
| 3,639,959
|
def generate_raw_mantissa_extraction(optree):
""" generate an operation graph to extraction the significand field
of floating-point node <optree> (may be scalar or vector).
The implicit bit is not injected in this raw version """
if optree.precision.is_vector_format():
base_precision = optree.precision.get_scalar_format()
vector_size = optree.precision.get_vector_size()
int_precision = {
v2float32: v2int32,
v2float64: v2int64,
v4float32: v4int32,
v4float64: v4int64,
v8float32: v8int32,
v8float64: v8int64,
}[optree.precision]
else:
int_precision = optree.precision.get_integer_format()
base_precision = optree.precision
return generate_field_extraction(
optree,
int_precision,
0,
base_precision.get_field_size() - 1,
)
|
f1f0b38f0c68e997ade20ead827f71427104d138
| 3,639,960
|
import time
def read_temp_f(p):
"""
read_temp_f
    Returns the temperature from the probe in degrees Fahrenheit
p = 1-Wire device file
"""
lines = read_temp_raw(p)
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw(p)
equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        # raw 1-Wire reading is assumed to be in millidegrees Celsius (DS18B20 format)
        temp_c = float(temp_string) / 1000.0
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        return temp_f
|
52114550688f06c8f58dfe37f7c0faa4d93715a2
| 3,639,961
|
import numpy as np
def count_parameters(model, trainable_only=True, is_dict=False):
"""
Count number of parameters in a model or state dictionary
:param model:
:param trainable_only:
:param is_dict:
:return:
"""
if is_dict:
return sum(np.prod(list(model[k].size())) for k in model)
if trainable_only:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
else:
return sum(p.numel() for p in model.parameters())
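
A short usage sketch, assuming `count_parameters` above is in scope; the two-layer model is purely illustrative.

import torch.nn as nn

net = nn.Sequential(nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 1))
print(count_parameters(net))                                 # trainable parameters only
print(count_parameters(net, trainable_only=False))           # all parameters
print(count_parameters(net.state_dict(), is_dict=True))      # counted from a state dict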
|
8e95c3302eca217c694bb4c5262c0196254505fb
| 3,639,962
|
def setup_conf(conf=cfg.CONF):
"""Setup the cfg for the status check utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during checks.
"""
common_config.register_common_config_options()
neutron_conf_base.register_core_common_config_opts(conf)
neutron_conf_service.register_service_opts(
neutron_conf_service.SERVICE_OPTS, cfg.CONF)
db_options.set_defaults(conf)
return conf
|
c5ebcc4516e317fc558d8bddeb74343b7006c999
| 3,639,963
|
import pathlib
def release_kind():
"""
Determine which release to make based on the files in the
changelog.
"""
# use min here as 'major' < 'minor' < 'patch'
return min(
'major' if 'breaking' in file.name else
'minor' if 'change' in file.name else
'patch'
for file in pathlib.Path('changelog.d').iterdir()
)
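
A hedged usage sketch, assuming towncrier-style news fragments whose file names contain 'breaking' or 'change'; the fragment names below are made up.

import pathlib

pathlib.Path('changelog.d').mkdir(exist_ok=True)
pathlib.Path('changelog.d/123.change.rst').touch()
pathlib.Path('changelog.d/124.breaking.rst').touch()
print(release_kind())   # 'major', because a 'breaking' fragment is present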
|
115f75c1e0f1e8b02916db518e3983462d9bc19c
| 3,639,964
|
import re
def edit_text_file(filepath: str, regex_search_string: str, replace_string: str):
"""
This function is used to replace text inside a file.
:param filepath: the path where the file is located.
:param regex_search_string: string used in the regular expression to find what has to be replaced.
:param replace_string: the string which will replace all matches found using regex_search_string.
:return: None
:raise RuntimeError: if regex_search_string doesn't find any match.
"""
# open the file and read the content
with open(filepath, "r") as f:
text_file = f.read()
    # find all matches (finditer never returns None, so collect them to check for emptiness)
    matches = list(re.finditer(regex_search_string, text_file))
    if not matches:
        raise RuntimeError("No match has been found using the given regex_search_string!")
# replace all matches with replace_string
for match in matches:
text_file = text_file.replace(match.group(0), replace_string)
# overwrite the file
with open(filepath, "w") as f:
f.write(text_file)
return None
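
A small usage sketch with a temporary file; the version-string pattern is only an example.

import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".txt")
with os.fdopen(fd, "w") as f:
    f.write("version = 1.0.3\nname = demo\n")
edit_text_file(path, r"\d+\.\d+\.\d+", "2.0.0")
with open(path) as f:
    print(f.read())   # version = 2.0.0 ...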
|
e0f5945a96f755a9c289262c3d19552c0e1b40fd
| 3,639,965
|
def find_sums(sheet):
"""
    Tallies the total assets (positive amounts) and total liabilities (negative amounts) across all rows.
RETURNS:
Tuple of assets and liabilities.
"""
pos = 0
neg = 0
for row in sheet:
if row[-1] > 0:
pos += row[-1]
else:
neg += row[-1]
return pos, neg
|
351e13d6915288268a56d8292c470fe354fa9842
| 3,639,966
|
def read_links(title):
"""
Reads the links from a file in directory link_data.
Assumes the file exists, as well as the directory link_data
Args:
title: (Str) The title of the current wiki file to read
Returns a list of all the links in the wiki article with the name title
"""
with open(f"link_data/{title}", "r") as f:
read_data = f.read()
return read_data.split("\n")[:-1]
|
50f128bcf4cd36bc783bc848ab2e6b6280973ea3
| 3,639,967
|
import keras
from keras.models import Sequential
from sklearn.datasets import load_boston
from scikeras.wrappers import KerasRegressor  # assumed source of the scikit-learn wrapper used below
def test_compile_model_from_params():
"""Tests that if build_fn returns an un-compiled model,
the __init__ parameters will be used to compile it
and that if build_fn returns a compiled model
it is not re-compiled.
"""
# Load data
data = load_boston()
X, y = data.data[:100], data.target[:100]
losses = ("mean_squared_error", "mean_absolute_error")
# build_fn that does not compile
def build_fn(compile_with_loss=None):
model = Sequential()
model.add(keras.layers.Dense(X.shape[1], input_shape=(X.shape[1],)))
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("linear"))
if compile_with_loss:
model.compile(loss=compile_with_loss)
return model
for loss in losses:
estimator = KerasRegressor(
model=build_fn,
loss=loss,
# compile_with_loss=None returns an un-compiled model
compile_with_loss=None,
)
estimator.fit(X, y)
assert estimator.model_.loss.__name__ == loss
for myloss in losses:
estimator = KerasRegressor(
model=build_fn,
loss="binary_crossentropy",
# compile_with_loss != None overrides loss
compile_with_loss=myloss,
)
estimator.fit(X, y)
assert estimator.model_.loss == myloss
|
a4cbc7b4dbc4d9836766c37d8eb1cfdd3d5c324e
| 3,639,968
|
import numpy
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
"""Return the smallest string representation of a number.
This method is only concerned with the maximum number of significant
digits.
Two alternatives:
1) modified scientific notation (without the trailing + and zero in
the exponent)
2) float notation
:returns: string representation of a number of function evaluations
or ERT.
"""
#Compared to writeFEvals2?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
size1 = len(repr1)
tmp = repr1.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
repr1 = 'e'.join(tmp)
#len(repr1) <= maxsymbols is not always the case but should be most usual
tmp = '%.0f' % fevals
remainingsymbols = max(maxsymbols - len(tmp), 0)
repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
tmp = repr2.split('.', 1)
if len(tmp) > 1:
tmp[-1] = tmp[-1].rstrip('0')
repr2 = '.'.join(tmp)
repr2 = repr2.rstrip('.')
#set_trace()
if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
return repr1
#tmp1 = '%4.0f' % bestalgdata[-1]
#tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
#if len(tmp2) > 1:
# tmp2[-1] = tmp2[-1].lstrip('+0')
# tmp2 = 'e'.join(tmp2)
# tmp = tmp1
# if len(tmp1) >= len(tmp2):
# tmp = tmp2
# curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
return repr2
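
A few hedged usage examples showing how the two notations compete; the commented outputs are what the logic above should produce.

print(writeFEvalsMaxSymbols(1234.0, 5))                     # '1234'  (float notation is shorter)
print(writeFEvalsMaxSymbols(1234567.0, 4))                  # '1.2e6' (scientific notation is shorter)
print(writeFEvalsMaxSymbols(1234.0, 5, isscientific=True))  # '1.23e3'
print(writeFEvalsMaxSymbols(float('inf'), 4))               # '$\infty$'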
|
a5434c5f6e845473f2187b969e4fa42538a95633
| 3,639,969
|
import numpy as np
def closedcone(r=1, h=5, bp=[0,0,0], sampH=360, sampV=50, fcirc=20):
    """
    Returns parametrization of a closed cone with radius 'r' and height 'h' at
    basepoint (bpx,bpy,bpz), where 'sampH' and 'sampV' specify the amount of
    samples used horizontally, i.e. for circles, and vertically, i.e.
    for height, and 'fcirc' specifies the amount
    of circles that fill the bottom of the cone with radius 'r'.
    The base point is in the cone's center at the bottom.
    The default values are 1, 5, (0,0,0), 360 and 50 for the radius, height,
    base point, and amount of horizontal and vertical samples, respectively.
"""
bpx, bpy, bpz = bp
theta0 = np.linspace(0, 2*np.pi, sampH)
z = np.linspace(bpz, bpz+h, sampV)
theta, z = np.meshgrid(theta0, z)
r = np.linspace(r, 0, sampV)
theta, r = np.meshgrid(theta0, r)
x = r * np.cos(theta) - bpx
y = r * np.sin(theta) - bpy
xcirc, ycirc, zcirc = filledcircle(r=r,c=[bpx,bpy,bpz], sampH=sampH,
fcirc=fcirc)
x = np.append(x,xcirc,0)
y = np.append(y,ycirc,0)
z = np.append(z,zcirc,0)
return x, y, z
|
8cbf46f0a626d8cc858bab004a21dd9eb189a3eb
| 3,639,970
|
import numpy as np
from numpy.linalg import det          # assumed source of det
from scipy.special import digamma     # assumed source of digamma
def E_lndetW_Wishart(nu,V):
    """
    mean of log determinant of precision matrix over Wishart <lndet(W)>
    input
      nu [float] : dof parameter of Wishart distribution
      V [ndarray, shape (D x D)] : base matrix of Wishart distribution
    """
    if nu < len(V) + 1:
        raise ValueError("dof parameter nu must be larger than len(V)")
D = len(V)
E = D*np.log(2.0) - np.log(det(V)) + \
digamma(np.arange(nu+1-D,nu+1)*0.5).sum()
return E
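
A quick usage sketch with an identity base matrix; `det` and `digamma` are assumed to come from numpy.linalg and scipy.special as imported above.

import numpy as np

V = np.eye(3)      # 3x3 base matrix
nu = 5.0           # dof, must be at least len(V) + 1
print(E_lndetW_Wishart(nu, V))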
|
1fa84eb843c91b66b3937b7542be31c00faf002d
| 3,639,971
|
import tensorflow as tf
def crop_range_image(range_images, new_width, shift=None, scope=None):
"""Crops range image by shrinking the width.
Requires: new_width is smaller than the existing width.
Args:
range_images: [B, H, W, ...]
new_width: an integer.
shift: a list of integer of same size as batch that shifts the crop window.
Positive is right shift. Negative is left shift. We assume the shift keeps
the window inside the image (i.e. no wrap).
scope: the name scope.
Returns:
range_image_crops: [B, H, new_width, ...]
"""
# pylint: disable=unbalanced-tuple-unpacking
shape = _combined_static_and_dynamic_shape(range_images)
batch = shape[0]
width = shape[2]
if width == new_width:
return range_images
if new_width < 1:
raise ValueError('new_width must be positive.')
if width is not None and new_width >= width:
raise ValueError('new_width {} should be < the old width {}.'.format(
new_width, width))
if shift is None:
shift = [0] * batch
diff = width - new_width
left = [diff // 2 + i for i in shift]
right = [i + new_width for i in left]
for l, r in zip(left, right):
if l < 0 or r > width:
raise ValueError(
'shift {} is invalid given new_width {} and width {}.'.format(
shift, new_width, width))
range_image_crops = []
with tf.compat.v1.name_scope(scope, 'CropRangeImage', [range_images]):
for i in range(batch):
range_image_crop = range_images[i, :, left[i]:right[i], ...]
range_image_crops.append(range_image_crop)
return tf.stack(range_image_crops, axis=0)
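
A usage sketch, assuming the module's `_combined_static_and_dynamic_shape` helper (not shown here) is available; the tensor sizes are illustrative.

import tensorflow as tf

ri = tf.random.uniform([2, 64, 2650, 4])                 # [B, H, W, C] range images
cropped = crop_range_image(ri, new_width=512, shift=[0, 10])
print(cropped.shape)                                      # (2, 64, 512, 4)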
|
364dc2e1e77052327e3517fb35c0223463179a69
| 3,639,972
|
import string
import random
def randomString(length):
"""Generates a random string of LENGTH length."""
    chars = string.ascii_letters + string.digits
    s = ""
    # random.sample picks without replacement, so length must not exceed len(chars)
    for i in random.sample(chars, length):
        s += i
return s
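
Example usage; note that `random.sample` draws without replacement, so the requested length cannot exceed the 62 available characters.

print(randomString(8))    # e.g. 'aZ3kQ9bX'
print(randomString(32))   # longer token, still no repeated characters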
|
fff13713271b3064b4e42c42c420aad190475d85
| 3,639,973
|
import wx
def DrawMACCloseButton(colour, backColour=None):
"""
Draws the wxMAC tab close button using wx.GraphicsContext.
:param `colour`: the colour to use to draw the circle.
"""
bmp = wx.EmptyBitmapRGBA(16, 16)
dc = wx.MemoryDC()
dc.SelectObject(bmp)
gc = wx.GraphicsContext.Create(dc)
gc.SetBrush(wx.Brush(colour))
path = gc.CreatePath()
path.AddCircle(6.5, 7, 6.5)
path.CloseSubpath()
gc.FillPath(path)
path = gc.CreatePath()
if backColour is not None:
pen = wx.Pen(backColour, 2)
else:
pen = wx.Pen("white", 2)
pen.SetCap(wx.CAP_BUTT)
pen.SetJoin(wx.JOIN_BEVEL)
gc.SetPen(pen)
path.MoveToPoint(3.5, 4)
path.AddLineToPoint(9.5, 10)
path.MoveToPoint(3.5, 10)
path.AddLineToPoint(9.5, 4)
path.CloseSubpath()
gc.DrawPath(path)
dc.SelectObject(wx.NullBitmap)
return bmp
|
96982b68aa926341d7ab74d7ed705c19c232392e
| 3,639,974
|
def dispatch(args, validator):
"""
'dispath' set in the 'validator' object the level of validation
chosen by the user. By default, the validator
makes topology level validation.
"""
print("Printing all the arguments: {}\n".format(args))
if args.vnfd:
print("VNFD validation")
validator.schema_validator.load_schemas("VNFD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False,
custom=False)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
custom=False)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
custom=False)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile)
print("Syntax, integrity, topology and custom rules validation")
else:
print("Default mode: Syntax, integrity and topology validation")
if validator.validate_function(args.vnfd):
if ((validator.error_count == 0) and
(len(validator.customErrors) == 0)):
print("No errors found in the VNFD")
else:
print("Errors in validation")
return validator
elif args.nsd:
print("NSD validation")
validator.schema_validator.load_schemas("NSD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
dpath=args.dpath)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
dpath=args.dpath)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile,
dpath=args.dpath)
print("Syntax, integrity, topology and custom rules validation")
else:
validator.configure(syntax=True, integrity=True, topology=True,
dpath=args.dpath)
print("Default mode: Syntax, integrity and topology validation")
if validator.validate_service(args.nsd):
if ((validator.error_count == 0) and (len(validator.customErrors) == 0)):
print("No errors found in the Service descriptor validation")
else:
print("Errors in custom rules validation")
return validator
elif args.project_path:
print("Project descriptor validation")
validator.schema_validator.load_schemas("NSD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False,
workspace_path=args.workspace_path)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
workspace_path=args.workspace_path)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
workspace_path=args.workspace_path)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile)
print("Syntax, integrity, topology and custom rules validation")
else:
print("Default mode: Syntax, integrity and topology validation")
if not validator.validate_project(args.project_path):
            print("Can't validate the project descriptors")
else:
if validator.error_count == 0:
if len(validator.customErrors) == 0:
print("No errors found in the validation of the project descriptors")
else:
print("Errors in custom rules validation")
return validator
elif args.tstd:
print("Test descriptor validation")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_test(args.tstd):
            print("Can't validate the test descriptors")
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the test descriptors")
else:
print("Errors in validation")
return validator
elif args.nstd:
print("Slice descriptor validation")
validator.schema_validator.load_schemas("NSTD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_slice(args.nstd):
            print("Can't validate the slice descriptors")
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the slice descriptors")
else:
print("Errors in validation")
return validator
elif args.slad:
print("SLA descriptor validation")
validator.schema_validator.load_schemas("SLAD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_sla(args.slad):
            print("Can't validate the SLA descriptors")
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the sla descriptors")
else:
print("Errors in validation")
return validator
elif args.rpd:
print("RP descriptor validation")
validator.schema_validator.load_schemas("RPD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_runtime_policy(args.rpd):
            print("Can't validate the runtime policy descriptors")
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the sla descriptors")
else:
print("Errors in validation")
return validator
|
b2625b5cb46295d0790b37fa691b8a4d60341e47
| 3,639,975
|
from flask import Flask, render_template
def create_app():
"""Create and configure and instance of the Flask application"""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/')
def home():
return render_template('base.html')
@app.route('/load')
def db_load_city():
df_city = api.cities(df=True)
load_cities(df_city)
return render_template('base.html', title='Cities Loaded')
@app.route('/countries')
def db_load_country():
df_country = api.countries(df=True)
load_countries(df_country)
return render_template('base.html', title='Countries Loaded')
return app
|
1e122846bfdfc68a1143eb2d53b87eda9ae9cff6
| 3,639,976
|
import re
import os
def write_chart_json(j2_file_name, item):
"""Write a chart JSON file.
Args:
j2_file_name: the name of the Jinja template file
item: a (benchmark_id, chart_dict) pair
Returns:
returns the (filepath, json_data) pair
"""
file_name = fcompose(
lambda x: r"{0}_{1}".format(x, j2_file_name),
lambda x: re.sub(r"([0-9]+[abcd])\.(.+)\.yaml\.j2",
r"\1\2.json",
x)
)
return pipe(
item[0],
file_name,
lambda file_: os.path.join(get_path(), '../_data/charts', file_),
write_json(item[1])
)
|
a3b19e487ea2c0d4578a88ff12fc32d6de0f0d65
| 3,639,977
|
def get_motif_class(motif: str) -> str:
"""Return the class of the given motif."""
for mcls in gen_motif_classes(len(motif), len(motif) + 1):
for m in motif_set(mcls):
if m == motif:
return mcls
else:
raise ValueError(
"Unable to find the class of the given motif. "
"Maybe it contains a character other than ['A', 'C', 'G', 'T']?"
)
|
fea293fcf25b77bbf78c400facf450067c94be2b
| 3,639,978
|
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode. If this is set
to None, the callers can specify slim.batch_norm's is_training parameter
from an outer slim.arg_scope.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with (slim.arg_scope([slim.batch_norm], is_training=is_training)
if is_training is not None else NoOpScope()):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
store_non_strided_activations)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
|
b2008da41f5ada502941c058134ded4d95c3d5c0
| 3,639,979
|
def findConstell(cc):
"""
input is one character (from rinex satellite line)
output is integer added to the satellite number
0 for GPS, 100 for Glonass, 200 for Galileo, 300 for everything else?
author: kristine larson, GFZ, April 2017
"""
if (cc == 'G' or cc == ' '):
out = 0
elif (cc == 'R'): # glonass
out = 100
elif (cc == 'E'): # galileo
out = 200
else:
out = 300
return out
|
d7a85fc5f7324acdb5277fd6db458523cd4ad4b8
| 3,639,980
|
def Controller(idx):
"""(read-only) Full name of the i-th controller attached to this element. Ex: str = Controller(2). See NumControls to determine valid index range"""
return get_string(lib.CktElement_Get_Controller(idx))
|
5adb2f806133319546ea627c705579a3a7e662dd
| 3,639,981
|
from scipy import ndimage
def smooth_2d_map(bin_map, n_bins=5, sigma=2, apply_median_filt=True, **kwargs):
    """
    :param bin_map: map to be smoothed.
        array in which each cell corresponds to the value at that xy position
    :param n_bins: number of smoothing bins
    :param sigma: std for the gaussian smoothing
    :param apply_median_filt: if True, apply a median filter before the gaussian smoothing
    :return: sm_map: smoothed map. note that this is a truncated sigma map, meaning that high or
        low values won't affect far away bins
"""
if apply_median_filt:
sm_map = ndimage.filters.median_filter(bin_map, n_bins)
else:
sm_map = bin_map
trunc = (((n_bins - 1) / 2) - 0.5) / sigma
return ndimage.filters.gaussian_filter(sm_map, sigma, mode='constant', truncate=trunc)
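
A minimal usage sketch on a random 2D map; the grid size and parameters are illustrative.

import numpy as np

rate_map = np.random.rand(40, 40)          # e.g. an occupancy-normalized firing-rate map
sm = smooth_2d_map(rate_map, n_bins=5, sigma=2)
print(sm.shape)                             # (40, 40): same grid, smoothed values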
|
a1d8c9b2b8107663746d2c1af9e129d7226e9d0b
| 3,639,982
|
from socket import socket, AF_INET, SOCK_STREAM
def _select_socket(lower_port, upper_port):
"""Create and return a socket whose port is available and adheres to the given port range, if applicable."""
sock = socket(AF_INET, SOCK_STREAM)
found_port = False
retries = 0
while not found_port:
try:
sock.bind(('0.0.0.0', _get_candidate_port(lower_port, upper_port)))
found_port = True
except Exception:
retries = retries + 1
if retries > max_port_range_retries:
raise RuntimeError(
"Failed to locate port within range {}..{} after {} retries!".
format(lower_port, upper_port, max_port_range_retries))
return sock
|
19427fd0146b5537c6fab898b5e3e0868c8c4a21
| 3,639,983
|
import numpy as np
from tqdm import tqdm
def _stabilization(sr, nmax, err_fn, err_xi):
"""
A function that computes the stabilisation matrices needed for the
stabilisation chart. The computation is focused on comparison of
eigenfrequencies and damping ratios in the present step
(N-th model order) with the previous step ((N-1)-th model order).
    :param sr: list of lists of complex natural frequencies
    :param nmax: maximum number of degrees of freedom
    :param err_fn: relative error in frequency
    :param err_xi: relative error in damping
    :return fn_temp: updated eigenfrequencies matrix
:return xi_temp: updated damping matrix
:return test_fn: updated eigenfrequencies stabilisation test matrix
:return test_xi: updated damping stabilisation test matrix
"""
    # TODO: check this later for optimisation # this differs by LSCE and LSCF
fn_temp = np.zeros((2*nmax, nmax), dtype='double')
xi_temp = np.zeros((2*nmax, nmax), dtype='double')
test_fn = np.zeros((2*nmax, nmax), dtype='int')
test_xi = np.zeros((2*nmax, nmax), dtype='int')
for nr, n in enumerate(tqdm(range(nmax), ncols=100)):
fn, xi = tools.complex_freq_to_freq_and_damp(sr[nr])
# elimination of conjugate values in
fn, xi = _redundant_values(fn, xi, 1e-3)
# order to decrease computation time
if n == 1:
# first step
fn_temp[0:len(fn), 0:1] = fn
xi_temp[0:len(fn), 0:1] = xi
else:
# Matrix test is created for comparison between present(N-th) and
# previous (N-1-th) data (eigenfrequencies). If the value equals:
# --> 1, the data is within relative tolerance err_fn
# --> 0, the data is outside the relative tolerance err_fn
fn_test = np.zeros((len(fn), len(fn_temp[:, n - 1])), dtype='int')
xi_test = np.zeros((len(xi), len(xi_temp[:, n - 1])), dtype='int')
for i in range(len(fn)):
fn_test[i, np.abs((fn[i] - fn_temp[:, n-2]) /
fn_temp[:, n-2]) < err_fn] = 1
xi_test[i, np.abs((xi[i] - xi_temp[:, n-2]) /
xi_temp[:, n-2]) < err_xi] = 1
fn_temp[i, n - 1] = fn[i]
xi_temp[i, n - 1] = xi[i]
test_fn[i, n-1] = np.sum(fn_test[i, :2*n])
test_xi[i, n-1] = np.sum(xi_test[i, :2*n])
return fn_temp, xi_temp, test_fn, test_xi
|
945f1cb74506753f81112497fb87bf805031f957
| 3,639,984
|
def _factory(cls_name, parent_cls, search_nested_subclasses=False):
"""Return subclass from parent
Args:
cls_name (basestring)
parent_cls (cls)
search_nested_subclasses (bool)
Return:
cls
"""
member_cls = None
subcls_name = _filter_out_underscore(cls_name.lower())
members = (_all_subclasses(parent_cls) if search_nested_subclasses else
parent_cls.__subclasses__())
for member_cls in members:
if member_cls.__name__.lower() == subcls_name:
break
else:
raise exception.NoClassFound(
"%s for parent %s" % (subcls_name, parent_cls))
return member_cls
|
2eb5fb4c3333aaddec418ebac8ecdd824ff4e8ba
| 3,639,985
|
def tabuleiro_actualiza_pontuacao(t,v):
"""list x int -> list
Esta funcao recebe um elemento tabuleiro do tipo lista e um elemento v do tipo inteiro e modifica o tabuleiro, acrescentando ao valor da pontuacao v pontos"""
if isinstance(v,int) and v%4==0 and v>=0:
t[4]=tabuleiro_pontuacao(t)+v
return t
else:
raise ValueError('tabuleiro_actualiza_pontuacao: argumentos invalidos')
|
a247f2c14ffd42fc4d77ae9871ccc08bd967296d
| 3,639,986
|
import os
import numpy as np
import pybullet
def pybullet_options_from_shape(shape, path='', force_concave=False):
"""Pybullet shape"""
options = {}
collision = isinstance(shape, Collision)
if collision:
options['collisionFramePosition'] = shape.pose[:3]
options['collisionFrameOrientation'] = rot_quat(shape.pose[3:])
else:
options['visualFramePosition'] = shape.pose[:3]
options['visualFrameOrientation'] = rot_quat(shape.pose[3:])
options['rgbaColor'] = shape.diffuse
options['specularColor'] = shape.specular
if isinstance(shape.geometry, Plane):
options['shapeType'] = pybullet.GEOM_PLANE
options['planeNormal'] = shape.geometry.normal
elif isinstance(shape.geometry, Box):
options['shapeType'] = pybullet.GEOM_BOX
options['halfExtents'] = 0.5*np.array(shape.geometry.size)
elif isinstance(shape.geometry, Sphere):
options['shapeType'] = pybullet.GEOM_SPHERE
options['radius'] = shape.geometry.radius
elif isinstance(shape.geometry, Cylinder):
options['shapeType'] = pybullet.GEOM_CYLINDER
options['radius'] = shape.geometry.radius
options['height' if collision else 'length'] = shape.geometry.length
elif isinstance(shape.geometry, Capsule):
options['shapeType'] = pybullet.GEOM_CAPSULE
options['radius'] = shape.geometry.radius
options['height' if collision else 'length'] = shape.geometry.length
elif isinstance(shape.geometry, Mesh):
options['shapeType'] = pybullet.GEOM_MESH
options['fileName'] = os.path.join(path, shape.geometry.uri)
options['meshScale'] = shape.geometry.scale
if force_concave:
options['flags'] = pybullet.GEOM_FORCE_CONCAVE_TRIMESH
elif isinstance(shape.geometry, Heightmap):
options['shapeType'] = pybullet.GEOM_HEIGHTMAP
else:
raise Exception('Unknown type {}'.format(type(shape.geometry)))
return options
|
e89d0fbf23abb99c64914da5ffd132e9bd3f0372
| 3,639,987
|
import IPython.display
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
def showcase_code(pyfile,class_name = False, method_name = False, end_string = False):
"""shows content of py file"""
with open(pyfile) as f:
code = f.read()
if class_name:
#1. find beginning (class + <name>)
index = code.find(f'class {class_name}')
code = code[index:]
#2. find end (class (new class!) or end of script)
end_index = code[7:].find('class')
if method_name:
#1. find beginning (class + <name>)
index = code.find(f'def {method_name}')
code = code[index:]
#2. find end (class (new class!) or end of script)
end_index = code[7:].find('def')
if end_string:
end_index = code[7:].find('# helpers')
code = code[:end_index]
formatter = HtmlFormatter()
return IPython.display.HTML('<style type="text/css">{}</style>{}'.format(
formatter.get_style_defs('.highlight'),
highlight(code, PythonLexer(), formatter)))
|
fe62a99adf5f97164ac69e68554f31d20e126dfa
| 3,639,988
|
def get_hyperparams(data, ind):
"""
    Gets the hyperparameters for hyperparameter settings index ind
    Parameters
    ----------
    data : dict
        The Python data dictionary generated from running main.py
    ind : int
        The hyperparameter settings index to get the hyperparameters for
Returns
-------
dict
The dictionary of hyperparameters
"""
return data["experiment_data"][ind]["agent_hyperparams"]
|
3734f4cf00564a1aa7c852091d366e6e42b6d55b
| 3,639,989
|
from typing import Dict
from typing import Any
from typing import Tuple
import pandas as pd
def _check_df_params_require_iter(
func_params: Dict[str, ParamAttrs],
src_df: pd.DataFrame,
func_kwargs: Dict[str, Any],
**kwargs,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Return params that require iteration and those that don't."""
list_params: Dict[str, Any] = {}
df_iter_params: Dict[str, Any] = {}
for kw_name, arg in kwargs.items():
if kw_name in _DEF_IGNORE_PARAM:
continue
if (
arg not in src_df.columns
or not isinstance(func_kwargs.get(kw_name), str)
or kw_name not in func_params
):
# Not intended/usable as a column specification
continue
col_name = func_kwargs.pop(kw_name)
if func_params[kw_name].type == "list":
# If the parameter accepts iterable types try to use the
# values of that column directly
list_params[kw_name] = list(src_df[col_name].values)
# But also store it as a param that we might need to iterate through
df_iter_params[kw_name] = col_name
return df_iter_params, list_params
|
e66a42a173f24a33f2457bf6b8cfe4124984f646
| 3,639,990
|
import torch
def train_linear_classifier(loss_func, W, X, y, learning_rate=1e-3,
reg=1e-5, num_iters=100, batch_size=200,
verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- loss_func: loss function to use when training. It should take W, X, y
and reg as input, and output a tuple of (loss, dW)
- W: A PyTorch tensor of shape (D, C) giving the initial weights of the
classifier. If W is None then it will be initialized here.
- X: A PyTorch tensor of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A PyTorch tensor of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Returns: A tuple of:
- W: The final value of the weight matrix and the end of optimization
- loss_history: A list of Python scalars giving the values of the loss at each
training iteration.
"""
# assume y takes values 0...K-1 where K is number of classes
num_train, dim = X.shape
if W is None:
# lazily initialize W
num_classes = torch.max(y) + 1
W = 0.000001 * torch.randn(dim, num_classes, device=X.device, dtype=X.dtype)
else:
num_classes = W.shape[1]
# Run stochastic gradient descent to optimize W
loss_history = []
for it in range(num_iters):
# TODO: implement sample_batch function
X_batch, y_batch = sample_batch(X, y, num_train, batch_size)
# evaluate loss and gradient
loss, grad = loss_func(W, X_batch, y_batch, reg)
loss_history.append(loss.item())
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
# Replace "pass" statement with your code
W -= learning_rate * grad
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
return W, loss_history
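
A hedged end-to-end sketch. Both helpers below are assumptions: `sample_batch` is left as a TODO in the file above, and the loss here is a simple L2-regularized least-squares surrogate rather than the course's SVM/softmax loss.

import torch

def sample_batch(X, y, num_train, batch_size):
    # naive random batch sampler (stand-in for the TODO above)
    idx = torch.randint(0, num_train, (batch_size,))
    return X[idx], y[idx]

def l2_loss(W, X, y, reg):
    # least-squares surrogate loss and its gradient w.r.t. W
    diff = X.mm(W).squeeze(1) - y.float()
    loss = (diff ** 2).mean() + reg * (W ** 2).sum()
    grad = 2.0 * X.t().mm(diff.unsqueeze(1)) / X.shape[0] + 2.0 * reg * W
    return loss, grad

X = torch.randn(200, 5)
y = torch.zeros(200, dtype=torch.long)
W0 = 0.001 * torch.randn(5, 1)
W, history = train_linear_classifier(l2_loss, W0, X, y, num_iters=50)
print(history[0], history[-1])   # loss should decrease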
|
4587781a47859acf4f4360b0e09e0ef3cdd74289
| 3,639,991
|
import os
import numpy as np
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes, std, filename=None,
normalize=False, cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# std = std * 100
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] *100
# std = std.astype('float') / std.sum(axis=1)[:, np.newaxis] *100
print("Normalized confusion matrix")
else:
print('Confusion matrix, as input by user')
print(cm)
fig, ax = plt.subplots(figsize=(4,4))
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
# ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else '.2f'
fmt_std = '.2f'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt) + '±' + format(std[i, j], fmt_std),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig(os.path.join(config.plot_dir, filename + '_cm.pdf' ))
return ax
|
88e6321deafd1cd10bc142dea263ae636466e675
| 3,639,992
|
import sys
def ruleset_from_auto(source):
""" Automatically load a ruleset from any format
Automatically uncompresses files in either gzip or bzip2 format based
on the file extension if source is a filepath.
source: A file path or file handle
return: A ruleset or an exception
NOTE: Requires the file to be seekable and will likely throw an exception otherwise
unless we guess the format correctly on our first attempt.
"""
file_name = ''
if hasattr(source, 'name'):
file_name = source.name
else:
file_name = str(source)
f_priority = []
# Based on extension pick the most likely candidate format first
if 'json' in file_name or 'jsn' in file_name:
f_priority.append(ruleset_from_ryu_json)
if 'pickle' in file_name or 'pkl' in file_name:
f_priority.append(ruleset_from_pickle)
if 'fib' in file_name:
f_priority.append(ruleset_from_fib)
if 'ovs' in file_name:
f_priority.append(ruleset_from_ovs)
# Add all remaining format options
f_priority.extend(set(_input_load_fn.values()) -
set(f_priority + [ruleset_from_ovs]))
f_priority.append(ruleset_from_ovs)
errors = []
for load_fn in f_priority:
try:
ruleset = load_fn(source)
return ruleset
except Exception as e:
errors.append((load_fn.__name__, e))
if hasattr(source, 'seek'):
source.seek(0)
for error in errors:
print(error[0], ":", error[1], file=sys.stderr)
raise ValueError("Could not automatically open the file, unknown format: " + file_name)
|
a1085a534a5918447745e7a630e1c0a1fa380f0c
| 3,639,993
|
import random
import re
import os
import subprocess
import numpy as np
def GetEvol(x, **kwargs):
"""
Run a VPLanet simulation for this initial condition vector, x
"""
# Get the current vector
dMass, dSatXUVFrac, dSatXUVTime, dStopTime, dXUVBeta = x
dSatXUVFrac = 10 ** dSatXUVFrac # Unlog
dStopTime *= 1.e9 # Convert from Gyr -> yr
# Get the prior probability
lnprior = kwargs["LnPrior"](x, **kwargs)
if np.isinf(lnprior):
return None
# Get strings containing VPLanet input files (they must be provided!)
try:
        star_in = kwargs["STARIN"]  # index (not .get) so a missing key actually raises KeyError
        vpl_in = kwargs["VPLIN"]
except KeyError as err:
print("ERROR: Must supply STARIN and VPLIN.")
raise
# Get PATH
try:
        PATH = kwargs["PATH"]
except KeyError as err:
print("ERROR: Must supply PATH.")
raise
# Randomize file names
sysName = 'vpl%012x' % random.randrange(16**12)
starName = 'st%012x' % random.randrange(16**12)
sysFile = sysName + '.in'
starFile = starName + '.in'
logfile = sysName + '.log'
starFwFile = '%s.star.forward' % sysName
# Populate the star input file
star_in = re.sub("%s(.*?)#" % "dMass", "%s %.6e #" % ("dMass", dMass), star_in)
star_in = re.sub("%s(.*?)#" % "dSatXUVFrac", "%s %.6e #" % ("dSatXUVFrac", dSatXUVFrac), star_in)
star_in = re.sub("%s(.*?)#" % "dSatXUVTime", "%s %.6e #" % ("dSatXUVTime", -dSatXUVTime), star_in)
star_in = re.sub("%s(.*?)#" % "dXUVBeta", "%s %.6e #" % ("dXUVBeta", -dXUVBeta), star_in)
with open(os.path.join(PATH, "output", starFile), 'w') as f:
print(star_in, file = f)
# Populate the system input file
# Populate list of planets
saBodyFiles = str(starFile) + " #"
saBodyFiles = saBodyFiles.strip()
vpl_in = re.sub('%s(.*?)#' % "dStopTime", '%s %.6e #' % ("dStopTime", dStopTime), vpl_in)
vpl_in = re.sub('sSystemName(.*?)#', 'sSystemName %s #' % sysName, vpl_in)
vpl_in = re.sub('saBodyFiles(.*?)#', 'saBodyFiles %s #' % saBodyFiles, vpl_in)
with open(os.path.join(PATH, "output", sysFile), 'w') as f:
print(vpl_in, file = f)
# Run VPLANET and get the output, then delete the output files
subprocess.call(["vplanet", sysFile], cwd = os.path.join(PATH, "output"))
output = vpl.GetOutput(os.path.join(PATH, "output"), logfile = logfile)
try:
os.remove(os.path.join(PATH, "output", starFile))
os.remove(os.path.join(PATH, "output", sysFile))
os.remove(os.path.join(PATH, "output", starFwFile))
os.remove(os.path.join(PATH, "output", logfile))
except FileNotFoundError:
# Run failed!
return None
# Ensure we ran for as long as we set out to
if not output.log.final.system.Age / utils.YEARSEC >= dStopTime:
return None
return output
|
6935000c6fae31faf1beebe9f7e5719f5680677f
| 3,639,994
|
import numpy as np
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
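
A quick usage check; the permutation is illustrative.

import numpy as np

p = np.array([2, 0, 3, 1], dtype=np.int32)
s = _inverse_permutation(p)
print(s)       # [1 3 0 2]
print(p[s])    # [0 1 2 3] -- composing with the inverse restores the identity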
|
0e8a4cf7156c9dac6a3bb89eb3edb8960478d7b6
| 3,639,995
|
def blend0(d=0.0, u=1.0, s=1.0):
"""
blending function trapezoid
d = delta x = xabs - xdr
u = uncertainty radius of xabs estimate error
s = tuning scale factor
returns blend
"""
d = float(abs(d))
u = float(abs(u))
s = float(abs(s))
v = d - u #offset by radius
if v >= s: #first so if s == 0 catches here so no divide by zero below
b = 0.0
elif v <= 0.0:
b = 1.0
else: # 0 < v < s
b = 1.0 - (v / s)
return b
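
A few spot checks of the trapezoid blend (with the default u = s = 1.0): full blend inside the uncertainty radius, a linear ramp of width s, then zero.

print(blend0(d=0.5, u=1.0, s=1.0))   # 1.0  (|d| - u <= 0)
print(blend0(d=1.5, u=1.0, s=1.0))   # 0.5  (halfway down the ramp)
print(blend0(d=3.0, u=1.0, s=1.0))   # 0.0  (past the ramp)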
|
d501db66c34f28421c1517dcd3052fa7b2ee8643
| 3,639,996
|
def median(a, dim=None):
"""
Calculate median along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the median from input data.
Returns
-------
output: af.Array
Array containing the median of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_median(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_median_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
|
0a117fe2f072747e752e77613dc658812630dacc
| 3,639,997
|
from typing import Union
from aiogram.types import Message, CallbackQuery  # assumed aiogram types for the annotations
async def is_photo(obj: Union[Message, CallbackQuery]) -> bool:
"""
Checks if message content is photo
:return: True if so
"""
obj = await _to_message(obj)
return obj.content_type == 'photo'
|
13207a44dba000ad0486997f364f011cfffa9d26
| 3,639,998
|
def check_win(mat):
"""
Returns either:
False: Game not over.
True: Game won, 2048 is found in mat
"""
    if 2048 in mat: # If won, terminal state is needed for RL agent
return True # Terminal state
else:
return False
|
0824bc059cfa32b275c7b63f98d22e8a5b667e06
| 3,639,999
|