content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import sys
def reset_syspath():
    """Build and return a callback that removes a given path from ``sys.path``.

    Intended for use at the end of a test (after all assertions) when the
    test used ``setup_project.setup_project`` to add a base directory to
    ``sys.path``, so the next tests doing the same do not clash.

    Returns:
        callable: ``reset_func(path)`` — removes ``path`` from ``sys.path``
        if present; a no-op otherwise.
    """
    def reset_func(path):
        # list.remove() is the idiomatic form of del-by-index; guarded by the
        # membership test so an absent path is silently ignored.
        if path in sys.path:
            sys.path.remove(path)
    return reset_func
def FSA(profile_exp, profile_sm, diffsys, time, Xlim=[], n=[400, 500], w=None, f=None, alpha=0.3, name=''):
    """
    Forward Simulation Analysis
    Extract diffusion coefficients based on a diffusion profile.
    Please do not close any plot window during the FSA process.
    This is the final step of FSA.
    Parameters
    ----------
    profile_exp : DiffProfile
        Experimental diffusion profile, used for comparison with simulation
        results.
    profile_sm : DiffProfile
        Diffusion profile after data smooth on experimental profile.
    diffsys : DiffSystem
        Diffusion coefficients
    time : float
        Diffusion time in seconds
    Xlim : list (float), optional
        Passed to 'pydiffusion.Dtools.SF', 'pydiffusion.utils.step'.
        Indicates the left and right concentration limits for calculation.
        Default value = [profile.X[0], profile.X[-1]].
    n : list, optional
        Passed to 'pydiffusion.utils.automesh'.
        Meshing number range, default = [400, 500].
    w : list, optional
        Weights of each phase to calculate error.
        Passed to 'pydiffusion.utils.error_profile'.
    f : function of Meshing
        Keyword argument of automesh()
    alpha : float
        Keyword argument of automesh()
    name : str, optional
        Name the output DiffProfile
    Returns
    -------
    profile_sim : DiffProfile
        Simulated diffusion profile after FSA.
    diffsys_sim : DiffSystem
        Calculated diffusion coefficients by FSA.
    Examples
    --------
    After datasmooth() and Dmodel(), FSA can be performed to calculate accurate diffusion coefficients:
    >>> ds = datasmooth(exp)
    >>> dsys = Dmodel(ds, time)
    >>> fsa = FSA(exp, ds, dsys, time)
    """
    # NOTE(review): Xlim=[] and n=[400, 500] are mutable defaults. They are
    # only read (never mutated) here, but None sentinels would be safer.
    # Create step profile on meshed grids
    dism = automesh(profile=profile_sm, diffsys=diffsys, n=n, f=f, alpha=alpha)
    matano = matanocalc(profile_sm, Xlim)
    # A descending profile (X[-1] < X[0]) with default Xlim needs the phase
    # boundary limits reversed to match the profile orientation.
    if Xlim == [] and profile_sm.X[-1] < profile_sm.X[0]:
        profile_init = step(dism, matano, diffsys, [diffsys.Xr[-1, 1], diffsys.Xr[0, 0]])
    else:
        profile_init = step(dism, matano, diffsys, Xlim)
    # Determine the stop criteria of forward simulations
    error_sm = error_profile(profile_sm, profile_exp)
    ipt = input('Default error = %.6f\nInput the stop criteria of error: [%.6f]\n'
                % (error_sm, error_sm*2))
    error_stop = error_sm*2 if ipt == '' else float(ipt)
    # If there is no Xspl info in diffsys, use Phase Mode
    # else: ask if use Phase or Point Mode
    if diffsys.Xspl is not None:
        ipt = input('Use Phase Mode? [n]\n(The shape of diffusivity curve does not change)\n')
        # pp=True selects Point Mode for Dadjust; pp=False selects Phase Mode.
        pp = False if 'y' in ipt or 'Y' in ipt else True
    else:
        pp = False
    if name == '':
        name = profile_exp.name+'_FSA'
    # Diffusion coefficients used for forward simulations
    diffsys_sim = DiffSystem(diffsys.Xr, diffsys.Dfunc, Xspl=diffsys.Xspl, name=name)
    # Plot FSA status
    fig = plt.figure('FSA', figsize=(16, 6))
    ax1, ax2 = fig.add_subplot(121), fig.add_subplot(122)
    profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
    profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
    SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
    DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
    plt.draw()
    plt.tight_layout()
    plt.pause(0.1)
    n_sim = 0
    # Main simulate/compare/adjust loop; exits via the 'Satisfied' break below.
    while True:
        # Simulation
        n_sim += 1
        profile_sim = mphSim(profile_init, diffsys_sim, time, name=name)
        error_sim = error_profile(profile_sim, profile_exp, w)
        print('Simulation %i, error = %f(%f)' % (n_sim, error_sim, error_stop))
        # Plot simulation results
        ax1.cla()
        ax2.cla()
        profileplot(profile_exp, ax1, ls='none', marker='o', c='b', fillstyle='none')
        profileplot(profile_sm, ax1, ls='-', c='g', lw=1)
        profileplot(profile_sim, ax1, ls='-', c='r', lw=2)
        SFplot(profile_sm, time, Xlim, ax2, ls='none', c='b', marker='.')
        DCplot(diffsys_sim, ax2, ls='-', c='r', lw=2)
        plt.draw()
        plt.tight_layout()
        # DC adjust
        Dfunc_adjust = [0] * diffsys_sim.Np
        # If error > stop criteria, continue simulation by auto DC adjustment
        if error_sim > error_stop:
            for ph in range(diffsys_sim.Np):
                try:
                    Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
                except (ValueError, TypeError) as error:
                    # Leave interactive mode before propagating the failure.
                    ita_finish()
                    raise error
            diffsys_sim.Dfunc = Dfunc_adjust
        # If error < stop criteria or simulate too many times
        if error_sim <= error_stop or n_sim > 9:
            # ita_start/ita_finish: presumably toggle interactive plotting
            # state — defined elsewhere in this module, confirm.
            ita_start()
            # Ask if exit
            ipt = ask_input('Satisfied with FSA? [n]')
            if 'y' in ipt or 'Y' in ipt:
                ita_finish()
                break
            # If use Point Mode
            if diffsys_sim.Xspl is not None:
                ipt = ask_input('Use Point Mode (y) or Phase Mode (n)? [y]')
                pp = False if 'n' in ipt or 'N' in ipt else True
                if pp:
                    for ph in range(diffsys_sim.Np):
                        try:
                            Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp)
                        except (ValueError, TypeError) as error:
                            ita_finish()
                            raise error
                    diffsys_sim.Dfunc = Dfunc_adjust
                    DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
                    plt.draw()
                    plt.pause(0.1)
                    ita_finish()
                    # Point Mode handled; skip the Phase Mode section below.
                    continue
            # Phase Mode, ask if use manual input for each phase
            pp = False
            ipt = input('Phase Mode\nManually input for each phase? [n]')
            manual = True if 'y' in ipt or 'Y' in ipt else False
            for ph in range(diffsys_sim.Np):
                if manual:
                    ipt = input('Input deltaD for phase # %i:\n(DC = DC * 10^deltaD, default deltaD = auto)\n' % (ph+1))
                    deltaD = float(ipt) if ipt != '' else None
                else:
                    deltaD = None
                try:
                    Dfunc_adjust[ph] = Dadjust(profile_sm, profile_sim, diffsys_sim, ph, pp, deltaD)
                except (ValueError, TypeError) as error:
                    ita_finish()
                    raise error
            # Apply the adjustment to diffsys_sim
            diffsys_sim.Dfunc = Dfunc_adjust
            DCplot(diffsys_sim, ax2, ls='-', c='m', lw=2)
            plt.draw()
            plt.pause(0.1)
            ita_finish()
    return profile_sim, diffsys_sim
def permutacion_matriz(U, fila_i, idx_max, verbose=False, P=None, r=None):
    """Swap two rows of a matrix (row permutation), in place.

    Args:
        U (matrix): matrix to permute.
        fila_i (int): index of the source row.
        idx_max (int): index of the row to swap it with.
        verbose (bool, optional): verbose output. Defaults to False.
        P (mat, optional): permutation matrix, updated alongside U. Defaults to None.
        r (mat, optional): right-hand side of the equation system. Defaults to None.

    Returns:
        (U, P, r): the matrices U, P and r after the swap.
    """
    print_verbose(
        [
            f"Permutamos fila {fila_i} con {idx_max}",
            f"U antes:\n {np.array(U)}",
            f"P antes:\n {np.array(P)}",
        ],
        verbose,
    )
    if fila_i != idx_max:
        # U is always swapped; P and r only when the caller supplied them.
        targets = [U] + [mat for mat in (P, r) if mat is not None]
        for mat in targets:
            row_a = mat[fila_i, :].copy()
            row_b = mat[idx_max, :].copy()
            mat[idx_max, :] = row_a
            mat[fila_i, :] = row_b
    print_verbose(
        [f"U despues:\n {np.array(U)}", f"P despues:\n {np.array(P)}"], verbose
    )
    return U, P, r
def get_compliance_by_rules(scan_id):
    """
    Lists compliance results by rule for a scan.

    Pages through the API using the offset/is_truncated/next_offset
    pagination contract and returns the concatenated items.
    """
    collected = []
    offset = 0
    truncated = True
    while truncated:
        page = get('scans/%s/compliance_by_rules' % scan_id, {'offset': offset})
        collected.extend(page['items'])
        truncated = page['is_truncated']
        if truncated:
            offset = page['next_offset']
    return collected
def QuadRemeshBrep1(brep, parameters, guideCurves, multiple=False):
    """
    Create Quad Remesh from a Brep

    Args:
        brep (Brep): Set Brep Face Mode by setting QuadRemeshParameters.PreserveMeshArrayEdgesMode
        parameters: quad-remesh parameters forwarded to the compute endpoint
        guideCurves (IEnumerable<Curve>): A curve array used to influence mesh face layout
            The curves should touch the input mesh
            Set Guide Curve Influence by using QuadRemeshParameters.GuideCurveInfluence
        multiple (bool): when True, each argument is a sequence and one
            request per zipped triple is sent.
    """
    url = "rhino/geometry/mesh/quadremeshbrep-brep_quadremeshparameters_curvearray"
    if multiple:
        url += "?multiple=true"
        args = list(zip(brep, parameters, guideCurves))
    else:
        args = [brep, parameters, guideCurves]
    raw = Util.ComputeFetch(url, args)
    return Util.DecodeToCommonObject(raw)
def tags2turbo(lon, lat, dist, bdim=155, timeout=60, pretty_print=False, maxsize=None, tags=None):
    """Build an Overpass Turbo query for the given tag filters around a point.

    Parameters
    ----------
    lon, lat : float
        Longitude/latitude of the query centre.
    dist : float
        Search distance used in the query condition.
    bdim : int, optional
        Base tile dimension for the tile-based query optimizer (default 155).
    timeout : int, optional
        Query timeout in seconds (default 60).
    pretty_print : bool, optional
        Currently unused here; kept for interface compatibility.
    maxsize : int, optional
        Maximum response size forwarded to the query builder.
    tags : list, optional
        Tag filters converted via ``filter2query``. Defaults to no filters.
        (Changed from a mutable ``[]`` default to a ``None`` sentinel to
        avoid the shared-mutable-default pitfall; behavior is unchanged.)

    Returns
    -------
    dict
        ``{"query": <query string>}``.
    """
    if tags is None:
        tags = []
    gtypes = ('node', 'way', 'relation',)
    turbo = Turbo()
    qconditions = [{
        "query": filter2query(tags),
        "distance": dist,
        "gtypes": gtypes,  # Optional. Possible values:
                           # "node", "way", "relation", "way-node", node-relation",
                           # "relation-way", "relation-relation", "relation-backwards"
        # "amplitude": 0,
        # NOTE(review): this strftime-style literal contains a doubled "d"
        # before "T"; the intended format is probably "%Y-%m-%dT%H:%M:%SZ".
        # Left unchanged — confirm against the Turbo API before fixing.
        "newer": "%Y-%m-%ddT%H:%M:%SZ"
    }]
    query = turbo.build_query(
        Turbo.optimize_centralized_query_by_base_tile(lon, lat, qconditions, bdim=bdim),
        timeout=timeout, maxsize=maxsize
    )
    return dict(query=query)
async def get_people(from_number: int = None, up_to_number: int = None):
    """
    Endpoint to get all people from-to given number
    :return: list of people from-to numbers
    """
    # slice(None, None) returns the whole list, so both bounds are optional.
    selection = slice(from_number, up_to_number)
    return _people[selection]
def stacked_bar(data, series_labels=None, category_labels=None,
                show_values=False, value_format="{}", y_label=None,
                grid=True, reverse=False, y_limit=None, size_plot=None, use_dataframe=False, throw_zeros=False,dict_colors={}):
    """Plots a stacked bar chart with the data and labels provided.
    Keyword arguments:
    data -- 2-dimensional numpy array or nested list containing data for each series in rows
    series_labels -- list of series labels (these appear in the legend)
    category_labels -- list of category labels (these appear on the x-axis)
    show_values -- If True then numeric value labels will be shown on each bar
    value_format -- Format string for numeric value labels (default is "{}")
    y_label -- Label for y-axis (str)
    grid -- If True display grid
    reverse -- If True reverse the order that the series are displayed (left-to-right or right-to-left)
    y_limit -- containes a int\float that will be the highest y value shown in the graph and y axis
    size_plot -- contains an array of [ width , hight] we want the plot square area size will be
    use_dataframe -- Bool, if true, data is treated as pandas df with series labels and category labels as rows and colums respectivly
    throw_zeros -- Only applicable if use_dataframe is True, throws rows with all zeros in them
    dict_colors -- mapping from series label to a matplotlib color; every
        series label must be a key (it is indexed unconditionally below)
    """
    # NOTE(review): dict_colors={} is a mutable default argument; it is only
    # read here, but a None sentinel would be safer.
    if throw_zeros and not use_dataframe:
        # TODO make throw zeros work without df too
        raise ValueError("throw_zeros only works if use_dataframe is chosen")
    # if throw zeros, remove rows with all zeros
    if throw_zeros:
        data = data[(data.T != 0).any()]
    # if data frame extract info from dataframe
    if use_dataframe:
        # remove no_change filter if needed:
        if 'no_change' in data.index:
            data = data.drop(['no_change'])
        series_labels = data.index
        category_labels = data.columns
        data = data.values
    ny = len(data[0])
    ind2 = range(ny)
    axes = []
    cum_size = np.zeros(ny)
    data = np.array(data)
    if reverse:
        data = np.flip(data, axis=1)
        category_labels = reversed(category_labels)
    if size_plot:
        fig = plt.figure(figsize=size_plot)
        plt.rcParams['font.size'] = '20'
    # Map each series index to its configured color.
    suit_colors_dict = {}
    for index, column in enumerate(series_labels):
        suit_colors_dict[index] = dict_colors[column]
    #print(data)
    sum_column = np.sum(data, axis=0)
    #print("old_data",data)
    #print("sum_column", sum_column)
    data = data.astype(float)
    # Normalize every non-zero cell to a percentage of its column total
    # (two decimal places), so each stacked column sums to ~100.
    for row_index in range(len(data)):
        for column_index in range(len(data[row_index])):
            if data[row_index][column_index] != 0.0:
                #print("before", "data[row_index][column_index]",data[row_index][column_index],"sum_column[column_index]*100", sum_column[column_index]*100)
                data[row_index][column_index] = format(data[row_index][column_index]/sum_column[column_index]*100, '.2f')
                #print("after:","\n","data[row_index][column_index]",data[row_index][column_index])
    #print("new data", data)
    #print("category_labels",category_labels )
    #print("series_labels",series_labels)
    # set the text in the same color as the bar
    for i, row_data in enumerate(data):
        axes.append(plt.bar(ind2, row_data, bottom=cum_size,
                            label=series_labels[i]))
        for row in range(len(row_data)):
            axes[i][row].set_color(suit_colors_dict[i])
        cum_size += row_data
    if not category_labels is None:
        plt.xticks(ind2, category_labels, rotation=20, fontsize=30)
    if y_label != None:
        plt.ylabel(y_label, fontsize=30)
    plt.legend()
    if grid:
        plt.grid()
    if y_limit != None:
        plt.ylim(0, y_limit)
    if show_values:
        # Find the tallest single bar across all series to scale annotations.
        max_tmp = []
        for axis in axes:
            max_tmp.append(max([bar.get_height() for bar in axis]))
        max_height_data = max(max_tmp)
        proportion_to_high = 0.08*max_height_data
        # Bars shorter than this threshold get an arrow annotation above the
        # column instead of an in-bar label.
        need_arrow = 0.08*max_height_data
        start_extra_heights = [axes[-1][i].get_y() + axes[-1][i].get_height() for i in range(len(axes[-1]))]
        jumps = [proportion_to_high for i in range(len(axes[0]))]
        for index,axis in enumerate(axes):
            for counter, bar in enumerate(axis):
                max_height = start_extra_heights[counter]
                w, h = bar.get_width(), bar.get_height()
                if 0.0 < h < need_arrow:
                    plt.annotate(value_format.format(h)+'%', xy=(bar.get_x(), bar.get_y()),
                                 xytext=(bar.get_x() + 0.2, max_height + jumps[counter]), color=suit_colors_dict[index],
                                 arrowprops=dict(arrowstyle="->"))
                    # Stack subsequent arrow labels higher so they don't overlap.
                    jumps[counter] += proportion_to_high * 1.2
                elif h > 0.0:
                    plt.text(bar.get_x() + w / 2, bar.get_y() + h / 2, value_format.format(h)+'%', ha="center",
                             va="center")
        # adding the number of total lines of the original pileups
        for index, bar in enumerate(axes[-1]):
            max_height = start_extra_heights[index]
            if max_height == 0.0:
                max_height = 1.3
            plt.annotate(value_format.format(sum_column[index]), xy=(bar.get_x(), bar.get_y()+bar.get_height()),
                         xytext=(bar.get_x(), max_height + jumps[index]),
                         arrowprops=dict(arrowstyle='fancy'))
    return plt, axes
import click
import os
import yaml
def process_results(economy):
    """
    Combine OSeMOSYS solution files and write as the result as an Excel file where each result parameter is a tab in the Excel file.

    :param economy: economy identifier; selects tmp/<economy> as the input
        directory and results/<economy> as the output directory.
    :return: dict mapping result-parameter name to a pivoted, non-empty
        pandas DataFrame (years as columns).
    """
    click.echo(click.style('\n-- Preparing results...',fg='cyan'))
    tmp_directory = 'tmp/{}'.format(economy)
    parent_directory = "./results/"
    child_directory = economy
    path = os.path.join(parent_directory,child_directory)
    try:
        os.mkdir(path)
    except OSError:
        # Directory already exists (or cannot be created) — best effort.
        #print ("Creation of the directory %s failed" % path)
        pass
    else:
        click.echo(click.style("Successfully created the directory %s " % path,fg='yellow'))
    # results_config.yml describes each parameter: its type and index columns.
    with resources.open_text('aperc_osemosys','results_config.yml') as open_file:
        contents_var = yaml.load(open_file, Loader=yaml.FullLoader)
    results_df={}
    # Load the CSV for every 'var'-typed parameter from the tmp directory.
    for key,value in contents_var.items():
        if contents_var[key]['type'] == 'var':
            fpath = './{}/'.format(tmp_directory)+key+'.csv'
            #print(fpath)
            _df = pd.read_csv(fpath).reset_index(drop=True)
            results_df[key] = _df
    results_dfs = {}
    results_dfs = {k:v for (k,v) in results_df.items() if not v.empty}
    _result_tables = {}
    # Pivot each result to wide form (YEAR columns), dropping all-zero rows
    # except for 'equ'-typed results.
    for key,value in results_dfs.items():
        indices = contents_var[key]['indices']
        _df = results_dfs[key]
        if 'TIMESLICE' in indices:
            unwanted_members = {'YEAR', 'VALUE'}
            _indices = [ele for ele in indices if ele not in unwanted_members]
            df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE',aggfunc=np.sum)
            df = df.loc[(df != 0).any(1)] # remove rows if all are zero
            _result_tables[key] = df
        elif 'TIMESLICE' not in indices:
            if contents_var[key]['type'] == 'var':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
            elif contents_var[key]['type'] == 'param':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
            elif contents_var[key]['type'] == 'equ':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                #df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
        # NOTE(review): indentation reconstructed — fillna placed at loop-body
        # level so every pivoted table has NaNs replaced; confirm intent.
        _result_tables[key]=_result_tables[key].fillna(0)
    results_tables = {k: v for k, v in _result_tables.items() if not v.empty}
    return results_tables
import chunk
def window(x, y, width, overlap=0., x_0=None, expansion=None, cap_left=True,
           cap_right=True, ret_x=True):
    """Break arrays x and y into slices.
    Parameters
    ----------
    x : array_like
        Monotonically increasing numbers. If x is not monotonically increasing
        then it will be flipped, beware that this may not have the desired
        effect.
    y : array_like
        Arbitrary values, same size as x.
    width : float
        Window width in the same units as x.
    overlap : float, optional
        Overlap of windows in the same units as x. If negative, the window
        steps along x values rather than binning.
    x_0 : float, optional
        Position in x at which to start windowing. (untested)
    expansion : polynomial coefficients, optional
        Describes the rate of change of window size with x. (not implimented)
        The idea is that width = width*np.polyval(expansion, x). Overlap is
        similarly increased.
    cap_left : boolean, optional
        Stop window exceeding left most (minimum) value of x. Only applies when
        overlap is positive.
    cap_right : boolean, optional
        Stop window exceeding right most (maximum) value of x. Only applies
        when overlap is positive.
    ret_x : boolean, optional
        If True each chunk contains both x and y; if False only y.
    Returns
    -------
    vals : numpy.array
        Contains all the windowed chunks of x and y.
    Notes
    -----
    The current check on monotonicity is whether more than 20% of points in
    x are are not monotonic. This is a sort of hack to avoid flipping for the
    occasional erroneous non-monotonic point.
    """
    if x.size != y.size:
        raise ValueError('x and y must be of equal size.')
    if overlap > width:
        raise ValueError('The overlap cannot be larger than the width.')
    # Incredibly bad check for monotonicity.
    not_monotonic = np.sum(np.diff(x) < 0) > 0.2*len(x)
    if not_monotonic:
        x = utils.flip_padded(x)
        y = utils.flip_padded(y)
    # Drop NaNs, and everything before x_0 when it is given.
    if x_0 is not None:
        idxs = ~np.isnan(x) & (x >= x_0)
    else:
        idxs = ~np.isnan(x)
    x = x[idxs]
    y = y[idxs]
    if overlap < 0.:
        # Negative overlap: one window centred on every x value.
        left = x - width/2.
        right = left + width
    elif overlap >= 0.:
        step = width - overlap
        if cap_left:
            xmin = x[0]
        else:
            xmin = x[0] - width
        if cap_right:
            # Take away slightly less than the full width to allow for the last
            # bin to complete the full range.
            xmax = x[-1] - 0.99*width
        else:
            xmax = x[-1]
        left = np.arange(xmin, xmax, step)
        right = left + width
    # One (left, right) edge pair per window.
    bins = np.transpose(np.vstack((left, right)))
    if ret_x:
        vals = np.asarray([chunk(x, b, y) for b in bins])
    else:
        vals = np.asarray([chunk(x, b, y)[1] for b in bins])
    if not_monotonic:
        # Restore the original orientation of the input.
        vals = np.flipud(vals)
    return vals
import json
def mark_property_purchased(request):
    """
    Api to mark a property as purchased by the buyer without page reload using vue or htmx

    :param request: HTTP request whose JSON body contains ``property_id``.
    :return: JsonResponse ``{"success": True}``.
    """
    data = json.loads(request.body)
    # NOTE(review): data['property_id'] is presumably an id (int/str), yet it
    # is used below as if it were a Property model instance
    # (``.property_status``, ``.update(...)``). A lookup such as
    # ``Property.objects.get(pk=...)`` appears to be missing — confirm.
    # Also shadows the builtin ``property``.
    property = data['property_id']
    # NOTE(review): precedence makes this
    # ``(not (status == SOLD)) and property_sold`` — i.e. "not already marked
    # sold AND the sold flag is set"; verify this is the intended condition.
    if not property.property_status == Property.SOLD and property.property_sold:
        property.update(property_status = Property.SOLD)
    # Success message is flashed unconditionally, even when nothing changed.
    messages.success(request, f"You have successfully completed {property.property_title} purchase.")
    return JsonResponse({"success": True})
import os
def file_exists(file: str) -> bool:
    """Accepts path/file or file and tests if it exists (as a file).

    Returns False for directories and for paths that do not exist.
    """
    # os.path.isfile already returns False for non-existent paths, so the
    # separate existence check is unnecessary.
    return os.path.isfile(file)
def argmax_unique(arr, axis):
    """Return the argmax along *axis* plus a mask excluding non-unique maxima.

    A slice is considered "resolved" when exactly one entry along *axis* is
    set, i.e. the slice sums to 1 (the input is expected to hold 0/1
    indicator values). All other positions are masked out so that nodes
    that aren't completely resolved can be excluded.

    Parameters
    ----------
    arr : array_like
        Indicator array.
    axis : int
        Axis along which the argmax is taken.

    Returns
    -------
    uni_argmax : numpy.ma.MaskedArray
        Argmax values (fill_value=-1), masked where the slice sum != 1.
    nonunique_mask : array of bool (or numpy.ma.nomask)
        True where the slice did not sum to exactly 1.
    """
    arrm = np.argmax(arr, axis)
    arrs = np.sum(arr, axis)
    # BUG FIX: the previous expression ``(arrs == 1) is False`` compared an
    # ndarray to False by *identity*, which is always False, so the mask was
    # never applied. Elementwise ``arrs != 1`` is what was intended.
    nonunique_mask = np.ma.make_mask(arrs != 1)
    uni_argmax = np.ma.masked_array(arrm, mask=nonunique_mask, fill_value=-1)
    return uni_argmax, nonunique_mask
from typing import Dict
import json
def get_json(response: func.HttpResponse) -> Dict:
    """Get JSON from an HttpResponse.

    Decodes the response body as UTF-8 before parsing.
    """
    body = response.get_body()
    return json.loads(body.decode("utf-8"))
def _float_feature(value):
    """Returns a float_list from a float / double.

    Scalars are wrapped in a single-element list before serialization.
    """
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def sendVillasNodeOutput(message, output_mapping_vector, powerflow_results, state_estimation_results, scenario_flag):
    """
    to create the payload according to "villas_node_output.json"
    @param message: received message from the server (json.loads(msg.payload)[0])
    @param output_mapping_vector: according to villas_node_output.json (see function read_mapping_file)
    @param powerflow_results: results of powerflow (type acs.state_estimation.results.Results)
    @param state_estimation_results: results of state_estimation (type acs.state_estimation.results.Results)
    @param scenario_flag:
    @return: string formatted according to "villas_node_output.json"
    """
    VillasNodeOutput = {}
    VillasNodeOutput["ts"] = {}
    VillasNodeOutput["ts"]["origin"] = message["ts"]["origin"]
    if "sequence" in message:
        VillasNodeOutput["sequence"] = message["sequence"]
    else:
        print('Sequence no. not available.')
        VillasNodeOutput["sequence"] = 1
    # calculate Vmag_err
    # Relative voltage-magnitude error (in percent) of the state estimation
    # against the powerflow result, per node.
    Vmag_err = np.zeros(len(powerflow_results.nodes))
    for idx, elem in enumerate(powerflow_results.nodes):
        uuid_pf = elem.topology_node.uuid
        Vmag_true = np.absolute(elem.voltage)
        Vmag_est = np.absolute(state_estimation_results.get_node(uuid=uuid_pf).voltage)
        Vmag_err[idx] = np.absolute(Vmag_est - Vmag_true)
        Vmag_err[idx] = 100 * np.divide(Vmag_err[idx], Vmag_true)
    max_err = np.amax(Vmag_err)
    mean_err = np.mean(Vmag_err)
    # Fill the data vector in the order prescribed by the mapping file.
    data = [None] * len(output_mapping_vector)
    for idx, elem in enumerate(output_mapping_vector):
        if elem[0] == "max_err":
            data[idx] = max_err
            continue
        elif elem[0] == "mean_err":
            data[idx] = mean_err
            continue
        elif elem[0] == "scenario_flag":
            data[idx] = float(scenario_flag)
            continue
        else:  # elem = ["N4", "V", "phase", "est"] or elem = ["N4", "V", "phase", "pf"]
            # elem[3] selects the result source, elem[2] the quantity.
            node = None
            if elem[3] == "est":
                node = state_estimation_results.get_node(uuid=elem[0])
            elif elem[3] == "pf":
                node = powerflow_results.get_node(uuid=elem[0])
            value = None
            if elem[2] == "mag":  # elem_data[2] = "mag" or "phase"
                value = np.absolute(node.voltage)
            elif elem[2] == "phase":
                value = np.angle(node.voltage)
            data[idx] = value
    VillasNodeOutput["data"] = data
    # VILLASnode expects a JSON array as payload.
    return "[" + dumps(VillasNodeOutput) + "]"
def find_org_rooms(dbs, user_id, meeting_date):
    """
    Fetch the organisations that can be assigned to the given user.

    :param dbs: database session
    :param user_id: id of the user whose organisations are listed
    :param meeting_date: date used to look up meetings scheduled that day
    :return: list of dicts with org_id, org_name and parent_id
    """
    orgs = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id)\
        .outerjoin(SysUserOrg, (SysUserOrg.org_id == SysOrg.id))\
        .filter(SysUserOrg.user_id == user_id).all()
    # NOTE(review): `rooms` and `meetings` are queried but never used below —
    # the room-merging code is commented out; consider removing these queries
    # or restoring the merge.
    rooms = dbs.query(HasBoardroom.id, HasBoardroom.name, HasBoardroom.org_id) \
        .outerjoin(SysOrg, SysOrg.id == HasBoardroom.org_id)\
        .outerjoin(SysUserOrg, (SysUserOrg.org_id == SysOrg.id)) \
        .filter(SysUserOrg.user_id == user_id).all()
    meetings = dbs.query(HasMeeting.id, HasMeeting.name, HasMeeting.description, HasMeetBdr.boardroom_id,
                         HasMeetBdr.meeting_date, HasMeeting.start_time,
                         HasMeeting.end_time, HasMeeting.repeat, HasMeeting.create_user, HasMeeting.create_time,
                         SysUser.user_name, SysUser.phone, SysOrg.org_name)\
        .outerjoin(SysUser, HasMeeting.create_user == SysUser.id)\
        .outerjoin(SysOrg, SysUser.org_id == SysOrg.id)\
        .outerjoin(HasMeetBdr, HasMeetBdr.meeting_id == HasMeeting.id)\
        .outerjoin(HasBoardroom, HasBoardroom.id == HasMeetBdr.boardroom_id)\
        .filter(HasMeetBdr.meeting_date == meeting_date).all()
    lists = []
    for org in orgs:
        org_id = org.id
        org_name = org.org_name
        parent_id = org.parent_id
        # room_list = []
        # for room in rooms:
        #     # merge the meeting rooms into the organisation list
        #     if org_id == room.org_id:
        #         room_dict = {
        #             'id': room.id,
        #             'name': room.name,
        #             'org_id': org_id
        #         }
        #         room_list.append(room_dict)
        temp_dict = {
            'org_id': org_id,
            'org_name': org_name,
            'parent_id': parent_id
            # 'rooms': room_list
        }
        lists.append(temp_dict)
    return lists
def config_openapi(app: FastAPI, settings: ApiSettings):
    """Install a customized (and cached) OpenAPI schema builder on the app."""
    def custom_openapi():
        """Build the OpenAPI schema once and cache it on the app instance."""
        if app.openapi_schema:
            return app.openapi_schema
        schema = get_openapi(
            title="Arturo STAC API", version="0.1", routes=app.routes
        )
        if settings.api_extension_is_enabled(ApiExtensions.fields):
            # Both search verbs return an ItemCollection when the fields
            # extension is enabled.
            search_path = schema["paths"]["/search"]
            for verb in ("get", "post"):
                search_path[verb]["responses"]["200"]["content"][
                    "application/json"
                ]["schema"] = {"$ref": "#/components/schemas/ItemCollection"}
        app.openapi_schema = schema
        return app.openapi_schema
    app.openapi = custom_openapi
from typing import Sequence
def twelve_tone_matrix(
    row: Sequence,
) -> DataFrame:
    """ Returns a twelve-tone matrix in the form of a Pandas DataFrame.

    The first row is the given tone row; each subsequent row is the row
    transposed by the corresponding step of the transposed inversion.
    """
    inverted = inversion(row)
    # Transpose the inversion so it starts on the same pitch class as row.
    inv_column = transposition(inverted, row[0] - inverted[0])
    rows = [row] + [
        transposition(row, (inv_column[i] - row[0]) % 12) for i in range(1, 12)
    ]
    return DataFrame(reshape(rows, (12, 12)))
async def upstream_http_exception_handler(request, exc: HTTPError):
    """Handle http exceptions from upstream server"""
    upstream_url = request.query_params['url']
    logger.warning(f"Upstream HTTP error [{upstream_url}]: {repr(exc)}")
    # Convert to FastApi exception
    wrapped = HTTPException(502, f"Upstream server returned: [{exc.status}] {exc.message}")
    return await http_exception_handler(request, wrapped)
from typing import List
from pathlib import Path
import re
def extract_latest_checkpoint_and_epoch(available_files: List[Path]) -> "PathAndEpoch":
    """
    Checkpoints are saved as recovery_epoch={epoch}.ckpt; find the checkpoint
    with the highest epoch number.

    :param available_files: all available checkpoints (must be non-empty;
        each stem must contain at least one run of digits — the first run
        is taken as the epoch).
    :return: (path of the latest-epoch checkpoint, that epoch number)
    :raises ValueError: if available_files is empty.
    :raises IndexError: if a file stem contains no digits.
    """
    if not available_files:
        # np.argmax on an empty list raised ValueError too; make it explicit.
        raise ValueError("available_files must not be empty")
    recovery_epochs = [int(re.findall(r"[\d]+", f.stem)[0]) for f in available_files]
    # Pure-Python argmax — no need to pull in numpy for a single index lookup.
    idx_max_epoch = max(range(len(recovery_epochs)), key=recovery_epochs.__getitem__)
    return available_files[idx_max_epoch], recovery_epochs[idx_max_epoch]
def bark_filter_banks(nfilts=20,
                      nfft=512,
                      fs=16000,
                      low_freq=0,
                      high_freq=None,
                      scale="constant"):
    """
    Compute Bark-filterbanks. The filters are stored in the rows, the columns
    correspond to fft bins.
    Args:
        nfilts (int) : the number of filters in the filterbank.
                       (Default 20)
        nfft (int) : the FFT size.
                     (Default is 512)
        fs (int) : sample rate/ sampling frequency of the signal.
                   (Default 16000 Hz)
        low_freq (int) : lowest band edge of mel filters.
                         (Default 0 Hz)
        high_freq (int) : highest band edge of mel filters.
                          (Default samplerate/2)
        scale (str) : choose if max bins amplitudes ascend, descend or are constant (=1).
                      Default is "constant"
    Returns:
        a numpy array of size nfilts * (nfft/2 + 1) containing filterbank.
        Each row holds 1 filter.
    Raises:
        ParameterError: if low_freq is negative or high_freq exceeds fs/2.
    """
    # init freqs
    high_freq = high_freq or fs / 2
    low_freq = low_freq or 0
    # run checks
    if low_freq < 0:
        raise ParameterError(ErrorMsgs["low_freq"])
    if high_freq > (fs / 2):
        raise ParameterError(ErrorMsgs["high_freq"])
    # compute points evenly spaced in Bark scale (points are in Bark)
    # nfilts + 4 points: each filter j uses points j-2 .. j+2 below.
    low_bark = hz2bark(low_freq)
    high_bark = hz2bark(high_freq)
    bark_points = np.linspace(low_bark, high_bark, nfilts + 4)
    # we use fft bins, so we have to convert from Bark to fft bin number
    bins = np.floor(bark2fft(bark_points, fs, nfft))
    fbank = np.zeros([nfilts, nfft // 2 + 1])
    # init scaler
    # c is the per-filter amplitude: fixed at 1 for "constant", ramped
    # down/up across filters for "descendant"/"ascendant".
    if scale == "descendant" or scale == "constant":
        c = 1
    else:
        c = 0
    for j in range(2, nfilts + 2):
        # compute scaler
        if scale == "descendant":
            c -= 1 / nfilts
            c = c * (c > 0) + 0 * (c < 0)  # clamp at 0
        elif scale == "ascendant":
            c += 1 / nfilts
            c = c * (c < 1) + 1 * (c > 1)  # clamp at 1
        # Fill the filter response over its supporting fft-bin range.
        for i in range(int(bins[j - 2]), int(bins[j + 2])):
            fc = bark_points[j]
            fb = fft2bark(i, fs, nfft)
            fbank[j - 2, i] = c * Fm(fb, fc)
    return np.abs(fbank)
import json
def auth_check_response_fixture():
    """Define a fixture to return a successful authorization check."""
    raw = load_fixture("auth_check_response.json")
    return json.loads(raw)
def cancel_session(session_id):
    """
    Cancel all tasks within a session
    Args:
        string: session_id
    Returns:
        dict: results — one "cancelled_<state>" count per cancellable state,
        plus the overall total.
    """
    lambda_response = {}
    all_cancelled_tasks = []
    # task_states_to_cancel is a module-level list (not visible in this chunk).
    for state in task_states_to_cancel:
        res = cancel_tasks_by_status(session_id, state)
        print("Cancelling session: {} status: {} result: {}".format(
            session_id, state, res))
        lambda_response["cancelled_{}".format(state)] = len(res)
        all_cancelled_tasks += res
    # NOTE(review): "tatal_cancelled_tasks" is almost certainly a typo for
    # "total_cancelled_tasks", but the key may already be consumed by
    # downstream clients — coordinate before renaming.
    lambda_response["tatal_cancelled_tasks"] = len(all_cancelled_tasks)
    return(lambda_response)
import sys
def valid_capture_area(top_left, bottom_right):
    """Check the capture area extents for sanity.

    Expects mappings with 'x' and 'y' keys. The bottom-right corner must lie
    strictly below and to the right of the top-left corner; an error message
    goes to stderr when it does not.
    """
    tl_x, tl_y = top_left['x'], top_left['y']
    br_x, br_y = bottom_right['x'], bottom_right['y']
    if br_x <= tl_x or br_y <= tl_y:
        print('The capture area ({},{}) ({},{}) '
              'is invalid.'.format(tl_x, tl_y, br_x, br_y),
              file=sys.stderr)
        return False
    print('Capture area: ({},{}) ({},{})'.format(tl_x, tl_y, br_x, br_y))
    return True
def get_cli_args():
    """Gets, parses, and returns CLI arguments"""
    parser = ArgumentParser(description='Check modules formatting')
    parser.add_argument('filepath', help='path to a file to check')
    parser.add_argument('-n', '--fqcn',
                        dest='fqcn', metavar='FQCN', default=False,
                        required=False, help='FQCN to check examples')
    # The three check toggles share the same shape; register them from data.
    boolean_flags = (
        ('-c', '--comments', 'check_comments', 'check comments'),
        ('-l', '--length', 'check_length', 'check description length'),
        ('-s', '--spelling', 'check_spelling', 'check spelling'),
    )
    for short_opt, long_opt, dest, help_text in boolean_flags:
        parser.add_argument(short_opt, long_opt,
                            dest=dest,
                            action='store_true',
                            required=False,
                            help=help_text)
    return parser.parse_args()
def __virtual__():
    """
    Determine whether or not to load this module
    """
    # Salt loader convention: returning the virtual name loads the module
    # under that name. __virtualname__ is expected to be defined at module
    # level (not visible in this chunk) — confirm.
    return __virtualname__
def load_glove_embeddings():
    """
    Load the glove embeddings into a array and a dictionary with words as
    keys and their associated index as the value. Assumes the glove
    embeddings are located in the same directory and named "glove.6B.50d.txt"
    RETURN: embeddings: the array containing word vectors
            word_index_dict: a dictionary matching a word in string form to
            its index in the embeddings array. e.g. {"apple": 119"}
    """
    #if you are running on the CSE machines, you can load the glove data from here
    #data = open("/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt",'r',encoding="utf-8")
    data = open("glove.6B.50d.txt",'r',encoding="utf-8")
    word_index_dict = {}
    # Index 0 is reserved for the unknown-word token.
    word_index_dict['UNK'] = 0
    # NOTE(review): np.ndarray() leaves the array uninitialized (row 0 for
    # 'UNK' is never written), and the second dimension is `batch_size` — a
    # global not defined in this chunk — although 50d vectors are loaded;
    # confirm this shape is intended.
    embeddings = np.ndarray(shape=(500001, batch_size), dtype='float32')
    # NOTE(review): embeddings_list is never used below.
    embeddings_list = []
    i = 1
    for line in data:
        load_array = line.split()
        # Sets the word to the 0th value in array
        word = load_array[0]
        # Other values are the assigned index
        values = np.asarray(load_array[1:], dtype='float32')
        # Put values in row of array
        embeddings[i] = values
        # E.g. word_index_dict["the"] = 0
        word_index_dict[word] = i
        i = i+1
    data.close()
    return embeddings, word_index_dict
def _f2_rsub_ ( self , other ) :
    """Operator for ``2D-function - other''"""
    # Delegates to the generic binary-op helper with RooFit's Subtraction.
    # NOTE(review): as a reflected-subtraction hook this should compute
    # ``other - self`` — confirm the argument order handling in _f2_rop_.
    return _f2_rop_ ( self , other , Ostap.MoreRooFit.Subtraction , "Subtract_" )
def attenuate(source, factor=0.01, duration=1.0, srate=None):
    """Exponential attenuation towards target value within 'factor' in time 'duration' for constant signals."""
    rate = get_srate() if srate is None else srate
    # One-pole coefficient such that a constant input decays to within
    # `factor` of its target after `duration` seconds.
    pole = factor ** (rate / duration)
    return onepole(source, 1.0, -pole, 1.0 - pole)
import argparse
def get_parser(name):
    """Make a default formatted argument parser.

    Args:
        name (str): program name shown in the usage/help text.

    Returns:
        argparse.ArgumentParser: parser whose help output always shows
        default values for every argument.
    """
    # Local import: `partial` was referenced but never imported at module level.
    from functools import partial

    parser = argparse.ArgumentParser(
        name, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # ArgumentDefaultsHelpFormatter only appends "(default: ...)" when a help
    # string is present, so pre-fill `help` with a single space for every
    # argument added to this parser.
    parser.add_argument = partial(parser.add_argument, help=" ")
    return parser
def IsMultiPanel(hcuts, vcuts) -> bool:
    """
    Check if the image is multi-panel or not.

    The image counts as multi-panel as soon as there is at least one
    horizontal or one vertical cut. Could have more logic.
    """
    has_any_cut = bool(hcuts) or bool(vcuts)
    return has_any_cut
def is_subject_mutable(context, subject):
    """Return True if the subject is mutable in this context."""
    # Administrators may always mutate.
    if context.is_admin:
        return True
    # Ownership must be known on both sides before access can be granted.
    owners_known = subject.owner is not None and context.owner is not None
    return owners_known and subject.owner == context.owner
from datetime import datetime
def time_filter(df, start_date, end_date):
    """Remove times that are not within the start/end bounds.

    Dates are 'YYYY-MM-DD' strings; a falsy bound is ignored. Returns the
    original frame unchanged when neither bound is given.
    """
    mask = None
    if start_date:
        lower = datetime.strptime(start_date, '%Y-%m-%d')
        mask = df.index >= lower
    if end_date:
        upper = datetime.strptime(end_date, '%Y-%m-%d')
        upper_mask = df.index <= upper
        mask = upper_mask if mask is None else (mask & upper_mask)
    if mask is None:
        return df
    return df[mask]
from datetime import datetime
def notifications():
    """
    Fetches the notifications events that occurred between the given block numbers
    URL Params:
      min_block_number: (int) The start block number for querying for notifications
      max_block_number?: (int) The end block number for querying for notifications
      track_id?: (Array<int>) Array of track id for fetching the track's owner id
        and adding the track id to owner user id mapping to the `owners` response field
        NOTE: this is added for notification for listen counts
    Response - Json object w/ the following fields
      notifications: Array of notifications of shape:
        type: 'Follow' | 'Favorite' | 'Repost' | 'Create' | 'RemixCreate' | 'RemixCosign' | 'PlaylistUpdate'
        blocknumber: (int) blocknumber of notification
        timestamp: (string) timestamp of notification
        initiator: (int) the user id that caused this notification
        metadata?: (any) additional information about the notification
          entity_id?: (int) the id of the target entity (ie. playlist id of a playlist that is reposted)
          entity_type?: (string) the type of the target entity
          entity_owner_id?: (int) the id of the target entity's owner (if applicable)
          playlist_update_timestamp?: (string) timestamp of last update of a given playlist
          playlist_update_users?: (array<int>) user ids which favorited a given playlist
      info: Dictionary of metadata w/ min_block_number & max_block_number fields
      milestones: Dictionary mapping of follows/reposts/favorites (processed within the blocks params)
        Root fields:
          follower_counts: Contains a dictionary of user id => follower count (up to the max_block_number)
          repost_counts: Contains a dictionary tracks/albums/playlists of id to repost count
          favorite_counts: Contains a dictionary tracks/albums/playlists of id to favorite count
      owners: Dictionary containing the mapping for track id / playlist id / album -> owner user id
        The root keys are 'tracks', 'playlists', 'albums' and each contains the id to owner id mapping
    """
    db = get_db_read_replica()
    min_block_number = request.args.get("min_block_number", type=int)
    max_block_number = request.args.get("max_block_number", type=int)

    track_ids_to_owner = []
    try:
        track_ids_str_list = request.args.getlist("track_id")
        track_ids_to_owner = [int(y) for y in track_ids_str_list]
    except Exception as e:
        logger.error(f"Failed to retrieve track list {e}")

    # Max block number is not explicitly required (yet)
    if not min_block_number and min_block_number != 0:
        return api_helpers.error_response({"msg": "Missing min block number"}, 400)

    # Clamp the queried window to at most max_block_diff blocks so a single
    # request cannot scan an unbounded block range.
    if not max_block_number:
        max_block_number = min_block_number + max_block_diff
    elif (max_block_number - min_block_number) > max_block_diff:
        max_block_number = min_block_number + max_block_diff

    # Never query past the chain head: cap max_block_number at the current block.
    with db.scoped_session() as session:
        current_block_query = session.query(Block).filter_by(is_current=True)
        current_block_query_results = current_block_query.all()
        current_block = current_block_query_results[0]
        current_max_block_num = current_block.number
        if current_max_block_num < max_block_number:
            max_block_number = current_max_block_num

    notification_metadata = {
        "min_block_number": min_block_number,
        "max_block_number": max_block_number,
    }

    # Retrieve milestones statistics
    milestone_info = {}

    # Cache owner info for network entities and pass in w/results
    owner_info = {const.tracks: {}, const.albums: {}, const.playlists: {}}

    # List of notifications generated from current protocol state
    notifications_unsorted = []

    with db.scoped_session() as session:
        #
        # Query relevant follow information
        #
        follow_query = session.query(Follow)

        # Impose min block number restriction
        follow_query = follow_query.filter(
            Follow.is_current == True,
            Follow.is_delete == False,
            Follow.blocknumber > min_block_number,
            Follow.blocknumber <= max_block_number,
        )

        follow_results = follow_query.all()
        # Used to retrieve follower counts for this window
        followed_users = []
        # Represents all follow notifications
        follow_notifications = []
        for entry in follow_results:
            follow_notif = {
                const.notification_type: const.notification_type_follow,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.follower_user_id,
                const.notification_metadata: {
                    const.notification_follower_id: entry.follower_user_id,
                    const.notification_followee_id: entry.followee_user_id,
                },
            }
            follow_notifications.append(follow_notif)
            # Add every user who gained a new follower
            followed_users.append(entry.followee_user_id)

        # Query count for any user w/new followers
        follower_counts = get_follower_count_dict(
            session, followed_users, max_block_number
        )
        milestone_info["follower_counts"] = follower_counts

        notifications_unsorted.extend(follow_notifications)

        #
        # Query relevant favorite information
        #
        favorites_query = session.query(Save)
        favorites_query = favorites_query.filter(
            Save.is_current == True,
            Save.is_delete == False,
            Save.blocknumber > min_block_number,
            Save.blocknumber <= max_block_number,
        )
        favorite_results = favorites_query.all()

        # ID lists to query count aggregates
        favorited_track_ids = []
        favorited_album_ids = []
        favorited_playlist_ids = []

        # List of favorite notifications
        favorite_notifications = []
        favorite_remix_tracks = []

        for entry in favorite_results:
            favorite_notif = {
                const.notification_type: const.notification_type_favorite,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.user_id,
            }
            save_type = entry.save_type
            save_item_id = entry.save_item_id
            metadata = {
                const.notification_entity_type: save_type,
                const.notification_entity_id: save_item_id,
            }

            # NOTE if deleted, the favorite can still exist
            # TODO: Can we aggregate all owner queries and perform at once...?
            if save_type == SaveType.track:
                owner_id = get_owner_id(session, "track", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_track_ids.append(save_item_id)
                owner_info[const.tracks][save_item_id] = owner_id

                favorite_remix_tracks.append(
                    {
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        "user_id": entry.user_id,
                        "item_owner_id": owner_id,
                        "item_id": save_item_id,
                    }
                )

            elif save_type == SaveType.album:
                owner_id = get_owner_id(session, "album", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_album_ids.append(save_item_id)
                owner_info[const.albums][save_item_id] = owner_id

            elif save_type == SaveType.playlist:
                owner_id = get_owner_id(session, "playlist", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_playlist_ids.append(save_item_id)
                owner_info[const.playlists][save_item_id] = owner_id

            favorite_notif[const.notification_metadata] = metadata
            favorite_notifications.append(favorite_notif)
        notifications_unsorted.extend(favorite_notifications)

        track_favorite_dict = {}
        album_favorite_dict = {}
        playlist_favorite_dict = {}

        if favorited_track_ids:
            track_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_track_ids,
                [SaveType.track],
                max_block_number,
            )
            track_favorite_dict = dict(track_favorite_counts)

            favorite_remix_notifications = get_cosign_remix_notifications(
                session, max_block_number, favorite_remix_tracks
            )
            notifications_unsorted.extend(favorite_remix_notifications)

        if favorited_album_ids:
            album_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_album_ids,
                [SaveType.album],
                max_block_number,
            )
            album_favorite_dict = dict(album_favorite_counts)

        if favorited_playlist_ids:
            playlist_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_playlist_ids,
                [SaveType.playlist],
                max_block_number,
            )
            playlist_favorite_dict = dict(playlist_favorite_counts)

        milestone_info[const.notification_favorite_counts] = {}
        milestone_info[const.notification_favorite_counts][
            const.tracks
        ] = track_favorite_dict
        milestone_info[const.notification_favorite_counts][
            const.albums
        ] = album_favorite_dict
        milestone_info[const.notification_favorite_counts][
            const.playlists
        ] = playlist_favorite_dict

        #
        # Query relevant tier change information
        #
        balance_change_query = session.query(UserBalanceChange)

        # Impose min block number restriction
        balance_change_query = balance_change_query.filter(
            UserBalanceChange.blocknumber > min_block_number,
            UserBalanceChange.blocknumber <= max_block_number,
        )

        balance_change_results = balance_change_query.all()
        tier_change_notifications = []

        for entry in balance_change_results:
            prev = int(entry.previous_balance)
            current = int(entry.current_balance)
            # Check for a tier change and add to tier_change_notification
            # Thresholds checked highest-first so only the highest crossed
            # tier produces a notification.
            tier = None
            if prev < 100000 <= current:
                tier = "platinum"
            elif prev < 10000 <= current:
                tier = "gold"
            elif prev < 100 <= current:
                tier = "silver"
            elif prev < 10 <= current:
                tier = "bronze"

            if tier is not None:
                tier_change_notif = {
                    const.notification_type: const.notification_type_tier_change,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: datetime.now(),
                    const.notification_initiator: entry.user_id,
                    const.notification_metadata: {
                        const.notification_tier: tier,
                    },
                }
                tier_change_notifications.append(tier_change_notif)

        notifications_unsorted.extend(tier_change_notifications)

        #
        # Query relevant repost information
        #
        repost_query = session.query(Repost)
        repost_query = repost_query.filter(
            Repost.is_current == True,
            Repost.is_delete == False,
            Repost.blocknumber > min_block_number,
            Repost.blocknumber <= max_block_number,
        )
        repost_results = repost_query.all()

        # ID lists to query counts
        reposted_track_ids = []
        reposted_album_ids = []
        reposted_playlist_ids = []

        # List of repost notifications
        repost_notifications = []

        # List of repost notifications
        repost_remix_notifications = []
        repost_remix_tracks = []

        for entry in repost_results:
            repost_notif = {
                const.notification_type: const.notification_type_repost,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.user_id,
            }
            repost_type = entry.repost_type
            repost_item_id = entry.repost_item_id
            metadata = {
                const.notification_entity_type: repost_type,
                const.notification_entity_id: repost_item_id,
            }
            if repost_type == RepostType.track:
                owner_id = get_owner_id(session, "track", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_track_ids.append(repost_item_id)
                owner_info[const.tracks][repost_item_id] = owner_id
                repost_remix_tracks.append(
                    {
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        "user_id": entry.user_id,
                        "item_owner_id": owner_id,
                        "item_id": repost_item_id,
                    }
                )

            elif repost_type == RepostType.album:
                owner_id = get_owner_id(session, "album", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_album_ids.append(repost_item_id)
                owner_info[const.albums][repost_item_id] = owner_id

            elif repost_type == RepostType.playlist:
                owner_id = get_owner_id(session, "playlist", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_playlist_ids.append(repost_item_id)
                owner_info[const.playlists][repost_item_id] = owner_id

            repost_notif[const.notification_metadata] = metadata
            repost_notifications.append(repost_notif)

        # Append repost notifications
        notifications_unsorted.extend(repost_notifications)

        track_repost_count_dict = {}
        album_repost_count_dict = {}
        playlist_repost_count_dict = {}

        # Aggregate repost counts for relevant fields
        # Used to notify users of entity-specific milestones
        if reposted_track_ids:
            track_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_track_ids,
                [RepostType.track],
                max_block_number,
            )
            track_repost_count_dict = dict(track_repost_counts)

            repost_remix_notifications = get_cosign_remix_notifications(
                session, max_block_number, repost_remix_tracks
            )
            notifications_unsorted.extend(repost_remix_notifications)

        if reposted_album_ids:
            album_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_album_ids,
                [RepostType.album],
                max_block_number,
            )
            album_repost_count_dict = dict(album_repost_counts)

        if reposted_playlist_ids:
            playlist_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_playlist_ids,
                [RepostType.playlist],
                max_block_number,
            )
            playlist_repost_count_dict = dict(playlist_repost_counts)

        milestone_info[const.notification_repost_counts] = {}
        milestone_info[const.notification_repost_counts][
            const.tracks
        ] = track_repost_count_dict
        milestone_info[const.notification_repost_counts][
            const.albums
        ] = album_repost_count_dict
        milestone_info[const.notification_repost_counts][
            const.playlists
        ] = playlist_repost_count_dict

        # Query relevant created entity notification - tracks/albums/playlists
        created_notifications = []

        #
        # Query relevant created tracks for remix information
        #
        remix_created_notifications = []

        # Aggregate track notifs
        tracks_query = session.query(Track)
        # TODO: Is it valid to use Track.is_current here? Might not be the right info...
        tracks_query = tracks_query.filter(
            Track.is_unlisted == False,
            Track.is_delete == False,
            Track.stem_of == None,
            Track.blocknumber > min_block_number,
            Track.blocknumber <= max_block_number,
        )
        # created_at == updated_at selects rows representing brand-new tracks
        # (never edited), as opposed to updates handled further below.
        tracks_query = tracks_query.filter(Track.created_at == Track.updated_at)
        track_results = tracks_query.all()
        for entry in track_results:
            track_notif = {
                const.notification_type: const.notification_type_create,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.owner_id,
                # TODO: is entity owner id necessary for tracks?
                const.notification_metadata: {
                    const.notification_entity_type: "track",
                    const.notification_entity_id: entry.track_id,
                    const.notification_entity_owner_id: entry.owner_id,
                },
            }
            created_notifications.append(track_notif)

            if entry.remix_of:
                # Add notification to remix track owner
                parent_remix_tracks = [
                    t["parent_track_id"] for t in entry.remix_of["tracks"]
                ]
                remix_track_parents = (
                    session.query(Track.owner_id, Track.track_id)
                    .filter(
                        Track.track_id.in_(parent_remix_tracks),
                        Track.is_unlisted == False,
                        Track.is_delete == False,
                        Track.is_current == True,
                    )
                    .all()
                )
                for remix_track_parent in remix_track_parents:
                    [
                        remix_track_parent_owner,
                        remix_track_parent_id,
                    ] = remix_track_parent
                    remix_notif = {
                        const.notification_type: const.notification_type_remix_create,
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        const.notification_initiator: entry.owner_id,
                        # TODO: is entity owner id necessary for tracks?
                        const.notification_metadata: {
                            const.notification_entity_type: "track",
                            const.notification_entity_id: entry.track_id,
                            const.notification_entity_owner_id: entry.owner_id,
                            const.notification_remix_parent_track_user_id: remix_track_parent_owner,
                            const.notification_remix_parent_track_id: remix_track_parent_id,
                        },
                    }
                    remix_created_notifications.append(remix_notif)

        # Handle track update notifications
        # TODO: Consider switching blocknumber for updated at?
        updated_tracks_query = session.query(Track)
        updated_tracks_query = updated_tracks_query.filter(
            Track.is_unlisted == False,
            Track.stem_of == None,
            Track.created_at != Track.updated_at,
            Track.blocknumber > min_block_number,
            Track.blocknumber <= max_block_number,
        )
        updated_tracks = updated_tracks_query.all()
        for entry in updated_tracks:
            prev_entry_query = (
                session.query(Track)
                .filter(
                    Track.track_id == entry.track_id,
                    Track.blocknumber < entry.blocknumber,
                )
                .order_by(desc(Track.blocknumber))
            )
            # Previous unlisted entry indicates transition to public, triggering a notification
            # NOTE(review): .first() may return None if no earlier revision
            # exists — this assumes updated tracks always have one; verify.
            prev_entry = prev_entry_query.first()

            # Tracks that were unlisted and turned to public
            if prev_entry.is_unlisted == True:
                track_notif = {
                    const.notification_type: const.notification_type_create,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: entry.created_at,
                    const.notification_initiator: entry.owner_id,
                    # TODO: is entity owner id necessary for tracks?
                    const.notification_metadata: {
                        const.notification_entity_type: "track",
                        const.notification_entity_id: entry.track_id,
                        const.notification_entity_owner_id: entry.owner_id,
                    },
                }
                created_notifications.append(track_notif)

            # Tracks that were not remixes and turned into remixes
            if not prev_entry.remix_of and entry.remix_of:
                # Add notification to remix track owner
                parent_remix_tracks = [
                    t["parent_track_id"] for t in entry.remix_of["tracks"]
                ]
                remix_track_parents = (
                    session.query(Track.owner_id, Track.track_id)
                    .filter(
                        Track.track_id.in_(parent_remix_tracks),
                        Track.is_unlisted == False,
                        Track.is_delete == False,
                        Track.is_current == True,
                    )
                    .all()
                )
                for remix_track_parent in remix_track_parents:
                    [
                        remix_track_parent_owner,
                        remix_track_parent_id,
                    ] = remix_track_parent
                    remix_notif = {
                        const.notification_type: const.notification_type_remix_create,
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        const.notification_initiator: entry.owner_id,
                        # TODO: is entity owner id necessary for tracks?
                        const.notification_metadata: {
                            const.notification_entity_type: "track",
                            const.notification_entity_id: entry.track_id,
                            const.notification_entity_owner_id: entry.owner_id,
                            const.notification_remix_parent_track_user_id: remix_track_parent_owner,
                            const.notification_remix_parent_track_id: remix_track_parent_id,
                        },
                    }
                    remix_created_notifications.append(remix_notif)

        notifications_unsorted.extend(remix_created_notifications)

        # Aggregate playlist/album notifs
        collection_query = session.query(Playlist)
        # TODO: Is it valid to use is_current here? Might not be the right info...
        collection_query = collection_query.filter(
            Playlist.is_delete == False,
            Playlist.is_private == False,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )
        collection_query = collection_query.filter(
            Playlist.created_at == Playlist.updated_at
        )
        collection_results = collection_query.all()

        for entry in collection_results:
            collection_notif = {
                const.notification_type: const.notification_type_create,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.playlist_owner_id,
            }
            metadata = {
                const.notification_entity_id: entry.playlist_id,
                const.notification_entity_owner_id: entry.playlist_owner_id,
                const.notification_collection_content: entry.playlist_contents,
            }
            if entry.is_album:
                metadata[const.notification_entity_type] = "album"
            else:
                metadata[const.notification_entity_type] = "playlist"
            collection_notif[const.notification_metadata] = metadata
            created_notifications.append(collection_notif)

        # Playlists that were private and turned to public aka 'published'
        # TODO: Consider switching blocknumber for updated at?
        publish_playlists_query = session.query(Playlist)
        publish_playlists_query = publish_playlists_query.filter(
            Playlist.is_private == False,
            Playlist.created_at != Playlist.updated_at,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )
        publish_playlist_results = publish_playlists_query.all()

        for entry in publish_playlist_results:
            prev_entry_query = (
                session.query(Playlist)
                .filter(
                    Playlist.playlist_id == entry.playlist_id,
                    Playlist.blocknumber < entry.blocknumber,
                )
                .order_by(desc(Playlist.blocknumber))
            )
            # Previous private entry indicates transition to public, triggering a notification
            prev_entry = prev_entry_query.first()
            if prev_entry.is_private == True:
                publish_playlist_notif = {
                    const.notification_type: const.notification_type_create,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: entry.created_at,
                    const.notification_initiator: entry.playlist_owner_id,
                }
                metadata = {
                    const.notification_entity_id: entry.playlist_id,
                    const.notification_entity_owner_id: entry.playlist_owner_id,
                    const.notification_collection_content: entry.playlist_contents,
                    const.notification_entity_type: "playlist",
                }
                publish_playlist_notif[const.notification_metadata] = metadata
                created_notifications.append(publish_playlist_notif)

        notifications_unsorted.extend(created_notifications)

        # Get additional owner info as requested for listen counts
        tracks_owner_query = session.query(Track).filter(
            Track.is_current == True, Track.track_id.in_(track_ids_to_owner)
        )
        track_owner_results = tracks_owner_query.all()
        for entry in track_owner_results:
            owner = entry.owner_id
            track_id = entry.track_id
            owner_info[const.tracks][track_id] = owner

        # Get playlist updates
        today = date.today()
        thirty_days_ago = today - timedelta(days=30)
        thirty_days_ago_time = datetime(
            thirty_days_ago.year, thirty_days_ago.month, thirty_days_ago.day, 0, 0, 0
        )
        playlist_update_query = session.query(Playlist)
        playlist_update_query = playlist_update_query.filter(
            Playlist.is_current == True,
            Playlist.is_delete == False,
            Playlist.last_added_to >= thirty_days_ago_time,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )

        playlist_update_results = playlist_update_query.all()

        # Represents all playlist update notifications
        playlist_update_notifications = []
        playlist_update_notifs_by_playlist_id = {}
        for entry in playlist_update_results:
            playlist_update_notifs_by_playlist_id[entry.playlist_id] = {
                const.notification_type: const.notification_type_playlist_update,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.playlist_owner_id,
                const.notification_metadata: {
                    const.notification_entity_id: entry.playlist_id,
                    const.notification_entity_type: "playlist",
                    const.notification_playlist_update_timestamp: entry.last_added_to,
                },
            }

        # get all favorited playlists
        # playlists may have been favorited outside the blocknumber bounds
        # e.g. before the min_block_number
        playlist_favorites_query = session.query(Save)
        playlist_favorites_query = playlist_favorites_query.filter(
            Save.is_current == True,
            Save.is_delete == False,
            Save.save_type == SaveType.playlist,
        )
        playlist_favorites_results = playlist_favorites_query.all()

        # dictionary of playlist id => users that favorited said playlist
        # e.g. { playlist1: [user1, user2, ...], ... }
        # we need this dictionary to know which users need to be notified of a playlist update
        # (dict.update returns None, so `or accumulator` yields the mutated
        # accumulator back to reduce on every step)
        users_that_favorited_playlists_dict = ft.reduce(
            lambda accumulator, current: accumulator.update(
                {
                    current.save_item_id: accumulator[current.save_item_id]
                    + [current.user_id]
                    if current.save_item_id in accumulator
                    else [current.user_id]
                }
            )
            or accumulator,
            playlist_favorites_results,
            {},
        )

        for playlist_id in users_that_favorited_playlists_dict:
            # Only playlists updated within this block window get a notification.
            if playlist_id not in playlist_update_notifs_by_playlist_id:
                continue
            playlist_update_notif = playlist_update_notifs_by_playlist_id[playlist_id]
            playlist_update_notif[const.notification_metadata].update(
                {
                    const.notification_playlist_update_users: users_that_favorited_playlists_dict[
                        playlist_id
                    ]
                }
            )
            playlist_update_notifications.append(playlist_update_notif)

        notifications_unsorted.extend(playlist_update_notifications)

    # Final sort - TODO: can we sort by timestamp?
    sorted_notifications = sorted(
        notifications_unsorted,
        key=lambda i: i[const.notification_blocknumber],
        reverse=False,
    )

    return api_helpers.success_response(
        {
            "notifications": sorted_notifications,
            "info": notification_metadata,
            "milestones": milestone_info,
            "owners": owner_info,
        }
    )
def collapse_umi(cells):
    """
    Input set of genotypes for each read.

    Return a dict with one [ref_count, alt_count] entry per cell barcode,
    counting each UMI once and discarding UMIs whose reads disagree.
    """
    collapsed_data = {}
    for cell_barcode, umi_set in cells.items():
        for _, genotypes in umi_set.items():
            # A UMI whose reads disagree on genotype is ambiguous: drop it.
            if len(set(genotypes)) > 1:
                continue
            collapsed_data.setdefault(cell_barcode, []).append(genotypes[0])
    # count total ref, total alt UMIs for each genotype
    for key, value in collapsed_data.items():
        collapsed_data[key] = [value.count("ref"), value.count("alt")]
        assert len(collapsed_data[key]) == 2
    return collapsed_data
def get_corrected_PRES(PRES: np.ndarray, ele_gap: float, TMP: np.ndarray) -> np.ndarray:
    """Correct air pressure for an elevation difference.

    Args:
        PRES (np.ndarray): uncorrected pressure [hPa]
        ele_gap (float): elevation difference [m]
        TMP (np.ndarray): air temperature [deg C]

    Returns:
        np.ndarray: elevation-corrected pressure [hPa]

    Notes:
        Uses a mean temperature lapse rate of 0.0065 deg C/m.
    """
    lapse_term = (ele_gap * 0.0065) / (TMP + 273.15)
    return PRES * np.power(1 - lapse_term, 5.257)
def mishra_bird(x, *args):
    """Mishra's Bird constrained function with 2 parameters.

    To be used in the constrained optimization examples.

    When subject to:
        (x[0] + 5) ** 2 + (x[1] + 5) ** 2 < 25
    the global minimum is at f(-3.1302, -1.5821) = -106.7645

    Bounds: -10 <= x[0] <= 0
            -6.5 <= x[1] <= 0

    Reference:
        https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    x0, x1 = x[0], x[1]
    term_a = np.sin(x1) * np.exp((1 - np.cos(x0)) ** 2)
    term_b = np.cos(x0) * np.exp((1 - np.sin(x1)) ** 2)
    return term_a + term_b + (x0 - x1) ** 2
def find_target_migration_file(database=DEFAULT_DB_ALIAS, changelog_file=None):
    """Finds best matching target migration file.

    Walks the Liquibase changelog: if the last element of the root
    <databaseChangeLog> is an <include>, recurses into the included file;
    otherwise the current file is the target.
    """
    database = database or DEFAULT_DB_ALIAS
    changelog_file = changelog_file or get_changelog_file_for_database(database)
    try:
        doc = minidom.parse(changelog_file)
    except ExpatError as ex:
        raise InvalidChangelogFile(
            'Could not parse XML file %s: %s' % (changelog_file, ex))
    try:
        root = doc.getElementsByTagName('databaseChangeLog')[0]
    except IndexError:
        raise InvalidChangelogFile(
            'Missing <databaseChangeLog> node in file %s' % (
                changelog_file))
    # Only element children matter; skip text/comment nodes.
    element_nodes = [node for node in root.childNodes
                     if node.nodeType is node.ELEMENT_NODE]
    if not element_nodes:
        return changelog_file
    last_node = element_nodes[-1]
    if last_node.tagName != 'include':
        return changelog_file
    included_file = last_node.attributes.get('file').firstChild.data
    return find_target_migration_file(
        database=database, changelog_file=included_file)
from functools import reduce
def encode_message(ctl, addr, src_id, msg_code, data=""):
    """Encode a message for the PIM, assumes data formatted"""
    if ctl == -1:
        ctl = create_control_word(addr.is_link)
    length = 7 + len(data)
    # Fold the packet length into the high byte of the control word.
    ctl |= length << 8
    msg = bytearray(length)
    msg[0:2] = ctl.to_bytes(2, byteorder="big")
    msg[2] = addr.network_id
    msg[3] = addr.upb_id
    msg[4] = src_id
    msg[5] = msg_code
    if data:
        msg[6 : len(data) + 6] = data
    # Checksum: all bytes (including the checksum itself) sum to 0 mod 256.
    msg[-1] = (256 - sum(msg)) % 256
    return msg.hex().upper()
def rule_16(l, r):
    """
    Rule for "vyaṁjana sandhi - ghośī karaṇaya"
    :return:
    """
    l_suffix = utils.endswith(l, letters.AGOSHA_LETTERS)
    r_prefix = utils.startswith(r, letters.GOSHA_LETTERS)
    # The rule only applies when the left word ends in an unvoiced letter
    # and the right word starts with a voiced one.
    if l_suffix is None or r_prefix is None:
        return None
    stem = l[:-len(l_suffix)]
    voiced = letters.AGOSHA_TO_GOSHA_MAPPING[l_suffix]
    if r_prefix in letters.VOWELS:
        # Vowel-initial right side: take the base voiced letter and attach
        # the vowel as a diacritic.
        return stem + voiced[0] + letters.DIACRITICS_MAPPING[r_prefix] + r[len(r_prefix):]
    return stem + voiced + r
def is_vertex_cover(G, vertex_cover):
    """Determines whether the given set of vertices is a vertex cover of graph G.

    A vertex cover is a set of vertices such that each edge of the graph
    is incident with at least one vertex in the set.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check the vertex cover.

    vertex_cover :
        Iterable of nodes.

    Returns
    -------
    is_cover : bool
        True if the given iterable forms a vertex cover.

    Examples
    --------
    This example checks two covers for a graph, G, of a single Chimera
    unit cell. The first uses the set of the four horizontal qubits, which
    do constitute a cover; the second set removes one node.

    >>> import dwave_networkx as dnx
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> cover = [0, 1, 2, 3]
    >>> dnx.is_vertex_cover(G,cover)
    True
    >>> cover = [0, 1, 2]
    >>> dnx.is_vertex_cover(G,cover)
    False
    """
    chosen = set(vertex_cover)
    # Every edge must touch at least one chosen vertex.
    for u, v in G.edges:
        if u not in chosen and v not in chosen:
            return False
    return True
def datetimeformat(value, formatstring='%Y-%m-%d %H:%M', nonchar=''):
    """Format a datetime.

    Tries to convert the given ``value`` to a ``datetime`` object and then
    formats it according to ``formatstring``::

        {{ datetime.now()|datetimeformat }}
        {{ "20171224T235959"|datetimeformat('%H:%M') }}

    Falsy values yield ``nonchar`` unchanged.
    """
    if not value:
        return nonchar
    formatted = convert_to_datetime(value).strftime(formatstring)
    # Swap ASCII hyphens for non-breaking hyphens so dates don't wrap mid-value.
    return Markup(formatted.replace('-', '‑'))
def get_interface_for_name(protocols, target_interface_name):
    # type: (Iterable[Protocol], str) -> Optional[Interface]
    """Given a name string, gets the interface that has that name, or None."""
    matches = (
        interface
        for protocol in protocols
        for interface in protocol.interfaces
        if interface.name == target_interface_name
    )
    # First match wins, mirroring the original nested-loop search order.
    return next(matches, None)
import os
def __get_out_file(in_file, out_dir):
    """
    Get the path of the output file.

    Parameters
    ----------
    in_file: str
        Path to input file.
    out_dir: str
        Path to output directory; if empty, the input file's directory
        (or '.') is used instead.

    Returns
    -------
    file_no_ext: str
        The file name without extension.
    out_dir: str
        The path to the output directory.
    out_file: str
        The path of the output file.
    """
    if out_dir == '':
        # Fall back to the input file's directory, then to the CWD.
        out_dir = os.path.dirname(in_file) or '.'
    # Strip both the extension and any directory components from the name.
    file_no_ext = os.path.splitext(in_file)[0].split(os.sep)[-1]
    out_file = '/'.join([out_dir, file_no_ext])
    return file_no_ext, out_dir, out_file
def extract_validation_set(x: ndarray, y: ndarray, size=6000):
    """Will extract a validation set of "size" from given x,y pair.

    Parameters:
        x (ndarray): numpy array
        y (ndarray): numpy array
        size (int): Size of validation set. Must be smaller than examples count
            in x, y and multiple of label_count

    Returns:
        ((x_train, y_train), (x_val, y_val)) split after class-balanced
        shuffling.
    """
    assert x.shape[0] == y.shape[0]
    assert (
        x.shape[0] % size == 0
    ), f"number of examples ({x.shape[0]}) needs to be evenly divisible by parameter size ({size})"
    assert size % len(set(y)) == 0, "size must be a multiple of number of labels"
    x_balanced, y_balanced = classes_balanced_randomized_per_partition(x, y)
    # First `size` shuffled examples form the validation split.
    val_x, train_x = x_balanced[:size], x_balanced[size:]
    val_y, train_y = y_balanced[:size], y_balanced[size:]
    return (train_x, train_y), (val_x, val_y)
def send_update(peer_ip, attr, nlri, withdraw):
    """Send a BGP UPDATE message through the running protocol instance.

    :param peer_ip: peer ip address (not used by the send itself)
    :param attr: path attributes for the update
    :param nlri: announced routes
    :param withdraw: withdrawn routes
    :return: dict with a ``status`` flag, plus a ``code`` message on failure
    """
    message = {'attr': attr, 'nlri': nlri, 'withdraw': withdraw}
    protocol = cfg.CONF.bgp.running_config['factory'].fsm.protocol
    if not protocol.send_update(message):
        return {
            'status': False,
            'code': 'failed when send this message out'
        }
    return {'status': True}
def generate_basic_blame_experiment_actions(
    project: Project,
    bc_file_extensions: tp.Optional[tp.List[BCFileExtensions]] = None,
    extraction_error_handler: tp.Optional[PEErrorHandler] = None
) -> tp.List[actions.Step]:
    """
    Generate the basic actions for a blame experiment.

    This handles caching of BC files and compiles the project when needed.

    Args:
        project: reference to the BB project
        bc_file_extensions: list of bitcode file extensions (e.g. opt, no opt)
        extraction_error_handler: handler to manage errors during the
                                  extraction process

    Returns:
        the list of experiment steps to run
    """
    cache_actions = get_bc_cache_actions(
        project, bc_file_extensions, extraction_error_handler
    )
    return cache_actions
from typing import Any
from typing import Optional
from typing import Union
from typing import OrderedDict
from typing import Mapping
from typing import Literal
def is_json_encodable(t: Any) -> bool:
    """ Checks whether a type is json encodable.

    Recursively inspects *t*: base JSON types, ``None``/``NoneType`` and
    ``...`` are accepted directly; named tuples, generic containers,
    unions and literals are accepted when all their argument types are.
    Anything unrecognised returns False.
    """
    # pylint:disable=invalid-name,too-many-return-statements,too-many-branches
    if not is_typecheckable(t):
        return False
    if t in JSON_BASE_TYPES:
        return True
    if t in (None, type(None)):
        return True
    if t is ...:
        return True
    if is_namedtuple(t):
        # NOTE(review): ``_field_types`` is the pre-3.8 attribute; newer
        # Pythons expose ``__annotations__`` instead — confirm the supported
        # Python range before relying on this.
        field_types = getattr(t, "_field_types")
        return all(is_json_encodable(field_types[field]) for field in field_types)
    if hasattr(t, "__origin__") and hasattr(t, "__args__"):
        # Single-argument container generics (``deque`` is assumed to be
        # imported at module level — not visible in this chunk).
        if t.__origin__ in (list, set, frozenset, deque, Optional):
            return is_json_encodable(t.__args__[0])
        if t.__origin__ is tuple:
            # ``Tuple[X, ...]`` (variadic) vs fixed-arity tuples.
            if len(t.__args__) == 2 and t.__args__[1] is ...:  # pylint:disable=no-else-return
                return is_json_encodable(t.__args__[0])
            else:
                return all(is_json_encodable(s) for s in t.__args__)
        if t.__origin__ is Union:
            return all(is_json_encodable(s) for s in t.__args__)
        # NOTE(review): on modern Pythons ``Mapping[...]`` has origin
        # ``collections.abc.Mapping``, not ``typing.Mapping`` — verify these
        # membership tests match the targeted Python versions.
        if t.__origin__ in (dict, OrderedDict, Mapping):
            # JSON objects require string keys.
            return t.__args__[0] == str and is_json_encodable(t.__args__[1])
        if t.__origin__ is Literal:
            return all(isinstance(s, JSON_BASE_TYPES+(type(None),)) for s in t.__args__)
    return False
def request_parse_platform_id(validated_request):
    """Parses the PlatformID from a provided visibility API request.

    Args:
        validated_request (obj:Request): A Flask request object that has been generated for a
                                         visibility/opportunity endpoint.

    Requires:
        The request object MUST have been validated against the requested schema.

    Returns:
        A list of Satellite model objects; every satellite when no
        ``PlatformID`` key is present in the request body.

    Throws:
        InputError: If any provided platform ID(s) are invalid.
    """
    body = validated_request.json
    if 'PlatformID' not in body:
        return Satellite.query.all()
    satellites = []
    for platform_id in body['PlatformID']:
        record = Satellite.query.get(platform_id)
        if record is None:
            raise InputError('PlatformID', 'No such platform')
        satellites.append(record)
    return satellites
def crear_comentario_submeta(request, pk):
    """ Create and attach a comment to a sub-goal identified by its id """
    # meta = get_object_or_404(Meta, pk=pk)
    meta = Submeta.objects.get(pk=pk)
    # On POST the comment is saved and the browser is redirected
    # back to the goal page.
    if request.method == "POST":
        form = ComentarioFormulario(request.POST)
        if form.is_valid():
            # Build the comment object from the form data
            comentario = form.save(commit=False)
            # Link it to the goal (foreign key)
            comentario.meta = meta
            # Persist the comment to the database
            comentario.save()
            # comentarios = Comentario.objects.filter(meta__pk=pk)
            return redirect('info_submeta', pk=meta.id)
    # Otherwise render an empty form in the crear_comentario template
    # so the user can enter the comment data.
    # NOTE(review): passing the Submeta instance to a *comment* form via
    # ``instance=`` looks suspicious — confirm this is intentional.
    else:
        form = ComentarioFormulario(instance=meta)
    return render(request, 'crear_comentario_submeta.html', {'form': form})
def load_mask_from_shapefile(filename, shape, transform):
    """Read *filename* as a multipolygon and rasterise it into a mask.

    The returned mask has the given *shape* and uses *transform* to map
    polygon coordinates onto pixels.
    """
    multipolygon, _ = load_shapefile2multipolygon(filename)
    return multipolygon2mask(multipolygon, shape, transform)
def _spectra_resample(spectra, wvl_orig, wvl_target):
    """Linearly resample a spectrum onto a new wavelength grid.

    Only finite flux samples are used for the interpolation; target
    wavelengths outside the finite range of the original spectrum are
    filled with NaN.

    :param spectra: flux values on the original wavelength grid
    :param wvl_orig: original wavelengths, same length as ``spectra``
    :param wvl_target: wavelengths to resample onto
    :return: resampled flux array, same length as ``wvl_target``
    """
    idx_finite = np.isfinite(spectra)
    min_wvl_s = np.nanmin(wvl_orig[idx_finite])
    max_wvl_s = np.nanmax(wvl_orig[idx_finite])
    # Only interpolate inside the observed wavelength span; everything
    # outside stays NaN rather than being extrapolated.
    idx_target = np.logical_and(wvl_target >= min_wvl_s,
                                wvl_target <= max_wvl_s)
    new_flux = np.interp(wvl_target[idx_target], wvl_orig[idx_finite], spectra[idx_finite])
    new_flux_out = np.full(len(wvl_target), np.nan)
    new_flux_out[idx_target] = new_flux
    return new_flux_out
from datetime import datetime
def test_standard_surface():
    """Test to read a standard surface file.

    Decodes a GEMPAK standard surface file and compares every station's
    values (except free text) against a reference CSV export.
    """
    def dtparse(string):
        # Parse the 'YYMMDD/HHMM' timestamps used in the CSV index.
        return datetime.strptime(string, '%y%m%d/%H%M')

    # Free-text reports are not numerically comparable.
    skip = ['text']

    gsf = GempakSurface(get_test_data('gem_std.sfc'))
    gstns = gsf.sfjson()

    # NOTE(review): pandas' ``date_parser`` argument is deprecated in newer
    # releases in favour of ``date_format`` — confirm the pinned pandas version.
    gempak = pd.read_csv(get_test_data('gem_std.csv'),
                         index_col=['STN', 'YYMMDD/HHMM'],
                         parse_dates=['YYMMDD/HHMM'],
                         date_parser=dtparse)

    for stn in gstns:
        # Reference rows are keyed by (station id, observation time).
        idx_key = (stn['properties']['station_id'],
                   stn['properties']['date_time'])
        gemsfc = gempak.loc[idx_key, :]

        for param, val in stn['values'].items():
            if param not in skip:
                assert val == pytest.approx(gemsfc[param.upper()])
def calc_mu(Rs):
    """Return the LinKK mu metric for a set of fit resistances.

    mu = 1 - (sum of |negative R|) / (sum of |non-negative R|); values close
    to 1 indicate few/small negative resistances.
    """
    negatives = sum(-r for r in Rs if r < 0)
    positives = sum(r for r in Rs if r >= 0)
    return 1 - negatives / positives
def computeAnomaly(data):
    """
    Remove the seasonality

    Subtracts the mean climatology for the detected period from each
    matching time group of *data*.
    """
    period = _get_period(data)
    climatology = computeMeanClimatology(data)
    return data.groupby(f'time.{period}') - climatology
def product(*args):
    """Calculate product of args.

    Requires at least one argument; works for anything supporting ``*``.

    @param args: list of floats to multiply
    @type args: list of float
    @return: product of args
    @rtype: float
    """
    result = args[0]
    for factor in args[1:]:
        result = result * factor
    return result
def unpack_into_tensorarray(value, axis, size=None):
    """
    unpacks a given tensor along a given axis into a TensorArray

    Parameters:
    ----------
    value: Tensor
        the tensor to be unpacked
    axis: int
        the axis to unpack the tensor along
    size: int
        the size of the array to be used if shape inference resulted in None

    Returns: TensorArray
        the unpacked TensorArray

    Raises: ValueError
        if neither shape inference nor ``size`` provides the array length
    """
    shape = value.get_shape().as_list()
    rank = len(shape)
    dtype = value.dtype
    # Prefer the statically inferred dimension; fall back to the caller-
    # supplied size when the shape is unknown at graph-construction time.
    array_size = shape[axis] if not shape[axis] is None else size

    if array_size is None:
        raise ValueError("Can't create TensorArray with size None")

    array = tf.TensorArray(dtype=dtype, size=array_size)
    # Move the unpack axis to the front (swap dims 0 and `axis`, keep the
    # rest in order) so unstack() splits along the requested axis.
    dim_permutation = [axis] + list(range(1, axis)) + [0] + list(range(axis + 1, rank))
    unpack_axis_major_value = tf.transpose(value, dim_permutation)
    full_array = array.unstack(unpack_axis_major_value)

    return full_array
from datetime import datetime
def get_error_page(status_code, message):
    """Build the template context for an error page.

    :param status_code: HTTP status code to display
    :param message: human-readable error message
    :return: context dict with site metadata, the status/message and the
             current year
    """
    return {
        'site_web': settings.SITE_TITLE,
        'site_url': reverse(settings.SITE_NAME),
        'status_code': status_code,
        'message': message,
        'date': datetime.now().year,
    }
def fits_difference(*args, **keys):
    """Difference two FITS files; parameters are forwarded to FitsDifferencer."""
    return FitsDifferencer(*args, **keys).difference()
def esta_balanceada(expressao):
    """
    Check whether the parentheses, brackets and braces in an expression
    are balanced, using only the stack (Pilha) built in the previous class.

    Time and space complexity are both O(n).

    :param expressao: string with the expression to check
    :return: True when the expression is balanced, False otherwise
    """
    pilha = Pilha()
    if expressao == "":
        return True
    elif expressao[0] in ')}]':
        # A closing bracket can never come first.
        return False
    else:
        # NOTE: every non-opening character is treated as a closer and pops
        # the stack — input is assumed to contain brackets only.
        for caracter in expressao:
            if caracter in '({[':
                pilha.empilhar(caracter)
            else:
                try:
                    desenpilhado = pilha.desempilhar()
                except PilhaVaziaErro:
                    # Bug fix: a closer with an empty stack (e.g. "())") means
                    # the expression is unbalanced. The previous code returned
                    # ``pilha.vazia()``, which is True here and wrongly
                    # reported such inputs as balanced.
                    return False
                if caracter == '}' and desenpilhado != '{':
                    return False
                elif caracter == ']' and desenpilhado != '[':
                    return False
                elif caracter == ')' and desenpilhado != '(':
                    return False
    return pilha.vazia()
def create_doc(im_src, tag, coords, fea_arr, fea_bin_arr):
    """
    Create elasticsearch doc

    Params:
        im_src: image file name
        tag: tag or class for image
        coords: list of boxes corresponding to a tag
        fea_arr: list of ImFea objects
        fea_bin_arr: list of ImFeaBin objects

    Returns:
        dict ready to be indexed, with base64-serialized feature arrays.
    """
    doc = {}
    doc['coords'] = coords
    f_bin = ImFeaBinArr()
    f = ImFeaArr()
    f.arr.extend(fea_arr)
    f_bin.arr.extend(fea_bin_arr)
    # Protobuf messages are serialized and base64-encoded for storage.
    obj_bin_str = b64encode(f_bin.SerializeToString())
    obj_str = b64encode(f.SerializeToString())
    doc['sigs'] = obj_str
    doc['bin_sigs'] = obj_bin_str
    # Bug fix: the original referenced an undefined ``im_name``; the image
    # file name is passed in as the ``im_src`` parameter.
    doc['im_src'] = im_src
    doc['cl'] = tag
    return doc
def scaled_mouse_pos(mouse): # pragma: no cover
    """
    Return the mouse position scaled to the image coordinate system.

    Parameters
    ----------
    mouse : int * int
        The actual mouse position (window coordinates)

    Returns
    -------
    int * int
        The position scaled to the configured screen size
    """
    # Fetch the current window dimensions
    window_dimensions = ut.get_screen_size()
    # Compute the scale factors between the configured screen size and
    # the actual window size
    scale_factor_x = cf.SCREEN_WIDTH / window_dimensions[0]
    scale_factor_y = cf.SCREEN_HEIGHT / window_dimensions[1]
    return mouse[0] * scale_factor_x, mouse[1] * scale_factor_y
import re
def is_valid_zcs_image_id(zcs_image_id):
    """
    Validates Zadara Container Services (ZCS) image IDs, also known as the ZCS
    image "name". A valid ZCS image name should look like: img-00000001 - It
    should always start with "img-" and end with 8 hexadecimal characters in
    lower case.

    :type zcs_image_id: str
    :param zcs_image_id: The ZCS image name to be validated.

    :rtype: bool
    :return: True or False depending on whether zcs_image_id passes
        validation.
    """
    if zcs_image_id is None:
        return False
    # fullmatch (rather than match with '$') also rejects a trailing
    # newline, which '$' would silently tolerate.
    return re.fullmatch(r'img-[0-9a-f]{8}', zcs_image_id) is not None
# Bug fix: the body calls ``random()`` directly, so the *function* must be
# imported — ``import random`` would make every ``random()`` call raise
# ``TypeError: 'module' object is not callable``.
from random import random
def get_topology2(gid: int, cfg: Config):
    """
    Create a uniformly and randomly sampled genome of fixed topology:
    Sigmoid with bias 1.5 --> Actuation default of 95,3%
    (key=0, bias=1.5)  (key=1, bias=?)
       ____ /   /
      /        /
    GRU       /
     |  _____/
     | /
    (key=-1)

    :param gid: key for the new genome
    :param cfg: global configuration (genome/bot sub-configs are used)
    :return: the constructed Genome
    """
    # Create an initial dummy genome with fixed configuration
    genome = Genome(
        key=gid,
        num_outputs=cfg.genome.num_outputs,
        bot_config=cfg.bot,
    )

    # Setup the parameter-ranges used for uniform sampling
    conn_range = cfg.genome.weight_max_value - cfg.genome.weight_min_value
    bias_range = cfg.genome.bias_max_value - cfg.genome.bias_min_value
    rnn_range = cfg.genome.rnn_max_value - cfg.genome.rnn_min_value

    # Create the nodes
    genome.nodes[0] = OutputNodeGene(key=0, cfg=cfg.genome)  # OutputNode 0
    genome.nodes[0].bias = 1.5  # Drive with 0.953 actuation by default
    genome.nodes[1] = OutputNodeGene(key=1, cfg=cfg.genome)  # OutputNode 1
    genome.nodes[1].bias = random() * bias_range + cfg.genome.bias_min_value  # Uniformly sampled bias
    genome.nodes[2] = GruNodeGene(key=2, cfg=cfg.genome, input_keys=[-1], input_keys_full=[-1])  # Hidden node
    genome.nodes[2].bias = 0  # Bias is irrelevant for GRU-node

    # Uniformly sample the genome's GRU-component
    # (``rand_arr`` is a project helper assumed in scope at module level.)
    genome.nodes[2].bias_h = rand_arr((3,)) * bias_range + cfg.genome.bias_min_value
    genome.nodes[2].weight_xh_full = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value
    genome.nodes[2].weight_hh = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value

    # Create the connections
    genome.connections = dict()

    # input2gru
    key = (-1, 2)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = 1  # Simply forward distance
    genome.connections[key].enabled = True

    # gru2output - fixed high weight
    key = (2, 1)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = 3  # Enforce capabilities of full spectrum
    genome.connections[key].enabled = True

    # input2output - Uniformly sampled
    key = (-1, 1)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = random() * conn_range + cfg.genome.weight_min_value
    genome.connections[key].enabled = True

    genome.update_rnn_nodes(config=cfg.genome)
    return genome
def resolve_implicits(implies, opts):
    # type: (Dict[str, List[str]], List[str]) -> Set[str]
    """Adds implied logging options recursively so that specifying e.g.
    --debug=popenio results in --debug=popenio,popen.

    Repeats expansion passes until the option set stops growing
    (fixed point).
    """
    optset = set(opts)
    while True:
        size_before = len(optset)
        for opt, impliedopts in implies.items():
            if opt in optset:
                optset.update(impliedopts)
        if len(optset) == size_before:
            return optset
def dry_press(
    H,
    Pv,
    alt_setting=P0,
    alt_units=default_alt_units,
    press_units=default_press_units,
):
    """
    Returns dry air pressure, i.e. the total air pressure, less the water
    vapour pressure ``Pv``.
    """
    pressure_altitude = pressure_alt(H, alt_setting, alt_units=alt_units)
    total_pressure = alt2press(
        pressure_altitude, press_units=press_units, alt_units=alt_units
    )
    return total_pressure - Pv
def InvocationAddCallerAuthid(builder, callerAuthid):
    """This method is deprecated. Please switch to AddCallerAuthid."""
    # Backwards-compatibility shim: delegates unchanged to AddCallerAuthid.
    return AddCallerAuthid(builder, callerAuthid)
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
  """Performs the max pooling on the input.

  Note internally this op reshapes and uses the underlying 2d operation.

  Args:
    input: A 3-D `Tensor` of the format specified by `data_format`.
    ksize: An int or list of `ints` that has length `1` or `3`. The size of the
      window for each dimension of the input tensor.
    strides: An int or list of `ints` that has length `1` or `3`. The stride of
      the sliding window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
      the "returns" section of `tf.nn.convolution` for details.
    data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool1d", [input]) as name:
    if data_format is None:
      data_format = "NWC"
    channel_index = 1 if data_format.startswith("NC") else 2
    # Prepend a unit batch entry so ksize/strides become 4-D for the 2-D op.
    ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
    strides = [1] + _get_sequence(strides, 1, channel_index, "strides")

    # Insert a dummy height dimension and switch to the matching 2-D layout,
    # so the NHWC/NCHW max_pool kernel can be reused for the 1-D case.
    expanding_dim = 1 if data_format == "NWC" else 2
    data_format = "NHWC" if data_format == "NWC" else "NCHW"

    input = expand_dims_v2(input, expanding_dim)
    result = gen_nn_ops.max_pool(
        input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
    # Drop the dummy dimension again to return a 3-D tensor.
    return array_ops.squeeze(result, expanding_dim)
# Bug fix: the original imported ``Concatenate`` from ``typing`` — that is a
# typing construct, not the Keras merge layer, and calling ``Concatenate()``
# on it fails at runtime. ``Concatenate`` must come from
# ``tensorflow.keras.layers``, like the other layers used here (Conv2D,
# BatchNormalization, ...), which this snippet assumes are imported at
# module level.
def DeepLabV3Plus(shape):
    """Build a DeepLabV3+ segmentation model on a ResNet50 backbone.

    :param shape: input image shape, e.g. (H, W, 3)
    :return: a Keras Model producing a single-channel sigmoid mask
    """
    # Inputs
    inputs = Input(shape)

    # Pre-trained ResNet50 backbone
    base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=inputs)

    # High-level features -> ASPP, upsampled x4
    image_features = base_model.get_layer('conv4_block6_out').output
    x_a = ASPP(image_features)
    x_a = UpSampling2D((4, 4), interpolation="bilinear")(x_a)

    # Low-level features, reduced to 48 channels
    x_b = base_model.get_layer('conv2_block2_out').output
    x_b = Conv2D(filters=48, kernel_size=1, padding='same', use_bias=False)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Activation('relu')(x_b)

    # Decoder: fuse high- and low-level features
    x = Concatenate()([x_a, x_b])
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((4, 4), interpolation="bilinear")(x)

    # Output: single-channel sigmoid mask
    x = Conv2D(1, (1, 1), name='output_layer')(x)
    x = Activation('sigmoid')(x)

    # Model
    model = Model(inputs=inputs, outputs=x)
    return model
import argparse
import os
def get_parameters():
    """Parse the supplied command line arguments.

    Environment variables, when set, override the corresponding command
    line values.

    Returns:
        args: The parsed and validated command line arguments
    """
    parser = argparse.ArgumentParser(
        description="Start up and shut down ASGs on demand"
    )

    # Parse command line inputs and set defaults
    parser.add_argument("--aws-profile", default="default")
    parser.add_argument("--aws-region", default="eu-west-2")
    parser.add_argument("--sns-topic", help="SNS topic ARN")
    parser.add_argument("--environment", help="Environment value", default=UNSET_TEXT)
    parser.add_argument("--application", help="Application", default=UNSET_TEXT)
    parser.add_argument(
        "--slack-channel-override",
        help="Slack channel to use for overriden jobs",
        default=UNSET_TEXT,
    )
    parser.add_argument("--log-level", help="Log level for lambda", default="INFO")
    _args = parser.parse_args()

    # Override arguments with environment variables where set
    env_overrides = {
        "AWS_PROFILE": "aws_profile",
        "AWS_REGION": "aws_region",
        "SNS_TOPIC": "sns_topic",
        "ENVIRONMENT": "environment",
        "APPLICATION": "application",
        "SLACK_CHANNEL_OVERRIDE": "slack_channel_override",
        "LOG_LEVEL": "log_level",
    }
    for env_name, attr_name in env_overrides.items():
        if env_name in os.environ:
            setattr(_args, attr_name, os.environ[env_name])

    return _args
def white_noise(template, rms_uKarcmin_T, rms_uKarcmin_pol=None):
    """Generate a white noise realisation corresponding to the template pixellisation

    Parameters
    ----------
    template: ``so_map`` template
      the template for the white noise generalisation
    rms_uKarcmin_T: float
      the white noise temperature rms in uK.arcmin
    rms_uKarcmin_pol: float
      the white noise polarisation rms in uK.arcmin
      if None set it to sqrt(2)*rms_uKarcmin_T
    """
    noise = template.copy()
    rad_to_arcmin = 60 * 180 / np.pi
    # Pixel areas in arcmin^2: uniform for HEALPIX, per-pixel for CAR.
    if noise.pixel == "HEALPIX":
        nside = noise.nside
        pixArea = hp.pixelfunc.nside2pixarea(nside) * rad_to_arcmin ** 2
    if noise.pixel == "CAR":
        pixArea = noise.data.pixsizemap() * rad_to_arcmin ** 2
    # Temperature-only map: one component.
    if noise.ncomp == 1:
        if noise.pixel == "HEALPIX":
            size = len(noise.data)
            # rms per pixel scales as rms_arcmin / sqrt(pixel area).
            noise.data = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)
        if noise.pixel == "CAR":
            size = noise.data.shape
            noise.data = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)
    # T, Q, U maps: three components, polarisation defaults to sqrt(2) * T rms.
    if noise.ncomp == 3:
        if rms_uKarcmin_pol is None:
            rms_uKarcmin_pol = rms_uKarcmin_T * np.sqrt(2)
        if noise.pixel == "HEALPIX":
            size = len(noise.data[0])
            noise.data[0] = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)
            noise.data[1] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)
            noise.data[2] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)
        if noise.pixel == "CAR":
            size = noise.data[0].shape
            noise.data[0] = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)
            noise.data[1] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)
            noise.data[2] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)
    return noise
def index():
    """Render the landing page template for requests to the site root."""
    return render_template('index.html')
import torch
def aromatic_bonds(mol: IndigoObject) -> dict:
    """Get whether bonds in a molecule are aromatic or not.

    Args:
        IndigoObject: molecule object

    Returns:
        dict: key - feature name, value - torch.tensor of booleans
    """
    # Bond order 4 is Indigo's encoding for an aromatic bond.
    flags = [bond.bondOrder() == 4 for bond in mol.iterateBonds()]
    # The list is doubled — presumably one entry per bond direction;
    # confirm against the downstream edge representation.
    return {"is_aromatic": torch.tensor(flags * 2).unsqueeze(1).float()}
import warnings
def get_unitroot(df: pd.DataFrame, fuller_reg: str, kpss_reg: str) -> pd.DataFrame:
    """Calculate test statistics for unit roots

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame of target variable
    fuller_reg : str
        Type of regression of ADF test
    kpss_reg : str
        Type of regression for KPSS test

    Returns
    -------
    pd.DataFrame
        Dataframe with results of ADF test and KPSS test
    """
    # The Augmented Dickey-Fuller test
    # Used to test for a unit root in a univariate process in the presence of serial correlation.
    try:
        result = adfuller(df, regression=fuller_reg)
    except MissingDataError:
        # Retry on NaN-free data when statsmodels rejects missing values.
        df = df.dropna(axis=0)
        result = adfuller(df, regression=fuller_reg)
    cols = ["Test Statistic", "P-Value", "NLags", "Nobs", "ICBest"]
    vals = [result[0], result[1], result[2], result[3], result[5]]
    data = pd.DataFrame(data=vals, index=cols, columns=["ADF"])

    # Kwiatkowski-Phillips-Schmidt-Shin test
    # Test for level or trend stationarity
    # This test seems to produce an Interpolation Error which says
    # The test statistic is outside of the range of p-values available in the
    # look-up table. The actual p-value is greater than the p-value returned.

    # Wrap this in catch_warnings to prevent the warning from surfacing.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res2 = kpss(df, regression=kpss_reg, nlags="auto")
    # KPSS has no Nobs/ICBest equivalents; pad with empty strings.
    vals2 = [res2[0], res2[1], res2[2], "", ""]
    data["KPSS"] = vals2
    return data
def lat_avg(data, lat_wgt):
    """Weighted latitude average of *data*.

    Inputs:
        data - n dimensional spatial data; the last two dimensions are
               assumed to be lat and lon respectively
        lat_wgt - 1-D weights by latitude (a length-1 array is broadcast
               over the whole field)
    """
    data_shape = data.shape
    if len(lat_wgt) == 1:
        # Single weight: apply uniformly everywhere.
        weights = np.broadcast_to(lat_wgt, data_shape)
    elif len(lat_wgt.shape) > 1:
        raise ValueError ("lat_wgt must be 1 dimensional latitude weights")
    else:
        # Expand (nlat,) -> (nlat, 1) -> (nlat, nlon) -> full data shape.
        column = lat_wgt.reshape(len(lat_wgt), 1)
        lat_lon = np.broadcast_to(column, data_shape[-2:])
        weights = np.broadcast_to(lat_lon, data_shape)
    return (data * weights).mean(axis=-2)
def create_line(line_coefficients, height=5, step=0.5, vis=False):
    """
    Args:
        line_coefficients: A dictionary containing cylindrical coefficients:
                        (r, x0, y0, z0, a, b, c
                        r not used: kept so the same coefficient group can
                        generate both a cylinder and a line, the line being
                        the central axis of the cylinder
                        x0, y0, z0 the starting center of the cylinder
                        a, b, c the axis coefficients of the cylinder)
        height: length of the line
        step: density of the line point cloud
        vis: whether to visualize the line
    Returns:
        numpy form of the line point cloud: n x 3
    @Author: Carlos_Lee 202111
    """
    x0 = line_coefficients['x0']
    y0 = line_coefficients['y0']
    z0 = line_coefficients['z0']
    a = line_coefficients['a']
    b = line_coefficients['b']
    c = line_coefficients['c']
    # Walk along the (normalised) axis direction in increments of `step`.
    norm = np.power(a * a + b * b + c * c, 0.5)
    distances = np.arange(0, height, step)
    npy = np.empty((len(distances), 3))
    npy[:, 0] = x0 + a / norm * distances
    npy[:, 1] = y0 + b / norm * distances
    npy[:, 2] = z0 + c / norm * distances
    if vis:
        coordinate_ = o3d.geometry.TriangleMesh.create_coordinate_frame(size=height / 2., origin=[0.0, 0.0, 0.0])
        pcd_ = o3d.geometry.PointCloud()
        pcd_.points = o3d.utility.Vector3dVector(npy)
        o3d.visualization.draw_geometries([coordinate_, pcd_], window_name="generate line",
                                          width=960, height=900, left=960, top=100)
    return npy
def get_result_summaries_query(start, end, sort, state, tags):
  """Returns TaskResultSummary.query() with these filters.

  Arguments:
    start: Earliest creation date of retrieved tasks.
    end: Most recent creation date of retrieved tasks, normally None.
    sort: Order to use. Must default to 'created_ts' to use the default. Cannot
        be used along start and end.
    state: One of State enum value as str. Use 'all' to get all tasks.
    tags: List of search for one or multiple task tags.

  Raises:
    ValueError: on tag filtering combined with a non-default sort, or on a
        malformed tag (must be 'key:value' with no surrounding whitespace).
  """
  # Disable the in-process local cache. This is important, as there can be up to
  # a thousand entities loaded in memory, and this is a pure memory leak, as
  # there's no chance this specific instance will need these again, therefore
  # this leads to 'Exceeded soft memory limit' AppEngine errors.
  q = TaskResultSummary.query(
      default_options=ndb.QueryOptions(use_cache=False))

  # Filter by one or more tags.
  if tags:
    # Add TaskResultSummary indexes if desired.
    if sort != 'created_ts':
      raise ValueError(
          'Add needed indexes for sort:%s and tags if desired' % sort)
    for tag in tags:
      parts = tag.split(':', 1)
      if len(parts) != 2 or any(i.strip() != i or not i for i in parts):
        raise ValueError('Invalid tags')
      # 'key:a|b' means "key is a OR b": expand into an IN() filter.
      values = parts[1].split(OR_DIM_SEP)
      separated_tags = ['%s:%s' % (parts[0], v) for v in values]
      q = q.filter(TaskResultSummary.tags.IN(separated_tags))
  return filter_query(TaskResultSummary, q, start, end, sort, state)
def approx_jacobian(tform, image, delta=0.01):
    """approximate the image pixel gradient wrt tform using central differences

    (This has been so helpful while troubleshooting jacobians,
    let's keep it around for unit testing.

    Parameters
    ----------
    tform : TForm
        current transform, to be applied to image and its gradient
    image : ndarray(h, v)
        untransformed image
    delta : real or ndarray(nparams)
        stepsize

    Returns
    -------
    jacobian : ndarray(h * v, nparams)
        transformation parameter derivatives at each image pixel.
        out-of-bounds points will be populated with 0's
    """
    # Allow a scalar delta: expand it to one stepsize per parameter.
    if not isinstance(delta, np.ndarray):
        delta = np.ones(len(tform.paramv)) * delta
    npixels = np.prod(tform.output_shape)
    gradvecs = np.empty((npixels, len(tform.paramv)))
    for i in range(len(tform.paramv)):
        # Central difference for parameter i:
        # (f(p + delta_i) - f(p - delta_i)) / (2 * delta_i), accumulated
        # via the +/- signed transforms below.
        dimage = np.zeros(tform.output_shape)
        for sign in (-1, 1):
            paramv = tform.paramv.copy()
            paramv[i] += delta[i] * sign
            stepT = tform.clone(paramv)
            dimage += stepT.imtransform(image) * sign
        gradvecs[:, i] = (dimage / (2 * delta[i])).flatten()
    # Out-of-bounds pixels come back as NaN; zero them per the docstring.
    return np.nan_to_num(gradvecs)
from typing import Collection
def create_collection(collection_id: str) -> Collection:
    """Creates a STAC Collection for Landsat Collection 2 Level-1 or Level-2
    data.

    NOTE(review): ``Collection``, ``ItemAssetsExtension`` etc. are pystac
    classes; the nearby ``from typing import Collection`` would shadow the
    pystac import and break this function — verify the module's imports.

    Args:
        collection_id (str): ID of the STAC Collection. Must be one of
        "landsat-c2-l1" or "landsat-c2-l2".

    Returns:
        Collection: The created STAC Collection.

    Raises:
        ValueError: when ``collection_id`` is not a known collection id.
    """
    if collection_id not in COLLECTION_IDS:
        raise ValueError(f"Invalid collection id: {collection_id}")

    # Static collection metadata (title, providers, extent, ...) comes from
    # a pre-built fragment keyed by the collection id.
    fragment = CollectionFragments(collection_id).collection()
    collection = Collection(id=collection_id,
                            title=fragment["title"],
                            description=fragment["description"],
                            license=fragment["license"],
                            keywords=fragment["keywords"],
                            providers=fragment["providers"],
                            extent=fragment["extent"],
                            summaries=fragment["summaries"])
    collection.add_links(fragment["links"])

    # Register the STAC extensions this collection advertises.
    item_assets = ItemAssetsExtension(collection)
    item_assets.item_assets = fragment["item_assets"]
    ItemAssetsExtension.add_to(collection)
    ViewExtension.add_to(collection)
    ScientificExtension.add_to(collection)
    RasterExtension.add_to(collection)
    EOExtension.add_to(collection)

    return collection
def rician_noise(image, sigma, rng=None):
    """
    Add Rician distributed noise to the input image.

    Two independent Gaussian noise fields are added in quadrature, which is
    how Rician noise arises from complex-valued Gaussian noise on a
    magnitude image.

    Parameters
    ----------
    image : array-like, shape ``(dim_x, dim_y, dim_z)`` or ``(dim_x, dim_y,
        dim_z, K)``
    sigma : double
    rng : random number generator (a numpy.random.RandomState instance).
    """
    real_noise = rng.normal(loc=0, scale=sigma, size=image.shape)
    imag_noise = rng.normal(loc=0, scale=sigma, size=image.shape)
    return np.sqrt((image + real_noise) ** 2 + imag_noise ** 2)
def compose_pinned_post(post):
    """
    1.Verify that this is the pinned post
    2.Obtain the results json from the results rig
    3.Compose the HTML for the compact graphic

    The pinned post's contents/headline are cached in MongoDB while the
    post is unpublished, so a previously published version keeps showing.
    """
    pinned_post = post
    # Get the timestamps collection
    client = MongoClient(app_config.MONGODB_URL)
    database = client['liveblog']
    collection = database.pinned
    try:
        post['pinned']
    except KeyError:
        logger.error("First post should always be the pinned post")
    # Cache pinned post contents
    if post['published mode'] != 'yes':
        # Unpublished: serve the cached copy, seeding the cache on first sight.
        result = collection.find_one({'_id': post['slug']})
        if not result:
            logger.debug('did not find pinned post %s' % post['slug'])
            collection.insert({
                '_id': post['slug'],
                'cached_contents': post['contents'],
                'cached_headline': post['headline'],
            })
            post['cached_contents'] = post['contents']
            post['cached_headline'] = post['headline']
        else:
            logger.debug('found pinned post %s' % post['slug'])
            post['cached_contents'] = result['cached_contents']
            post['cached_headline'] = result['cached_headline']
            logger.debug('returning cached headline %s' % (
                post['cached_headline']))
    else:
        # Published: refresh the cache with the live contents.
        # NOTE(review): ``collection.update`` with a plain document (no $set)
        # replaces the whole document, dropping ``_id`` fields from the
        # replacement — confirm this is intended; pymongo also deprecates
        # insert()/update() in favour of insert_one()/update_one().
        post['cached_contents'] = post['contents']
        post['cached_headline'] = post['headline']
        logger.debug("update cached headline to %s" % post['headline'])
        collection.update({'_id': post['slug']},
                          {'cached_contents': post['contents'],
                           'cached_headline': post['headline']})
    return pinned_post
def compute_composition_df(seq_df):
    """
    Compute the composition matrix for all proteins.

    Args:
        seq_df: df, dataframe with sequences (must contain a "sequence" column)

    Returns:
        df, with the composition of the proteins; one float column per
        standard amino acid, aligned to seq_df's index
    """
    # get composition table (pyteomics returns a dict of counts per sequence)
    df_seq_comp = pd.DataFrame(
        list(seq_df["sequence"].apply(parser.amino_acid_composition).values)) * 1.0

    # add column with 0s for amino acids that didnt occur in the protein fasta file
    for i in parser.std_amino_acids:
        if i not in df_seq_comp.columns:
            df_seq_comp[i] = 0

    # Amino acids absent from an individual sequence appear as NaN.
    df_seq_comp = df_seq_comp.fillna(0.0)
    df_seq_comp.index = seq_df.index
    return df_seq_comp
def make_gradient_squared(
    grid: CylindricalSymGrid, central: bool = True
) -> OperatorType:
    """make a discretized gradient squared operator for a cylindrical grid

    {DESCR_CYLINDRICAL_GRID}

    Args:
        grid (:class:`~pde.grids.cylindrical.CylindricalSymGrid`):
            The grid for which the operator is created
        central (bool):
            Whether a central difference approximation is used for the gradient
            operator. If this is False, the squared gradient is calculated as
            the mean of the squared values of the forward and backward
            derivatives.

    Returns:
        A function that can be applied to an array of values
    """
    # use processing for large enough arrays
    dim_r, dim_z = grid.shape
    parallel = dim_r * dim_z >= config["numba.parallel_threshold"]

    if central:
        # use central differences
        # 0.25 = (1/2)^2 from the central-difference stencil, squared.
        scale_r, scale_z = 0.25 / grid.discretization ** 2

        @jit(parallel=parallel)
        def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
            """apply gradient operator to array `arr`"""
            # Index 0 and dim+1 are ghost cells; only interior points are set.
            for i in nb.prange(1, dim_r + 1):  # iterate radial points
                for j in range(1, dim_z + 1):  # iterate axial points
                    term_r = (arr[i + 1, j] - arr[i - 1, j]) ** 2
                    term_z = (arr[i, j + 1] - arr[i, j - 1]) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z

    else:
        # use forward and backward differences
        # 0.5 averages the squared forward and backward differences.
        scale_r, scale_z = 0.5 / grid.discretization ** 2

        @jit(parallel=parallel)
        def gradient_squared(arr: np.ndarray, out: np.ndarray) -> None:
            """apply gradient operator to array `arr`"""
            for i in nb.prange(1, dim_r + 1):  # iterate radial points
                for j in range(1, dim_z + 1):  # iterate axial points
                    arr_z_l, arr_c, arr_z_h = arr[i, j - 1], arr[i, j], arr[i, j + 1]
                    term_r = (arr[i + 1, j] - arr_c) ** 2 + (arr_c - arr[i - 1, j]) ** 2
                    term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z

    return gradient_squared
def state(git_root):
    """Return a hash of the current state of the .git directory. Only considers
    fsck verbose output and refs.
    """
    if not git_root.is_dir():
        return 0
    rc, stdout, stderr = util.captured_run(
        "git", "fsck", "--full", "-v", cwd=git_root
    )
    ref_state = "".join(ref.name + ref.value for ref in collect_refs(git_root))
    return hash(stdout + stderr + ref_state)
def search_handler(data_type_name, search_key=None, search_value=None):
    """
    Purpose: Adapt PathError and QueryError to appropriate Django error types.
    Input Parameters:
        data_type_name - One of the searchable types 'PasswordData' or 'GroupData'.
        search_key - Name of searchable field for type specified. Optional, default = None.
        search_value - Value of defined field to match from data, default = None.
    Return: list of matching result objects (never empty).
    Exceptions: Http404 on QueryError or when no results are found,
        ImproperlyConfigured on PathError """
    result_list = []
    try:
        # PWDSVC_SEARCH selects the search backend implementation
        search_type = settings.PWDSVC_SEARCH
        if search_type == 'DataBaseSearch':
            db_search = DataBaseSearch(DATAMGR)
            result_list = db_search.search(data_type_name, search_key, search_value)
        else:
            result_list = DATAMGR.search(data_type_name, search_key, search_value)
    # NOTE: `except E as name` replaces the Python 2 `except E, name` form,
    # which is a SyntaxError under Python 3
    except PathError as path_error:
        # misconfiguration is a programmer/deployment error -> 500
        raise ImproperlyConfigured(path_error)
    except QueryError as query_error:
        # malformed or unanswerable queries map to a 404 response
        raise Http404(query_error)
    if not result_list:
        raise Http404('No results.')
    return result_list
def render_CardsCounter_edit(self, h, comp, *args):
    """Render the title of the associated object

    Renders an inline edit form for the column's max-card counter: a text
    input pre-filled with ``self.column.nb_max_cards`` that validates on
    blur (and on Enter, via the injected keyup script).

    Args:
        h: renderer used to build the DOM tree
        comp: component passed through to ``self.validate`` on submit
    """
    # buffer for the edited value; validated only when the input loses focus
    text = var.Var(self.text)
    with h.div(class_='list-counter'):
        with h.div(class_='cardCounter'):
            # onsubmit returns false so the browser never posts the form;
            # validation happens through the ajax action instead
            with h.form(onsubmit='return false;'):
                # build the ajax callback and reuse it as the input's onblur
                action = h.input(type='submit').action(lambda: self.validate(text(), comp)).get('onclick')
                id_ = h.generate_id()
                h << h.input(id=id_, type='text', value=self.column.nb_max_cards or '', onblur=action).action(text)
                # Enter key blurs the field (triggering validation) and any
                # non-digit characters are stripped as the user types
                h << h.script(
                    """YAHOO.util.Event.on(%s, 'keyup', function (e) {
                         if (e.keyCode == 13) {
                            e.preventDefault();
                            this.blur();
                         }
                         var result = this.value.replace(/[^0-9]/g, '')
                         if (this.value !=result) {
                            this.value = result;
                         }
                       });""" % ajax.py2js(id_)
                )
                # focus/select the input so the user can type immediately
                h << h.script(
                    "YAHOO.kansha.app.selectElement(%s);" % ajax.py2js(id_)
                )
                if self.error is not None:
                    with h.div(class_='nagare-error-message'):
                        h << self.error
    return h.root
def get_embedding_tids(tids, mapping):
    """Obtain token IDs based on our own tokenization, through the mapping to BERT tokens.

    Args:
        tids: iterable of token indices in our own tokenization.
        mapping: maps each of our token indices to the list of corresponding
            BERT token IDs.

    Returns:
        list, the BERT token IDs for all requested tokens, concatenated in
        the order given by ``tids``.
    """
    # flatten the per-token ID lists in order; a comprehension replaces the
    # original append/extend loop (same result, clearer and faster)
    return [bert_tid for t in tids for bert_tid in mapping[t]]
from typing import AnyStr
import shutil
import logging
import errno
import sys
def delete_configuration(timestamp: AnyStr) -> AnyStr:
    """
    Delete the configuration folder.

    :timestamp (AnyStr) The name of the configuration to delete

    Return the name of the deleted configuration. Exits the process with
    status 1 if the configuration directory does not exist; other OSErrors
    are logged and the name is still returned (best effort).
    """
    rating_rates_dir = envvar('RATING_RATES_DIR')
    # serialize concurrent modifications of the rates directory
    with Lockfile(rating_rates_dir):
        try:
            shutil.rmtree('{}/{}'.format(rating_rates_dir, timestamp))
        except OSError as err:
            logging.error(
                f'An error happened while removing {timestamp} configuration directory.')
            if err.errno == errno.ENOENT:
                logging.error(
                    f'Configuration directory {timestamp} does not exist.')
                sys.exit(1)
        else:
            # bug fix: the success message used to sit on the errno check's
            # `else` inside the except-block, so it was logged after a
            # non-ENOENT failure and never on success; try/else logs it only
            # when rmtree actually succeeded
            logging.info(f'removed {timestamp} configuration')
    return timestamp
import requests
def extract_stream_url(ashx_url):
    """ Extract real stream url from tunein stream url """
    # the .ashx playlist is plain text, one candidate URL per line;
    # the first non-empty line is the actual stream URL
    response = requests.get(ashx_url)
    for line in response.text.splitlines():
        if line:
            return line
def check_for_rematch(player_id1, player_id2):
    """Checks whether the two players specified have played a match before.
    Args:
        player_id1: ID of first player
        player_id2: ID of second player
    Returns:
        Bool: True if they have met before, False if they have not.
    """
    # EXISTS collapses the match lookup to a single boolean row; both
    # winner/loser orderings are checked since either player may have won
    query = """SELECT EXISTS(SELECT 1
                 FROM matches
                 WHERE winner_pid=%(id1)s AND loser_pid=%(id2)s
                 OR winner_pid=%(id2)s AND loser_pid=%(id1)s);"""
    # named parameters keep the query safe from SQL injection
    parameter = {'id1': player_id1, 'id2': player_id2}
    with connect_to_db() as database:
        database['cursor'].execute(query, parameter)
        # fetchone() returns a one-column row holding the EXISTS boolean
        is_rematch = database['cursor'].fetchone()[0]
    return is_rematch
# Credits
# Idea for using the EXISTS PSQL keyword found on this Stack Overflow page:
# http://stackoverflow.com/questions/7471625/ | 8ee0652dc089cb286021f1d54672439881e86e56 | 23,290 |
def nextrandombitsAES(cipher, bitlength):
    """
    <Purpose>
      generate random bits using AES-CTR
    <Arguments>
      cipher: an AES-CTR cipher object exposing encrypt(); encrypting zero
        bytes yields the raw keystream
      bitlength: the length of the random string in BITS
    <Side Effects>
      Increases the AES counter
    <Returns>
      A random string with the supplied bitlength (the rightmost bits are zero if bitlength is not a multiple of 8)
    """
    # number of whole bytes needed to hold `bitlength` bits
    bytelength = bits_to_bytes(bitlength)
    # leftover bits occupying only part of the final byte (0 if aligned)
    bitoffset = bitlength % 8
    if bitoffset > 0:
        # if the bitlength is not a multiple of 8, clear the rightmost bits
        # encrypt zeros: in CTR mode this returns the keystream directly
        pt = (bytelength - 1) * b'\0'
        randombytes = cipher.encrypt(pt)
        b = cipher.encrypt(b'\0')
        # (0xff00 >> bitoffset) & 0xff keeps the top `bitoffset` bits of the
        # final byte and zeroes the rest, e.g. bitoffset=3 -> mask 0b11100000
        b = (b[0] & ((0xff00 >> bitoffset) & 0xff)).to_bytes(1, byteorder = 'big')
        randombytes += b
        return randombytes
    else:
        # byte-aligned request: the keystream is returned unmodified
        pt = bytelength * b'\0'
        return cipher.encrypt(pt)
def _binparams2img(mc, param):
    """
    Collect the value of one parameter from every bin of a molecular cloud
    into a 2D image matrix.

    Parameters
    ----------
    mc : dict
        Molecular cloud data; either binned (keys 'B0', 'B1', ... each with
        'pos'/'flag'/parameter entries) or unbinned (parameter stored at the
        top level) -- layout inferred from _get_mc_dims, TODO confirm
    param : str
        Parameter name; must be one of ``sos.all_params``

    Returns
    -------
    tuple (np.ndarray, dims): the (sx, sy) matrix of parameter values
    (NaN where a bin is flagged) and the grid dimensions
    """
    if not param in sos.all_params:
        raise Exception('Parameter not valid')
    # Get binned or full dimensions
    dims, nbins, mc_binned = _get_mc_dims(mc)
    sx, sy = dims
    # Define paremeter matrix
    param_matrix = np.zeros((sx, sy))
    # Scan all the bins
    for b in range(nbins):
        if mc_binned:
            # Get bin name
            name = 'B'+str(b)
            # Get coordinates
            i, j = mc[name]['pos']
            if not mc[name]['flag']:
                # Get parameter value
                m = mc[name][param]
            else:
                # flagged bins are masked with NaN in the output image
                m = np.nan
        else:
            # unbinned cloud: a single value placed at the origin
            # Get coordinate
            i, j = 0, 0
            # Get parameter value
            m = mc[param]
        #if param == 'den':
        #    m = m/(1.28*1e3*1.672622e-27) # Neutral gas * proton mass [g]
        param_matrix[i][j] = m
    return param_matrix, dims
def arccos(x):
    """
    Compute the inverse cosine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
    `abs(x) <= 1`, the result is a real number in the closed interval
    :math:`[0, \\pi]`; otherwise the complex principal value is returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse cosine(s) of the `x` value(s). A scalar input yields a
        scalar output, otherwise an array object is returned.

    See Also
    --------
    numpy.arccos

    Notes
    -----
    For an arccos() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arccos`.

    Examples
    --------
    >>> np.set_printoptions(precision=4)
    >>> np.emath.arccos(1) # a scalar is returned
    0.0
    >>> np.emath.arccos([1,2])
    array([ 0.-0.j ,  0.+1.317j])
    """
    # promote real inputs with |x| > 1 to complex first, then delegate
    return nx.arccos(_fix_real_abs_gt_1(x))
def request_authentication(user, organization_id, short_code):
    """
    Request for an authentication token from Safaricom's MPesa API
    """
    # 404 unless exactly one API account matches the owner/org/short-code triple
    account = get_object_or_404(
        MpesaAPIAccount.objects.filter(
            organization__owner=user,
            linked_account__identifier=short_code,
            organization__organization_id=organization_id,
        )
    )
    # the account record decides which MPesa environment to authenticate against
    environment = "production" if account.in_production else "sandbox"
    return api.authenticate(
        env=environment,
        app_key=account.consumer_key,
        app_secret=account.consumer_secret,
    )
def none_to_null(value):
    """Return the string "null" if *value* is None, otherwise return *value* unchanged.

    (The original docstring stated the inverse; this function maps None to
    the literal string "null", e.g. for serialization.)
    """
    # `is None` is the correct identity test; `== None` invokes __eq__ and can
    # misbehave for objects with custom equality (numpy arrays, ORM fields)
    return "null" if value is None else value
def get_dsd_url():
    """Returns the remote URL to the global SDMX DSD for the SDGs."""
    # endpoint on the global SDMX registry plus the fixed query parameters
    endpoint = 'https://registry.sdmx.org/ws/public/sdmxapi/rest/datastructure/IAEG-SDGs/SDG/latest/'
    params = '?format=sdmx-2.1&detail=full&references=children'
    return endpoint + params
from jcvi.utils.orderedcollections import SortedCollection
def range_closest(ranges, b, left=True):
    """
    Returns the range that's closest to the given position. Notice that the
    behavior is to return ONE closest range to the left end (if left is True).
    This is a SLOW method.
    >>> ranges = [("1", 30, 40), ("1", 33, 35), ("1", 10, 20)]
    >>> b = ("1", 22, 25)
    >>> range_closest(ranges, b)
    ('1', 10, 20)
    >>> range_closest(ranges, b, left=False)
    ('1', 33, 35)
    >>> b = ("1", 2, 5)
    >>> range_closest(ranges, b)
    """
    # left: sort by (seqid, start, end) and find the last range <= b;
    # right: sort by (seqid, end, start) so the comparison is driven by the
    # range's right endpoint, then find the first range >= b
    key = (lambda x: x) if left else (lambda x: (x[0], x[2], x[1]))
    rr = SortedCollection(ranges, key=key)
    try:
        if left:
            s = rr.find_le(b)
            assert key(s) <= key(b), (s, b)
        else:
            s = rr.find_ge(b)
            assert key(s) >= key(b), (s, b)
    except ValueError:
        # no range on the requested side of b; return None (see last doctest)
        s = None
    return s
def _fill_array(data, mask=None, fill_value=None):
"""
Mask numpy array and/or fill array value without demasking.
Additionally set fill_value to value.
If data is not a MaskedArray and mask is None returns silently data.
:param mask: apply mask to array
:param fill_value: fill value
"""
if mask is not None and mask is not False:
data = np.ma.MaskedArray(data, mask=mask, copy=False)
if np.ma.is_masked(data) and fill_value is not None:
data._data[data.mask] = fill_value
np.ma.set_fill_value(data, fill_value)
# elif not np.ma.is_masked(data):
# data = np.ma.filled(data)
return data | de6190f9960a854e6cb67fe5eb61fd6f984cb147 | 23,298 |
def make_Dog(size, name):
    """Create dog entity."""
    # name is coerced to str so non-string identifiers are accepted
    dog = Dog(size=size, name=str(name))
    # a named dog leads with its name; an unnamed one gets the generic phrase
    if dog.called() != "":
        return f"{dog.called()}, the {size} dog says {dog.talk()}."
    return f"The {size} dog says {dog.talk()}."
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.