content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def analyze_images(url_or_path: str, json_keys: Optional[Sequence[str]] = None,
                   account: Optional[str] = None,
                   container: Optional[str] = None,
                   sas_token: Optional[str] = None) -> None:
    """Check the condition of every image listed in a file and print a summary.

    Args:
        url_or_path: str, URL or local path to a file containing a list
            of image paths. Each image path is either <blob_name> if account and
            container are given, or <dataset>/<blob_name> if account and
            container are None. File can either be a list of image paths, or a
            JSON file containing image paths.
        json_keys: optional list of str, only relevant if url_or_path is a JSON
            file. If json_keys=None, then the JSON file at url_or_path is
            assumed to be a JSON list of image paths. If json_keys is not None,
            then the JSON file should be a dict, whose values corresponding to
            json_keys are lists of image paths.
        account: str, name of Azure Blob Storage account
        container: str, name of Azure Blob Storage container
        sas_token: str, optional SAS token (without leading '?') if the
            container is not publicly accessible
    """
    datasets_table = None
    if (account is None) or (container is None):
        # account/container/sas_token must be given together or not at all.
        assert account is None
        assert container is None
        assert sas_token is None
        datasets_table = MegadbUtils().get_datasets_table()

    is_json = ('.json' in url_or_path)
    if url_or_path.startswith(('http://', 'https://')):
        r = requests.get(url_or_path)
        if is_json:
            img_paths = r.json()
        else:
            img_paths = r.text.splitlines()
    else:
        with open(url_or_path, 'r') as f:
            if is_json:
                img_paths = json.load(f)
            else:
                # BUG FIX: readlines() kept trailing '\n' on every path,
                # unlike the splitlines() used in the URL branch above.
                img_paths = f.read().splitlines()

    if is_json and json_keys is not None:
        # Flatten the lists stored under the requested keys into one list.
        img_paths_json = img_paths
        img_paths = []
        for k in json_keys:
            img_paths += img_paths_json[k]

    # Bucket image paths by check outcome.
    mapping: dict[str, list[str]] = {
        status: []
        for status in ['good', 'nonexistant', 'non_image', 'truncated', 'bad']
    }

    pool = futures.ThreadPoolExecutor(max_workers=100)
    # lock before changing ImageFile.LOAD_TRUNCATED_IMAGES
    truncated_images_lock = threading.Lock()
    futures_list = []
    for img_path in tqdm(img_paths):
        future = pool.submit(
            check_image_condition, img_path, truncated_images_lock, account,
            container, sas_token, datasets_table)
        futures_list.append(future)

    total = len(futures_list)
    for future in tqdm(futures.as_completed(futures_list), total=total):
        img_file, status = future.result()
        mapping[status].append(img_file)

    # Print a per-status count followed by the sorted paths in that bucket.
    for status, img_list in mapping.items():
        print(f'{status}: {len(img_list)}')
        pprint(sorted(img_list))
def permutacion_matriz(U, fila_i, idx_max, verbose=False, P=None, r=None):
    """Swap two rows of a matrix, optionally keeping a permutation matrix
    and a right-hand side in sync.

    Args:
        U (matrix): matrix whose rows are permuted in place
        fila_i (int): index of the source row
        idx_max (int): index of the destination row
        verbose (bool, optional): print intermediate states. Defaults to False.
        P (mat, optional): permutation matrix. Defaults to None.
        r (mat, optional): right-hand side of the equation. Defaults to None.

    Returns:
        (U, P, r): the (possibly modified) matrices
    """
    print_verbose(
        [
            f"Permutamos fila {fila_i} con {idx_max}",
            f"U antes:\n {np.array(U)}",
            f"P antes:\n {np.array(P)}",
        ],
        verbose,
    )
    if fila_i != idx_max:
        # Apply the same row exchange to every matrix that was supplied.
        # .copy() prevents aliasing while the two rows are swapped.
        for mat in (U, P, r):
            if mat is None:
                continue
            src, dst = mat[fila_i, :].copy(), mat[idx_max, :].copy()
            mat[idx_max, :], mat[fila_i, :] = src, dst
    print_verbose(
        [f"U despues:\n {np.array(U)}", f"P despues:\n {np.array(P)}"], verbose
    )
    return U, P, r
def admin_toggle_access(self, *, sender: AdminDashboardUser, task_data: ToggleAccessRequest):
    """
    Toggle a user's access to MarinTrace in Auth0 and Neo4j

    :param sender: the user that initiated the task
    :param task_data: the user identifier to toggle (carries .email and .block)
    """
    logger.info(f"Setting block={task_data.block} for the user {task_data.email} at the request of {sender.email}")
    # Mirror the blocked flag onto the member node in Neo4j first.
    with Neo4JGraph() as graph:
        graph.run("""MATCH (m: Member {email: $email, school: $school}) SET m.disabled = $blocked""",
                  email=task_data.email, school=sender.school, blocked=task_data.block)
    # Then update the Auth0 record for the same email, if one exists.
    # NOTE(review): get_user(...)['user_id'] raises KeyError if the lookup
    # result lacks that field — confirm get_user's contract for unknown emails.
    user_id = get_user(email=task_data.email, fields=['user_id'])['user_id']
    if user_id:
        update_user(user_id=user_id, content={'blocked': task_data.block})
    logger.info(f"Toggling user {task_data.email} in Neo4J")
def get_compliance_by_rules(scan_id):
    """
    Lists compliance results by rule for a scan.

    Pages through the API until the server reports no more data and returns
    the concatenated item list.
    """
    results = []
    offset = 0
    while True:
        page = get('scans/%s/compliance_by_rules' % scan_id, {'offset': offset})
        results.extend(page['items'])
        if not page['is_truncated']:
            return results
        offset = page['next_offset']
def tree_command(ctx, source):
    """View the AST for the given source file."""
    code_file = ctx.obj['loader'][source]
    # Render the tree into an in-memory buffer, then hand it to the pager.
    buffer = io.StringIO()
    code_file.node.pprint(file=buffer)
    click.echo_via_pager(buffer.getvalue())
def QuadRemeshBrep1(brep, parameters, guideCurves, multiple=False):
    """
    Create Quad Remesh from a Brep

    Args:
        brep (Brep): Set Brep Face Mode by setting QuadRemeshParameters.PreserveMeshArrayEdgesMode
        parameters (QuadRemeshParameters): parameters controlling the remeshing
        guideCurves (IEnumerable<Curve>): A curve array used to influence mesh face layout
            The curves should touch the input mesh
            Set Guide Curve Influence by using QuadRemeshParameters.GuideCurveInfluence
        multiple (bool): if True, each argument is a sequence and one request is
            made per zipped (brep, parameters, guideCurves) triple
    """
    url = "rhino/geometry/mesh/quadremeshbrep-brep_quadremeshparameters_curvearray"
    if multiple: url += "?multiple=true"
    args = [brep, parameters, guideCurves]
    if multiple: args = list(zip(brep, parameters, guideCurves))
    # Send the request to the Rhino Compute server and decode the reply.
    response = Util.ComputeFetch(url, args)
    response = Util.DecodeToCommonObject(response)
    return response
def tags2turbo(lon, lat, dist, bdim=155, timeout=60, pretty_print=False, maxsize=None, tags=None):
    """Build an Overpass Turbo query for features matching *tags* near a point.

    Args:
        lon, lat: centre of the search area (degrees).
        dist: search distance, forwarded in the query condition.
        bdim (int): base tile dimension used by the query optimizer. Defaults to 155.
        timeout (int): Overpass server timeout in seconds. Defaults to 60.
        pretty_print: accepted for API compatibility; not used here.
        maxsize: optional Overpass maxsize directive.
        tags: tag filters forwarded to filter2query(). Defaults to no filters.

    Returns:
        dict with a single key 'query' holding the built query string.
    """
    # BUG FIX: `tags=[]` was a shared mutable default argument.
    if tags is None:
        tags = []
    gtypes = ('node', 'way', 'relation',)
    turbo = Turbo()
    qconditions = [{
        "query": filter2query(tags),
        "distance": dist,
        "gtypes": gtypes,  # Optional. Possible values:
                           # "node", "way", "relation", "way-node", node-relation",
                           # "relation-way", "relation-relation", "relation-backwards"
        # "amplitude": 0,
        # NOTE(review): the doubled 'd' in "%Y-%m-%ddT..." looks like a typo for
        # the ISO strftime format "%Y-%m-%dT%H:%M:%SZ" — confirm before fixing,
        # since the string is passed through to the query builder as-is.
        "newer": "%Y-%m-%ddT%H:%M:%SZ"
    }]
    query = turbo.build_query(
        Turbo.optimize_centralized_query_by_base_tile(lon, lat, qconditions, bdim=bdim),
        timeout=timeout, maxsize=maxsize
    )
    return dict(query=query)
async def get_people(from_number: int = None, up_to_number: int = None):
    """
    Endpoint to get all people from-to given number

    :param from_number: start index of the slice; None means from the beginning
    :param up_to_number: end index of the slice (exclusive); None means to the end
    :return: list of people from-to numbers
    """
    # Plain list slicing: out-of-range bounds are clamped, never raise.
    return _people[from_number:up_to_number]
def stacked_bar(data, series_labels=None, category_labels=None,
                show_values=False, value_format="{}", y_label=None,
                grid=True, reverse=False, y_limit=None, size_plot=None, use_dataframe=False, throw_zeros=False,dict_colors={}):
    """Plots a stacked bar chart with the data and labels provided.

    Each column is normalized to percentages of its column sum before plotting,
    and (optionally) per-segment percentage labels and per-column totals are
    annotated.

    Keyword arguments:
    data -- 2-dimensional numpy array or nested list containing data for each series in rows
    series_labels -- list of series labels (these appear in the legend)
    category_labels -- list of category labels (these appear on the x-axis)
    show_values -- If True then numeric value labels will be shown on each bar
    value_format -- Format string for numeric value labels (default is "{}")
    y_label -- Label for y-axis (str)
    grid -- If True display grid
    reverse -- If True reverse the order that the series are displayed (left-to-right or right-to-left)
    y_limit -- containes a int\float that will be the highest y value shown in the graph and y axis
    size_plot -- contains an array of [ width , hight] we want the plot square area size will be
    use_dataframe -- Bool, if true, data is treated as pandas df with series labels and category labels as rows and colums respectivly
    throw_zeros -- Only applicable if use_dataframe is True, throws rows with all zeros in them
    dict_colors -- mapping from series label to a matplotlib color; every series
        label must be a key (KeyError otherwise).
        NOTE(review): `dict_colors={}` is a mutable default argument; it is only
        read here, but consider `dict_colors=None` + `dict_colors or {}`.
    """
    if throw_zeros and not use_dataframe:
        # TODO make throw zeros work without df too
        raise ValueError("throw_zeros only works if use_dataframe is chosen")
    # if throw zeros, remove rows with all zeros
    if throw_zeros:
        data = data[(data.T != 0).any()]
    # if data frame extract info from dataframe
    if use_dataframe:
        # remove no_change filter if needed:
        if 'no_change' in data.index:
            data = data.drop(['no_change'])
        series_labels = data.index
        category_labels = data.columns
        data = data.values
    ny = len(data[0])
    ind2 = range(ny)
    axes = []
    # Running top edge of the stack for each category column.
    cum_size = np.zeros(ny)
    data = np.array(data)
    if reverse:
        data = np.flip(data, axis=1)
        category_labels = reversed(category_labels)
    if size_plot:
        fig = plt.figure(figsize=size_plot)
        plt.rcParams['font.size'] = '20'
    # Map series index -> color, looked up by series label.
    suit_colors_dict = {}
    for index, column in enumerate(series_labels):
        suit_colors_dict[index] = dict_colors[column]
    #print(data)
    sum_column = np.sum(data, axis=0)
    #print("old_data",data)
    #print("sum_column", sum_column)
    data = data.astype(float)
    # Convert each non-zero cell to a percentage of its column total
    # (rounded to 2 decimals via format()).
    for row_index in range(len(data)):
        for column_index in range(len(data[row_index])):
            if data[row_index][column_index] != 0.0:
                #print("before", "data[row_index][column_index]",data[row_index][column_index],"sum_column[column_index]*100", sum_column[column_index]*100)
                data[row_index][column_index] = format(data[row_index][column_index]/sum_column[column_index]*100, '.2f')
                #print("after:","\n","data[row_index][column_index]",data[row_index][column_index])
    #print("new data", data)
    #print("category_labels",category_labels )
    #print("series_labels",series_labels)
    # set the text in the same color as the bar
    for i, row_data in enumerate(data):
        axes.append(plt.bar(ind2, row_data, bottom=cum_size,
                            label=series_labels[i]))
        for row in range(len(row_data)):
            axes[i][row].set_color(suit_colors_dict[i])
        cum_size += row_data
    if not category_labels is None:
        plt.xticks(ind2, category_labels, rotation=20, fontsize=30)
    if y_label != None:
        plt.ylabel(y_label, fontsize=30)
    plt.legend()
    if grid:
        plt.grid()
    if y_limit != None:
        plt.ylim(0, y_limit)
    if show_values:
        # Tallest single segment, used to scale annotation offsets.
        max_tmp = []
        for axis in axes:
            max_tmp.append(max([bar.get_height() for bar in axis]))
        max_height_data = max(max_tmp)
        proportion_to_high = 0.08*max_height_data
        # Segments shorter than this get an arrowed label above the stack
        # instead of an in-bar label.
        need_arrow = 0.08*max_height_data
        # Top of each stacked column (y of the last series' bar + its height).
        start_extra_heights = [axes[-1][i].get_y() + axes[-1][i].get_height() for i in range(len(axes[-1]))]
        # Per-column vertical offset, bumped after each arrowed label so
        # labels for the same column do not overlap.
        jumps = [proportion_to_high for i in range(len(axes[0]))]
        for index,axis in enumerate(axes):
            for counter, bar in enumerate(axis):
                max_height = start_extra_heights[counter]
                w, h = bar.get_width(), bar.get_height()
                if 0.0 < h < need_arrow:
                    plt.annotate(value_format.format(h)+'%', xy=(bar.get_x(), bar.get_y()),
                                 xytext=(bar.get_x() + 0.2, max_height + jumps[counter]), color=suit_colors_dict[index],
                                 arrowprops=dict(arrowstyle="->"))
                    jumps[counter] += proportion_to_high * 1.2
                elif h > 0.0:
                    plt.text(bar.get_x() + w / 2, bar.get_y() + h / 2, value_format.format(h)+'%', ha="center",
                             va="center")
        # adding the number of total lines of the original pileups
        for index, bar in enumerate(axes[-1]):
            max_height = start_extra_heights[index]
            if max_height == 0.0:
                max_height = 1.3
            plt.annotate(value_format.format(sum_column[index]), xy=(bar.get_x(), bar.get_y()+bar.get_height()),
                         xytext=(bar.get_x(), max_height + jumps[index]),
                         arrowprops=dict(arrowstyle='fancy'))
    return plt, axes
def process_results(economy):
    """
    Combine OSeMOSYS solution files and write the result as an Excel file where
    each result parameter is a tab in the Excel file.

    Args:
        economy: economy code; CSVs are read from tmp/<economy> and the output
            directory results/<economy> is created if missing.

    Returns:
        dict mapping result-parameter name to a non-empty pivoted DataFrame
        (index = parameter indices, columns = YEAR).
    """
    click.echo(click.style('\n-- Preparing results...',fg='cyan'))
    tmp_directory = 'tmp/{}'.format(economy)
    path = os.path.join("./results/", economy)
    try:
        os.mkdir(path)
    except OSError:
        # Directory already exists (or cannot be created) -- proceed anyway.
        pass
    else:
        click.echo(click.style("Successfully created the directory %s " % path,fg='yellow'))

    # The results config declares, per parameter, its type and index columns.
    with resources.open_text('aperc_osemosys','results_config.yml') as open_file:
        contents_var = yaml.load(open_file, Loader=yaml.FullLoader)

    # Load every 'var' result CSV, keeping only the non-empty frames.
    results_df = {}
    for key, value in contents_var.items():
        if value['type'] == 'var':
            fpath = './{}/'.format(tmp_directory)+key+'.csv'
            results_df[key] = pd.read_csv(fpath).reset_index(drop=True)
    results_dfs = {k: v for (k, v) in results_df.items() if not v.empty}

    _result_tables = {}
    for key, _df in results_dfs.items():
        indices = contents_var[key]['indices']
        kind = contents_var[key]['type']
        # YEAR becomes the column axis and VALUE the cell values, so neither
        # belongs in the pivot index.
        unwanted_members = {'YEAR', 'VALUE'}
        _indices = [ele for ele in indices if ele not in unwanted_members]
        if 'TIMESLICE' in indices:
            # Aggregate over timeslices.
            df = pd.pivot_table(_df, index=_indices, columns='YEAR',
                                values='VALUE', aggfunc=np.sum)
            # BUG FIX: positional `any(1)` was deprecated and removed in
            # pandas 2.0; use the explicit keyword.
            df = df.loc[(df != 0).any(axis=1)]  # remove rows if all are zero
            _result_tables[key] = df
        elif kind in ('var', 'param'):
            df = pd.pivot_table(_df, index=_indices, columns='YEAR', values='VALUE')
            df = df.loc[(df != 0).any(axis=1)]  # remove rows if all are zero
            _result_tables[key] = df
        elif kind == 'equ':
            # Equation values (duals) keep their all-zero rows.
            df = pd.pivot_table(_df, index=_indices, columns='YEAR', values='VALUE')
            _result_tables[key] = df
        _result_tables[key] = _result_tables[key].fillna(0)
    results_tables = {k: v for k, v in _result_tables.items() if not v.empty}
    return results_tables
def window(x, y, width, overlap=0., x_0=None, expansion=None, cap_left=True,
           cap_right=True, ret_x=True):
    """Break arrays x and y into slices.

    Parameters
    ----------
    x : array_like
        Monotonically increasing numbers. If x is not monotonically increasing
        then it will be flipped, beware that this may not have the desired
        effect.
    y : array_like
        Arbitrary values, same size as x.
    width : float
        Window width in the same units as x.
    overlap : float, optional
        Overlap of windows in the same units as x. If negative, the window
        steps along x values rather than binning.
    x_0 : float, optional
        Position in x at which to start windowing. (untested)
    expansion : polynomial coefficients, optional
        Describes the rate of change of window size with x. (not implimented)
        The idea is that width = width*np.polyval(expansion, x). Overlap is
        similarly increased.
    cap_left : boolean, optional
        Stop window exceeding left most (minimum) value of x. Only applies when
        overlap is positive.
    cap_right : boolean, optional
        Stop window exceeding right most (maximum) value of x. Only applies
        when overlap is positive.

    Returns
    -------
    vals : numpy.array
        Contains all the windowed chunks of x and y.

    Notes
    -----
    The current check on monotonicity is whether more than 20% of points in
    x are are not monotonic. This is a sort of hack to avoid flipping for the
    occasional erroneous non-monotonic point.
    """
    if x.size != y.size:
        raise ValueError('x and y must be of equal size.')
    if overlap > width:
        raise ValueError('The overlap cannot be larger than the width.')
    # Incredibly bad check for monotonicity.
    not_monotonic = np.sum(np.diff(x) < 0) > 0.2*len(x)
    if not_monotonic:
        x = utils.flip_padded(x)
        y = utils.flip_padded(y)
    # Drop NaNs (and, if x_0 is given, everything left of x_0).
    if x_0 is not None:
        idxs = ~np.isnan(x) & (x >= x_0)
    else:
        idxs = ~np.isnan(x)
    x = x[idxs]
    y = y[idxs]
    if overlap < 0.:
        # Step mode: one window centred on every x value.
        left = x - width/2.
        right = left + width
    elif overlap >= 0.:
        # Bin mode: windows advance by (width - overlap).
        step = width - overlap
        if cap_left:
            xmin = x[0]
        else:
            xmin = x[0] - width
        if cap_right:
            # Take away slightly less than the full width to allow for the last
            # bin to complete the full range.
            xmax = x[-1] - 0.99*width
        else:
            xmax = x[-1]
        left = np.arange(xmin, xmax, step)
        right = left + width
    # One (left, right) edge pair per window.
    bins = np.transpose(np.vstack((left, right)))
    if ret_x:
        vals = np.asarray([chunk(x, b, y) for b in bins])
    else:
        vals = np.asarray([chunk(x, b, y)[1] for b in bins])
    if not_monotonic:
        # Restore the original orientation of the windows.
        vals = np.flipud(vals)
    return vals
def mark_property_purchased(request):
    """
    Api to mark a property as purchased by the buyer without page reload using vue or htmx

    Expects a JSON body with a 'property_id' key; always responds with
    {"success": true} (the status change only happens when the property is
    sold but not yet marked SOLD).
    """
    data = json.loads(request.body)
    # BUG FIX: the original treated the raw id value as a model instance
    # (attribute access and .update() on data['property_id']); look the
    # Property up first.
    prop = Property.objects.get(pk=data['property_id'])
    if prop.property_status != Property.SOLD and prop.property_sold:
        prop.property_status = Property.SOLD
        prop.save(update_fields=['property_status'])
        messages.success(request, f"You have successfully completed {prop.property_title} purchase.")
    return JsonResponse({"success": True})
def save_app(name, executable, description='', envscript='', preprocess='', postprocess=''):
    """
    Adds a new app with the given properties to the balsam database.

    Parameters
    ----------
    name: str, name of the app
    executable: str, path to the executable
    description: str, info about the app
    envscript: str, path to a bash script that can be `source`d
    preprocess: str, path to the preprocessing script or command to be executed
    postprocess: str, path to the postprocessing script
    """
    from balsam.core.models import ApplicationDefinition as App
    import shutil
    import os
    # Refuse to overwrite an existing app of the same name.
    if App.objects.filter(name=name).exists():
        print(f"An application named {name} already exists")
        return
    newapp = App()
    newapp.name = name
    newapp.executable = executable
    newapp.description = description
    newapp.envscript = envscript
    newapp.preprocess = preprocess
    newapp.postprocess = postprocess
    # Only save if the executable resolves on PATH.
    appexe = shutil.which(executable)
    if appexe:
        print(f'{appexe} is found')
        newapp.save()
        print(f'{newapp.name} added to the balsam database {os.environ["BALSAM_DB_PATH"]}.')
    else:
        # BUG FIX: the message lacked the f-prefix and printed the literal
        # text "{executable} is not found".
        print(f'{executable} is not found')
    return
def create_checkpoint(weights_and_biases, global_step, model_dir):
    """Create checkpoint file with provided model weights.

    Args:
        weights_and_biases: Iterable of tuples of weight and bias values,
            ordered hidden layers first, logits layer last.
        global_step: Initial global step to save in checkpoint.
        model_dir: Directory into which checkpoint is saved.
    """
    weights, biases = zip(*weights_and_biases)
    model_weights = {}
    # Hidden layer weights.
    for i in range(0, len(weights) - 1):
        model_weights[HIDDEN_WEIGHTS_NAME_PATTERN % i] = weights[i]
        model_weights[HIDDEN_BIASES_NAME_PATTERN % i] = biases[i]
    # Output layer weights.
    model_weights[LOGITS_WEIGHTS_NAME] = weights[-1]
    model_weights[LOGITS_BIASES_NAME] = biases[-1]
    # Build a throwaway graph containing the variables, then save it.
    with ops.Graph().as_default():
        # Create model variables.
        for k, v in six.iteritems(model_weights):
            variables_lib.Variable(v, name=k, dtype=dtypes.float32)
        # Create non-model variables.
        global_step_var = training_util.create_global_step()
        # Initialize vars and save checkpoint.
        with tf_session.Session() as sess:
            variables_lib.global_variables_initializer().run()
            global_step_var.assign(global_step).eval()
            saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def file_exists(file: str) -> bool:
    """Return True if *file* exists and is a regular file.

    Accepts a bare filename or a path. Directories, missing paths, and
    broken symlinks all yield False.
    """
    # os.path.isfile already returns False for non-existent paths, so the
    # previous separate os.path.exists check was redundant.
    return os.path.isfile(file)
def test_issue588(en_vocab):
    """Test if empty specs still cause an error when adding patterns"""
    matcher = Matcher(en_vocab)
    # An empty token-spec list ([[]]) must be rejected, not silently accepted.
    with pytest.raises(ValueError):
        matcher.add("TEST", [[]])
def argmax_unique(arr, axis):
    """Return the argmax along *axis* plus a mask of non-uniquely-resolved rows.

    A position counts as "resolved" when exactly one entry along *axis* is
    set, i.e. the sum along the axis equals 1 (entries are assumed to be 0/1
    indicators).

    Args:
        arr: array of 0/1 indicator values.
        axis: axis along which to take the argmax / sum.

    Returns:
        (uni_argmax, nonunique_mask): masked argmax (fill_value -1, masked
        where not uniquely resolved) and the boolean mask itself.
    """
    arrm = np.argmax(arr, axis)
    arrs = np.sum(arr, axis)
    # BUG FIX: the original used `(arrs == 1) is False`, which compares the
    # *array object* to the False singleton — always False — so nothing was
    # ever masked. Element-wise comparison is what was intended; shrink=False
    # keeps the mask an array even when no element is masked.
    nonunique_mask = np.ma.make_mask(arrs != 1, shrink=False)
    uni_argmax = np.ma.masked_array(arrm, mask=nonunique_mask, fill_value=-1)
    return uni_argmax, nonunique_mask
def main():
    """putting the different functions together

    Prompt for RSS feed URLs, fetch them, then parse and display the feed
    stored at data/latestfeed.xml. Re-prompts until at least one valid URL
    is entered.
    """
    while True:
        url_list = input("Paste some RSS feed URLs (Separate them using a space or comma): \n\n")
        urls = get_url(url_list)
        if urls:
            for url in urls:
                fetch_xml(url)
            # NOTE(review): only data/latestfeed.xml is parsed after the loop —
            # confirm whether fetch_xml appends to it or each fetch overwrites
            # the previous feed.
            root = parse_xml('data/latestfeed.xml')
            feed = feed_items(root)
            display_feed(feed)
            break
def mod_builtins():
    """
    Replaces all builtins by versions wrapped using
    DerivableException.wrap.

    It is not recommended to do this, and there is pretty much no
    benefit.
    """
    g = globals()
    # For every builtin exception class, replace the attribute on the
    # `builtins` module with this module's same-named wrapped version.
    # NOTE(review): g[builtin] raises KeyError for any *Error/*Exception name
    # this module has no wrapper for — confirm all wrappers are pre-generated.
    for builtin in dir(builtins):
        if builtin.endswith('Error') or builtin.endswith('Exception'):
            setattr(builtins, builtin, g[builtin])
def get_json(response: func.HttpResponse) -> Dict:
    """Decode the UTF-8 body of an HttpResponse and parse it as JSON."""
    body = response.get_body()
    return json.loads(body.decode("utf-8"))
def _float_feature(value):
    """Returns a float_list from a float / double.

    Scalars are wrapped in a one-element list; lists pass through unchanged.
    """
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def sendVillasNodeOutput(message, output_mapping_vector, powerflow_results, state_estimation_results, scenario_flag):
    """
    to create the payload according to "villas_node_output.json"

    @param message: received message from the server (json.loads(msg.payload)[0])
    @param output_mapping_vector: according to villas_node_output.json (see function read_mapping_file)
    @param powerflow_results: results of powerflow (type acs.state_estimation.results.Results)
    @param state_estimation_results: results of state_estimation (type acs.state_estimation.results.Results)
    @param scenario_flag:
    @return: string formatted according to "villas_node_output.json"
    """
    VillasNodeOutput = {}
    # Echo the origin timestamp and sequence number from the incoming message.
    VillasNodeOutput["ts"] = {}
    VillasNodeOutput["ts"]["origin"] = message["ts"]["origin"]
    if "sequence" in message:
        VillasNodeOutput["sequence"] = message["sequence"]
    else:
        print('Sequence no. not available.')
        VillasNodeOutput["sequence"] = 1

    # calculate Vmag_err: per-node voltage-magnitude estimation error in
    # percent of the true (powerflow) magnitude.
    Vmag_err = np.zeros(len(powerflow_results.nodes))
    for idx, elem in enumerate(powerflow_results.nodes):
        uuid_pf = elem.topology_node.uuid
        Vmag_true = np.absolute(elem.voltage)
        Vmag_est = np.absolute(state_estimation_results.get_node(uuid=uuid_pf).voltage)
        Vmag_err[idx] = np.absolute(Vmag_est - Vmag_true)
        Vmag_err[idx] = 100 * np.divide(Vmag_err[idx], Vmag_true)
    max_err = np.amax(Vmag_err)
    mean_err = np.mean(Vmag_err)

    # Fill the data vector entry-by-entry according to the mapping.
    data = [None] * len(output_mapping_vector)
    for idx, elem in enumerate(output_mapping_vector):
        if elem[0] == "max_err":
            data[idx] = max_err
            continue
        elif elem[0] == "mean_err":
            data[idx] = mean_err
            continue
        elif elem[0] == "scenario_flag":
            data[idx] = float(scenario_flag)
            continue
        else:  # elem = ["N4", "V", "phase", "est"] or elem = ["N4", "V", "phase", "pf"]
            # elem[0] is the node uuid; elem[3] selects which result set.
            node = None
            if elem[3] == "est":
                node = state_estimation_results.get_node(uuid=elem[0])
            elif elem[3] == "pf":
                node = powerflow_results.get_node(uuid=elem[0])
            value = None
            if elem[2] == "mag":  # elem_data[2] = "mag" or "phase"
                value = np.absolute(node.voltage)
            elif elem[2] == "phase":
                value = np.angle(node.voltage)
            data[idx] = value
    VillasNodeOutput["data"] = data
    # VILLASnode expects a JSON array of messages.
    return "[" + dumps(VillasNodeOutput) + "]"
def find_org_rooms(dbs, user_id, meeting_date):
    """
    Return the organisations assignable to a user.

    :param dbs: SQLAlchemy session
    :param user_id: id of the user whose organisations are listed
    :param meeting_date: kept for API compatibility; no longer used (see note)
    :return: list of dicts with keys org_id, org_name, parent_id
    """
    orgs = dbs.query(SysOrg.id, SysOrg.org_name, SysOrg.parent_id)\
        .outerjoin(SysUserOrg, (SysUserOrg.org_id == SysOrg.id))\
        .filter(SysUserOrg.user_id == user_id).all()
    # NOTE: the original also queried boardrooms and that day's meetings here,
    # but neither result was ever used (the per-org room list was commented
    # out) — both were dead database round-trips and have been removed.
    lists = []
    for org in orgs:
        lists.append({
            'org_id': org.id,
            'org_name': org.org_name,
            'parent_id': org.parent_id
            # 'rooms': per-org boardroom list, currently disabled
        })
    return lists
def config_openapi(app: FastAPI, settings: ApiSettings):
    """Config openapi.

    Installs a cached custom OpenAPI schema generator on the app; when the
    fields extension is enabled, the /search responses are re-pointed at the
    ItemCollection schema.
    """
    def custom_openapi():
        """Config openapi."""
        # Return the cached schema if it was already generated.
        if app.openapi_schema:
            return app.openapi_schema
        openapi_schema = get_openapi(
            title="Arturo STAC API", version="0.1", routes=app.routes
        )
        if settings.api_extension_is_enabled(ApiExtensions.fields):
            # Point both GET and POST /search 200-responses at ItemCollection.
            openapi_schema["paths"]["/search"]["get"]["responses"]["200"]["content"][
                "application/json"
            ]["schema"] = {"$ref": "#/components/schemas/ItemCollection"}
            openapi_schema["paths"]["/search"]["post"]["responses"]["200"]["content"][
                "application/json"
            ]["schema"] = {"$ref": "#/components/schemas/ItemCollection"}
        app.openapi_schema = openapi_schema
        return app.openapi_schema
    app.openapi = custom_openapi
def test_sorter_factory_returns_bubble_sorter_when_requested():
    """Test sorter factory returns bubble sorter when requested."""
    sorter_name = SortType.BUBBLE.value
    received_sorter = sorter_factory(sorter_name)
    # The factory must map the enum's value to the concrete BubbleSorter type.
    assert isinstance(received_sorter, BubbleSorter)
def update_anime_info(name, new):
    """
    Update information of an anime with a dict containing new information.

    params:
        name    Name of the anime you want to update
        new     A dict of new information; any value whose key name starts
                with new_ replaces the corresponding item (key minus prefix)
    """
    db = opendb()
    try:
        anime = tinydb.Query()
        info = db.get(anime.name == name)
        if info is None:
            # BUG FIX: a missing record previously caused a TypeError when
            # assigning into `info`; report and bail out instead.
            print('\nNo anime named {!r} found, nothing updated'.format(name))
            return
        try:
            print('\nUpdating {}:'.format(name))
        except UnicodeEncodeError:
            # Consoles with narrow encodings (e.g. GBK) may fail to render
            # the title; degrade gracefully instead of crashing.
            print('\nUpdating {}:'.format(name.encode('gbk', 'ignore')))
            print('Unicode Encode Error raised')
        for key in new:
            if key.startswith('new_'):
                new_key = key[4:]
                info[new_key] = new[key]
                print('{} is replaced with {}'.format(new_key, new[key]))
        db.update(info, anime.name == name)
    finally:
        # BUG FIX: the database handle leaked if anything above raised.
        db.close()
def twelve_tone_matrix(
        row: Sequence,
        ) -> DataFrame:
    """ Returns a twelve-tone matrix in the form of a Pandas DataFrame.

    The first row of the matrix is the given tone row; each further row is a
    transposition of it chosen from the inversion of the row (standard P/I
    matrix construction, arithmetic mod 12).
    """
    # Inversion of the row, transposed so it starts on the row's first pitch.
    inverted_row = inversion(row)
    inv_mat = transposition(inverted_row, row[0]-inverted_row[0])
    new = [row]
    # Row i of the matrix is the prime row transposed to start on inv_mat[i].
    for i in range(1, 12):
        k = transposition(row, (inv_mat[i] - row[0]) % 12)
        new.append(k)
    m = reshape(new, (12, 12))
    df = DataFrame(m)
    return df
async def upstream_http_exception_handler(request, exc: HTTPError):
    """Handle http exceptions from upstream server

    Logs the upstream failure, wraps it in a 502 Bad Gateway HTTPException,
    and delegates rendering to the standard handler.
    """
    logger.warning(f"Upstream HTTP error [{request.query_params['url']}]: {repr(exc)}")
    # Convert to FastApi exception
    exc = HTTPException(502, f"Upstream server returned: [{exc.status}] {exc.message}")
    return await http_exception_handler(request, exc)
def extract_latest_checkpoint_and_epoch(available_files: List[Path]) -> PathAndEpoch:
    """
    Checkpoints are saved as recovery_epoch={epoch}.ckpt; pick the checkpoint
    with the highest epoch number.

    :param available_files: all available checkpoints
    :return: path of the checkpoint from the latest epoch and the epoch number
    """
    # Pull the first run of digits out of each file stem as its epoch number.
    epochs = [int(re.findall(r"[\d]+", path.stem)[0]) for path in available_files]
    latest = int(np.argmax(epochs))
    return available_files[latest], epochs[latest]
def bark_filter_banks(nfilts=20,
                      nfft=512,
                      fs=16000,
                      low_freq=0,
                      high_freq=None,
                      scale="constant"):
    """
    Compute Bark-filterbanks. The filters are stored in the rows, the columns
    correspond to fft bins.

    Args:
        nfilts (int) : the number of filters in the filterbank.
                       (Default 20)
        nfft (int) : the FFT size.
                     (Default is 512)
        fs (int) : sample rate/ sampling frequency of the signal.
                   (Default 16000 Hz)
        low_freq (int) : lowest band edge of mel filters.
                         (Default 0 Hz)
        high_freq (int) : highest band edge of mel filters.
                          (Default samplerate/2)
        scale (str) : choose if max bins amplitudes ascend, descend or are constant (=1).
                      Default is "constant"

    Returns:
        a numpy array of size nfilts * (nfft/2 + 1) containing filterbank.
        Each row holds 1 filter.

    Raises:
        ParameterError: if low_freq is negative or high_freq exceeds fs/2.
    """
    # init freqs
    high_freq = high_freq or fs / 2
    low_freq = low_freq or 0
    # run checks
    if low_freq < 0:
        raise ParameterError(ErrorMsgs["low_freq"])
    if high_freq > (fs / 2):
        raise ParameterError(ErrorMsgs["high_freq"])
    # compute points evenly spaced in Bark scale (points are in Bark)
    low_bark = hz2bark(low_freq)
    high_bark = hz2bark(high_freq)
    # nfilts + 4 edge points: each filter spans 4 consecutive edges.
    bark_points = np.linspace(low_bark, high_bark, nfilts + 4)
    # we use fft bins, so we have to convert from Bark to fft bin number
    bins = np.floor(bark2fft(bark_points, fs, nfft))
    fbank = np.zeros([nfilts, nfft // 2 + 1])
    # init scaler: starting amplitude is 1 for descending/constant, 0 for ascending.
    if scale == "descendant" or scale == "constant":
        c = 1
    else:
        c = 0
    for j in range(2, nfilts + 2):
        # compute scaler, clamped to [0, 1]; "constant" leaves c at 1.
        if scale == "descendant":
            c -= 1 / nfilts
            c = c * (c > 0) + 0 * (c < 0)
        elif scale == "ascendant":
            c += 1 / nfilts
            c = c * (c < 1) + 1 * (c > 1)
        # Fill filter j-2 over the fft bins it spans, shaped by Fm around
        # its centre frequency (in Bark).
        for i in range(int(bins[j - 2]), int(bins[j + 2])):
            fc = bark_points[j]
            fb = fft2bark(i, fs, nfft)
            fbank[j - 2, i] = c * Fm(fb, fc)
    return np.abs(fbank)
def test_appgroup():
    """Test of with_appcontext.

    Commands created with AppGroup's with_appcontext=True must run inside an
    application context (current_app available), both at the top level and
    within nested sub-groups.
    """
    @click.group(cls=AppGroup)
    def cli():
        pass

    @cli.command(with_appcontext=True)
    def test():
        click.echo(current_app.name)

    @cli.group()
    def subgroup():
        pass

    @subgroup.command(with_appcontext=True)
    def test2():
        click.echo(current_app.name)

    obj = ScriptInfo(create_app=lambda info: Flask("testappgroup"))
    runner = CliRunner()
    # Top-level command sees the app context.
    result = runner.invoke(cli, ['test'], obj=obj)
    assert result.exit_code == 0
    assert result.output == 'testappgroup\n'
    # Nested sub-group command sees it too.
    result = runner.invoke(cli, ['subgroup', 'test2'], obj=obj)
    assert result.exit_code == 0
    assert result.output == 'testappgroup\n'
def auth_check_response_fixture():
    """Define a fixture to return a successful authorization check.

    Parses the canned JSON payload in fixtures/auth_check_response.json.
    """
    return json.loads(load_fixture("auth_check_response.json"))
def cancel_session(session_id):
    """
    Cancel all tasks within a session

    Args:
        string: session_id

    Returns:
        dict: results — one "cancelled_<state>" count per cancelled state,
        plus the overall total.
    """
    lambda_response = {}
    all_cancelled_tasks = []
    # Cancel state-by-state so the response can report a per-state count.
    for state in task_states_to_cancel:
        res = cancel_tasks_by_status(session_id, state)
        print("Cancelling session: {} status: {} result: {}".format(
            session_id, state, res))
        lambda_response["cancelled_{}".format(state)] = len(res)
        all_cancelled_tasks += res
    # NOTE(review): "tatal" is a typo for "total", but the key is part of the
    # returned payload — fixing it would break any consumer reading this key.
    lambda_response["tatal_cancelled_tasks"] = len(all_cancelled_tasks)
    return(lambda_response)
def tensor2tensor(tensor):
    """Convert a TensorFlow tensor to PyTorch Tensor, or vice versa.

    NOTE(review): unimplemented stub — the body is `...`, so calling this
    currently returns None regardless of input.
    """
    ...
def valid_capture_area(top_left, bottom_right):
    """Check the capture area extents for sanity.

    Valid means bottom_right lies strictly below and to the right of
    top_left. Prints the verdict (errors go to stderr) and returns a bool.
    """
    tl_x, tl_y = top_left['x'], top_left['y']
    br_x, br_y = bottom_right['x'], bottom_right['y']
    if br_x <= tl_x or br_y <= tl_y:
        print('The capture area ({},{}) ({},{}) '
              'is invalid.'.format(tl_x, tl_y, br_x, br_y),
              file=sys.stderr)
        return False
    print('Capture area: ({},{}) ({},{})'.format(tl_x, tl_y, br_x, br_y))
    return True
def get_cli_args():
    """Build the CLI parser and return the parsed command-line arguments."""
    arg_parser = ArgumentParser(description='Check modules formatting')
    # Positional: the file to check.
    arg_parser.add_argument('filepath', help='path to a file to check')
    # Optional switches; the boolean ones default to False via store_true.
    arg_parser.add_argument('-n', '--fqcn',
                            dest='fqcn', metavar='FQCN',
                            default=False, required=False,
                            help='FQCN to check examples')
    arg_parser.add_argument('-c', '--comments',
                            dest='check_comments',
                            action='store_true', required=False,
                            help='check comments')
    arg_parser.add_argument('-l', '--length',
                            dest='check_length',
                            action='store_true', required=False,
                            help='check description length')
    arg_parser.add_argument('-s', '--spelling',
                            dest='check_spelling',
                            action='store_true', required=False,
                            help='check spelling')
    return arg_parser.parse_args()
def __virtual__():
    """
    Determine whether or not to load this module

    Presumably a Salt-style loader hook: returning the module-level
    __virtualname__ unconditionally means the module always loads under
    that name.
    """
    return __virtualname__
def load_glove_embeddings():
    """
    Load the glove embeddings into a array and a dictionary with words as
    keys and their associated index as the value. Assumes the glove
    embeddings are located in the same directory and named "glove.6B.50d.txt"

    RETURN: embeddings: the array containing word vectors
            word_index_dict: a dictionary matching a word in string form to
            its index in the embeddings array. e.g. {"apple": 119"}
    """
    #if you are running on the CSE machines, you can load the glove data from here
    #data = open("/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt",'r',encoding="utf-8")
    data = open("glove.6B.50d.txt",'r',encoding="utf-8")
    word_index_dict = {}
    # Index 0 is reserved for unknown words.
    word_index_dict['UNK'] = 0
    # NOTE(review): the second dimension uses the module-level `batch_size`;
    # for 50-d GloVe vectors this should presumably be 50 — confirm.
    # NOTE(review): np.ndarray() leaves the array uninitialized, so row 0
    # (the 'UNK' slot) holds arbitrary memory — consider np.zeros instead.
    embeddings = np.ndarray(shape=(500001, batch_size), dtype='float32')
    embeddings_list = []  # NOTE(review): never used — candidate for removal.
    i = 1
    for line in data:
        load_array = line.split()
        # Sets the word to the 0th value in array
        word = load_array[0]
        # Other values are the assigned index
        values = np.asarray(load_array[1:], dtype='float32')
        # Put values in row of array
        embeddings[i] = values
        # E.g. word_index_dict["the"] = 0
        word_index_dict[word] = i
        i = i+1
    data.close()
    return embeddings, word_index_dict
def _f2_rsub_ ( self , other ) :
    """Operator for ``2D-function - other''

    Reflected subtraction hook: delegates to the generic binary-operation
    helper using Ostap's ``Subtraction`` node.
    NOTE(review): despite being the reflected operator, ``self`` is passed
    first to ``_f2_rop_`` -- confirm the helper swaps operands for the
    ``other - self`` semantics expected of ``__rsub__``.
    """
    return _f2_rop_ ( self , other , Ostap.MoreRooFit.Subtraction , "Subtract_" )
def attenuate(source, factor=0.01, duration=1.0, srate=None):
    """Exponential attenuation towards target value within 'factor' in time 'duration' for constant signals.

    Args:
        source: input signal fed to the one-pole filter.
        factor: residual fraction of the initial offset that should remain
            after 'duration' (0.01 -> attenuated to 1%).
        duration: attenuation horizon, presumably in seconds -- TODO confirm.
        srate: sample rate; defaults to the global rate from get_srate().

    NOTE(review): the per-sample pole coefficient is factor ** (srate / duration);
    for the documented "reach 'factor' after 'duration'" semantics one would
    expect factor ** (1.0 / (srate * duration)). Confirm against onepole()'s
    coefficient convention before changing.
    """
    if srate is None:
        srate = get_srate()
    return onepole(source, 1.0, -factor ** (srate / duration), 1.0 - factor ** (srate / duration))
def test_nonindexed_dimensions_restored():
    """When the selection removes a dimension, xarray.expand_dims does not expand the non-indexed
    dimensions that were removed. For example, if one selects only a single zplane, it reduce the z
    physical coordinate to a coordinate scalar, and not an array of size 1. This verifies that the
    workaround we introduced to restore the dependent axes's labels is in place.
    """
    stack = synthetic_stack(num_round=5, num_ch=5, num_z=15, tile_height=200, tile_width=200)
    selectors = [
        {Axes.ROUND: 0, Axes.CH: 2, Axes.ZPLANE: 5},
        {Axes.ROUND: (0, 3), Axes.CH: 2, Axes.ZPLANE: 5},
        {Axes.CH: (None, 3), Axes.ZPLANE: 5},
    ]
    axis_pairs = [
        (Axes.X, Coordinates.X),
        (Axes.Y, Coordinates.Y),
        (Axes.ZPLANE, Coordinates.Z),
    ]
    for selector in selectors:
        selected = stack.sel(selector).xarray
        # Each physical coordinate must keep the same length as its pixel
        # axis, even when the selection collapsed a dimension.
        for pixel_axis, physical_axis in axis_pairs:
            assert len(selected[pixel_axis.value]) == len(selected[physical_axis.value])
def get_parser(name):
    """Build an ArgumentParser that always displays argument defaults.

    Args:
        name: program name shown in the usage string.

    Returns:
        argparse.ArgumentParser whose ``add_argument`` is pre-bound with a
        blank help string, because ArgumentDefaultsHelpFormatter only
        appends defaults when a help string is present.
    """
    parser = argparse.ArgumentParser(
        name, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument = partial(parser.add_argument, help=" ")
    return parser
def IsMultiPanel(hcuts, vcuts) -> bool:
    """
    Return True when any horizontal or vertical cut lines were detected,
    i.e. the image appears to be composed of multiple panels.
    Could have more logic.
    """
    has_any_cut = bool(hcuts) or bool(vcuts)
    return has_any_cut
def is_subject_mutable(context, subject):
    """Return True if the subject is mutable in this context.

    Admins may always mutate; otherwise both owners must be known and equal.
    """
    # Administrators bypass ownership checks entirely.
    if context.is_admin:
        return True
    # With an unknown owner on either side, deny mutation.
    if subject.owner is None or context.owner is None:
        return False
    # Only the owner may mutate their own subject.
    return context.owner == subject.owner
def time_filter(df, start_date, end_date):
    """Restrict a datetime-indexed frame to the inclusive [start, end] window.

    Args:
        df: DataFrame (or Series) with a DatetimeIndex.
        start_date: inclusive lower bound as 'YYYY-MM-DD', or falsy to skip.
        end_date: inclusive upper bound as 'YYYY-MM-DD', or falsy to skip.

    Returns:
        The rows whose index falls within the supplied bounds; ``df`` itself
        when neither bound is given.
    """
    mask = None
    if start_date:
        lower = datetime.strptime(start_date, '%Y-%m-%d')
        mask = df.index >= lower
    if end_date:
        upper = datetime.strptime(end_date, '%Y-%m-%d')
        upper_mask = df.index <= upper
        mask = upper_mask if mask is None else (mask & upper_mask)
    return df if mask is None else df[mask]
def update_post(post_dict):
    """Update an existing article, or create one, from a post payload.

    (Original docstring: 更新文章 -- "update the article".)

    Reads 'category', 'article_id', 'title', 'body', 'username', 'show' and
    'from_mail' from post_dict. Creates the category on first use, updates
    the article fields, commits, then mails a confirmation to the admin and
    (when different) to the submitting address.
    """
    category = post_dict.get('category')
    category_query_result = Category.query.filter_by(name=category).first()
    if category_query_result is None:
        # First time this category is seen: persist it before linking.
        category_item = Category(name=category)
        db.session.add(category_item)
        db.session.commit()
    article_id = post_dict.get('article_id')
    article = Article.query.get(article_id)
    if not article:
        article = Article()
        # NOTE(review): this new Article is never db.session.add()-ed, so
        # the commit below may not persist it -- confirm against the app's
        # SQLAlchemy session configuration.
    # Fall back to the article's current values when a key is missing.
    article.title = post_dict.get('title', article.title)
    article.body = post_dict.get('body', article.body)
    article.category = Category.query.filter_by(name=category).first()
    article.author = post_dict.get('username', article.author)
    article.show = post_dict.get('show', article.show)
    db.session.commit()
    # Always notify the admin of the successful post.
    send_mail(subject='Tblog: Post Success',
              to=current_app.config['TBLOG_ADMIN_MAIL'],
              html='<h1>Note</h1>'
                   '<p>Post Success By: <b>{mail}</b></p>'
                   '<h1>Main body</h1>'
                   '{body}'.format(mail=post_dict['from_mail'],
                                   body=post_dict['body']))
    # Also notify the author, unless the author IS the admin.
    if post_dict.get('from_mail') != current_app.config['TBLOG_ADMIN_MAIL']:
        send_mail(subject='Tblog: Post Success',
                  to=post_dict.get('from_mail'),
                  html='<h1>Note</h1>'
                       '<p>Post Success By: <b>{mail}</b></p>'.format(
                           mail=post_dict['from_mail']))
def notifications():
    """
    Fetches the notifications events that occurred between the given block numbers

    URL Params:
        min_block_number: (int) The start block number for querying for notifications
        max_block_number?: (int) The end block number for querying for notifications
        track_id?: (Array<int>) Array of track id for fetching the track's owner id
            and adding the track id to owner user id mapping to the `owners` response field
            NOTE: this is added for notification for listen counts

    Response - Json object w/ the following fields
        notifications: Array of notifications of shape:
            type: 'Follow' | 'Favorite' | 'Repost' | 'Create' | 'RemixCreate' | 'RemixCosign' | 'PlaylistUpdate'
            blocknumber: (int) blocknumber of notification
            timestamp: (string) timestamp of notification
            initiator: (int) the user id that caused this notification
            metadata?: (any) additional information about the notification
                entity_id?: (int) the id of the target entity (ie. playlist id of a playlist that is reposted)
                entity_type?: (string) the type of the target entity
                entity_owner_id?: (int) the id of the target entity's owner (if applicable)
                playlist_update_timestamp?: (string) timestamp of last update of a given playlist
                playlist_update_users?: (array<int>) user ids which favorited a given playlist
        info: Dictionary of metadata w/ min_block_number & max_block_number fields
        milestones: Dictionary mapping of follows/reposts/favorites (processed within the blocks params)
            Root fields:
                follower_counts: Contains a dictionary of user id => follower count (up to the max_block_number)
                repost_counts: Contains a dictionary tracks/albums/playlists of id to repost count
                favorite_counts: Contains a dictionary tracks/albums/playlists of id to favorite count
        owners: Dictionary containing the mapping for track id / playlist id / album -> owner user id
            The root keys are 'tracks', 'playlists', 'albums' and each contains the id to owner id mapping
    """
    db = get_db_read_replica()
    min_block_number = request.args.get("min_block_number", type=int)
    max_block_number = request.args.get("max_block_number", type=int)
    track_ids_to_owner = []
    try:
        track_ids_str_list = request.args.getlist("track_id")
        track_ids_to_owner = [int(y) for y in track_ids_str_list]
    except Exception as e:
        logger.error(f"Failed to retrieve track list {e}")
    # Max block number is not explicitly required (yet)
    # min_block_number of 0 is explicitly allowed; only None/missing is rejected.
    if not min_block_number and min_block_number != 0:
        return api_helpers.error_response({"msg": "Missing min block number"}, 400)
    # Clamp the queried window to at most max_block_diff blocks.
    if not max_block_number:
        max_block_number = min_block_number + max_block_diff
    elif (max_block_number - min_block_number) > max_block_diff:
        max_block_number = min_block_number + max_block_diff
    with db.scoped_session() as session:
        # Never query past the chain head (the current block).
        current_block_query = session.query(Block).filter_by(is_current=True)
        current_block_query_results = current_block_query.all()
        current_block = current_block_query_results[0]
        current_max_block_num = current_block.number
        if current_max_block_num < max_block_number:
            max_block_number = current_max_block_num
    notification_metadata = {
        "min_block_number": min_block_number,
        "max_block_number": max_block_number,
    }
    # Retrieve milestones statistics
    milestone_info = {}
    # Cache owner info for network entities and pass in w/results
    owner_info = {const.tracks: {}, const.albums: {}, const.playlists: {}}
    # List of notifications generated from current protocol state
    notifications_unsorted = []
    with db.scoped_session() as session:
        #
        # Query relevant follow information
        #
        follow_query = session.query(Follow)
        # Impose min block number restriction
        follow_query = follow_query.filter(
            Follow.is_current == True,
            Follow.is_delete == False,
            Follow.blocknumber > min_block_number,
            Follow.blocknumber <= max_block_number,
        )
        follow_results = follow_query.all()
        # Used to retrieve follower counts for this window
        followed_users = []
        # Represents all follow notifications
        follow_notifications = []
        for entry in follow_results:
            follow_notif = {
                const.notification_type: const.notification_type_follow,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.follower_user_id,
                const.notification_metadata: {
                    const.notification_follower_id: entry.follower_user_id,
                    const.notification_followee_id: entry.followee_user_id,
                },
            }
            follow_notifications.append(follow_notif)
            # Add every user who gained a new follower
            followed_users.append(entry.followee_user_id)
        # Query count for any user w/new followers
        follower_counts = get_follower_count_dict(
            session, followed_users, max_block_number
        )
        milestone_info["follower_counts"] = follower_counts
        notifications_unsorted.extend(follow_notifications)
        #
        # Query relevant favorite information
        #
        favorites_query = session.query(Save)
        favorites_query = favorites_query.filter(
            Save.is_current == True,
            Save.is_delete == False,
            Save.blocknumber > min_block_number,
            Save.blocknumber <= max_block_number,
        )
        favorite_results = favorites_query.all()
        # ID lists to query count aggregates
        favorited_track_ids = []
        favorited_album_ids = []
        favorited_playlist_ids = []
        # List of favorite notifications
        favorite_notifications = []
        favorite_remix_tracks = []
        for entry in favorite_results:
            favorite_notif = {
                const.notification_type: const.notification_type_favorite,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.user_id,
            }
            save_type = entry.save_type
            save_item_id = entry.save_item_id
            metadata = {
                const.notification_entity_type: save_type,
                const.notification_entity_id: save_item_id,
            }
            # NOTE if deleted, the favorite can still exist
            # TODO: Can we aggregate all owner queries and perform at once...?
            if save_type == SaveType.track:
                owner_id = get_owner_id(session, "track", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_track_ids.append(save_item_id)
                owner_info[const.tracks][save_item_id] = owner_id
                # Candidate for a remix-cosign notification (owner favorited a remix).
                favorite_remix_tracks.append(
                    {
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        "user_id": entry.user_id,
                        "item_owner_id": owner_id,
                        "item_id": save_item_id,
                    }
                )
            elif save_type == SaveType.album:
                owner_id = get_owner_id(session, "album", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_album_ids.append(save_item_id)
                owner_info[const.albums][save_item_id] = owner_id
            elif save_type == SaveType.playlist:
                owner_id = get_owner_id(session, "playlist", save_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                favorited_playlist_ids.append(save_item_id)
                owner_info[const.playlists][save_item_id] = owner_id
            favorite_notif[const.notification_metadata] = metadata
            favorite_notifications.append(favorite_notif)
        notifications_unsorted.extend(favorite_notifications)
        # Aggregate favorite counts for milestone reporting.
        track_favorite_dict = {}
        album_favorite_dict = {}
        playlist_favorite_dict = {}
        if favorited_track_ids:
            track_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_track_ids,
                [SaveType.track],
                max_block_number,
            )
            track_favorite_dict = dict(track_favorite_counts)
            favorite_remix_notifications = get_cosign_remix_notifications(
                session, max_block_number, favorite_remix_tracks
            )
            notifications_unsorted.extend(favorite_remix_notifications)
        if favorited_album_ids:
            album_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_album_ids,
                [SaveType.album],
                max_block_number,
            )
            album_favorite_dict = dict(album_favorite_counts)
        if favorited_playlist_ids:
            playlist_favorite_counts = get_save_counts(
                session,
                False,
                False,
                favorited_playlist_ids,
                [SaveType.playlist],
                max_block_number,
            )
            playlist_favorite_dict = dict(playlist_favorite_counts)
        milestone_info[const.notification_favorite_counts] = {}
        milestone_info[const.notification_favorite_counts][
            const.tracks
        ] = track_favorite_dict
        milestone_info[const.notification_favorite_counts][
            const.albums
        ] = album_favorite_dict
        milestone_info[const.notification_favorite_counts][
            const.playlists
        ] = playlist_favorite_dict
        #
        # Query relevant tier change information
        #
        balance_change_query = session.query(UserBalanceChange)
        # Impose min block number restriction
        balance_change_query = balance_change_query.filter(
            UserBalanceChange.blocknumber > min_block_number,
            UserBalanceChange.blocknumber <= max_block_number,
        )
        balance_change_results = balance_change_query.all()
        tier_change_notifications = []
        for entry in balance_change_results:
            prev = int(entry.previous_balance)
            current = int(entry.current_balance)
            # Check for a tier change and add to tier_change_notification
            # Thresholds checked highest-first so only the top crossed tier fires.
            tier = None
            if prev < 100000 <= current:
                tier = "platinum"
            elif prev < 10000 <= current:
                tier = "gold"
            elif prev < 100 <= current:
                tier = "silver"
            elif prev < 10 <= current:
                tier = "bronze"
            if tier is not None:
                tier_change_notif = {
                    const.notification_type: const.notification_type_tier_change,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: datetime.now(),
                    const.notification_initiator: entry.user_id,
                    const.notification_metadata: {
                        const.notification_tier: tier,
                    },
                }
                tier_change_notifications.append(tier_change_notif)
        notifications_unsorted.extend(tier_change_notifications)
        #
        # Query relevant repost information
        #
        repost_query = session.query(Repost)
        repost_query = repost_query.filter(
            Repost.is_current == True,
            Repost.is_delete == False,
            Repost.blocknumber > min_block_number,
            Repost.blocknumber <= max_block_number,
        )
        repost_results = repost_query.all()
        # ID lists to query counts
        reposted_track_ids = []
        reposted_album_ids = []
        reposted_playlist_ids = []
        # List of repost notifications
        repost_notifications = []
        # List of repost notifications
        repost_remix_notifications = []
        repost_remix_tracks = []
        for entry in repost_results:
            repost_notif = {
                const.notification_type: const.notification_type_repost,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.user_id,
            }
            repost_type = entry.repost_type
            repost_item_id = entry.repost_item_id
            metadata = {
                const.notification_entity_type: repost_type,
                const.notification_entity_id: repost_item_id,
            }
            if repost_type == RepostType.track:
                owner_id = get_owner_id(session, "track", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_track_ids.append(repost_item_id)
                owner_info[const.tracks][repost_item_id] = owner_id
                # Candidate for a remix-cosign notification (owner reposted a remix).
                repost_remix_tracks.append(
                    {
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        "user_id": entry.user_id,
                        "item_owner_id": owner_id,
                        "item_id": repost_item_id,
                    }
                )
            elif repost_type == RepostType.album:
                owner_id = get_owner_id(session, "album", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_album_ids.append(repost_item_id)
                owner_info[const.albums][repost_item_id] = owner_id
            elif repost_type == RepostType.playlist:
                owner_id = get_owner_id(session, "playlist", repost_item_id)
                if not owner_id:
                    continue
                metadata[const.notification_entity_owner_id] = owner_id
                reposted_playlist_ids.append(repost_item_id)
                owner_info[const.playlists][repost_item_id] = owner_id
            repost_notif[const.notification_metadata] = metadata
            repost_notifications.append(repost_notif)
        # Append repost notifications
        notifications_unsorted.extend(repost_notifications)
        track_repost_count_dict = {}
        album_repost_count_dict = {}
        playlist_repost_count_dict = {}
        # Aggregate repost counts for relevant fields
        # Used to notify users of entity-specific milestones
        if reposted_track_ids:
            track_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_track_ids,
                [RepostType.track],
                max_block_number,
            )
            track_repost_count_dict = dict(track_repost_counts)
            repost_remix_notifications = get_cosign_remix_notifications(
                session, max_block_number, repost_remix_tracks
            )
            notifications_unsorted.extend(repost_remix_notifications)
        if reposted_album_ids:
            album_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_album_ids,
                [RepostType.album],
                max_block_number,
            )
            album_repost_count_dict = dict(album_repost_counts)
        if reposted_playlist_ids:
            playlist_repost_counts = get_repost_counts(
                session,
                False,
                False,
                reposted_playlist_ids,
                [RepostType.playlist],
                max_block_number,
            )
            playlist_repost_count_dict = dict(playlist_repost_counts)
        milestone_info[const.notification_repost_counts] = {}
        milestone_info[const.notification_repost_counts][
            const.tracks
        ] = track_repost_count_dict
        milestone_info[const.notification_repost_counts][
            const.albums
        ] = album_repost_count_dict
        milestone_info[const.notification_repost_counts][
            const.playlists
        ] = playlist_repost_count_dict
        # Query relevant created entity notification - tracks/albums/playlists
        created_notifications = []
        #
        # Query relevant created tracks for remix information
        #
        remix_created_notifications = []
        # Aggregate track notifs
        tracks_query = session.query(Track)
        # TODO: Is it valid to use Track.is_current here? Might not be the right info...
        tracks_query = tracks_query.filter(
            Track.is_unlisted == False,
            Track.is_delete == False,
            Track.stem_of == None,
            Track.blocknumber > min_block_number,
            Track.blocknumber <= max_block_number,
        )
        # created_at == updated_at distinguishes brand-new tracks from edits.
        tracks_query = tracks_query.filter(Track.created_at == Track.updated_at)
        track_results = tracks_query.all()
        for entry in track_results:
            track_notif = {
                const.notification_type: const.notification_type_create,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.owner_id,
                # TODO: is entity owner id necessary for tracks?
                const.notification_metadata: {
                    const.notification_entity_type: "track",
                    const.notification_entity_id: entry.track_id,
                    const.notification_entity_owner_id: entry.owner_id,
                },
            }
            created_notifications.append(track_notif)
            if entry.remix_of:
                # Add notification to remix track owner
                parent_remix_tracks = [
                    t["parent_track_id"] for t in entry.remix_of["tracks"]
                ]
                remix_track_parents = (
                    session.query(Track.owner_id, Track.track_id)
                    .filter(
                        Track.track_id.in_(parent_remix_tracks),
                        Track.is_unlisted == False,
                        Track.is_delete == False,
                        Track.is_current == True,
                    )
                    .all()
                )
                for remix_track_parent in remix_track_parents:
                    [
                        remix_track_parent_owner,
                        remix_track_parent_id,
                    ] = remix_track_parent
                    remix_notif = {
                        const.notification_type: const.notification_type_remix_create,
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        const.notification_initiator: entry.owner_id,
                        # TODO: is entity owner id necessary for tracks?
                        const.notification_metadata: {
                            const.notification_entity_type: "track",
                            const.notification_entity_id: entry.track_id,
                            const.notification_entity_owner_id: entry.owner_id,
                            const.notification_remix_parent_track_user_id: remix_track_parent_owner,
                            const.notification_remix_parent_track_id: remix_track_parent_id,
                        },
                    }
                    remix_created_notifications.append(remix_notif)
        # Handle track update notifications
        # TODO: Consider switching blocknumber for updated at?
        updated_tracks_query = session.query(Track)
        updated_tracks_query = updated_tracks_query.filter(
            Track.is_unlisted == False,
            Track.stem_of == None,
            Track.created_at != Track.updated_at,
            Track.blocknumber > min_block_number,
            Track.blocknumber <= max_block_number,
        )
        updated_tracks = updated_tracks_query.all()
        for entry in updated_tracks:
            prev_entry_query = (
                session.query(Track)
                .filter(
                    Track.track_id == entry.track_id,
                    Track.blocknumber < entry.blocknumber,
                )
                .order_by(desc(Track.blocknumber))
            )
            # Previous unlisted entry indicates transition to public, triggering a notification
            # NOTE(review): .first() may return None for a track with no prior
            # revision in range, which would raise AttributeError below -- confirm.
            prev_entry = prev_entry_query.first()
            # Tracks that were unlisted and turned to public
            if prev_entry.is_unlisted == True:
                track_notif = {
                    const.notification_type: const.notification_type_create,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: entry.created_at,
                    const.notification_initiator: entry.owner_id,
                    # TODO: is entity owner id necessary for tracks?
                    const.notification_metadata: {
                        const.notification_entity_type: "track",
                        const.notification_entity_id: entry.track_id,
                        const.notification_entity_owner_id: entry.owner_id,
                    },
                }
                created_notifications.append(track_notif)
            # Tracks that were not remixes and turned into remixes
            if not prev_entry.remix_of and entry.remix_of:
                # Add notification to remix track owner
                parent_remix_tracks = [
                    t["parent_track_id"] for t in entry.remix_of["tracks"]
                ]
                remix_track_parents = (
                    session.query(Track.owner_id, Track.track_id)
                    .filter(
                        Track.track_id.in_(parent_remix_tracks),
                        Track.is_unlisted == False,
                        Track.is_delete == False,
                        Track.is_current == True,
                    )
                    .all()
                )
                for remix_track_parent in remix_track_parents:
                    [
                        remix_track_parent_owner,
                        remix_track_parent_id,
                    ] = remix_track_parent
                    remix_notif = {
                        const.notification_type: const.notification_type_remix_create,
                        const.notification_blocknumber: entry.blocknumber,
                        const.notification_timestamp: entry.created_at,
                        const.notification_initiator: entry.owner_id,
                        # TODO: is entity owner id necessary for tracks?
                        const.notification_metadata: {
                            const.notification_entity_type: "track",
                            const.notification_entity_id: entry.track_id,
                            const.notification_entity_owner_id: entry.owner_id,
                            const.notification_remix_parent_track_user_id: remix_track_parent_owner,
                            const.notification_remix_parent_track_id: remix_track_parent_id,
                        },
                    }
                    remix_created_notifications.append(remix_notif)
        notifications_unsorted.extend(remix_created_notifications)
        # Aggregate playlist/album notifs
        collection_query = session.query(Playlist)
        # TODO: Is it valid to use is_current here? Might not be the right info...
        collection_query = collection_query.filter(
            Playlist.is_delete == False,
            Playlist.is_private == False,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )
        collection_query = collection_query.filter(
            Playlist.created_at == Playlist.updated_at
        )
        collection_results = collection_query.all()
        for entry in collection_results:
            collection_notif = {
                const.notification_type: const.notification_type_create,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.playlist_owner_id,
            }
            metadata = {
                const.notification_entity_id: entry.playlist_id,
                const.notification_entity_owner_id: entry.playlist_owner_id,
                const.notification_collection_content: entry.playlist_contents,
            }
            if entry.is_album:
                metadata[const.notification_entity_type] = "album"
            else:
                metadata[const.notification_entity_type] = "playlist"
            collection_notif[const.notification_metadata] = metadata
            created_notifications.append(collection_notif)
        # Playlists that were private and turned to public aka 'published'
        # TODO: Consider switching blocknumber for updated at?
        publish_playlists_query = session.query(Playlist)
        publish_playlists_query = publish_playlists_query.filter(
            Playlist.is_private == False,
            Playlist.created_at != Playlist.updated_at,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )
        publish_playlist_results = publish_playlists_query.all()
        for entry in publish_playlist_results:
            prev_entry_query = (
                session.query(Playlist)
                .filter(
                    Playlist.playlist_id == entry.playlist_id,
                    Playlist.blocknumber < entry.blocknumber,
                )
                .order_by(desc(Playlist.blocknumber))
            )
            # Previous private entry indicates transition to public, triggering a notification
            prev_entry = prev_entry_query.first()
            if prev_entry.is_private == True:
                publish_playlist_notif = {
                    const.notification_type: const.notification_type_create,
                    const.notification_blocknumber: entry.blocknumber,
                    const.notification_timestamp: entry.created_at,
                    const.notification_initiator: entry.playlist_owner_id,
                }
                metadata = {
                    const.notification_entity_id: entry.playlist_id,
                    const.notification_entity_owner_id: entry.playlist_owner_id,
                    const.notification_collection_content: entry.playlist_contents,
                    const.notification_entity_type: "playlist",
                }
                publish_playlist_notif[const.notification_metadata] = metadata
                created_notifications.append(publish_playlist_notif)
        notifications_unsorted.extend(created_notifications)
        # Get additional owner info as requested for listen counts
        tracks_owner_query = session.query(Track).filter(
            Track.is_current == True, Track.track_id.in_(track_ids_to_owner)
        )
        track_owner_results = tracks_owner_query.all()
        for entry in track_owner_results:
            owner = entry.owner_id
            track_id = entry.track_id
            owner_info[const.tracks][track_id] = owner
        # Get playlist updates
        # Only playlists touched in the last 30 days are considered.
        today = date.today()
        thirty_days_ago = today - timedelta(days=30)
        thirty_days_ago_time = datetime(
            thirty_days_ago.year, thirty_days_ago.month, thirty_days_ago.day, 0, 0, 0
        )
        playlist_update_query = session.query(Playlist)
        playlist_update_query = playlist_update_query.filter(
            Playlist.is_current == True,
            Playlist.is_delete == False,
            Playlist.last_added_to >= thirty_days_ago_time,
            Playlist.blocknumber > min_block_number,
            Playlist.blocknumber <= max_block_number,
        )
        playlist_update_results = playlist_update_query.all()
        # Represents all playlist update notifications
        playlist_update_notifications = []
        playlist_update_notifs_by_playlist_id = {}
        for entry in playlist_update_results:
            playlist_update_notifs_by_playlist_id[entry.playlist_id] = {
                const.notification_type: const.notification_type_playlist_update,
                const.notification_blocknumber: entry.blocknumber,
                const.notification_timestamp: entry.created_at,
                const.notification_initiator: entry.playlist_owner_id,
                const.notification_metadata: {
                    const.notification_entity_id: entry.playlist_id,
                    const.notification_entity_type: "playlist",
                    const.notification_playlist_update_timestamp: entry.last_added_to,
                },
            }
        # get all favorited playlists
        # playlists may have been favorited outside the blocknumber bounds
        # e.g. before the min_block_number
        playlist_favorites_query = session.query(Save)
        playlist_favorites_query = playlist_favorites_query.filter(
            Save.is_current == True,
            Save.is_delete == False,
            Save.save_type == SaveType.playlist,
        )
        playlist_favorites_results = playlist_favorites_query.all()
        # dictionary of playlist id => users that favorited said playlist
        # e.g. { playlist1: [user1, user2, ...], ... }
        # we need this dictionary to know which users need to be notified of a playlist update
        # NOTE: dict.update() returns None, so `... or accumulator` is what
        # actually threads the accumulator through the reduce.
        users_that_favorited_playlists_dict = ft.reduce(
            lambda accumulator, current: accumulator.update(
                {
                    current.save_item_id: accumulator[current.save_item_id]
                    + [current.user_id]
                    if current.save_item_id in accumulator
                    else [current.user_id]
                }
            )
            or accumulator,
            playlist_favorites_results,
            {},
        )
        for playlist_id in users_that_favorited_playlists_dict:
            # Only emit updates for playlists changed within this block window.
            if playlist_id not in playlist_update_notifs_by_playlist_id:
                continue
            playlist_update_notif = playlist_update_notifs_by_playlist_id[playlist_id]
            playlist_update_notif[const.notification_metadata].update(
                {
                    const.notification_playlist_update_users: users_that_favorited_playlists_dict[
                        playlist_id
                    ]
                }
            )
            playlist_update_notifications.append(playlist_update_notif)
        notifications_unsorted.extend(playlist_update_notifications)
    # Final sort - TODO: can we sort by timestamp?
    sorted_notifications = sorted(
        notifications_unsorted,
        key=lambda i: i[const.notification_blocknumber],
        reverse=False,
    )
    return api_helpers.success_response(
        {
            "notifications": sorted_notifications,
            "info": notification_metadata,
            "milestones": milestone_info,
            "owners": owner_info,
        }
    )
def collapse_umi(cells):
    """
    Input set of genotypes for each read
    Return list with one entry for each UMI, per cell barcode

    ``cells`` maps cell_barcode -> {umi -> [genotype, ...]}. UMIs whose
    reads disagree are discarded; the surviving per-UMI genotype calls are
    then reduced to [ref_count, alt_count] for each cell barcode.
    """
    collapsed_data = {}
    for cell_barcode, umi_set in cells.items():
        for genotypes in umi_set.values():
            # Keep only UMIs whose reads all agree on a single genotype.
            if len(set(genotypes)) <= 1:
                collapsed_data.setdefault(cell_barcode, []).append(genotypes[0])
    # count total ref, total alt UMIs for each genotype
    for barcode, calls in collapsed_data.items():
        collapsed_data[barcode] = [calls.count("ref"), calls.count("alt")]
        assert len(collapsed_data[barcode]) == 2
    return collapsed_data
def get_corrected_PRES(PRES: np.ndarray, ele_gap: float, TMP: np.ndarray) -> np.ndarray:
    """Elevation correction of atmospheric pressure.

    Args:
        PRES (np.ndarray): pressure before correction [hPa]
        ele_gap (float): elevation difference [m]
        TMP (np.ndarray): air temperature [deg C]

    Returns:
        np.ndarray: elevation-corrected pressure [hPa]

    Notes:
        Uses a mean temperature lapse rate of 0.0065 deg C/m
        (barometric formula with exponent 5.257).
    """
    lapse_rate = 0.0065  # [deg C/m]
    temperature_kelvin = TMP + 273.15
    correction_base = 1 - (ele_gap * lapse_rate) / temperature_kelvin
    return PRES * np.power(correction_base, 5.257)
def mishra_bird(x, *args):
    """Mishra's Bird constrained function with 2 parameters.

    To be used in the constrained optimization examples.
    When subject to:
        (x[0] + 5) ** 2 + (x[1] + 5) ** 2 < 25
    the global minimum is at f(-3.1302, -1.5821) = -106.7645
    Bounds: -10 <= x[0] <= 0
            -6.5 <= x[1] <= 0

    Reference:
        https://en.wikipedia.org/wiki/Test_functions_for_optimization
    """
    sin_term = np.sin(x[1]) * np.exp((1 - np.cos(x[0])) ** 2)
    cos_term = np.cos(x[0]) * np.exp((1 - np.sin(x[1])) ** 2)
    quadratic_term = (x[0] - x[1]) ** 2
    return sin_term + cos_term + quadratic_term
def find_target_migration_file(database=DEFAULT_DB_ALIAS, changelog_file=None):
    """Finds best matching target migration file

    Walks the Liquibase changelog chain: parses the changelog XML for the
    given database alias and, while the last element child is an <include>,
    recurses into its ``file`` attribute. Returns the deepest changelog
    whose last element child is not an <include> (or the file itself when
    it has no element children).

    Raises:
        InvalidChangelogFile: when the XML cannot be parsed or lacks a
            <databaseChangeLog> root node.
    """
    if not database:
        database = DEFAULT_DB_ALIAS
    if not changelog_file:
        changelog_file = get_changelog_file_for_database(database)
    try:
        doc = minidom.parse(changelog_file)
    except ExpatError as ex:
        raise InvalidChangelogFile(
            'Could not parse XML file %s: %s' % (changelog_file, ex))
    try:
        dbchglog = doc.getElementsByTagName('databaseChangeLog')[0]
    except IndexError:
        raise InvalidChangelogFile(
            'Missing <databaseChangeLog> node in file %s' % (
                changelog_file))
    else:
        # Only element children matter; text/comment nodes are filtered out.
        nodes = list(filter(lambda x: x.nodeType is x.ELEMENT_NODE,
                            dbchglog.childNodes))
        if not nodes:
            return changelog_file
        last_node = nodes[-1]
        if last_node.tagName == 'include':
            # NOTE(review): the included path is used as-is (resolved
            # relative to the CWD, not the including file) -- confirm this
            # matches how the changelogs are laid out.
            last_file = last_node.attributes.get('file').firstChild.data
            return find_target_migration_file(
                database=database, changelog_file=last_file)
        else:
            return changelog_file
def encode_message(ctl, addr, src_id, msg_code, data=""):
    """Encode a message for the PIM, assumes data formatted.

    Args:
        ctl: 16-bit control word, or -1 to derive one from the address type
            via create_control_word().
        addr: UPB address object providing ``is_link``, ``network_id`` and
            ``upb_id``.
        src_id: source device id byte.
        msg_code: UPB message-code byte.
        data: optional pre-formatted payload bytes.

    Returns:
        str: upper-case hex string of the packet, including the trailing
        two's-complement checksum byte.
    """
    if ctl == -1:
        ctl = create_control_word(addr.is_link)
    length = 7 + len(data)  # 2 ctl + net + dst + src + msg code + checksum
    ctl = ctl | (length << 8)  # packet length rides in the control word
    msg = bytearray(length)
    msg[0:2] = ctl.to_bytes(2, byteorder="big")
    msg[2] = addr.network_id
    msg[3] = addr.upb_id
    msg[4] = src_id
    msg[5] = msg_code
    if data:
        msg[6 : len(data) + 6] = data
    # Checksum: two's complement of the sum of all preceding bytes
    # (builtin sum replaces the former reduce(lambda x, y: x + y, msg)).
    msg[-1] = (256 - sum(msg)) % 256
    return msg.hex().upper()
def rule_16(l, r):
    """
    Rule for "vyaṁjana sandhi - ghośī karaṇaya"

    Voicing rule: when the left word ends in an unvoiced (agōṣa) letter and
    the right word starts with a voiced (gōṣa) letter, the trailing unvoiced
    letter is replaced by its voiced counterpart from
    ``AGOSHA_TO_GOSHA_MAPPING``; a leading vowel on the right is merged in
    as a diacritic instead of a standalone letter.
    :return: the joined string when the rule applies; otherwise falls
        through and implicitly returns None.
    """
    l_suffix = utils.endswith(l, letters.AGOSHA_LETTERS)
    r_prefix = utils.startswith(r, letters.GOSHA_LETTERS)
    if l_suffix is not None and r_prefix is not None:
        if r_prefix in letters.VOWELS:
            # Vowel onset: presumably the mapping value is indexable and
            # [0] selects the base voiced consonant -- TODO confirm the
            # shape of AGOSHA_TO_GOSHA_MAPPING values.
            return l[:-len(l_suffix)] + letters.AGOSHA_TO_GOSHA_MAPPING[l_suffix][0] + \
                letters.DIACRITICS_MAPPING[r_prefix] + r[len(r_prefix):]
        return l[:-len(l_suffix)] + letters.AGOSHA_TO_GOSHA_MAPPING[l_suffix] + r
def look(x: Any, shrink_right: int = 1) -> None:
    """Dumps any object to stdout using the technique in `expose()`.

    The parameter `shrink_right` is used to set narrowing of
    indentation. (Use `0` to turn off).

    Args:
        x: any object; it is first converted via ``expose()`` and then
            pretty-printed with ``pformat``.
        shrink_right: number of spaces each indent level is narrowed to;
            0 flattens the indentation entirely.
    """
    print(pformat(expose(x, shrink_right)).replace('  ', shrink_right * ' '))
def is_vertex_cover(G, vertex_cover):
    """Check whether ``vertex_cover`` covers every edge of graph G.

    A vertex cover is a set of vertices such that each edge of the graph
    is incident with at least one vertex in the set.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check the vertex cover.
    vertex_cover :
        Iterable of nodes.

    Returns
    -------
    is_cover : bool
        True if the given iterable forms a vertex cover.

    Examples
    --------
    This example checks two covers for a graph, G, of a single Chimera
    unit cell. The four horizontal qubits form a cover; removing one
    node breaks it.

    >>> import dwave_networkx as dnx
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> dnx.is_vertex_cover(G, [0, 1, 2, 3])
    True
    >>> dnx.is_vertex_cover(G, [0, 1, 2])
    False
    """
    chosen = set(vertex_cover)
    for u, v in G.edges:
        if u not in chosen and v not in chosen:
            return False
    return True
def datetimeformat(value, formatstring='%Y-%m-%d %H:%M', nonchar=''):
    """Format *value* as a datetime string.

    Converts *value* to ``datetime`` via ``convert_to_datetime`` and renders
    it with ``formatstring``; falsy input yields *nonchar*.  ASCII hyphens in
    the result are swapped for non-breaking hyphens (U+2011) so dates do not
    line-wrap::

        {{ datetime.now()|datetimeformat }}
        {{ "20171224T235959"|datetimeformat('%H:%M') }}
    """
    if not value:
        return nonchar
    rendered = convert_to_datetime(value).strftime(formatstring)
    return Markup(rendered.replace('-', '‑'))
def get_interface_for_name(protocols, target_interface_name):
    # type: (Iterable[Protocol], str) -> Optional[Interface]
    """Return the interface with the given name, or None when absent."""
    matches = (
        interface
        for protocol in protocols
        for interface in protocol.interfaces
        if interface.name == target_interface_name
    )
    return next(matches, None)
def __get_out_file(in_file, out_dir):
    """
    Get the path of the output file.
    Parameters
    ----------
    in_file: str
        Path to input file.
    out_dir: str
        Path to output directory; '' means "directory of in_file",
        falling back to '.' for bare file names.
    Returns
    -------
    file_no_ext: str
        The file name without extension.
    out_dir: str
        The path to the output directory.
    out_file: str
        The path of the output file.
    """
    if out_dir == '':
        out_dir = os.path.dirname(in_file)
    # os.path.basename is separator-agnostic; the previous manual
    # split(os.sep)[-1] broke on Windows for '/'-separated paths.
    file_no_ext = os.path.basename(os.path.splitext(in_file)[0])
    if out_dir == '':
        out_dir = '.'
    out_file = '/'.join([out_dir, file_no_ext])
    return file_no_ext, out_dir, out_file
def extract_validation_set(x: ndarray, y: ndarray, size=6000):
    """Split (x, y) into a training set and a validation set of ``size``.

    Parameters:
        x (ndarray): numpy array
        y (ndarray): numpy array
        size (int): Size of validation set. Must be smaller than examples count
            in x, y and multiple of label_count
    """
    n_examples = x.shape[0]
    assert n_examples == y.shape[0]
    assert (
        n_examples % size == 0
    ), f"number of examples ({n_examples}) needs to be evenly divisible by parameter size ({size})"
    assert size % len(set(y)) == 0, "size must be a multiple of number of labels"
    # Shuffle with balanced classes, then carve the validation slice off the front.
    x_shuffled, y_shuffled = classes_balanced_randomized_per_partition(x, y)
    xy_val = (x_shuffled[:size], y_shuffled[:size])
    xy_train = (x_shuffled[size:], y_shuffled[size:])
    return xy_train, xy_val
def check_rpc_reply(response):
    """Check an RPC reply string and report whether the config was saved.

    Args:
        response: RPC reply text returned by the device.
    """
    # Substring membership is clearer than comparing rfind() against -1.
    if "Save running-config successful" in response:
        print("\nConfiguration Saved!")
    else:
        print("\nConfiguration Not Saved!")
def send_update(peer_ip, attr, nlri, withdraw):
    """
    send update message
    :param peer_ip: peer ip address
    :return: dict with 'status' (and an error 'code' on failure)
    """
    message = {'attr': attr, 'nlri': nlri, 'withdraw': withdraw}
    protocol = cfg.CONF.bgp.running_config['factory'].fsm.protocol
    if protocol.send_update(message):
        return {'status': True}
    return {
        'status': False,
        'code': 'failed when send this message out'
    }
def test_struct2vector(source):
    """
    (struct posn (x y) #:transparent)
    (struct 3d-posn posn ([z #:mutable]))
    (let* ([d (3d-posn 1 2 3)]
           [v (struct->vector d)]
           [v_name (vector-ref v 0)]
           [v0 (vector-ref v 1)]
           [v2 (vector-ref v 3)])
        (and (eq? v_name 'struct:3d-posn) (= v0 1) (= v2 3)))
    """
    # NOTE(review): the `source` fixture presumably feeds the docstring above
    # to run_mod_expr as the program under test — do not edit its text.
    result = run_mod_expr(source, wrap=True)
    assert result == w_true
def generate_basic_blame_experiment_actions(
    project: Project,
    bc_file_extensions: tp.Optional[tp.List[BCFileExtensions]] = None,
    extraction_error_handler: tp.Optional[PEErrorHandler] = None
) -> tp.List[actions.Step]:
    """
    Build the shared action list every blame experiment starts from.

    The returned steps handle BC-file caching and compile the project
    when needed.

    Args:
        project: reference to the BB project
        bc_file_extensions: list of bitcode file extensions (e.g. opt, no opt)
        extraction_error_handler: handler to manage errors during the
            extraction process
    """
    cache_actions = get_bc_cache_actions(
        project, bc_file_extensions, extraction_error_handler
    )
    return cache_actions
def test_validate_by():
    """Test ``modin.core.dataframe.algebra.default2pandas.groupby.GroupBy.validate_by``."""
    def compare(obj1, obj2):
        # Deep-equality helper: both objects must share the exact type;
        # lists are compared element-wise (zip_longest surfaces length
        # mismatches as None entries, which df_equals rejects).
        assert type(obj1) == type(
            obj2
        ), f"Both objects must be instances of the same type: {type(obj1)} != {type(obj2)}."
        if isinstance(obj1, list):
            for val1, val2 in itertools.zip_longest(obj1, obj2):
                df_equals(val1, val2)
        else:
            df_equals(obj1, obj2)
    # This emulates situation when the Series's query compiler being passed as a 'by':
    #   1. The Series at the QC level is represented as a single-column frame with the "__reduced__" columns.
    #   2. The valid representation of such QC is an unnamed Series.
    reduced_frame = pandas.DataFrame({"__reduced__": [1, 2, 3]})
    series_result = GroupBy.validate_by(reduced_frame)
    series_reference = [pandas.Series([1, 2, 3], name=None)]
    compare(series_reference, series_result)
    # This emulates situation when several 'by' columns of the group frame are passed as a single QueryCompiler:
    #   1. If grouping on several columns the 'by' at the QC level is the following: ``df[by]._query_compiler``.
    #   2. The valid representation of such QC is a list of Series.
    splited_df = [pandas.Series([1, 2, 3], name=f"col{i}") for i in range(3)]
    splited_df_result = GroupBy.validate_by(
        pandas.concat(splited_df, axis=1, copy=True)
    )
    compare(splited_df, splited_df_result)
    # This emulates situation of mixed by (two column names and an external Series):
    by = ["col1", "col2", pandas.DataFrame({"__reduced__": [1, 2, 3]})]
    result_by = GroupBy.validate_by(by)
    reference_by = ["col1", "col2", pandas.Series([1, 2, 3], name=None)]
    compare(reference_by, result_by)
def is_json_encodable(t: Any) -> bool:
    """ Checks whether a type is json encodable. """
    # pylint:disable=invalid-name,too-many-return-statements,too-many-branches
    if not is_typecheckable(t):
        return False
    if t in JSON_BASE_TYPES:
        return True
    if t in (None, type(None)):
        return True
    if t is ...:
        return True
    if is_namedtuple(t):
        # NamedTuple: encodable iff every declared field type is encodable.
        field_types = getattr(t, "_field_types")
        return all(is_json_encodable(field_types[field]) for field in field_types)
    if hasattr(t, "__origin__") and hasattr(t, "__args__"):
        # Parameterized generics (List[...], Tuple[...], Union[...], ...).
        if t.__origin__ in (list, set, frozenset, deque, Optional):
            return is_json_encodable(t.__args__[0])
        if t.__origin__ is tuple:
            # Distinguish variadic Tuple[X, ...] from fixed-arity Tuple[X, Y].
            if len(t.__args__) == 2 and t.__args__[1] is ...:  # pylint:disable=no-else-return
                return is_json_encodable(t.__args__[0])
            else:
                return all(is_json_encodable(s) for s in t.__args__)
        if t.__origin__ is Union:
            return all(is_json_encodable(s) for s in t.__args__)
        if t.__origin__ in (dict, OrderedDict, Mapping):
            # JSON objects require string keys.
            return t.__args__[0] == str and is_json_encodable(t.__args__[1])
        if t.__origin__ is Literal:
            # Literal values must themselves be JSON scalars (or None).
            return all(isinstance(s, JSON_BASE_TYPES+(type(None),)) for s in t.__args__)
    return False
def test_raise_on_vector_dimension_mismatch(
    L: np.ndarray, method_kwargs: Dict[str, Any]
):
    """Tests whether a :class:`ValueError` is raised if the shape of the vector is not
    compatible with the shape of the Cholesky factor"""
    N = L.shape[0]
    # Generate arbitrary v with incompatible length:
    # randint(-N, N) yields [-N, N-1], so v_len lands in [1, 2N]; the bump
    # below excludes the one compatible value N.
    v_len = N + np.random.randint(-N, N) + 1
    if v_len == N:
        v_len += 1
    v = np.random.rand(v_len)
    with pytest.raises(ValueError):
        cholupdates.rank_1.update(L=L, v=v, **method_kwargs)
def request_parse_platform_id(validated_request):
    """Parse the PlatformID list from a validated visibility API request.

    Args:
        validated_request (obj:Request): Flask request for a
            visibility/opportunity endpoint; MUST already be validated
            against the endpoint schema.

    Returns:
        A list of Satellite model objects; all satellites when no
        'PlatformID' key is present.

    Throws:
        InputError: If any provided platform ID is unknown.
    """
    payload = validated_request.json
    if 'PlatformID' not in payload:
        return Satellite.query.all()
    satellites = []
    for platform_id in payload['PlatformID']:
        satellite = Satellite.query.get(platform_id)
        if satellite is None:
            raise InputError('PlatformID', 'No such platform')
        satellites.append(satellite)
    return satellites
def view_runner(step, args):
    """
    input:
        depthfile : path (.depth file)
        nodes : text list (a list of node IDs)
        mode : text (currently view or view-all)
    output:
        output_dir : just a link to the output directory
    """
    f1 = args["depthfile"]
    mode = args["mode"]
    nodes = args["nodes"]
    # Locate pileupVisualizer.R relative to this module's directory.
    script_path = os.path.split(os.path.realpath(__file__))[0]
    script_path = os.path.join(script_path, "../../RScripts/pileupVisualizer.R")
    # NOTE(review): the command is built by string concatenation — if run_sp
    # executes it through a shell, unsanitized depthfile/node values could
    # inject shell syntax; confirm the inputs are trusted.
    if mode == "view":
        command = "Rscript " + script_path + " --args " + mode + " " + f1 + " " + step.outpath + " " + " ".join(nodes) + " "
    else:
        command = "Rscript " + script_path + " --args " + mode + " " + f1 + " " + step.outpath + " "
    # In "read" mode only the bookkeeping below happens; the script is skipped.
    if step.execution_mode != "read":
        run_sp(step, command)
    step.inputs = args
    # organize output
    step.outputs = {
        "output_dir" : ""
    }
def dataGenerator(data_generator_info, data_dir, target_size, color_mode, folder, batch_size, seed,
                  sample_weight_flag=False, is_sample_weight=False, sample_weight_dict={}, sample_rescale=1,
                  is_test_data=False):
    """Yield flattened image batches from ``data_dir``/``folder`` indefinitely.

    Wraps Keras' ``ImageDataGenerator.flow_from_directory``; test data is not
    shuffled.  With ``sample_weight_flag`` set, batches are reshaped to 2-D
    and — if ``is_sample_weight`` — pixel values matching keys of
    ``sample_weight_dict`` are replaced by their weights scaled by
    ``sample_rescale``.  Otherwise batches are reshaped to 3-D with a
    trailing singleton channel axis.

    NOTE(review): ``sample_weight_dict={}`` is a mutable default argument —
    harmless while only read, but fragile if ever mutated.
    """
    arg_dict = dict(shuffle=not is_test_data)
    datagen = ImageDataGenerator(**data_generator_info)
    data_generator = datagen.flow_from_directory(data_dir,
                                                 target_size=target_size,
                                                 color_mode=color_mode,
                                                 classes=[folder],
                                                 class_mode=None,
                                                 batch_size=batch_size,
                                                 seed=seed,
                                                 **arg_dict)
    for img in data_generator:
        if sample_weight_flag:
            if is_sample_weight:
                # Map raw pixel/label values to their sample weights in place.
                for sample in sample_weight_dict:
                    img[img == sample] = sample_weight_dict[sample] * sample_rescale
            img = img.reshape((len(img), -1))
        else:
            img = img.reshape((len(img), -1, 1))
        yield img
def crear_comentario_submeta(request, pk):
    """Create a comment and attach it to the sub-goal identified by *pk*."""
    # meta = get_object_or_404(Meta, pk=pk)
    meta = Submeta.objects.get(pk=pk)
    # On POST, save the comment and redirect the browser to the goal page.
    if request.method == "POST":
        form = ComentarioFormulario(request.POST)
        if form.is_valid():
            # Build the comment from the form data without hitting the DB yet.
            comentario = form.save(commit=False)
            # Link it to the goal (foreign key).
            comentario.meta = meta
            # Persist the comment.
            comentario.save()
            # comentarios = Comentario.objects.filter(meta__pk=pk)
            return redirect('info_submeta', pk=meta.id)
    # Otherwise build an empty form and hand it to the template so the
    # user can fill in the comment.  (An invalid POST falls through to the
    # final render with the bound form and its errors.)
    else:
        form = ComentarioFormulario(instance=meta)
    return render(request, 'crear_comentario_submeta.html', {'form': form})
def printAnswer(part: int, value: object) -> None:
    """ Print the solution to Part `part` of the puzzle """
    # `value` may be anything printable; the previous annotation used the
    # builtin function `any`, which is not a type.
    print(f"{Fore.GREEN}Answer (Part {part}):{Style.RESET_ALL} {value}")
def execute_op(op: AsyncMigrationOperation, query_id: str, rollback: bool = False):
    """
    Synchronously run the operation's SQL (or its rollback SQL) against the
    appropriate database, dispatching on ``op.database``.
    """
    statement = op.rollback if rollback else op.sql
    if op.database == AnalyticsDBMS.CLICKHOUSE:
        execute_op_clickhouse(statement, query_id, op.timeout_seconds)
        return
    execute_op_postgres(statement, query_id)
def do_two_stage(data, options):
    """
    Run the prime-based approach.
    """
    ruler = TwoStageApproach(data, options)
    covers = ruler.compute()
    # save result to a CSV file
    if options.rdump:
        # BUG(review): `primes` is undefined in this scope — this raises
        # NameError whenever options.rdump is set.  It is probably meant to
        # come from `ruler` (e.g. ruler.primes); confirm and fix.
        data.dump_result(primes, covers)
def load_mask_from_shapefile(filename, shape, transform):
    """Rasterize the polygons of a shapefile into a mask of ``shape``."""
    polygons, _ = load_shapefile2multipolygon(filename)
    return multipolygon2mask(polygons, shape, transform)
def _spectra_resample(spectra, wvl_orig, wvl_target):
    """Linearly resample a spectrum onto a new wavelength grid.

    :param spectra: flux values sampled at ``wvl_orig`` (may contain NaNs)
    :param wvl_orig: original wavelength grid
    :param wvl_target: wavelength grid to resample onto
    :return: flux on ``wvl_target``; NaN outside the finite input coverage
    """
    idx_finite = np.isfinite(spectra)
    min_wvl_s = np.nanmin(wvl_orig[idx_finite])
    max_wvl_s = np.nanmax(wvl_orig[idx_finite])
    # Only interpolate inside the finite input range; extrapolation is
    # deliberately avoided and left as NaN.
    idx_target = np.logical_and(wvl_target >= min_wvl_s,
                                wvl_target <= max_wvl_s)
    new_flux = np.interp(wvl_target[idx_target], wvl_orig[idx_finite], spectra[idx_finite])
    # np.full replaces the previous uninitialized np.ndarray(...) + fill().
    new_flux_out = np.full(len(wvl_target), np.nan)
    new_flux_out[idx_target] = new_flux
    return new_flux_out
def test_standard_surface():
    """Test to read a standard surface file."""
    def dtparse(string):
        # Parse GEMPAK 'YYMMDD/HHMM' timestamps from the CSV index.
        return datetime.strptime(string, '%y%m%d/%H%M')
    # Parameters excluded from the numeric comparison below.
    skip = ['text']
    gsf = GempakSurface(get_test_data('gem_std.sfc'))
    gstns = gsf.sfjson()
    # NOTE(review): pandas' `date_parser` argument is deprecated in recent
    # versions in favour of `date_format` — confirm the pinned pandas.
    gempak = pd.read_csv(get_test_data('gem_std.csv'),
                         index_col=['STN', 'YYMMDD/HHMM'],
                         parse_dates=['YYMMDD/HHMM'],
                         date_parser=dtparse)
    for stn in gstns:
        # (station id, observation time) keys into the reference CSV.
        idx_key = (stn['properties']['station_id'],
                   stn['properties']['date_time'])
        gemsfc = gempak.loc[idx_key, :]
        for param, val in stn['values'].items():
            if param not in skip:
                assert val == pytest.approx(gemsfc[param.upper()])
def bootstrap_comparison(
    molecule: str,
    prediction_file: str,
    datatype: str,
    n_samples=1,
    n_bootstrap=1000,
    **kwargs,
):
    """Perform a bootstrap analysis on the experimental and the computed titration curve.

    Parameters
    ----------
    molecule - SAMPL6 identifier of the molecule.
    prediction_file - file name containing the computed pKa values.
    datatype - typeI or typeIII, (type II doesnt have error bars so we cant bootstrap)
    n_samples - number of samples used to determine the standard error.
    n_bootstrap - number of bootstrap samples to draw.

    Returns
    -------
    DataFrame with one row per bootstrap sample, columns "Molecule" and "Δ"
    (area between the aligned predicted and experimental curves).
    """
    if datatype == "typeI":
        predicted_curve, strapped_curves = TypeIPrediction.bootstrap_from_id(
            molecule, prediction_file, n_samples, n_bootstrap, **kwargs
        )
    elif datatype == "typeIII":
        predicted_curve, strapped_curves = TypeIIIPrediction.bootstrap_from_id(
            molecule, prediction_file, n_samples, n_bootstrap, **kwargs
        )
    experimental_curve, exp_strapped_curves = SAMPL6Experiment.bootstrap_from_id(
        molecule, n_bootstrap
    )
    df = pd.DataFrame(columns=["Molecule", "Δ"])
    predicted_curve.align_mean_charge(experimental_curve, area_between_curves, 0.1)
    for i, (curve, exp_curve) in enumerate(zip(strapped_curves, exp_strapped_curves)):
        curve.align_mean_charge(exp_curve, area_between_curves, 0.1)
        Δ = area_between_curves(curve.mean_charge, exp_curve.mean_charge, 0.1)
        df.loc[i] = [molecule, Δ]
    # Fix: the assembled results frame was previously built but never
    # returned, making the whole computation unobservable to callers.
    return df
def calc_mu(Rs):
    """ Calculates mu for use in LinKK """
    # Single pass: accumulate magnitudes of negative and non-negative R values.
    negatives = 0.0
    positives = 0.0
    for resistance in Rs:
        if resistance < 0:
            negatives += abs(resistance)
        else:
            positives += abs(resistance)
    return 1 - negatives / positives
def test_tensorstore_clim_popup():
    """Regression to test, makes sure it works with tensorstore dtype"""
    # Skip cleanly when tensorstore is not installed in the test env.
    ts = pytest.importorskip('tensorstore')
    layer = Image(ts.array(np.random.rand(20, 20)))
    # Constructing the popup is the assertion: it must not raise on a
    # tensorstore-backed layer.
    QContrastLimitsPopup(layer)
def computeAnomaly(data):
    """
    Remove the seasonality from *data* by subtracting its mean climatology,
    grouped over the dataset's native period.
    """
    period = _get_period(data)
    climatology = computeMeanClimatology(data)
    return data.groupby(f'time.{period}') - climatology
def product(*args):
    """Calculate product of args.
    @param args: list of floats to multiply
    @type args: list of float
    @return: product of args (1 when called with no arguments)
    @rtype: float
    """
    import math
    # math.prod runs in C and handles the empty case (returns 1), unlike
    # the previous args[0]-seeded loop which raised IndexError for product().
    return math.prod(args)
def unpack_into_tensorarray(value, axis, size=None):
    """
    unpacks a given tensor along a given axis into a TensorArray
    Parameters:
    ----------
    value: Tensor
        the tensor to be unpacked
    axis: int
        the axis to unpack the tensor along
    size: int
        the size of the array to be used if shape inference resulted in None
    Returns: TensorArray
        the unpacked TensorArray
    """
    shape = value.get_shape().as_list()
    rank = len(shape)
    dtype = value.dtype
    array_size = shape[axis] if shape[axis] is not None else size
    if array_size is None:
        raise ValueError("Can't create TensorArray with size None")
    array = tf.TensorArray(dtype=dtype, size=array_size)
    # Swap `axis` with dimension 0 so unstack() splits along the right axis.
    # For axis >= 1 this equals the old hand-built permutation; for axis == 0
    # the old list ([0] + [] + [0] + ...) duplicated index 0 and was invalid.
    dim_permutation = list(range(rank))
    dim_permutation[0], dim_permutation[axis] = dim_permutation[axis], dim_permutation[0]
    unpack_axis_major_value = tf.transpose(value, dim_permutation)
    full_array = array.unstack(unpack_axis_major_value)
    return full_array
def create_spider_(spider_name):
    """
    Create a Spider for project ,must create project before
    """
    # Thin wrapper: delegates the actual scaffolding to the CLI command class.
    CommandSpider().run(spider_name)
def get_error_page(status_code, message):
    """
    Build the template context for an error page.

    :param status_code: HTTP status code to display
    :param message: human-readable error description
    :return: context dict for the error template
    """
    return {
        'site_web': settings.SITE_TITLE,
        'site_url': reverse(settings.SITE_NAME),
        'status_code': status_code,
        'message': message,
        # Current year — presumably rendered in the page footer.
        'date': datetime.now().year,
    }
def fits_difference(*args, **keys):
    """Difference two FITS files with parameters specified as Differencer class."""
    return FitsDifferencer(*args, **keys).difference()
def esta_balanceada(expressao):
    """Return True when parentheses, brackets and braces in *expressao* balance.

    Uses only the Pilha (stack) built in the previous class, as required.
    Time and space complexity are both O(n).

    :param expressao: string with the expression to check
    :return: True if the expression is balanced, False otherwise
    """
    # Map each closer to its matching opener.
    pares = {')': '(', ']': '[', '}': '{'}
    pilha = Pilha()
    for caracter in expressao:
        if caracter in '({[':
            pilha.empilhar(caracter)
        elif caracter in pares:
            try:
                desempilhado = pilha.desempilhar()
            except PilhaVaziaErro:
                # Closer with nothing open: unbalanced.  (The previous code
                # returned pilha.vazia() here — i.e. True — so inputs such
                # as "())" were wrongly reported as balanced.)
                return False
            if desempilhado != pares[caracter]:
                return False
        # Any other character is ignored.  (Previously every non-opening
        # character popped the stack, so letters corrupted the matching.)
    return pilha.vazia()
def create_doc(im_src, tag, coords, fea_arr, fea_bin_arr):
    """
    Create elasticsearch doc
    Params:
        im_src: image file name
        tag: tag or class for image
        coords: list of boxes corresponding to a tag
        fea_arr: list of ImFea objects
        fea_bin_arr: list of ImFeaBin objects
    Returns:
        dict ready for indexing into elasticsearch
    """
    doc = {}
    doc['coords'] = coords
    f_bin = ImFeaBinArr()
    f = ImFeaArr()
    f.arr.extend(fea_arr)
    f_bin.arr.extend(fea_bin_arr)
    # Serialized protobufs are base64-encoded for storage in the document.
    obj_bin_str = b64encode(f_bin.SerializeToString())
    obj_str = b64encode(f.SerializeToString())
    doc['sigs'] = obj_str
    doc['bin_sigs'] = obj_bin_str
    # Fix: previously referenced the undefined name `im_name`, which raised
    # NameError on every call; the parameter is `im_src`.
    doc['im_src'] = im_src
    doc['cl'] = tag
    return doc
def test_uget_package_installed(host):
    """
    Tests if uget is installed.
    """
    # `host` is the testinfra fixture; PACKAGE is the module-level name.
    assert host.package(PACKAGE).is_installed
def scaled_mouse_pos(mouse):  # pragma: no cover
    """
    Return the mouse position scaled to the reference screen size.

    Parameters
    ----------
    mouse : int * int
        Actual mouse position in window coordinates.

    Returns
    -------
    int * int
        The position scaled to (cf.SCREEN_WIDTH, cf.SCREEN_HEIGHT).
    """
    # Current window size, used to derive the per-axis scale factors.
    window_dimensions = ut.get_screen_size()
    factor_x = cf.SCREEN_WIDTH / window_dimensions[0]
    factor_y = cf.SCREEN_HEIGHT / window_dimensions[1]
    return mouse[0] * factor_x, mouse[1] * factor_y
def _send_auto_response_mail(addressee, passwd: bytearray, addressee_subject) -> None:
    """
    Utility method, sends auto-response mail to addressee via SMTP
    :param addressee: mail address where the mail is sent to
    :param passwd: the mail server password (as bytearray)
    :param addressee_subject: the subject of the mail to be sent to addressee
    :return: None
    """
    conf = read_config()
    send_server = conf['Mail']['smtp server']
    send_port = conf['Mail']['smtp port']
    login = conf['Mail']['login mail address']
    ssl_protocol = conf['Mail']['ssl protocol']
    # NOTE(review): values read from the config are presumably strings, so
    # `type(ssl_protocol) != int` is always true and the TLSv1.2 default is
    # always taken — the int() branch below looks unreachable; verify.
    if not ssl_protocol or type(ssl_protocol) != int:
        ssl_protocol = ssl.PROTOCOL_TLSv1_2
    else:
        ssl_protocol = int(ssl_protocol)
    autoresponse_filename = conf['Mail']['send auto-response mail']
    ar_path = os.path.join(os.path.dirname(__file__), autoresponse_filename)
    # The configured file supplies the auto-response body.
    with open(ar_path) as fp:
        msg = email.message.EmailMessage()
        msg.set_content(fp.read())
    msg['Subject'] = 'Re: ' + addressee_subject
    msg['From'] = login
    msg['To'] = addressee
    s = smtplib.SMTP_SSL(port=send_port, host=send_server, context=_create_ssl_context(ssl_protocol))
    try:
        s.login(user=login, password=passwd.decode(encoding='utf8'))
        s.send_message(msg)
        _LOGGER.info('Auto-response sent')
    except Exception as e:
        # NOTE(review): `e` is discarded; `raise ... from e` would keep the
        # original cause in the traceback.
        _LOGGER.error('ERROR: could not authenticate or connection error with SMTP server - no auto-response sent')
        raise MailServerError('Authentifizierung oder Verbindung mit SMTP Server fehlgeschlagen. Passwort und Einstellungen korrekt?')
    finally:
        s.quit()
def is_valid_zcs_image_id(zcs_image_id):
    """
    Validate a Zadara Container Services (ZCS) image ID (its "name").

    A valid name looks like ``img-00000001``: the literal prefix "img-"
    followed by exactly 8 lower-case hexadecimal characters.

    :type zcs_image_id: str
    :param zcs_image_id: The ZCS image name to be validated.
    :rtype: bool
    :return: True or False depending on whether zcs_image_id passes
        validation.
    """
    if zcs_image_id is None:
        return False
    return bool(re.match(r'^img-[0-9a-f]{8}$', zcs_image_id))
def get_topology2(gid: int, cfg: Config):
    """
    Create a uniformly and randomly sampled genome of fixed topology:
       Sigmoid with bias 1.5 --> Actuation default of 95,3%
    (key=0, bias=1.5)  (key=1, bias=?)
            ____ /   /
           /        /
         GRU       /
          |  _____/
          | /
       (key=-1)
    """
    # Create an initial dummy genome with fixed configuration
    genome = Genome(
        key=gid,
        num_outputs=cfg.genome.num_outputs,
        bot_config=cfg.bot,
    )
    # Setup the parameter-ranges (span between configured min and max)
    conn_range = cfg.genome.weight_max_value - cfg.genome.weight_min_value
    bias_range = cfg.genome.bias_max_value - cfg.genome.bias_min_value
    rnn_range = cfg.genome.rnn_max_value - cfg.genome.rnn_min_value
    # Create the nodes
    genome.nodes[0] = OutputNodeGene(key=0, cfg=cfg.genome)  # OutputNode 0
    genome.nodes[0].bias = 1.5  # Drive with 0.953 actuation by default
    genome.nodes[1] = OutputNodeGene(key=1, cfg=cfg.genome)  # OutputNode 1
    genome.nodes[1].bias = random() * bias_range + cfg.genome.bias_min_value  # Uniformly sampled bias
    genome.nodes[2] = GruNodeGene(key=2, cfg=cfg.genome, input_keys=[-1], input_keys_full=[-1])  # Hidden node
    genome.nodes[2].bias = 0  # Bias is irrelevant for GRU-node
    # Uniformly sample the genome's GRU-component
    genome.nodes[2].bias_h = rand_arr((3,)) * bias_range + cfg.genome.bias_min_value
    genome.nodes[2].weight_xh_full = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value
    genome.nodes[2].weight_hh = rand_arr((3, 1)) * rnn_range + cfg.genome.weight_min_value
    # Create the connections
    genome.connections = dict()
    # input2gru
    key = (-1, 2)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = 1  # Simply forward distance
    genome.connections[key].enabled = True
    # gru2output - Uniformly sampled
    key = (2, 1)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = 3  # Enforce capabilities of full spectrum
    genome.connections[key].enabled = True
    # input2output - Uniformly sampled
    key = (-1, 1)
    genome.connections[key] = ConnectionGene(key=key, cfg=cfg.genome)
    genome.connections[key].weight = random() * conn_range + cfg.genome.weight_min_value
    genome.connections[key].enabled = True
    # Refresh the genome's RNN bookkeeping after wiring the connections.
    genome.update_rnn_nodes(config=cfg.genome)
    return genome
def resolve_implicits(implies, opts):
    # type: (Dict[str, List[str]], List[str]) -> Set[str]
    """Expand logging options with everything they imply, transitively,
    so that specifying e.g. --debug=popenio results in --debug=popenio,popen.
    """
    resolved = set(opts)
    changed = True
    # Fixed-point iteration: keep applying implications until no new
    # options appear.
    while changed:
        before = len(resolved)
        for option, implied in implies.items():
            if option in resolved:
                resolved.update(implied)
        changed = len(resolved) != before
    return resolved
def symmetric_key(kms_client):
    """
    Create a temporary symmetric key for use in tests that require one.
    The key is marked for deletion once the tests have finished.
    We don't actually test the creation/deletion process here; it's assumed to work.
    :param kms_client:
    :return:
    """
    code, content = kms_client.post('CreateKey')
    # Yield the key metadata to the test; teardown runs after the yield.
    yield content['KeyMetadata']
    # 7 days is the minimum deletion window KMS accepts.
    code, unused = kms_client.post(
        'ScheduleKeyDeletion',
        {'KeyId': content['KeyMetadata']['KeyId'], 'PendingWindowInDays': 7},
    )
    if code != 200:
        raise ValueError(
            'Unable to delete test key %s' % content['KeyMetadata']['KeyId']
        )
def dry_press(
    H,
    Pv,
    alt_setting=P0,
    alt_units=default_alt_units,
    press_units=default_press_units,
):
    """
    Return the dry-air pressure: the total air pressure minus the water
    vapour pressure ``Pv``.
    """
    pressure_altitude = pressure_alt(H, alt_setting, alt_units=alt_units)
    total_pressure = alt2press(pressure_altitude, press_units=press_units,
                               alt_units=alt_units)
    return total_pressure - Pv
def InvocationAddCallerAuthid(builder, callerAuthid):
    """This method is deprecated. Please switch to AddCallerAuthid."""
    # Backwards-compatibility shim (presumably flatbuffers-generated);
    # forwards unchanged to AddCallerAuthid.
    return AddCallerAuthid(builder, callerAuthid)
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
    """Performs the max pooling on the input.
    Note internally this op reshapes and uses the underlying 2d operation.
    Args:
      input: A 3-D `Tensor` of the format specified by `data_format`.
      ksize: An int or list of `ints` that has length `1` or `3`. The size of the
        window for each dimension of the input tensor.
      strides: An int or list of `ints` that has length `1` or `3`. The stride of
        the sliding window for each dimension of the input tensor.
      padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
        the "returns" section of `tf.nn.convolution` for details.
      data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
      name: A name for the operation (optional).
    Returns:
      A `Tensor` of format specified by `data_format`.
      The max pooled output tensor.
    """
    with ops.name_scope(name, "MaxPool1d", [input]) as name:
        if data_format is None:
            data_format = "NWC"
        # Channel dim index within the original 3-D layout.
        channel_index = 1 if data_format.startswith("NC") else 2
        # Prepend the batch dimension to lift the 1-D params to 2-D form.
        ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
        strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
        # Insert a height-1 axis and map the format onto the 2-D op:
        # NWC -> NHWC (expand at dim 1), NCW -> NCHW (expand at dim 2).
        expanding_dim = 1 if data_format == "NWC" else 2
        data_format = "NHWC" if data_format == "NWC" else "NCHW"
        input = expand_dims_v2(input, expanding_dim)
        result = gen_nn_ops.max_pool(
            input,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format=data_format,
            name=name)
        # Drop the synthetic axis to return to the 1-D layout.
        return array_ops.squeeze(result, expanding_dim)
def DeepLabV3Plus(shape):
    # Builds a DeepLabV3+ style segmentation model with a ResNet50 backbone
    # and a single-channel sigmoid output.
    """ Inputs """
    inputs = Input(shape)
    """ Pre-trained ResNet50 """
    base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=inputs)
    """ Pre-trained ResNet50 Output """
    # High-level features feed the ASPP module, then are upsampled 4x.
    image_features = base_model.get_layer('conv4_block6_out').output
    x_a = ASPP(image_features)
    x_a = UpSampling2D((4, 4), interpolation="bilinear")(x_a)
    """ Get low-level features """
    # Low-level features are projected to 48 channels before fusion.
    x_b = base_model.get_layer('conv2_block2_out').output
    x_b = Conv2D(filters=48, kernel_size=1, padding='same', use_bias=False)(x_b)
    x_b = BatchNormalization()(x_b)
    x_b = Activation('relu')(x_b)
    x = Concatenate()([x_a, x_b])
    # NOTE(review): activation='relu' inside Conv2D followed by a separate
    # Activation('relu') applies ReLU twice (idempotent, but redundant).
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu',use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((4, 4), interpolation="bilinear")(x)
    """ Outputs """
    x = Conv2D(1, (1, 1), name='output_layer')(x)
    x = Activation('sigmoid')(x)
    """ Model """
    model = Model(inputs=inputs, outputs=x)
    return model
def test_get_search_permutations(search_config):
    """Test for the correct construction of GridSearch hyperparamter permutations."""
    # Three value sets -> the expected 2*2*2 Cartesian product, in order.
    sets = [[0.0001, 0.01], [32, 512], [1e-05, 0.001]]
    assert GridSearch.get_search_permutations(sets) == [
        (0.0001, 32, 1e-05),
        (0.0001, 32, 0.001),
        (0.0001, 512, 1e-05),
        (0.0001, 512, 0.001),
        (0.01, 32, 1e-05),
        (0.01, 32, 0.001),
        (0.01, 512, 1e-05),
        (0.01, 512, 0.001),
    ]
def get_parameters():
    """Parse the supplied command line arguments.

    Environment variables, when present, override the corresponding
    command-line values.

    Returns:
        args: The parsed and validated command line arguments
    """
    parser = argparse.ArgumentParser(
        description="Start up and shut down ASGs on demand"
    )
    # Parse command line inputs and set defaults
    parser.add_argument("--aws-profile", default="default")
    parser.add_argument("--aws-region", default="eu-west-2")
    parser.add_argument("--sns-topic", help="SNS topic ARN")
    parser.add_argument("--environment", help="Environment value", default=UNSET_TEXT)
    parser.add_argument("--application", help="Application", default=UNSET_TEXT)
    parser.add_argument(
        "--slack-channel-override",
        help="Slack channel to use for overriden jobs",
        default=UNSET_TEXT,
    )
    parser.add_argument("--log-level", help="Log level for lambda", default="INFO")
    _args = parser.parse_args()
    # Environment variables trump command-line values; the table replaces
    # seven copy-pasted if-blocks.
    env_overrides = {
        "AWS_PROFILE": "aws_profile",
        "AWS_REGION": "aws_region",
        "SNS_TOPIC": "sns_topic",
        "ENVIRONMENT": "environment",
        "APPLICATION": "application",
        "SLACK_CHANNEL_OVERRIDE": "slack_channel_override",
        "LOG_LEVEL": "log_level",
    }
    for env_var, attr_name in env_overrides.items():
        if env_var in os.environ:
            setattr(_args, attr_name, os.environ[env_var])
    return _args
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.