content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def watershed(src):
    """Segment an image with the marker-based watershed algorithm.

    :param src: 8-bit single-channel (grayscale) image.
    :return: 32-bit single-channel marker map produced by cv2.watershed.
    """
    gray = src.copy()
    color = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # Binarize with Otsu's automatic threshold selection.
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Morphological opening removes small noise specks.
    struct_elem = np.ones((3, 3), np.uint8)
    opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, struct_elem, iterations=2)
    # Dilating the opened mask yields the region that is surely background.
    sure_bg = cv2.dilate(opened, struct_elem, iterations=3)
    # Pixels far from the mask boundary are surely foreground.
    distances = cv2.distanceTransform(opened, cv2.DIST_L2, 5)
    _, sure_fg = cv2.threshold(distances, 0.2 * distances.max(), 255, cv2.THRESH_BINARY)
    sure_fg = np.uint8(sure_fg)
    # Whatever is neither sure background nor sure foreground is unknown.
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Label connected foreground components as initial markers.
    _, markers = cv2.connectedComponents(sure_fg)
    # Shift labels by one so that the sure background is 1 instead of 0 ...
    markers = markers + 1
    # ... and reserve 0 for the unknown region the watershed must decide.
    markers[unknown == 255] = 0
    return cv2.watershed(color, markers)
def sub(x, y):
    """Returns the difference of compositions.

    Parameters
    ----------
    x : NumPy array, shape (n,) or (k,n)
        The composition that will be subtracted from.
    y : NumPy array, shape (n,) or (k,n)
        The composition to be subtracted.

    Returns
    -------
    z : NumPy array, shape (n,) or (k,n)
        The result of y subtracted from x.
    """
    # Subtraction is perturbation by the inverse composition.
    # -1.0 (not -1) forces float coercion inside power().
    inverse_y = power(y, -1.0)
    return perturbation(x, inverse_y)
def _broadcast_concatenate(arrays, axis):
    """Concatenate arrays along an axis, broadcasting them to a common shape first."""
    broadcasted = _broadcast_arrays(arrays, axis)
    return np.concatenate(broadcasted, axis=axis)
def get_final_bmi(data_dic, agex_low, agex_high, mrnsForFilter=None, filter=True):
    """
    Get the distinct BMI percentile readings for predictions.
    Returns outcome BMI values, outcome percentiles and labels.

    #### PARAMETERS ####
    data_dic: dictionary of patient data, keyed by patient id
    agex_low: low age range for outcome prediction
    agex_high: high age range for outcome prediction
    mrnsForFilter: optional list of mrns to get outcomes for (None/empty = all)
    filter: default==True; if True returns mrn filtered data only, otherwise
        returns all data with either a 0 or '' for rows without an outcome
    """
    # NOTE: the parameter `filter` shadows the builtin; the name is kept for
    # backward compatibility with existing callers.
    if mrnsForFilter is None:  # avoid a mutable default argument
        mrnsForFilter = []
    n = len(data_dic)
    outcome = np.zeros(n, dtype=float)
    outcome_pct = np.zeros(n, dtype=float)
    outcome_labels = [''] * n
    indices = np.zeros(n)
    for ix, k in enumerate(data_dic):
        # Skip patients not in the requested mrn subset (when one is given).
        # Use logical `and` (the original bitwise `&` happened to work on bools).
        if len(mrnsForFilter) > 0 and str(data_dic[k]['mrn']) not in mrnsForFilter:
            continue
        bmi, pct, label = get_final_bmi_single(data_dic[k], agex_low, agex_high)
        if pct == 0 and label == '':
            continue  # no valid reading for this patient
        outcome[ix] = bmi
        outcome_pct[ix] = pct
        outcome_labels[ix] = label
        indices[ix] = 1  # mark this row as having a valid outcome
    if filter:
        mask = indices == 1
        return outcome[mask], outcome_pct[mask], np.array(outcome_labels)[mask]
    return outcome, outcome_pct, np.array(outcome_labels)
def add_group_sub(uid: int, group_id: int) -> bool:
    """Insert a group subscription row into the per-user table.

    Inserts ``group_id`` into table ``_{uid}``; does nothing if the group
    already exists in that table.

    :param uid: numeric uid uniquely identifying the user
    :param group_id: id of the group that subscribes to this user
    :return: True if the row was inserted, False if it already existed
    """
    connection = sqlite3.connect(DB_PATH)
    cursor = connection.cursor()
    success = True
    try:
        # The table name cannot be bound as a parameter; uid is an int, so
        # the f-string interpolation is safe. Values are bound properly
        # instead of being interpolated into the SQL string.
        group_exist = cursor.execute(
            f'select count(*) from _{uid} where group_id=?;', (group_id,)
        ).fetchone()[0]
        if not group_exist:
            # Second column: translation flag, enabled (1) by default.
            cursor.execute(f'insert into _{uid} values(?, 1);', (group_id,))
            connection.commit()
        else:
            success = False
            logger.warning(f'群{group_id} 已存在表_{uid}中')
    finally:
        # Always release the connection, even if the query raises.
        cursor.close()
        connection.close()
    return success
def text_parse_two_placeholders() -> None:
    """It parses two placeholders."""
    types, tokens = tokenize("Parse {THIS} and {THAT}")
    expected_types = [
        Text,
        Generic.Punctuation,
        Generic.Emph,
        Generic.Punctuation,
        Text,
        Generic.Punctuation,
        Generic.Emph,
        Generic.Punctuation,
        Text,
    ]
    expected_tokens = ["Parse ", "{", "THIS", "}", " and ", "{", "THAT", "}", "\n"]
    # Both streams must line up one-to-one across all 9 tokens.
    assert len(tokens) == len(types) == 9
    assert types == expected_types
    assert tokens == expected_tokens
def preprocess(corpora, divide_by_three_dots):
    """
    Process corpora for the input of the translation algorithms
    :param corpora: corpora to process
    :param divide_by_three_dots: whether output sentences are split on "..."
    :return: nothing
    """
    chars_sentences, mapping = build_full_line_translation_process(
        corpora,
        True,
        Path(r"../NMT_input/signs_per_line.txt"),
        Path(r"../NMT_input/transcriptions_per_line.txt"),
        Path(r"../NMT_input/translation_per_line.txt"))
    translations = build_translations(corpora, mapping)
    # Output goes to a different directory depending on the split mode.
    if divide_by_three_dots:
        out_dir = "../NMT_input"
    else:
        out_dir = "../NMT_input/not_divided_by_three_dots"
    write_translations_to_file(chars_sentences,
                               translations,
                               Path(out_dir + "/signs.txt"),
                               Path(out_dir + "/transcriptions.txt"),
                               Path(out_dir + "/translation.txt"),
                               divide_by_three_dots)
def chunks(lst, n):
    """Yield successive n-sized chunks from lst.

    The final chunk is shorter when len(lst) is not a multiple of n.

    Example
    -----
    original_list = [0,1,2,3,4,5,6,7,8,9]
    chunked_list = list(chunks(original_list, 3))
    -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    total = len(lst)
    for offset in range(0, total, n):
        yield lst[offset:offset + n]
def inference(cluster_info, qname='input'):
    """
    Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.
    """
    def _inference(partition):
        # Reconnect to the shared queue manager for this host/parent process.
        mgr = _get_manager(cluster_info, socket.gethostname(), os.getppid())
        queue_in = mgr.get_queue(qname)
        logging.info("Feeding partition {0} into {1} queue {2}".format(partition, qname, queue_in))
        count = 0
        for item in partition:
            count += 1
            queue_in.put(item, block=True)
        # Block until consumers have processed every queued item before
        # "finishing" this iterator.
        queue_in.join()
        logging.info("Processed {0} items in partition".format(count))
        # Drain exactly `count` results from the output queue.
        queue_out = mgr.get_queue('output')
        results = []
        while count > 0:
            results.append(queue_out.get(block=True))
            queue_out.task_done()
            count -= 1
        logging.info("Finished processing partition")
        return results
    return _inference
def is_array_of(obj, classinfo):
    """
    Check if obj is a list of classinfo or a tuple of classinfo or a set of classinfo
    :param obj: an object
    :param classinfo: type of class (or subclass). See isinstance() build in function for more info
    :return: flag: True or False
    """
    if isinstance(obj, classinfo):
        # obj itself is a single instance, not a collection of them.
        return False
    # Vacuously True for an empty collection, matching all() semantics.
    return all(isinstance(item, classinfo) for item in obj)
def create_acronym(fullname):
    """Create an acronym for an estimator.

    The acronym consists of the capital letters in the name if
    there are at least two. If not, the entire name is used.

    Parameters
    ----------
    fullname: str
        Estimator's __name__.

    Returns
    -------
    str
        Created acronym.
    """
    from atom.models import MODELS

    acronym = "".join(filter(str.isupper, fullname))
    # Fall back to the full name when the acronym is too short or would
    # collide with an existing model acronym.
    if len(acronym) < 2 or acronym.lower() in MODELS:
        return fullname
    return acronym
def activate_through_friendbot(address: str):
    """
    Activates and funds a testnet account using friendbot
    """
    response = requests.get(url="https://friendbot.stellar.org", params={"addr": address})
    # Surface any HTTP error so callers know the account was not funded.
    response.raise_for_status()
def test_colored_svg(qtbot):
    """Test that we can create a colored icon with certain color."""
    icon = QColoredSVGIcon.from_resources('new_points')
    assert isinstance(icon, QIcon)
    recolored = icon.colored('#0934e2', opacity=0.4)
    assert isinstance(recolored, QColoredSVGIcon)
    # A non-null pixmap confirms the SVG actually rendered.
    assert icon.pixmap(250, 250)
def transact_update_path(path):
    """Build the DynamoDB transact_write_items input for a learning-path update.

    Collects, as one transaction, the update of the path's own metadata, the
    forward/backward links between the path and its videos, and the per-video
    play-order items.

    NOTE: transact_write_items is a client API (not a resource API), so all
    values use the low-level attribute-value format ({"S": ...}, {"N": ...}).

    :param path: update request holding PK, user, metadata fields and the
        appended/removed video uris plus play orders
    :return: list of TransactWriteItem dicts (Update / Put / Delete)
    """
    def update_path(path):
        """Build the Update item for the learning path's own metadata."""
        update_input = defaultdict(
            dict,
            TableName="primary_table",
            Key={"PK": {"S": path.PK}, "SK": {"S": path.PK}},
            UpdateExpression="SET updatedAt=:date, updatedUser=:user",
            ExpressionAttributeValues={
                ":date": {"S": timestamp_jst()},
                ":user": {"S": path.user},
            },
        )
        # Only fields present on the request are added to the expression.
        if path.name is not None:
            update_input["UpdateExpression"] += ", #name=:name"
            # "name" is a DynamoDB reserved word, so alias it.
            update_input["ExpressionAttributeNames"]["#name"] = "name"
            update_input["ExpressionAttributeValues"][":name"] = {"S": path.name}
        if path.description is not None:
            update_input["UpdateExpression"] += ", description=:description"
            update_input["ExpressionAttributeValues"][":description"] = {"S": path.description}
        if path.note is not None:
            update_input["UpdateExpression"] += ", note=:note"
            update_input["ExpressionAttributeValues"][":note"] = {"S": path.note}
        if path.invalid is not None:
            update_input["UpdateExpression"] += ", invalid=:invalid"
            update_input["ExpressionAttributeValues"][":invalid"] = {"BOOL": path.invalid}
        return {"Update": update_input}

    def update_path_to_video(appended, path_id, user):
        """Build Update items adding the path id to each appended video."""
        def get_videos(appended):
            for uri in appended:
                video_id = uri.split("/")[2]
                video = get_video_from_db(video_id)
                yield video

        def generate_input(video, path_id, user):
            path_ids = video.get("learningPathIds", [])
            # The attribute is sometimes a set, so coerce to a list explicitly.
            path_ids = list(path_ids)
            # A lone "" placeholder keeps the DynamoDB string set non-empty.
            # Drop it before appending; guard against an empty list, which
            # previously raised IndexError on path_ids[0].
            if path_ids and not path_ids[0]:
                path_ids.remove("")
            path_ids.append(path_id)
            update_input = dict(
                TableName="primary_table",
                Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
                UpdateExpression="SET updatedAt=:date, updatedUser=:user"
                + ", learningPathIds=:paths",
                ExpressionAttributeValues={
                    ":date": {"S": timestamp_jst()},
                    ":user": {"S": user},
                    ":paths": {"SS": path_ids},
                },
            )
            return {"Update": update_input}

        it_videos = get_videos(appended)
        it_inputs = (generate_input(video, path_id, user) for video in it_videos)
        return it_inputs

    def remove_path_from_video(removed, path_id, user):
        """Build Update items removing the path id from each removed video."""
        def get_videos(removed):
            for uri in removed:
                video_id = uri.split("/")[2]
                video = get_video_from_db(video_id)
                yield video

        def generate_input(video, path_id, user):
            path_ids = video.get("learningPathIds", [])
            path_ids = list(path_ids)
            if path_id in path_ids:
                path_ids.remove(path_id)
            # Keep the DynamoDB string set non-empty with a "" placeholder.
            if len(path_ids) <= 0:
                path_ids = [""]
            update_input = dict(
                TableName="primary_table",
                Key={"PK": {"S": video["PK"]}, "SK": {"S": video["PK"]}},
                UpdateExpression="SET updatedAt=:date, updatedUser=:user"
                + ", learningPathIds=:paths",
                ExpressionAttributeValues={
                    ":date": {"S": timestamp_jst()},
                    ":user": {"S": user},
                    ":paths": {"SS": path_ids},
                },
            )
            return {"Update": update_input}

        it_videos = get_videos(removed)
        it_inputs = (generate_input(video, path_id, user) for video in it_videos)
        return it_inputs

    def update_video_order(orders, path_id):
        """Build Put/Update items that append or update the video play orders."""
        def get_orders(orders, path_id):
            # Yield (already_exists, order) so generate_input can choose
            # between Update (existing item) and Put (new item).
            for order in orders:
                req_order = {"PK": path_id, "uri": order.uri}
                current_order = get_order(req_order=ReqOrder(**req_order))
                if current_order:
                    yield True, order
                else:
                    yield False, order

        def generate_input(order_with_update_or_append, path_id):
            is_update, order = order_with_update_or_append
            if is_update:
                update_input = dict(
                    TableName="primary_table",
                    Key={"PK": {"S": path_id}, "SK": {"S": order.uri}},
                    UpdateExpression="SET #order=:order",
                    # "order" is a DynamoDB reserved word, so alias it.
                    ExpressionAttributeNames={"#order": "order"},
                    ExpressionAttributeValues={":order": {"N": str(order.order)}},
                )
                return {"Update": update_input}
            else:
                item_attrs = dict(
                    PK={"S": path_id},
                    SK={"S": order.uri},
                    indexKey={"S": "Video"},
                    createdAt={"S": str(uuid.uuid1())[:8]},
                    order={"N": str(order.order)},
                )
                item = {"TableName": "primary_table", "Item": item_attrs}
                return {"Put": item}

        it_orders = get_orders(orders, path_id)
        it_inputs = (generate_input(order, path_id) for order in it_orders)
        return it_inputs

    def remove_video_order(removed, path_id):
        """Build Delete items removing the play orders of removed videos."""
        inputs = [
            dict(
                TableName="primary_table", Key={"PK": {"S": path_id}, "SK": {"S": uri}}
            )
            for uri in removed
        ]
        orders = [{"Delete": key_input} for key_input in inputs]
        return orders

    transact_items = []
    # Update the playlist's own metadata.
    transact_items.append(update_path(path))
    # For videos added to the playlist, add the playlist id to their metadata.
    transact_items.extend(
        update_path_to_video(appended=path.appended, path_id=path.PK, user=path.user)
    )
    # For videos removed from the playlist, remove the playlist id from their metadata.
    transact_items.extend(
        remove_path_from_video(removed=path.removed, path_id=path.PK, user=path.user)
    )
    # Delete the play-order items of the removed videos.
    transact_items.extend(remove_video_order(removed=path.removed, path_id=path.PK))
    # Append or update the playlist's play orders.
    transact_items.extend(update_video_order(orders=path.orders, path_id=path.PK))
    return transact_items
def report_model_metrics(
    data: pd.DataFrame,
    model_params: Dict[str, Any],
    y_pred: str,
    active_run_id: str,
    name_prefix: str = '',
    group_values: list = None
) -> None:
    """Node for reporting the performance metrics of the predictions performed
    by the previous node. Notice that this function has no outputs, except
    logging.

    Args:
        data: dataframe holding true values and the model/baseline predictions
        model_params: model configuration; uses 'name', 'target' and 'metrics'
        y_pred: name of the prediction column to evaluate
        active_run_id: mlflow run id to log metrics to (None disables logging)
        name_prefix: prefix prepended to every logged metric name
        group_values: data groups to report on (default: ['train', 'test'])
    """
    if group_values is None:  # avoid a mutable default argument
        group_values = ['train', 'test']
    metrics_dict = {
        metric_name: METRIC_NAME_TO_FUNCTION_DICT[metric_name]
        for metric_name in model_params['metrics']
    }
    # Evaluate only rows that have all core features and non-null predictions
    # from both the model and the baseline.
    predicted_col = 'predicted_{}'.format(model_params['name'])
    evaluation_df = evaluate_predictions(
        data[(data.missing_core_features == False) &  # noqa: E712 -- elementwise pandas comparison
             data[predicted_col].notna() &
             data['predicted_baseline'].notna() &
             data.group.isin(group_values)],
        y_true=model_params['target'],
        y_pred=y_pred,
        metrics_dict=metrics_dict,
    )
    # Log the accuracy of the model
    log = logging.getLogger(__name__)
    if active_run_id is not None:
        with mlflow.start_run(run_id=active_run_id):
            # Iterate evaluation_df's columns (not metrics_dict) so extra
            # computed metrics such as unimpeded_AMA are also reported.
            for metric_name in evaluation_df.keys():
                log.info("metric {}:".format(name_prefix + metric_name))
                for group in [v for v in data.group.unique() if v in group_values]:
                    log.info("{} group: {}".format(
                        group,
                        evaluation_df.loc[group, metric_name]
                    ))
                    mlflow.log_metric(
                        name_prefix + metric_name + '_' + group,
                        evaluation_df.loc[group, metric_name]
                    )
def Tc(filename):
    """Extract Curie temperature

    :param filename: output file to read the resistivity from
    """
    job = AkaikkrJob(".")
    resistivity = job.get_resistivity(filename)
    # Prefix the value with the current function name for traceability.
    print(sys._getframe().f_code.co_name, resistivity)
def codon_usage(seq, aminoacid):
    """Provides the frequency of each codon encoding a given aminoacid in a DNA sequence"""
    # Collect every in-frame codon that translates to the requested aminoacid.
    matching_codons = [
        seq[i:i + 3]
        for i in range(0, len(seq) - 2, 3)
        if DNA_Codons[seq[i:i + 3]] == aminoacid
    ]
    frequencies = dict(Counter(matching_codons))
    total_weight = sum(frequencies.values())
    # Convert raw counts into relative frequencies rounded to 2 decimals.
    for codon in frequencies:
        frequencies[codon] = round(frequencies[codon] / total_weight, 2)
    return frequencies
def make_check_stderr_message(stderr, line, reason):
    """
    Create an exception message to use inside check_stderr().
    """
    template = (
        "{reason}:\n"
        "Caused by line: {line!r}\n"
        "Complete stderr: {stderr}\n"
    )
    return template.format(stderr=stderr, line=line, reason=reason)
def mesh2shp(infile):
    """Convert a mesh file into a scenario shapefile.

    The input file is assumed to consist of three equally sized blocks of
    comma-separated lines: longitudes, latitudes and depths. The first and
    last line of each block define the fault outline; the bottom edge of
    each block is reversed so the outline forms a closed ring.

    NOTE(review): converted from Python 2 (`print x`) to Python 3 syntax;
    `//` preserves the original integer-division index arithmetic.

    :param infile: path to the input mesh file; the output name is derived
        by replacing the extension with 'shp' and 'mesh' with 'scenario'
    """
    output_shp = infile[:-3] + 'shp'
    output_shp = output_shp.replace('mesh', 'scenario')
    file_length = file_len(infile)
    print(file_length)
    # Line indices bounding each third of the file (lons, lats, depths).
    third = file_length // 3
    top_lons = 0
    bottom_lons = third - 1
    top_lats = third
    bottom_lats = third * 2 - 1
    top_depths = third * 2
    bottom_depths = file_length - 1
    print(top_lons, bottom_lons, top_lats, bottom_lats, top_depths, bottom_depths)
    lons = []
    lats = []
    depths = []

    def _parse_line(text_line):
        """Split one comma-separated line into a list of floats."""
        return [float(x) for x in text_line.split(',')]

    with open(infile) as f:
        for i, l in enumerate(f):
            if i == top_lons:
                lons.append(_parse_line(l))
            if i == bottom_lons:
                tmp_lons = _parse_line(l)
                print(tmp_lons)
                lons.append(list(reversed(tmp_lons)))
            if i == top_lats:
                lats.append(_parse_line(l))
            if i == bottom_lats:
                tmp_lats = _parse_line(l)
                print(tmp_lats)
                lats.append(list(reversed(tmp_lats)))
            if i == top_depths:
                depths.append(_parse_line(l))
            if i == bottom_depths:
                tmp_depths = _parse_line(l)
                print(tmp_depths)
                depths.append(list(reversed(tmp_depths)))
    lons = np.array(lons).flatten()
    lats = np.array(lats).flatten()
    depths = np.array(depths).flatten()
    print(lons)
    print(lats)
    print(depths)
    fault2shp(lons, lats, output_shp, corner_depths=depths, vertice_array=True)
def write_to_ch(df, date_field_name, table_name, mode, config, logger, pk_columns=None) -> None:
    """ Dumps PySpark DataFrame to ClickHouse, create or recreate table if needed.

    Args:
        df: PySpark DataFrame
        date_field_name: date field (must be of Spark DateType) used for partitioning
        table_name: table name, without database prefix and without dots
        mode: describe, what do if table already exists
            must be one of 'overwrite' / 'append' / 'fail':
            - overwrite: drop and create table and insert rows (CH hasn't truncate operator)
            - append: insert rows to exist table
            - fail: raise Exception
        config: settings dict with CH_DB_NAME, CH_JDBC_URL and CH_JDBC_PROPERTIES keys
        logger: logger instance passed through to table-creation helper
        pk_columns: list/tuple of primary key columns (None for all columns)

    Raises:
        AssertionError: on an invalid mode, a dotted table name, or a
            non-DateType partition field.
    """
    assert mode in ['overwrite', 'append', 'fail'], "mode must be 'overwrite' / 'append' / 'fail'"
    assert '.' not in table_name, 'dots are not allowed in table_name'
    # Locate the partition field in the DataFrame schema and validate its type.
    date_field = next(field for field in df.schema.fields if field.name == date_field_name)
    assert type(date_field.dataType) == DateType, \
        "df['{}'].dataType={} must be DateType".format(date_field_name, date_field.dataType)
    # Create / recreate / verify the target table according to `mode`.
    make_sure_exsit(df, date_field_name, table_name, mode, config=config, logger=logger, pk_columns=pk_columns)
    full_table_name = '{}.{}'.format(config["CH_DB_NAME"], table_name)
    # Spark JDBC CH Driver works correctly only in append mode
    # and without NULL-s, so NULLs are replaced before writing.
    df = smart_ch_fillna(df)
    df.write.jdbc(url=config['CH_JDBC_URL'], table=full_table_name, mode='append',
                  properties=config['CH_JDBC_PROPERTIES'])
def leaderboard(input):
    """Generates a link to the leaderboard for the current category in chat.

    Reads the module-level chat state (message, s, CHANNEL, GAMES, CATEGORIES)
    and posts either the speedrun.com leaderboard link or an error message.

    Parameters
    ----------
    input : str
        the name of the chat command that calls this function
    """
    # Only react when the incoming chat message is exactly this command.
    if input != message.lower().strip():
        return
    # get user ID of current channel
    try:
        USER_ID = getUserID(CHANNEL)
    except LookupError:
        sendMessage(s, CHANNEL, "User not found")
        cooldown()
        return
    # get title of current channel
    title = getStreamTitle(USER_ID)
    game, platform, platform_title = getGame(USER_ID)
    # Look up the display title for the detected game slug.
    game_title = None
    for entry in GAMES:
        if entry[1] == game:
            game_title = entry[0]
            break
    # Find the first category whose name appears in the stream title.
    category = None
    category_title = None
    for entry in CATEGORIES:
        if entry[0].lower() in title:
            category = entry[1]
            category_title = entry[0]
            break
    if game is None:
        sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
        cooldown()
        return
    if category is not None:
        sendMessage(s, CHANNEL, game_title + " " + category_title + " Leaderboard: https://www.speedrun.com/{}#{}".format(game, category))
    else:
        sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
    cooldown()
    return
def diffusion_step(matrix, row_damping=0, column_damping=0):
    """
    Return the diffusion adjacency matrix produced by the input matrix
    with the specified row and column normalization exponents.
    Note: the row normalization is performed second, so if a value
    of row_damping=1 is used, the output will be a row-stochastic
    matrix regardless of choice of column normalization. Matrix will
    not be modified in place.

    Parameters
    ==========
    matrix : numpy.ndarray
        adjacency matrix for a given metaedge, where the source nodes are
        rows and the target nodes are columns
    row_damping : int or float
        exponent to use in scaling each node's row by its in-degree
    column_damping : int or float
        exponent to use in scaling each node's column by its column-sum
    Returns
    =======
    numpy.ndarray
        Normalized matrix with dtype.float64.
    """
    # Work on a freshly allocated copy so the caller's matrix is untouched.
    matrix = copy_array(matrix)
    # Columns are normalized first, rows second (see docstring note).
    for axis, damping, orientation in ((0, column_damping, 'columns'),
                                       (1, row_damping, 'rows')):
        if damping != 0:
            axis_sums = numpy.array(matrix.sum(axis=axis)).flatten()
            matrix = normalize(matrix, axis_sums, orientation, damping)
    return matrix
def go_to_sleep(current_time):
    """Enter deep sleep until 15 minutes past 3am (next day)."""
    hour, minutes, seconds = time.localtime(current_time)[3:6]
    # Seconds elapsed since local midnight.
    since_midnight = 3600 * hour + 60 * minutes + seconds
    # Wake-up offset after the next midnight: 03:15.
    wake_offset = 3 * 3600 + 15 * 60
    sleep_seconds = (24 * 3600 - since_midnight) + wake_offset
    hours_part = sleep_seconds // 3600
    minutes_part = (sleep_seconds // 60) % 60
    print("Sleeping for {} hours, {} minutes".format(hours_part, minutes_part))
    magtag.exit_and_deep_sleep(sleep_seconds)
def verb_context(filtertype, aidcfg, verbose):
    """ closure helper

    Builds and returns a VerbosityContext class bound to the given
    filtertype / aidcfg / verbose settings.

    :param filtertype: label describing the kind of filtering being logged
    :param aidcfg: configuration dict whose relevant keys are printed
    :param verbose: verbosity level (0 = silent, truthy = normal, >1 = detailed)
    """
    class VerbosityContext(object):
        """
        Printing filter info in a way that avoids polluting the function
        namespace. This is a hack.
        This is a with_statement context class that expect a variable avail_aids
        to be modified inside the context. It prints the state of the variable
        before and after filtering. Several static methods can be used
        at the start and end of larger filtering functions.

        NOTE(review): this class reads the variables ``prefix``, ``avail_aids``
        and ``ibs`` directly out of the CALLER's stack frame via
        ut.get_var_from_stack -- it only works inside functions that define
        those exact variable names.
        """
        def __init__(self, *keys, **filterextra):
            # Grab the caller's `prefix` variable for log labeling.
            self.prefix = ut.get_var_from_stack('prefix', verbose=False)
            if verbose:
                dictkw = dict(nl=False, explicit=True, nobraces=True)
                infostr = ''
                if len(keys) > 0:
                    # Show only the aidcfg entries relevant to this filter.
                    subdict = ut.dict_subset(aidcfg, keys, None)
                    infostr += '' + ut.dict_str(subdict, **dictkw)
                print('[%s] * Filter by %s' % (
                    self.prefix.upper(), infostr.strip()))
                if verbose > 1 and len(filterextra) > 0:
                    infostr2 = ut.dict_str(filterextra, nl=False, explicit=False)
                    print('[%s] %s' % (
                        self.prefix.upper(), infostr2))
        def __enter__(self):
            # Record how many annotations are available before filtering.
            aids = ut.get_var_from_stack('avail_aids', verbose=False)
            self.num_before = len(aids)
        def __exit__(self, exc_type, exc_value, exc_traceback):
            if verbose:
                # Re-read avail_aids from the caller to see what was removed.
                aids = ut.get_var_from_stack('avail_aids', verbose=False)
                num_after = len(aids)
                num_removed = self.num_before - num_after
                if num_removed > 0 or verbose > 1:
                    print('[%s] ... removed %d annots. %d remain' %
                          (self.prefix.upper(), num_removed, num_after))
        @staticmethod
        def report_annot_stats(ibs, aids, prefix, name_suffix, statskw={}):
            # NOTE(review): mutable default `statskw={}` is only read here,
            # never mutated, so it is safe in practice.
            if verbose > 1:
                with ut.Indenter('[%s] ' % (prefix.upper(),)):
                    # TODO: helpx on statskw
                    #statskw = dict(per_name_vpedge=None, per_name=None)
                    dict_name = prefix + 'aid_stats' + name_suffix
                    #hashid, per_name, per_qual, per_vp, per_name_vpedge,
                    #per_image, min_name_hourdist
                    ibs.print_annot_stats(aids, prefix=prefix, label=dict_name,
                                          **statskw)
        #def report_annotconfig_stats(ref_aids, aids):
        #    with ut.Indenter('  '):
        #        ibs.print_annotconfig_stats(ref_aids, avail_aids)
        @staticmethod
        def startfilter(withpre=True):
            """
            Print the filter header (and optionally pre-filter stats).

            Args:
                withpre (bool): if True reports stats before filtering
            """
            if verbose:
                prefix = ut.get_var_from_stack('prefix', verbose=False)
                print('[%s] * [%s] %sAIDS' % (prefix.upper(), filtertype,
                                              prefix))
                if verbose > 1 and withpre:
                    ibs = ut.get_var_from_stack('ibs', verbose=False)
                    aids = ut.get_var_from_stack('avail_aids', verbose=False)
                    VerbosityContext.report_annot_stats(ibs, aids, prefix,
                                                        '_pre')
        @staticmethod
        def endfilter(withpost=True):
            # Print post-filter stats, the semantic-uuid hash of the
            # surviving annotations, and the final count.
            if verbose:
                ibs = ut.get_var_from_stack('ibs', verbose=False)
                aids = ut.get_var_from_stack('avail_aids', verbose=False)
                prefix = ut.get_var_from_stack('prefix', verbose=False)
                hashid = ibs.get_annot_hashid_semantic_uuid(
                    aids, prefix=prefix.upper())
                if withpost:
                    if verbose > 1:
                        VerbosityContext.report_annot_stats(ibs, aids, prefix,
                                                            '_post')
                print('[%s] * HAHID: %s' % (prefix.upper(), hashid))
                print('[%s] * [%s]: len(avail_%saids) = %r\n' % (
                    prefix.upper(), filtertype, prefix, len(aids)))
    return VerbosityContext
def get_add_many_columns_function(row_function, data_types):
    """Returns a function which adds several columns to a row based on given row function.

    :param row_function: UDF mapping a row to an indexable of new column values
    :param data_types: expected data type for each returned column, in order
    :return: function mapping a row to a list of cast, BSON-friendly values
    """
    def add_many_columns(row):
        result = row_function(row)
        data = []
        for i, data_type in enumerate(data_types):
            try:
                value = result[i]
            except TypeError as e:
                # Chain the original error for easier UDF debugging.
                raise RuntimeError("UDF returned non-indexable value. Provided schema indicated an Indexable return type") from e
            except IndexError as e:
                raise RuntimeError("UDF return value did not match the number of items in the provided schema") from e
            # Cast to the schema type and convert numpy values so they can
            # be BSON-encoded downstream.
            cast_value = valid_data_types.cast(value, data_type)
            data.append(numpy_to_bson_friendly(cast_value))
        return data
    return add_many_columns
def determine_peaks(spectrum, peak='both', amp_threshold=None):
    """Find peaks in a spectrum.

    Parameters
    ----------
    spectrum : numpy.ndarray
        Array of the data values of the spectrum.
    peak : 'both' (default), 'positive', 'negative'
        Which peak polarity to search for.
    amp_threshold : float
        Required minimum threshold that at least one data point in a peak feature has to exceed.

    Returns
    -------
    consecutive_channels or amp_vals : numpy.ndarray
        If the 'amp_threshold' value is supplied an array with the maximum data values of the ranges is returned. Otherwise, the number of spectral channels of the ranges is returned.
    ranges : list
        List of intervals [(low, upp), ...] determined to contain peaks.
    """
    if (peak == 'both') or (peak == 'positive'):
        # Clipping positives to zero turns every non-negative run into a run
        # of zeros, whose boundaries mark candidate positive-peak ranges.
        clipped_spectrum = spectrum.clip(max=0)
        # Create an array that is 1 where a is 0, and pad each end with an extra 0.
        iszero = np.concatenate(
            ([0], np.equal(clipped_spectrum, 0).view(np.int8), [0]))
        absdiff = np.abs(np.diff(iszero))
        # Runs start and end where absdiff is 1.
        ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
    if (peak == 'both') or (peak == 'negative'):
        clipped_spectrum = spectrum.clip(min=0)
        # Create an array that is 1 where a is 0, and pad each end with an extra 0.
        iszero = np.concatenate(
            ([0], np.equal(clipped_spectrum, 0).view(np.int8), [0]))
        absdiff = np.abs(np.diff(iszero))
        if peak == 'both':
            # Runs start and end where absdiff is 1.
            ranges = np.append(
                ranges, np.where(absdiff == 1)[0].reshape(-1, 2), axis=0)
        else:
            ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
    if amp_threshold is not None:
        if peak == 'positive':
            mask = spectrum > abs(amp_threshold)
        elif peak == 'negative':
            mask = spectrum < -abs(amp_threshold)
        else:
            mask = np.abs(spectrum) > abs(amp_threshold)
        if np.count_nonzero(mask) == 0:
            return np.array([]), np.array([])
        # Keep only ranges containing at least one point above threshold.
        peak_mask = np.split(mask, ranges[:, 1])
        mask_true = np.array([any(array) for array in peak_mask[:-1]])
        ranges = ranges[mask_true]
        if peak == 'positive':
            amp_vals = np.array([max(spectrum[low:upp]) for low, upp in ranges])
        elif peak == 'negative':
            amp_vals = np.array([min(spectrum[low:upp]) for low, upp in ranges])
        else:
            # BUGFIX: this previously passed a generator expression to
            # np.array, producing a useless 0-d object array. A list
            # comprehension builds the intended 1-d signed-amplitude array:
            # sign of the first channel in the range times the maximum
            # absolute value within the range.
            amp_vals = np.array([
                np.sign(spectrum[low]) * max(np.abs(spectrum[low:upp]))
                for low, upp in ranges])
        # TODO: check if sorting really necessary??
        sort_indices = np.argsort(amp_vals)[::-1]
        return amp_vals[sort_indices], ranges[sort_indices]
    else:
        sort_indices = np.argsort(ranges[:, 0])
        ranges = ranges[sort_indices]
        consecutive_channels = ranges[:, 1] - ranges[:, 0]
        return consecutive_channels, ranges
def sumlike_wrap(fun_name):
    """Handle torch.sum and torch.mean for stable tensors.

    Builds a wrapper around the named torch reduction and registers it in
    STABLE_FUNCTIONS so it can operate on STensor inputs, which carry a
    separate scale tensor alongside their data.

    :param fun_name: either 'sum' or 'mean'
    """
    # Define appropriate torch function, the rest of the logic is the same
    assert fun_name in ['sum', 'mean']
    torch_fun = getattr(torch, fun_name)
    @wraps(torch_fun)
    def sumlike_fun(input, dim=None, keepdim=False):
        nodim = dim is None
        if nodim:
            # Remove stable dims, then sum over data
            input = move_sdims(input, ())
            data_sum = torch_fun(input.data)
            # Scalar result: collapse the scale to a 0-d view as well.
            scale = input.scale.view(())
            output = STensor(data_sum, scale)
        else:
            # Convert dim to list of non-negative indices to sum over
            dim_list = tupleize(dim, input.ndim)
            # Make summed indices data dims, then sum over data tensor
            new_sdims = tuple(i for i in input.stable_dims
                              if i not in dim_list)
            input = move_sdims(input, new_sdims)
            data_sum = torch_fun(input.data, dim, keepdim=keepdim)
            scale = input.scale
            if not keepdim:
                # Drop the reduced dimensions from the scale tensor too.
                scale = squeeze_dims(scale, dim_list)
            output = STensor(data_sum, scale)
        # Renormalize so data stays in a numerically stable range.
        output.rescale_()
        return output
    # Register the new sum-like function
    STABLE_FUNCTIONS[torch_fun] = sumlike_fun
def _generate_adapted_paths(ed_operation, Path):
    """Receives a 'Path' through a DFA where each element is a tuple
    (state index reached, character set that triggered transition)
    According to the three edit distance operations 'insert', 'delete',
    and 'substitute' the path is adapted. The result is a list of paths.
    The path is, at 'AdaptN' positions. This functions finds all combinations
    of positions on the path, where adaptations can be made. For example,
    if a path has 3 steps and 2 adaptations are to be made then the
    the following arrays display the possible change configurations
    [1, 1, 0]
    [1, 0, 1]
    [0, 1, 1]
    were a 'array[i] == 1' indicates a change is to be made at step 'i' and
    else, it is left as is. A path of N elements has N-1 steps.
    """
    changed_f = False  # tracks whether any adapted path was yielded
    for cursor in ed_operation.generator(len(Path)):
        # cursor[i] -> where the i-th insertion happens.
        new_path = []
        operation_done_f = False
        # Iterate one element past the end via a (None, None) sentinel so
        # operations positioned after the last step are also applied.
        for position, step in enumerate(Path + [(None, None)]):
            target_si, character_set = step
            if position in cursor:
                # The same position may be hit by multiple operations.
                occupancy_n = sum(int(p == position) for p in cursor)
                operation_done_f |= ed_operation.treat_transition(new_path, target_si,
                                                                  character_set, occupancy_n)
            elif target_si is not None:
                # Untouched step: copy it over (the sentinel is skipped).
                new_path.append((target_si, character_set))
        if operation_done_f:
            changed_f = True
            yield new_path
    if not changed_f:
        # No operation applied anywhere: yield an unmodified copy.
        yield copy(Path)
def __crossover(n: int, g: np.matrix, m_list: np.array, f_list: np.array) -> np.matrix:
    """Single-point crossover between paired male/female gene rows.

    :param n: half of g.shape[0] (number of male/female pairs)
    :param g: bin mat of genes, one individual per row
    :param m_list: male nums (row indices)
    :param f_list: female nums (row indices)
    :return: crossed-over bin mat of genes
    """
    # One random crossover point per pair, drawn over the gene length.
    points = np.random.randint(low=0, high=g.shape[1], size=n)
    offspring = np.copy(g)
    for male, female, point in zip(m_list, f_list, points):
        # Swap the gene prefixes [0:point) between the pair.
        offspring[[male, female], :point] = offspring[[female, male], :point]
    return offspring
def setup_system():
    """
    Galactic center potential and Arches cluster
    position from Kruijssen 2014
    """
    potential = static_potentials.Galactic_Center_Potential_Kruijssen()
    # Cluster phase-space coordinates at time 2.05 in KDL15.
    arches_position = [-17.55767, -53.26560, -9.39921] | units.parsec
    arches_velocity = [-187.68008, 80.45276, 33.96556] | units.kms
    cluster = Particle()
    cluster.position = arches_position + coordinate_correction
    cluster.velocity = arches_velocity
    return potential, cluster
def MCM_data_scraper(species_list, get_image: bool = False, display: bool = False,
                     filename:str='', savepath: str = ''):
    """Function that takes a list of species, and rips all the info off the
    MCM Webpage about that species.(e.g. SMILES, INCHI, etc)...
    Inputs:
    -------
        species_list - A list of MCM Species you want info for. If you'd like info
            about all of the species then pass species_list= ['All']
        get_image - (optional) - Boolean of whether you'd like to save the image associated with that
            species. Note: This will require saving these images to your computer.
            which can take up a considerable amount of space. Default is False.
        filename - (optional) - Name of the .excel and .html files generated that
            contain the scraped info.
        savepath - (optional) Where you'd like to save the output .csv, .html and
            MCM images. If none is provided, is saved in current directory with
            images in a new subfolder current_path+/"MCM_images/".
        display - (optional) Boolean of whether you'd like to display the web scraped
            results as a table in your web browser at the end.
    Outputs:
    --------
        (1) Function returns a pandas dataframe with all the saved data.
        (2) A excel workbook file saved at: savepath+filename+'.xlsx',
            which contains all of the scraped data. Easily read into python by
            pandas as a dataframe using:
            df=pd.read_excel(savepath+filename_'.xlsx',engine="openpyxl", index_col=0)
        (3) An HTML document saved at: savepath+filename+'.htm', which contains all the of
            the scraped data. A nice way to display all the data scraped AND the
            images that were scraped at the same time in a scrollable/ easily visualized way.
        (4) - Optional a folder at save_path+/"MCM_images/" with .pngs of the MCM molecules
            scraped from the MCM website.
    Author:
    -------
        Dr. Jessica D. Haskins (jhaskins@alum.mit.edu) GitHub: @jdhask
    Change Log:
    ----------
        10/29/2021 JDH Created
        1/18/2022 JDH modded function locations to allow use with F0AM_Tools
    """
    # Check the file path + file names given.
    excel_file= check_filename(filename=filename, default_name='MCM_web_scrape', ext='.xlsx',
                                savepath=savepath, overwrite=False, return_full=True)
    html_file= check_filename(filename=filename, default_name='MCM_web_scrape', ext='.html',
                                savepath=savepath, overwrite=False, return_full=True)
    if species_list[0].lower()=='all': species_list=load_data_files(species=True)
    # Create an empty pandas dataframe with column names of all the info we're gonna scrape.
    df = pd.DataFrame(columns=['MCM_Name', 'Formula', 'Molecular_Weight',
                               'InChI', 'SMILES', 'Description', 'Image', 'NIST_url'])
    if get_image is False:
        # Don't need image column if not gonna grab.
        df = df.drop(columns='Image')
    else:  # Are grabbinb images, so make a subfolder in savepath to keep them.
        if not os.path.exists(savepath+'/MCM_Images/'):
            os.makedirs(savepath+'/MCM_Images/')
    # Loop through all speices you'd like to scrape data for.
    for sps in species_list:
        # This is the 2021 base URL for browing an MCM species on the MCM website.
        # URL to the MCM website for a species.
        url = 'http://mcm.york.ac.uk/browse.htt?species='+sps
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        table = soup.find('table', {"class": "infobox"})
        tr = table.findAll(['tr'])
        # The InChI / SMILES infor is all contained within a "table" of the webpage...
        # Found by inspecting "soup" manually. If website changes, will need to re-inspect the soup!
        for cell in tr:
            th = cell.find_all('th')
            data_hdr = [col.text.strip('\n') for col in th][0]
            td = cell.find_all('td')
            if len(td) > 0:
                row = [i.text.replace('\n', '') for i in td][0]
                if data_hdr.lower() == 'molecular weight':
                    mw = np.float64(row)
                if data_hdr.lower() == 'smiles':
                    smiles = row
                if data_hdr.lower() == 'inchi':
                    inchi = row
                else:
                    # NOTE(review): this else re-binds inchi to '' on EVERY
                    # non-InChI row, so after the loop inchi only survives if
                    # the InChI row happened to be processed last — looks like
                    # a bug; verify against the live page's row order.
                    inchi = ''
                synl = list()  # Get the list of synonyms of this compoudn.
                if data_hdr.lower() == 'synonyms':
                    if len(data_hdr) > 0:
                        nms = row.split(';')
                        for item in nms:
                            item = item.replace(' ', '')
                            item = item.replace('\t', '')
                            if (len(item) > 0) and (item not in synl):
                                synl.append(item)
        # NOTE(review): if synl is empty this loop never runs and ``syn`` is
        # left unbound; same risk for ``mw``/``smiles``/``img`` when the
        # corresponding rows are absent — would raise NameError below.
        for s in synl:  # Take list, convert to string...
            s=s.replace("'",'')
            if len(s)==0:
                syn='None'
            else:
                if type(s)==str:
                    syn=s
                else:
                    syn=','.join(s)
        if len(inchi) > 0:
            if get_image is True:  # Option to also save the image of the MCM species in question.
                image_url = 'http://mcm.york.ac.uk/pics/species/'+sps+'.png'
                img_data = requests.get(image_url).content
                file_nm = savepath+'/MCM_Images/'+sps+'.png'
                with open(file_nm, 'wb') as handler:
                    handler.write(img_data)
                img = '<img src="'+file_nm+'"/>' #Save img source for displaying using IPYTHON later...
            # Link the the NIST website for this using its InChI indentifier.
            InChI2web = inchi.replace('InChI=', '')
            InChI2web = InChI2web.replace(',', '%2C')
            nist_url = 'https://webbook.nist.gov/cgi/inchi/InChI%3D'+InChI2web
            nist_page = requests.get(nist_url)
            nist_soup = BeautifulSoup(nist_page.content, 'html.parser')
            nist_main = nist_soup.find('main', attrs={'id': 'main'}).get_text()
            if 'invalid' in nist_main.lower() and 'identifier' in nist_main.lower():
                # The NIST url takes you to a "not" found page... so you can't strip it.
                nist_url = ''
            else:
                form = ''
                p1 = nist_soup.find("main")
                if p1 is not None:
                    p2 = p1.find("ul")
                    if p2 is not None:
                        parent = p2.find_all("li")
                        dat = [i.get_text().split(':') for i in parent]
                        if 'Formula' in dat[0][0].strip():
                            form = dat[0][1].strip()
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # pd.concat would be the modern replacement.
            if get_image is True:
                df = df.append({'MCM_Name': sps, 'Formula': form, 'Molecular_Weight': mw,'InChI': inchi, 'SMILES': smiles,
                                'Description': syn, 'Image': img, 'NIST_url': nist_url}, ignore_index=True)
            else:
                df = df.append({'MCM_Name': sps, 'Formula': form, 'Molecular_Weight': mw, 'InChI': inchi, 'SMILES': smiles,
                                'Description': syn, 'NIST_url': nist_url}, ignore_index=True)
    # Make sure string columns are all strings...
    only_strs= ['MCM_Name','Formula','InChI','SMILES','Description', 'NIST_url', 'Image']
    for i in df.index:
        for col in only_strs:
            if type(df.loc[i,col])!=str:
                if np.isnan(df.loc[i,col]): df.at[i,col]='None'
            elif df.loc[i,col] =='0':
                df.at[i,col]='None'
    # Convert dataframe to an HTML object.
    df_htm = df.to_html(escape=False)
    # Write html object to a file
    # NOTE(review): this writes ``df`` (not the already-rendered ``df_htm``,
    # which is unused) and leaks the open file handle — confirm intent.
    df.to_html(open(html_file, 'w'))
    print('HTML file saved as: ' + html_file)
    # Option to display the saved info in your webbrowser
    if display is True:
        display_MCM_table(html_file)
    # Also save the data as an excel workbook.
    # Read this back in using: df=pd.read_excel(savepath+filename_'.xlsx',engine="openpyxl", index_col=0)
    df.to_excel(excel_file,engine="openpyxl")
    print('excel file saved as: ' + excel_file)
    return df
def collect_dynamic_libs(name: str, dest: str = ".", dependencies: bool = True,
                         excludes: Iterable[str] = None) -> List:
    """
    Collect DLLs for distribution **name**.
    Arguments:
        name:
            The distribution's project-name.
        dest:
            Target destination, defaults to ``'.'``.
        dependencies:
            Recursively collect libs for dependent distributions (recommended).
        excludes:
            Dependent distributions to skip, defaults to ``None``.
    Returns:
        List of DLLs in PyInstaller's ``(source, dest)`` format.
    This collects libraries only from Conda's shared ``lib`` (Unix) or
    ``Library/bin`` (Windows) folders. To collect from inside a distribution's
    installation use the regular
    :func:`PyInstaller.utils.hooks.collect_dynamic_libs`.
    """
    # ``files`` and ``lib_dir`` are module-level helpers/constants defined
    # elsewhere in this file.
    _files = []
    for file in files(name, dependencies, excludes):
        # A file is classified as a DLL if it lives inside the dedicated
        # ``lib_dir`` DLL folder.
        if file.parent == lib_dir:
            # ``file.locate()`` resolves the packaged path to a real
            # filesystem path for PyInstaller.
            _files.append((str(file.locate()), dest))
    return _files
def make_agreements(file) -> pd.DataFrame:
    """In some of the human conditions, we hold out questions. Each randomly generated agent is
    given our test and then asked it's opinion on every hold out question.
    agreements.pkl is a Dict[Experiment, Tuple(ndarray, ndarray)] where each array element
    contains the fraction of holdout questions a single agent answered correctly. The first array
    contains agents that passed our test, and the second contains agents that didn't pass our test.
    This method massages that data into a DataFrame with experiments as they keys, a column
    for predicted alignment, and a column for the fraction of holdout questions answered correctly.
    """
    # Experiment keys are assumed to be (epsilon, delta, n) tuples — the
    # reset_index below expands them into columns. TODO confirm.
    agreements = pd.Series(pickle.load(file)).reset_index()
    # Expand the (aligned_array, misaligned_array) tuple into two columns.
    agreements = agreements.join(
        agreements.apply(lambda x: list(x[0]), result_type="expand", axis="columns"), rsuffix="_",
    )
    del agreements["0"]
    agreements.columns = ["epsilon", "delta", "n", "aligned", "misaligned"]
    # Stack aligned/misaligned into one long-format column of arrays...
    agreements = agreements.set_index(["epsilon", "delta", "n"]).stack().reset_index()
    agreements.columns = ["epsilon", "delta", "n", "aligned", "value"]
    # ...then explode so each agent's holdout fraction becomes its own row.
    agreements = agreements.explode("value")
    agreements["aligned"] = agreements.aligned == "aligned"
    agreements.value = agreements.value.apply(lambda x: float(x))
    agreements = agreements.dropna()
    return agreements
def column_as_html(column, table):
    """Return *column* rendered as one HTML table row (``<tr>...</tr>``).

    Cells, in order: field name, formatted type, then check-mark cells for
    primary key, foreign key (rendered as a link to the referenced table's
    anchor), NOT NULL, UNIQUE, BINARY, UNSIGNED, ZEROFILL and AUTO_INCREMENT,
    followed by the default value and the (HTML-escaped) comment.
    """
    def attr_cell(enabled, css_class):
        # One centered ✔ cell per boolean column attribute; blank otherwise.
        if enabled:
            return "<td class='centered {0}'>✔</td>".format(css_class)
        return "<td class='centered'> </td>"

    markup = "<tr>"
    # BUG FIX: the original passed column.comment as an unused second
    # argument to this single-placeholder format string.
    markup += "<td class='field'>{0}</td>".format(column.name)
    markup += "<td>{0}</td>".format(column.formattedType)
    # Check for Primary Key
    markup += attr_cell(table.isPrimaryKeyColumn(column), "primary")
    # Check for Foreign Key — links to the referenced table's anchor, so it
    # is not a plain check-mark cell.
    if table.isForeignKeyColumn(column):
        markup += "<td class='centered foreign'><a href='#{0}s'>✔</a></td>".format(column.name.replace(table.name, ""))
    else:
        markup += "<td class='centered'> </td>"
    # Check for Not Null attribute
    markup += attr_cell(column.isNotNull == 1, "notnull")
    # Check for Unique attribute
    markup += attr_cell(is_unique(column, table), "unique")
    # Check for Binary, Unsigned and Zero Fill attributes
    flags = list(column.flags)
    markup += attr_cell("BINARY" in flags, "binary")
    markup += attr_cell("UNSIGNED" in flags, "unsigned")
    markup += attr_cell("ZEROFILL" in flags, "zerofill")
    # Check for Auto Increment attribute
    markup += attr_cell(column.autoIncrement == 1, "autoincrement")
    # Default value
    markup += "<td>{0}</td>".format(column.defaultValue)
    # Comment
    markup += "<td class='comment'>{0}</td>".format(escape(column.comment))
    markup += "</tr>"
    return markup
def covariation(x, y):
    """
    Sample covariance of X and Y (normalized by n - 1).

    :param list or tuple x: 1st array.
    :param list or tuple y: 2nd array.
    :return: covariance.
    :rtype: float
    :raise ValueError: when x or y is empty
    """
    if x and y:
        # Arithmetic means computed inline (stdlib only, no helpers needed).
        m_x = sum(x) / len(x)
        m_y = sum(y) / len(y)
        dev_x = [i - m_x for i in x]
        # BUG FIX: the original computed dev_y from x, which silently turned
        # this into a (shifted) variance of x rather than a covariance.
        dev_y = [j - m_y for j in y]
        return sum(a * b for a, b in zip(dev_x, dev_y)) / (len(x) - 1)
    else:
        raise ValueError('x or y is empty')
def review_requests(ctx, url, counts_only, time_added_from, time_added_to):
    """Print the Review Requests List Resource.

    The date and time format is YYYY-MM-DD HH:MM:SS or
    {yyyy}-{mm}-{dd}T{HH}:{MM}:{SS} with an optional timezone appended as
    -{HH:MM}.

    Args:
        ctx: RBTLIB context.
        url: Review Board URL.
        counts_only: set to True to obtain review request counts only; False
            otherwise.
        time_added_from: earliest date from which to select review requests.
        time_added_to: latest date from which to select review requests.

    Returns:
        Writes to standard output.
    """
    # Only forward the query parameters the caller actually supplied.
    query_dict = dict()
    if counts_only:
        query_dict['counts-only'] = True
    if time_added_from:
        query_dict['time-added-from'] = time_added_from
    if time_added_to:
        query_dict['time-added-to'] = time_added_to
    # BUG FIX: the original used a Python 2 print statement (a syntax error
    # under Python 3) ...
    print(beautify(Root(ctx.obj['session'], url)().review_requests(query_dict)))
    # ... and referenced ``sys.exit`` without calling it, which was a no-op;
    # the intent of this CLI command is clearly to terminate here.
    sys.exit()
def stripped_spaces_around(converter):
    """Wrap *converter* so it receives whitespace-stripped input.

    ``None`` passes through untouched; any other value is ``.strip()``-ed
    before being handed to ``converter``.
    """
    def wrapper(value):
        # Preserve None as-is; convert everything else after stripping.
        return None if value is None else converter(value.strip())
    return wrapper
def ootf_inverse(
    value: FloatingOrArrayLike,
    function: Union[
        Literal["ITU-R BT.2100 HLG", "ITU-R BT.2100 PQ"], str
    ] = "ITU-R BT.2100 PQ",
    **kwargs: Any
) -> FloatingOrNDArray:
    """
    Maps relative display linear light to scene linear light using given
    inverse opto-optical transfer function (OOTF / OOCF).
    Parameters
    ----------
    value
        Value.
    function
        Inverse opto-optical transfer function (OOTF / OOCF).
    Other Parameters
    ----------------
    kwargs
        {:func:`colour.models.ootf_inverse_HLG_BT2100`,
        :func:`colour.models.ootf_inverse_PQ_BT2100`},
        See the documentation of the previously listed definitions.
    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Luminance of scene linear light.
    Examples
    --------
    >>> ootf_inverse(779.988360834115840)  # doctest: +ELLIPSIS
    0.1000000...
    >>> ootf_inverse(  # doctest: +ELLIPSIS
    ...     63.095734448019336, function='ITU-R BT.2100 HLG')
    0.1000000...
    """
    # Normalises/validates the method name against the registry of known
    # inverse OOTFs (case/spacing tolerant), raising on unknown names.
    function = validate_method(
        function,
        OOTF_INVERSES,
        '"{0}" inverse "OOTF" is invalid, it must be one of {1}!',
    )
    callable_ = OOTF_INVERSES[function]
    # Forward only the kwargs the selected implementation actually accepts.
    return callable_(value, **filter_kwargs(callable_, **kwargs))
def GetMemoryUsageOfProcess(pid):
    """Queries the system for the current memory usage of a specified process.
    This function only works in Linux and ChromeOS.
    Args:
        pid: The integer process identifier for the process to use.
    Returns:
        The memory usage of the process in MB, given as a float. If the process
        doesn't exist on the machine, then the value 0 is returned.
    """
    assert pyauto.PyUITest.IsLinux() or pyauto.PyUITest.IsChromeOS()
    # 'ps h -o rss' prints only the resident set size (in KiB) with no header.
    # shell=True is safe here only because pid is expected to be an integer;
    # callers must not pass untrusted strings.
    process = subprocess.Popen('ps h -o rss -p %s' % pid, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = process.communicate()[0]
    if stdout:
        # Convert KiB -> MB.
        return float(stdout.strip()) / 1024
    else:
        # Empty output means ps found no such process.
        return 0
def modified_query(benchmark, model_spec, run_index: int, epochs=108, stop_halfway=False):
    """
    NOTE:
    Copied from https://github.com/google-research/nasbench/blob/b94247037ee470418a3e56dcb83814e9be83f3a8/nasbench/api.py#L204-L263 # noqa
    We changed the function in such a way that we now can specified the run index (index of the evaluation) which was
    in the original code sampled randomly.
    OLD DOCSTRING:
    Fetch one of the evaluations for this model spec.
    Each call will sample one of the config['num_repeats'] evaluations of the
    model. This means that repeated queries of the same model (or isomorphic
    models) may return identical metrics.
    This function will increment the budget counters for benchmarking purposes.
    See self.training_time_spent, and self.total_epochs_spent.
    This function also allows querying the evaluation metrics at the halfway
    point of training using stop_halfway. Using this option will increment the
    budget counters only up to the halfway point.
    Args:
        benchmark: wrapper object exposing the NAS-Bench dataset as
            ``benchmark.dataset`` (replaces the original ``self``).
        model_spec: ModelSpec object.
        run_index: which of the repeated evaluations to return
            (0 <= run_index < config['num_repeats']).
        epochs: number of epochs trained. Must be one of the evaluated number of
            epochs, [4, 12, 36, 108] for the full dataset.
        stop_halfway: if True, returned dict will only contain the training time
            and accuracies at the halfway point of training (num_epochs/2).
            Otherwise, returns the time and accuracies at the end of training
            (num_epochs).
    Returns:
        dict containing the evaluated data for this object.
    Raises:
        OutOfDomainError: if model_spec or num_epochs is outside the search space.
    """
    if epochs not in benchmark.dataset.valid_epochs:
        raise OutOfDomainError('invalid number of epochs, must be one of %s'
                               % benchmark.dataset.valid_epochs)
    fixed_stat, computed_stat = benchmark.dataset.get_metrics_from_spec(model_spec)
    # MODIFICATION: Use the run index instead of the sampled one.
    # sampled_index = random.randint(0, self.config['num_repeats'] - 1)
    computed_stat = computed_stat[epochs][run_index]
    data = {}
    data['module_adjacency'] = fixed_stat['module_adjacency']
    data['module_operations'] = fixed_stat['module_operations']
    data['trainable_parameters'] = fixed_stat['trainable_parameters']
    if stop_halfway:
        data['training_time'] = computed_stat['halfway_training_time']
        data['train_accuracy'] = computed_stat['halfway_train_accuracy']
        data['validation_accuracy'] = computed_stat['halfway_validation_accuracy']
        data['test_accuracy'] = computed_stat['halfway_test_accuracy']
    else:
        data['training_time'] = computed_stat['final_training_time']
        data['train_accuracy'] = computed_stat['final_train_accuracy']
        data['validation_accuracy'] = computed_stat['final_validation_accuracy']
        data['test_accuracy'] = computed_stat['final_test_accuracy']
    # Book-keeping: accumulate the simulated budget spent by this query.
    benchmark.dataset.training_time_spent += data['training_time']
    if stop_halfway:
        benchmark.dataset.total_epochs_spent += epochs // 2
    else:
        benchmark.dataset.total_epochs_spent += epochs
    return data
def stop():
    """Stop the service.

    Placeholder: stopping is not implemented yet; prints a notice instead.
    """
    print('Sorry, not implemented yet.')
def gauss(x, mu=0, sigma=1):
    """
    Unnormalized Gaussian distribution (peak value 1 at x == mu).

    Parameters
    ----------
    x : float or np.ndarray
        Point(s) at which to evaluate.
    mu : float, optional
        Mean (location of the peak). Default 0.
    sigma : float, optional
        Standard deviation (width). Default 1.

    Returns
    -------
    y : type(x)
        Gaussian evaluated at x.

    Notes
    -----
    Some people use alpha (1/e point)
    instead of the sigma (standard deviation)
    to define the width of the Gaussian.
    They are related through: alpha = sigma * sqrt(2)
    """
    deviation = x - mu
    return np.exp(-(deviation ** 2) / (2 * sigma ** 2))
def GetSpd(ea):
    """
    Get current delta for the stack pointer
    @param ea: end address of the instruction
                i.e.the last address of the instruction+1
    @return: The difference between the original SP upon
                entering the function and SP for the specified address,
                or None if ``ea`` does not belong to any known function.
    """
    # Resolve the enclosing function first; IDA tracks SP deltas per function.
    func = idaapi.get_func(ea)
    if not func:
        return None
    return idaapi.get_spd(func, ea)
def is_fugashi_ipadic_available():
    """
    Check if the library is available.
    This function checks if fugashi (with ipadic) is available in your
    environment and returns the result as a bool value.
    Returns
    -------
    _fugashi_ipadic_available : bool
        If True, fugashi with ipadic is available in your environment.
    Examples
    --------
    >>> tokenizers.is_fugashi_ipadic_available()
    True
    """
    # Module-level flag set at import time elsewhere in this file.
    return _fugashi_ipadic_available
def create_new_layer(layer_name):
    """
    Works on the active tab with selected glyphs.
    This function takes a str (layer_name) and check if there is a layer with
    this name. If the layer doesn't exists, then creates a new layer.
    """
    # NOTE(review): ``font`` (lowercase) and ``Font`` (capitalized, the
    # Glyphs-app active-font global) are both used here — presumably they
    # refer to the same font object; confirm, otherwise this mixes fonts.
    for glyph in font.glyphs:
        for layer in glyph.layers:
            if layer in Font.selectedLayers:
                print(layer)
                # Only add the layer if the glyph does not already have one
                # with this name.
                if not glyph.layers[layer_name]:
                    new_layer = GSLayer()
                    new_layer.name = layer_name
                    glyph.layers.append(new_layer)
                else:
                    print('The "{0}" layer already exists.'.format(layer_name))
def test_normalize_standardize_allsamples_fromfile():
    """
    Testing normalize pycytominer function
    data_file provided
    method = "standardize"
    meta_features = "none"
    samples="all"
    """
    # Explicit feature list vs. inferred features should yield identical
    # z-scored output (rounded to 1 decimal for stable comparison).
    normalize_result = normalize(
        profiles=data_file,
        features=["x", "y", "z", "zz"],
        meta_features="infer",
        samples="all",
        method="standardize",
    ).round(1)
    infer_normalize_result = normalize(
        profiles=data_feature_infer_file,
        features="infer",
        meta_features=["Metadata_plate", "Metadata_treatment"],
        samples="all",
        method="standardize",
    ).round(1)
    expected_result = pd.DataFrame(
        {
            "Metadata_plate": ["a", "a", "a", "a", "b", "b", "b", "b"],
            "Metadata_treatment": [
                "drug",
                "drug",
                "control",
                "control",
                "drug",
                "drug",
                "control",
                "control",
            ],
            "Cells_x": [-1.1, -0.7, 1.9, -0.7, 0.6, 0.6, 0.6, -1.1],
            "Cells_y": [-0.6, -1.3, 0.9, -0.2, 0.2, 1.7, 0.6, -1.3],
            "Cytoplasm_z": [-0.8, 0.3, -0.6, -0.2, 0.0, 2.5, -0.6, -0.6],
            "Nuclei_zz": [-0.3, 0.7, -0.8, -0.6, 0.2, 2.3, -0.7, -0.7],
        }
    ).reset_index(drop=True)
    pd.testing.assert_frame_equal(infer_normalize_result, expected_result)
    # Column names differ between the two paths; align before comparing values.
    infer_normalize_result.columns = normalize_result.columns
    pd.testing.assert_frame_equal(normalize_result, infer_normalize_result)
def print_top_N_words(text, top_N=10):
    """
    Print the top_N most frequent words in *text* (default 10), excluding
    stopwords.

    Words are lower-cased and split on whitespace; stopwords are read from
    'stopwords.txt' in the current working directory, one word per line.
    Output: a header line followed by "word:count" lines in descending
    (count, word) order.
    """
    from collections import Counter

    # Count lower-cased words in one pass (replaces the manual dict loop).
    counts = Counter(word.lower() for word in text.split())
    # BUG FIX: the original left the stopword file handle open; the context
    # manager guarantees it is closed.
    with open('stopwords.txt') as stopword_file:
        common_words = {line.strip() for line in stopword_file}
    # (count, word) tuples sorted descending, matching the original ordering.
    ranked = sorted(
        ((count, word) for word, count in counts.items() if word not in common_words),
        reverse=True,
    )
    print(f"Top {top_N} words in this file are shown below\n" + "-" * 55)
    for count, word in ranked[:top_N]:
        print(f"{word}:{count}")
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
    """Zero-pad `schematic` (and optionally `labels`) up to `sidelength`.

    Each dimension shorter than `sidelength` is padded at its far end;
    dimensions already at least `sidelength` long are left alone. Label
    padding is filled with `nothing_id`.
    """
    deficits = [sidelength - dim for dim in schematic.size()]
    pad = []
    # torch.nn.functional.pad consumes (before, after) pairs starting from
    # the LAST dimension, hence the final reversal of this list.
    for deficit in deficits:
        pad.append(max(deficit, 0))
        pad.append(0)
    schematic = torch.nn.functional.pad(schematic, pad[::-1])
    if labels is not None:
        labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
    return schematic, labels
def segmentation_gaussian_measurement_batch(
        y_true,
        y_pred,
        gaussian_sigma=3,
        measurement=segmentation_losses.binary_crossentropy):
    """ Apply metric or loss measurement to a batch of data incorporating a 2D gaussian.
    Only works with batch size 1.
    Loop and call this function repeatedly over each sample
    to use a larger batch size.
    # Arguments
    y_true: is assumed to be [label, x_img_coord, y_image_coord]
    y_pred: is expected to be a 2D array of labels
        with shape [1, img_height, img_width, 1].
    gaussian_sigma: standard deviation of the 2D gaussian weighting, in pixels.
    measurement: per-sample metric/loss callable applied to each
        (y_true, y_pred) pair.
    """
    with K.name_scope(name='segmentation_gaussian_measurement_batch') as scope:
        if keras.backend.ndim(y_true) == 4:
            # sometimes the dimensions are expanded from 2 to 4
            # to meet Keras' expectations.
            # In that case reduce them back to 2
            y_true = K.squeeze(y_true, axis=-1)
            y_true = K.squeeze(y_true, axis=-1)
        # NOTE(review): batch_size must be statically known here for
        # tf.split to work — dynamic batch dimensions would fail.
        y_pred_shape = tf.Tensor.get_shape(y_pred)
        batch_size = y_pred_shape[0]
        y_true = tf.split(y_true, batch_size)
        y_pred = tf.split(y_pred, batch_size)
        results = []
        # Process one sample at a time and re-assemble the batch at the end.
        for y_true_img, y_pred_img in zip(y_true, y_pred):
            result = segmentation_gaussian_measurement(
                y_true=y_true_img, y_pred=y_pred_img,
                gaussian_sigma=gaussian_sigma,
                measurement=measurement
            )
            results = results + [result]
        results = tf.concat(results, axis=0)
        return results
def test_can_use_std_lattice():
    """Test of can_use_std_lattice.

    Checks a cubic conventional lattice against its standardized form under
    the given transformation matrix and point-group rotations.
    """
    conv_lat = [[6.06531185, 0.0, 0.0], [0.0, 0.0, 6.06531185], [0.0, -6.06531185, 0.0]]
    std_lattice = [
        [6.06531185, 0.0, 0.0],
        [0.0, 6.06531185, 0.0],
        [0.0, 0.0, 6.06531185],
    ]
    tmat = [[0.0, 0.5, 0.5], [-0.5, -0.5, 0.0], [0.5, 0.0, 0.5]]
    # Integer rotation matrices of the point group, in the lattice basis.
    rotations = [
        [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
        [[0, 1, 0], [0, 0, 1], [-1, -1, -1]],
        [[0, 0, 1], [-1, -1, -1], [1, 0, 0]],
        [[-1, -1, -1], [1, 0, 0], [0, 1, 0]],
        [[-1, -1, -1], [0, 0, 1], [0, 1, 0]],
        [[1, 0, 0], [-1, -1, -1], [0, 0, 1]],
        [[0, 1, 0], [1, 0, 0], [-1, -1, -1]],
        [[0, 0, 1], [0, 1, 0], [1, 0, 0]],
        [[-1, -1, -1], [1, 0, 0], [0, 0, 1]],
        [[1, 0, 0], [0, 1, 0], [-1, -1, -1]],
        [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
        [[0, 0, 1], [-1, -1, -1], [0, 1, 0]],
        [[1, 0, 0], [-1, -1, -1], [0, 1, 0]],
        [[0, 1, 0], [1, 0, 0], [0, 0, 1]],
        [[0, 0, 1], [0, 1, 0], [-1, -1, -1]],
        [[-1, -1, -1], [0, 0, 1], [1, 0, 0]],
        [[0, 1, 0], [-1, -1, -1], [0, 0, 1]],
        [[0, 0, 1], [1, 0, 0], [-1, -1, -1]],
        [[-1, -1, -1], [0, 1, 0], [1, 0, 0]],
        [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
        [[0, 0, 1], [1, 0, 0], [0, 1, 0]],
        [[-1, -1, -1], [0, 1, 0], [0, 0, 1]],
        [[1, 0, 0], [0, 0, 1], [-1, -1, -1]],
        [[0, 1, 0], [-1, -1, -1], [1, 0, 0]],
    ]
    assert can_use_std_lattice(conv_lat, tmat, std_lattice, rotations)
def save_image(img: Image, img_format=None, quality=85):
    """Save the picture from the stream into an in-memory buffer for later
    sending over the network.

    Returns a ``BytesIO`` positioned at the start of the encoded image.
    """
    if img_format is None:
        img_format = img.format
    output_stream = BytesIO()
    output_stream.name = 'image.jpeg'
    # On Ubuntu, for some reason there is no 'jpg' format, but 'jpeg' exists.
    # NOTE(review): this branches on ``img.format`` (the source format), not
    # on the requested ``img_format`` — presumably intentional, but confirm:
    # a non-JPEG source requested as JPEG takes the convert() branch below.
    if img.format == 'JPEG':
        img.save(output_stream, img_format, quality=quality, optimize=True, progressive=True)
    else:
        # Convert to RGB first so formats without alpha (e.g. JPEG) can encode.
        img.convert('RGB').save(output_stream, format=img_format)
    output_stream.seek(0)
    return output_stream
def main__log_exprs_multiline():
    """
    Embedding newlines in a single message:
    >>> @log_calls()
    ... def f(a):
    ...     log_calls.print("Even multiline messages\\n"
    ...                     "are properly indented!")
    ...     return g(a, 2*a)
    >>> @log_calls()
    ... def g(x, y):
    ...     retval = x + y + 1
    ...     log_calls.print_exprs('retval', prefix="Not to mention multiline\\nprefixes -- ")
    ...     return retval
    >>> f(2)
    f <== called by <module>
        arguments: a=2
        Even multiline messages
        are properly indented!
        g <== called by f
            arguments: x=2, y=4
            Not to mention multiline
            prefixes -- retval = 7
        g ==> returning to f
    f ==> returning to <module>
    7
    or using the ``sep`` keyword parameter with multiple messages:
    >>> @log_calls()
    ... def h():
    ...     log_calls.print("Line 1 of 3", "line 2 of 3", "line 3 of 3",
    ...                     sep='\\n')
    >>> h()
    h <== called by <module>
        Line 1 of 3
        line 2 of 3
        line 3 of 3
    h ==> returning to <module>
    Not indenting ``log_*`` output -- tedious:
    >>> @log_calls()
    ... def f(a):
    ...     log_calls.print("Even multiline messages\\n"
    ...                     "are properly indented!",
    ...                     extra_indent_level=-1000)
    ...     return g(a, 2*a)
    >>> @log_calls()
    ... def g(x, y):
    ...     retval = x + y + 1
    ...     log_calls.print_exprs('retval',
    ...                           extra_indent_level=-1000,
    ...                           prefix="Not to mention multiline\\nprefixes -- ")
    ...     return retval
    >>> f(2)
    f <== called by <module>
        arguments: a=2
    Even multiline messages
    are properly indented!
        g <== called by f
            arguments: x=2, y=4
    Not to mention multiline
    prefixes -- retval = 7
        g ==> returning to f
    f ==> returning to <module>
    7
    """
    # Doctest-only container: the examples in the docstring ARE the test.
    pass
def check_input(args: dict) -> dict:
    """
    Check if user entries latitude and longitude are well formatted. If ok, returns a dict with
    lat and lng converted as floats.
    - args: dict. request.args
    Aborts the request with HTTP 400 when either parameter is missing or
    malformed (the latter via check_lat_lng).
    """
    lat = args.get("lat")
    lng = args.get("lng")
    if lat is None:
        abort(400, "Latitude parameter (lat) is missing")
    if lng is None:
        abort(400, "Longitude parameter (lng) is missing")
    # check_lat_lng (defined elsewhere in this file) validates range/format
    # and converts to float.
    return {"lat": check_lat_lng(lat, "latitude"), "lng": check_lat_lng(lng, "longitude")}
def adjust_learning_rate(epoch, total_epochs, only_ce_epochs, learning_rate, optimizer):
    """Step-decay the learning rate over the DROCC training phase.

    The schedule (relative to the post-pretraining epoch count) is:
    first 30% of epochs -> lr, next 30% -> lr*0.1, next 30% -> lr*0.01,
    final 10% -> lr*0.001. All of the optimizer's parameter groups are
    updated in place.

    Parameters
    ----------
    epoch: Current training epoch.
    total_epochs: Total number of epochs for training.
    only_ce_epochs: Number of epochs for initial pretraining.
    learning_rate: Initial learning rate for training.
    optimizer: Optimizer whose ``param_groups`` receive the new rate.

    Returns
    -------
    The (mutated) optimizer.
    """
    # Epochs spent in the cross-entropy-only pretraining phase do not count
    # towards the decay schedule.
    drocc_epoch = epoch - only_ce_epochs
    drocc_epochs = total_epochs - only_ce_epochs
    if drocc_epoch <= 0.30 * drocc_epochs:
        lr = learning_rate
    elif drocc_epoch <= 0.60 * drocc_epochs:
        lr = learning_rate * 0.1
    elif drocc_epoch <= 0.90 * drocc_epochs:
        lr = learning_rate * 0.01
    elif drocc_epoch <= drocc_epochs:
        lr = learning_rate * 0.001
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer
def dummy_nullgeod():
    """
    Equatorial null geodesic fixture.

    Builds a Nulllike geodesic in a Kerr metric (spin 0.5) starting in the
    equatorial plane (theta = pi/2), integrated for 50 steps with step
    size 0.5. Coordinates stay spherical (return_cartesian=False).
    """
    return Nulllike(
        metric="Kerr",
        metric_params=(0.5,),
        position=[4., np.pi / 2, 0.],
        momentum=[0., 0., 2.],
        steps=50,
        delta=0.5,
        return_cartesian=False,
        suppress_warnings=True,
    )
def test_format_net_gain_returns_negative_number_in_parentheses_with_locale_formatting():
    """It formats negative number with locale formatting."""
    pacioli = Pacioli(config_file="tests/resources/sample_config.yml")
    # Use the environment's default locale so the expected %n rendering and
    # the implementation agree.
    locale.setlocale(locale.LC_ALL, "")
    n = int(-1000)
    n_positive = n * -1
    result = pacioli.format_negative_numbers(n)
    # Accounting convention: negatives rendered as parenthesized positives.
    assert f"({n_positive:n})" == result
def test_find_many_settings_precedence(monkeypatch) -> None:
    """test more than one in cwd: the cwd settings file wins over ~/."""
    expected = os.path.join(os.getcwd(), "ansible-navigator.yml")
    paths = [expected, os.path.join(os.path.expanduser("~"), ".ansible-navigator.json")]
    def check_path_exists(arg):
        # Pretend exactly these two candidate files exist on disk.
        return arg in paths
    monkeypatch.setattr(os.path, "exists", check_path_exists)
    messages, exit_messages, found = utils.find_settings_file()
    assert expected == found
def flatten(tensor):
    """Flatten *tensor* so the channel axis (dim 1) comes first.

    The shapes are transformed as follows:
    (N, C, D, H, W) -> (C, N * D * H * W)
    Works for any tensor of rank >= 2 with channels at axis 1.
    """
    num_channels = tensor.size(1)
    # Bring C to the front: (N, C, ...) -> (C, N, ...).
    channel_first = tensor.transpose(0, 1)
    # Collapse every remaining axis into one.
    return channel_first.reshape(num_channels, -1)
def compare_scalar_grids(gvecs0, nkm0, gvecs1, nkm1, atol=1e-6):
    """Compare two scalar fields sampled on regular grids
    Args:
        gvecs0 (np.array): first grid, (npt0, ndim)
        nkm0 (np.array): values, (npt0,)
        gvecs1 (np.array): second grid, (npt1, ndim), expect npt1<=npt0
        nkm1 (np.array): values, (npt1,)
        atol (float, optional): absolute tolerance for all comparisons.
    Return:
        np.array: bool triple (unique, xmatch, ymatch); all True iff the
        second field is consistently embedded in the first.
    """
    from chiesa_correction import align_gvectors
    # Boolean masks selecting the g-vectors common to both grids.
    comm0, comm1 = align_gvectors(gvecs0, gvecs1)
    unique = len(gvecs1[comm1]) == len(gvecs1)  # all unique gvecs are unique
    xmatch = np.allclose(gvecs0[comm0], gvecs1[comm1],
                         atol=atol)  # gvecs match
    ymatch = np.allclose(nkm0[comm0], nkm1[comm1],
                         atol=atol)  # nk match before unfold
    return np.array([unique, xmatch, ymatch], dtype=bool)
def a_dot(t):
    """
    Derivative of a, the scale factor, for a matter-dominated universe
    where a(t) = (3/2 * H0 * t)^(2/3).
    :param t: cosmic time (same units as 1/H0).
    :return: da/dt at time t.
    """
    # H0 is a module-level Hubble constant defined elsewhere in this file.
    return H0 * ((3 / 2) * H0 * t) ** (-1 / 3)
def test_basic_dewpoint_rh():
    """Test dewpoint_rh function."""
    temp = np.array([30., 25., 10., 20., 25.]) * units.degC
    # Relative humidity as a fraction (0-1), not percent.
    rh = np.array([30., 45., 55., 80., 85.]) / 100.
    real_td = np.array([11, 12, 1, 16, 22]) * units.degC
    # Compare to whole-degree reference values (0 decimal places).
    assert_array_almost_equal(real_td, dewpoint_rh(temp, rh), 0)
def frame(x, frame_length, hop_length, axis=-1, name=None):
    """
    Slice the N-dimensional (where N >= 1) input into (overlapping) frames.
    Args:
        x (Tensor): The input data which is a N-dimensional (where N >= 1) Tensor
            with shape `[..., seq_length]` or `[seq_length, ...]`.
        frame_length (int): Length of the frame and `0 < frame_length <= x.shape[axis]`.
        hop_length (int): Number of steps to advance between adjacent frames
            and `0 < hop_length`.
        axis (int, optional): Specify the axis to operate on the input Tensors. Its
            value should be 0(the first dimension) or -1(the last dimension). If not
            specified, the last axis is used by default.
        name (str, optional): Name for the operation (optional, default is None).
    Returns:
        The output frames tensor with shape `[..., frame_length, num_frames]` if `axis==-1`,
        otherwise `[num_frames, frame_length, ...]` where
        `num_framse = 1 + (x.shape[axis] - frame_length) // hop_length`
    Examples:
        .. code-block:: python
        import paddle
        from paddle.signal import frame
        # 1D
        x = paddle.arange(8)
        y0 = frame(x, frame_length=4, hop_length=2, axis=-1)  # [4, 3]
        # [[0, 2, 4],
        #  [1, 3, 5],
        #  [2, 4, 6],
        #  [3, 5, 7]]
        y1 = frame(x, frame_length=4, hop_length=2, axis=0)   # [3, 4]
        # [[0, 1, 2, 3],
        #  [2, 3, 4, 5],
        #  [4, 5, 6, 7]]
        # 2D
        x0 = paddle.arange(16).reshape([2, 8])
        y0 = frame(x0, frame_length=4, hop_length=2, axis=-1)  # [2, 4, 3]
        # [[[0, 2, 4],
        #   [1, 3, 5],
        #   [2, 4, 6],
        #   [3, 5, 7]],
        #
        #  [[8 , 10, 12],
        #   [9 , 11, 13],
        #   [10, 12, 14],
        #   [11, 13, 15]]]
        x1 = paddle.arange(16).reshape([8, 2])
        y1 = frame(x1, frame_length=4, hop_length=2, axis=0)   # [3, 4, 2]
        # [[[0 , 1 ],
        #   [2 , 3 ],
        #   [4 , 5 ],
        #   [6 , 7 ]],
        #
        #  [4 , 5 ],
        #   [6 , 7 ],
        #   [8 , 9 ],
        #   [10, 11]],
        #
        #  [8 , 9 ],
        #   [10, 11],
        #   [12, 13],
        #   [14, 15]]]
        # > 2D
        x0 = paddle.arange(32).reshape([2, 2, 8])
        y0 = frame(x0, frame_length=4, hop_length=2, axis=-1)  # [2, 2, 4, 3]
        x1 = paddle.arange(32).reshape([8, 2, 2])
        y1 = frame(x1, frame_length=4, hop_length=2, axis=0)   # [3, 4, 2, 2]
    """
    # Validate arguments eagerly so both eager and static paths fail fast.
    if axis not in [0, -1]:
        raise ValueError(f'Unexpected axis: {axis}. It should be 0 or -1.')
    if not isinstance(frame_length, int) or frame_length <= 0:
        raise ValueError(
            f'Unexpected frame_length: {frame_length}. It should be an positive integer.'
        )
    if not isinstance(hop_length, int) or hop_length <= 0:
        raise ValueError(
            f'Unexpected hop_length: {hop_length}. It should be an positive integer.'
        )
    if frame_length > x.shape[axis]:
        raise ValueError(
            f'Attribute frame_length should be less equal than sequence length, '
            f'but got ({frame_length}) > ({x.shape[axis]}).')
    op_type = 'frame'
    if in_dygraph_mode():
        # Eager mode: dispatch straight to the C++ op.
        attrs = ('frame_length', frame_length, 'hop_length', hop_length, 'axis',
                 axis)
        op = getattr(_C_ops, op_type)
        out = op(x, *attrs)
    else:
        # Static graph mode: append the op to the current program.
        check_variable_and_dtype(
            x, 'x', ['int32', 'int64', 'float16', 'float32',
                     'float64'], op_type)
        helper = LayerHelper(op_type, **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type=op_type,
            inputs={'X': x},
            attrs={
                'frame_length': frame_length,
                'hop_length': hop_length,
                'axis': axis
            },
            outputs={'Out': out})
    return out
def wrapped_partial(func: callable, *args, **kwargs) -> callable:
    """Create a partial application of *func* that keeps its metadata.

    Args:
        func (callable): The function to partially apply.
        *args: Positional arguments to bind.
        **kwargs: Keyword arguments to bind.

    Returns:
        callable: The partial function, carrying over ``func``'s name,
        docstring and other wrapper attributes.
    """
    bound = functools.partial(func, *args, **kwargs)
    # Copy __name__, __doc__, etc. so the partial is introspection-friendly.
    functools.update_wrapper(bound, func)
    return bound
def with_sfw_check(
    command: typing.Optional[CommandT] = None,
    /,
    *,
    error_message: typing.Optional[str] = "Command can only be used in SFW channels",
    halt_execution: bool = False,
) -> CallbackReturnT[CommandT]:
    """Only let a command run in a channel that's marked as sfw.

    Parameters
    ----------
    command : typing.Optional[CommandT]
        The command to add this check to.

    Other Parameters
    ----------------
    error_message : typing.Optional[str]
        The error message to send in response as a command error if the check fails.
        Defaults to "Command can only be used in SFW channels" and setting this
        to `None` will disable the error message allowing the command search to
        continue.
    halt_execution : bool
        Whether this check should raise `tanjun.errors.HaltExecution` to
        end the execution search when it fails instead of returning `False`.
        Defaults to `False`.

    Notes
    -----
    * error_message takes priority over halt_execution.
    * For more information on how this is used with other parameters see
      `CallbackReturnT`.

    Returns
    -------
    CallbackReturnT[CommandT]
        The command this check was added to.
    """
    return _wrap_with_kwargs(command, sfw_check, halt_execution=halt_execution, error_message=error_message)
def algorithm_id_to_generation_class(algorithm_id):
    """
    Returns the Generation class corresponding to the
    provided algorithm ID (as defined in settings).
    """
    class_data = _algorithm_id_to_class_data(algorithm_id)
    # Index 1 of the class-data tuple holds the Generation class.
    return class_data[1]
def search(session, **kwargs):
    """
    Searches the Discogs API for a release object.

    Arguments:
        session (requests.Session) - API session object
        **kwargs (dict) - All kwargs are added as query parameters in the search call

    Returns:
        dict - The first result returned in the search

    Raises:
        Exception if release cannot be found
    """
    try:
        # Let the session encode the query string; manual string
        # concatenation breaks on values containing '&', '=' or spaces.
        res = session.get(DB_API + '/search', params=kwargs)
        data = res.json()
        if res.status_code != 200 or 'results' not in data:
            raise Exception(f'Unexpected error when querying Discogs API ({res.status_code})')
        if not data['results']:
            raise Exception('No results found')
        return data['results'][0]
    except Exception as err:
        print(f'Failed to find release for search {kwargs} in Discogs database: {err}')
        raise
def test_tensors_equal():
    """Exercise the util.tensors_equal comparison helper."""
    all_zero = {'0': numpy.zeros((2, 2), dtype=numpy.complex128),
                '1': numpy.zeros((2, 2), dtype=numpy.complex128)}
    mixed = {'0': numpy.zeros((2, 2), dtype=numpy.complex128),
             '1': numpy.ones((2, 2), dtype=numpy.complex128)}
    shorter = {'0': numpy.zeros((2, 2), dtype=numpy.complex128)}
    # Identical contents compare equal; differing values or key sets do not.
    assert util.tensors_equal(all_zero, all_zero)
    assert not util.tensors_equal(all_zero, mixed)
    assert not util.tensors_equal(all_zero, shorter)
def plot_demand_supply(
all_dict, commod, test, demand_driven, log_scale, calculated
):
""" Plots demand, supply, calculated demand and calculated supply
on a curve for a non-driving commodity
Parameters
----------
4 dicts: dictionaries of supply, demand, calculated
demand and calculated supply
demand_driven: Boolean. If true, the commodity is demand driven,
if false, the commodity is supply driven
Returns
-------
plot of all four dicts
"""
dict_demand = all_dict["dict_demand"]
dict_supply = all_dict["dict_supply"]
dict_calc_demand = all_dict["dict_calc_demand"]
dict_calc_supply = all_dict["dict_calc_supply"]
fig, ax = plt.subplots(figsize=(15, 7))
if demand_driven:
if log_scale:
ax.semilogy(
*zip(*sorted(dict_demand.items())),
"+",
color="red",
label="Demand"
)
if calculated:
ax.semilogy(
*zip(*sorted(dict_calc_demand.items())),
"o",
alpha=0.5,
color="red",
label="Calculated Demand"
)
else:
ax.plot(
*zip(*sorted(dict_demand.items())),
"+",
color="red",
label="Demand"
)
if calculated:
ax.plot(
*zip(*sorted(dict_calc_demand.items())),
"o",
alpha=0.5,
color="red",
label="Calculated Demand"
)
ax.set_title("%s Demand Supply plot" % test)
else:
if log_scale:
ax.semilogy(
*zip(*sorted(dict_demand.items())),
"+",
color="red",
label="Capacity"
)
if calculated:
ax.semilogy(
*zip(*sorted(dict_calc_demand.items())),
"o",
alpha=0.5,
color="red",
label="Calculated Capacity"
)
else:
ax.plot(
*zip(*sorted(dict_demand.items())),
"+",
color="red",
label="Capacity"
)
if calculated:
ax.plot(
*zip(*sorted(dict_calc_demand.items())),
"o",
alpha=0.5,
color="red",
label="Calculated Capacity"
)
ax.set_title("%s Capacity Supply plot" % test)
if log_scale:
ax.semilogy(
*zip(*sorted(dict_supply.items())), "x", color="c", label="Supply"
)
if calculated:
ax.semilogy(
*zip(*sorted(dict_calc_supply.items())),
"o",
alpha=0.5,
color="c",
label="Calculated Supply"
)
else:
ax.plot(
*zip(*sorted(dict_supply.items())), "x", color="c", label="Supply"
)
if calculated:
ax.plot(
*zip(*sorted(dict_calc_supply.items())),
"o",
alpha=0.5,
color="c",
label="Calculated Supply"
)
ax.grid()
ax.set_xlabel("Time (month timestep)", fontsize=14)
if commod.lower() == "power":
ax.set_ylabel("Power (MW)", fontsize=14)
else:
ax.set_ylabel("Mass (Kg)", fontsize=14)
handles, labels = ax.get_legend_handles_labels()
ax.legend(
handles,
labels,
fontsize=11,
loc="upper center",
bbox_to_anchor=(1.1, 1.0),
fancybox=True,
)
plt.savefig(test, dpi=300, bbox_inches="tight")
plt.close() | 31,967 |
def test_array_field_exact_empty_list(Query):
    """
    Test exact filter on a array field of string.
    """
    query = """
    query {
        events (tags: []) {
            edges {
                node {
                    name
                }
            }
        }
    }
    """
    executed = Schema(query=Query).execute(query)
    assert not executed.errors
    expected_edges = [{"node": {"name": "Speech"}}]
    assert executed.data["events"]["edges"] == expected_edges
def _conv_general_precision_config_proto(precision):
    """Convert an integer to an XLA.PrecisionConfig.

    Returns None when no precision was requested.
    """
    if precision is None:
        return None
    config = xla_data_pb2.PrecisionConfig()
    config.operand_precision.append(int(precision))
    return config
def find_installed_packages_in_environment():
    """
    Find packages under the COLCON_PREFIX_PATH.

    For each prefix path the package index is being read and the first time a
    package is being found its install prefix is being added to the result.

    :returns: The mapping from a package name to the prefix path
    :rtype: OrderedDict
    """
    packages = OrderedDict()
    for prefix_path in map(Path, get_colcon_prefix_path()):
        pkgs = find_installed_packages(prefix_path)
        if pkgs is None:
            logger.debug(
                "Ignoring prefix path '{prefix_path}'".format_map(locals()))
            continue
        for pkg_name in sorted(pkgs):
            # earlier ("higher") prefix paths win over later ones
            packages.setdefault(pkg_name, pkgs[pkg_name])
    return packages
def list_in_list(a, l):
    """Checks if a list is in a list and returns its index if it is (otherwise
    returns -1).

    Parameters
    ----------
    a : list()
        List to search for.
    l : list()
        List to search through.
    """
    for index, element in enumerate(l):
        if element == a:
            return index
    return -1
def test_service_check_instance_name_normalization(aggregator, http_check):
    """
    Service check `instance` tag value is normalized.

    Note: necessary to avoid mismatch and backward incompatiblity.
    """
    instance = CONFIG_UNORMALIZED_INSTANCE_NAME['instances'][0]
    http_check.check(instance)
    # Both service checks must carry the normalized instance tag exactly once.
    expected_tags = ['url:https://github.com', 'instance:need_to_be_normalized']
    for service_check in (HTTPCheck.SC_STATUS, HTTPCheck.SC_SSL_CERT):
        aggregator.assert_service_check(
            service_check, status=HTTPCheck.OK, tags=expected_tags, count=1)
def start_host(session=None, load_plugins=True, plugins=None):
    """Promote the current process into python plugin host for Nvim.

    Start msgpack-rpc event loop for `session`, listening for Nvim requests
    and notifications. It registers Nvim commands for loading/unloading
    python plugins.

    The sys.stdout and sys.stderr streams are redirected to Nvim through
    `session`. That means print statements probably won't work as expected
    while this function doesn't return.

    This function is normally called at program startup and could have been
    defined as a separate executable. It is exposed as a library function for
    testing purposes only.

    Args:
        session: an existing session object, a connection string, or None to
            create a socket session.
        load_plugins: whether to discover plugins automatically when none are
            supplied.
        plugins: explicit list of plugins to start; takes precedence over
            automatic discovery.

    Returns:
        The started Host instance.
    """
    if load_plugins and plugins is None:
        # Only auto-discover when the caller did not supply plugins;
        # previously an explicit ``plugins`` argument was silently
        # overwritten whenever ``load_plugins`` was left at its default.
        plugins = _goofy_way_of_loading_plugins()
    if not session:
        session = socket_session()
    elif isinstance(session, str):
        session = _convert_str_to_session(session)
    nvim = Nvim.from_session(session)
    if nvim.version.api_level < 1:
        sys.stderr.write("This version of pynvim requires nvim 0.1.6 or later")
        sys.exit(1)
    host = Host(nvim)
    if plugins is not None:
        host.start(plugins)
    return host
def parse_args():
    """Command line argument parsing.

    Populates the module-level flag_* globals from sys.argv and validates
    the requested target architecture.
    """
    global flag_target_arch, flag_echo, flag_dryrun, flag_show_output
    global binutils_version, flag_parfactor, flag_do_only_gcc_build
    global flag_gcc_subdir, flag_debug_gcc, flag_langs
    global flag_need_android_sysroot, flag_ndk_dir
    global flag_use_multilib, flag_use_bootstrap
    try:
        # "S:" added: the -S handler below was unreachable because the
        # option was missing from the getopt specification.
        optlist, args = getopt.getopt(sys.argv[1:], "hdnest:b:N:DBMYCS:L:")
    except getopt.GetoptError as err:
        # unrecognized option
        usage(str(err))
    for opt, arg in optlist:
        if opt == "-d":
            u.increment_verbosity()
        elif opt == "-e":
            flag_echo = True
        elif opt == "-h":
            usage("usage:")
        elif opt == "-s":
            flag_show_output = True
        elif opt == "-B":
            flag_do_only_gcc_build = True
        elif opt == "-M":
            flag_use_multilib = "--disable-multilib"
        elif opt == "-Y":
            flag_use_bootstrap = ""
        elif opt == "-n":
            flag_parfactor = ""
        elif opt == "-b":
            u.verbose(0, "setting binutils version to %s" % arg)
            binutils_version = arg
        elif opt == "-N":
            if not os.path.exists(arg):
                usage("unable to access NDK dir %s" % arg)
            flag_ndk_dir = arg
        elif opt == "-C":
            flag_debug_gcc = True
        elif opt == "-S":
            u.verbose(0, "setting gcc_subdir to %s" % arg)
            flag_gcc_subdir = arg
        elif opt == "-D":
            flag_dryrun = True
            flag_echo = True
        elif opt == "-L":
            if arg not in legal_extralangs:
                usage("specified lang %s not part of legal extras list" % arg)
            flag_langs = "%s,%s" % (flag_langs, arg)
        elif opt == "-t":
            if arg not in legal_arches:
                usage("specified arch %s not part of legal list" % arg)
            flag_target_arch = arg
    if args:
        usage("unknown extra args")
    if not flag_target_arch:
        usage("select a target architecture")
    # Android targets need a sysroot from the NDK.
    matcher = re.compile(r"^.*android.*$")
    m = matcher.match(flag_target_arch)
    if m:
        flag_need_android_sysroot = True
        u.verbose(1, "target arch is android, need sysroot")
    if flag_need_android_sysroot and not flag_ndk_dir:
        usage("android target specified, but no NDK dir given -- use -N option")
def sent_to_idx(sent, word2idx, sequence_len):
    """
    convert sentence to index array

    Words are split on '_', truncated to ``sequence_len`` tokens; words
    missing from ``word2idx`` map to the "UNKNOWN" index (0 when absent).
    """
    fallback = word2idx.get("UNKNOWN", 0)
    tokens = sent.split("_")[:sequence_len]
    return [word2idx.get(token, fallback) for token in tokens]
def fcall(node, file, ctx, open="(", close=")", sep=", "):
    """
    C-style function call from a node that has (func_name, args) elements.
    Can control the opening/closing bracket and argument separator.

    Yields the function name, then delegates rendering of the argument
    list to ``array``.
    """
    func_name, args = node.value
    yield func_name
    # NOTE(review): if ``array`` is itself a generator function, this bare
    # call produces a generator that is never consumed -- confirm ``array``
    # performs its output eagerly (or that this is intentional).
    array(args, file, ctx, items=lambda x: x, open=open, close=close, sep=sep)
def expand_sqs_results(settings: Settings, sqs_results: T.Iterable[SQSResult],
                       timings: T.Optional[TimingDictionary] = None, include=('configuration',),
                       inplace: bool = False) -> Settings:
    """
    Serializes a list of :py:class:`sqsgenerator.public.SQSResult` into a JSON/YAML serializable string

    :param settings: the settings used to compute the {sqs_results}
    :type settings: AttrDict
    :param sqs_results: the results to serialize
    :param timings: optional timing information to embed in the document
    :param include: result fields to export ('configuration' is always added)
    :param inplace: merge the result document into *settings* instead of
        building a standalone document
    """
    fields = list(include)
    if 'configuration' not in fields:
        fields.append('configuration')
    document = make_result_document(settings, sqs_results, fields=fields, timings=timings)
    if not inplace:
        return Settings(document)
    settings.update(document)
    # input-only keys have no place in the merged output document
    drop = {'file_name', 'input_format', 'composition', 'iterations',
            'max_output_configurations', 'mode', 'threads_per_rank', 'is_sublattice'}
    merged = {key: value for key, value in settings.items() if key not in drop}
    if 'sublattice' in merged:
        merged.update(merged['sublattice'])
        del merged['sublattice']
    return Settings(merged)
def test_files_atlas(test_files):
    """ATLAS files (ssbio/test/test_files/atlas)"""
    atlas_dir = op.join(test_files, 'atlas')
    return atlas_dir
def _get_xaxis_polynomial_(xyv, degree=DEGREE, legendre=LEGENDRE,
                           xmodel=None, clipping=(5, 5)):
    """Fit a (Legendre) polynomial continuum along the x axis.

    Parameters
    ----------
    xyv : tuple
        (x, y, variance) arrays to fit.
    degree, legendre :
        Polynomial options forwarded to ``get_polyfit``.
    xmodel : array, optional
        If given, the fitted model is also evaluated on these x values.
    clipping : (low, high) pair
        Sigma-clipping bounds around the mean of y.
        (Was a mutable default ``[5, 5]``; a tuple avoids accidental
        shared-state mutation while accepting the same indexing.)

    Returns
    -------
    fitvalues, or (fitvalues, model array) when *xmodel* is given.
    """
    from modefit.basics import get_polyfit
    x, y, v = xyv
    # sigma-clip: keep points within [mean - low*std, mean + high*std]
    lower = np.nanmean(y) - clipping[0] * np.nanstd(y)
    upper = np.nanmean(y) + clipping[1] * np.nanstd(y)
    flagin = (lower < y) & (y < upper)
    contmodel = get_polyfit(x[flagin], y[flagin], v[flagin], degree=degree, legendre=legendre)
    contmodel.fit(a0_guess=np.nanmedian(y[flagin]))
    if xmodel is not None:
        return contmodel.fitvalues, contmodel.model.get_model(x=xmodel)
    return contmodel.fitvalues
def generateKey():
    """
    Method to generate a encryption key.

    Copies an export command for the key to the clipboard and prints it.

    Returns:
        bool: True on success, False if key generation or clipboard
        access failed.
    """
    try:
        key = Fernet.generate_key()
        # decode() yields the key text directly; the previous
        # str(key)[2:-1] relied on the "b'...'" repr format and was brittle.
        updateClipboard(f"export LVMANAGER_PW={key.decode()}")
        print(f"Key: {key}")
        print("Export command copied to clipboard. Save this value!")
        return True
    except Exception as e:
        print(f"Something went wrong\nException: {e}")
        return False
def _zpkbilinear(z, p, k, fs):
    """ Return a digital filter from an analog one using a bilinear transform """
    z = np.atleast_1d(z)
    p = np.atleast_1d(p)
    degree = _relative_degree(z, p)
    fs2 = 2.0 * fs
    # Map analog zeros/poles s -> (fs2 + s) / (fs2 - s)
    z_z = (fs2 + z) / (fs2 - z)
    p_z = (fs2 + p) / (fs2 - p)
    # Zeros that were at infinity land on the Nyquist frequency (z = -1)
    z_z = np.concatenate((z_z, -np.ones(degree)))
    # Match the gain of the analog prototype
    k_z = k * np.real(np.prod(fs2 - z) / np.prod(fs2 - p))
    return z_z, p_z, k_z
def preprocess(input_file: str, output_file: str):
    """
    Reformat and augment the dataset.

    Reads chord progressions from ``input_file``, converts note names to
    integers, validates every chord, de-duplicates, augments (seventh and
    inverted chords get 4x the augmentations), shuffles, and writes the
    result to ``output_file``.

    Args:
        input_file: the filepath to the dataset
        output_file: the filepath the processed CSV is written to

    Raises:
        ValueError: if any current/next chord fails SATB validation.
    """
    # getting chord progressions from input file
    progressions = []
    with open(input_file) as f:
        header = f.readline().strip()
        for line in f:
            if "/" not in line:  # ignore lines that are comments
                elements = line[:-1].split(',')  # remove newline, split elements
                progressions.append(elements)
    # (no explicit close needed: the with-block closes the file)
    satb = Satb()
    for idx, prog in enumerate(progressions):
        # convert all notes to ints
        for i in range(len(prog)):
            if not prog[i].isdigit():  # if note
                prog[i] = note_to_num(prog[i])
            else:
                prog[i] = int(prog[i])
        # ensure all chords are valid
        if not satb.valid_chord(prog[5:9]):
            raise ValueError(
                "Current chord is invalid.",
                idx,
                prog[5:9],
                [num_to_note(el) for el in prog[5:9]]
            )
        if not satb.valid_chord(prog[12:16]):
            raise ValueError(
                "Next chord is invalid.",
                idx,
                prog[12:16],
                [num_to_note(el) for el in prog[12:16]]
            )
    # remove duplicates and keep track of unique progressions using a set
    progression_set = set([tuple(prog) for prog in progressions])
    progressions = [list(prog) for prog in progression_set]
    beg_num_chords = len(progressions)
    beg_inv_chords = 0
    beg_sev_chords = 0
    duplicate_chords = 0
    end_num_chords = beg_num_chords
    end_inv_chords = 0
    end_sev_chords = 0
    for i in range(len(progressions)):
        if VERBOSE:
            print(i)
        aug_count = 10  # number of augmentations to create
        sev_chord = False
        inv_chord = False
        if progressions[i][3] == 1 or progressions[i][10] == 1:  # if seventh chord
            aug_count = 40
            beg_sev_chords += 1
            end_sev_chords += 1
            sev_chord = True
        if progressions[i][4] != 0 or progressions[i][11] != 0:  # if chord not in root-position
            aug_count = 40
            beg_inv_chords += 1
            end_inv_chords += 1
            inv_chord = True
        # data augmentation
        for _ in range(aug_count):
            new_prog = augment(progressions[i])
            if tuple(new_prog) not in progression_set:
                progressions.append(new_prog)
                progression_set.add(tuple(new_prog))
                end_num_chords += 1
                if inv_chord:
                    end_inv_chords += 1
                if sev_chord:
                    end_sev_chords += 1
            else:
                duplicate_chords += 1
                # skip the verbose dump for duplicates
                continue
            if VERBOSE:
                print("Before:")
                print("\tKey: {}".format(num_to_note(progressions[i][0])))
                print("\t{}".format([num_to_note(el) for el in progressions[i][5:9]]))
                print("\t{}".format([num_to_note(el) for el in progressions[i][12:16]]))
                print("After:")
                print("\tKey: {}".format(num_to_note(new_prog[0])))
                print("\t{}".format([num_to_note(el) for el in new_prog[5:9]]))
                print("\t{}".format([num_to_note(el) for el in new_prog[12:16]]))
                print("\n")
    # (dead "shuffle = True" flag removed; the shuffle is unconditional)
    random.shuffle(progressions)
    with open(output_file, 'w') as output_csv:
        output_csv.write(header + '\n')
        writer = csv.writer(output_csv, delimiter=',', lineterminator='\n')
        writer.writerows(progressions)
    print("\n************")
    print("\nSTART:")
    print("Total number of progressions:\t\t{}".format(beg_num_chords))
    print("Number of prog. with inverted chords:\t{}\t{}".format(beg_inv_chords, beg_inv_chords/beg_num_chords))
    print("Number of prog. with seventh chords:\t{}\t{}".format(beg_sev_chords, beg_sev_chords/beg_num_chords))
    print("\nDuplicate progressions:\t\t\t{}".format(duplicate_chords))
    print("(created during augmentation and removed)")
    print("\nEND:")
    print("Total number of progressions:\t\t{}".format(end_num_chords))
    print("Number of prog. with inverted chords:\t{}\t{}".format(end_inv_chords, end_inv_chords/end_num_chords))
    print("Number of prog. with seventh chords:\t{}\t{}".format(end_sev_chords, end_sev_chords/end_num_chords))
def so3exp(w):
    """
    Maps so(3) --> SO(3) group with closed form expression.
    """
    theta = np.linalg.norm(w)
    if theta < _EPS * 3:
        # Rotation too small to resolve numerically: identity.
        return np.eye(3)
    w_hat = S03_hat_operator(w)
    # Rodrigues' formula: I + sin(t)/t * K + (1-cos(t))/t^2 * K^2
    sin_coeff = np.sin(theta) / theta
    cos_coeff = (1 - np.cos(theta)) / theta ** 2
    return np.eye(3) + sin_coeff * w_hat + cos_coeff * w_hat.dot(w_hat)
def bin2ppm(nproc_old, model_tags, region, npts, nproc,
            old_mesh_dir, old_model_dir, output_dir):
    """
    Build the shell command that converts the bin files to the ppm model.
    """
    julia_path = get_julia("specfem_gll.jl/src/program/get_ppm_model.jl")
    latnproc, lonnproc = map(int, nproc.split("/"))
    # One rank per lat/lon tile.
    nproc_ppm2netcdf = latnproc * lonnproc
    # ! note there is a issue of precompiling the code in a race condition, refer to https://github.com/simonbyrne/PkgLock.jl to solve the problem
    # result += "julia --project -e 'push!(LOAD_PATH, \"@pkglock\"); using PkgLock; PkgLock.instantiate_precompile()'\n"
    modules = "module purge;module load GCC/8.2.0-2.31.1;module load OpenMPI/3.1.3;"
    srun = f"srun -n {nproc_ppm2netcdf} julia '{julia_path}' --nproc_old {nproc_old} --old_mesh_dir {old_mesh_dir} --old_model_dir {old_model_dir} --model_tags {model_tags} --output_file {output_dir} --region {region} --npts {npts} --nproc {nproc}; \n"
    return modules + srun
def st_sdata(obs, cols):
    """return string data in given observation numbers as a list of lists,
    one sub-list for each row; obs should be int or iterable of int;
    cols should be a single str or int or iterable of str or int
    """
    obs, cols, _ = _parseObsColsVals(obs, cols)
    for col in cols:
        if not st_isstrvar(col):
            raise TypeError("only string Stata variables allowed")
    return [[_st_sdata(row, col) for col in cols] for row in obs]
def list_data(args, data):
    """List all servers and files associated with this project."""
    remotes = data["remotes"]
    if remotes:
        print("Servers:")
        for server in remotes:
            if server["name"] == server["location"]:
                print(server["user"] + "@" + server["location"])
            else:
                print(
                    server["user"] + "@" + server["name"] + " ("
                    + server["location"] + ")")
    else:
        print("No servers added")
    print("Included files and directories:")
    print(data["file"] + ".py")
    if data["files"]:
        print("\n".join(data["files"]))
    return data
def vecInt(xx, vv, p, interpolation='weighted'):
    """
    Interpolates the field around this position.

    call signature:

        vecInt(xx, vv, p, interpolation = 'weighted')

    Keyword arguments:

    *xx*:
      Position vector around which will be interpolated.

    *vv*:
      Vector field to be interpolated.

    *p*:
      Parameter struct (needs Ox/Oy/Oz origins, dx/dy/dz spacings and
      nx/ny/nz grid sizes).

    *interpolation*:
      Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid points.
      'weighted': weights the adjacent grid points according to their distance.
    """
    def bracket(coord, origin, delta, npts):
        # Fractional grid index clamped into [0, npts-1], plus the two
        # bracketing integer indices (equal when exactly on a grid point).
        # Deduplicates the identical x/y/z clamping logic of the original.
        frac = (coord - origin) / delta
        if frac < 0:
            frac = 0
        if frac > npts - 1:
            frac = npts - 1
        return frac, np.array([int(np.floor(frac)), int(np.ceil(frac))])

    i, ii = bracket(xx[0], p.Ox, p.dx, p.nx)
    j, jj = bracket(xx[1], p.Oy, p.dy, p.ny)
    k, kk = bracket(xx[2], p.Oz, p.dz, p.nz)
    vv = np.swapaxes(vv, 1, 3)
    box = vv[:, ii[0]:ii[1]+1, jj[0]:jj[1]+1, kk[0]:kk[1]+1]
    # interpolate the field
    if interpolation == 'mean':
        return np.mean(box, axis=(1, 2, 3))
    if interpolation == 'weighted':
        # Weight each corner by its (absolute) distance to the opposite one;
        # degenerate axes (on a grid point) contribute equal weights.
        w1 = np.array([1, 1]) if ii[0] == ii[1] else (i - ii[::-1])
        w2 = np.array([1, 1]) if jj[0] == jj[1] else (j - jj[::-1])
        w3 = np.array([1, 1]) if kk[0] == kk[1] else (k - kk[::-1])
        weight = abs(w1.reshape((2, 1, 1)) * w2.reshape((1, 2, 1)) * w3.reshape((1, 1, 2)))
        return np.sum(box * weight, axis=(1, 2, 3)) / np.sum(weight)
def anchor_inside_flags(flat_anchors, valid_flags, img_shape,
                        allowed_border=0, device='cuda'):
    """Anchor inside flags.

    :param flat_anchors: flat anchors, (N, 4) of x1, y1, x2, y2
    :param valid_flags: valid flags
    :param img_shape: image meta info (height, width tensors first)
    :param allowed_border: if allow border; a negative value disables the
        border check entirely
    :return: inside flags
    """
    img_h, img_w = img_shape[:2]
    if device == 'cuda':
        img_h = img_h.cuda()
        img_w = img_w.cuda()
    img_h = img_h.float()
    img_w = img_w.float()
    valid_flags = valid_flags.bool()
    if allowed_border < 0:
        return valid_flags
    border = allowed_border
    inside_flags = (
        valid_flags
        & (flat_anchors[:, 0] >= -border)
        & (flat_anchors[:, 1] >= -border)
        & (flat_anchors[:, 2] < img_w + border)
        & (flat_anchors[:, 3] < img_h + border)
    )
    return inside_flags
def notinLRG_mask(primary=None, rflux=None, zflux=None, w1flux=None,
                  rflux_snr=None, zflux_snr=None, w1flux_snr=None):
    """See :func:`~desitarget.sv1.sv1_cuts.isLRG` for details.

    Returns
    -------
    :class:`array_like`
        ``True`` if and only if the object is NOT masked for poor quality.
    """
    if primary is None:
        primary = np.ones_like(rflux, dtype='?')
    # ADM require positive flux and S/N in r and z, and S/N > 4 in W1.
    good_r = (rflux_snr > 0) & (rflux > 0)
    good_z = (zflux_snr > 0) & (zflux > 0)
    good_w1 = (w1flux_snr > 4) & (w1flux > 0)
    return primary.copy() & good_r & good_z & good_w1
def create_ps_realisation(
    out_dir,
    fault_name,
    lat,
    lon,
    depth,
    mw_mean,
    mom,
    strike,
    rake,
    dip,
    n_realisations=50,
    additional_options=None,
    dt=0.005,
    vs=3.20,
    rho=2.44,
    target_area_km=None,
    target_slip_cm=None,
    stype="cos",
    rise_time=0.5,
    init_time=0.0,
    silent=False,
    logger: Logger = qclogging.get_basic_logger(),
):
    """
    Creates SRF files using random variables.

    Nominal values are to be passed in, while the probability characteristics are to be found in a yaml file that
    declares the distribution and probability characteristics for that distribution.
    Any values that are not to be passed to the CreateSRF function are to be passed in as a dict of additional_options
    and will be saved as a json file. If the keys of the additional_options are present in the yaml file they will also
    be perturbed with each realisation and saved as such.

    :param additional_options: A dictionary containing any options to be used in the realisation, but not necessarily
        placed in the srf. Defaults to an empty dict. (Was a mutable ``{}``
        default argument; ``None`` sentinel avoids cross-call shared state.)
    """
    if additional_options is None:
        additional_options = {}
    logger.debug("Creating point source realisation with perturbated parameters")
    # Generate standard options dictionary; "none" distribution means the
    # nominal value is used unperturbed unless the yaml overrides it.
    unperturbed_standard_options = {
        "depth": {"mean": depth, "distribution": "none"},
        "mw": {"mean": mw_mean, "distribution": "none"},
        "mom": {"mean": mom, "distribution": "none"},
        "strike": {"mean": strike, "distribution": "none"},
        "rake": {"mean": rake, "distribution": "none"},
        "dip": {"mean": dip, "distribution": "none"},
        "vs": {"mean": vs, "distribution": "none"},
        "rho": {"mean": rho, "distribution": "none"},
        "rise_time": {"mean": rise_time, "distribution": "none"},
    }
    (
        unperturbed_additional_options,
        unperturbed_standard_options,
    ) = set_up_parameter_dicts(additional_options, unperturbed_standard_options)
    for ns in range(1, n_realisations + 1):
        logger.debug("Creating realisation {}".format(ns))
        realisation_name = simulation_structure.get_realisation_name(fault_name, ns)
        realisation_srf_path = os.path.join(
            out_dir, simulation_structure.get_srf_location(realisation_name)
        )
        realisation_stoch_path = os.path.dirname(
            os.path.join(
                out_dir, simulation_structure.get_stoch_location(realisation_name)
            )
        )
        logger.debug("Perturbating parameters")
        (
            perturbed_standard_options,
            perturbed_additional_options,
        ) = perturbate_parameters(
            unperturbed_standard_options, unperturbed_additional_options
        )
        save_sim_params(
            out_dir,
            realisation_name,
            perturbed_additional_options,
            unperturbed_additional_options,
            logger=logger,
        )
        CreateSRF_ps(
            lat,
            lon,
            perturbed_standard_options["depth"],
            perturbed_standard_options["mw"],
            perturbed_standard_options["mom"],
            perturbed_standard_options["strike"],
            perturbed_standard_options["rake"],
            perturbed_standard_options["dip"],
            dt=dt,
            prefix=realisation_srf_path[:-4],
            stoch=realisation_stoch_path,
            vs=perturbed_standard_options["vs"],
            rho=perturbed_standard_options["rho"],
            target_area_km=target_area_km,
            target_slip_cm=target_slip_cm,
            stype=stype,
            rise_time=perturbed_standard_options["rise_time"],
            init_time=init_time,
            silent=silent,
            logger=logger,
        )
def capture_ops():
    """Decorator to capture ops created in the block.

    with capture_ops() as ops:
      # create some ops
    print(ops) # => prints ops created.
    """
    # NOTE(review): this generator is used as a context manager (see the
    # docstring), which requires a @contextlib.contextmanager decorator --
    # confirm it is applied at the (not visible here) definition site.
    # A microsecond timestamp gives a practically unique scope name so the
    # ops created inside the block can be selected back out by name.
    micros = int(time.time()*10**6)
    scope_name = str(micros)
    op_list = []
    with tf.name_scope(scope_name):
        yield op_list
    # After the block runs, collect every op created under the scope.
    g = tf.get_default_graph()
    op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def get_theta_def(pos_balle: tuple, cote: str):
    """
    Return the two theta angles (see explanations) that align the goalkeeper
    with the ball.

    They are computed with respect to the two goal posts, giving the two
    "extremes" of correct alignment.

    Parameters:
        - pos_balle : tuple - (x, y) position of the ball
        - cote : str - side being attacked: "d" for right, "g" for left
          (relative to the direction of the x axis)
    """
    angles = []
    if cote.lower() == "d":
        alphas = get_alpha(pos_balle, goal_droit)
        for alpha, poteau in zip(alphas, goal_droit):
            # Ball above the post keeps alpha; below, mirror the sign.
            if pos_balle[1] > poteau[1]:
                angles.append(alpha)
            else:
                angles.append(-alpha)
    elif cote.lower() == "g":
        alphas = get_alpha(pos_balle, goal_gauche)
        for alpha, poteau in zip(alphas, goal_gauche):
            # Left goal: angles are measured from the opposite direction.
            if pos_balle[1] > poteau[1]:
                angles.append(pi - alpha)
            else:
                angles.append(alpha - pi)
    return angles
def test_layout_design_versus_relative_sizing():
    """
    Check that layout() rejects rel_heights/rel_widths whose lengths do not
    match the design grid; this also examines such problems with empty
    spaces ('#', '.', or NaN cells).
    """
    # 2-row / 4-column string design with 3 rel_widths: must raise.
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design = """
                                       A#BB
                                       ACC#
                                       """,
                              rel_heights = [1,2],
                              rel_widths = [3,1,1])
        # relative widths need to be length 4...
    # 3-row design with only 2 rel_heights: must raise.
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design = """
                                       A#BB
                                       ACC#
                                       ACC#
                                       """,
                              rel_heights = [1,2],
                              rel_widths = [3,1,1,1])
        # relative heights need to be length 3...
    # Same two failure modes with '.' as the empty-cell marker.
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design = """
                                       A.BB
                                       ACC.
                                       """,
                              rel_heights = [1,2],
                              rel_widths = [3,1,1])
        # relative widths need to be length 4...
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design = """
                                       A.BB
                                       ACC.
                                       ACC.
                                       """,
                              rel_heights = [1,2],
                              rel_widths = [3,1,1,1])
        # relative heights need to be length 3...
    # Same two failure modes with a numeric design matrix (NaN = empty).
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design =np.array([[0,np.nan,1,1],
                                                [0,2,2,np.nan]]),
                              rel_heights = [1,2],
                              rel_widths = [3,1,1])
        # relative widths need to be length 4...
    with pytest.raises(Exception) as e_info:
        mylayout = cow.layout(design = np.array([[0,np.nan,1,1],
                                                 [0,2,2,np.nan],
                                                 [0,2,2,np.nan]]),
                              rel_heights = [1,2],
                              rel_widths = [3,1,1,1])
        # relative heights need to be length 3...
def public_doc():
    """Documentation for this api."""
    title = 'Ocean App Web Service Public Documentation'
    return auto.html(groups=['public'], title=title)
def _prepend_force_name_to_params(force):
"""
This function is called by :py:mod:`polychrom.simulation.Simulation.add_force` method.
It's goal is to avoid using the same names of global parameters defined in different forces.
To this end, it modifies names of parameters of each force to start with the force name,
which should be unique.
"""
if not hasattr(force, "getEnergyFunction"):
return
energy = force.getEnergyFunction()
if hasattr(force, "getNumGlobalParameters"):
for i in range(force.getNumGlobalParameters()):
old_name = force.getGlobalParameterName(i)
new_name = force.name + "_" + old_name
force.setGlobalParameterName(i, new_name)
energy = re.sub(r"(?<!\w)" + f"{old_name}" + r"(?!\w)", new_name, energy)
force.setEnergyFunction(energy) | 31,995 |
def getAllTeams():
    """
    Return the entire list of teams, ordered alphabetically by name.
    """
    return Team.objects.all().order_by('name')
def task_check(rollout: RolloutStorage) -> None:
    """
    Given a rollout, checks that task indices are returned from observations correctly
    and that tasks are resampled correctly within and between processes.

    Raises ``AssertionError`` when a task index changes without a done signal,
    when a process fails to resample tasks, or when two processes share the
    same task sequence; raises ``ValueError`` when too few episodes ran to
    make those checks meaningful.
    """
    # Get initial task indices.
    task_indices = get_task_indices(rollout.obs[0])
    # Per-process history of the tasks attempted, starting with the initial one.
    episode_tasks = {
        process: [task_indices[process]] for process in range(rollout.num_processes)
    }
    # Check if rollout satisfies conditions at each step.
    for step in range(rollout.rollout_step):
        # Get information from step.
        obs = rollout.obs[step]
        dones = rollout.dones[step]
        assert len(obs) == len(dones)
        new_task_indices = get_task_indices(obs)
        # Make sure that task indices are the same if we haven't reached a done,
        # otherwise set new task indices. Also track tasks attempted for each process.
        for process in range(len(obs)):
            done = dones[process]
            if done:
                task_indices[process] = new_task_indices[process]
                episode_tasks[process].append(task_indices[process])
            else:
                # Task must stay fixed within an episode.
                assert task_indices[process] == new_task_indices[process]
    # Check that each process is resampling tasks.
    # Only processes that completed at least PROCESS_EPISODES episodes are
    # informative; require that ENOUGH_THRESHOLD of processes qualify.
    enough_ratio = sum(
        len(tasks) >= PROCESS_EPISODES for tasks in episode_tasks.values()
    ) / len(episode_tasks)
    if enough_ratio < ENOUGH_THRESHOLD:
        raise ValueError(
            "Less than %d episodes ran for more than half of processes, which is the"
            " minimum amount needed for testing. Try increasing rollout length."
            % (PROCESS_EPISODES)
        )
    for process, tasks in episode_tasks.items():
        if len(tasks) >= PROCESS_EPISODES:
            # A qualifying process must have attempted more than one task.
            num_unique_tasks = len(set(tasks))
            assert num_unique_tasks > 1
    # Check that each process has distinct sequences of tasks.
    for p1, p2 in product(range(rollout.num_processes), range(rollout.num_processes)):
        if p1 == p2:
            continue
        assert episode_tasks[p1] != episode_tasks[p2]
    print("\nTasks for each process: %s" % episode_tasks)
def parse_lamp_flags(flags):
    """Parses flags and returns a dict that represents the lamp states.

    Args:
        flags: digit string matching [0123]{8}; each lamp reads one digit
            position as given by _LAMP_DIGITS.

    Returns:
        {'lamps': {color: state}} mapping each lamp color to its state name.
    """
    # Invert the state table once: {state_value: state_name}.
    values = _swap_key_and_value(_LAMP_STATES)
    # dict comprehension instead of dict([...]) (flake8-comprehensions C404).
    states = {
        color: values[flags[digit]] for color, digit in _LAMP_DIGITS.items()
    }
    return {'lamps': states}
def user_owns_item(function):
    """ Decorator that checks that the item was created by current user. """
    @wraps(function)
    def wrapper(category_name, item_name, *args, **kwargs):
        # Lookup order preserved: category, current user, then item.
        category = (db_session.query(Category)
                    .filter_by(name=category_name)
                    .one())
        user_id = session['user_id']
        item = (db_session.query(Item)
                .filter_by(category=category, name=item_name)
                .one())
        if item.user_id != user_id:
            abort(403)
        return function(category_name, item_name, *args, **kwargs)
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.