| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def test_set_item():
"""Tests element written to lin objects. This includes tests for negative
indexing and expected exceptions.
"""
m = lin.Matrix2x3()
def set_and_check(x, *args):
if len(args) == 1:
args = args[0]
assert m[args] != x
m[args] = x
assert m[args] == x
set_and_check(1.0, 0, 2)
set_and_check(2.0, 0, -1)
set_and_check(3.0, 1, 0)
set_and_check(4.0, -1, 0)
set_and_check(5.0, 3)
set_and_check(6.0, -2)
with pytest.raises(IndexError):
m[2, 0]
with pytest.raises(IndexError):
m[-3, 0]
with pytest.raises(IndexError):
m[6]
with pytest.raises(IndexError):
m[-7]
| 9,700
|
def populate_api_servers():
""" Find running API servers. """
def api_server_info(entry):
prefix, port = entry.rsplit('-', 1)
project_id = prefix[len(API_SERVER_PREFIX):]
return project_id, int(port)
global api_servers
monit_entries = yield monit_operator.get_entries()
server_entries = [api_server_info(entry) for entry in monit_entries
if entry.startswith(API_SERVER_PREFIX)]
for project_id, port in server_entries:
api_servers[project_id] = port
| 9,701
|
def draw_result(points, colors=None):
""" Draw point clouds
Args:
points ([ndarray]): N x 3 array
colors ([ndarray]): N x 3 array
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
o3d.visualization.draw_geometries([pcd])
| 9,702
|
def gen_classification_func(model, *, state_file=None, transform=None, pred_func=None,
device=None):
""" 工厂函数,生成一个分类器函数
用这个函数做过渡的一个重要目的,也是避免重复加载模型
:param model: 模型结构
:param state_file: 存储参数的文件
:param transform: 每一个输入样本的预处理函数
:param pred_func: model 结果的参数的后处理
:return: 返回的函数结构见下述 cls_func
"""
if state_file: model.load_state_dict(torch.load(str(state_file), map_location=get_device()))
model.train(False)
device = device or get_device()
model.to(device)
def cls_func(raw_in):
"""
:param raw_in: 输入可以是路径、np.ndarray、PIL图片等,都为转为batch结构的tensor
im,一张图片路径、np.ndarray、PIL图片
[im1, im2, ...],多张图片清单
:return: 输入如果只有一张图片,则返回一个结果
否则会存在list,返回多个结果
"""
dataset = InputDataset(raw_in, transform)
        # TODO adapt batch_size automatically to the available device memory
xs = torch.utils.data.DataLoader(dataset, batch_size=8)
res = None
for x in xs:
            # each batch may be large, so move each batch to the device one at a time rather than all at once
x = x.to(device)
y = model(x)
if pred_func: y = pred_func(y)
res = y if res is None else (res + y)
return res
return cls_func
| 9,703
|
def create_recipe_json(image_paths: list) -> dict:
"""
Orchestrate the various services to respond to a create recipe request.
"""
logger.info('Creating recipe json from image paths: {}'.format(image_paths))
full_text = load_images_return_text(image_paths)
recipe_json = assign_text_to_recipe(full_text)
return recipe_json
| 9,704
|
def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname
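A quick usage sketch (assuming rstrip_extra is importable from this module; the filenames are made up):
assert rstrip_extra("sample_R.fastq") == "sample"      # read marker and extension stripped
assert rstrip_extra("sample_1.fastq") == "sample_1"    # discriminative "_1" is kept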
| 9,705
|
def test_xor2():
"""
Two inputs, two outputs.
"""
net = Network("XOR2")
net.add(Layer("input1", shape=1))
net.add(Layer("input2", shape=1))
net.add(Layer("hidden1", shape=2, activation="sigmoid"))
net.add(Layer("hidden2", shape=2, activation="sigmoid"))
net.add(Layer("shared-hidden", shape=2, activation="sigmoid"))
net.add(Layer("output1", shape=1, activation="sigmoid"))
net.add(Layer("output2", shape=1, activation="sigmoid"))
net.connect("input1", "hidden1")
net.connect("input2", "hidden2")
net.connect("hidden1", "shared-hidden")
net.connect("hidden2", "shared-hidden")
net.connect("shared-hidden", "output1")
net.connect("shared-hidden", "output2")
net.compile(error='mean_squared_error',
optimizer=SGD(lr=0.3, momentum=0.9))
net.dataset.load([
([[0],[0]], [[0],[0]]),
([[0],[1]], [[1],[1]]),
([[1],[0]], [[1],[1]]),
([[1],[1]], [[0],[0]])
])
net.train(2000, report_rate=10, accuracy=1, plot=False)
net.evaluate(show=True)
net.propagate_to("shared-hidden", [[1], [1]])
net.propagate_to("output1", [[1], [1]])
net.propagate_to("output2", [[1], [1]])
net.propagate_to("hidden1", [[1], [1]])
net.propagate_to("hidden2", [[1], [1]])
net.propagate_to("output1", [[1], [1]])
net.propagate_to("output2", [[1], [1]])
net.save_weights("/tmp")
net.load_weights("/tmp")
net.evaluate(show=True)
svg = net.to_svg()
assert net is not None
| 9,706
|
def test_remove_same_word_function(session, query, expected):
"""
    Test that when the input is a word with a space, the result does not show the same word
"""
response = get_complete(session, {'q': query, 'contextual': False})
compare_two_lists(get_typeahead_values(response), expected)
| 9,707
|
def run(preprocessors, data, preprocessing=defaultdict(lambda: None), fit=True):
"""Applies preprocessing to data. It currently suppoerts StandardScaler and
OneHotEncoding
Parameters
----------
preprocessors : list
preprocessors to be applied
data : pd.DataFrame
data to be preprocessed
preprocessing : dict, optional
encoders of each preprocessor, by default defaultdict(lambda: None)
fit : bool, optional
if False, it applies to current encoder, by default True
Returns
-------
    pd.DataFrame, dict
preprocessed data and preprocessors used
"""
scaler_to_data_type = {
'StandardScaler': 'numeric',
'OneHotEncoder': 'object'}
if len(preprocessors) == 0:
return data, preprocessing
preprocessor = preprocessors[0]
data_type = scaler_to_data_type[preprocessor]
splited_data = split_by_type(data)
splited_data[data_type], preprocessing[preprocessor] = \
apply_preprocessor(splited_data[data_type],
preprocessor,
fit=fit,
encoder=preprocessing[preprocessor])
processed_data = pd.concat(splited_data.values(), axis=1)
return run(preprocessors[1:], processed_data, preprocessing, fit)
| 9,708
|
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, fan.DOMAIN, DEFAULT_CONFIG
)
| 9,709
|
def vouchers_tab(request, voucher_group, deleted=False, template_name="manage/voucher/vouchers.html"):
"""Displays the vouchers tab
"""
vouchers = voucher_group.vouchers.all()
paginator = Paginator(vouchers, 20)
page = paginator.page((request.POST if request.method == 'POST' else request.GET).get("page", 1))
taxes = Tax.objects.all()
if (request.method == "POST") and (deleted is False):
voucher_form = VoucherForm(data=request.POST)
else:
voucher_form = VoucherForm()
return render_to_string(template_name, request=request, context={
"voucher_group": voucher_group,
"taxes": taxes,
"form": voucher_form,
"vouchers_inline": vouchers_inline(request, voucher_group, vouchers, paginator, page),
})
| 9,710
|
def all_ground_operators_given_partial(
operator: STRIPSOperator, objects: Collection[Object],
sub: VarToObjSub) -> Iterator[_GroundSTRIPSOperator]:
"""Get all possible groundings of the given operator with the given objects
such that the parameters are consistent with the given substitution."""
assert set(sub).issubset(set(operator.parameters))
types = [p.type for p in operator.parameters if p not in sub]
for choice in get_object_combinations(objects, types):
# Complete the choice with the args that are determined from the sub.
choice_lst = list(choice)
choice_lst.reverse()
completed_choice = []
for p in operator.parameters:
if p in sub:
completed_choice.append(sub[p])
else:
completed_choice.append(choice_lst.pop())
assert not choice_lst
ground_op = operator.ground(tuple(completed_choice))
yield ground_op
| 9,711
|
def normalise_intensity(x,
mode="minmax",
min_in=0.0,
max_in=255.0,
min_out=0.0,
max_out=1.0,
clip=False,
clip_range_percentile=(0.05, 99.95),
):
"""
Intensity normalisation (& optional percentile clipping)
for both Numpy Array and Pytorch Tensor of arbitrary dimensions.
The "mode" of normalisation indicates different ways to normalise the intensities, including:
1) "meanstd": normalise to 0 mean 1 std;
2) "minmax": normalise to specified (min, max) range;
3) "fixed": normalise with a fixed ratio
Args:
x: (ndarray / Tensor, shape (N, *size))
mode: (str) indicate normalisation mode
min_in: (float) minimum value of the input (assumed value for fixed mode)
max_in: (float) maximum value of the input (assumed value for fixed mode)
min_out: (float) minimum value of the output
max_out: (float) maximum value of the output
clip: (boolean) value clipping if True
clip_range_percentile: (tuple of floats) percentiles (min, max) to determine the thresholds for clipping
Returns:
x: (same as input) in-place op on input x
"""
# determine data dimension
dim = x.ndim - 1
image_axes = tuple(range(1, 1 + dim)) # (1,2) for 2D; (1,2,3) for 3D
# for numpy.ndarray
if type(x) is np.ndarray:
# Clipping
if clip:
# intensity clipping
clip_min, clip_max = np.percentile(x, clip_range_percentile, axis=image_axes, keepdims=True)
x = np.clip(x, clip_min, clip_max)
# Normalise meanstd
if mode == "meanstd":
mean = np.mean(x, axis=image_axes, keepdims=True) # (N, *range(dim))
std = np.std(x, axis=image_axes, keepdims=True) # (N, *range(dim))
x = (x - mean) / std # axis should match & broadcast
# Normalise minmax
elif mode == "minmax":
min_in = np.amin(x, axis=image_axes, keepdims=True) # (N, *range(dim))
            max_in = np.amax(x, axis=image_axes, keepdims=True)  # (N, *range(dim))
            x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)  # (!) multiple broadcasting
# Fixed ratio
elif mode == "fixed":
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)
else:
raise ValueError("Intensity normalisation mode not understood."
"Expect either one of: 'meanstd', 'minmax', 'fixed'")
# cast to float 32
x = x.astype(np.float32)
# for torch.Tensor
elif type(x) is torch.Tensor:
        # todo: clipping is not supported at the moment (requires a PyTorch version of np.percentile())
# Normalise meanstd
if mode == "meanstd":
mean = torch.mean(x, dim=image_axes, keepdim=True) # (N, *range(dim))
std = torch.std(x, dim=image_axes, keepdim=True) # (N, *range(dim))
x = (x - mean) / std # axis should match & broadcast
# Normalise minmax
elif mode == "minmax":
# get min/max across dims by flattening first
min_in = x.flatten(start_dim=1, end_dim=-1).min(dim=1)[0].view(-1, *(1,)*dim) # (N, (1,)*dim)
max_in = x.flatten(start_dim=1, end_dim=-1).max(dim=1)[0].view(-1, *(1,)*dim) # (N, (1,)*dim)
            x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)  # (!) multiple broadcasting
# Fixed ratio
elif mode == "fixed":
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)
else:
raise ValueError("Intensity normalisation mode not recognised."
"Expect: 'meanstd', 'minmax', 'fixed'")
# cast to float32
x = x.float()
else:
raise TypeError("Input data type not recognised, support numpy.ndarray or torch.Tensor")
return x
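A minimal usage sketch (assuming numpy is imported as np and normalise_intensity is in scope; the values are illustrative):
x = np.array([[[0., 5.], [10., 10.]],
              [[2., 2.], [2., 4.]]])  # batch of two 2-D "images", shape (2, 2, 2)
y = normalise_intensity(x.copy(), mode="minmax", min_out=0.0, max_out=1.0)
# each sample is rescaled to roughly [0, 1] using its own min/max and cast to float32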
| 9,712
|
def spots_rmsd(spots):
""" Calculate the rmsd for a series of small_cell_spot objects
    @param spots list of small_cell_spot objects
    @return RMSD (pixels) over the predicted spots
"""
rmsd = 0
count = 0
    print('Spots with no preds', [spot.pred is None for spot in spots].count(True), 'of', len(spots))
for spot in spots:
if spot.pred is None:
continue
rmsd += measure_distance(col((spot.spot_dict['xyzobs.px.value'][0],spot.spot_dict['xyzobs.px.value'][1])),col(spot.pred))**2
count += 1
if count == 0: return 0
return math.sqrt(rmsd/count)
| 9,713
|
def test_point_at_center_bottom(points, point_at_line):
"""
Test aligned at the line text position calculation, horizontal mode
"""
x, y = text_point_at_line(points, (10, 5), TextAlign.CENTER)
assert x == pytest.approx(point_at_line[0])
assert y == pytest.approx(point_at_line[1])
| 9,714
|
def sanitize_df(data_df, schema, setup_index=True, missing_column_procedure='fill_zero'):
""" Sanitize dataframe according to provided schema
Returns
-------
data_df : pandas DataFrame
Will have fields provided by schema
Will have field types (categorical, datetime, etc) provided by schema.
"""
data_df = data_df.reset_index()
for ff, field_name in enumerate(schema.field_names):
type_ff = schema.fields[ff].descriptor['type']
if field_name not in data_df.columns:
if missing_column_procedure == 'fill_zero':
if type_ff == 'integer':
data_df[field_name] = 0
elif type_ff == 'number':
data_df[field_name] = 0.0
# Reorder columns to match schema
data_df = data_df[schema.field_names]
# Cast fields to required type (categorical / datetime)
for ff, name in enumerate(schema.field_names):
ff_spec = schema.descriptor['fields'][ff]
if 'pandas_dtype' in ff_spec and ff_spec['pandas_dtype'] == 'category':
data_df[name] = data_df[name].astype('category')
elif 'type' in ff_spec and ff_spec['type'] == 'datetime':
data_df[name] = pd.to_datetime(data_df[name])
if hasattr(schema, 'primary_key'):
data_df = data_df.sort_values(schema.primary_key)
if setup_index:
data_df = data_df.set_index(schema.primary_key)
return data_df
| 9,715
|
def log(message):
"""
Write log message to stdout and to predefined file
"""
time = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "] "
outmsg = time + message
print(outmsg)
with open(LOCAL_STORAGE_DIR + "/" + LOG_FILE_NAME, "a") as myfile:
myfile.write(outmsg + "\n")
| 9,716
|
def get_id_argument(id_card):
"""
    Get the component fields of a Chinese ID card number (18- or 15-digit)
:param id_card:
:return:
"""
id_card = id_card.upper()
id_length = len(id_card)
if id_length == 18:
code = {
'body': id_card[0:17],
'address_code': id_card[0:6],
'birthday_code': id_card[6:14],
'order_code': id_card[14:17],
'check_bit': id_card[17:18],
'type': 18
}
else:
code = {
'body': id_card,
'address_code': id_card[0:6],
'birthday_code': '19' + id_card[6:12],
'order_code': id_card[12:15],
'check_bit': '',
'type': 15
}
return code
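A usage sketch with a made-up 18-digit number (the value below is illustrative, not a real ID):
info = get_id_argument("11010519491231002X")
# info == {'body': '11010519491231002', 'address_code': '110105',
#          'birthday_code': '19491231', 'order_code': '002',
#          'check_bit': 'X', 'type': 18}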
| 9,717
|
def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size)
| 9,718
|
def ctd_upload(Id, filenames, production):
""" Edit entry at Caltech Data for data set to upload data given by filenames.
Id is an integer that defines the Caltech Data entry.
"""
caltechdata.edit_ctd(Id, filenames=filenames, production=production)
| 9,719
|
def _validate_query(
hgnc_manager: HgncManager,
query_result,
original_identifier: str,
original_namespace: str,
) -> Tuple[str, str, str]:
"""Process and validate HGNC query.
:param hgnc_manager: hgnc manager
:param query_result:
:param original_identifier:
:param original_namespace:
"""
# If invalid entry from HGNC, try to find updated symbol
if not query_result and original_namespace == HGNC:
return _get_update_alias_symbol(hgnc_manager, original_identifier, HGNC)
# Invalid entry, proceed with invalid identifier
if not query_result:
        logger.debug('No HGNC symbol found for id %s in (%s)', original_identifier, original_namespace)
return original_namespace, original_identifier, original_identifier
# Multiple entries are returned, for UniProt identifiers
if isinstance(query_result, list):
if len(query_result) > 1:
logger.debug('UniProt identifier with multiple HGNC:s %s', query_result)
query_result = query_result[0]
# Correct entry, use HGNC identifier
return HGNC, query_result.symbol, query_result.identifier
| 9,720
|
def AdjustforPeriod(numsnaps,numhalos,boxsize,hval,atime,halodata,icomove=0):
"""
Map halo positions from 0 to box size
"""
for i in range(numsnaps):
if (icomove):
boxval=boxsize/hval
else:
boxval=boxsize*atime[i]/hval
wdata=np.where(halodata[i]["Xc"]<0)
halodata[i]["Xc"][wdata]+=boxval
wdata=np.where(halodata[i]["Yc"]<0)
halodata[i]["Yc"][wdata]+=boxval
wdata=np.where(halodata[i]["Zc"]<0)
halodata[i]["Zc"][wdata]+=boxval
wdata=np.where(halodata[i]["Xc"]>boxval)
halodata[i]["Xc"][wdata]-=boxval
wdata=np.where(halodata[i]["Yc"]>boxval)
halodata[i]["Yc"][wdata]-=boxval
wdata=np.where(halodata[i]["Zc"]>boxval)
halodata[i]["Zc"][wdata]-=boxval
| 9,721
|
def my_model_builder(my_model: MyModel) -> KerasModel:
"""Build the siamese network model """
input_1 = layers.Input(my_model.input_shape)
input_2 = layers.Input(my_model.input_shape)
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
# same embedding network for both tower networks.
embedding_network = build_model_tower(my_model)
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
keras_model = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
keras_model.compile(
loss=my_model.loss,
optimizer=my_model.optimizer,
metrics=my_model.metrics
)
return keras_model
| 9,722
|
def test_upper_bounds_ensured():
"""
Long enough simulation should saturate a bounded variable
"""
example_step = SimulationStep(
x0=0,
inputs=(15000,1),
duration=100
)
_, samples = bounded_angular_velocity(example_step)
assert not any(s > bounded_angular_velocity.max for s in samples)
assert samples[-1] == bounded_angular_velocity.max
| 9,723
|
def test_copy_contents():
"""test copy contents if dst is already existed."""
# create addon src
addon_path = path.join(root_path, 'tests', 'src', 'addon')
addon_main = path.join(root_path, 'tests', 'src', 'addon', 'addon_main')
addon_sub = path.join(root_path, 'tests', 'src', 'addon', 'addon_sub')
addon_rdme = path.join(root_path, 'tests', 'src', 'addon', 'readme')
addon_toc = path.join(root_path, 'tests', 'src', 'addon', 'addon_main', 'addon_main.toc')
mkdir(addon_path)
mkdir(addon_main)
mkdir(addon_sub)
with open(addon_rdme, 'w+', encoding='utf-8') as f:
f.write('readme')
with open(addon_toc, 'w+', encoding='utf-8') as f:
f.write('addon_main.toc')
# create addon_main which already existed inside dst
dst = path.join(root_path, 'tests', 'dst', 'addon')
dst_main = path.join(root_path, 'tests', 'dst', 'addon', 'addon_main')
mkdir(dst)
mkdir(dst_main)
# copy contents to dst
copied_items = helpers.copy_contents(addon_path, dst)
# assertions
dst_sub = path.join(root_path, 'tests', 'dst', 'addon', 'addon_sub')
dst_main_toc = path.join(root_path, 'tests', 'dst', 'addon', 'addon_main', 'addon_main.toc')
dst_rdme = path.join(root_path, 'tests', 'dst', 'addon', 'readme')
assert len(copied_items) == 3
assert path.exists(dst_sub)
    assert path.exists(dst_main_toc), 'addon_main should be overridden after copy contents.'
    assert path.exists(dst_rdme), 'copy contents should also work for a single file.'
# remove everything created
rmtree(addon_path)
rmtree(dst)
| 9,724
|
def save_csv(log, additions, field_cfg, date_format):
"""Add any unique entries from new to current"""
qtys = collections.Counter()
pool = {}
log_rows = []
log_hashes = set()
for row in csv.DictReader(log):
log_rows.append(row)
hash_ = hash_row(row)
assert hash_ not in log_hashes
log_hashes.add(hash_)
dialect = csv.Sniffer().sniff(additions.read(1024))
additions.seek(0)
for row in csv.DictReader(additions, dialect=dialect):
conv_row = {}
for k, v in row.items():
if k in field_cfg:
conv_row[field_cfg[k]] = v.strip()
conv_row['status'] = 'N'
date = datetime.strptime(conv_row['transaction_date'], date_format).date()
conv_row['transaction_date'] = date.strftime('%Y/%m/%d')
hash_ = hash_row(conv_row)
if hash_ in log_hashes:
continue
pool[hash_] = conv_row
qtys[hash_] += 1
for key, val in qtys.items():
row = pool[key]
if val > 1:
row['qty'] = val
log_rows.append(row)
log_rows.sort(key=lambda row: (row['transaction_date'], row['description']))
log.seek(0)
writer = csv.DictWriter(log, fields_1, lineterminator='\n')
writer.writeheader()
for row in log_rows:
writer.writerow(row)
log.truncate()
| 9,725
|
def create_embed(
title,
description,
fields = None,
colour = None,
    timestamp = None,
author = None,
author_icon = None,
thumbnail = None,
image = None,
footer = None
):
"""Create an Embed
Args:
title (str): Set title
description (str): Set description
fields (list of tuples): Set fields
colour (int, optional): Set color. Defaults to None.
timestamp (datetime, optional): Set timestamp. Defaults to current time.
author (str, optional): Set author. Defaults to None.
author_icon (str, optional): Set author icon using image url. Defaults to None.
thumbnail (str, optional): Set thumbnail using image url. Defaults to None.
image (str, optional): Set image using image url. Defaults to None.
footer (str, optional): Set footer. Defaults to None.
Returns:
embed: returns an embed
"""
    if timestamp is None:
        # default evaluated at call time, not once at function definition time
        timestamp = datetime.utcnow()
    embed = Embed(
        title=title,
        description=description,
        colour=colour,
        timestamp=timestamp
    )
if fields is not None:
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
embed.set_author(name=author, icon_url=author_icon)
embed.set_footer(text=footer)
embed.set_thumbnail(url=thumbnail)
embed.set_image(url=image)
return embed
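A quick usage sketch (assuming Embed is discord.Embed as imported by this module; the field values are made up):
embed = create_embed(
    title="Build finished",
    description="All checks passed.",
    fields=[("Duration", "3m 12s", True)],
    footer="CI bot",
)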
| 9,726
|
def autoCalibration(I):
"""Returns horizontal and vertical factors by which every distance in
pixels should be multiplied in order to obtain the equivalent distance in
millimeters. This program assumes that the scale presents clear axis ticks and
that the distance between two biggest ticks is equal to 10 mm.
It also assumes that both horizontal and vertical scales are present in the
up right quarter of image I.
Args:
        I (array): one-channel image. If I is an RGB image, it is converted
            to a grayscale image.
Returns:
calibFactorX (double) and calibFactorY (double) are respectively the
horizontal and vertical calibration factors
"""
    #Check if I is a 1-channel image
if len(I.shape) > 2:
I = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
length, width = I.shape[0], I.shape[1]
#Cropping with empirical percentages and binarization of the selection
# !!! EMPIRICAL
TCP = 0.1 #Top cropping percentage - #empirical percentage
LCP = 0.5 #Left cropping percentage
BCP = 0.65 #Bottom cropping percentage
RCP = 0.1 #Right cropping percentage
Scale_image = I[int(TCP * length):length-int(BCP * length),\
int(LCP * width):width-int(RCP * width)]
Binar_I = cv2.threshold(Scale_image, 220., 255, cv2.THRESH_BINARY)[1]
#Selection of the biggest axis ticks: contours of white objects are found as
#well as minimal rectangles encapsulating each object. Conditions on the
#size of these contours/bounding rectangles enable the removal of objects
#that are not the biggest ticks
contours = cv2.findContours(Binar_I, cv2.RETR_EXTERNAL, \
cv2.CHAIN_APPROX_NONE)[0]
contours_size = [contours[i].size for i in range (len(contours))]
BoundingRectangles = []
for i in range(len(contours)):
if contours_size[i]<=1.7*np.mean(contours_size): #condition to stop considering the objects corresponding to figures
p1, p2, l1, l2 = cv2.boundingRect(contours[i]) #rectangles identified with point (p1,p2) and vectors (l2,0), (0,l1)
BoundingRectangles.append([i, (p1,p2,l1,l2), 2.*l1+2.*l2])
MeanPerim = np.mean([BoundingRectangles[i][2] for i in range(len(BoundingRectangles))])
Dashes = [BoundingRectangles[i] for i in range(len(BoundingRectangles)) if BoundingRectangles[i][2]>MeanPerim] #removal of points and small dashes
#Calculation of the minimal distances between two horizontal ticks and
#two vertical ticks
#browse all detected axis ticks
horiz = 10000000.
vertic = 10000000.
for i in range (0, len(Dashes)-1):
ref_Dash = Dashes[i][1]
for j in range(i+1,len(Dashes)):
if len(set(list(range(Dashes[j][1][0],Dashes[j][1][0]+Dashes[j][1][2])))\
.intersection(list(range(ref_Dash[0],ref_Dash[0]+ref_Dash[2]))))>2:
h = abs(ref_Dash[1]+ref_Dash[3]-Dashes[j][1][1]-Dashes[j][1][3])
if h<vertic:
vertic = h
if len(set(list(range(Dashes[j][1][1],Dashes[j][1][1]+Dashes[j][1][3])))\
.intersection(list(range(ref_Dash[1],ref_Dash[1]+ref_Dash[3]))))>2:
h = abs(ref_Dash[0]-Dashes[j][1][0])
if h<horiz:
horiz = h
#Factors to convert distance in pixels into distance in millimeters
if horiz == 10000000. or horiz == 0:
calibFactorX = None
else:
calibFactorX = 10./horiz
if vertic == 10000000. or vertic == 0:
calibFactorY = None
else:
calibFactorY = 10./vertic
''' visual check
for d in range(len(Dashes)):
p1 = Dashes[d][1][0]
p2 = Dashes[d][1][1]
l1 = Dashes[d][1][2]
l2 = Dashes[d][1][3]
for l in range(p1,p1+l1+1):
Binar_I[p2,l] = 150
Binar_I[p2+l2,l] = 150
for c in range(p2,p2+l2+1):
Binar_I[c,p1] = 150
Binar_I[c,p1+l1] = 150
cv2.imshow('Binary image', Binar_I)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
'''
return calibFactorX, calibFactorY
| 9,727
|
def reverse_handler(handler_input):
"""Check if a verb is provided in slot values. If provided, then
looks for the paradigm in the irregular_verbs file.
If not, then it asks user to provide the verb again.
"""
# iterate over the dictionaries in irregular_verbs.py and looks for
# the verb in the slot. If it finds it, it returns the dictionary
def get_verb(irregular_verbs, filled_verboconiugato_slot):
for dictionary in IRREGULAR_VERBS["verbs"]:
if dictionary["PS"] == verboconiugato or dictionary["PP"] == verboconiugato:
return dictionary
# type: (HandlerInput) -> Response
attribute_manager = handler_input.attributes_manager
session_attr = attribute_manager.session_attributes
slots = handler_input.request_envelope.request.intent.slots
if verboconiugato_slot in slots: # if slot is filled
verboconiugato = slots[verboconiugato_slot].value
handler_input.attributes_manager.session_attributes[
verboconiugato_slot_key] = verboconiugato # verbo is equal to what i said ex. know
# execute the function based on the verb the user asks for. askedVerb
# becomes equal to the dictionary returned by the function
askedVerb = get_verb(irregular_verbs.IRREGULAR_VERBS, verboconiugato)
if verboconiugato == "read" and askedVerb:
baseVerb = askedVerb["Base"]
pastSimple = askedVerb["PS"]
pastPart = askedVerb["PP"]
traduzione = askedVerb["Italiano"]
speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo <voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. Il suo paradigma è <voice name='Emma'><lang xml:lang='en-GB'>to {}, <phoneme alphabet='ipa' ph='rɛd'>{}</phoneme>, <phoneme alphabet='ipa' ph='rɛd'>{}</phoneme></lang></voice>. Significa <phoneme alphabet='ipa' ph='ˈlɛdʤere'>{}</phoneme>.".format(verboconiugato, baseVerb, baseVerb, pastSimple, pastPart, traduzione))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
elif askedVerb:
baseVerb = askedVerb["Base"]
pastSimple = askedVerb["PS"]
pastPart = askedVerb["PP"]
traduzione = askedVerb["Italiano"]
speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo <voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. Il suo paradigma è <voice name='Emma'><lang xml:lang='en-GB'>to {}, {}, {}</lang></voice>. Significa {}.".format(
verboconiugato, baseVerb, baseVerb, pastSimple, pastPart, traduzione))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
else:
speech = (
"Non trovo il verbo <lang xml:lang='en-GB'>{}</lang>. Se è corretto, allora la sua coniugazione è regolare".format(verboconiugato))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
# if slot isn't filled, repeat helptext
else:
        speech = ("Non ho capito. " + help_text)
        reprompt = help_text
        handler_input.response_builder.ask(help_text)
handler_input.response_builder.speak(speech).ask(
reprompt).set_should_end_session(True)
return handler_input.response_builder.response
| 9,728
|
def generate_dummy_makefile(target_dir):
"""Create a dummy makefile to demonstrate how it works.
Use dummy values unrelated to any gyp files. Its output should remain the
same unless/until makefile_writer.write_android_mk changes.
Args:
target_dir: directory in which to write the resulting Android.mk
"""
common_vars_dict = generate_dummy_vars_dict(None)
deviation_params = [('foo', 'COND'), ('bar', None)]
deviations = [generate_dummy_vars_dict_data(name, condition)
for (name, condition) in deviation_params]
makefile_writer.write_android_mk(target_dir=target_dir,
common=common_vars_dict,
deviations_from_common=deviations)
| 9,729
|
def commit_user_deletions():
"""Delete (marked) users."""
AccountTerminationQueue.objects.commit_terminations()
| 9,730
|
def load_wrf_data(filename):
"""Load required data form the WRF output file : filename"""
base_data=load_vars(filename,wrfvars)
skin_t=load_tskin(filename,tsvar,landmask)
base_data.append(skin_t)
atts=mygis.read_atts(filename,global_atts=True)
return Bunch(data=base_data,global_atts=atts)
| 9,731
|
def DensityRatio_QP(X_den, X_num, kernel, g, v_matrix, ridge=1e-3):
"""
The function computes a model of the density ratio.
The function is in the form $A^T K$
The function returns the coefficients $\alpha_i$ and the bias term b
"""
l_den, d = X_den.shape
l_num, d_num = X_num.shape
# TODO: Check d==d_num
ones_num = np.matrix(np.ones(shape=(l_num, 1)))
zeros_den = np.matrix(np.zeros(shape=(l_den, 1)))
gram = kernel(X_den)
K = np.matrix(gram + ridge * np.eye(l_den))
# K = np.matrix(gram) # No ridge
print("K max, min: %e, %e" % (np.max(K), np.min(K)))
data = np.concatenate((X_den, X_num))
if callable(v_matrix):
V = np.matrix(v_matrix(X_den, X_den, data))
V_star = np.matrix(v_matrix(X_den, X_num, data)) # l_den by l_num
else:
return -1
print("V max,min: %e, %e" % (np.max(V), np.min(V)))
print("V_star max,min: %e, %e" % (np.max(V_star), np.min(V_star)))
tgt1 = K * V * K
print("K*V*K max, min: %e, %e" % (np.max(tgt1), np.min(tgt1)))
tgt2 = g * K
print("g*K max, min: %e, %e" % (np.max(tgt2), np.min(tgt2)))
P = cvxopt.matrix(2 * (tgt1 + tgt2))
q_ = -2 * (l_den / l_num) * (K * V_star * ones_num)
print("q max, min: %e, %e" % (np.max(q_), np.min(q_)))
q = cvxopt.matrix(q_)
#### Let's construct the inequality constraints
# Now create G and h
G = cvxopt.matrix(-K)
h = cvxopt.matrix(zeros_den)
# G = cvxopt.matrix(np.vstack((-K,-np.eye(l_den))))
# h = cvxopt.matrix(np.vstack((zeros_den,zeros_den)))
# Let's construct the equality constraints
A = cvxopt.matrix((1 / l_den) * K * V_star * ones_num).T
b = cvxopt.matrix(np.ones(1))
return cvxopt.solvers.qp(P, q, G, h, A, b, options=dict(
maxiters=50))
| 9,732
|
def sanitize_date(date_dict: dict):
"""
    Take the date values entered by the user and check their validity. If the date is valid, the function
    returns True; otherwise it sets the values to None and returns False.
:param date_dict:
:return:
"""
month = date_dict["month"]
day = date_dict["day"]
year = date_dict["year"]
date = [month, day, year]
date_is_valid = not any([component is None for component in date])
if date_is_valid:
date_is_valid &= not (month == 2 and day > 29)
date_is_valid &= not (month in [4, 6, 9, 11] and day > 30)
is_leap_year = (year % 4) == 0
is_leap_year &= ((year % 100) != 0 or (year % 400) == 0)
date_is_valid &= not (month == 2 and day == 29 and not is_leap_year)
if not date_is_valid:
date_dict["month"] = date_dict["day"] = date_dict["year"] = None
return False
return True
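A brief usage sketch (the dictionaries below are illustrative):
assert sanitize_date({"month": 2, "day": 29, "year": 2024}) is True   # leap year
d = {"month": 2, "day": 29, "year": 2023}
assert sanitize_date(d) is False        # not a leap year
assert d["month"] is None and d["day"] is None and d["year"] is None  # values were reset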
| 9,733
|
def create_scale(tonic, pattern, octave=1):
"""
Create an octave-repeating scale from a tonic note
and a pattern of intervals
Args:
tonic: root note (midi note number)
pattern: pattern of intervals (list of numbers representing
intervals in semitones)
octave: span of scale (in octaves)
Returns:
list of midi notes in the scale
"""
assert(sum(pattern)==12)
scale = [tonic]
note = tonic
for o in range(octave):
for i in pattern:
note += i
if note <= 127:
scale.append(note)
return scale
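For example (a small sketch; 60 is middle C in MIDI and the pattern is the major scale):
c_major = create_scale(60, [2, 2, 1, 2, 2, 2, 1])
# c_major == [60, 62, 64, 65, 67, 69, 71, 72]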
| 9,734
|
def _qfloat_append(qf, values, axis=None):
"""Implement np.append for qfloats."""
# First, convert to the same unit.
qf1, qf2 = same_unit(qf, values, func=np.append)
nominal = np.append(qf1.nominal, qf2.nominal, axis)
std = np.append(qf1.uncertainty, qf2.uncertainty, axis)
return QFloat(nominal, std, qf1.unit)
| 9,735
|
def power_off_progressive(part, restart=False, ibmi_immed=False,
timeout=CONF.pypowervm_job_request_timeout):
"""Attempt soft power-off, retrying with increasing aggression on failure.
IBMi partitions always start with OS shutdown. If ibmi_immed == False,
os-normal shutdown is tried first; then os-immediate; then vsp-normal; then
vsp-hard. If ibmi_immed == True, os-normal is skipped, but the rest of the
progression is the same.
For non-IBMi partitions:
If RMC is up, os-immediate is tried first. If this times out, vsp hard is
performed next; otherwise, vsp-normal is attempted before vsp-hard.
If RMC is down, vsp-normal is tried first, then vsp-hard.
:param part: The LPAR/VIOS wrapper of the instance to power off.
:param restart: Boolean. Perform a restart after the power off.
:param ibmi_immed: Boolean. Indicates whether to try os-normal first
(False, the default) before progressing to
os-immediate. If True, skip trying os-normal shutdown.
Only applies to IBMi partitions.
:param timeout: Time in seconds to wait for the instance to stop. This is
only applied to the first attempt in the progression.
:raise VMPowerOffFailure: If the last attempt in the progression failed.
:raise VMPowerOffTimeout: If the last attempt in the progression timed out.
"""
_power_off_progressive(part, timeout, restart, ibmi_immed=ibmi_immed)
| 9,736
|
async def test_strip(hass: HomeAssistant) -> None:
"""Test a smart strip."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
plug = _mocked_strip()
with _patch_discovery(device=plug), _patch_single_discovery(device=plug):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
# Verify we only create entities for the children
# since this is what the previous version did
assert hass.states.get("switch.my_strip") is None
for plug_id in range(2):
entity_id = f"switch.plug{plug_id}"
state = hass.states.get(entity_id)
assert state.state == STATE_ON
await hass.services.async_call(
SWITCH_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
plug.children[plug_id].turn_off.assert_called_once()
plug.children[plug_id].turn_off.reset_mock()
await hass.services.async_call(
SWITCH_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
plug.children[plug_id].turn_on.assert_called_once()
plug.children[plug_id].turn_on.reset_mock()
| 9,737
|
def pre_spawn_hook_environ(monkeypatch, jupyterhub_api_environ):
"""
Set the environment variables used in the setup_course_hook function
"""
monkeypatch.setenv('NB_GID', '100')
monkeypatch.setenv('NB_NON_GRADER_UID', '1000')
| 9,738
|
def filter_by_mean_color(img:np.ndarray, circles:List[Circle], threshold=170) -> List[Circle]:
"""Filter circles to keep only those who covers an area which high pixel mean than threshold"""
filtered = []
for circle in circles:
box = Box(circle=circle)
area = box.get_region(img)
if np.mean(area) > threshold:
filtered.append(circle)
return filtered
| 9,739
|
def test_deletion_no_child(basic_tree):
"""Test the deletion of a red black tree."""
tree = red_black_tree.RBTree()
test_tree = [(23, "23"), (4, "4"), (30, "30"), (11, "11")]
for key, data in test_tree:
tree.insert(key=key, data=data)
tree.delete(4)
assert [item for item in tree.inorder_traverse()] == [
(11, "11"),
(23, "23"),
(30, "30"),
]
| 9,740
|
def ParseSortByArg(sort_by=None):
"""Parses and creates the sort by object from parsed arguments.
Args:
sort_by: list of strings, passed in from the --sort-by flag.
Returns:
A parsed sort by string ending in asc or desc.
"""
if not sort_by:
return None
fields = []
for field in sort_by:
if field.startswith('~'):
field = field.lstrip('~') + ' desc'
else:
field += ' asc'
fields.append(field)
return ','.join(fields)
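For instance (a quick sketch of the expected output):
ParseSortByArg(['~name', 'creationTimestamp'])
# -> 'name desc,creationTimestamp asc'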
| 9,741
|
def dense_to_text(decoded, originals):
"""
Convert a dense, integer encoded `tf.Tensor` into a readable string.
Create a summary comparing the decoded plaintext with a given original string.
Args:
decoded (np.ndarray):
Integer array, containing the decoded sequences.
originals (np.ndarray):
            String tensor, containing the original input string for comparison.
`originals` can be an empty tensor.
Returns:
np.ndarray:
1D string Tensor containing only the decoded text outputs.
[decoded_string_0, ..., decoded_string_N]
np.ndarray:
2D string Tensor with layout:
[[decoded_string_0, original_string_0], ...
[decoded_string_N, original_string_N]]
"""
decoded_strings = []
original_strings = []
for d in decoded:
decoded_strings.append(''.join([itoc(i) for i in d]))
if len(originals) > 0:
for o in originals:
original_strings.append(''.join([c for c in o.decode('utf-8')]))
else:
original_strings = ['n/a'] * len(decoded_strings)
    decoded_strings = np.array(decoded_strings, dtype=object)
    original_strings = np.array(original_strings, dtype=object)
summary = np.vstack([decoded_strings, original_strings])
return np.array(decoded_strings), summary
| 9,742
|
def admin_view_all_working_curriculums(request):
""" views all the working curriculums offered by the institute """
user_details = ExtraInfo.objects.get(user = request.user)
des = HoldsDesignation.objects.all().filter(user = request.user).first()
if str(des.designation) == "student" or str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor" :
return HttpResponseRedirect('/programme_curriculum/mainpage/')
elif str(request.user) == "acadadmin" :
pass
curriculums = Curriculum.objects.filter(working_curriculum=1)
return render(request,'programme_curriculum/acad_admin/admin_view_all_working_curriculums.html',{'curriculums':curriculums})
| 9,743
|
async def test_learn(hass):
"""Test learn service."""
mock_device = MagicMock()
mock_device.enter_learning.return_value = None
mock_device.check_data.return_value = b64decode(DUMMY_IR_PACKET)
with patch.object(
hass.components.persistent_notification, "async_create"
) as mock_create:
async_setup_service(hass, DUMMY_HOST, mock_device)
await hass.async_block_till_done()
await hass.services.async_call(DOMAIN, SERVICE_LEARN, {"host": DUMMY_HOST})
await hass.async_block_till_done()
assert mock_device.enter_learning.call_count == 1
assert mock_device.enter_learning.call_args == call()
assert mock_create.call_count == 1
assert mock_create.call_args == call(
"Received packet is: {}".format(DUMMY_IR_PACKET), title="Broadlink switch"
)
| 9,744
|
def getEmuAtVa(vw, va, maxhit=None):
"""
Build and run an emulator to the given virtual address
from the function entry point.
(most useful for state analysis. kinda heavy though...)
"""
fva = vw.getFunction(va)
    if fva is None:
return None
cbva,cbsize,cbfva = vw.getCodeBlock(va)
fgraph = v_graphutil.buildFunctionGraph(vw, fva)
# Just take the first one off the iterator...
for path in v_graphutil.getCodePathsTo(fgraph, cbva):
emu = vw.getEmulator()
opcodes = v_graphutil.getOpsFromPath(vw, fgraph, path)
for op in opcodes:
if op.va == va:
break
emu.executeOpcode(op)
return emu
| 9,745
|
def _create_test_validity_conditional(metric):
"""Creates BigQuery SQL clauses to specify validity rules for an NDT test.
Args:
metric: (string) The metric for which to add the conditional.
Returns:
(string) A set of SQL clauses that specify conditions an NDT test must
meet to be considered a valid, completed test.
"""
# NDT test is supposed to last 10 seconds, give some buffer for tests that
    # ended slightly before 10 seconds.
MIN_DURATION = _seconds_to_microseconds(9)
# Tests that last > 1 hour are likely erroneous.
MAX_DURATION = _seconds_to_microseconds(3600)
# A test that did not exchange at least 8,192 bytes is likely erroneous.
MIN_BYTES = 8192
# web100 state variable constants from
# http://www.web100.org/download/kernel/tcp-kis.txt
STATE_CLOSED = 1
STATE_ESTABLISHED = 5
STATE_TIME_WAIT = 11
# For RTT metrics, exclude results of tests with 10 or fewer round trip time
# samples, because there are not enough samples to accurately estimate the
# RTT.
MIN_RTT_SAMPLES = 10
conditions = []
# Must have completed the TCP three-way handshake.
conditions.append((
'(web100_log_entry.snap.State = {state_closed}\n\t'
'\tOR (web100_log_entry.snap.State >= {state_established}\n\t'
'\t\tAND web100_log_entry.snap.State <= {state_time_wait}))').format(
state_closed=STATE_CLOSED,
state_established=STATE_ESTABLISHED,
state_time_wait=STATE_TIME_WAIT))
# Must have been determined to be unaffected by platform error.
conditions.append(('blacklist_flags == 0'))
if _is_server_to_client_metric(metric):
# Must leave slow start phase of TCP, indicated by reaching
# congestion at least once.
conditions.append('web100_log_entry.snap.CongSignals > 0')
# Must send at least the minimum number of bytes.
conditions.append('web100_log_entry.snap.HCThruOctetsAcked >= %d' %
MIN_BYTES)
# Must last for at least the minimum test duration.
conditions.append(
('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
'\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
'\tweb100_log_entry.snap.SndLimTimeSnd) >= %u') % MIN_DURATION)
# Must not exceed the maximum test duration.
conditions.append(
('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
'\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
'\tweb100_log_entry.snap.SndLimTimeSnd) < %u') % MAX_DURATION)
# Exclude results of tests with fewer than 10 round trip time samples,
# because there are not enough samples to accurately estimate the RTT.
if metric == 'minimum_rtt' or metric == 'average_rtt':
conditions.append('web100_log_entry.snap.CountRTT > %u' %
MIN_RTT_SAMPLES)
else:
# Must receive at least the minimum number of bytes.
conditions.append('web100_log_entry.snap.HCThruOctetsReceived >= %u' %
MIN_BYTES)
# Must last for at least the minimum test duration.
conditions.append('web100_log_entry.snap.Duration >= %u' % MIN_DURATION)
# Must not exceed the maximum test duration.
conditions.append('web100_log_entry.snap.Duration < %u' % MAX_DURATION)
return '\n\tAND '.join(conditions)
| 9,746
|
def tfds_train_test_split(
tfds: tf.data.Dataset,
test_frac: float,
dataset_size: Union[int, str],
buffer_size: int = 256,
seed: int = 123,
) -> Sequence[Union[tf.data.Dataset, tf.data.Dataset, int, int]]:
"""
    !!! does not work properly: the split seems to be dependent on hardware. Open an issue on
    github/tensorflow?
Split tf-dataset into a train and test dataset.
https://stackoverflow.com/questions/48213766/split-a-dataset-created-by-tensorflow-dataset-api-in-to-train-and-test
Args:
tfds (tf.data.Dataset): Tf-dataset, that will be split into a train- and
testset.
        test_frac (float): Fraction of the dataset to reserve for the test set.
Returns:
[tf.data.Dataset, tf.data.Dataset, int, int]: Returns train and test datasets
as well as the absolut sizes of the full and the train dataset.
"""
    logger.warning(
        "This method of data splitting does not guarantee the same split on every machine.")
full_ds_size = None
if dataset_size == "auto":
logger.warning(
"dataset_size='auto': In order to calculate the size of the dataset, all "
"samples will be loaded.")
full_ds_size = get_tfds_size(tfds)
elif isinstance(dataset_size, int):
full_ds_size = dataset_size
logger.info(f"Using following seed to shuffle data: {seed}")
tfds = tfds.shuffle(buffer_size, reshuffle_each_iteration=False, seed=seed)
train_ds_fraction = 1.0 - test_frac
train_ds_size = int(train_ds_fraction * full_ds_size)
    logger.info(f"train dataset size: {train_ds_size}, val dataset size: "
                f"{full_ds_size - train_ds_size}")
train_dataset = tfds.take(train_ds_size)
test_dataset = tfds.skip(train_ds_size)
return train_dataset, test_dataset, full_ds_size, train_ds_size
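A small usage sketch (assuming TensorFlow is imported as tf and the function above is in scope):
ds = tf.data.Dataset.range(10)
train_ds, test_ds, full_size, train_size = tfds_train_test_split(
    ds, test_frac=0.2, dataset_size=10)
# full_size == 10, train_size == 8; see the warning above about reproducibility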
| 9,747
|
def seconds_to_hms(seconds):
"""
Convert seconds to H:M:S format.
Works for periods over 24H also.
"""
return datetime.timedelta(seconds=seconds)
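For example (a quick sketch):
str(seconds_to_hms(3661))   # '1:01:01'
str(seconds_to_hms(93784))  # '1 day, 2:03:04' -- periods over 24H keep the day count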
| 9,748
|
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Now add all the options to it
parser.add_argument("fasta", type=str,
help="name of the FASTA to read; must be indexable")
parser.add_argument("-n", type=int, default=10000,
help="the number of k-mers to count")
parser.add_argument("-k", type=int, default=150,
help="the length of each k-mer")
parser.add_argument("--thread_count", type=int, default=multiprocessing.cpu_count(),
help="number of k-mer counting threads to use")
parser.add_argument("--batch_size", type=int, default=10000,
help="number of forward-strand k-mer candidates to count in each batch")
parser.add_argument("--bloom_error", type=float, default=1E-4,
help="error rate on the bloom filter")
return parser.parse_args(args)
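A usage sketch (the script and FASTA names are made up; the first element stands in for sys.argv[0]):
options = parse_args(["count_kmers.py", "reference.fa", "-n", "5000", "-k", "100"])
# options.fasta == 'reference.fa', options.n == 5000, options.k == 100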
| 9,749
|
def train(config):
"""Trains the model based on configuration settings
Args:
config: configurations for training the model
"""
tf.reset_default_graph()
data = DataReader(config.directory, config.image_dims, config.batch_size,
config.num_epochs, config.use_weights)
train_data = data.train_batch(config.train_file)
num_train_images = data.num_images
test_data = data.test_batch(config.val_file)
num_val_images = data.num_images
# determine number of iterations based on number of images
training_iterations = int(np.floor(num_train_images/config.batch_size))
validation_iterations = int(np.floor(num_val_images/config.batch_size))
# create iterators allowing us to switch between datasets
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle,
train_data.output_types, train_data.output_shapes)
next_element = iterator.get_next()
training_iterator = train_data.make_initializable_iterator()
val_iterator = test_data.make_initializable_iterator()
# create placeholder for train or test
train_network = tf.placeholder(tf.bool, [])
# get images and pass into network
image, label, weight = next_element
drn = DRN(image, config.image_dims, config.batch_size, config.num_classes,
train_network, config.network)
# get predictions and logits
prediction = drn.pred
logits = drn.prob
label = tf.squeeze(label, 3)
# resize the logits using bilinear interpolation
imsize = tf.constant([config.image_dims[0], config.image_dims[1]],
dtype=tf.int32)
logits = tf.image.resize_bilinear(logits, imsize)
print('Resized shape is {}'.format(logits.get_shape()))
prediction = tf.argmax(logits, 3)
if config.loss == 'CE':
if config.use_weights:
label_one_hot = tf.one_hot(label, config.num_classes)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=label_one_hot,
logits=logits)
loss = loss*tf.squeeze(weight, 3)
else:
# use sparse with flattened labelmaps
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label,
logits=logits)
loss = tf.reduce_mean(loss)
elif config.loss == 'dice':
loss = dice_loss(logits, label, config.num_classes,
use_weights=config.use_weights)
else:
        raise NameError("Loss must be specified as CE or DICE")
# global step to keep track of iterations
global_step = tf.Variable(0, trainable=False, name='global_step')
# create placeholder for learning rate
learning_rate = tf.placeholder(tf.float32, shape=[])
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step)
saver = tf.train.Saver(max_to_keep=3)
init = tf.global_variables_initializer()
with tf.Session() as sess:
training_handle = sess.run(training_iterator.string_handle())
validation_handle = sess.run(val_iterator.string_handle())
sess.run(training_iterator.initializer)
sess.run(init)
ckpt = tf.train.get_checkpoint_state(config.logs)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print('Restoring session at step {}'.format(global_step.eval()))
# if restoring saved checkpoint get last saved iteration so that correct
# epoch can be restored
iteration = global_step.eval()
start_epoch = int(np.floor(iteration/training_iterations))
for current_epoch in range(start_epoch, config.num_epochs):
train_loss = 0
for i in range(training_iterations):
_, l = sess.run([optimizer, loss], feed_dict={handle:training_handle,
learning_rate:config.learning_rate, train_network:True})
train_loss += l
iteration = global_step.eval()
sess.run(val_iterator.initializer)
val_loss = 0
for i in range(validation_iterations):
l, img, lbl, pred = sess.run([loss, image, label, prediction],
feed_dict={handle:validation_handle, train_network:False})
val_loss += l
# evaluate accuracy
accuracy = jaccard(lbl, pred, config.num_classes)
dice_score = dice(lbl, pred, config.num_classes)
print('Train loss Epoch {} step {} :{}'.format(current_epoch, iteration,
train_loss/training_iterations))
print('Validation loss Epoch {} step {} :{}'.format(current_epoch, iteration,
val_loss/validation_iterations))
with open('loss.txt', 'a') as f:
f.write("Epoch: {} Step: {} Loss: {}\n".format(current_epoch, iteration,
train_loss/training_iterations))
saver.save(sess, config.logs + '/model.ckpt', global_step)
| 9,750
|
def test_sentry_middleware(db, clients, mocker, settings):
"""
Check that users are added to the Sentry context when the middleware
is active.
"""
settings.MIDDLEWARE.append("creator.middleware.SentryMiddleware")
client = clients.get("Administrators")
mock = mocker.patch("sentry_sdk.set_user")
client.get("health_check")
assert mock.call_count == 1
| 9,751
|
def _add_spot_2d(image, ground_truth, voxel_size_yx, precomputed_gaussian):
"""Add a 2-d gaussian spot in an image.
Parameters
----------
image : np.ndarray, np.uint
A 2-d image with shape (y, x).
ground_truth : np.ndarray
Ground truth array with shape (nb_spots, 4).
- coordinate_y
- coordinate_x
- sigma_yx
- amplitude
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
precomputed_gaussian : Tuple[np.ndarray]
Tuple with one tables of precomputed values for the erf, with shape
(nb_value, 2). One table per dimension.
Returns
-------
new_image : np.ndarray, np.uint
A 2-d image with simulated spots and shape (y, x).
"""
# cast image
original_dtype = image.dtype
image = image.astype(np.float64)
# compute reference spot shape
max_sigma = max(ground_truth[:, 2])
radius_yx, _ = stack.get_radius(
voxel_size_z=None, voxel_size_yx=voxel_size_yx,
psf_z=None, psf_yx=max_sigma)
radius_yx = np.ceil(radius_yx).astype(np.int64)
yx_shape = radius_yx * 2 + 1
# build a grid to represent a spot image
image_spot = np.zeros((yx_shape, yx_shape), dtype=np.uint8)
grid = detection.initialize_grid(
image_spot=image_spot,
voxel_size_z=None,
voxel_size_yx=voxel_size_yx,
return_centroid=False)
# pad image
image_padded = np.pad(image, ((radius_yx, radius_yx),
(radius_yx, radius_yx)), mode="constant")
# loop over every spot
for (coord_y, coord_x, sigma_yx, amp) in ground_truth:
# simulate spot signal
position_spot = np.asarray((radius_yx, radius_yx), dtype=np.int64)
position_spot = np.ravel_multi_index(
position_spot, dims=image_spot.shape)
position_spot = list(grid[:, position_spot])
simulated_spot = detection.gaussian_2d(
grid=grid,
mu_y=position_spot[0],
mu_x=position_spot[1],
sigma_yx=sigma_yx,
voxel_size_yx=voxel_size_yx,
psf_amplitude=amp,
psf_background=0,
precomputed=precomputed_gaussian)
simulated_spot = np.reshape(simulated_spot, image_spot.shape)
# add spot
coord_y_min = int(coord_y)
coord_y_max = int(coord_y + 2 * radius_yx + 1)
coord_x_min = int(coord_x)
coord_x_max = int(coord_x + 2 * radius_yx + 1)
image_padded[coord_y_min:coord_y_max,
coord_x_min:coord_x_max] += simulated_spot
# unpad image
image = image_padded[radius_yx:-radius_yx, radius_yx:-radius_yx]
image_raw = np.reshape(image, image.size)
# sample Poisson distribution from gaussian values
image_raw = np.random.poisson(lam=image_raw, size=image_raw.size)
# reshape and cast image
new_image = np.reshape(image_raw, image.shape)
new_image = np.clip(new_image, 0, np.iinfo(original_dtype).max)
new_image = new_image.astype(original_dtype)
return new_image
| 9,752
|
def bing(text, bot):
"""<query> - returns the first bing search result for <query>"""
api_key = bot.config.get("api_keys", {}).get("bing_azure")
# handle NSFW
show_nsfw = text.endswith(" nsfw")
# remove "nsfw" from the input string after checking for it
if show_nsfw:
text = text[:-5].strip().lower()
rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER
if not api_key:
return "Error: No Bing Azure API details."
# why are these all differing formats and why does format have a $? ask microsoft
params = {
"Sources": bingify("web"),
"Query": bingify(text),
"Adult": bingify(rating),
"$format": "json"
}
request = requests.get(API_URL, params=params, auth=(api_key, api_key))
# I'm not even going to pretend to know why results are in ['d']['results'][0]
j = request.json()['d']['results'][0]
if not j["Web"]:
return "No results."
result = j["Web"][0]
# not entirely sure this even needs un-escaping, but it wont hurt to leave it in
title = formatting.truncate(unescape(result["Title"]), 60)
desc = formatting.truncate(unescape(result["Description"]), 150)
url = unescape(result["Url"])
return colors.parse('{} -- $(b){}$(b): "{}"'.format(url, title, desc))
| 9,753
|
def single_length_RB(
RB_number: int, RB_length: int, target: int = 0
) -> List[List[str]]:
"""Given a length and number of repetitions it compiles Randomized Benchmarking
sequences.
Parameters
----------
RB_number : int
The number of sequences to construct.
RB_length : int
The number of Cliffords in each individual sequence.
target : int
Index of the target qubit
Returns
-------
list
List of RB sequences.
"""
S = []
for _ in range(RB_number):
seq = np.random.choice(24, size=RB_length - 1) + 1
seq = np.append(seq, inverseC(seq))
seq_gates = []
for cliff_num in seq:
g = [f"{c}[{target}]" for c in cliffords_decomp[cliff_num - 1]]
seq_gates.extend(g)
S.append(seq_gates)
return S
| 9,754
|
def decode(text_file_abs_path, threshold=10):
"""
    Decodes a text file into the hidden ciphertext.
    Parameters
    ----------
    text_file_abs_path: str
    threshold: int, optional
Returns
-------
ciphertext: str
"""
try:
with open(text_file_abs_path, "rb") as f:
text = f.read()
except Exception:
return None
freq_limit = limit_freq_threshold(threshold)
renamed_ciphertext = ''.join(
MarkovToolbox.derive_first_letter_of_every_sentence(text))
ciphertext = revert_renamed_number(renamed_ciphertext, freq_limit)
if threshold != 10:
ciphertext = NumericalToolbox.change_base(ciphertext, threshold, 10,
standard_formatting=False)
return ciphertext
| 9,755
|
def pixelizenxncircregion(bmp: array,
x: int, y: int, r: int, n: int):
"""Pixelize a circular region in a
bitmap file by n
Args:
bmp : unsigned byte array
with bmp format
x, y, r: center (x, y)
and radius r
n : integer pixellation
dimension n by n
Returns:
byref modified
unsigned byte array
"""
b = pixelizenxn(bmp, n)
c = _BMoffsethd
for v in itercirclepartlineedge(r):
x1, x2 = mirror(x, v[0])
y1, y2 = mirror(y, v[1])
        bmp[c(bmp, x1, y1): c(bmp, x2, y1)], bmp[c(bmp, x1, y2): c(bmp, x2, y2)] = \
            b[c(b, x1, y1): c(b, x2, y1)], b[c(b, x1, y2): c(b, x2, y2)]
| 9,756
|
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag)
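A short usage sketch in Django template syntax (the variable name is made up); the line below renders as the literal text "{% if user.is_staff %}":
{% templatetag openblock %} if user.is_staff {% templatetag closeblock %}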
| 9,757
|
def create_test_folder(df_test, target_path):
"""Create test set in the target folder
Parameters:
df_test -- dataframe that contains all the test set details
('patient_id', 'filename', 'class', 'data_source')
target_path -- path to the new dataset folder
"""
folder_path = os.path.join(target_path, 'xray_preprocess/test')
print(f'Create test set at: {folder_path}')
for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):
if row['class']=='negative':
destination_path = os.path.join(folder_path, 'negative')
elif row['class']=='positive':
destination_path = os.path.join(folder_path, 'positive')
if not os.path.exists(destination_path):
os.makedirs(destination_path)
img = os.path.join(target_path, 'xray', 'test', row['filename'])
shutil.copy(img, destination_path )
| 9,758
|
def reflected_phase_curve(phases, omega, g, a_rp):
"""
Reflected light phase curve for a homogeneous sphere by
Heng, Morris & Kitzmann (2021).
Parameters
----------
phases : `~np.ndarray`
Orbital phases of each observation defined on (0, 1)
omega : tensor-like
        Single-scattering albedo as defined in Heng, Morris & Kitzmann (2021)
g : tensor-like
Scattering asymmetry factor, ranges from (-1, 1).
a_rp : float, tensor-like
Semimajor axis scaled by the planetary radius
Returns
-------
flux_ratio_ppm : tensor-like
Flux ratio between the reflected planetary flux and the stellar flux in
units of ppm.
A_g : tensor-like
Geometric albedo derived for the planet given {omega, g}.
q : tensor-like
Integral phase function
"""
# Convert orbital phase on (0, 1) to "alpha" on (0, np.pi)
alpha = jnp.asarray(2 * np.pi * phases - np.pi)
abs_alpha = jnp.abs(alpha)
alpha_sort_order = jnp.argsort(alpha)
sin_abs_sort_alpha = jnp.sin(abs_alpha[alpha_sort_order])
sort_alpha = alpha[alpha_sort_order]
gamma = jnp.sqrt(1 - omega)
eps = (1 - gamma) / (1 + gamma)
    # Equation 34 for the Henyey-Greenstein phase function
P_star = (1 - g ** 2) / (1 + g ** 2 +
2 * g * jnp.cos(alpha)) ** 1.5
# Equation 36
P_0 = (1 - g) / (1 + g) ** 2
# Equation 10:
Rho_S = P_star - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
Rho_S_0 = P_0 - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
Rho_L = 0.5 * eps * (2 - eps) * (1 + eps) ** 2
Rho_C = eps ** 2 * (1 + eps) ** 2
alpha_plus = jnp.sin(abs_alpha / 2) + jnp.cos(abs_alpha / 2)
alpha_minus = jnp.sin(abs_alpha / 2) - jnp.cos(abs_alpha / 2)
# Equation 11:
Psi_0 = jnp.where(
(alpha_plus > -1) & (alpha_minus < 1),
jnp.log((1 + alpha_minus) * (alpha_plus - 1) /
(1 + alpha_plus) / (1 - alpha_minus)),
0
)
Psi_S = 1 - 0.5 * (jnp.cos(abs_alpha / 2) -
1.0 / jnp.cos(abs_alpha / 2)) * Psi_0
Psi_L = (jnp.sin(abs_alpha) + (np.pi - abs_alpha) *
jnp.cos(abs_alpha)) / np.pi
Psi_C = (-1 + 5 / 3 * jnp.cos(abs_alpha / 2) ** 2 - 0.5 *
jnp.tan(abs_alpha / 2) * jnp.sin(abs_alpha / 2) ** 3 * Psi_0)
# Equation 8:
A_g = omega / 8 * (P_0 - 1) + eps / 2 + eps ** 2 / 6 + eps ** 3 / 24
# Equation 9:
Psi = ((12 * Rho_S * Psi_S + 16 * Rho_L *
Psi_L + 9 * Rho_C * Psi_C) /
(12 * Rho_S_0 + 16 * Rho_L + 6 * Rho_C))
flux_ratio_ppm = 1e6 * (a_rp ** -2 * A_g * Psi)
q = _integral_phase_function(
Psi, sin_abs_sort_alpha, sort_alpha, alpha_sort_order
)
return flux_ratio_ppm, A_g, q
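# Usage sketch (hedged): assumes numpy as np and jax.numpy as jnp are imported as above;
# the parameter values are illustrative, not fitted to any real planet.
# phases = np.linspace(0.01, 0.99, 200)
# flux_ppm, A_g, q = reflected_phase_curve(phases, omega=0.3, g=0.1, a_rp=100.0)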
| 9,759
|
def train_single_span(config: TrainConfig) -> None:
"""Train a single span edge probing model.
Trains the model until either a maximum number of evaluations or a maximum number
of evaluations without improvement is reached. Furthermore the learning rate can be halved
if the model does not improve for a certain number of evaluations.
Args:
config: configuration specifying the train parameters.
Returns:
None
"""
print("Training the model")
    lr: float = config.lr
eval: int = 0
counter: int = 0
start_index: int = 1
# Train until one of the stop conditions is True.
while True:
config.model.train()
loop: tqdm.notebook.tqdm_notebook = tqdm.tqdm_notebook(config.train_data)
# Run through one epoch.
for i, (xb, span1s, targets) in enumerate(loop, start_index):
config.optimizer.zero_grad()
output = config.model(
input_ids=xb.to(config.dev),
span1s=span1s.to(config.dev)
)
batch_loss = config.loss_func(output, targets.to(config.dev))
batch_loss.backward()
nn.utils.clip_grad_norm_(config.model.parameters(), 5.0)
config.optimizer.step()
# Evaluate the model after each eval_interval.
if i % config.eval_interval == 0:
print(f"Training run {eval+1} finished")
loss: float = eval_single_span(config.val_data, config.model, config.loss_func, dev=config.dev)
print(f"Loss: {loss}")
eval += 1
# Check if the model has improved.
                if eval == 1:
                    min_loss: float = loss
                elif loss < min_loss:
                    min_loss = loss
                    counter = 0
                else:
                    counter += 1
# Check if training is finished.
if config.max_evals is not None and eval >= config.max_evals:
break
elif counter >= config.patience:
break
elif counter % config.patience_lr == 0 and counter > 0:
lr = lr/2
                    print(f"No improvement for {config.patience_lr} evaluations, halving the learning rate to {lr}")
for g in config.optimizer.param_groups:
g['lr'] = lr
# If inner loop did not break start another epoch.
else:
start_index = i % config.eval_interval
continue
# If inner loop did break.
break
print("Training is finished")
| 9,760
|
def costspec(
currencies: list[str] = ["USD"],
) -> s.SearchStrategy[pos.CostSpec]:
"""Generates a random CostSpec.
Args:
currencies: An optional list of currencies to select from.
Returns:
A new search strategy.
"""
return s.builds(pos.CostSpec, currency=common.currency(currencies))
| 9,761
|
def send_approved_mail(request, user):
"""
Sends an email to a user once their ``is_active`` status goes from
``False`` to ``True`` when the ``ACCOUNTS_APPROVAL_REQUIRED``
setting is ``True``.
"""
context = {"request": request, "user": user}
subject = subject_template("email/account_approved_subject.txt", context)
send_mail_template(subject, "email/account_approved",
settings.DEFAULT_FROM_EMAIL, user.email,
context=context)
| 9,762
|
def test_init_3(rate_sim_3):
"""Test initialization with true rank arguments."""
desired_n_sequence = 4
desired_max_timestep = 2
desired_stimulus_set = np.array(
[
[
[3, 1],
[3, 1]
],
[
[9, 12],
[0, 0]
],
[
[3, 4],
[3, 4]
],
[
[3, 4],
[3, 4]
]
], dtype=np.int32
)
desired_n_timestep = np.array([2, 1, 2, 2], dtype=np.int32)
assert rate_sim_3.n_sequence == desired_n_sequence
assert rate_sim_3.max_timestep == desired_max_timestep
np.testing.assert_array_equal(
desired_stimulus_set, rate_sim_3.stimulus_set
)
np.testing.assert_array_equal(
desired_n_timestep, rate_sim_3.n_timestep
)
| 9,763
|
def get_gene_names_conversion():
"""Get the compressed file containing two-way mappings of gene_id to gene_symbol"""
with gzip.open(
os.path.join(
current_app.config["FIVEX_DATA_DIR"], "gene.id.symbol.map.json.gz",
),
"rt",
) as f:
return json.loads(f.read())
| 9,764
|
def _process_input(data, context):
""" Pre-process request input before it is sent to TensorFlow Serving REST API
Args:
data (obj): the request data stream
context (Context): an object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
read_data = data.read()
# endpoint API
if context.request_content_type == 'application/json':
# read as numpy array
image_np = np.asarray(json.loads(read_data)).astype(np.dtype('uint8'))
image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT,INPUT_WIDTH)))
# batch transform of jpegs
elif context.request_content_type == 'application/x-image':
# load image from bytes and resize
image_from_bytes = Image.open(BytesIO(read_data)).convert('RGB')
image_from_bytes = image_from_bytes.resize((INPUT_HEIGHT,INPUT_WIDTH))
image_np = np.array(image_from_bytes)
# batch transform of tfrecord
elif context.request_content_type == 'application/x-tfexample':
example = tf.train.Example()
example.ParseFromString(read_data)
example_feature = MessageToDict(example.features)
image_encoded = str.encode(example_feature['feature']['image']['bytesList']['value'][0])
image_b64 = base64.decodebytes(image_encoded)
image_np = np.frombuffer(image_b64, dtype=np.dtype('uint8')).reshape(32,32,3)
image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT,INPUT_WIDTH)))
# raise error if content type is not supported
else:
print("")
_return_error(415, 'Unsupported content type "{}"'.format(
context.request_content_type or 'Unknown'))
# preprocess for resnet50
image_np = tf.keras.applications.resnet_v2.preprocess_input(image_np)
# json serialize
data_np_json = {"instances": [image_np.tolist()]}
data_np_json_serialized = json.dumps(data_np_json)
return data_np_json_serialized
| 9,765
|
def similarity_matrix_2d(X, Y, metric='cos'):
"""
Calculate similarity matrix
Parameters:
X: ndarray
input matrix 1
Y: ndarray
input matrix 2
    metric: str
        distance metric to use: 'cos', 'euclid' or 'mahal' (defaults to 'cos')
Returns:
result: ndarray
similarity matrix
"""
n_X = len(X)
n_Y = len(Y)
if metric == 'cos':
dist_func = cos_dist_2d
elif metric == 'euclid':
dist_func = euclid_dist_2d
elif metric == 'mahal':
dist_func = mahal_dist_2d
else:
dist_func = cos_dist_2d
    SM = [[dist_func(X[i], y) for y in Y] for i in range(n_X)]
    SM = sp.array(SM)
return SM
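# Usage sketch (hedged): cos_dist_2d / euclid_dist_2d / mahal_dist_2d are module helpers
# not shown here, and `sp` is assumed to be scipy (or numpy) imported above.
# X = sp.random.rand(5, 12)
# Y = sp.random.rand(7, 12)
# SM = similarity_matrix_2d(X, Y, metric='cos')   # -> array of shape (5, 7)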
| 9,766
|
def issue_list_with_tag(request, tag=None, sortorder=None):
"""
For a tag. display list of issues
"""
if tag:
stag = "\"%s\"" % tag
issues = Issue.tagged.with_any(stag)
tag_cloud = []
if issues:
tag_cloud = get_tagcloud_issues(issues)
issues = issues.filter(is_draft=False)
return issue_list(
request,
issues=issues,
sortorder=sortorder,
min_tv=1,
subset=True,
extra_context = {
'selected_tag' : tag,
'issue_tags' : tag_cloud,
'sort_url' : reverse('issue_with_tag', args=[tag,]),
})
else:
return issue_list(request)
| 9,767
|
def update_deal(id, deal_dict):
"""
Runs local validation on the given dict and gives passing ones to the API to update
"""
if utils.validate_deal_dict(utils.UPDATE, deal_dict, skip_id=True):
resp = utils.request(utils.UPDATE, 'deals', {'id': id}, data=deal_dict)
return utils.parse(resp)
else:
# validation failed but the exception was suppressed
pass
| 9,768
|
def plot_tilt_hist(series, ntile: str, group_name: str, extra_space: bool = True):
"""
    Plots the histogram of group tilts for a single ntile
    :param series: frame containing the avg tilts, columns: group, index: pd.Period
    :param ntile: the Ntile we are plotting for
    :param group_name: the name of the group
    :param extra_space: if True, draw on a 1x2 figure and return the unused second axis
    :return: None, or the spare matplotlib axis when extra_space is True
"""
if extra_space:
fig, ax = plt.subplots(1, 2, figsize=LARGE_FIGSIZE)
else:
        _, ax_single = plt.subplots(1, 1, figsize=(4.5, 4.5))
        ax = [ax_single]  # wrap in a list so the ax[0] indexing below works in both branches
title = 'Weight Relative to Universe' if 'Ntile' in group_name else 'Group Exposure'
plotter_frame = series.to_frame('weight')
plotter_frame['colors'] = [TILTS_COLOR_MAP(i) for i in np.linspace(0, 1, len(series))]
plotter_frame = plotter_frame.sort_values('weight')
ax[0].barh(plotter_frame.index.tolist(), plotter_frame['weight'].tolist(), align='center',
color=plotter_frame['colors'].tolist())
ax[0].set(title=f'{ntile}, {group_name}'.title(), ylabel='Group', xlabel=title)
ax[0].axvline(0, linestyle='-', color='black', lw=1)
if extra_space:
return ax[1]
plt.show()
| 9,769
|
def test_oef_serialization_description():
"""Testing the serialization of the OEF."""
foo_datamodel = DataModel("foo", [Attribute("bar", int, True, "A bar attribute.")])
desc = Description({"bar": 1}, data_model=foo_datamodel)
msg = OefSearchMessage(
performative=OefSearchMessage.Performative.REGISTER_SERVICE,
dialogue_reference=(str(1), ""),
service_description=desc,
)
msg_bytes = OefSearchSerializer().encode(msg)
assert len(msg_bytes) > 0
recovered_msg = OefSearchSerializer().decode(msg_bytes)
assert recovered_msg == msg
| 9,770
|
def get_constraints_for_x(cell, board):
"""
    Get the number of constraints for a given cell
    @param cell Class instance of Variable; a cell of the Sudoku board
    @param board 2D list of Variable instances representing the Sudoku board
@return Number of constraints
"""
nconstraints = 0
# Row
for cellj in board[cell.row][:cell.col]:
if cellj.get_domain_size() > 1:
nconstraints += 1
for cellj in board[cell.row][cell.col+1:]:
if cellj.get_domain_size() > 1:
nconstraints += 1
# Col
for irow in range(cell.row):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
for irow in range(cell.row+1, cell.max_domain_val):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
# .. This would not generalize to a new board, but leave for now
ibox_row = int(cell.row/3) * 3
ibox_col = int(cell.col/3) * 3
if board[ibox_row+1][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+1][ibox_col+2].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+2].get_domain_size() > 1:
nconstraints += 1
return nconstraints
| 9,771
|
def average(values):
"""Computes the arithmetic mean of a list of numbers.
    >>> print(average([20, 30, 70]))
40.0
"""
try:
return stats.mean(values)
except ZeroDivisionError:
return None
| 9,772
|
def get_subgraph_pos(G, pos):
""" Returns the filtered positions for subgraph G. If subgraph = original graph then pos will be returned.
Parameters
----------
G : nx.Graph
A graph object.
    pos : dict
A dictionary with nodes as keys and positions as values.
Example
-------
>>> pos = nx.spring_layout(G)
>>> subgraph_nodes = ['1','2','3']
>>> subgraph = G.subgraph(subgraph_nodes)
>>> subgraph_positions = get_subgraph_pos(subgraph,pos)
Returns
-------
dict
        Assuming positions were generated earlier for a larger graph with some layout algorithm,
        this function returns the positions filtered to the nodes of the subgraph.
"""
return {k: v for k, v in pos.items() if k in G.nodes()}
| 9,773
|
def _calculateWindowPosition(screenGeometry, iconGeometry, windowWidth, windowHeight):
"""
Calculate window position near-by the system tray using geometry of a system tray
and window geometry
@param screenGeometry: geometry of the screen where system tray is located
@type screenGeometry: QRect
@param iconGeometry: geometry of the system tray icon in screen coordinates
@type iconGeometry: QRect
@param windowWidth: width of the main window
@type windowWidth: int
@param windowHeight: height of the main window including header
@type windowHeight: int
@return: coordinates for main window positioning
@rtype: QPoint
"""
possibleWindowPositions = {
LEFT: {
'x': iconGeometry.x() + iconGeometry.width(),
'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
},
BOTTOM: {
'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
'y': iconGeometry.y() - windowHeight
},
RIGHT: {
'x': iconGeometry.x() - windowWidth,
'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
},
TOP: {
'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
'y': iconGeometry.y() + iconGeometry.height()
},
}
position = possibleWindowPositions[_guessTrayPosition(screenGeometry, iconGeometry)]
return QPoint(position['x'], position['y'])
| 9,774
|
def build_from_config(config: dict, name: str) -> HomingMotor:
"""Build the named HomingMotor from data found in config"""
def check_for_key(key, cfg):
if key not in cfg:
raise RuntimeError('Key "{}" for HomingMotor "{}" not found.'.format(key, name))
else:
return cfg[key]
if name not in config:
raise RuntimeError('Config for HomingMotor "{}" not found.'.format(name))
my_config = config[name]
inverted = check_for_key('inverted', my_config)
max_steps = check_for_key('max_steps', my_config)
name = check_for_key('name', my_config)
pulse_delay = float(check_for_key('pulse_delay', my_config))
sensor = check_for_key('sensor', my_config)
stepper = check_for_key('stepper', my_config)
dir_pin = int(check_for_key("dir_pin", stepper))
ms1_pin = int(check_for_key("ms1_pin", stepper))
ms2_pin = int(check_for_key("ms2_pin", stepper))
ms3_pin = int(check_for_key("ms3_pin", stepper))
step_pin = int(check_for_key("step_pin", stepper))
step_size = int(check_for_key("step_size", stepper))
input_pin = int(check_for_key('input_pin', sensor))
m = build(name, dir_pin, step_pin, ms1_pin, ms2_pin, ms3_pin, input_pin, max_steps, inverted, pulse_delay)
m.set_step_size(step_size)
# print('{} built from config OK'.format(m.get_name()))
return m
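# Illustrative config shape (hedged): the key names mirror the checks above, while the
# pin numbers and the "carriage" motor name are made up for the example.
# config = {
#     "carriage": {
#         "name": "carriage", "inverted": False, "max_steps": 900, "pulse_delay": 0.001,
#         "stepper": {"dir_pin": 5, "step_pin": 6, "ms1_pin": 13, "ms2_pin": 19,
#                     "ms3_pin": 26, "step_size": 8},
#         "sensor": {"input_pin": 21},
#     }
# }
# motor = build_from_config(config, "carriage")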
| 9,775
|
def arg_return_greetings(name):
"""
    Greeting function that takes a name argument and returns a greeting message.
    :param name: name of the person to greet
    :return: the greeting message string
"""
message = F"hello {name}"
return message
| 9,776
|
def basic_usage(card_id: str, parent: Any = None):
"""Basic usage of the application, minus the card recognition bits"""
data = pull_card_data(card_id)
qt_window = Card(parent, data)
qt_window.setWindowTitle("YGO Scanner")
qt_window.show()
return qt_window
| 9,777
|
async def test_bad_requests(request_path, request_params, aiohttp_client):
"""Test request paths that should be filtered."""
app = web.Application()
app.router.add_get("/{all:.*}", mock_handler)
setup_security_filter(app)
mock_api_client = await aiohttp_client(app)
resp = await mock_api_client.get(request_path, params=request_params)
assert resp.status == 400
| 9,778
|
def get_abi(
contract_sources: Dict[str, str],
allow_paths: Optional[str] = None,
remappings: Optional[list] = None,
silent: bool = True,
) -> Dict:
"""
Generate ABIs from contract interfaces.
Arguments
---------
contract_sources : dict
a dictionary in the form of {'path': "source code"}
allow_paths : str, optional
Compiler allowed filesystem import path
remappings : list, optional
List of solidity path remappings
silent : bool, optional
Disable verbose reporting
Returns
-------
dict
Compiled ABIs in the format `{'contractName': [ABI]}`
"""
final_output = {
Path(k).stem: {
"abi": json.loads(v),
"contractName": Path(k).stem,
"type": "interface",
"sha1": sha1(v.encode()).hexdigest(),
}
for k, v in contract_sources.items()
if Path(k).suffix == ".json"
}
for path, source in [(k, v) for k, v in contract_sources.items() if Path(k).suffix == ".vy"]:
input_json = generate_input_json({path: source}, language="Vyper")
input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]}
try:
output_json = compile_from_input_json(input_json, silent, allow_paths)
except Exception:
# vyper interfaces do not convert to ABIs
# https://github.com/vyperlang/vyper/issues/1944
continue
name = Path(path).stem
final_output[name] = {
"abi": output_json["contracts"][path][name]["abi"],
"contractName": name,
"type": "interface",
"sha1": sha1(contract_sources[path].encode()).hexdigest(),
}
solc_sources = {k: v for k, v in contract_sources.items() if Path(k).suffix == ".sol"}
if solc_sources:
compiler_targets = find_solc_versions(solc_sources, install_needed=True, silent=silent)
for version, path_list in compiler_targets.items():
to_compile = {k: v for k, v in contract_sources.items() if k in path_list}
set_solc_version(version)
input_json = generate_input_json(
to_compile,
language="Vyper" if version == "vyper" else "Solidity",
remappings=remappings,
)
input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]}
output_json = compile_from_input_json(input_json, silent, allow_paths)
output_json = {k: v for k, v in output_json["contracts"].items() if k in path_list}
final_output.update(
{
name: {
"abi": data["abi"],
"contractName": name,
"type": "interface",
"sha1": sha1(contract_sources[path].encode()).hexdigest(),
}
for path, v in output_json.items()
for name, data in v.items()
}
)
return final_output
| 9,779
|
def init_layer_linear_quant_params(quantizer, original_model, layer_name, init_mode=ClipMode.NONE,
init_method='Powell', eval_fn=None, search_clipping=False):
"""
Initializes a layer's linear quant parameters.
This is done to set the scipy.optimize.minimize initial guess.
Args:
quantizer (PostTrainLinearQuantizer): the quantizer, **after** calling prepare model.
original_model (nn.Module): the original, pre-quantized, model.
layer_name (str): the name of the layer.
init_mode (ClipMode or callable or str): the initialization mode.
If ClipMode, the initialization will be according to the respective ClipMode.
If callable - init_mode will be treated as a loss function between the activations pre and post-quantization,
and the initialization process will attempt to find the minimum of that loss function.
E.g. if l1_loss has been passed, the initialization vector will be
scale, zero_point = argmin_{s, zp} (l1_loss(layer(input), q_layer(input; s, zp)))
If str - the mode will be chosen from a list of options. The options are:
[NONE, AVG, LAPLACE, GAUSS, L1, L2 ,L3].
Defaults to ClipMode.NONE
init_method (str or callable): applicable only in the case of init_mode = 'L1/2/3' or callable.
chooses the minimization method for finding the local argmin_{s, zp}.
Defaults to 'Powell'
eval_fn: evaluation function for the model. Assumed it has a signature of the form
`eval_fn(model)->float`. this is the function to be minimized by the optimization algorithm.
applicable only in the case of init_mode = 'L1/2/3' or callable.
search_clipping (bool): if set, optimize clipping values, otherwise optimize scale factor
"""
denorm_layer_name = distiller.denormalize_module_name(quantizer.model, layer_name)
msglogger.info(denorm_layer_name)
if isinstance(init_mode, str):
init_mode = _init_mode_from_str(init_mode)
layer = dict(original_model.named_modules())[layer_name]
local_args, local_kwargs = quantizer.modules_processed_args[denorm_layer_name]
if isinstance(init_mode, ClipMode):
local_kwargs['clip_acts'] = init_mode
replace_fn = quantizer.replacement_factory.get(type(layer), quantizer.default_repalcement_fn)
quantized_layer = replace_fn(deepcopy(layer), *local_args, **local_kwargs).eval()
if not is_post_train_quant_wrapper(quantized_layer, False):
# the module wasn't quantized, nothing to do here
return
if callable(init_mode):
input_for_layer = get_input_for_layer(original_model, layer_name, eval_fn)
quantized_layer = optimize_for_layer(layer, quantized_layer, init_mode, input_for_layer, init_method,
search_clipping=search_clipping)
distiller.model_setattr(quantizer.model, denorm_layer_name, quantized_layer)
quantizer.model.eval()
| 9,780
|
def predict(w, b, X):
    """
    Predict whether the label is 0 or 1 using the learned logistic regression parameters (w, b).
    Parameters:
        w - weights, a numpy array of shape (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - data of shape (num_px * num_px * 3, number of examples)
    Returns:
        Y_prediction - a numpy array (vector) containing all predictions (0 | 1) for the images in X
    """
    m = X.shape[1]  # number of images
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    # compute the probability that a cat appears in the picture
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # convert the probability A[0, i] into an actual prediction Y_prediction[0, i]
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
    # sanity-check the output shape
    assert(Y_prediction.shape == (1, m))
return Y_prediction
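# Usage sketch (hedged): assumes numpy as np and the sigmoid() helper used above.
# w, b = np.zeros((4, 1)), 0.0
# X = np.random.rand(4, 3)      # 3 examples with 4 features each
# predict(w, b, X)              # -> array of shape (1, 3) filled with 0/1 labels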
| 9,781
|
def delta_C(parcels_old, parcels_new, normed=False):
"""
Compute the number of vertices that change connected component from
old parcellation to new parcellation.
Parameters:
- - - - -
parcels_old : dictionary
old connected component sample assignments
parcels_new : dictionary
new connected component sample assignments
Returns:
- - - -
deltaC : int
number of vertices that changed label
"""
new = set(map(len, parcels_new.values()))
old = set(map(len, parcels_old.values()))
deltaC = np.int32(list(new.difference(old))).sum()
if normed:
deltaC = deltaC / np.sum(list(new))
return deltaC
| 9,782
|
def increment(number):
"""Increases a given number by 1"""
return number + 1
| 9,783
|
def get_recent_added_companies(parser, token):
"""
    Gets any number of the most recently added companies.
Syntax::
{% get_recent_added_companies [limit] as [var_name] %}
"""
return base_tag(parser, token, RecentCreatedCompanies)
| 9,784
|
def search_data(templates, pols, matched_pols=False, reverse_nesting=False, flatten=False):
"""
Glob-parse data templates to search for data files.
Parameters
----------
templates : str or list
A glob-parsable search string, or list of such strings, with a {pol}
spot for string formatting. Ex. ["zen.even.{pol}.LST.*.HH.uv"]
pols : str or list
A polarization string, or list of polarization strings, to search for.
Ex. ["xx", "yy"]
matched_pols : boolean
If True, only use datafiles that are present for all polarizations.
reverse_nesting : boolean
If True, flip the nesting of datafiles to be datafile-polarization.
By default, the output is polarization-datafile.
flatten : boolean
If True, flatten the nested output datafiles to a single hierarchy.
Returns
-------
datafiles : list
A nested list of paths to datafiles. By default, the structure is
polarization-datafile nesting. If reverse_nesting, then the structure
is flipped to datafile-polarization structure.
datapols : list
List of polarizations for each file in datafile
"""
# type check
if isinstance(templates, (str, np.str)):
templates = [templates]
if isinstance(pols, (str, np.str, np.integer, int)):
pols = [pols]
# search for datafiles
datafiles = []
datapols = []
for pol in pols:
dps = []
dfs = []
for template in templates:
_dfs = glob.glob(template.format(pol=pol))
if len(_dfs) > 0:
dfs.extend(_dfs)
dps.extend([pol for df in _dfs])
if len(dfs) > 0:
datafiles.append(sorted(dfs))
datapols.append(dps)
# get unique files
allfiles = [item for sublist in datafiles for item in sublist]
allpols = [item for sublist in datapols for item in sublist]
unique_files = set()
for _file in allfiles:
for pol in pols:
if ".{pol}.".format(pol=pol) in _file:
unique_files.update(set([_file.replace(".{pol}.".format(pol=pol), ".{pol}.")]))
break
unique_files = sorted(unique_files)
# check for unique files with all pols
if matched_pols:
Npols = len(pols)
_templates = []
for _file in unique_files:
goodfile = True
for pol in pols:
if _file.format(pol=pol) not in allfiles:
goodfile = False
if goodfile:
_templates.append(_file)
# achieve goal by calling search_data with new _templates that are polarization matched
datafiles, datapols = search_data(_templates, pols, matched_pols=False, reverse_nesting=False)
# reverse nesting if desired
if reverse_nesting:
datafiles = []
datapols = []
for _file in unique_files:
dfs = []
dps = []
for pol in pols:
df = _file.format(pol=pol)
if df in allfiles:
dfs.append(df)
dps.append(pol)
datafiles.append(dfs)
datapols.append(dps)
# flatten
if flatten:
datafiles = [item for sublist in datafiles for item in sublist]
datapols = [item for sublist in datapols for item in sublist]
return datafiles, datapols
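# Usage sketch (hedged): the template below is the one from the docstring example; since
# templates are expanded with glob, only files that actually exist on disk are returned.
# datafiles, datapols = search_data(["zen.even.{pol}.LST.*.HH.uv"], ["xx", "yy"],
#                                   matched_pols=True)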
| 9,785
|
def normalize_word(word):
"""
:type word: str
:rtype: str
"""
acronym_pattern = r'^(?:[A-Z]\.)+$'
if re.match(pattern=acronym_pattern, string=word):
word = word.replace('.', '')
if word.lower() in _REPLACE_WORDS:
replacement = _REPLACE_WORDS[word.lower()]
if word.islower():
return replacement.lower()
elif word.isupper():
return replacement.upper()
elif word[0].isupper() and word[1:].islower():
return replacement.capitalize()
else:
return replacement
else:
return word
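# Examples (hedged): the acronym branch always strips periods; whether a word is replaced
# depends on the module-level _REPLACE_WORDS mapping, which is not shown here.
# normalize_word("U.S.A.")   # -> "USA" (assuming "usa" is not a key in _REPLACE_WORDS)
# normalize_word("hello")    # -> "hello" unless "hello" has a replacement configured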
| 9,786
|
def _ggm_qsize_prob_gt_0_whitt_5_2(arr_rate, svc_rate, c, ca2, cs2):
"""
Return the approximate P(Q>0) in G/G/m queue using Whitt's simple
approximation involving rho and P(W>0).
This approximation is exact for M/M/m and has strong theoretical
support for GI/M/m. It's described by Whitt as "crude" but is
"a useful quick approximation".
See Section 5 of Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161. In
particular, this is Equation 5.2.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
    ca2 : float
        squared coefficient of variation for the interarrival time distribution
    cs2 : float
        squared coefficient of variation for the service time distribution
Returns
-------
float
~ P(Q > 0)
"""
rho = arr_rate / (svc_rate * float(c))
pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)
prob_gt_0 = rho * pdelay
return prob_gt_0
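# Worked sketch (hedged): for an M/M/4 queue (ca2 = cs2 = 1) with arrival rate 3 and
# service rate 1, rho = 0.75 and P(Q > 0) = rho * P(W > 0), so the returned value is
# 0.75 times whatever ggm_prob_wait_whitt() yields for the delay probability.
# p_q_gt_0 = _ggm_qsize_prob_gt_0_whitt_5_2(arr_rate=3, svc_rate=1, c=4, ca2=1, cs2=1)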
| 9,787
|
def pds3_label_gen_date(file):
"""Returns the creation date of a given PDS3 label.
    :param file: File path
    :type file: str
:return: Creation date
:rtype: str
"""
generation_date = "N/A"
with open(file, "r") as f:
for line in f:
if "PRODUCT_CREATION_TIME" in line:
generation_date = line.split("=")[1].strip()
return generation_date
| 9,788
|
def harmony(*args):
"""
    Takes an arbitrary number of numbers and returns their harmonic
    mean. Calculation is done with the formula:
    number_of_args / (1/item1 + 1/item2 + ...)
Args:
*args (tuple): number of arguments with a type: float, integer
Returns:
        float: harmonic mean value
"""
result = 0
if 0 in args:
return 0.0
for item in args:
result += 1 / item
return len(args) / result
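# Quick sanity check (assumes the harmony() definition above is in scope):
# the harmonic mean of 2 and 3 is 2 / (1/2 + 1/3) = 2.4.
assert abs(harmony(2.0, 3.0) - 2.4) < 1e-9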
| 9,789
|
def node_gdf_from_graph(G, crs = 'epsg:4326', attr_list = None, geometry_tag = 'geometry', xCol='x', yCol='y'):
"""
Function for generating GeoDataFrame from Graph
:param G: a graph object G
:param crs: projection of format {'init' :'epsg:4326'}. Defaults to WGS84. note: here we are defining the crs of the input geometry - we do NOT reproject to this crs. To reproject, consider using geopandas' to_crs method on the returned gdf.
:param attr_list: list of the keys which you want to be moved over to the GeoDataFrame, if not all. Defaults to None, which will move all.
:param geometry_tag: specify geometry attribute of graph, default 'geometry'
:param xCol: if no shapely geometry but Longitude present, assign here
:param yCol: if no shapely geometry but Latitude present, assign here
:returns: a geodataframe of the node objects in the graph
"""
nodes = []
keys = []
# finds all of the attributes
if attr_list is None:
for u, data in G.nodes(data = True):
keys.append(list(data.keys()))
flatten = lambda l: [item for sublist in l for item in sublist]
attr_list = list(set(flatten(keys)))
if geometry_tag in attr_list:
non_geom_attr_list = attr_list
non_geom_attr_list.remove(geometry_tag)
else:
non_geom_attr_list = attr_list
if 'node_ID' in attr_list:
non_geom_attr_list = attr_list
non_geom_attr_list.remove('node_ID')
z = 0
for u, data in G.nodes(data = True):
if geometry_tag not in attr_list and xCol in attr_list and yCol in attr_list :
try:
new_column_info = {
'node_ID': u,
'geometry': Point(data[xCol], data[yCol]),
'x': data[xCol],
'y': data[yCol]}
except:
print('Skipped due to missing geometry data:',(u, data))
else:
try:
new_column_info = {
'node_ID': u,
'geometry': data[geometry_tag],
'x':data[geometry_tag].x,
'y':data[geometry_tag].y}
except:
print((u, data))
for i in non_geom_attr_list:
try:
new_column_info[i] = data[i]
except:
pass
nodes.append(new_column_info)
z += 1
nodes_df = pd.DataFrame(nodes)
nodes_df = nodes_df[['node_ID', *non_geom_attr_list, geometry_tag]]
nodes_df = nodes_df.drop_duplicates(subset = ['node_ID'], keep = 'first')
nodes_gdf = gpd.GeoDataFrame(nodes_df, geometry = nodes_df.geometry, crs = crs)
return nodes_gdf
| 9,790
|
def views():
""" Used for the creation of Orientation objects with
`Orientations.from_view_up`
"""
return [[1, 0, 0], [2, 0, 0], [-1, 0, 0]]
| 9,791
|
def savePlot(widget, default_file_type, old_file_path=None):
"""Saves a plot in the specified file format.
:param widget: graphics widget.
:param default_file_type: default save file type.
:param old_file_path: file path from a previous save operation.
:return: returns file path,
returns empty string or old file path when
user cancels save.
"""
gr_file_types = {**gr.PRINT_TYPE, **gr.GRAPHIC_TYPE}
save_types = ";;".join(sorted(set(gr_file_types.values())))
default_file = 'untitled'
if old_file_path:
default_file = path.splitext(old_file_path)[0]
file_path, _ = QFileDialog.getSaveFileName(None, 'Save as...',
default_file, filter=save_types,
initialFilter=default_file_type)
if not file_path:
return "" if not old_file_path else old_file_path
file_ext = path.splitext(file_path)[1]
if file_ext.lower()[1:] in gr_file_types:
widget.save(file_path)
else:
raise TypeError("Unsupported file format {}".format(file_ext))
return file_path
| 9,792
|
def check_closed(f):
"""Decorator that checks if connection/cursor is closed."""
def g(self, *args, **kwargs):
if self.closed:
raise exceptions.Error(f'{self.__class__.__name__} already closed')
return f(self, *args, **kwargs)
return g
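# Usage sketch (hedged, names are hypothetical): the decorator only requires that the
# wrapped object expose a boolean `closed` attribute and that `exceptions.Error` exists.
# class Cursor:
#     def __init__(self):
#         self.closed = False
#
#     @check_closed
#     def fetchall(self):
#         return []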
| 9,793
|
def generate_glove_vecs(revs):
"""
This function generates GloVe vectors based on the training data. This function
can be more optimized in future.
:return: A dictionary containing words as keys and their GloVe vectors as the corresponding values.
:rtype: dict
"""
os.chdir("GloVe")
subprocess.call(['python3', 'setup.py', 'cythonize'])
os.system("pip3 install -e .")
os.chdir("..")
example.main_ex(revs)
word_vectors = pickle.load(open("glove.model", "rb"))["words_and_vectors"]
return word_vectors
| 9,794
|
def test_valid_nym_with_fees(fees,
helpers,
nodeSetWithIntegratedTokenPlugin,
sdk_wallet_steward,
address_main,
sdk_pool_handle,
sdk_wallet_trustee,
mint_tokens,
looper):
"""
Steps:
1. Checks that nym with fees will be rejected, because fees are not set
2. Send auth_rule txn with fees in metadata
3. Resend nym with fees and check, that it will be stored
"""
current_amount = get_amount_from_token_txn(mint_tokens)
seq_no = 1
with pytest.raises(RequestRejectedException, match="Fees are not required for this txn type"):
current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, {FEES: fees}, seq_no, looper, [address_main],
current_amount)
helpers.general.do_set_fees(fees, fill_auth_map=False)
original_action = add_new_identity_owner
original_constraint = auth_map.get(add_new_identity_owner.get_action_id())
original_constraint.set_metadata({'fees': NYM_FEES_ALIAS})
sdk_send_and_check_auth_rule_request(looper,
sdk_pool_handle,
sdk_wallet_trustee,
auth_action=ADD_PREFIX, auth_type=NYM,
field=original_action.field, new_value=original_action.value,
old_value=None, constraint=original_constraint.as_dict)
current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, {FEES: fees}, seq_no, looper, [address_main],
current_amount)
ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)
| 9,795
|
def poll_input_files():
"""
Polls the specified input folder for any new files. This is done by tracking the difference
in results. This could be faster by leveraging the WIN32 FileChangeAPI, but this code aims to
be more cross-platform and easier to digest.
"""
global POLL_DELAY
global INITIAL_FILE_LIST
    newFiles = dict([(f, None) for f in os.listdir(INPUT_FILE_LOCATION)])
    added = [f for f in newFiles if f not in INITIAL_FILE_LIST]
if added:
POLL_DELAY = 0.25
listOfAdded = ", ".join(added)
logging.info(f"Found: {listOfAdded}")
compute_results(added[0])
INITIAL_FILE_LIST = newFiles
| 9,796
|
def get_diff_comparison_score(git_commit, review_url, git_checkout_path,
cc): # pragma: no cover
"""Reads the diff for a specified commit
Args:
git_commit(str): a commit hash
review_url(str): a rietveld review url
git_checkout_path(str): path to a local git checkout
cc: a cursor for the Cloud SQL connection
Return:
score(float): a score in [0,1] where 0 is no similarity and 1 is a perfect
match
"""
git_diff = get_git_diff(git_commit, git_checkout_path)
comparable_git_diff = [x for x in git_diff if x.startswith('+') \
or x.startswith('-')]
rietveld_diff = get_rietveld_diff(review_url, cc, git_checkout_path)
comparable_rietveld_diff = [x for x in rietveld_diff if x.startswith('+') \
or x.startswith('-')]
matching = list(set(comparable_git_diff) - set(comparable_rietveld_diff))
total = max(len(comparable_git_diff), len(comparable_rietveld_diff))
score = 1 - float(len(matching)) / total if total != 0 else 0
return score
| 9,797
|
def pack(pieces=()):
"""
Join a sequence of strings together.
:param list pieces: list of strings
:rtype: bytes
"""
return b''.join(pieces)
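# Example (assumes the pack() definition above): joining byte fragments into one payload.
assert pack([b'\x01\x02', b'\x03']) == b'\x01\x02\x03'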
| 9,798
|
def list_programs(plugin, item_id, **kwargs):
"""
Build programs listing
- Les feux de l'amour
- ...
"""
resp = urlquick.get(URL_ROOT)
root = resp.parse()
for program_datas in root.iterfind(".//a"):
if program_datas.get('href'):
if 'emission/' in program_datas.get('href'):
program_title = program_datas.text
if URL_ROOT in program_datas.get('href'):
program_url = program_datas.get('href')
else:
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_title
item.set_callback(list_videos,
item_id=item_id,
program_url=program_url)
item_post_treatment(item)
yield item
| 9,799
|