content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def is_linear(reg_type):
    """Return True when *reg_type* names the linear regression type."""
    return reg_type == "linear"
def nontemp_mask(reference, transformer, cassette_ops):
    """
    Build a per-base boolean mask for the query sequence derived from *reference*.

    :param reference: reference sequence (string or list of bases)
    :param transformer: iterable of edit operations ``(position, kind, payload)``
        where kind is "S" (substitution), "I" (insertion) or "D" (deletion)
    :param cassette_ops: collection of operations considered cassette-derived
    :return: list of booleans, one per base of the transformed query; True at
        positions produced by an operation found in *cassette_ops*.
    """
    query = list(reference)
    mask = [False] * len(query)
    shift = 0  # cumulative index offset caused by insertions/deletions
    for op in sorted(transformer, key=lambda item: item[0]):
        idx = op[0] + shift
        kind = op[1]
        from_cassette = op in cassette_ops
        if kind == "S":
            query[idx] = op[2]
            mask[idx] = from_cassette
        elif kind == "I":
            inserted = list(op[2])
            query[idx:idx] = inserted
            mask[idx:idx] = [from_cassette] * len(inserted)
            shift += len(inserted)
        elif kind == "D":
            del query[idx:idx + op[2]]
            del mask[idx:idx + op[2]]
            shift -= op[2]
    return mask
from typing import OrderedDict
def process_odin_args(args):
    """
    Collect the command-line options relevant to the ODIN stage of the flow.

    ``adder_type`` is always recorded; the boolean flags are recorded only
    when enabled, preserving the original insertion order.
    """
    odin_args = OrderedDict()
    odin_args["adder_type"] = args.adder_type
    for flag in ("adder_cin_global", "disable_odin_xml", "use_odin_simulation"):
        if getattr(args, flag):
            odin_args[flag] = True
    return odin_args
import click
import sys
def get_network_for_cli(ctx, network_name):
    """
    Look up *network_name* via the CLI client stored on the click context.

    On success returns the network object; otherwise prints an error
    message and exits the process with status 1.
    """
    client = ctx.obj['CLIENT']
    network = client.network.get(network_name)
    if network:
        return network
    click.echo("Could not find network: %s" % network_name)
    sys.exit(1)
def apply_with_return_error(args):
    """
    Call ``args[0]`` with the remaining items of *args* as its arguments.

    :see: https://github.com/ionelmc/python-tblib/issues/4
    """
    func, *call_args = args
    return func(*call_args)
def format_message_response(params):
    """
    Format the automatic response sent back to the user's chat box.

    *params* is None when the system cannot process the user's message (or is
    not confident enough); otherwise it is a (question, response, command)
    triple: the question being answered, the answer, and a suggested command.
    """
    if params is None:
        return 'Thank you for your question. Our staff will get back to you as soon as we can.'
    question, response, command = params
    lines = [
        'This is the question we are trying to answer: ' + question,
        response,
        'You can try to run the following command: ',
        command,
    ]
    return '\n'.join(lines)
import warnings
def select_unique(things):
    """Return the first member of *things*, warning if any later member differs.

    Only the first mismatch is reported; the first element is returned
    regardless of whether the check passed.
    """
    first, *rest = things
    for candidate in rest:
        if candidate != first:
            warnings.warn("select_unique may have failed: {} is not the same as {}"
                          .format(first, candidate))
            break
    return first
import json
def itunes_autorenew_response3():
    """Sample iTunes auto-renewable-subscription receipt fixture.

    Contributed by François Dupayrat @FrancoisDupayrat.

    Returns the parsed dict of an Apple verifyReceipt-style response
    (``status`` 0 = valid, ``auto_renew_status`` 1 = renewal enabled).
    Sensitive identifiers are masked with asterisks; timestamp fields are
    millisecond-epoch strings alongside their formatted counterparts.
    """
    # The literal below is intentionally kept verbatim (raw string) so the
    # fixture matches the original captured payload exactly.
    return json.loads(r'''{
    "auto_renew_status": 1,
    "status": 0,
    "auto_renew_product_id": "******************************",
    "receipt":{
        "original_purchase_date_pst":"2017-06-28 07:31:51 America/Los_Angeles",
        "unique_identifier":"******************************",
        "original_transaction_id":"******************************",
        "expires_date":"1506524970000",
        "transaction_id":"******************************",
        "quantity":"1",
        "product_id":"******************************",
        "item_id":"******************************",
        "bid":"******************************",
        "unique_vendor_identifier":"******************************",
        "web_order_line_item_id":"******************************",
        "bvrs":"1.1.6",
        "expires_date_formatted":"2017-09-27 15:09:30 Etc/GMT",
        "purchase_date":"2017-09-27 15:04:30 Etc/GMT",
        "purchase_date_ms":"1506524670000",
        "expires_date_formatted_pst":"2017-09-27 08:09:30 America/Los_Angeles",
        "purchase_date_pst":"2017-09-27 08:04:30 America/Los_Angeles",
        "original_purchase_date":"2017-06-28 14:31:51 Etc/GMT",
        "original_purchase_date_ms":"1498660311000"
    },
    "latest_receipt_info":{
        "original_purchase_date_pst":"2017-06-28 07:31:51 America/Los_Angeles",
        "unique_identifier":"******************************",
        "original_transaction_id":"******************************",
        "expires_date":"******************************",
        "transaction_id":"******************************",
        "quantity":"1",
        "product_id":"******************************",
        "item_id":"******************************",
        "bid":"******************************",
        "unique_vendor_identifier":"******************************",
        "web_order_line_item_id":"******************************",
        "bvrs":"1.1.6",
        "expires_date_formatted":"2017-09-27 15:09:30 Etc/GMT",
        "purchase_date":"2017-09-27 15:04:30 Etc/GMT",
        "purchase_date_ms":"1506524670000",
        "expires_date_formatted_pst":"2017-09-27 08:09:30 America/Los_Angeles",
        "purchase_date_pst":"2017-09-27 08:04:30 America/Los_Angeles",
        "original_purchase_date":"2017-06-28 14:31:51 Etc/GMT",
        "original_purchase_date_ms":"1498660311000"
    },
    "latest_receipt":"******************************"
    }''')
def _split_image(image, axis='Horizontal'):
    """Splits an image into two halves and returns each half.
    Parameters
    ----------
    image : np.ndarray
        Image to split in half.
    axis : string (default = 'Horizontal')
        Which axis to split the image. If 'Horizontal', upper and lower halves
        of the specified image are returned. If 'Vertical', left and right
        halves of the specified image are returned.
    Returns
    -------
    half1, half2 : np.ndarrays of type np.uint8
        Image halves, either upper and lower or left and right.
    """
    nrows, ncols = image.shape
    if axis == 'Horizontal':
        # Use floor division: nrows / 2 is a float in Python 3 and raises
        # TypeError when used as a slice index.
        half1 = image[:nrows // 2, :]  # upper half
        half2 = image[nrows // 2:, :]  # lower half
        return half1, half2
    half1 = image[:, :ncols // 2]  # left half
    half2 = image[:, ncols // 2:]  # right half
    return half1, half2
import time
def TestDelay(duration):
    """Sleep for a fixed amount of time.
    @type duration: float
    @param duration: the sleep duration, in seconds
    @rtype: (boolean, str)
    @return: False for negative value, and an accompanying error message;
        True otherwise (and msg is None)
    """
    if duration >= 0:
        time.sleep(duration)
        return True, None
    return False, "Invalid sleep duration"
import torch
def iou_loss(confidence, label, reduction='sum', weight=1.0, eps=1e-6):
    """IoU loss: negative log of the IoU between predicted and target boxes.

    Args:
        confidence (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
            shape (n, 4).
        label (Tensor): Corresponding gt bboxes, shape (n, 4).
        reduction (str): 'mean' or 'sum'; any other value raises ValueError.
        weight (float): scalar multiplier applied to the per-box loss.
        eps (float): Eps to avoid log(0).

    Returns:
        Tensor: reduced scalar loss; when either input is empty, an
        uninitialized (n, 1) tensor of the input's type is returned instead.
    """
    rows = confidence.size(0)
    cols = label.size(0)
    # Inputs must be paired one-to-one (same number of boxes).
    assert rows == cols
    if rows * cols == 0:
        # Empty input: nothing to reduce; mirror mmdet-style early return.
        return confidence.new(rows, 1)
    # Intersection corners: max of top-lefts, min of bottom-rights.
    lt = torch.max(confidence[:, :2], label[:, :2]) # [rows, 2]
    rb = torch.min(confidence[:, 2:], label[:, 2:]) # [rows, 2]
    # "+ 1" follows the inclusive integer-coordinate box convention.
    wh = (rb - lt + 1).clamp(min=0) # [rows, 2]
    overlap = wh[:, 0] * wh[:, 1]
    area1 = (confidence[:, 2] - confidence[:, 0] + 1) * (
        confidence[:, 3] - confidence[:, 1] + 1)
    area2 = (label[:, 2] - label[:, 0] + 1) * (
        label[:, 3] - label[:, 1] + 1)
    ious = overlap / (area1 + area2 - overlap)
    # Clamp before log so zero-overlap pairs give log(eps), not -inf.
    safe_ious = ious.clamp(min=eps)
    loss = -safe_ious.log() * weight
    if reduction == 'mean':
        return loss.mean()
    elif reduction == 'sum':
        return loss.sum()
    else:
        raise ValueError('reduction can only be `mean` or `sum`')
from typing import Awaitable
from typing import Tuple
from typing import Any
import asyncio
async def async_tuple(*coros: Awaitable) -> Tuple[Any, ...]:
    """Await several awaitables concurrently and return their results as a tuple.

    Example: result_one, result_two = await async_tuple(gen_one(), gen_two())
    """
    results = await asyncio.gather(*coros)
    return tuple(results)
import json
def get_number_dict():
    """
    Load the mapping from UVA problem id to problem number from api/p.json.

    See the api/p section of https://uhunt.onlinejudge.org/api for the file
    format: each entry is a list whose first element is the problem id and
    whose second element is the problem number.

    :return: dict mapping problem id -> problem number
    """
    with open("api/p.json", 'r', encoding="utf-8") as handle:
        entries = json.load(handle)
    return {entry[0]: entry[1] for entry in entries}
import re
def tfstate_resources(tfstate, *resource_regexes):
    """
    Return the primary attributes of matching resources in a .tfstate mapping.

    :param tfstate: parsed terraform state (``modules`` -> ``resources``)
    :param resource_regexes: regex patterns searched against resource names
    :return: dict mapping each matching pattern to that resource's
        ``primary.attributes`` dict (the last match per pattern wins)
    """
    result = {}
    for module in tfstate['modules']:
        # dict.iteritems() was Python 2 only; items() works on 2 and 3.
        for res_name, res_content in module['resources'].items():
            for regex in resource_regexes:
                if re.search(regex, res_name):
                    result.update({
                        regex: res_content['primary']['attributes']
                    })
    return result
def matix_contains(m, elem):
    """Return (row, col) of the first occurrence of *elem* in matrix *m*,
    or (-1, -1) when it is absent."""
    for row_index, row_values in enumerate(m):
        if elem in row_values:
            return row_index, row_values.index(elem)
    return -1, -1
def remove_additional_mers(dict_):
    """Delete (in place) every entry whose "type" is "add"; return the same dict."""
    doomed = [key for key, value in dict_.items() if value["type"] == "add"]
    for key in doomed:
        del dict_[key]
    return dict_
import os
def _testpath(file):
    """Return the full path of *file* inside this module's ssl_db test directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "ssl_db/%s" % file)
def plusLongSuffixePrefixe(motif) :
    """Return the start index of the longest proper suffix of *motif* that is
    also a prefix (the whole word is excluded).  Falls back to 1 when no such
    suffix exists (worst case of the good-suffix rule)."""
    m = len(motif)
    # Smaller p means a longer suffix, so the first hit is the longest one.
    for p in range(1, m):
        if motif[p:] == motif[:m - p]:
            return p
    return 1
def tidy_input_string(s):
    """Return string made lowercase and with all whitespace removed."""
    compact = ''.join(s.split())
    return compact.lower()
def coordinates_to_json(coordinates):
    """
    Serialize mosaic-image coordinates into a JSON records string.

    Expects six columns, which are relabeled to page/y/x/height/width/pid;
    an empty frame serializes to '{}'.
    """
    if coordinates.empty:
        return '{}'
    frame = coordinates.copy(deep=False)
    frame.columns = ['page','y','x','height','width','pid']
    return frame.to_json(orient='records')
def zero_corner(experiment_definition):
    """Map each parameter to its "zero corner" value: the minimum bound for
    numeric (int/double) parameters, the first category name for the rest."""
    corner = {}
    for param in experiment_definition['parameters']:
        if param['type'] in ['int', 'double']:
            corner[param['name']] = param['bounds']['min']
        else:
            corner[param['name']] = param['categorical_values'][0]['name']
    return corner
import typing
import re
def string_split_newlines(string: str) -> typing.List[str]:
    """Split a string into whitespace-separated tokens while keeping each
    newline character as its own token.

    Args:
        string (str): The string to split.
    Returns:
        typing.List[str]: Tokens and newline markers in original order.
    """
    token_pattern = re.compile(r"\S+|\n")
    return token_pattern.findall(string)
def get_intersection_range(a0, a1, b0, b1):
    """Return the intersection of [a0, a1] and [b0, b1] as (start, end).

    Returns (0, 0) when the intervals do not overlap; merely touching
    endpoints count as no overlap (strict comparisons)."""
    assert a0 <= a1
    assert b0 <= b1
    if a0 >= b0 and a1 <= b1:   # a lies inside b
        return a0, a1
    if a0 < b0 and b1 < a1:     # b lies inside a
        return b0, b1
    if a0 < b0 < a1:            # a overhangs on the left
        return b0, a1
    if a0 < b1 < a1:            # a overhangs on the right
        return a0, b1
    return 0, 0                 # disjoint
def unit_vector(vector):
    """
    Calculate a unit vector in the same direction as the input vector.
    Args:
        vector (list): The input vector (must be non-zero).
    Returns:
        list: The unit vector.
    """
    length = sum(v * v for v in vector) ** 0.5
    return [v / length for v in vector]
def host_match(c_host, bw_host):
    """Check whether cookie host *c_host* matches black/white-list entry *bw_host*.

    Exact matches succeed; an entry starting with '.' matches any host ending
    with that suffix.
    """
    if c_host == bw_host:
        return True
    return bw_host.startswith('.') and c_host.endswith(bw_host)
def _show_capture_callback(x):
    """Validate the passed options for showing captured output."""
    if x in [None, "None", "none"]:
        return None
    if x in ["no", "stdout", "stderr", "all"]:
        return x
    raise ValueError(
        "'show_capture' must be one of ['no', 'stdout', 'stderr', 'all']."
    )
def topBrandsandCountries(df, countries_unique):
    """
    Count device records for "top" countries (>500 rows) and break down the
    three top brands (Apple, Samsung, Huawei) per such country.

    Returns (top_countries, apple, samsung, huawei) where top_countries maps
    country -> record count and the three lists hold per-country counts in
    top_countries' key order.

    >>> import pandas as pd
    >>> df = pd.read_csv(r'../data.csv')
    >>> topBrandsandCountries(df, ['Angola', 'Aruba', 'Belize', 'Benin', 'Bhutan', 'Brazil', 'Chad', 'Chile', 'China', 'Cuba', 'Egypt', 'Fiji', 'Gabon', 'Ghana', 'Guam', 'Haiti', 'India', 'Iran', 'Iraq', 'Italy', 'Japan', 'Kenya', 'Laos', 'Libya', 'Macao', 'Mali', 'Malta', 'Nepal', 'Niger', 'Oman', 'Peru', 'Qatar', 'Spain', 'Sudan', 'Syria', 'Togo', 'Yemen'])
    ({'Brazil': 2245, 'Chile': 848, 'China': 2390, 'Egypt': 977, 'India': 3855, 'Italy': 1996, 'Spain': 1172}, [72, 70, 1478, 50, 214, 438, 205], [1161, 377, 30, 338, 1041, 708, 351], [1, 192, 273, 231, 114, 606, 254])
    """
    top_countries = {}
    for x in countries_unique:
        # Countries with more than 500 device records qualify as "top".
        if df[df.geo_country==x].device_brand_name.count() > 500:
            top_countries[x] = df[df.geo_country==x].device_brand_name.count()
    top_3_brands = ['Apple','Samsung','Huawei']
    apple = []
    samsung = []
    huawei = []
    for x in top_countries.keys():
        # NOTE(review): chained indexing df[mask1][mask2] relies on pandas
        # index alignment (the second mask comes from the full frame) and
        # emits UserWarning on recent pandas; presumably equivalent to a
        # combined `&` mask here — verify before refactoring.
        apple.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[0]].device_brand_name.count())
        samsung.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[1]].device_brand_name.count())
        huawei.append(df[df.geo_country==x][df.device_brand_name==top_3_brands[2]].device_brand_name.count())
    return top_countries,apple,samsung,huawei
def font(unit, size, family=None, style=None):
    """Font setter for all the widgets.

    Returns a (family, pixel-size, style) tuple, substituting the defaults
    'Comic Sans MS' and 'bold' for whichever of family/style is missing.
    """
    default_family, base_size, default_style = 'Comic Sans MS', 1, 'bold'
    rem = int(unit / 100 * (size * 0.05 + base_size))
    chosen_family = family if family else default_family
    chosen_style = style if style else default_style
    return chosen_family, rem, chosen_style
def is_num_present(word):
    """
    Check if any number present in Given String
    params: word - string
    returns: 0 or 1
    """
    return int(any(ch.isdigit() for ch in word))
def p_colons(p, colons):
    """colons : COLON
    colons : DOUBLECOLON"""
    # NOTE: the docstring above is a yacc/PLY-style grammar production and is
    # read by the parser generator — do not reword it.  The rule itself just
    # passes the matched colon token through unchanged.
    return colons
def create_ngram_sentence_list(sentence_list, n_grams):
    """Return, for each sentence, the list of its n-gram tuples."""
    result = []
    for tokens in sentence_list:
        limit = len(tokens) - n_grams + 1
        result.append([tuple(tokens[start:start + n_grams]) for start in range(limit)])
    return result
def sort_places(stat_table, route):
    """Add a display_name (translated for English routes) to each row and
    sort the table by (untranslated) name."""
    # Work in progress! Waiting for translation list.
    # Or should this be part of the data instead??
    place_translations = {
        "Göteborg": "Gothenburg"
    }
    use_english = "place" in route.rule
    for entry in stat_table:
        if use_english:
            entry["display_name"] = place_translations.get(entry["name"], entry["name"])
        else:
            entry["display_name"] = entry["name"]
    stat_table.sort(key=lambda item: item.get("name").strip())
    return stat_table
def PHT(x, y):
    """Pseudo-Hadamard transform"""
    assert type(x) == int
    assert type(y) == int
    # (2x + y) mod 256 == (x + (x + y) mod 256) mod 256
    total = (x + y) % 256
    return (x + total) % 256, total
import torch
def squash(x, eps = 1e-5):
"""
Squashes each vector to ball of radius 1 - \eps
:param x: (batch x dimension)
:return: (batch x dimension)
"""
norm = torch.norm(x, p=2, dim=-1, keepdim=True)
unit = x / norm
scale = norm**2/(1 + norm**2) - eps
x = scale * unit
# norm_2 = torch.sum(x**2, dim=-1, keepdim=True)
# unit = x / torch.sqrt(norm_2)
# scale = norm_2 / (1.0 + norm_2) # scale \in [0, 1 - eps]
# x = scale * unit - eps # DO NOT DO THIS. it will make magnitude of vector consisting of all negatives larger
return x | 5223c37461d45111ce3a0c0216239e1d6c0a8c96 | 48,524 |
def get_client_msg(message, client_id):
    """Return the client-scoped message, formatted as "<message>@<client_id>"."""
    return '@'.join((message, client_id))
def order_people_heights(heights, in_fronts):
    """
    You are given a list of people. Each person has a height and how many
    people before them are taller.
    heights = [5, 3, 2, 6, 1, 4]
    people_in_front = [0, 1, 2, 0, 3, 2]
    Order the heights so every person's taller-people-in-front constraint
    holds, e.g. the example yields [5, 3, 2, 1, 6, 4]
    (5 has 0 taller before it, 3 has 1, 2 has 2, 1 has 3, 6 has 0, 4 has 2).

    Heights are assumed distinct.
    """
    # Place people from shortest to tallest: everyone still unplaced is
    # taller, so a person with k taller predecessors belongs in the k-th
    # remaining empty slot.
    # (The original guarded on / assigned into `people` — the sorted tuple
    # list — instead of `ordered_people`, so the guard never matched and
    # would have corrupted the work list if it had.)
    people = sorted(zip(heights, in_fronts), key=lambda p: p[0])
    ordered_people = [None] * len(people)
    for height, in_front in people:
        empty_slots = [i for i, h in enumerate(ordered_people) if h is None]
        ordered_people[empty_slots[in_front]] = height
    return ordered_people
def calc_total_probe_depth(capture_data):
    """Given a capture dict mapping header -> (sequence, per-base depth list),
    return a 6-tuple with the percentage of target-space positions covered by
    exactly 0, 1, 2, 3, 4 and by 5-or-more probes (rounded to 2 decimals)."""
    grand_total = 0
    buckets = [0, 0, 0, 0, 0, 0]  # depth 0..4, last bucket is >= 5
    for header, (seq, depth) in capture_data.items():
        grand_total += len(depth)
        for d in depth:
            buckets[d if d < 5 else 5] += 1
    return tuple(round(count * 100 / grand_total, 2) for count in buckets)
def compute_mark(percentage):
    """Map a percentage score to a German school mark (1 best … 6 worst)."""
    scale = (
        (10, '6 (ungenügend)'),
        (45, '5 (mangelhaft)'),
        (60, '4 (ausreichend)'),
        (75, '3 (befriedigend)'),
        (90, '2 (gut)'),
    )
    for threshold, mark in scale:
        if percentage < threshold:
            return mark
    return '1 (sehr gut)'
def corner_to_shot(shot_df, corner_df, time_elapsed=.27):
    """
    Links shots to previous corners to
    try to link assists based on time between pass and corner
    ----------
    shot_df: shot_df of a game with passes included
    corner_df: corner_df of a game
    time_elapsed: fraction of a minute between pass and shot, default .27
        (~16 seconds, because the data has a corner assist at that time)
    Returns
    -------
    shot_df with updated columns 'passed_from_id', 'pass_coord_*' and
    'corner_kick' (1 when a corner within the window was found, else 0)
    """
    for indx, row in shot_df.iterrows():
        shooter_id = row['player_id']
        shoot_time = row['time_of_event(min)']
        # Corners strictly before the shot but within the time window.
        possible_corner_df = corner_df[(corner_df['time_of_event(min)'] < shoot_time) & (corner_df['time_of_event(min)'] > shoot_time - time_elapsed)]
        if len(possible_corner_df) == 0:
            # shot_df.loc[indx, 'passed_from_id'] = None
            shot_df.loc[indx, 'corner_kick'] = 0
        elif len(possible_corner_df) > 1:
            # Multiple candidates: take the latest corner (iloc[-1]).
            shot_df.loc[indx, 'passed_from_id'] = possible_corner_df.iloc[-1, :]['player_id']
            shot_df.loc[indx, 'pass_coord_x1'] = possible_corner_df.iloc[-1, :]['ck_coord_x1']
            shot_df.loc[indx, 'pass_coord_x2'] = possible_corner_df.iloc[-1, :]['ck_coord_x2']
            shot_df.loc[indx, 'pass_coord_y1'] = possible_corner_df.iloc[-1, :]['ck_coord_y1']
            shot_df.loc[indx, 'pass_coord_y2'] = possible_corner_df.iloc[-1, :]['ck_coord_y2']
            shot_df.loc[indx, 'pass_coord_z1'] = possible_corner_df.iloc[-1, :]['ck_coord_z1']
            shot_df.loc[indx, 'pass_coord_z2'] = possible_corner_df.iloc[-1, :]['ck_coord_z2']
            shot_df.loc[indx, 'corner_kick'] = 1
        else:
            # Exactly one candidate corner.
            shot_df.loc[indx, 'passed_from_id'] = possible_corner_df.iloc[0, :]['player_id']
            shot_df.loc[indx, 'pass_coord_x1'] = possible_corner_df.iloc[0, :]['ck_coord_x1']
            shot_df.loc[indx, 'pass_coord_x2'] = possible_corner_df.iloc[0, :]['ck_coord_x2']
            shot_df.loc[indx, 'pass_coord_y1'] = possible_corner_df.iloc[0, :]['ck_coord_y1']
            shot_df.loc[indx, 'pass_coord_y2'] = possible_corner_df.iloc[0, :]['ck_coord_y2']
            shot_df.loc[indx, 'pass_coord_z1'] = possible_corner_df.iloc[0, :]['ck_coord_z1']
            shot_df.loc[indx, 'pass_coord_z2'] = possible_corner_df.iloc[0, :]['ck_coord_z2']
            shot_df.loc[indx, 'corner_kick'] = 1
    return shot_df
import os
def create_full_path(wiki_path, dataset):
    """Join the wiki base path with the token file name for *dataset*."""
    filename = 'wiki.{}.tokens'.format(dataset)
    return os.path.join(wiki_path, filename)
def mysql_app_client(mysql_app):
"""Client interacting with app returned by the `mysql_app` fixture."""
return mysql_app.test_client() | 64079932230cce2a497f7ac8a2cb3a217d1cd0a4 | 48,535 |
import os
def exec_shell_command(command):
    """
    Work around Python left-shift handling by delegating to the shell,
    e.g. ``exec_shell_command("echo $[128 << 408]")``.  Note the shell's
    output carries no sign.  Prints the raw output and returns it as int.
    """
    output = os.popen(command).read().strip()
    print(output)
    return int(output)
def mix_probability_to_independent_component_probability(mix_probability: float, n: float) -> float:
    """Converts the probability of applying a full mixing channel to independent component probabilities.
    If each component is applied independently with the returned component probability, the overall effect
    is identical to, with probability `mix_probability`, uniformly picking one of the components to apply.
    Note that, unlike in other places in the code, the all-identity case is one of the components that can
    be picked when applying the error case.
    """
    keep = (1 - mix_probability) ** (1 / 2 ** (n - 1))
    return (1 - keep) / 2
import re
def is_etag(s):
    """Return a match object when *s* is exactly 28 characters drawn from
    letters, digits, '-' and '_' (a 64-symbol alphabet); None otherwise."""
    etag_pattern = r'[a-zA-Z0-9\-_]{28}$'
    return re.match(etag_pattern, s)
def find_pure_symbol(symbols, unknown_clauses):
    """
    Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.
    Arguments are expected to be in integer representation.
    >>> find_pure_symbol({1, 2, 3}, [{1, -2}, {-2, -3}, {3, 1}])
    (1, True)
    """
    literals = set().union(*unknown_clauses)
    positives = {s for s in symbols if s in literals}
    negatives = {s for s in symbols if -s in literals}
    for sym in positives:
        if sym not in negatives:
            return sym, True
    for sym in negatives:
        if sym not in positives:
            return sym, False
    return None, None
def getSuperType(date):
    """Coarse grained distinction between n/a, single date events and timespans"""
    if any(marker in date for marker in ('beginPoint', 'endPoint', 'Q')):
        return 'timespan'
    if date in ('undefined', 'n/a'):
        return 'n/a'
    return "singleDay"
def join_trips(combined, trips):
    """
    Inner-join the combined stop table with *trips* on the shared
    ``stop_id`` column and return the merged DataFrame.
    """
    merged = combined.merge(trips, on='stop_id')
    return merged
def find_duplicate_inds(list_with_duplicates: list):
    """Find indices of duplicates in a list.
    :param list_with_duplicates: list containing duplicate items
    :type list_with_duplicates: list
    :return: list of duplicate indices, list of unique items' indices
        (parents of the duplicates, one per duplicate)
    :rtype: list and list
    """
    unique_items = []
    unique_inds = []
    badind = []
    parentind = []
    for idx, item in enumerate(list_with_duplicates):
        if item in unique_items:
            parentind.append(unique_inds[unique_items.index(item)])
            badind.append(idx)
        else:
            unique_items.append(item)
            unique_inds.append(idx)
    return badind, parentind
def package_length(p):
    """
    Get the length of a package in java notation
    :param p: the package as a string
    :return: the number of dot-separated components
    """
    # split('.') always yields count('.') + 1 parts.
    return p.count('.') + 1
def findClosestRoom (room, roomList):
    """
    Finds the closest room to 'room' in the roomList.
    Distance is measured between rectangle centers.  Rooms are assumed to
    be (x, y, width, height) tuples — TODO confirm against callers.
    Returns None when roomList is empty.
    """
    def _center(r):
        # Center of an (x, y, w, h) rectangle.  The original computed
        # r[0] + (r[0] + r[2]) // 2, which double-counts the origin and
        # distorts all distances.
        return (r[0] + r[2] // 2, r[1] + r[3] // 2)

    roomCenter = _center(room)
    currentClosest = None
    closestDistance = None
    for compareRoom in roomList:
        cx, cy = _center(compareRoom)
        dist = ((cx - roomCenter[0]) ** 2 + (cy - roomCenter[1]) ** 2) ** 0.5
        if currentClosest is None or dist < closestDistance:
            currentClosest = compareRoom
            closestDistance = dist
    return currentClosest
def str_to_int(s):
    """Convert binary ("0b..."), hex ("0x...") or decimal strings to int.

    An optional leading sign is honoured for all three bases (the original
    rejected e.g. "-0x10" because the prefix check ignored the sign).
    """
    sign = 1
    body = s
    if s and s[0] in '+-':
        if s[0] == '-':
            sign = -1
        body = s[1:]
    if body.startswith("0x"):
        return sign * int(body, 16)
    elif body.startswith("0b"):
        return sign * int(body, 2)
    else:
        # int() handles the sign itself for plain decimals.
        return int(s)
from typing import List
def convert_color_from_hex_to_rgb(value: str) -> List[int]:
    """
    converts a hex encoded color to rgb encoded colors
    :param value: hex encoded color (e.g. "#ff00aa")
    :return: rgb value as [r, g, b]
    """
    digits = value.lstrip('#')
    step = len(digits) // 3
    return [int(digits[pos:pos + step], 16) for pos in range(0, len(digits), step)]
import itertools
def list_of_combs(arr):
    """returns a list of all non-empty subsets (as lists) of *arr*,
    grouped by subset size, smallest first"""
    combs = []
    for size in range(1, len(arr) + 1):
        combs.extend(list(c) for c in itertools.combinations(arr, size))
    return combs
def convertStringToFloat(string):
    """
    Parse *string* as a float.

    :param string: value to convert (str or any float()-convertible object)
    :return: the parsed float, or 0 when conversion fails
    """
    try:
        return float(string)
    except (TypeError, ValueError):
        # The original bare `except` swallowed everything (even
        # KeyboardInterrupt); only conversion failures should return 0.
        return 0
def remove_irrelevant_terms(graph, irrelevant_terms):
    """
    Prune irrelevant terms from the graph.

    NOTE(review): despite the original wording, this mutates *graph* in
    place (networkx ``remove_nodes_from``) and returns the same object —
    no copy is made.
    Args:
        graph: Networkx object
        irrelevant_terms: Iterable giving irrelevant terms. Usually ConstantsAndUtilities.Ignore.iterable
    Returns:
        The same graph instance, with the listed nodes removed.
    """
    graph.remove_nodes_from(irrelevant_terms)
    return graph
from typing import List
import torch
def mm_list(matrices: List[torch.Tensor]):
    """
    Multiply all matrices together (left-to-right chain product).
    """
    product = matrices[0]
    for factor in matrices[1:]:
        product = torch.mm(product, factor)
    return product
import hashlib
import json
def make_hash(d: dict):
    """
    Return a deterministic MD5 hex digest of the input dictionary.
    From: https://stackoverflow.com/a/22003440
    Parameters
    ----------
    d: input dictionary (must be JSON-serializable)
    Returns
    -------
    hash (hex encoded) of the input dictionary.
    """
    canonical = json.dumps(d, sort_keys=True).encode("utf-8")
    return hashlib.md5(canonical).hexdigest()
def switch_unused_ports(node_list):
    """Create a dictionary of unused ports per switch.
    Args:
        node_list: A list of nodes (objects exposing common_name() and ports())
    Returns:
        unused_ports: Dictionary of switch names to lists of logical port
        indices that are unused (port slot is None)
    """
    unused_ports = {}
    for node in node_list:
        # Only switches, excluding high-speed-network switches ("sw-hsn").
        if "sw" in node.common_name() and "sw-hsn" not in node.common_name():
            unused_ports[node.common_name()] = []
            unused_block = []
            logical_index = 1
            for port in node.ports():
                if port is None:
                    # Unused slot: record its logical index.
                    unused_ports[node.common_name()].append(logical_index)
                    unused_block.append(logical_index)
                    logical_index += 1
                    continue
                if unused_block:
                    # NOTE(review): a used port right after a run of unused
                    # slots discards the last recorded index and bumps the
                    # logical index — presumably the trailing unused slot
                    # shares a logical position with the used port; confirm
                    # against node.ports() semantics before changing.
                    unused_block = []  # reset
                    logical_index += 1
                    unused_ports[node.common_name()].pop()
    return unused_ports
def _get_default_annual_spacing(nyears):
    """
    Returns a default (minor, major) tick spacing for annual data.
    """
    thresholds = (
        (11, (1, 1)),
        (20, (1, 2)),
        (50, (1, 5)),
        (100, (5, 10)),
        (200, (5, 25)),
        (600, (10, 50)),
    )
    for limit, spacing in thresholds:
        if nyears < limit:
            return spacing
    factor = nyears // 1000 + 1
    return (factor * 20, factor * 100)
def get_request_data(request):
    """Return (data, mimetype, charset) triple for a Flask request object."""
    charset = request.mimetype_params.get('charset')
    return (request.get_data(), request.mimetype, charset)
import sys
import threading
def tee(infile, discard, logging, console=None):
    """Print `infile` to `files` in a separate thread.

    Every line is normalized (decoded with stdout's encoding, CR and tabs
    stripped/replaced, EOL re-appended) and written to *logging*; when
    *console* is given, lines are echoed there too, either all of them
    (discard is None) or only every *discard*-th line.  Returns the daemon
    thread so callers may join() it.
    """
    def fanout():
        discard_counter = 0
        # iter(..., b'') stops at EOF (empty bytes read).
        for line in iter(infile.readline, b''):
            # use the same encoding as stdout/stderr
            s = line.decode(
                encoding=sys.stdout.encoding,
                errors='replace'
            )
            s = s.replace('\r', '')
            s = s.replace('\t', ' ')
            s = s.rstrip()  # strip spaces and EOL
            s += '\n'  # append stripped EOL back
            logging.write(s)
            if console is None:
                continue
            if discard is None:
                console.write(s)
                console.flush()
                continue
            # Throttled echo: only the first line of each `discard`-sized
            # group reaches the console.
            if discard_counter == 0:
                console.write(s)
                console.flush()
            discard_counter += 1
            if discard_counter == discard:
                discard_counter = 0
        infile.close()
    t = threading.Thread(target=fanout)
    t.daemon = True
    t.start()
    return t
from typing import Tuple
from typing import Optional
import re
def __extract_tes_task_state_from_cwl_tes_log(
    line: str,
) -> Tuple[Optional[str], Optional[str]]:
    """Extract (task_id, task_state) from one cwl-tes log line.

    Either element is None when the line does not announce a new task id
    or report a polling result, respectively.
    """
    task_id: Optional[str] = None
    task_state: Optional[str] = None
    new_task_match = re.match(r"^\[job \w*\] task id: (\S*)$", line)
    if new_task_match:
        task_id = new_task_match.group(1)
    poll_match = re.match(
        r'^\[job \w*\] POLLING "(\S*)", result: (\w*)', line
    )
    if poll_match:
        task_id, task_state = poll_match.group(1), poll_match.group(2)
    return (task_id, task_state)
def remove_duplicate(duplicate):
    """
    Return a new list with duplicates removed, keeping first occurrences.
    """
    deduped = []
    for value in duplicate:
        if value not in deduped:
            deduped.append(value)
    return deduped
from typing import OrderedDict
import torch
def get_stripped_DataParallel_state_dict(m, base_name='', newdict=None):
    """Strip the 'module.' prefixes that torch.nn.DataParallel adds and
    return a flat OrderedDict of parameter name -> tensor.

    :param m: module to walk
    :param base_name: dotted prefix accumulated during recursion
    :param newdict: accumulator used by the recursion; created fresh per
        top-level call.  (The original used a mutable default
        ``OrderedDict()``, which leaked entries between separate calls.)
    """
    if newdict is None:
        newdict = OrderedDict()
    try:
        # Probe for children; StopIteration means m is a leaf module.
        next(m.children())
        # Passed the probe: m has one or more children.
        if isinstance(m, torch.nn.DataParallel):
            assert len([x for x in m.children()]) == 1, "DataParallel module should only have one child, namely, m.module"
            get_stripped_DataParallel_state_dict(m.module, base_name, newdict)
        else:
            for _name, _module in m.named_children():
                new_base_name = base_name + '.' + _name if base_name != '' else _name
                get_stripped_DataParallel_state_dict(_module, new_base_name, newdict)
        return newdict
    except StopIteration:
        # Leaf module: copy its parameters under the accumulated prefix.
        assert not isinstance(m, torch.nn.DataParallel), 'Leaf Node cannot be "torch.nn.DataParallel" (since no children ==> no *.module )'
        for k, v in m.state_dict().items():
            new_k = base_name + '.' + k
            newdict[new_k] = v
        return newdict
def get_overlap(gt_box: list, pr_box: list) -> float:
    """Intersection area between a ground-truth box and a predicted box.

    Arguments:
        gt_box {list} -- [x, y, w, h] of the ground-truth lesion
        pr_box {list} -- [x, y, w, h] of the predicted bounding box
    Returns:
        float -- intersection area (0.0 when the boxes do not overlap)
    """
    left = max(gt_box[0], pr_box[0])
    right = min(gt_box[0] + gt_box[2], pr_box[0] + pr_box[2])
    top = max(gt_box[1], pr_box[1])
    bottom = min(gt_box[1] + gt_box[3], pr_box[1] + pr_box[3])
    overlap_w = max(right - left, 0)
    overlap_h = max(bottom - top, 0)
    return float(overlap_w * overlap_h)
def tor_parse(args):
    """Validate Tor-related CLI arguments.

    :param args: parsed arguments with ``tor`` and ``tor_address`` attributes
    :return: False when Tor is requested but no address was supplied,
        True otherwise
    """
    if args.tor and not args.tor_address:
        print("[!] Tor address not supplied by the user.")
        return False
    return True
import click
def format_lxml_syntax_error(exc, context_lines=5, full_xml=False):
    """
    Format a :class:`lxml.etree.XMLSyntaxError`, showing the lines around
    the error.

    *exc* should have been augmented with :func:`add_lxml_syntax_error_context`
    first -- besides lxml's ``lineno``/``offset``/``message`` it must carry
    ``full_xml`` (the whole document text) and ``context`` (a display name
    for where the error occurred).

    *context_lines* is the number of lines to show around the error. If
    *full_xml* is true, show the entire XML.

    Returns the colorized (via click) multi-line string.
    """
    lines = exc.full_xml.splitlines()
    # lxml reports 1-based line/column numbers; convert to 0-based indices.
    err_line = exc.lineno - 1
    err_offset = exc.offset - 1
    if full_xml:
        start_line = 0
        end_line = None
    else:
        # NOTE(review): start_line can go negative when the error is within
        # the first context_lines lines; a negative slice start then counts
        # from the end of the list -- confirm this is intended.
        start_line = err_line - context_lines
        end_line = err_line + context_lines
    before_context = lines[start_line:err_line]
    error_line = lines[err_line]
    after_context = lines[err_line + 1:end_line]
    # Header: location, blank line, lxml message, blank line.
    lines = [
        'XML syntax error in %s:' % click.style(exc.context, bold=True),
        '',
        click.style(exc.message, bold=True),
        '',
    ]
    # Get the error context lines
    xml_lines = []
    xml_lines += before_context
    # Whole offending line in red, the offending character also bold.
    xml_lines.append(
        click.style(error_line[:err_offset], fg='red') +
        click.style(error_line[err_offset], fg='red', bold=True) +
        click.style(error_line[err_offset + 1:], fg='red')
    )
    xml_lines_error_index = len(xml_lines)
    xml_lines += after_context
    # Add line numbers gutter
    gutter_width = len('%s' % (len(xml_lines) + start_line + 1))
    gutter_fmt = '%%%si' % gutter_width
    margin_width = 2
    xml_lines = [
        click.style(gutter_fmt % (i + start_line + 1),
                    fg='black', bold=True) +
        ' ' * margin_width + l
        for i, l in enumerate(xml_lines)
    ]
    # Add error marker: a caret under the offending column, shifted right to
    # account for the gutter and margin just prepended to every line.
    xml_lines.insert(xml_lines_error_index,
                     ' ' * (err_offset + margin_width + gutter_width) + '^')
    lines += xml_lines
    return '\n'.join(lines)
import torch
def events_to_voxel_grid_mod(events_torch, num_bins, width, height):
    """
    Build a (num_bins, height, width) voxel grid from an event tensor.

    A slightly modified version of thirdparty.e2vid.utils.inference_utils,
    where the input is already been placed on a torch device
    Code from: https://github.com/uzh-rpg/rpg_e2vid

    :param events_torch: (N, 4) tensor with columns
        [timestamp, x, y, polarity]; the first/last rows are used as the
        time range, so it is assumed sorted by timestamp -- TODO confirm.
    :param num_bins: number of temporal bins of the voxel grid.
    :param width: sensor width in pixels.
    :param height: sensor height in pixels.
    :return: float32 tensor of shape (num_bins, height, width), on the same
        device as ``events_torch``.

    NOTE(review): the timestamp column of ``events_torch`` is normalized
    in place, so the caller's tensor is modified.
    """
    device = events_torch.device
    assert (events_torch.shape[1] == 4)
    assert (num_bins > 0)
    assert (width > 0)
    assert (height > 0)
    with torch.no_grad():
        # Flattened so that index_add_ can scatter with linear indices.
        voxel_grid = torch.zeros(num_bins, height, width,
                                 dtype=torch.float32,
                                 device=device).flatten()
        # Normalize the event timestamps so that they lie
        # between 0 and num_bins
        last_stamp = events_torch[-1, 0]
        first_stamp = events_torch[0, 0]
        deltaT = last_stamp - first_stamp
        if deltaT == 0:
            deltaT = 1.0
        events_torch[:, 0] = (num_bins - 1) * \
            (events_torch[:, 0] - first_stamp) / deltaT
        ts = events_torch[:, 0]
        xs = events_torch[:, 1].long()
        ys = events_torch[:, 2].long()
        pols = events_torch[:, 3].float()
        pols[pols == 0] = -1  # polarity should be +1 / -1
        # Each event's polarity is split bilinearly between the two nearest
        # temporal bins (floor bin gets 1-dt, next bin gets dt).
        tis = torch.floor(ts)
        tis_long = tis.long()
        dts = ts - tis
        vals_left = pols * (1.0 - dts.float())
        vals_right = pols * dts.float()
        # Left (floor) bin contribution.
        valid_indices = tis < num_bins
        valid_indices &= tis >= 0
        voxel_grid.index_add_(dim=0,
                              index=xs[valid_indices] + ys[valid_indices]
                              * width
                              + tis_long[valid_indices]
                              * width * height,
                              source=vals_left[valid_indices])
        # Right (floor+1) bin contribution.
        valid_indices = (tis + 1) < num_bins
        valid_indices &= tis >= 0
        voxel_grid.index_add_(dim=0,
                              index=xs[valid_indices] + ys[valid_indices]
                              * width
                              + (tis_long[valid_indices] + 1)
                              * width * height,
                              source=vals_right[valid_indices])
        voxel_grid = voxel_grid.view(num_bins, height, width)
    return voxel_grid
def averageOfLevels(self, root):
    """
    :type root: TreeNode
    :rtype: List[float]

    Returns the average node value of each level of the tree, from the
    root downwards (empty list for an empty tree).
    """
    averages = []
    level = [root] if root else []
    while level:
        values = [node.val for node in level]
        averages.append(sum(values) / len(values))
        next_level = []
        for node in level:
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        level = next_level
    return averages
def train_reformat(train_Xy):
    """
    Group train_Xy rows by article id.

    Each article maps to a dict with 'true' / 'false' sentence lists
    (split on the per-sentence label) and a 'sample' cursor initialized
    to 0 (position of the last true sample used).
    """
    grouped = {}
    for row in train_Xy:
        article = grouped.setdefault(
            row['a_id'], {'true': [], 'false': [], 'sample': 0})
        for s in row['sentence_span']:
            # s[0] is the sentence text, s[1] its label (0 = false).
            bucket = 'false' if s[1] == 0 else 'true'
            article[bucket].append(s[0])
    return grouped
def normalize_sec_advisory(string):
    """
    Normalize an advisory string for further processing: drop leading
    colons, then surrounding whitespace.

    :param string: input string
    :return: normalized string
    """
    return string.lstrip(':').strip()
def get_storage_key(namespace: str, storage_key: str) -> str:
    """Build the namespaced kvstore key ('<namespace>-<storage_key>')."""
    return f"{namespace}-{storage_key}"
def format_table(table, extra_space=1):
    """
    Format a table of columns for printing with aligned columns.

    Takes a table of columns: [[val, val, ...], [val, val, ...], ...]
    where each val is one row of that column.  All columns must have the
    same number of rows.  Each cell is left-justified to the width of the
    widest value in its column, plus ``extra_space`` padding characters.

    Returns a list of rows, each row a list of padded cell strings, so the
    result can be printed with e.g. "".join(row) per row.
    """
    if not table:
        return [[]]
    widths = [max(len(str(cell)) for cell in column) for column in table]
    num_rows = len(table[0])
    formatted = []
    for row_idx in range(num_rows):
        formatted.append([
            str(column[row_idx]).ljust(widths[col_idx]) + " " * extra_space
            for col_idx, column in enumerate(table)
        ])
    return formatted
def decToDegMinSec(dd: float) -> tuple:
    """
    Convert decimal degrees to degrees/minutes/seconds.

    Parameters:
        dd (float): decimal degrees (sign is carried on the degrees part)
    Returns:
        tuple: (degrees, minutes, seconds) as rounded integers
    """
    sign = 1 if dd >= 0 else -1
    total_seconds = abs(dd) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    return (round(sign * degrees), round(minutes), round(seconds))
from typing import Counter
def _find_duplicates(barcodes):
    """Report any barcode observed more than a single time.

    Parameters
    ----------
    barcodes : iterable of str
        The barcodes to check for duplicates in

    Returns
    -------
    set
        Any barcode observed more than a single time
    dict
        Error information ({'barcode': [...], 'error': ...}) or None
    """
    tallies = Counter(barcodes)
    repeated = {barcode for barcode, count in tallies.items() if count > 1}
    if repeated:
        return repeated, {
            "barcode": list(repeated),
            "error": "Duplicated barcodes in input"
        }
    return repeated, None
def get_include_paths():
    """Return the C++ include directories, each rooted under 'lib/'."""
    subdirs = ('boost', 'eigen', 'eigen/unsupported', 'kde1d/include')
    return ['lib/' + subdir for subdir in subdirs]
def solr_field(name=None, type='string', multiValued=False, stored=True, docValues=False):
    """Convert a Python field description to a Solr field definition dict.

    Boolean flags are rendered as the strings 'true'/'false' as Solr expects.

    :param name: field name (required; empty/missing raises TypeError)
    :param type: Solr field type
    :param multiValued, stored, docValues: Solr boolean field attributes
    :raises TypeError: when *name* is missing or empty
    """
    if not name:
        # Original message read "solar()" and contained stray continuation
        # whitespace inside the literal; fixed to name this function.
        raise TypeError('solr_field() missing 1 required positional argument: "name"')
    lookup_bool = {True: 'true', False: 'false'}
    return {'name': name, 'type': type,
            'multiValued': lookup_bool[multiValued],
            'stored': lookup_bool[stored],
            'docValues': lookup_bool[docValues]}
import random
def random_alter_crc(test_case=None):
    """Decide whether to alter the CRC.

    With a test case, follow its 'crc' flag; otherwise pick randomly with
    10% probability.
    """
    if test_case:
        return bool(test_case.get("crc"))
    return random.random() < 0.1
from enum import Enum
def mkenum(**enums):
    """Create an Enum type from keyword arguments.

    :param enums: member name -> value mapping; names may not start with
        '__' and values must be unique.
    :return: a new Enum subclass carrying a ``__renums__`` value->name map.
    :raises RuntimeError: on a malformed key or duplicated values.
    """
    check_set = set()
    for k, v in enums.items():
        if k.startswith('__'):
            raise RuntimeError('Key:{0} format error!'.format(k))
        check_set.add(v)
    if len(check_set) != len(enums):
        raise RuntimeError('Multi value!')
    # Use the Enum functional API: building the class with a bare type()
    # call hands EnumMeta a plain dict instead of its _EnumDict namespace
    # and fails before any member is created.
    enum_cls = Enum('Enum', enums)
    enum_cls.__renums__ = {v: k for k, v in enums.items()}
    return enum_cls
import os
def get_path_of_external_data_dir():
    """
    Return the external data directory (mostly used to store data there),
    taken from the PYSPI environment variable.
    """
    return os.environ['PYSPI']
def CDC_revenue(N, accuracy, data_worth):
    """Return the revenue of each of the N CDC nodes (index-weighted)."""
    return [data_worth * accuracy * c for c in range(N)]
def section(title):
    """Return *title* centered in a 60-char '='-padded banner, preceded by
    a newline and followed by a newline and a dot."""
    banner = format(title, "=^60")
    return "\n" + banner + "\n."
import ntpath
def path_leaf(path):
    """Return the leaf (final component) of a given path.

    Handles both '/' and '\\' separators and paths ending in a separator.

    Args:
        path (string): path to be processed.
    Returns:
        string: path leaf.
    """
    head, tail = ntpath.split(path)
    if tail:
        return tail
    return ntpath.basename(head)
from typing import Callable
import hashlib
def make_block_cprng(seed: bytes) -> Callable[[int], bytes]:
    """Return a deterministic random function that yields 32-byte blocks.

    Each call advances an internal SHA-256 hash chain seeded from *seed*
    (which must be exactly 32 bytes) and returns the new state.
    """
    assert isinstance(seed, bytes)
    assert len(seed) == 32
    state = hashlib.sha256(seed).digest()
    def _next_block(length: int) -> bytes:
        # Only 32-byte requests are supported: one SHA-256 digest per call.
        nonlocal state
        assert length == 32
        state = hashlib.sha256(state).digest()
        return state
    return _next_block
import pytz
def datetime_from_utc_to_local(utc_datetime):
    """Convert a naive UTC datetime to the system's local timezone.

    :param utc_datetime: naive ``datetime`` assumed to be UTC
    :return: timezone-aware ``datetime`` in local time
    """
    aware_utc = pytz.utc.localize(utc_datetime, is_dst=None)
    return aware_utc.astimezone()
def _label_from_key(k: str) -> str:
    """Map a result-dict key to its x-tick label (default: uppercased)."""
    special = {'bayrn': 'BayRn', 'epopt': 'EPOpt'}
    return special.get(k, k.upper())
def bytes_to_hex(list_of_bytes):
    """Render a list of byte values (0-255) as a lowercase hex string.

    >>> bytes_to_hex([1])
    '01'
    >>> bytes_to_hex([100,120,250])
    '6478fa'

    Raises ValueError when any value falls outside 0-255.
    """
    for value in list_of_bytes:
        if value < 0 or value > 255:
            raise ValueError("Value outside range 0 to 255")
    return "".join(f"{value:02x}" for value in list_of_bytes)
def SGSeries(v):
    """
    SGxxx series selector: True when the entry's platform string
    contains 'SG'.
    """
    platform = v["platform"]
    return "SG" in platform
def make_take(ndims, slice_dim):
    """Generate a take function that applies an indexer along one dimension.

    The returned function builds an ndims-tuple that is ``slice(None)``
    everywhere except at ``slice_dim % ndims``, where the indexer goes.
    """
    target = slice_dim % ndims
    def take(indexer):
        parts = []
        for axis in range(ndims):
            parts.append(indexer if axis == target else slice(None))
        return tuple(parts)
    return take
def color_rgb_to_int(red: int, green: int, blue: int) -> int:
    """Pack RGB components into a single 0xRRGGBB integer."""
    return red * 65536 + green * 256 + blue
def search_in_toc(toc, key, totalpg):
    """Find lesson *key* in the table of contents and return its page span.

    Args:
        toc (nested list): entries where index 1 is the topic name and
            index 2 the starting page number.
        key (str): topic to look for (matched exactly, or against the
            lowercased topic name).
        totalpg (int): total pages in the book/document.

    Returns:
        tuple: (start_page, end_page); the end page is the next topic's
        start page, or ``totalpg`` when the match is the second-to-last
        entry.  ``(None, None)`` when not found.

    NOTE(review): the final toc entry is never searched and appears to act
    as an end sentinel -- confirm toc always carries one.
    """
    last_searchable = len(toc) - 2
    for idx in range(len(toc) - 1):
        entry = toc[idx]
        if entry[1] == key or entry[1].lower() == key:
            if idx == last_searchable:
                return (entry[2], totalpg)
            return (entry[2], toc[idx + 1][2])
    return None, None
import subprocess
import re
def get_gpu_temp():
    """Return the Raspberry Pi GPU temperature as a Celsius float.

    Shells out to ``vcgencmd measure_temp`` and parses the first number
    found in its output (e.g. "48.3" out of "temp=48.3'C").

    Raises:
        ValueError: when no number is found -- ``gpu_temp`` stays the
            string 'unknown', which ``float()`` rejects.
        FileNotFoundError: when ``vcgencmd`` is not installed.
    """
    cmd = ['vcgencmd', 'measure_temp']
    # universal_newlines=True makes communicate() return str, not bytes.
    measure_temp_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    output = measure_temp_proc.communicate()[0]
    gpu_temp = 'unknown'
    # First run of digits/dots in the command output.
    gpu_search = re.search('([0-9.]+)', output)
    if gpu_search:
        gpu_temp = gpu_search.group(1)
    return float(gpu_temp)
def extract_unique_entities_and_relations(triples):
    """
    Identify the unique entities and relation types in a triple collection.

    Args:
        triples: iterable of (subject, relation, object) triples.
    Returns:
        unique_entities: sorted list of subject/object strings
        unique_relations: sorted list of relation strings
    """
    entities = set()
    relations = set()
    for triple in triples:
        entities.add(triple[0])
        entities.add(triple[2])
        relations.add(triple[1])
    return sorted(entities), sorted(relations)
def tz_to_str(tz_seconds: int) -> str:
    """Convert a UTC offset in seconds to a '+HHMM' / '-HHMM' string
    (e.g. 3600 -> '+0100'; note: no colon separator)."""
    sign = "-" if tz_seconds < 0 else "+"
    total_minutes, _ = divmod(abs(tz_seconds), 60)
    hours, minutes = divmod(total_minutes, 60)
    return f"{sign}{hours:02}{minutes:02}"
def get_haplotype(read):
    """Return the haplotype (the 'HP' tag) a read is assigned to, as a
    string, or 'un' for unphased reads without the tag."""
    if read.has_tag('HP'):
        return str(read.get_tag('HP'))
    return 'un'
def getNetFromGross(net_income, allowance):
    """Net income after tax, using the 2011-2012 UK bands and rates:
    20% on the first 35,000 above the allowance, 40% up to 150,000,
    50% beyond that.  Income within the allowance is untaxed."""
    if net_income <= allowance:
        return net_income
    taxable = net_income - allowance
    keep_basic = 1 - 0.2
    keep_higher = 1 - 0.4
    keep_top = 1 - 0.5
    if taxable <= 35000:
        return allowance + taxable * keep_basic
    if taxable <= 150000:
        return allowance + 35000 * keep_basic + (taxable - 35000) * keep_higher
    return allowance + 35000 * keep_basic + (150000 - 35000) * keep_higher\
        + (taxable - 150000) * keep_top
def createContainer(values):
    """Create a container for symbolic values, equivalent of a Pascal
    record or C struct.

    :param dict values: values to be added as container properties
    :return: class instance exposing each key as an attribute
    :rtype: Container

    >>> a = createContainer({"floatNumber": 3, "stringValue": "Test"})
    >>> a.floatNumber, a.stringValue
    (3, 'Test')
    """
    class Container:
        pass
    result = Container()
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems() and crashed on Python 3.
    for key, value in values.items():
        setattr(result, key, value)
    return result
def meters_formatter(f):
    """
    Format a float with 4 decimal digits and the unit 'm' as suffix;
    None passes through unchanged.
    """
    if f is None:
        return None
    return '%0.4f m' % f
def uniquify(iterable):
    """Return the unique elements of *iterable*, preserving first-seen order."""
    return list(dict.fromkeys(iterable))
def parse_line(line):
    """Parse one ';'-separated 9-field record, returning only
    (image_id, image_path); the remaining fields are ignored.

    Raises ValueError when the line does not hold exactly 9 fields.
    """
    (image_id, image_path, _caption_id, _token_ids, _sent_ids,
     _pos_ids, _seg_labels, _label, _image_label) = line.strip('\r\n').split(";")
    return image_id, image_path
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.