content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def find_kconfig_rules(kconf, config, imply_config):
    """Check whether a config has a 'select' or 'imply' keyword

    Args:
        kconf: Kconfiglib.Kconfig object
        config: Name of config to check (without CONFIG_ prefix)
        imply_config: Implying config (without CONFIG_ prefix) which may or
            may not have an 'imply' for 'config'
    Returns:
        Symbol object for 'imply_config' if it selects or implies 'config',
        else None
    """
    sym = kconf.syms.get(imply_config)
    if sym:
        # NOTE(review): get_selected_symbols() / get_implied_symbols() /
        # get_name() are the legacy (pre-2.x) kconfiglib API -- confirm the
        # pinned kconfiglib version supports them.
        for sel in sym.get_selected_symbols() | sym.get_implied_symbols():
            if sel.get_name() == config:
                return sym
    return None
def mechanismHub_oracle_price_hydra(params, substep, state_history, prev_state, policy_input):
    """Return the state-update tuple for Hydra's oracle price.

    Adds the policy-computed price delta to the previous oracle price and
    returns it under the 'oracle_price_hydra' state key (cadCAD convention).
    """
    updated_price = prev_state['oracle_price_hydra'] + policy_input['oracle_price_hydra']
    return 'oracle_price_hydra', updated_price
def generate_filename(args):
    """Build a filename for saving models and runs from parsed CLI arguments.

    The base name encodes dataset, layer count and hidden size; boolean
    flags append descriptive suffixes.
    """
    name = ''.join([
        args.dataset,
        '_n_layers_', str(args.n_layers),
        '_hidden_size_', str(args.hidden_size),
    ])
    if args.tie_weights:
        name += '_tied_weights'
    if args.attention:
        name += '_attention'
    # NOTE(review): the flag is named *no*_positional_attention but the suffix
    # appended is '_positional_attention' -- preserved as-is; confirm intent.
    if args.no_positional_attention:
        name += '_positional_attention'
    return name
def remove_b_for_nucl_phys(citation_elements):
    """Strip a leading 'B'/'b' from Nucl.Phys.Proc.Suppl. volumes.

    INSPIRE handles that journal differently, so the series letter must not
    be part of the volume. Other elements are left untouched.
    """
    for element in citation_elements:
        if element['type'] != 'JOURNAL':
            continue
        if element['title'] != 'Nucl.Phys.Proc.Suppl.':
            continue
        volume = element.get('volume')
        if volume is not None and volume.startswith(('b', 'B')):
            element['volume'] = volume[1:]
    return citation_elements
def create_time_variant_multiplicative_function(time_variant_function):
    """Build a multiplier whose factor is itself a function of time.

    Like create_multiplicative_function, except the multiplication factor is
    obtained by evaluating ``time_variant_function`` at the given time.

    :param time_variant_function: callable of time returning the factor
    :return: callable (input_value, time) -> time_variant_function(time) * input_value
    """
    def _multiplier(input_value, time):
        return time_variant_function(time) * input_value
    return _multiplier
from typing import List
def canTwoSum(numsList: List[int], target: int) -> bool:
    """Return True if two (distinct) elements of numsList sum to target.

    Fixes the original bug where an element could be paired with itself:
    e.g. [3, 1] with target 6 returned True because ``3 in numsList``.
    Also runs in O(n) via a seen-set instead of O(n^2) membership tests.

    Args:
        numsList: List of integers to search.
        target: Desired pair sum.
    Returns:
        True when some pair of elements (two different positions, values may
        be equal) adds up to target, else False.
    """
    seen = set()
    for x in numsList:
        if target - x in seen:
            return True
        seen.add(x)
    return False
def _fixtures_transition_impl(_settings, _attr):
"""Rule transition that standardizes command-line options for fixtures."""
return {
"//command_line_option:cpu": "darwin_x86_64",
"//command_line_option:ios_minimum_os": "14.1",
"//command_line_option:macos_cpus": "x86_64",
"//command_line_option:macos_minimum_os": "11.0",
"//command_line_option:tvos_minimum_os": "9.2",
"//command_line_option:watchos_minimum_os": "7.6.2",
} | 46b0a499f74fe210f8a894582940694a4aaf6655 | 98,283 |
from typing import Dict
from typing import Any
def dict_to_indented_tree(d: Dict[str, Any]) -> str:
    """Render a dict-of-dicts tree as one string, one node per line,
    indented by one space per depth level."""
    lines = []

    def _walk(node, depth):
        # Depth-first traversal; each key becomes a line at its depth.
        for key, child in node.items():
            lines.append(' ' * depth + key + '\n')
            _walk(child, depth + 1)

    _walk(d, 0)
    return ''.join(lines)
def _StripUnusedNotation(string):
"""Returns string with Pythonic unused notation stripped."""
if string.startswith('_'):
return string.lstrip('_')
unused = 'unused_'
if string.startswith(unused):
return string[len(unused):]
return string | b8b1ea02b8d07dba28ec72720b28d45bb2ab592b | 98,288 |
import requests
def get_metric_names(url):
    """Get ALL metric names from Prometheus.

    Args:
        url (str): Prometheus base url (no trailing slash).
    Returns:
        list: All metric names known to the server.
    Raises:
        requests.HTTPError: if the server responds with an error status
            (the original raised an opaque KeyError instead).
    """
    response = requests.get(f'{url}/api/v1/label/__name__/values')
    # Fail loudly on HTTP errors rather than KeyError-ing on the missing
    # 'data' field below.
    response.raise_for_status()
    return response.json()['data']
def check_intersect(p1, p2, p3, p4):
    """
    Check if the line p1-p2 intersects the line p3-p4 (strict crossing;
    touching at an endpoint does not count).
    Args:
        p1: (x,y)
        p2: (x,y)
        p3: (x,y)
        p4: (x,y)
    Returns:
        boolean : True if intersection occurred
    """
    def _side(a, b, c):
        # Signed-area style test: which side of segment a-b the point c lies on.
        return (a[0] - b[0]) * (c[1] - a[1]) + (a[1] - b[1]) * (a[0] - c[0])

    d1 = _side(p1, p2, p3)
    d2 = _side(p1, p2, p4)
    d3 = _side(p3, p4, p1)
    d4 = _side(p3, p4, p2)
    # Segments cross when each pair of endpoints straddles the other segment.
    return d1 * d2 < 0 and d3 * d4 < 0
def infer_num_vertices(architecture):
    """
    Infer number of vertices from an architecture dict.

    Counts the 'opN' keys (which must be exactly op1..opK) and adds 2 for
    the input/output vertices.

    Parameters
    ----------
    architecture : dict
        Architecture in NNI format.
    Returns
    -------
    int
        Number of vertices.
    """
    op_keys = {key for key in architecture if key.startswith('op')}
    intermediate = len(op_keys)
    expected = {'op{}'.format(i) for i in range(1, intermediate + 1)}
    assert op_keys == expected
    return intermediate + 2
def get_attributes_by_type(attrs, type_):
    """
    Returns the filtered dict of user defined attributes whose values are
    instances of ``type_``, from the attributes dictionary passed to the
    __new__ of a metaclass.
    :param attrs: Attributes of a metaclass (last parameter of __new__).
    :param type_: Type (or tuple of types) to filter by.
    :return: Subset of attrs whose values are instances of type_.
    :rtype: dict
    """
    # isinstance already covers subclasses (and ABC-registered virtual
    # subclasses via __instancecheck__), so the original's extra
    # ``issubclass(v.__class__, type_)`` clause was redundant.
    return {k: v for k, v in attrs.items() if isinstance(v, type_)}
def to_red(string):
    """Wrap *string* in ANSI escape codes for bright red (16-colour).

    Returns:
        str: the string in bright red color
    """
    BRIGHT_RED = "\u001b[31;1m"
    RESET = "\u001b[0m"
    return BRIGHT_RED + str(string) + RESET
def split_df(df):
    """
    Split a VOiCES index dataframe by microphone and distractor type.

    Inputs:
        df - pandas dataframe with the default VOiCES index columns
             (must include 'mic' and 'distractor').
    Outputs:
        df_dict - dict mapping (mic, distractor) pairs to the slice of df
                  with that mic/distractor combination (possibly empty).
    """
    df_dict = {}
    for mic in df['mic'].unique():
        for distractor in df['distractor'].unique():
            selection = (df['mic'] == mic) & (df['distractor'] == distractor)
            df_dict[(mic, distractor)] = df[selection]
    return df_dict
def get_metrics(metrics, loss, losses=None):
    """Structure the metric results into a flat dict.

    PARAMETERS
    ----------
    metrics: object
        Statistics recorded during inference; must expose the
        get_pixel_*_class / get_miou_class / get_dice_class accessors.
    loss: tensor
        Loss value.
    losses: list
        Optional [cross-entropy, dice] loss tensors.
    RETURNS
    -------
    metrics_values: dict
    """
    results = {'loss': loss.item()}
    if isinstance(losses, list):
        results['loss_ce'] = losses[0].item()
        results['loss_dice'] = losses[1].item()
    acc, acc_by_class = metrics.get_pixel_acc_class()
    prec, prec_by_class = metrics.get_pixel_prec_class()
    recall, recall_by_class = metrics.get_pixel_recall_class()
    miou, miou_by_class = metrics.get_miou_class()
    dice, dice_by_class = metrics.get_dice_class()
    results.update(
        acc=acc, acc_by_class=acc_by_class.tolist(),
        prec=prec, prec_by_class=prec_by_class.tolist(),
        recall=recall, recall_by_class=recall_by_class.tolist(),
        miou=miou, miou_by_class=miou_by_class.tolist(),
        dice=dice, dice_by_class=dice_by_class.tolist(),
    )
    return results
def daily_mean_t(tmin, tmax):
    """
    Calculates mean daily temperature [deg C] from the daily minimum and
    maximum temperatures.

    Arguments:
        tmin - minimum daily temperature [deg C]
        tmax - maximum daily temperature [deg C]
    Raises:
        ValueError - if either temperature is outside -95..+60 deg C.
    """
    # The original used Python-2 ``raise ValueError, '...'`` syntax, which is
    # a SyntaxError under Python 3; converted to the py3 call form.
    if tmin < -95.0 or tmin > 60.0:
        raise ValueError('tmin=%g is not in range -95 to +60' % tmin)
    if tmax < -95.0 or tmax > 60.0:
        raise ValueError('tmax=%g is not in range -95 to +60' % tmax)
    return (tmax + tmin) / 2.0
def energy2evap(energy, _latent_heat):
    """
    Convert energy (e.g. radiation energy) in MJ m-2 time to the equivalent
    evaporation, assuming a grass reference crop.

    The conversion factor is the inverse of the latent heat of vapourisation
    (1 / _latent_heat).

    :param energy: Energy e.g. radiation or heat flux [MJ m-2 time].
    :param _latent_heat: Calculated by latent_heat(temperature_mean).
    :return: Equivalent evaporation [mm time-1].
    :rtype: float
    """
    # Keep the exact (1/L) * E operation order so float results match.
    conversion_factor = 1 / _latent_heat
    return conversion_factor * energy
import re
def _GenerateArg(source):
"""Strips out comments, default arguments, and redundant spaces from a single argument.
Args:
source: A string for a single argument.
Returns:
Rendered string of the argument.
"""
# Remove end of line comments before eliminating newlines.
arg = re.sub(r'//.*', '', source)
# Remove c-style comments.
arg = re.sub(r'/\*.*\*/', '', arg)
# Remove default arguments.
arg = re.sub(r'=.*', '', arg)
# Collapse spaces and newlines into a single space.
arg = re.sub(r'\s+', ' ', arg)
return arg.strip() | 693e936e203a4437bf9de1fbca71a46d14b3000f | 98,318 |
from datetime import datetime
def format_mtime(mtime):
    """
    Format a file's mtime for a directory listing: 'Mon DD  YYYY' for other
    years, 'Mon DD HH:MM' for the current year.
    """
    dt = datetime.fromtimestamp(mtime)
    current_year = datetime.now().year
    year_or_time = dt.year if dt.year != current_year else dt.strftime('%H:%M')
    return '%s %2d %5s' % (dt.strftime('%b'), dt.day, year_or_time)
import json
def jpp(obj):
    """Pretty-print *obj* (dicts here) as sorted, 2-space-indented JSON."""
    return json.dumps(obj, sort_keys=True, indent=2)
def get_or_create(cls, session, **filters):
    """Retrieve or add object; return a tuple ``(object, is_new)``.

    ``is_new`` is True when no matching row existed and a fresh (not yet
    persisted) instance was created; False when the object was found in
    the database.

    NOTE(review): a newly created instance is *not* session.add()-ed here;
    presumably the caller is expected to add/commit it -- confirm.
    """
    instance = session.query(cls).filter_by(**filters).first()
    is_new = not instance
    if is_new:
        instance = cls(**filters)
    return instance, is_new
def linspace(a, b, num_points):
    """Return a list of num_points linearly spaced values from a to b
    (endpoints included).

    Args:
        a: start value.
        b: end value.
        num_points: number of samples; 1 returns [a] (the original raised
            ZeroDivisionError here), non-positive counts return [].
    """
    if num_points < 2:
        # Guard the (num_points - 1) division below.
        return [float(a)] * max(num_points, 0)
    inc = (float(b) - float(a)) / (num_points - 1)
    return [a + i * inc for i in range(num_points)]
def rep_hill(x, n):
    """Dimensionless production rate for a gene repressed by x.

    Parameters
    ----------
    x : float or NumPy array
        Concentration of repressor.
    n : float
        Hill coefficient.
    Returns
    -------
    output : NumPy array or float
        1 / (1 + x**n)
    """
    denominator = 1.0 + x ** n
    return 1.0 / denominator
def _concatenate(list_of_iters):
"""
Return the concatenation of a list of iterables as a tuple.
EXAMPLES::
sage: from sage.combinat.multiset_partition_into_sets_ordered import _concatenate
sage: L = ([1,2,3], Set([4,5,6]), [], range(7,11))
sage: _concatenate(L)
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
"""
return tuple([val for block in list_of_iters for val in block]) | 06f148c206dd2955bfb0a07fecc50f0e7a9b64fe | 98,345 |
def get_video_id(video_name):
    """
    Return the 0-indexed integer video id encoded in the first 4 characters
    of video_name (filenames carry 1-indexed ids).
    """
    encoded_id = video_name[:4]
    return int(encoded_id) - 1
import pathlib
def get_message(message):
    """
    Get cleartext from a string or from the file at a Path, and return it.

    Args:
        message: str (the cleartext itself) or pathlib.Path (file path to it).
    Returns:
        message: str. cleartext
    """
    if not isinstance(message, (str, pathlib.Path)):
        print("input str object or Path object.aborting...")
        exit()
    if isinstance(message, str):
        if message == "":
            print("message is empty.aborting...")
            exit()
        return message
    # Path input: read the file, rejecting binary content.
    try:
        with message.open() as f:
            text = f.read()
    except UnicodeDecodeError:
        print("file is binary.aborting...")
        exit()
    if text == "":
        print("message is empty.aborting...")
        exit()
    return text
def session_cache_limiter(interp, limiter=None):
    """Get and/or set the current cache limiter.

    Mirrors PHP's session_cache_limiter(): always returns the limiter that
    was active *before* the call; when `limiter` is given, it also becomes
    the new value (both in the INI config and on the session object).
    """
    # Capture the currently active limiter first -- it is the return value
    # even when a new one is being set.
    act = interp.session.cache_limiter
    space = interp.space
    if limiter is None:
        # Read-only call: just report the active limiter.
        return space.newstr(act)
    if not limiter:
        # Falsy but non-None (e.g. empty/false-like value) is stored as "".
        limiter = ""
    interp.config.set_ini_w('session.cache_limiter', space.wrap(limiter))
    interp.session.cache_limiter = limiter
    return space.newstr(act)
def mm2pt(mm=1):
    """Convert millimetres to points: 25.4mm -> 72pt (1 inch)."""
    POINTS_PER_INCH = 72.0
    MM_PER_INCH = 25.4
    return float(mm) * POINTS_PER_INCH / MM_PER_INCH
def make_proximity_sensor(radius, occlusion):
    """
    Build the string representation of a proximity sensor configuration,
    e.g. "proximity radius=5 occlusion_enabled=False".

    Args:
        radius (int or float)
        occlusion (bool): True if occlusion should be considered.
    Returns:
        str: String representation of the proximity sensor configuration.
    """
    return "proximity radius=%s occlusion_enabled=%s" % (radius, occlusion)
import re
def string_to_column_name(x: str) -> str:
    """
    Lowercase *x* and replace every run of non-alphanumeric characters with a
    single "_", yielding a usable PySpark DataFrame column name.

    Args:
        x: String to convert to column name.
    Returns:
        Re-formatted string.
    """
    lowered = x.lower()
    return re.sub(r"[^0-9a-zA-Z]+", "_", lowered)
def scrapeRaised(soup):
    """Scrape the ICO amount raised from an ICODrops listing.

    Returns the amount as int, or None when the cleaned text is not a
    number (e.g. "pending").
    """
    raw_text = soup.find("div", attrs={"class": "money-goal"}).text
    # Strip currency/grouping characters and stray whitespace control chars.
    cleaned = raw_text.translate({ord(ch): "" for ch in "$,\n\r\t"})
    try:
        return int(cleaned)
    except ValueError:
        return None
def no_parens(s):
    """Delete parentheses from a string.

    Args:
        s (str): String to delete parentheses from.
    Returns:
        str: Input without any opening or closing parentheses.
    """
    # One C-level pass instead of two chained .replace() calls.
    return s.translate(str.maketrans('', '', '()'))
from pathlib import Path
import hashlib
import base64
def create_md5(path: Path) -> str:
    """Return the base64-encoded MD5 digest of the file at *path*."""
    digest = hashlib.md5(path.read_bytes()).digest()
    return base64.b64encode(digest).decode()
def reverse_string(string):
    """Return *string* reversed.

    Uses a negative-step slice (O(n), in C) instead of the original
    character-by-character O(n^2) concatenation loop, which also shadowed
    the ``reversed`` builtin with a local variable.
    """
    return string[::-1]
import csv
def read_dataset(filepath):
    """
    Read the CSV dataset at *filepath* and return it as a list of rows.

    Parameters:
        filepath: String. The path of the file.
    Returns:
        List of string lists. The dataset.
    """
    with open(filepath) as csvfile:
        return list(csv.reader(csvfile))
def get_report_from_trajectory(traject_path, report_base):
    """Map a trajectory file name to its report name.

    Trajectory files follow the <name>_<number>.pdb convention; the result
    is <report_base>_<number>.

    Args:
        traject_path: trajectory file name, e.g. "traj_3.pdb".
        report_base: base name of the report file.
    Returns:
        str: report name carrying the trajectory's number suffix.
    """
    stem = traject_path.split(".")[0]
    # rsplit so <name> parts that themselves contain underscores still work;
    # the original ``stem.split("_")`` raised ValueError for those.
    _, number = stem.rsplit("_", 1)
    return report_base + "_" + number
from typing import Dict
import json
def _load_image_name_to_version_map() -> Dict[str, str]:
"""
_load_image_name_to_version_map returns a mapping of each image name
to the corresponding version.
e.g.
{
"mongodb-kubernetes-operator" : "0.7.2",
"mongodb-agent" : "11.0.11.7036-1"
...
}
"""
with open("release.json") as f:
release = json.loads(f.read())
# agent section is a sub object, we change the mapping so the key corresponds to the version directly.
release["mongodb-agent"] = release["mongodb-agent"]["version"]
return release | 57daded791674c8642de5b963bf63bbacfeb1c35 | 98,386 |
def organization_retrieve_doc_template_values(url_root):
    """
    Show documentation about organizationRetrieve.

    Args:
        url_root: Base URL used when rendering the "try now" link.
    Returns:
        dict: Template values consumed by the API documentation page.
    """
    # One of organization_id / organization_we_vote_id is required; see the
    # parameter descriptions and ORGANIZATION_RETRIEVE_BOTH_IDS_MISSING below.
    required_query_parameter_list = [
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
        {
            'name': 'organization_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'Internal database unique identifier (one identifier required, '
                           'either organization_id or organization_we_vote_id)',
        },
        {
            'name': 'organization_we_vote_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'We Vote unique identifier so we can move organizations from server-to-server '
                           '(one identifier required, either id or we_vote_id)',
        },
    ]
    optional_query_parameter_list = [
        # {
        #     'name': '',
        #     'value': '',  # boolean, integer, long, string
        #     'description': '',
        # },
    ]
    potential_status_codes_list = [
        {
            'code': 'ORGANIZATION_FOUND_WITH_ID',
            'description': 'The organization was found using the internal id',
        },
        {
            'code': 'ORGANIZATION_FOUND_WITH_WE_VOTE_ID',
            'description': 'The organization was found using the we_vote_id',
        },
        {
            'code': 'ORGANIZATION_RETRIEVE_BOTH_IDS_MISSING',
            'description': 'One identifier required. Neither provided.',
        },
        {
            'code': 'ORGANIZATION_NOT_FOUND_WITH_ID',
            'description': 'The organization was not found with internal id.',
        },
        {
            'code': 'ERROR_<specifics here>',
            'description': 'An internal description of what error prevented the retrieve of the organization.',
        },
    ]
    try_now_link_variables_dict = {
        'organization_we_vote_id': 'wv85org1',
    }
    # Changes made here should also be made in organizations_followed_retrieved
    api_response = '{\n' \
                   ' "success": boolean,\n' \
                   ' "status": string,\n' \
                   ' "facebook_id": integer,\n' \
                   ' "organization_banner_url": string,\n' \
                   ' "organization_description": string,\n' \
                   ' "organization_email": string,\n' \
                   ' "organization_facebook": string,\n' \
                   ' "organization_id": integer (the id of the organization found),\n' \
                   ' "organization_instagram_handle": string,\n' \
                   ' "organization_name": string (value from Google),\n' \
                   ' "organization_photo_url_large": string,\n' \
                   ' "organization_photo_url_medium": string,\n' \
                   ' "organization_photo_url_tiny": string,\n' \
                   ' "organization_type": string,\n' \
                   ' "organization_twitter_handle": string (twitter address),\n' \
                   ' "organization_we_vote_id": string (the organization identifier that moves server-to-server),\n' \
                   ' "organization_website": string (website address),\n' \
                   ' "twitter_description": string,\n' \
                   ' "twitter_followers_count": integer,\n' \
                   '}'
    template_values = {
        'api_name': 'organizationRetrieve',
        'api_slug': 'organizationRetrieve',
        'api_introduction':
            "Retrieve the organization using organization_id (first choice) or we_vote_id.",
        'try_now_link': 'apis_v1:organizationRetrieveView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
def get_all_lightfilters(scene):
    """Return a list of all lightfilters in the scene.

    Args:
        scene (bpy.types.Scene) - scene to search for lights.
    Returns:
        (list) - light objects whose renderman role is RMAN_LIGHTFILTER.
    """
    lightfilters = []
    for obj in scene.objects:
        if obj.type != 'LIGHT':
            continue
        data = obj.data
        if hasattr(data, 'renderman') and data.renderman.renderman_light_role == 'RMAN_LIGHTFILTER':
            lightfilters.append(obj)
    return lightfilters
def triple_split(triple):
    """Split a target triple into (arch, vendor, os, env) parts.

    The env part is '' when the triple has only three components.
    """
    arch, vendor, rest = triple.split('-', 2)
    # partition leaves env empty when there is no fourth component.
    os_name, _, env = rest.partition('-')
    return arch, vendor, os_name, env
def lookup(dictionary, key):
    """
    Helper tag to look up an entry inside of a dictionary by lower-cased
    key; returns `None` for nonexisting keys.
    """
    normalized_key = key.lower()
    return dictionary.get(normalized_key)
from collections import defaultdict
def get_label_indices(labels):
    """
    Group sample positions by their label.

    @param labels: list of labels
    @return: dict, {class1: [indices], class2: [indices]}
    """
    grouped = defaultdict(list)
    for position, label in enumerate(labels):
        grouped[label].append(position)
    return grouped
import math
def calculate_segment_bearing(shape_obj):
    """Calculate the bearing from a single shape object and return the angle. Assumes projected coords.
    @param - shape object from arcpy for a polyline
    returns - angle - float - angle in degrees (not azimuth)"""
    first_point = shape_obj.firstPoint
    last_point = shape_obj.lastPoint
    first_x = first_point.X
    first_y = first_point.Y
    last_x = last_point.X
    last_y = last_point.Y
    dx = last_x - first_x
    dy = last_y - first_y
    # atan2(dy, dx) measures counter-clockwise from the +X axis (East),
    # NOT from North -- consistent with the "not azimuth" note above.
    # (The original comment said "Relative to North", which was misleading.)
    rads = math.atan2(dy, dx)
    angle = math.degrees(rads)
    return angle
def pauli_wt(pauli):
    """
    Return weight of given Pauli operator(s): the count of non-identity
    (X/Y/Z) factors, summed over all operators when a list is given.

    :param pauli: A single or list of Pauli operators.
    :type pauli: str or list of str
    :return: Weight
    :rtype: int
    """
    def _weight(op):
        return sum(op.count(letter) for letter in 'XYZ')

    if isinstance(pauli, str):
        return _weight(pauli)
    return sum(map(_weight, pauli))
def link_in_path(link, node_list):
    """Check whether *link* appears in the path given by *node_list*.

    Works for undirected graphs: both orientations of the link are tested.

    Parameters
    ----------
    link : tuple
        a link expressed as a node pair tuple
    node_list : list
        a path as a list of nodes
    Returns
    -------
    indicator : boolean
        True if the link is in the path, False if not.
    """
    reverse_link = (link[1], link[0])
    for a, b in zip(node_list, node_list[1:]):
        if (a, b) == link or (a, b) == reverse_link:
            return True
    return False
def _validate_static_url_path(static_url_path):
"""
Validates the given static folder value.
Parameters
----------
static_url_path : `str`
Static url path value to validate.
Returns
-------
static_url_path : `None` or `str`
The validated static url path value.
Raises
------
TypeError
If `static_url_path` was not given either as `None` or `str` instance.
"""
if static_url_path is not None:
if type(static_url_path) is str:
pass
elif isinstance(static_url_path, str):
static_url_path = str(static_url_path)
else:
raise TypeError(
f'`static_url_path` can be given as `str` instance, got '
f'{static_url_path.__class__.__name__}.'
)
return static_url_path | ba8fd2c28a50aa0fdfa53c79fd89a62efb028fce | 98,417 |
def build_query(args_names, args_values):
    """
    Transform lists of names and values into an HTTP query string.

    Pairs whose value is falsy (None, '', 0, ...) are skipped; the result
    is '' when no pair survives.

    Args:
        args_names (:obj:`list` of :obj:`str`): List of names.
        args_values (:obj:`list`): List of values, must have same length of args_names.
    Returns:
        :obj:`str` : Query string.
    """
    pairs = [f'{name}={value}'
             for name, value in zip(args_names, args_values)
             if value]
    if not pairs:
        return ''
    return '?' + '&'.join(pairs)
import torch
def region_mask(n, min_mask_size, max_mask_size, maskable_length, device=None):
    """Create a batch of ``n`` random region masks.

    Returns a boolean tensor of shape `(n, maskable_length)` where each row
    has one contiguous run of True positions whose length is drawn uniformly
    between ``min_mask_size`` and ``max_mask_size`` (inclusive).
    """
    size_span = max_mask_size - min_mask_size
    sizes = min_mask_size + torch.rand([n, 1], device=device) * size_span
    starts = torch.rand([n, 1], device=device) * (maskable_length - sizes)
    ends = starts + sizes
    # Compare start/end positions against absolute indices to build the masks.
    positions = torch.arange(0, maskable_length, device=device)
    return (starts <= positions) & (positions < ends)
import ast
def get_scope(series):
    """Return a string of the in-scope words of a sentence.

    ``series['scopes_idx']`` is a stringified list of 0/1 flags, one per
    whitespace-separated token of ``series['sent']``.
    """
    flags = ast.literal_eval(series["scopes_idx"])
    tokens = series["sent"].split(" ")
    return " ".join(token for flag, token in zip(flags, tokens) if flag)
from typing import Optional
def ealive(self, elem: str = "", **kwargs) -> Optional[str]:
    """Reactivates an element (for the birth and death capability).

    APDL Command: EALIVE

    Parameters
    ----------
    elem
        Element to be reactivated:
        ALL - Reactivates all selected elements (ESEL).
        Comp - Specifies a component name.

    Notes
    -----
    Reactivates the specified element when the birth and death capability
    is being used; an element can only be reactivated after it has been
    deactivated (EKILL). Reactivated elements have a zero strain (or
    thermal heat storage, etc.) state.

    ANSYS, Inc. recommends the deactivation/reactivation procedure for
    analyses involving linear elastic materials only -- not for
    time-dependent materials (viscoelasticity, viscoplasticity, creep).

    This command is also valid in PREP7.
    """
    return self.run(f"EALIVE,{elem}", **kwargs)
def rdb_names(names):
    """Return the usual .rdb format names for *names*.

    Known aliases are normalized (case-insensitively); unknown names are
    passed through unchanged.

    Bug fixed: the original tested ``name.lower() in ('fwhm')`` and
    ``in ('contrast')`` -- those parentheses are not tuples, so the test
    was a *substring* match (e.g. 'w' wrongly mapped to 'fwhm').
    """
    aliases = {
        'rv': 'vrad', 'vrad': 'vrad', 'radvel': 'vrad',
        'rve': 'svrad', 'svrad': 'svrad', 'error': 'svrad',
        'err': 'svrad', 'rverror': 'svrad',
        'fwhm': 'fwhm',
        'bis': 'bis_span', 'bisspan': 'bis_span', 'bis_span': 'bis_span',
        'contrast': 'contrast',
    }
    return [aliases.get(name.lower(), name) for name in names]
def equal_mirror(t, s):
    """
    Return whether tree t is the mirror image of tree s.
    """
    if t is None or s is None:
        # Mirrors only when both subtrees are absent.
        return t is s
    if t.value != s.value:
        return False
    return equal_mirror(t.left, s.right) and equal_mirror(t.right, s.left)
def round_coords(*coords, res=0.5, ndigits=1):
    """Return coordinates rounded to *ndigits* then snapped down to
    multiples of *res*.

    >>> round_coords(45.4451574, 11.1331589)
    (45.0, 11.0)
    """
    def _snap(value):
        return (round(value, ndigits=ndigits) // res) * res

    return tuple(_snap(coord) for coord in coords)
import random
import string
def createRandomStrings(l, n):
    """Create a list of l random strings, each of length n (uppercase A-Z)."""
    alphabet = string.ascii_uppercase
    return [''.join(random.choice(alphabet) for _ in range(n))
            for _ in range(l)]
def read_null_terminated_string(data: bytes, offset: int, encoding: str = 'ascii') -> str:
    """
    Return the null-terminated string starting at *offset* in *data*.

    :param data: a continuous sequence of bytes
    :param offset: offset to start looking
    :param encoding: an encoding to interpret the string
    :return: the found string (terminator excluded)
    """
    end = offset
    while data[end] != 0:
        end += 1
    return data[offset:end].decode(encoding)
from typing import List
def get_tail_size(shape: List[int], ndims: int) -> int:
    """
    Get the total size (product) of the last *ndims* dimensions of *shape*.

    Args:
        shape: The whole shape.
        ndims: The number of tail dimensions; ``ndims <= len(shape)`` is
            required.
    Returns:
        The total size of the tail (1 when ndims == 0).
    """
    rank = len(shape)
    if rank < ndims:
        raise ValueError('`ndims <= len(shape)` does not hold: '
                         '`ndims` == {}, `shape` == {}'.format(ndims, shape))
    size = 1
    for dim in shape[rank - ndims:]:
        size *= dim
    return size
def mix(x, y, a):
    """Linearly interpolate: return x * (1 - a) + y * a.

    x and y may be floats or float vectors; a may be a float or
    float-vector (also when x and y are vectors).
    """
    weight_x = 1 - a
    return x * weight_x + y * a
def remove_long_tail(df2, column, count_threshold, value):
    """Collapse rare values of *column* (in place) into *value*.

    Values whose counts fall below *count_threshold* are replaced.
    Returns True when at least one value was collapsed.
    """
    counts = df2[column].value_counts()
    rare_values = counts[counts < count_threshold].index
    df2.loc[df2[column].isin(rare_values), column] = value
    return len(rare_values) > 0
def issubclass_safe(x, klass):
    """Return issubclass(x, klass), returning False instead of raising
    TypeError when x is not a class."""
    try:
        result = issubclass(x, klass)
    except TypeError:
        return False
    return result
def sort_whisker_names(curated_whiskers_as_dict):
    """Sort whisker names in anatomical order.

    curated_whiskers_as_dict : dict from object label to whisker name

    Each name is bucketed as junk (contains "junk"), unk (contains "unk"),
    greek (all-lowercase), or real (everything else).

    Returns: dict
        'greek', 'real', 'junk', 'unk' : the buckets, in insertion order
        'sorted_order' : sorted greek + real + junk + unk
    """
    buckets = {'junk': [], 'unk': [], 'greek': [], 'real': []}
    for whisker_name in curated_whiskers_as_dict.values():
        if 'junk' in whisker_name:
            key = 'junk'
        elif 'unk' in whisker_name:
            key = 'unk'
        elif str.islower(whisker_name):
            key = 'greek'
        else:
            key = 'real'
        buckets[key].append(whisker_name)
    ordered = (sorted(buckets['greek']) + sorted(buckets['real']) +
               sorted(buckets['junk']) + sorted(buckets['unk']))
    return {
        'greek': buckets['greek'],
        'junk': buckets['junk'],
        'unk': buckets['unk'],
        'real': buckets['real'],
        'sorted_order': ordered,
    }
from typing import List
def save_topics(model, vectorizer, top_n: int = 10) -> List:
    """Collect the top_n words of each topic from a trained model.

    Args:
        model: sklearn LatentDirichletAllocation model.
        vectorizer: sklearn CountVectorizer.
        top_n (int): number of words to keep per topic.
    Returns:
        list: the top_n words (highest weight first) for each topic.
    """
    # Vocabulary lookup is the same for every topic, so fetch it once.
    feature_names = vectorizer.get_feature_names()
    words_per_topic = []
    for topic in model.components_:
        top_indices = topic.argsort()[:-top_n - 1:-1]
        words_per_topic.append([feature_names[i] for i in top_indices])
    return words_per_topic
def remap(v, in_range, out_range):
    """Rescale a value from one range to another, clamped to out_range.

    NOTE(review): the normalization is (in_range[1] - v) / span, so the
    mapping is *inverted* (v == in_range[0] maps to out_range[1] and
    vice versa); preserved as-is -- confirm this is intentional.
    """
    span_in = in_range[1] - in_range[0]
    normalized = (in_range[1] - v) / span_in
    clamped = max(0, min(1, normalized))
    return out_range[0] + clamped * (out_range[1] - out_range[0])
def dfilter(data, f):
    """
    Match shodan port data against a filter of the form "protocol/port".
    Returns True when both the transport and the (integer) port match.
    """
    parts = f.split("/")
    protocol_matches = data['transport'] == parts[0]
    port_matches = data['port'] == int(parts[1])
    return protocol_matches and port_matches
def get_camera_matrix(camera, global_matrix):
    """
    Decompose the camera transform into origin, forward, up and target.

    NOTE(review): assumes mathutils-style matrices (``*`` product, ``.col``
    column access); column 3 is translation, column 2 the negated view
    direction and column 1 the up vector — confirm against caller.
    """
    world = global_matrix * camera.matrix_world
    origin = world.col[3]
    forward = -world.col[2]
    up = world.col[1]
    target = origin + forward
    return origin, forward, up, target
def parse_req(spec: str) -> str:
    """Extract the bare ``name==version`` requirement from a requirements line.

    Strips, in order: environment markers (``; ...``), comments (``# ...``),
    line continuations (``\\``) and ``--hash=...`` options, then trims any
    surrounding whitespace (the original left trailing spaces behind).

    Args:
        spec: One line from a requirements file.

    Returns:
        The requirement specifier with all decorations removed.
    """
    # Each separator introduces text that is not part of the requirement
    # itself; keep only what precedes its first occurrence.
    for separator in (";", "#", "\\", "--hash="):
        if separator in spec:
            spec = spec.split(separator, 1)[0]
    return spec.strip()
def tof(i, shape):
    """Resolve ``i`` to a tensor: call it with ``shape`` if it is an
    initialization function, otherwise return it unchanged.

    Parameters
    -------
    i : tensor or function
        Tensor, or callable that builds a tensor from a shape
    shape : list or tuple
        Shape passed to the initializer when ``i`` is callable

    Returns
    -------
    : tensor
        The tensor, or the freshly initialized tensor
    """
    return i(shape) if callable(i) else i
def create_stubs(skylark_stubs, load_symbols):
    """Combine Skylark stubs with stubs for loaded symbols.

    Copies the global Skylark stub dict and adds one empty-string global for
    every symbol loaded from another .bzl file (using its alias when one is
    set, the raw symbol name otherwise).

    Args:
        skylark_stubs: Dict containing the Skylark global stubs.
        load_symbols: List of load_extractor.LoadSymbol objects describing
            symbols extracted from other .bzl files.

    Returns:
        Dict with both the Skylark global stubs and the loaded-symbol stubs.
    """
    combined = dict(skylark_stubs)
    for sym in load_symbols:
        # A truthy alias shadows the original symbol name.
        combined[sym.alias or sym.symbol] = ""
    return combined
import re
def to_snake_case(string):
    """Converts string to snake_case.

    Spaces and hyphens become underscores and everything is lowercased
    (we don't title-case, so acronyms survive):
        PSK -> psk
        api key -> api_key
        content-length -> content_length
        user -> user
    """
    return string.replace(" ", "_").replace("-", "_").lower()
def is_leap_year(year):
    """
    Helper function for get_timestamp.

    :param year: The year (int, or a string convertible to int).
    :returns: True if the year is a leap year, else False.
    """
    year = int(year)
    # Gregorian rule: divisible by 4, except centuries, unless divisible
    # by 400. Returning a bool (instead of the old 0/1 ints) keeps callers
    # working since bool is an int subclass, and matches the docstring.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def dot_appended_param(param_key, reverse=False):
    """Returns ``param_key``, guaranteed to end with ``'.'``.

    With ``reverse=True`` the result instead has exactly one trailing ``'.'``
    removed after normalisation, so it *does not* end with the dot that the
    forward pass would have added.
    """
    normalized = param_key if param_key.endswith(".") else param_key + "."
    # reverse: drop the single trailing '.' that normalisation guarantees.
    return normalized[:-1] if reverse else normalized
def render_region(context, regions, region, **kwargs):
    """
    Render a single region by delegating to ``regions.render``. See
    :mod:`feincms3.renderer` for additional details.
    """
    rendered = regions.render(region, context, **kwargs)
    return rendered
def UpdateMaskHook(ref, args, req):
    """Constructs updateMask for patch requests.

    Works for both Container and Function patch requests: every CLI flag the
    user actually specified is translated into its API field name and the
    comma-joined result is written to ``req.updateMask``.

    Args:
        ref: A resource ref to the parsed Edge Container resource.
        args: The parsed args namespace from CLI.
        req: Created Patch request for the API call.

    Returns:
        Modified request for the API call.
    """
    del ref  # Unused.
    flag_to_field = {
        # Common flags
        '--input-topic': 'inputTopics',
        '--output-topic': 'outputTopics',
        '--description': 'description',
        # Container and Function flags
        '--memory': 'availableMemoryMb',
        '--volume-binding': 'volumeBindings',
        '--device-binding': 'deviceBindings',
        # Container flags
        '--docker-image': 'dockerImageUri',
        '--autostart': 'autostart',
        '--no-autostart': 'autostart',
        # Function flags
        '--source': 'dockerImageUri',
        '--timeout': 'requestTimeout',
        '--entry-point': 'entryPoint',
        '--function-type': 'functionType'
    }
    mask_fields = []
    for flag in args.GetSpecifiedArgNames():
        field = flag_to_field.get(flag)
        if field is not None:
            mask_fields.append(field)
        elif 'env-var' in flag and 'environmentVariables' not in mask_fields:
            # Several env-var flags exist; record the field only once.
            mask_fields.append('environmentVariables')
    req.updateMask = ','.join(mask_fields)
    return req
import math
def get_relative_index(value, min_value, step):
    """Return the relative bin index of a value given bin start and bin span.

    Args:
        value: The value to be converted to an index (must be >= min_value).
        min_value: The minimum value of the value series.
        step: The span of each bin (must be positive).

    Returns:
        Zero-based integer index of the bin containing ``value``.
    """
    assert value >= min_value
    assert step > 0
    offset = value - min_value
    return math.floor(offset / step)
def parse_line(line):
    """Takes a string assumed to be in the format:
        <long string>,<char>
    Splits on the LAST comma — so the long string may itself contain
    commas — and returns both parts stripped of surrounding whitespace.
    """
    # rsplit with maxsplit=1: the original split(',') raised ValueError
    # whenever the long string contained a comma of its own.
    s, c = line.rsplit(',', 1)
    return s.strip(), c.strip()
def filestore_pregenerator(request, elements, kw):
    """Pregenerator for the filestore, which may run under a different domain
    name in the case of a CDN cacher thinger.
    """
    settings = request.registry.settings
    root = settings.get('cdn_root')
    if root:
        # Point route URL generation at the CDN host instead of the app.
        kw['_app_url'] = root
    return elements, kw
def handle_integer_input(input, desired_len):
    """
    Normalise a parameter spec to a tuple of the desired length.

    An int is replicated ``desired_len`` times; a tuple is validated to have
    exactly ``desired_len`` entries and returned unchanged.

    Parameters
    ----------
    input : int, tuple
        Either a tuple of parameters or a single parameter to be replicated
    desired_len : int
        The length of the desired tuple

    Returns
    -------
    input : tuple[int]
        a tuple of parameters which has the proper length.
    """
    if type(input) is tuple:
        if len(input) == desired_len:
            return input
        raise AssertionError("The sizes of the parameters for the CNN conceptizer do not match."
                             f"Expected '{desired_len}', but got '{len(input)}'")
    if type(input) is int:
        return (input,) * desired_len
    raise TypeError(f"Wrong type of the parameters. Expected tuple or int but got '{type(input)}'")
def index_startswith_substring(the_list, substring):
    """Return the index of the first element in ``the_list`` that starts with
    ``substring``, or -1 when no element matches.
    """
    return next(
        (idx for idx, item in enumerate(the_list) if item.startswith(substring)),
        -1,
    )
def coord2pix(y, downsample):
    """Convert a real coordinate to a (fractional) pixel index for the
    3D->2D projection.

    Inverse of ``pix * downsample + downsample / 2.0 - 0.5 = y``.
    """
    center_offset = downsample / 2.0 - 0.5
    return (y - center_offset) / downsample
from typing import Tuple
def parse_table(fq_table: str) -> Tuple[str, str]:
    """Parse a tablename into tuple(<schema>, <table>).

    Schema defaults to doc if the table name doesn't contain a schema.

    >>> parse_table('x.users')
    ('x', 'users')

    >>> parse_table('users')
    ('doc', 'users')

    Raises:
        ValueError: if the name contains more than one ``.`` separator.
    """
    parts = fq_table.split('.')
    if len(parts) == 1:
        return 'doc', parts[0]
    if len(parts) == 2:
        schema, table = parts
        return schema, table
    # Give the caller a usable message instead of a bare ValueError.
    raise ValueError(f'Invalid table name: {fq_table!r}')
import bz2
import json
def read_krun_results_file(results_file):
    """Return the JSON data stored in a (bzip2-compressed) Krun results file.

    Args:
        results_file: Path to the ``.json.bz2`` results file.

    Returns:
        The decoded JSON payload.
    """
    # The original version ended with an unreachable ``return None`` after
    # the unconditional return of the parsed results; that dead code (and
    # the redundant ``results = None`` initialiser) is removed here.
    with bz2.BZ2File(results_file, 'rb') as file_:
        return json.loads(file_.read())
def regular_polygon_angle(side_amount: int) -> float:
    """
    Interior angle (in degrees) of a regular polygon with the given number
    of sides.

    :param side_amount: amount of sides the polygon has
    :return: the interior angle in degrees
    """
    # Same operation order as the classic (n - 2) * (180 / n) formula so
    # floating-point results are bit-identical.
    per_side = 180 / side_amount
    return (side_amount - 2) * per_side
def set_up_default_channels(clean_up=False):
    """Block of text to populate a Text widget."""
    if clean_up:
        lines = ["@OdyseeHelp, 2",
                 "@my-favorite-channel-vmv, 15",
                 "@lbry, 1",
                 "@The-Best-Channel-ABC, 5"]
    else:
        lines = ["@my-favorite-channel, 5",
                 "@OdyseeHelp#b, 4",
                 "@lbry:3f, 6"]
    return "\n".join(lines)
def api_url(lang):
    """Return the URL of the API based on the language of Wikipedia."""
    return f"https://{lang}.wikipedia.org/w/api.php"
def _get_constant_function(constant: float):
"""
Defines a callable, that returns the constant, regardless of the input.
"""
def function(x):
return constant
return function | 18decf6639bb3829f5b4ef6fd7a49a8f30e40f57 | 98,561 |
def chao1_var_bias_corrected(singles, doubles):
    """Calculates chao1 variance, bias-corrected.

    From EstimateS manual, equation 6.

    :param singles: count of singletons (species observed exactly once,
        per the EstimateS terminology — confirm against caller)
    :param doubles: count of doubletons (species observed exactly twice)
    :returns: the bias-corrected variance of the Chao1 estimate
    """
    # Cast once so the ratios below use float division even for int inputs.
    s, d = float(singles), float(doubles)
    # Three additive terms of EstimateS eq. 6; the (d+1) denominators keep
    # the expression defined when there are no doubletons (d == 0).
    return s*(s-1)/(2*(d+1)) + (s*(2*s-1)**2)/(4*(d+1)**2) + \
           (s**2 * d * (s-1)**2)/(4*(d+1)**4)
def is_prime(p):
    """
    Returns True if p is a prime.

    This function uses Fermat's little theorem to quickly remove most
    candidates, then confirms with trial division (Fermat pseudoprimes such
    as 341 would otherwise slip through).

    https://en.wikipedia.org/wiki/Fermat%27s_little_theorem
    """
    if p == 2:
        return True
    # pow(2, p-1, p) is modular exponentiation; the original 2**(p-1) % p
    # materialised a (p-1)-bit power first, which is hopeless for large p.
    elif p <= 1 or p % 2 == 0 or pow(2, p - 1, p) != 1:
        return False
    return all(p % n != 0 for n in range(3, int(p ** 0.5 + 1), 2))
def list_of_values(dictionary, list_of_keys, default=None):
    """
    Converts a dict to a list of its values,
    with the default value inserted for each missing key::

        >>> list_of_values({"a":1, "b":2, "d":4}, ["d","c","b","a"])
        [4, None, 2, 1]

        >>> list_of_values({"a":1, "b":2, "d":4}, ["d","c","b","a"], default=0)
        [4, 0, 2, 1]
    """
    return [dictionary.get(key, default) for key in list_of_keys]
import re
import itertools
def parse_synonyms(template):
    """Parses and returns a list of string templates.

    Args:
        template (string): a template string. Converts "[Synonym1, Synonym2] ..."
            into separate list items, one per combination of synonyms.

    Returns:
        templates (array): a list of generated template strings from the original.
    """
    templates = []
    if "[" in template and "]" in template:
        # Get all synonyms enclosed in [] and convert into lists.
        # Raw string: in a plain literal, "\[" is an invalid escape sequence
        # (DeprecationWarning today, a SyntaxError in future Python).
        matches = re.findall(r"\[.*?\]", template)
        synonyms = [m[1:-1].split(", ") for m in matches]
        # Create every possible combination from the words
        combinations = list(itertools.product(*synonyms))
        # Substitute every match with the correct synonym
        for combination in combinations:
            # Make a new template
            new_template = template
            # Replace the synonym match block with every word combination
            for match_block, word in zip(matches, combination):
                new_template = new_template.replace(match_block, word)
            # Add new template to templates
            templates.append(new_template)
    else:
        # No synonyms, just return the original template
        templates.append(template)
    return templates
def coordinates(line):
    """
    Produce a (lng, lat) coordinate tuple.

    line: CSV string from RDD; fields 3 and 4 (zero-based) hold the
    longitude and latitude.
    """
    fields = line.split(",")
    return float(fields[3]), float(fields[4])
def zip_with_map(mapper, iterable):
    """
    Return an iterator of pairs where the first element is the item from
    *iterable* and the second is its mapped version, i.e. the item applied
    to the *mapper* function.

    >>> list(zip_with_map(lambda x: x**2, [1, 2, 3]))
    [(1, 1), (2, 4), (3, 9)]

    The original ``zip(iterable, map(mapper, iterable))`` walked the input
    twice, so for a one-shot iterator it interleaved consumption and paired
    each item with the *next* item's mapped value. A single pass fixes that
    and still returns a lazy iterator.
    """
    return ((item, mapper(item)) for item in iterable)
import requests
import json
def bus_arrivals(naptan_id: str):
    """Use TFL API to return a list of bus arrivals at the param bus stop.

    Each item is a tuple of
    (seconds_to_arrival, minutes_to_arrival, stop_name, bus_number, towards),
    sorted with the soonest arrival first.

    Returns None if the TFL API responds with a non-200 status, and an empty
    list when no buses are expected. May raise requests.Timeout if TFL does
    not answer within the timeout (previously the call could hang forever).
    """
    arrivals_url = 'https://api.tfl.gov.uk/StopPoint/' + naptan_id + '/arrivals'
    # Invoke the TFL API; a timeout prevents an unreachable endpoint from
    # blocking the caller indefinitely.
    response = requests.get(url=arrivals_url, timeout=10)
    if response.status_code != 200:
        return None
    # Convert JSON to a list of dictionaries (one per expected arrival).
    arrivals = json.loads(s=response.text)
    bus_list = []
    for arrival in arrivals:
        exp_arrival_secs = arrival.get('timeToStation')
        exp_arrival_mins = int(exp_arrival_secs / 60)
        bus_stop_name = arrival.get('stationName')
        bus_number = arrival.get('lineId')
        towards = arrival.get('towards')
        bus_list.append((exp_arrival_secs, exp_arrival_mins, bus_stop_name,
                         bus_number, towards))
    # Tuples sort by their first item (seconds until arrival), so the next
    # bus to arrive ends up at the head of the list.
    return sorted(bus_list)
import io
import csv
def list_to_csv(list_data):
    """Converts a Python list into a single CSV-formatted line (no newline)."""
    buffer = io.StringIO()
    # Suppress the writer's default "\r\n" row terminator up front instead
    # of slicing the last two characters off afterwards.
    csv.writer(buffer, lineterminator="").writerow(list_data)
    return buffer.getvalue()
def insertion_sort(array):
    """insertion_sort(list) -> list

    Sort ``array`` in place using insertion sort and return the same list.

    >>> insertion_sort([3, 2, 13, 4, 6, 5, 7, 8, 1, 20])
    [1, 2, 3, 4, 5, 6, 7, 8, 13, 20]
    """
    for right in range(1, len(array)):
        current = array[right]
        insert_at = right
        # Shift every larger element one slot to the right, then drop the
        # current value into the gap.
        while insert_at > 0 and array[insert_at - 1] > current:
            array[insert_at] = array[insert_at - 1]
            insert_at -= 1
        array[insert_at] = current
    return array
from typing import List
def flatten_dict_to_pairs(labeled_data) -> List[list]:
    """
    Takes as input a dict of key -> list and turns it into a single unified
    list of [entry, label] pairs. Thus
        { 'a': ['frodo', 'sam'], 'b': ['merry', 'pippin' ] }
    becomes
        [ ['frodo', 'a'], ['sam', 'a'], ['merry', 'b'], ['pippin', 'b'] ]
    :param labeled_data: The dict of labeled data.
    :return: Combined list of all values with parallel list of all matching labels.
    """
    return [[entry, label]
            for label, entries in labeled_data.items()
            for entry in entries]
def euclidean_area(poly, precision=6):
    """
    Shoelace-formula (Green's theorem) area of a closed polygon whose
    vertices are ordered 2D points. Works for convex and concave polygons
    that do not self-intersect.
    https://gist.github.com/rob-murray/11245628

    Args:
        poly: The polygon as a list of vertices (2D point pairs).
        precision: Decimal places to round the returned result to.

    Returns:
        float: The calculated polygon area (units follow the coordinates).
    """
    # Guard against strings, which would otherwise iterate character-wise
    # since several Python types quack like a list.
    assert not isinstance(poly, str)
    cross_sum = 0.0
    vertex_count = len(poly)
    for idx in range(vertex_count):
        p, q = poly[idx], poly[(idx + 1) % vertex_count]
        cross_sum += p[0] * q[1] - p[1] * q[0]
    return float(round(abs(cross_sum / 2), precision))
def transGraphToList(graph={}):
    """ Transforms a graph to a list of strings
    of the form -- ["key1:v11, v12, v13", "key2:v21, v22, v23 ...."]
    (each entry is terminated with a newline).

    Preconditions: Takes a graph (dict mapping keys to lists of values)
    Postconditions: Returns a list of strings, one per key, e.g.
        {1: [2, 3, 4], 2: [5, 6, 9]}  ->  ['1:2, 3, 4\\n', '2:5, 6, 9\\n']

    NOTE: the mutable default argument is kept for interface compatibility;
    the function never mutates it.
    """
    trans_list = []
    for key, values in graph.items():
        # ", ".join renders the value list directly. The original built
        # "v," fragments joined by spaces and then rstrip(',')-ed, which
        # could also strip commas that belonged to the last value itself.
        value_string = ", ".join(str(v) for v in values)
        trans_list.append(f"{key}:{value_string}\n")
    return trans_list
def contains_one_instance(tokenized, element):
    """ Return True if tokenized contains exactly one instance of the class
    element. """
    return sum(1 for token in tokenized if isinstance(token, element)) == 1
def parse_bool(string: str) -> bool:
    """Parse the many textual representations of booleans into a bool.

    "t"/"true"/"1" (any case) map to True; "f"/"false"/"0" map to False.

    Raises:
        ValueError: for any other string.
    """
    normalized = string.lower()
    if normalized in ("t", "true", "1"):
        return True
    if normalized in ("f", "false", "0"):
        return False
    raise ValueError("{} is not a valid boolean string".format(string))
def parse_game_type(data: dict) -> str:
    """Parse and format a game's type.

    Produces "<rated|unrated> <speed> (<variant>)", falling back to
    "unknown" for the body when no speed is present.
    """
    speed = data.get('speed')
    variant = data.get('variant')
    base = 'unknown'
    if speed:
        base = '%s (%s)' % (speed, variant or 'standard')
    # A missing 'rated' key counts as unrated, same as an explicit False.
    prefix = 'rated' if data.get('rated') else 'unrated'
    return '%s %s' % (prefix, base)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.