content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def colon_based_id_to_vep_id(colon_id):
    """Convert a colon-based identifier to a VEP-compatible one.

    Example: '15:7237571:C:T' -> '15 7237571 . C T'
    """
    fields = colon_id.split(':')
    assert len(fields) == 4, 'Invalid colon-based identifier supplied (should contain exactly 4 fields)'
    chrom, pos, ref, alt = fields
    return f'{chrom} {pos} . {ref} {alt}'
def filter_file_paths_by_extension(file_paths, ext='csv'):
    """
    Filter out file paths that do not have the given extension.

    :param file_paths: list of file path strings
    :param ext: valid extension, with or without a leading dot
    :return: list of the paths that end with the extension
    """
    if not file_paths:
        return []
    # Normalize the extension once; the original re-derived it on every
    # loop iteration.
    if ext[0] != '.':
        ext = '.' + ext
    return [path for path in file_paths if path.endswith(ext)]
import time
def dateTimeNow(str_sign=''):
    """
    .. _dateTimeNow :

    Return the current date and time (UTC) as a string.

    Return format: 'yyyy-mm-dd, HH:MM:SS' + str_sign

    Parameters
    ----------
    str_sign : str
        A sign to append to the 'yyyy-mm-dd, HH:MM:SS'.

    Returns
    -------
    str_date_time : str
        Formatted date-time string in 'yyyy-mm-dd, HH:MM:SS' + str_sign.

    Examples
    --------
    >>> dateTimeNow(':')
    '2017-11-20, 15:14:42:'
    """
    # time.gmtime() yields UTC rather than local time.
    return time.strftime('%Y-%m-%d, %H:%M:%S', time.gmtime()) + str_sign
def variable_name_to_option_name(variable: str):
    """
    Convert a variable name like `ec2_user` to the Click option name it
    gets mapped to, like `--ec2-user`.
    """
    option = variable.replace('_', '-')
    return f'--{option}'
def rrl(n, dn=1, amu=1.007825, Z=1):
    """
    Compute Radio Recombination Line frequencies in GHz.

    From Brown, Lockman & Knapp ARAA 1978 16 445.
    UPDATED: Gordon & Sorochenko 2009, eqn A6.

    Parameters
    ----------
    n : int
        The number of the lower level of the recombination line (H1a is Lyman
        alpha, for example)
    dn : int
        The delta-N of the transition. alpha=1, beta=2, etc.
    amu : float
        The mass of the central atom
    Z : int
        The ionization parameter for the atom. Z=1 is neutral, Z=2 is singly
        ionized, etc. For hydrogen, only z=1 makes sense, since ionized
        hydrogen has no electrons and therefore cannot have recombination
        lines.

    Returns
    -------
    frequency in GHz
    """
    # The original computed a first estimate with the 1978 formula and then
    # immediately overwrote it (dead code); only the Gordon & Sorochenko
    # (2009) expression is kept here.
    nu = (3.28984196e6 * Z**2
          * (amu - 5.48579911e-4 * (Z + 1)) / (amu - 5.48579911e-4 * Z)
          * (1 / float(n)**2 - 1 / float(n + dn)**2))
    return nu
def record_has(inrec, fieldvals):
    """Return True if any field of *inrec* matches one of the given values.

    ``fieldvals`` maps field names to sets of accepted values, e.g.
    {'field_name': set([val1, val2])}. A string field is compared as a
    whole; any other field is treated as a collection of values.
    """
    for field, wanted in fieldvals.items():
        value = inrec[field]
        candidates = {value} if isinstance(value, str) else set(value)
        if candidates & wanted:
            return True
    return False
def force_string(val=None):
    """Force a string representation of an object.

    Args:
        val: object to parse into a string

    Returns:
        str: '' for None, ';'-joined element strings for a list,
        the value itself for a str, str(val) otherwise.
    """
    if val is None:
        return ''
    if isinstance(val, str):
        return val
    if isinstance(val, list):
        return ';'.join(str(item) for item in val)
    return str(val)
def id_is_int(patient_id):
    """Check whether the patient's id can be parsed as an integer.

    Args:
        patient_id (int): id number of the patient

    Returns:
        True or str: True when parseable; otherwise an explanatory
        message string (note: a truthy string, not False).
    """
    try:
        int(patient_id)
    except ValueError:
        return "Please use an integer or a numeric string containing " \
               "an ID number but without any letter"
    return True
import math
def sqroot(x):
    """Return the square root of *x* (thin wrapper over math.sqrt)."""
    root = math.sqrt(x)
    return root
def get_relative_error_success_count(relative_errors, threshold=0.05):
    """Return the count of errors at or below *threshold*."""
    return sum(1 for err in relative_errors if err <= threshold)
import math
def land_cost(starting_land: int, ending_land: int, *, multiplier: float = 1, policy: bool = False) -> float:
    """
    Calculate the cost to purchase or sell land.

    Purchases are priced in 500-unit tranches whose unit price grows
    quadratically with the amount already owned; sales refund a flat 50
    per unit (and skip the policy discount and multiplier via the early
    return below).

    :param starting_land: A starting land amount.
    :param ending_land: The desired land amount.
    :param multiplier: A multiplier to adjust the ending result by.
    :param policy: If the land policy is being used (5% discount on purchases).
    :return: The cost to purchase or sell land (negative when selling).
    """
    def unit_cost(amount: int):
        # Quadratic price curve centred on 20 units with a floor of 50/unit.
        return (.002*(amount-20)*(amount-20))+50
    difference = ending_land - starting_land
    cost = 0
    if difference < 0:
        # Selling: flat 50 per unit; result is negative.
        return 50 * difference
    if difference > 500 and difference % 500 != 0:
        # Buy the sub-500 remainder first so the loop below proceeds in
        # whole 500-unit tranches at successively higher unit prices.
        delta = difference % 500
        cost += round(unit_cost(starting_land), 2) * delta
        starting_land += delta
        difference -= delta
    for _ in range(math.floor(difference // 500)):
        # Each full tranche is priced at its starting land amount.
        cost += round(unit_cost(starting_land), 2) * 500
        starting_land += 500
        difference -= 500
    if difference:
        # Remaining amount (only reachable when difference <= 500).
        cost += (round(unit_cost(starting_land), 2) * difference)
    if policy:
        cost = cost * 0.95
    return cost * multiplier
def skipped_iter(data, skip_criteria):
    """Utility function that returns an iterator where leading data items are
    skipped according to some provided criteria.

    Parameters:
    -----------
    data - list of data items
    skip_criteria - lambda or function that takes an item and determines
                    whether or not to skip.

    Returns:
    --------
    a data item iter starting at the first unskipped item. Returns an
    empty iterator when *data* is empty or every item is skipped (the
    original raised a bare StopIteration in those cases).
    """
    skipped = 0
    for item in data:
        if not skip_criteria(item):
            break
        skipped += 1
    return iter(data[skipped:])
def pprint(ast):
    """Pretty-print an Abstract Syntax Tree to stdout, one node per line,
    indenting one space per tree level; operator tags are printed in red."""
    RED, RESET = '\033[31m', '\033[0m'

    def _walk(node, depth):
        indent = " " * depth
        tag = node[0]
        if tag in ("symb", "value"):
            # leaf: print the payload only
            print(indent + str(node[1]))
        elif tag == "non":
            # unary node: tag plus a single child
            print(indent + RED + tag + RESET)
            _walk(node[1], depth + 1)
        else:
            # binary node: tag plus two children
            print(indent + RED + tag + RESET)
            _walk(node[1], depth + 1)
            _walk(node[2], depth + 1)

    _walk(ast, 0)
from datetime import datetime
def get_datetime(ts):
    """Convert an epoch timestamp in milliseconds to a datetime.

    Returns None when *ts* is falsy (None, 0, '').
    """
    if not ts:
        return None
    seconds = int(int(ts) / 1000)
    return datetime.fromtimestamp(seconds)
def getattrs(value, attrs):
    """Extract a list of attributes from *value* into a dict of
    (attr, value[attr]) mappings.

    Args:
        value (object):
            Any Python object upon which `getattr` can act.
        attrs (iterable):
            Any iterable containing attribute names to extract.

    Returns:
        `dict` of attr -> val mappings.

    Example:
        >>> getattrs(complex(2,3), ['imag', 'real'])
        {'imag': 3.0, 'real': 2.0}
    """
    return {attr: getattr(value, attr) for attr in attrs}
def tileSizePixels() -> int:
    """Return the tile height and width in pixels (tiles are square)."""
    TILE_SIZE = 256
    return TILE_SIZE
def GetFullPartitionSize(partition, metadata):
    """Get the size of the partition including metadata/reserved space in bytes.

    The partition only has to be bigger for raw NAND devices. Formula:
    - Add UBI per-block metadata (2 pages) if partition is UBI
    - Round up to erase block size
    - Add UBI per-partition metadata (4 blocks) if partition is UBI
    - Add reserved erase blocks

    Args:
        partition: dict with 'bytes' and optionally 'format' and
            'reserved_erase_blocks' keys.
        metadata: dict with optional 'erase_block_size' and 'page_size' keys.

    Returns:
        Size in bytes, rounded up as described above.
    """
    erase_block_size = metadata.get('erase_block_size', 0)
    size = partition['bytes']
    if erase_block_size == 0:
        # Not a raw NAND device: no rounding or overhead needed.
        return size
    # See "Flash space overhead" in
    # http://www.linux-mtd.infradead.org/doc/ubi.html
    # for overhead calculations.
    is_ubi = partition.get('format') == 'ubi'
    reserved_erase_blocks = partition.get('reserved_erase_blocks', 0)
    page_size = metadata.get('page_size', 0)
    if is_ubi:
        # Each UBI logical erase block loses 2 pages to UBI headers: count
        # how many (smaller) UBI blocks the payload needs, then add 2 pages
        # of overhead per block.
        ubi_block_size = erase_block_size - 2 * page_size
        erase_blocks = (size + ubi_block_size - 1) // ubi_block_size
        size += erase_blocks * 2 * page_size
    # Round up to a whole number of erase blocks.
    erase_blocks = (size + erase_block_size - 1) // erase_block_size
    size = erase_blocks * erase_block_size
    if is_ubi:
        # UBI reserves 4 erase blocks per partition (volume table, etc.).
        size += erase_block_size * 4
    size += reserved_erase_blocks * erase_block_size
    return size
def stats_box(points):
    """
    Return the extrema and dimensions of the box enclosing *points*.

    Args:
        points (list): a list of points in [(x1, y1),(x2, y2),(xN, yN)] form

    Returns:
        min_x (float): minimum x coordinate
        min_y (float): minimum y coordinate
        max_x (float): maximum x coordinate
        max_y (float): maximum y coordinate
        width (float): width across x-coordinates
        height (float): width across y-coordinates

    Example Returns::
        min_x, min_y, max_x, max_y, width, height = stats_box(points)
    """
    xs, ys = zip(*points)
    min_x, max_x = min(xs), max(xs)
    min_y, max_y = min(ys), max(ys)
    return min_x, min_y, max_x, max_y, abs(max_x - min_x), abs(max_y - min_y)
import torch
def dmae(x, y):
    """
    Elementwise gradient of the MAE loss: dMAE(x, y) = sign(x - y).
    """
    diff = x - y
    return torch.sign(diff)
def get_experiment_run_name(
    mode: str,
    msg: int,
    msg_unit: str,
    freq: int,
) -> str:
    """
    Get the name of the data file for a specific experiment run.

    :param mode: the mode ('base' or 'trace')
    :param msg: the msg size
    :param msg_unit: the msg unit prefix
    :param freq: the publishing frequency
    :return: the file name for that specific run
    """
    assert mode in ('base', 'trace')
    # '_s' suffix: the subscriber-side file is the one holding latency data
    prefix = f'1-{mode}_Array{msg}{msg_unit}'
    return f'{prefix}_{freq}hz_s'
def env_exists(env_variable: str) -> bool:
    """Validate that an env variable value was provided and is non-empty.

    Args:
        env_variable: str, value of the env variable

    Returns:
        True if provided and not an empty string, otherwise False.
    """
    return env_variable is not None and env_variable != ''
def midpoint2(x1: float, y1: float, x2: float, y2: float) -> complex:
    """Return the midpoint of the segment (x1, y1)-(x2, y2), encoded as a
    complex number (real = x, imag = y)."""
    return complex((x1 + x2) / 2, (y1 + y2) / 2)
from typing import List
from typing import Any
def difference(list1: List[Any], list2: List[Any]) -> List:
    """
    Return the elements of the first list that are not in the second list.

    (The previous summary, "Extend list with unique values of second lists",
    described the wrong operation.)

    Parameters
    ----------
    list1: List[Any]
        List of values of any type.
    list2: List[Any]
        List of values of any type.

    Returns
    -------
    List
        The first list without elements from the second list.
    """
    return [x for x in list1 if x not in list2]
import yaml
def YamlWrite(structured_data, dumper):
    """Wrap yaml.dump to make the calling convention consistent.

    Serializes *structured_data* in block (non-flow) style using *dumper*.
    """
    return yaml.dump(structured_data, Dumper=dumper, default_flow_style=False)
import time
def _get_timestamp_ms(when):
"""Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
"""
if when is None:
return None
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch) | 2f6380c506e282818aa00a5b138ebba640126318 | 127,252 |
from typing import Mapping
from typing import Any
from typing import Dict
def with_key_prefix(d: Mapping[str, Any], key_prefix: str) -> Dict[str, Any]:
    """Return a new dict whose every key of *d* is prefixed with *key_prefix*."""
    prefixed: Dict[str, Any] = {}
    for key, value in d.items():
        prefixed[key_prefix + key] = value
    return prefixed
import copy
def _convert_jsonschema_to_list_of_parameters(obj, in_="query"):
"""
Swagger is only _based_ on JSONSchema. Query string and header parameters
are represented as list, not as an object. This converts a JSONSchema
object (as return by the converters) to a list of parameters suitable for
swagger.
:param dict obj:
:param str in_: 'query' or 'header'
:rtype: list[dict]
"""
parameters = []
assert obj["type"] == "object"
required = obj.get("required", [])
for name, prop in sorted(obj["properties"].items(), key=lambda i: i[0]):
parameter = copy.deepcopy(prop)
parameter["required"] = name in required
parameter["in"] = in_
parameter["name"] = name
parameters.append(parameter)
return parameters | 794c7c1745d2e7cc1e850a2514b59872731389ae | 127,258 |
import torch
def averaged_std(outputs):
    """
    Averaged standard deviation across a list of same-shaped tensors.

    Step 1: std across ensemble members (the list entries)
    Step 2: mean across all datapoints
    Step 3: mean across all items
    """
    stacked = torch.stack(outputs)
    per_point_std = torch.std(stacked, dim=0)
    averaged = per_point_std.mean(dim=0).mean(dim=0)
    return averaged.tolist()
def getSubjectName(inputname):
    """Determine first column subject name for output file.

    Returns the part of *inputname* before the dot when the name contains
    exactly one dot (e.g. 'subj01.txt' -> 'subj01').

    NOTE(review): implicitly returns None for names with zero or more than
    one dot -- confirm callers expect that.
    """
    if len(inputname.split('.')) == 2:
        return inputname.split('.')[0]
def put_item_request_param(item):
    """
    Build the PutItem request used as keyword arguments to Table.put_item().

    :param item: The item to put in DynamoDB
    :return: PutItem request dict
    """
    request = {'Item': item}
    request['ReturnConsumedCapacity'] = 'TOTAL'
    return request
import asyncio
async def async_run_command(command):
    """Run *command* in a shell asynchronously; return decoded (stdout, stderr)."""
    proc = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out_bytes, err_bytes = await proc.communicate()
    return out_bytes.decode(), err_bytes.decode()
def greedy_approach(U, N, dist):
    """
    Compute travel cost in the greedy approach, i.e. each user travels
    to their nearest POI.

    Parameters
    ----------
    U: array
        List of users.
    N: dict
        Dictionary of nearest POI per user.
    dist: array
        Pairwise distance.

    Returns
    -------
    total_cost: int
        Total travel cost.
    """
    return sum(dist[u][N[u]] for u in U)
import sqlite3
def suggest_options(field, query, **kwargs):
    """
    Sample plugin that enables CloudBolt interface to provide a form field that
    autocompletes as the user is typing, but that fetches matching options by
    invoking this action asynchronously. Use this if the set of options is so
    large that the normal `get_options_list` (which renders all options into
    the page) performs poorly.

    Note: This is just an example of one way such an action might be used, by
    querying a local SQLite database. It could also fetch options from an
    external API.

    Args:
        field: the form field being autocompleted (unused here)
        query: the word the user has typed
        **kwargs: extra hook arguments (unused here)

    Returns:
        list of tuples representing dropdown options ("value", "Visible label")
    """
    # Limit the results to something reasonable, for a responsive user experience;
    # this action is called as the user is typing and results should be returned
    # as quickly as possible, but also not too many.
    max_results = 50
    # Query an SQLite database table for matching email addresses.
    # Parameterized query guards against SQL injection from user typing.
    conn = sqlite3.connect('/var/opt/cloudbolt/proserv/ldap_users.db')
    c = conn.cursor()
    emails = c.execute(
        'SELECT email FROM users WHERE email LIKE ? ORDER BY email LIMIT ?',
        ('%{}%'.format(query), max_results)
    )
    # Use the SQLite query result here, before closing the DB connection. It
    # materializes the cursor and avoids Python exception.
    # Bug fix: each cursor row is a 1-tuple, so the original built
    # ((row,), (row,)) pairs instead of ("email", "email") string pairs.
    options = [(email, email) for (email,) in emails]
    conn.close()
    return options
def as_list(maybe_element):
    """Normalize the input into a list of elements.

    A list or tuple is returned as a list, with one level of nesting
    unwrapped when it holds a single inner list; any other value is
    wrapped into a one-element list.

    **Basic Examples:**
    >>> assert as_list("string") == ["string"]
    >>> assert as_list(["string", "string"]) == ["string", "string"]
    >>> assert as_list(("string", "string")) == ["string", "string"]
    >>> assert as_list([["string", "string"]]) == ["string", "string"]

    **An Application Example:**
    >>> def func(*args):
    >>>     return as_list(args)
    >>>
    >>> assert func("a", "b") == ["a", "b"]
    >>> assert func(["a", "b"]) == ["a", "b"]
    """
    # exact type checks (not isinstance) preserve the original semantics
    if type(maybe_element) not in (list, tuple):
        return [maybe_element]
    if len(maybe_element) == 1 and type(maybe_element[0]) == list:
        return maybe_element[0]
    return maybe_element if type(maybe_element) == list else list(maybe_element)
def filter_error_ppm(df, err_range=0.5):
    """Keep rows whose 'Error_ppm' lies within [-err_range, +err_range] (inclusive)."""
    mask = df['Error_ppm'].between(-err_range, err_range)
    return df[mask]
def get_query_file_type(query_file):
    """Return 'FASTA' or 'HMM' according to the first line of *query_file*.

    A line starting with '>' indicates FASTA; one starting with 'HMM'
    indicates an HMM profile. Anything else raises AssertionError.
    """
    with open(query_file) as infh:
        first_line = infh.readline().strip()
    if first_line.startswith('>'):
        return 'FASTA'
    if first_line.startswith('HMM'):
        return 'HMM'
    assert False, """Error: Input file type could not be
    determined: %s""" % query_file
def split_record(line, separator=';'):
    """Split `line` to a list of fields and a comment.

    >>> split_record('0000..0009 ; XXX # some value')
    (['0000..0009', 'XXX'], 'some value')
    """
    # partition() handles the no-'#' case by yielding an empty comment
    record_part, _, comment = line.partition('#')
    fields = [field.strip() for field in record_part.split(separator)]
    return fields, comment.strip()
def has_station(train, look_for_station):
    """Check if this `train` stops at the given station (by station name).

    Returns the index of the station within train["stations"], or -1 when
    the train does not stop there.
    """
    return next(
        (i for i, stop in enumerate(train["stations"])
         if stop["sta"] == look_for_station),
        -1,
    )
def standardise_cell_values(pd_df, dict_of_nonstandard_standard):
"""
Maps non-standard values e.g. "Males" to standard values like "M".
Mapping is carried out on a column-specific basis.
"""
df = (pd_df.replace
(to_replace=dict_of_nonstandard_standard,
value=None))
return df | 47cd855552efde6ad1c9190940c45c2c11750ae1 | 127,308 |
def all_subclasses(cls):
    """
    Return a list of all subclasses of `cls`, recursively: direct
    subclasses first, then each one's subclasses in turn.
    """
    direct = cls.__subclasses__()
    found = list(direct)
    for sub in direct:
        found.extend(all_subclasses(sub))
    return found
def df_html_display(df):
    """Render the DataFrame's default Styler to an HTML string.

    Uses ``Styler.to_html`` when available; ``Styler.render`` was
    deprecated in pandas 1.4 and removed in 2.0, so the old call is kept
    only as a fallback for legacy pandas.
    """
    styler = df.style
    if hasattr(styler, "to_html"):
        return styler.to_html()
    return styler.render()
import unicodedata
def is_number(target_str):
    """Determine whether the word is a number.

    Tries float() first, then unicodedata.numeric() so that single
    numeric unicode characters are also accepted.

    Args:
        target_str (str): target string
    """
    try:
        float(target_str)
        return True
    except ValueError:
        pass
    try:
        unicodedata.numeric(target_str)
        return True
    except (TypeError, ValueError):
        pass
    return False
def ComputeErrorRate(error_count, truth_count):
    """Returns a sanitized percent error rate from the raw counts.

    Prevents division by zero (0 truths is treated as 100%) and clips the
    result to at most 100%.

    Args:
        error_count: Number of errors.
        truth_count: Number to divide by.

    Returns:
        100.0 * error_count / truth_count clipped to 100.
    """
    if truth_count == 0:
        error_count, truth_count = 1, 1
    else:
        error_count = min(error_count, truth_count)
    return error_count * 100.0 / truth_count
import requests
def check_valid_site(url, print_status=False):
    """
    Check whether a passed URL responds with HTTP 200.

    :param url: a url to check
    :param print_status: Print if the passed site is valid or not
    :return: True (site responded 200) or False (any other status, or a
        request error -- the original used a bare ``except`` and fell off
        the end returning None in that case)
    """
    try:
        status_code = requests.get(url).status_code
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures expected.
        print("It broke on", url)
        return False
    is_valid = status_code == 200
    if print_status:
        if is_valid:
            print(url, "was a valid site")
        else:
            print(url, "was not a valid status, here's the status code:", status_code)
    return is_valid
from datetime import datetime
def col2datetime(df, col, datestr='%m/%d/%Y %H:%M'):
    """
    Converts date strings in a DataFrame column to datetime objects.

    Args:
        df - a pandas DataFrame
        col - the column name in df that contains the date strings
        datestr - a string that encodes the formatting of the datetime strings
                  see python's documentation for datetime to see how these
                  strings are formatted
    Returns:
        The DataFrame with all date strings in df[col] converted to datetime objects
    """
    # Whole-column assignment replaces the original per-row chained indexing
    # (df[col][idx] = ...), which raises SettingWithCopyWarning and can
    # silently fail to write through on copies in newer pandas.
    df[col] = df[col].apply(lambda d: datetime.strptime(d, datestr))
    return df
def testmodels(app_models_env):
    """
    Pytest fixture body that deletes existing records in any models in
    app_models_env.models (an OrderedDict used to access the models).

    Returns the OrderedDict, with all models emptied.

    NOTE(review): no @pytest.fixture decorator is visible in this chunk --
    confirm it is applied where this function is registered.
    """
    testmodels = app_models_env.models
    for model in testmodels.values():
        # Only touch real ORM models (those with a manager), and skip the
        # delete entirely when the table is already empty.
        if hasattr(model, 'objects') and len(model.objects.all()):
            model.objects.all().delete()
    return testmodels
def _select_option(options: dict):
    """ Present a dictionary of options to the user for selection.

    Prints each key with its index, then reads one line from stdin; the
    input may be either the index number or the key itself.

    :param options: mapping of display-name -> value
    :return: tuple of (selected value, selected key)
    """
    for ix, (k, v) in enumerate(options.items()):
        print(f'[{str(ix)}] {k}')
    choice = input("Choose an option by number or name: ")
    print('')
    try:
        choice = int(choice) # treat the input as if it was an integer
        choice = list(options.keys())[choice]
    except ValueError: # otherwise assume it was a key value
        pass
    # NOTE(review): an out-of-range number raises IndexError and an unknown
    # key raises KeyError here -- confirm callers handle invalid input.
    return options[choice], choice
def removeChannelDuplicates(queue, channel):
    """
    Remove settings that apply to the current channel. This way
    when you move the slider and generate 100 events only the last
    one gets acted on, but pending events for other channels are not lost.
    """
    return [entry for entry in queue if entry[0] != channel]
from typing import Counter
def count_passwords(r_start: int, r_end: int):
    """Count values in [r_start, r_end) meeting the password criteria:
    exactly 6 digits, digits never decrease left-to-right, and at least one
    digit appears exactly twice."""
    total = 0
    for candidate in range(r_start, r_end):
        digits = str(candidate)
        if len(digits) != 6:
            continue
        # digits must be monotonically non-decreasing
        if any(a > b for a, b in zip(digits, digits[1:])):
            continue
        # In a sorted digit string, any digit with total count 2 forms an
        # adjacent pair -- equivalent to the original adjacency+histogram test.
        if 2 in Counter(digits).values():
            total += 1
    return total
import torch
def perplexity(y_hat, ground_true):
    """Perplexity for a single example.

    :param y_hat: tensor of probabilities of shape (num_steps, num_classes)
    :param ground_true: tensor of ground-truth labels of shape (num_steps,)
    :return: perplexity as a Python float
    """
    steps = len(ground_true)
    # pick the probability assigned to the true class at every step
    picked = y_hat[list(range(steps)), ground_true]
    mean_nll = -torch.log(picked).sum() / steps
    return torch.exp(mean_nll).item()
import importlib
def load_component(component, name, class_name):
    """Load and return a component network class from the musegan presets."""
    module_path = '.'.join(('musegan.presets', component, name))
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
def calc_static_stress_drop(Mw, fc, phase='S', v=3.5, use_brune=False):
    """
    Calculate static stress drop from the moment / corner-frequency relation.

    Note the Brune model (instantaneous slip) gives stress drops ~8x lower
    than the Madariaga values for fcP, fcS.

    :param Mw: moment magnitude
    :type Mw: float
    :param fc: corner frequency [Hz]
    :type fc: float
    :param phase: P or S phase
    :type phase: string
    :param v: P or S velocity [km/s] at source
    :type v: float
    :param use_brune: If true --> use Brune's original scaling
    :type use_brune: boolean
    :returns: static stress drop [MPa]
    :rtype: float
    """
    # Scaling constant relating rupture radius to corner frequency.
    if use_brune:          # Brune scaling
        c = .375
    elif phase == 'S':     # Madariaga scaling
        c = .21
    else:
        c = .32
    v_cm_s = v * 1e5                        # km/s -> cm/s
    radius = c * v_cm_s / fc                # circular-fault radius [cm]
    M0 = 10 ** (3/2 * Mw + 9.1) * 1e7       # moment: N-m -> dyn-cm
    stress_drop = 7./16. * M0 * (1/radius) ** 3   # dyn/cm^2
    return stress_drop / 10. / 1e6          # -> Pa -> MPa
from pathlib import Path
def get_full_path(path: str) -> str:
    """Resolve all implicit and relative path components
    to give an absolute path to a file or directory."""
    # Path.resolve() already returns an absolute path, so the original's
    # trailing .absolute() call was redundant.
    return str(Path(path).resolve())
import jinja2
def create_synapse_hoc(
    syn_mech_args,
    syn_hoc_dir,
    template_path,
    gid,
    dt,
    synapses_template_name="hoc_synapses",
):
    """Returns a string containing the synapse hoc.

    Args:
        syn_mech_args (dict): synapse-related configuration; must provide
            'seed', 'rng_settings_mode', 'syn_conf_file' and 'syn_data_file'
        syn_hoc_dir (str): path to directory containing synapse-related data
        template_path (str): path to the template to fill in
        gid (int): cell ID
        dt (float): timestep (ms)
        synapses_template_name (str): template name of the synapse class

    Returns:
        str: hoc script with the synapse class template
    """
    # load template (read as UTF-8 text, then compile with jinja2)
    with open(template_path, "r", encoding="utf-8") as template_file:
        template = template_file.read()
    template = jinja2.Template(template)
    # edit template: substitute the synapse-specific configuration values
    return template.render(
        TEMPLATENAME=synapses_template_name,
        GID=gid,
        SEED=syn_mech_args["seed"],
        rng_settings_mode=syn_mech_args["rng_settings_mode"],
        syn_dir=syn_hoc_dir,
        syn_conf_file=syn_mech_args["syn_conf_file"],
        syn_data_file=syn_mech_args["syn_data_file"],
        dt=dt,
    )
def job_dfn_list_dict(job_dict):
    """Wrap a single job definition in the {"jobs": [...]} list form."""
    jobs = [job_dict]
    return {"jobs": jobs}
import pytz
def add_timezone(time_record):
    """ Add a default America/New_York timezone info to a datetime object.

    Args:
        time_record: Datetime object.

    Returns:
        Datetime object with a timezone if time_record did not have tzinfo,
        otherwise return time_record itself.
    """
    if time_record.tzname() is None:
        # Use localize() instead of replace(tzinfo=...): attaching a pytz
        # zone via replace() pins the zone's historical LMT offset (-04:56
        # for America/New_York) instead of the correct EST/EDT offset.
        return pytz.timezone('America/New_York').localize(time_record)
    return time_record
def farenheit(ctemp):
    """Convert a Celsius temperature to Fahrenheit, rounded to an int."""
    fahrenheit_value = 9.0/5.0 * ctemp + 32
    return round(fahrenheit_value)
import logging
def bad_request(err):
    """Return a custom 400 error response tuple; logs the triggering error."""
    logging.warning(err)
    message = 'The browser (or proxy) sent a request that this server could not understand.'
    return message, 400
from functools import reduce
def _calculate_in_and_out(arr):
"""
Calculate n_in and n_out.
Args:
arr (Array): Input array.
Returns:
Tuple, a tuple with two elements, the first element is `n_in` and the second element is `n_out`.
"""
dim = len(arr.shape)
if dim < 2:
raise ValueError("If initialize data with xavier uniform, the dimension of data must be greater than 1.")
n_in = arr.shape[1]
n_out = arr.shape[0]
if dim > 2:
counter = reduce(lambda x, y: x * y, arr.shape[2:])
n_in *= counter
n_out *= counter
return n_in, n_out | 3f3d5cf77d95d1aa02e12154f7d1b4473d3eddee | 127,362 |
def total_demand(user_demand, population, area, parameters):
    """
    Estimate total demand (Mbps/km^2) based on:

    - population (raw number)
    - smartphone penetration (percentage)
    - market share (percentage)
    - user demand (Mbps)
    - area (km^2)

    E.g.::

        100 population
        * (80% / 100) penetration
        * (25% / 100) market share
        = 20 users

        20 users
        * 0.01 Mbps user demand
        = 0.2 total user throughput

        0.2 Mbps total user throughput during the busy hour
        / 1 km² area
        = 0.2 Mbps/km² area demand
    """
    penetration = parameters['penetration_percentage']
    market_share = parameters['market_share_percentage']
    # Bug fix: market share is a percentage (see the worked example above)
    # and must be divided by 100, as penetration already was.
    users = population * (penetration / 100) * (market_share / 100)
    user_throughput = users * user_demand
    demand_per_kmsq = user_throughput / area
    return demand_per_kmsq
def adjacent(L):
    """Find all pairs of consecutive indices whose values are numerically
    consecutive.

    Takes a list of integers and returns a list of [i, i+1] index pairs
    such that L[i] and L[i+1] differ by exactly one (in either direction).
    An empty list is returned when no such pair exists.

    Parameters
    ----------
    L : list
        A list of integers.

    Returns
    -------
    adj : array_like
        List of [i, i+1] index pairs with consecutive values in `L`.

    Examples
    --------
    >>> adjacent([3,4,1,2])
    [[0, 1], [2, 3]]
    >>> adjacent([1,2,3,4])
    [[0, 1], [1, 2], [2, 3]]
    >>> adjacent([2,4,1,3])
    []
    """
    assert type(L[0]) == int
    return [[i, i + 1] for i in range(len(L) - 1) if abs(L[i] - L[i + 1]) == 1]
def make_lilypond_template(tonic: str, scale_type: str) -> str:
    """
    Make template of Lilypond text file.

    :param tonic:
        tonic pitch class represented by letter (like C or A#)
    :param scale_type:
        type of scale (e.g., 'major', 'natural_minor', or 'harmonic_minor')
    :return:
        template
    """
    # Braces are doubled so that this first .format() pass (filling the key
    # signature) emits literal '{'/'}' for Lilypond while the sextuple
    # '{{{{{{}}}}}}' collapses to '{{{}}}', leaving placeholders for a later
    # formatting pass that inserts the music itself.
    raw_template = (
        "\\version \"2.18.2\"\n"
        "\\layout {{{{\n"
        "  indent = #0\n"
        "}}}}\n"
        "\\new StaffGroup <<\n"
        "  \\new Staff <<\n"
        "    \\clef treble\n"
        "    \\time 4/4\n"
        "    \\key {} \\{}\n"
        "    {{{{{{}}}}}}\n"
        "    \\\\\n"
        "    {{{{{{}}}}}}\n"
        "  >>\n"
        ">>"
    )
    # Lilypond spells sharps as 'is' and flats as 'es', lower-case note names.
    tonic = tonic.replace('#', 'is').replace('b', 'es').lower()
    # e.g. 'natural_minor' -> 'minor' (the Lilypond mode name).
    scale_type = scale_type.split('_')[-1]
    template = raw_template.format(tonic, scale_type)
    return template
def jacobian(theta, event, parameters_to_fit):
    """
    Calculate chi^2 gradient (also called Jacobian).

    Parameters
    ----------
    theta : sequence of float
        Parameter values, in the same order as *parameters_to_fit*.
    event : object
        Fitted event; must expose ``model.parameters`` attributes and a
        ``chi2_gradient`` method (presumably a MulensModel-style Event --
        TODO confirm).
    parameters_to_fit : list of str
        Names of the attributes on ``event.model.parameters`` to set.
    """
    # Push the proposed parameter values into the model before evaluating.
    for (key, val) in enumerate(parameters_to_fit):
        setattr(event.model.parameters, val, theta[key])
    return event.chi2_gradient(parameters_to_fit)
def _find_bboxes_in_rect(bboxes, left, bottom, right, top):
""" Given a list of polygon bounding boxes and a rectangle defined by
left/bottom/right/top, this function returns those polygons which overlap
the rectangle.
Parameters
----------
bboxes : list
List of all polygon bboxes.
left : int or float
The x-coordinate of the lefthand boundary.
bottom : int or float
The y-coordinate of the bottom boundary.
right : int or float
The x-coordinate of the righthand boundary.
top : int or float
The y-coordinate of the top boundary.
Returns
-------
result : list
List of all polygon bboxes that overlap with the defined rectangle.
"""
result = (bboxes[:, 0] <= right) & (bboxes[:, 2] >= left) & \
(bboxes[:, 1] <= top) & (bboxes[:, 3] >= bottom)
return result | 942f0917b3817e21d5d22b12d11adac9650b5045 | 127,372 |
def sort_range_list(range_list, *, descending=True):
    """Sort the given list of ranges in place by range size and return it.

    Size of a range ``[a, b]`` is ``abs(b - a) + 1``. Largest-first by
    default; pass ``descending=False`` for smallest-first.
    """
    range_list.sort(key=lambda pair: abs(pair[1] - pair[0]) + 1,
                    reverse=descending)
    return range_list
def valid_python(name):
    """
    Converts all illegal characters for python object names to underscore.
    Also adds underscore prefix if the name starts with a number.

    :param name: input string
    :type name: str
    :return: corrected string
    :rtype: str
    """
    # Raw string avoids the invalid '\{' / '\}' escape sequences of the
    # original (a SyntaxWarning on modern Python). The mapped character
    # set is unchanged: the original's 34-char string contained the
    # backslash three times; deduplicated it is these 32 characters.
    illegal = r""" `~!@#$%^&*()-+=[]\{}|;:'",.<>/?"""
    table = str.maketrans(illegal, '_' * len(illegal))
    if name[0].isdigit():
        name = '_' + name
    return name.translate(table)
import re
def remove_site_number(site):
    """Some sites are numbered in the VPR arch definitions.
    This happens for e.g. SLICE0. This function removes
    trailing numbers from the name"""
    match = re.search(r'\d+$', site)
    if match is not None:
        # Cut at the start of the trailing digit run instead of slicing
        # off its length from the end — same result, more direct.
        site = site[:match.start()]
    return site
def wt_av(x, xw, y, yw):
    """Return the average of *x* and *y* weighted by *xw* and *yw*."""
    total_weight = xw + yw
    return (x * xw + y * yw) / total_weight
import errno
def retry_io_command(base_cmd, *argv):
    """PEP475: Retry syscalls if EINTR signal received.
    https://www.python.org/dev/peps/pep-0475/

    Certain system calls can be interrupted by signal 4 (EINTR) for no good
    reason. Per PEP475 these should simply be retried; Python 3 does this
    at the lowest level, but on Python 2 we must loop ourselves.

    :param base_cmd: The syscall to wrap.
    :param argv: Arguments to the syscall.
    :return: The return value from invoking the syscall.
    """
    while True:
        try:
            return base_cmd(*argv)
        except EnvironmentError as err:
            if err.errno == errno.EINTR:
                continue  # interrupted by a signal: just retry
            raise
from pathlib import Path
from typing import IO
def openConfig(path: Path, mode: str) -> IO[str]:
    """Open the SoftFab configuration file ``softfab.ini`` under *path*."""
    config_file = path / 'softfab.ini'
    return open(config_file, mode=mode, encoding='utf-8')
def chromoAgeRHK(log10RHKprime):
    """
    Calculate the chromospheric age according to Donahue 1998.

    Implements Eq. 1 of Donahue 1998 (ASPC 154, 1235), relating the
    chromospheric activity index R'HK of late-type stars to their age.
    Donahue cautions that single measurements give poor age estimates:
    the Sun's 11 yr activity cycle alone spreads its inferred age by
    roughly 2.5 Ga, which indicates the accuracy to expect.

    Parameters
    ----------
    log10RHKprime : float
        Chromospheric activity index log10(R'HK).

    Returns
    -------
    Age : float
        Stellar age [Ga].
    """
    R5 = 1e5 * 10.0**log10RHKprime
    logAge = 10.725 - 1.334*R5 + 0.4085*R5**2 - 0.0522*R5**3
    return 10.0**logAge / 1e9
def has_column(bind, table, column):
    """Return True/False whether the column exists on the table in the database.

    Note: *table* and *column* are interpolated directly into the SQL
    string; pass only trusted identifiers (never user-supplied input).

    :param bind: a connection/engine exposing ``execute()``
    :param table: table name to look up in information_schema
    :param column: column name to test for
    :return: bool
    """
    stmt = (
        "SELECT column_name FROM information_schema.columns "
        "WHERE table_name='{table}' and column_name='{column}';"
    ).format(table=table, column=column)
    result = bind.execute(stmt)
    # scalar() yields the matching column name (truthy) or None; coerce to
    # a real bool so the function matches its documented True/False contract
    # (the original returned the scalar value itself).
    return bool(result is not None and result.scalar())
def mapVal(x, in_min, in_max, out_min, out_max):
    """
    Linearly rescale *x* from the range [in_min, in_max] to
    [out_min, out_max], truncated to an int.

    @param in_min The minimum value that the input value could be
    @param in_max The maximum value that the input value could be
    @param out_min The minimum value that the output value could be
    @param out_max The maximum value that the output value could be
    @return A scaled value based on a given input
    """
    span_in = in_max - in_min
    span_out = out_max - out_min
    scaled = (x - in_min) * span_out / span_in + out_min
    # int() truncates toward zero, matching the original behaviour.
    return int(scaled)
def text_to_dict(text):
    """parse multilines text containing simple 'key=value' lines and return a
    dict of {'key': 'value'}. When the same key is encountered multiple time,
    value is turned into a list containing all values.

    >>> d = text_to_dict('''multiple=1
    ... multiple= 2
    ... single =3
    ... ''')
    >>> d['single']
    '3'
    >>> d['multiple']
    ['1', '2']
    """
    res = {}
    if not text:
        return res
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        # Skip blank lines and '#' comments.
        if not stripped or stripped.startswith('#'):
            continue
        key_part, value_part = stripped.split('=', 1)
        key = key_part.strip()
        value = value_part.strip()
        if key not in res:
            res[key] = value
        elif isinstance(res[key], list):
            res[key].append(value)
        else:
            # Second occurrence: promote the stored string to a list.
            res[key] = [res[key], value]
    return res
import torch
def make_creep_tests(stress, temperature, rate, hold_times,
    nsteps_load, nsteps_hold, logspace = False):
  """
  Produce creep test input (time,stress,temperature) given tensor
  inputs for the target stress, target temperature, loading rate

  Args:
    stress: 1D tensor of target stresses
    temperature: 1D tensor of target temperature
    rate: 1D tensor of target rates
    hold_times: 1D tensor of hold times
    nsteps_load: number of time steps to load up the sample
    nsteps_hold: number of time steps to hold the sample
    logspace (optional): log space the hold time steps
  """
  nbatch = stress.shape[0]
  total_steps = nsteps_load + nsteps_hold
  stresses = torch.zeros(total_steps, nbatch)
  times = torch.zeros(total_steps, nbatch)
  temperatures = torch.zeros(total_steps, nbatch)
  for col in range(nbatch):
      target = stress[col]
      hold = hold_times[col]
      load_rate = rate[col]
      # Ramp the stress linearly up to the target, then hold it constant.
      stresses[:nsteps_load, col] = torch.linspace(0, target, nsteps_load)
      stresses[nsteps_load:, col] = target
      times[:nsteps_load, col] = torch.linspace(0, target / load_rate,
          nsteps_load)
      temperatures[:, col] = temperature[col]
      # Continue the time grid from the end of loading to the hold time,
      # dropping the first point so it is not duplicated.
      t_load_end = times[nsteps_load - 1, col]
      if logspace:
          hold_grid = torch.logspace(torch.log10(t_load_end),
              torch.log10(hold), nsteps_hold + 1)
      else:
          hold_grid = torch.linspace(t_load_end, hold, nsteps_hold + 1)
      times[nsteps_load:, col] = hold_grid[1:]
  return times, stresses, temperatures
def convert_single_linear_to_srgb(color_value):
    """
    Convert one linear-RGB channel value to sRGB.

    :param color_value: float, single channel value in the 0-1 range
                        (for example the red channel)
    :return: float, the value converted to sRGB
    """
    # Below the sRGB knee the transfer curve is a plain linear segment.
    if color_value <= 0.0031308:
        return color_value * 12.92
    offset = 0.055
    return (1 + offset) * color_value ** (1 / 2.4) - offset
def equal_frame_and_index_content(df1, df2):
    """
    Check whether two dataframes have the same content and index values.

    Index *type* is ignored: a RangeIndex(start=0, stop=3, step=1) is
    considered equal to Int64Index([0, 1, 2]) as long as the values match.
    """
    same_content = df1.equals(df2)
    same_index_values = list(df1.index) == list(df2.index)
    return same_content and same_index_values
def day_in_week(adate, offset = 1):
    """
    Return the day of the week as an integer with Sunday = 1, Monday = 2,
    ..., Saturday = 7 (for the default offset).
    """
    # date.weekday() is Monday = 0; shift it so the week starts on Sunday.
    shifted = (adate.weekday() + offset) % 7
    return shifted + 1
def effective_axial_force(H, delta_P, A_i, v, A_s, E, alpha, delta_T):
    """ Return the effective axial force of a totally restrained pipe in the
    linear elastic stress range, thick-wall formulation.

    DNVGL-ST-F101 Equation (4.10): residual lay tension minus the internal
    pressure contribution minus the thermal expansion contribution.
    """
    pressure_term = delta_P * A_i * (1 - 2 * v)
    thermal_term = A_s * E * alpha * delta_T
    return H - pressure_term - thermal_term
def sanitize(value: object) -> object:
    """
    Default sanitize: the identity function, returning *value* unchanged.
    """
    return value
def load_country_tz(filename, winter=True):
    """
    Load the list of country timezones from a ';'-separated file.

    Parameters
    ----------
    filename: String
        Path to the file containing the timezone information; each line is
        'ISO3 ; name ; winter_offset ; summer_delta'.
    winter: bool (default True)
        Whether winter time or summer time is considered.

    Returns
    -------
    dict(str : int)
        Mapping from iso3 country code to UTC offset in hours.
    """
    zones = {}
    with open(filename) as handle:
        for line in handle:
            fields = line.split(";")
            code = fields[0].strip()
            # Only 3-letter ISO codes identify a country row.
            if len(code) != 3:
                continue
            offset = int(fields[2])
            if not winter:
                offset += int(fields[3])
            zones[code] = offset
    return zones
import re
def formatCommandLine(command_line):
    """Format a command line for printing in output

    Arguments containing whitespace are quoted, with internal runs of
    whitespace collapsed to single spaces; other arguments pass through.
    """
    formatted = []
    for arg in command_line:
        if ' ' in arg or '\n' in arg:
            arg = '"' + re.sub(r"\s+", " ", arg) + '"'
        formatted.append(arg)
    return " ".join(formatted)
def longest_common_substring(a, b):
    """Return the longest common substring between a and b.

    Uses the classic dynamic-programming formulation: dp[j] holds the
    length of the common suffix ending at a[i-1] / b[j-1]. The original
    implementation indexed b with a's indices (IndexError when b is
    shorter), only detected matches at identical positions, and raised
    on empty input; all of that is fixed here.

    Args:
        a (str): A word or non-word string.
        b (str): A word or non-word string.

    Returns:
        res (str): The longest common substring found between a and b;
            the empty string when either input is empty or nothing is
            shared.
    """
    if not a or not b:
        return ""
    best_end = 0   # end index (exclusive) of the best match within a
    best_len = 0
    prev = [0] * (len(b) + 1)
    for i in range(1, len(a) + 1):
        curr = [0] * (len(b) + 1)
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                curr[j] = prev[j - 1] + 1
                if curr[j] > best_len:
                    best_len = curr[j]
                    best_end = i
        prev = curr
    return a[best_end - best_len:best_end]
import math
def get_experience_for_level(level: int) -> int:
    """Gets the total experience needed for a specific level.

    Evaluates the cubic experience curve
    ceil(50*L^3/3 - 100*L^2 + 850*L/3 - 200) term by term.

    :param level: The desired level.
    :return: The total experience needed for the level."""
    cubic = 50 * math.pow(level, 3) / 3
    quadratic = 100 * math.pow(level, 2)
    linear = 850 * level / 3
    return int(math.ceil(cubic - quadratic + linear - 200))
def compute_score_for_nervous_system(glasgow_coma_scale: int) -> int:
    """
    Computes score based on Glasgow Coma Scale, see paper by Teasdale et al.:
    https://doi.org/10.1016/S0140-6736(74)91639-0
    """
    # Each pair is (exclusive upper bound of the GCS band, score).
    for upper_bound, score in ((6, 4), (10, 3), (13, 2), (15, 1)):
        if glasgow_coma_scale < upper_bound:
            return score
    return 0
def is_project_delegate(user, project):
    """Return whether *user* is the delegate of *project*."""
    return project.delegate == user
def parse_methods(root, methods=None):
    """Parses methods from the given Discovery document.

    Args:
        root (dict): A Discovery document. When called recursively, this is a
            resource within a Discovery document.
        methods (dict): A mapping of method ID to Discovery method. Do not set,
            this is used to collect method IDs while recursing.

    Returns:
        dict: A mapping of method ID to method.
    """
    if methods is None:
        methods = {}
    # dict.values() replaces the py2-era six.itervalues(), dropping the
    # obsolete six dependency.
    for method in root.get('methods', {}).values():
        methods[method['id']] = method
    for resource in root.get('resources', {}).values():
        parse_methods(resource, methods)
    return methods
import re
def remove_special_chars(sentences, remove_digits = False):
    """Return copies of *sentences* with special characters removed.

    Certain punctuation ({ } . ( ) ! -) is first padded with spaces so it
    is detached from adjacent words, then every remaining special
    character is stripped. Digits are kept unless *remove_digits* is True
    (in which case '-' survives, per the original pattern)."""
    # Pad the listed punctuation with surrounding spaces.
    isolate_pattern = re.compile(r'([{.(-)!}]|-)')
    if remove_digits:
        special_char_pattern = re.compile(r'[^-a-zA-Z\s]+|\[|\]')
    else:
        special_char_pattern = re.compile(r'[^0-9a-zA-Z\s]+|\[|\]')
    cleaned = []
    for sentence in sentences:
        spaced = isolate_pattern.sub(" \\1 ", sentence)
        cleaned.append(special_char_pattern.sub("", spaced))
    return cleaned
def is_reference_target(resource, rtype, label):
    """ Return True if the resource has this rtype with this label.

    The original fell off the end (returning implicit None) when the
    rtype was missing; this always returns a real bool.
    """
    prop = resource.props.references.get(rtype)
    return bool(prop) and label in prop
def make_time_unit_str(dtobj):
    """ Return a CF-style 'seconds since ...' time unit string built from a
    datetime object. """
    timestamp = dtobj.strftime("%Y-%m-%dT%H:%M:%SZ")
    return "seconds since " + timestamp
import random
def get_random_sample(population: tuple, k: int) -> list:
    """Returns a k length list of unique elements from population"""
    sample = random.sample(population, k)
    return sample
def wavelength (wn):
    """Return the wavelength in m for a wavenumber *wn* given in 1/m."""
    return 1.0 / wn
def replace_multiple(y, xs, rep):
    """Replace every occurrence of each string in *xs* within *y* by *rep*.

    The replacements are applied in sequence. The original iterated over
    ``xs[1:]`` and therefore silently never replaced the first pattern;
    all patterns are applied now.
    """
    for x in xs:
        y = y.replace(x, rep)
    return y
from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
def url_mod(url: str, new_params: dict) -> str:
    """
    Modifies existing URL by setting/overriding specified query string parameters.

    Note: Does not support multiple querystring parameters with identical name.

    :param url: Base URL/path to modify
    :param new_params: Querystring parameters to set/override (dict)
    :return: New URL/path
    """
    parsed = urlparse(url)
    params = dict(parse_qsl(parsed.query))
    for key, value in new_params.items():
        # None maps to an empty value ('key='); everything else is stringified.
        params[str(key)] = '' if value is None else str(value)
    rebuilt = list(parsed)
    rebuilt[4] = urlencode(params)  # index 4 is the query component
    return urlunparse(rebuilt)
import re
def _strip_color(string: str) -> str:
"""Strip ANSI color escape sequences.
See https://stackoverflow.com/questions/14693701/
"""
return re.sub(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]", "", string) | 693b743437ba3e4d5f7dfe884cddc3dafe641e3c | 127,466 |
from typing import List
def parse_blocks(lines: List) -> List:
    """
    Parse list of blocks into singular data structure

    Args:
        lines (List): list of cleaned lines; '[name]' lines start a new
            block, 'key=value' lines populate the current one

    Returns:
        List of dictionaries, each dictionary a layer in the darknet
    """
    blocks = []
    block = {}
    for line in lines:
        if line[0] == "[":
            # New section header: flush the block collected so far.
            if block:
                blocks.append(block)
            block = {'arch': line[1:-1].rstrip()}
        else:
            key, value = line.split("=")
            block[key.rstrip()] = value.lstrip()
    # Only append the trailing block if one was started — the original
    # appended unconditionally, yielding [{}] for empty input.
    if block:
        blocks.append(block)
    return blocks
def _GetNestedKeyFromManifest(manifest, *keys):
"""Get the value of a key path from a dict.
Args:
manifest: the dict representation of a manifest
*keys: an ordered list of items in the nested key
Returns:
The value of the nested key in the manifest. None, if the nested key does
not exist.
"""
for key in keys:
if not isinstance(manifest, dict):
return None
try:
manifest = manifest[key]
except KeyError:
return None
return manifest | 25f4e342503d7d5667cc74e31a67941804021239 | 127,468 |
def flip_horiz_tile(s):
    """Flip a tile horizontally, return str repr."""
    flipped_rows = [row[::-1] for row in s.split("\n")]
    return "\n".join(flipped_rows)
def is_nan(builder, val):
    """
    Return a condition testing whether *val* is a NaN.

    Uses an unordered float compare of *val* against itself: 'uno' is true
    iff either operand is NaN, and NaN is the only float not equal to
    itself. *builder* is presumably an llvmlite-style IR builder — confirm
    against callers.
    """
    return builder.fcmp_unordered('uno', val, val)
def get_info_cls(value, base_class='col-md-7'):
    """Return the CSS class string for an info element.

    Appends ' text-muted' to *base_class* when *value* is exactly the
    empty string (other falsy values are left unmarked).
    """
    if value == '':
        return base_class + ' text-muted'
    return base_class
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.