content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def iota(n):
    """Return the list of the first n non-negative integers: [0, 1, ..., n-1]."""
    return [i for i in range(n)]
def f_exists(fname):
    """
    Report whether the file *fname* exists (i.e. can be opened for reading).

    :param fname: path of the file to probe
    :return: True if the file could be opened, False otherwise
    """
    try:
        with open(fname):
            pass
        return True
    except IOError:
        return False
import os
def parseConfigItem(item):
    """
    Read the definition of a "DEFAULT_*" value from the stdconfig.py file so that we get
    the literal Python source as a L{str} that we then process into JSON.
    @param item: the "DEFAULT_*" item to read
    @type item: L{str}
    @return: the "DEFAULT_*" value
    @rtype: L{str}
    """
    with open(os.path.join(os.path.dirname(__file__), "stdconfig.py")) as f:
        # Read up to the first line containing DEFAULT_*
        # NOTE(review): if *item* is never found, readline() returns '' at EOF
        # and this loops forever — confirm callers only pass existing items.
        while f.readline() != "{} = {{\n".format(item):
            continue
        # Build list of all lines up to the end of the DEFAULT_* definition and
        # make it look like a JSON object
        lines = ['{']
        line = f.readline()
        while line != "}\n":
            lines.append(line[:-1])  # drop the trailing newline
            line = f.readline()
        lines.append('}')
        return "\n".join(lines) | a3af048860bb00d85b11cf7041c24a4fc7c2e10f | 36,621 |
from typing import Optional
from typing import BinaryIO
import struct
def test_vtf(h: bytes, f: Optional[BinaryIO]) -> Optional[str]:
"""Source Engine Valve Texture Format."""
if h[:4] == b'VTF\0':
try:
version_major, version_minor = struct.unpack('II', h[4:12])
except struct.error:
return None
if version_major == 7 and (0 <= version_minor <= 5):
return 'source_vtf'
return None | 91374bf930724cc78da2773d23318f0634b6a40c | 36,622 |
def get_limelines_by_name(resolve_project, timeline_names):
    """
    For a given list of timeline names, return the matching timeline objects.

    The Resolve API indexes timelines starting at 1; each requested name
    contributes at most one (the first) matching timeline, in request order.
    """
    count = int(resolve_project.GetTimelineCount())
    timelines = [resolve_project.GetTimelineByIndex(i + 1) for i in range(count)]
    matches = []
    for wanted in timeline_names:
        for candidate in timelines:
            if candidate.GetName() == wanted:
                matches.append(candidate)
                break
    return matches
def capital_recovery_factor(interest_rate, years):
    """Compute the capital recovery factor.

    Ratio of a constant loan payment to the present value of paying that
    loan for a given length of time — i.e. the fraction of the overnight
    capital cost paid back each year if the expenditure is debt-financed.

    Arguments
    ---------
    interest_rate: float
        The interest rate as a decimal value <= 1
    years: int
        The technology's economic lifetime
    """
    if float(interest_rate) == 0.:
        # Degenerate case: no interest, repay principal in equal shares.
        return 1. / years
    growth = (1 + interest_rate) ** years
    return interest_rate * growth / (growth - 1)
def minimum_level_and_priority(item_list, level=0, priority=float('inf'),
                               exclude=None):
    """
    Build a new list keeping only items with sufficient level and priority.

    Items with priority above *priority* are dropped; items carrying a
    'level' key below *level* are dropped; items whose lower-cased 'label'
    appears in *exclude* are dropped.
    """
    excluded = exclude if exclude is not None else []
    kept = []
    for item in item_list:
        if item['priority'] > priority:
            continue
        if 'level' in item and item['level'] < level:
            continue
        if item.get('label', '').lower() in excluded:
            continue
        kept.append(item)
    return kept
def parametrise(parameters):
    """
    Causes a test to run multiple times with different parameters.
    This only works for functions inside a `TestCase` class.
    :param parameters: A dictionary containing individual tests. The keys in the
    dictionary act as a name for the test, which is appended to the function
    name. The values of the dictionary are dictionaries themselves. These act as
    the parameters that are filled into the function.
    :return: A parametrised test case.
    """
    def attach(original_function):
        # Simply record the parameter table on the function object; the
        # test runner discovers it later.
        original_function.parameters = parameters
        return original_function
    return attach
def ellipsis_eqiv(key):
    """
    Returns whether *key* is a *tuple* of *Ellipsis* or equivalent *slice*s.
    """
    def _is_full(k):
        # Ellipsis, or a slice spanning "everything" (note: a stop of -1 is
        # treated as equivalent here, matching the original contract).
        if k is Ellipsis:
            return True
        return (isinstance(k, slice)
                and k.start in (None, 0)
                and k.stop in (None, -1)
                and k.step in (None, 1))
    return isinstance(key, tuple) and all(_is_full(k) for k in key)
import sys
import os
def _generate_default_dump_filename():
"""Generates a descriptive dump file name if one isn't provided."""
main_filename = os.path.realpath(sys.argv[0]) if sys.argv[0] else "dump"
python_ver_str = ".".join(str(x) for x in sys.version_info[:3])
base_filename_str = os.path.splitext(os.path.basename(main_filename))[0]
bittess_str = 'x64' if sys.maxsize > 2**32 else 'x86'
return '.'.join([base_filename_str, python_ver_str, bittess_str, "dmp"]) | c4b942edecce8c06ebe3589f07702e4849d0d171 | 36,631 |
def cached_property(expensive_function):
    """
    A decorator like @property that returns a cached value when available.

    The computed value is stored on the instance under
    ``__cache__<name>`` and is reused only while ``self.withCaching`` is
    truthy (== True); otherwise the function is re-evaluated and the
    cache refreshed.
    """
    @property
    def caching_function(self):
        attr = f"__cache__{expensive_function.__name__}"
        cached = getattr(self, attr, None)
        if cached is not None and self.withCaching == True:
            return cached
        # Cache miss (or caching disabled): compute and remember the value.
        value = expensive_function(self)
        setattr(self, attr, value)
        return value
    return caching_function
def prepare_tenant_name(ts, tenant_name, product_name):
    """
    Prepares a tenant name by prefixing it with the organization shortname and returns the
    product and organization record associated with it. The value of 'tenant_name' may
    already contain the prefix.
    Returns a dict of 'tenant_name', 'product' and 'organization'.

    :param ts: table-store-like object exposing get_table()
    :param tenant_name: tenant name, optionally already '<org>-' prefixed
    :param product_name: product to look up; RuntimeError if unknown
    """
    products = ts.get_table('products')
    product = products.get({'product_name': product_name})
    if not product:
        raise RuntimeError("Product '{}' not found.".format(product_name))
    organization = products.get_foreign_row(product, 'organizations')
    if '-' in tenant_name:
        # Name already carries a prefix — verify it matches the organization.
        org_short_name = tenant_name.split('-', 1)[0]
        if org_short_name != organization['short_name']:
            # NOTE(review): the message interpolates the *wrong* prefix found,
            # not the expected organization short name — confirm intent.
            raise RuntimeError("Tenant name '{}' must be prefixed with '{}'.".format(
                tenant_name, org_short_name)
            )
    else:
        tenant_name = '{}-{}'.format(organization['short_name'], tenant_name)
    return {
        'tenant_name': tenant_name,
        'product': product,
        'organization': organization,
    } | e5afa36eb866e182c0ca674cb0a578a66fad242a | 36,638 |
def is_uri_option(number):
    """
    checks if the option is part of uri-path, uri-host, uri-port, uri-query
    :param number: CoAP option number
    :return: True for option numbers 3, 7, 11 or 15, else False
    """
    # Bug fix: the original chained comparisons with bitwise `|`
    # (`number == 3 | number == 7 | ...`), which due to operator precedence
    # parsed as `number == (3 | number) == ...` and was effectively always
    # False. A membership test expresses the documented intent.
    return number in (3, 7, 11, 15)
def linear_search(L, e):
    """
    Linearly search a list for a specific element.
    Complexity: O(n) worst case
    :param L: List-object
    :param e: Element to look for
    :return: Boolean value, True if the element has been found
    """
    # Return as soon as a match is found instead of always scanning the
    # whole list (the original kept iterating after setting the flag).
    for item in L:
        if e == item:
            return True
    return False
def parse_hal_spikes(hal_spikes):
    """Parses the tag information from the output of HAL
    Parameters
    ----------
    hal_spikes: output of HAL.get_spikes() (list of tuples)
    Returns a nested dictionary:
        [pool][neuron] = list of (times, 1) tuples
    The 1 is for consistency with the return of parse_hal_tags
    """
    parsed = {}
    for time, pool, neuron in hal_spikes:
        # setdefault creates the nested containers on first sight
        parsed.setdefault(pool, {}).setdefault(neuron, []).append((time, 1))
    return parsed
def static_var(var, value):
    """
    Decorator to support static variables in functions.

    Attaches *value* to the decorated function under the attribute name
    *var*, leaving the function itself untouched.
    """
    def attach(func):
        setattr(func, var, value)
        return func
    return attach
from typing import List
from typing import Optional
def brute_force(arr: List[int], value: int) -> Optional[int]:
    """
    A brute force method that runs in O(n) and thus does not satisfy the question's requirements.
    We use it for comparison.
    :param arr: the rotated array
    :param value: the value to find in the array
    :return: the index of value, or None if it does not exist
    >>> brute_force([2, 3, 0, 1], 2)
    0
    >>> brute_force([-1, 0], 0)
    1
    >>> brute_force([0, 1, 2], 1)
    1
    >>> brute_force([13, 18, 25, 2, 8, 10], 8)
    4
    >>> brute_force([13, 18, 25, 2, 8, 10], 3) is None
    True
    """
    if not arr or value is None:
        return None
    for idx, item in enumerate(arr):
        if item == value:
            return idx
    return None
import os
def get_listdir_inferred(dname_inferred, dname_infer):
    """get filename in dir

    Lists non-hidden files in *dname_infer* (sorted), joins them onto
    *dname_inferred*, and maps every non-TIFF extension to '.png'.
    """
    names = sorted(n for n in os.listdir(dname_infer) if not n.startswith("."))
    paths = [os.path.join(dname_inferred, name) for name in names]
    result = []
    for p in paths:
        if p.endswith('.tif') or p.endswith('.tiff'):
            result.append(p)
        else:
            result.append(os.path.splitext(p)[0] + '.png')
    return result
import random
def hsk_grabber(
    target_hsk: float, search_str: str = "", deviation: float = 0.2, limit: int = 10
):
    """
    Finds HSK sentences at `target_hsk` level, with a ± deviation of `deviation`.
    Search for sentences that contain words (space-separated) with `search_str` and set a sentence output limit with `limit`.

    Reads the TSV at ``data/sentences.tsv`` (column 0 = sentence, column 3 =
    HSK level) and returns up to *limit* shuffled (col0, col1, col2, col3)
    tuples. NOTE(review): column meanings beyond 0 and 3 are assumed —
    confirm against the data file.
    """
    sentences = []
    with open("data/sentences.tsv", "r", encoding="utf-8") as file:
        # Skip the header row, then filter line by line.
        for line in file.read().splitlines()[1:]:
            line = line.split("\t")
            # Require every space-separated search term to appear in the sentence.
            in_line = True
            for char in search_str.split(" "):
                if char not in line[0]:
                    in_line = False
            if in_line == True:
                # Keep sentences whose HSK level lies strictly inside the band.
                if (
                    float(line[3]) < target_hsk + deviation
                    and float(line[3]) > target_hsk - deviation
                ):
                    sentences.append((line[0], line[1], line[2], line[3]))
    random.shuffle(sentences)
    return sentences[:limit] | c5f5b4c6f015c309588dcbf791fded250428ab6e | 36,646 |
def linear_loss_weight(nepoch, epoch, max, init=0):
    """
    Linearly interpolate a scalar from *init* (epoch 0) to *max* (epoch
    *nepoch*) during training.
    """
    slope = (max - init) / nepoch
    return slope * epoch + init
from typing import List
def bip32_path_from_string(path: str) -> List[bytes]:
    """Convert BIP32 path string to list of bytes.

    Each component becomes a 4-byte big-endian index; a trailing
    apostrophe marks a hardened index (top bit set).
    """
    components = path.split("/")
    # Drop a leading "m" (master-key marker) if present.
    if components and components[0] == "m":
        components = components[1:]
    out: List[bytes] = []
    for comp in components:
        if "'" in comp:
            index = 0x80000000 | int(comp[:-1])
        else:
            index = int(comp)
        out.append(index.to_bytes(4, byteorder="big"))
    return out
def _get_centering_constraint_from_dmatrix(design_matrix):
""" Computes the centering constraint from the given design matrix.
We want to ensure that if ``b`` is the array of parameters, our
model is centered, ie ``np.mean(np.dot(design_matrix, b))`` is zero.
We can rewrite this as ``np.dot(c, b)`` being zero with ``c`` a 1-row
constraint matrix containing the mean of each column of ``design_matrix``.
:param design_matrix: The 2-d array design matrix.
:return: A 2-d array (1 x ncols(design_matrix)) defining the
centering constraint.
"""
return design_matrix.mean(axis=0).reshape((1, design_matrix.shape[1])) | 47aff43f5e6658309e7c11ed2328b279a44e2243 | 36,649 |
def float_callback(input_):
    """Accepts only a float or '' as entry.
    Args:
        input_ (str): input to check
    Returns:
        bool: True if input is a float or an empty string, False otherwise
    """
    if input_ == "":
        return True
    try:
        float(input_)
    except (ValueError, TypeError):
        return False
    return True
def clean_dict(d: dict, r=0, trash=None, verbose=0):
    """Remove all *trash* values from a dict (shallow copy returned).

    No deepcopy is made, so kept values are shared with the input
    (potential side-effects if the result is mutated).

    :param d: dict to clean
    :param r: recursion depth; nested dict values are cleaned while r > 0
    :param trash: list of values to remove (default: ``[[], {}]``)
    :param verbose: if truthy, print every dropped item
    :return: cleaned copy of d
    """
    # Bug fixes: the original used a mutable default argument and then
    # ignored *trash* entirely, always comparing against [[], {}].
    if trash is None:
        trash = [[], {}]
    result = {}
    for k, v in d.items():
        if v not in trash:
            if r > 0:
                v = clean_dict(v, r - 1, trash, verbose)
            result[k] = v
        elif verbose:
            print('found empty item: ', k, v)
    return result
def Strecke_der_Route(Entfernungen, route):
    """Return the total length of the closed route as an int/number.

    :param Entfernungen: distance matrix, Entfernungen[i][j] = distance i -> j
    :param route: ordering of location indices to visit
    :return: round-trip distance (the last stop wraps back to the first)
    """
    Entfernung = 0
    for step, index in enumerate(route):
        # Wrap with len(route): the original used len(Entfernungen), which is
        # only correct when the route visits every location exactly once.
        index2 = route[(step + 1) % len(route)]
        Entfernung += Entfernungen[index][index2]  # from index to index2
    return Entfernung
def get_create_fulltext_index_query():
    """
    To run the query, need three params: $indexName as str, $labels as array and $properties as array
    :return: Cypher statement creating a full-text node index
    """
    query = 'CALL db.index.fulltext.createNodeIndex($indexName, $labels, $properties)'
    return query
import collections
def play(*start, turns):
    """Plays the North Pole Elves memory game with a list of `start`ing
    numbers and for the given number of `turns`. Returns the last number
    spoken (after `turns` rounds).
    """
    # Per number, remember (at most) the last two turns it was spoken on.
    history = collections.defaultdict(lambda: collections.deque(maxlen=2))
    for turn, number in enumerate(start, start=1):
        history[number].append(turn)
    last = start[-1]
    for turn in range(len(start) + 1, turns + 1):
        seen = history[last]
        # First time spoken -> 0, otherwise gap since previous mention.
        last = 0 if len(seen) == 1 else (turn - 1) - seen[0]
        history[last].append(turn)
    return last
def __checkCanLink(context, source, source_type, message_libname, real_libs=[]):
    """
    Check that source can be successfully compiled and linked against real_libs.
    Keyword arguments:
    source -- source to try to compile
    source_type -- type of source file, (probably should be ".c")
    message_libname -- library name to show in the message output from scons
    real_libs -- list of actual libraries to link against (defaults to a list
                 with one element, the value of messager_libname)
    """
    # NOTE(review): mutable default argument is safe here only because
    # real_libs is never mutated in place.
    if not real_libs:
        real_libs = [message_libname]
    context.Message("Checking for %s..." % message_libname)
    # Remember the caller's LIBS so the environment can be restored below.
    libsave = context.env.get('LIBS')
    context.env.AppendUnique(LIBS=real_libs)
    ret = context.TryLink(source, source_type)
    context.Result( ret )
    if libsave is None:
        del(context.env['LIBS'])
    else:
        context.env['LIBS'] = libsave
    return ret | 66cc4819d684501462465308eb9f8fdeca9f3e6e | 36,656 |
import requests
import os
def get_ip_geo_data(ip):
    """
    Gets ip address geo data in json format

    Queries the ipstack API; the access key is read from the 'geo_key'
    environment variable.
    """
    url = 'http://api.ipstack.com/{}?'.format(ip)
    params = {'access_key': os.environ['geo_key']}
    return requests.get(url, params=params).json()
import pandas
def load_unlabeled_data(filename):
    """Loads raw unlabeled data as floats from a csv file using pandas"""
    values = pandas.read_csv(filename, header=None).values
    # cast every column to float before returning
    return values[:, 0:].astype(float)
def figsize(rows=1, cols=1):
    """Default figsize for a plot with given subplot rows and columns.

    NOTE(review): the width scales with *rows* and the height with *cols*,
    which is the transpose of matplotlib's (width, height) convention —
    confirm this is intended before changing it.
    """
    width = 7 * rows
    height = 5 * cols
    return (width, height)
def calc_power(cell, serial):
    """Calculate the power for a single cell.

    :param cell: (x, y) coordinates of the fuel cell
    :param serial: grid serial number
    :return: power level in the range [-5, 4]
    """
    rack_id = cell[0] + 10
    power_level = (rack_id * cell[1] + serial) * rack_id
    # Keep only the hundreds digit (0 when there is none), then subtract 5.
    # The original only reduced mod 10 when the quotient exceeded 10, so a
    # quotient of exactly 10 (hundreds digit 0) incorrectly yielded 5.
    return (power_level // 100) % 10 - 5
def compare_lists(lst, other_lst):
    """ compare two lists
    :param lst: list for compare
    :param other_lst: list for compare
    :return: True when both hold the same set of elements
             (order and multiplicity are ignored)
    """
    left, right = frozenset(lst), frozenset(other_lst)
    return left == right
def bool2option(b):
    """
    Get config-file-usable string representation of boolean value.
    :param bool b: value to convert.
    :rtype: string
    :returns: ``yes`` if input is ``True``, ``no`` otherwise.
    """
    if b:
        return 'yes'
    return 'no'
def update_table_simple (schema_dataframe, schema_targets, action):
    """Update schema_dataframe with info in schema_targets.
    :param schema_dataframe: a dictionary - {schema:dataframe}
    :param_targets: a dictionary - {schema:targets}
        'schema' -- a string - 'study', 'sample','run', 'experiment'
        'dataframe' -- a pandas dataframe created from the input tables
        'targets' -- a filtered dataframe with 'action' keywords
                     contains updated columns - md5sum and taxon_id
    :return schema_dataframe: a dictionary - {schema:dataframe}
                              dataframe -- updated status
    """
    # expected status keyword written for each action
    status = {'ADD': 'added', 'MODIFY': 'modified',
              'CANCEL': 'cancelled', 'RELEASE': 'released'}
    for schema, dataframe in schema_dataframe.items():
        targets = schema_targets[schema]
        # both frames are re-indexed by alias (in place, as before)
        dataframe.set_index('alias', inplace=True)
        targets.set_index('alias', inplace=True)
        for alias in targets.index:
            dataframe.loc[alias, 'status'] = status[action]
    return schema_dataframe
import os
def get_version_info():
    """Read __version__ from version.py, using exec, not import."""
    version_path = os.path.join("padelpy", "version.py")
    scope = {}
    with open(version_path, "r") as fh:
        # pylint: disable=exec-used
        exec(fh.read(), scope)
    return scope["__version__"]
import os
def path_base(path: str) -> str:
    """
    Get the base of a path string.
    >>> path_base('/dir1/dir2/path.ext')
    ''
    >>> path_base('dir1/dir2/path.ext')
    'dir1'
    Args:
        path (str): Path string.
    Returns:
        str: Path string's base.
    """
    normalized = os.path.normpath(path)
    return normalized.split(os.sep)[0]
def gotoHostGnx(c, target):
    """Change host node selection to target gnx.
    This will not change the node displayed by the
    invoking window.
    ARGUMENTS
    c -- the Leo commander of the outline hosting our window.
    target -- the gnx to be selected in the host, as a string.
    RETURNS
    True if target was found, else False
    """
    # Fast path: the current position already has the target gnx.
    if c.p.gnx == target:
        return True
    # Otherwise scan every unique position in the outline.
    for p in c.all_unique_positions():
        if p.v.gnx == target:
            c.selectPosition(p)
            return True
    return False | 22eeb26137b2f082abb32ab9fd1daedf44c08033 | 36,675 |
import click
def check(fn, error_message=None):
    """
    Creates callback function which raises click.BadParameter when `fn` returns `False` on given input.
    >>> @click.command()
    >>> @click.option('--probability', callback=check(lambda x: 0 <= x <= 1, "--probability must be between 0 and 1 (inclusive)"))
    >>> def f(probability):
    >>>     print('P:', probability)
    """
    def callback(ctx, param, value):
        # Valid values pass straight through; invalid ones raise.
        if not fn(value):
            if error_message is None:
                msg = str(value)
            else:
                msg = '{}. Value: {}'.format(error_message, value)
            raise click.BadParameter(msg)
        return value
    return callback
import warnings
def heatcapacity(ddtt):
    """
    Heat capacity (kJ/m2-K) of a construction or material.
    thickness (m) * density (kg/m3) * specific heat (J/kg-K) * 0.001

    :param ddtt: an eppy/IDF object (Construction or Material-family);
        NOTE(review): relies on eppy's ``obj``, ``objls``, ``getfieldidd``
        and ``theidf`` attributes — confirm against the eppy API.
    :return: heat capacity in kJ/m2-K
    :raises AttributeError: for unsupported object types or missing layers
    """
    object_type = ddtt.obj[0]
    if object_type == "Construction":
        # Sum the heat capacity of every layer of the construction.
        heatcapacity = 0
        layers = ddtt.obj[2:]
        field_idd = ddtt.getfieldidd("Outside_Layer")
        validobjects = field_idd["validobjects"]
        for layer in layers:
            found = False
            for key in validobjects:
                try:
                    heatcapacity += ddtt.theidf.getobject(key, layer).heatcapacity
                    found = True
                except AttributeError:
                    pass
            if not found:
                raise AttributeError("%s material not found in IDF" % layer)
    elif object_type == "Material":
        thickness = ddtt.obj[ddtt.objls.index("Thickness")]
        density = ddtt.obj[ddtt.objls.index("Density")]
        specificheat = ddtt.obj[ddtt.objls.index("Specific_Heat")]
        heatcapacity = thickness * density * specificheat * 0.001
    elif object_type == "Material:AirGap":
        heatcapacity = 0
    elif object_type == "Material:InfraredTransparent":
        heatcapacity = 0
    elif object_type == "Material:NoMass":
        # Mass-less materials contribute nothing, but the caller is warned.
        warnings.warn(
            "Material:NoMass materials included in heat capacity calculation",
            UserWarning,
        )
        heatcapacity = 0
    elif object_type == "Material:RoofVegetation":
        warnings.warn(
            "Material:RoofVegetation thermal properties are based on dry soil",
            UserWarning,
        )
        thickness = ddtt.obj[ddtt.objls.index("Thickness")]
        density = ddtt.obj[ddtt.objls.index("Density_of_Dry_Soil")]
        specificheat = ddtt.obj[ddtt.objls.index("Specific_Heat_of_Dry_Soil")]
        heatcapacity = thickness * density * specificheat * 0.001
    else:
        raise AttributeError("%s has no heatcapacity property" % object_type)
    return heatcapacity | 438f7f6cdb31b5e4c0c70e8a75e5fc64fe0c7176 | 36,677 |
import socket
def discover_avail_ports(num_required):
    """Discover available TCP listening ports.

    Hack: bind each socket to port 0 so the OS picks a free port, record
    the chosen port numbers, then close the sockets. There is obviously a
    race condition — a returned port may be taken before it is reused.
    """
    socks = [socket.socket() for _ in range(num_required)]
    for sock in socks:
        sock.bind(('', 0))
    ports = [sock.getsockname()[1] for sock in socks]
    for sock in socks:
        sock.close()
    return ports
import subprocess
import json
def get_json_info(package):
"""retrieves json info as str for given homebrew package name
Parameters
----------
package : str
homebrew package for which to retrieve info in json format as str
Returns
-------
str
json representation of homebrew package
"""
proc = subprocess.Popen(
['brew', 'info', '--json=v1', package],
stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return json.loads(stdout) | 727b7189c84b52326e07627041501ca2db664cea | 36,679 |
def fix_non_aware_datetime(obj):
    """
    Normalise a PBS MM API datetime string to be timezone aware.

    Some DateTime values returned by the API are not timezone aware, so
    append ' 00:00:00' when no time is present, and assume UTC (append
    '+00:00') when neither a '+' offset nor a 'Z' suffix is given.
    """
    if obj is None:
        return None
    fixed = obj
    if ':' not in fixed:  # no time component at all
        fixed += ' 00:00:00'
    if '+' not in fixed and 'Z' not in fixed:  # no explicit zone -> UTC
        fixed += '+00:00'
    return fixed
def surround_double_quotes(string: str) -> str:
    """Return the input wrapped in double quotes.

    Args:
        string: Value to wrap (converted with ``str`` first).

    Returns:
        str: The double-quoted string.
    """
    return '"{}"'.format(str(string))
import itertools
def sort_and_group(iterable, key):
    """Sort an iterable and group the items by the given key func"""
    ordered = sorted(iterable, key=key)
    groups = []
    # groupby requires its input to be pre-sorted by the same key
    for group_key, members in itertools.groupby(ordered, key=key):
        groups.append((group_key, list(members)))
    return groups
def get_mouth_coords(fjord):
    """
    Get the reference x and y coordinates for the fjord mouth

    Returns [x, y] for a known fjord key; prints a notice and returns
    None for unknown keys.
    """
    coords = {
        "JI": [-312319.963189, -2260417.83078],
        "KB": [-438759, -1037230],
    }
    try:
        return list(coords[fjord])
    except KeyError:
        print("The current fjord does not have a location entry")
def decode_email(ee):
    """Decode email protection.

    The first two hex digits are an XOR key; each following hex pair is
    one obfuscated character. Returns '' on malformed input.
    """
    try:
        key = int(ee[:2], 16)
        chars = []
        for i in range(2, len(ee), 2):
            chars.append(chr(int(ee[i:i + 2], 16) ^ key))
        return ''.join(chars)
    except ValueError:
        return ''
def whitespace_tokenize(text):
    """Splits an input into tokens by whitespace."""
    stripped = text.strip()
    return stripped.split()
import numpy
def _evaluate_jacobian(model, jacobian, current):
""" Evaluates the jacobian at current """
# pylint: disable=invalid-name, star-args
nvars = len(model.variables.values())
J = numpy.zeros((nvars, nvars, ))
for i in range(nvars):
for j in range(nvars):
if jacobian[i][j] is not None:
J[i, j] = jacobian[i][j](*current)
return J | 40e38c68a1eabec13894458eab0d63f800dc101b | 36,689 |
def create_full_deck(n):
    """create n full decks, each with 36 tuples of the dice rolls"""
    return [(d1, d2)
            for _ in range(n)
            for d1 in range(1, 7)
            for d2 in range(1, 7)]
def diagonalize(items):
    """
    Take the diagonal of a list of words. If the diagonal runs off the end
    of a word, raise an IndexError.
    """
    return ''.join(word[i] for i, word in enumerate(items))
def interpret(instruction: str) -> tuple:
    """
    Split and interpret a piloting instruction and return a tuple of action and
    units.
    Paramaters:
        instruction (str): Instruction to interpret
    Returns
        tuple: Tuple of action and units
    """
    action, raw_units = instruction.split(" ", 1)
    return (action, int(raw_units))
def main(args=None):
    """ Main entry point of hello-world """
    # Print the greeting and report success to the shell.
    greeting = 'Hello World'
    print(greeting)
    return 0
import hashlib
import json
def check_hash(hash: str, content: dict) -> bool:
    """Check that the stored hash from the metadata file matches the pyproject.toml file."""
    # OG source: https://github.com/python-poetry/poetry/blob/fe59f689f255ea7f3290daf635aefb0060add056/poetry/packages/locker.py#L44 # noqa: E501
    relevant_keys = ["dependencies", "dev-dependencies", "source", "extras"]
    poetry_section = content["tool"]["poetry"]
    # sha256 over the sorted JSON form of the relevant sub-sections only
    relevant_content = {key: poetry_section.get(key) for key in relevant_keys}
    expected = hashlib.sha256(
        json.dumps(relevant_content, sort_keys=True).encode()).hexdigest()
    return hash == expected
def _percent_str(percentages):
"""Convert percentages values into string representations"""
pstr = []
for percent in percentages:
if percent >= 0.1:
pstr.append('%.1f' %round(percent, 1)+' %')
elif percent >= 0.01:
pstr.append('%.2f' %round(percent, 2)+' %')
else:
pstr.append('%.3f' %round(percent, 3)+' %')
return pstr | 0e6acc22eb14e0e5dfb1f44a52dc64e076e15b33 | 36,695 |
import typing
def isunion(T) -> bool:
    """Returns whether the given type is a generic union"""
    origin = typing.get_origin(T)
    return origin == typing.Union
import argparse
from pathlib import Path
def parse_args():
    """Parses command line args."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--data', type=Path, default=Path('data'),
                     help='directory containing the (.png|.jpg|.bmp) input images')
    cli.add_argument('--optim_algo', choices=['grid', 'powell'], default='grid',
                     help='either do grid search, or apply Powell\'s derivative-free optimizer')
    cli.add_argument('--lower_bound', type=float, default=-2, metavar='LO',
                     help='lower bound of shear values')
    cli.add_argument('--upper_bound', type=float, default=2, metavar='HI',
                     help='upper bound of shear values')
    cli.add_argument('--num_steps', type=float, default=20, metavar='STEPS',
                     help='if grid search is used, this argument defines the number if grid points')
    cli.add_argument('--bg_color', type=int, default=255, metavar='BG',
                     help='color to fill the gaps of the sheared image that is returned')
    return cli.parse_args()
def fitbox(fig, text, x0, x1, y0, y1, **kwargs):
    """Fit text into a NDC box.

    Places *text* on *fig* within the normalized-coordinate box
    [x0, x1] x [y0, y1], shrinking the font until the rendered text fits.

    Args:
        textsize (int, optional): First attempt this textsize to see if it fits.
    """
    if text is None:
        return None
    figbox = fig.get_window_extent().transformed(
        fig.dpi_scale_trans.inverted()
    )
    # need some slop for decimal comparison below
    px0 = x0 * fig.dpi * figbox.width - 0.15
    px1 = x1 * fig.dpi * figbox.width + 0.15
    py0 = y0 * fig.dpi * figbox.height - 0.15
    py1 = y1 * fig.dpi * figbox.height + 0.15
    # anchor point shifts to the box centre for centred alignment
    xanchor = x0
    if kwargs.get("ha", "") == "center":
        xanchor = x0 + (x1 - x0) / 2.0
    yanchor = y0
    if kwargs.get("va", "") == "center":
        yanchor = y0 + (y1 - y0) / 2.0
    txt = fig.text(
        xanchor,
        yanchor,
        text,
        fontsize=kwargs.get("textsize", 50),
        ha=kwargs.get("ha", "left"),
        va=kwargs.get("va", "bottom"),
        color=kwargs.get("color", "k"),
    )
    def _fits(txt):
        """Test for fitting."""
        tb = txt.get_window_extent(fig.canvas.get_renderer())
        return tb.x0 >= px0 and tb.x1 < px1 and tb.y0 >= py0 and tb.y1 <= py1
    # Shrink the font step-wise until the text fits (or size bottoms out).
    if not _fits(txt):
        for size in range(50, 1, -2):
            txt.set_fontsize(size)
            if _fits(txt):
                break
    return txt | f219cdf6b191b218eb922f2046efb120c7d2a151 | 36,699 |
def _zipcode_guard(model, field_prefix, match_usa):
"""Get the zip code or not depending on country value"""
is_usa = getattr(model, field_prefix + '_country') == 'USA'
zipcode = getattr(model, field_prefix + '_zip')
if (match_usa and is_usa) or (not match_usa and not is_usa):
return zipcode | 0376a708ac1f686e9be039c9d4afa48670f0dff3 | 36,701 |
def swcfsrf(fsns, fsnsc):
    """Surface shortwave cloud forcing

    Difference of all-sky (*fsns*) and clear-sky (*fsnsc*) net surface
    shortwave fluxes; the result must support a ``long_name`` attribute.
    """
    forcing = fsns - fsnsc
    forcing.long_name = "Surface shortwave cloud forcing"
    return forcing
def get_non_job_argslist(details_form):
    """
    Gets a list of all fields returned with the Job options forms that
    shouldn't be included in the job args list put in the database.
    Args:
        details_form (WTForm): the JobDetails form (a form that is
                               used as a base for most other job
                               options forms)
    Returns:
        list: list of arguments to remove from the job_args list
    """
    removable = ['csrf_token', 'start_job']
    removable.extend(details_form.data.keys())
    return removable
def mean(list):
    """Return the arithmetic mean of *list*, or 0 if it is empty."""
    n = len(list)
    if n == 0:
        return 0
    return sum(list) / n
def temp_dir_simulated_files(tmp_path_factory):
    """Temporal common directory for processing simulated data."""
    # One shared tmp directory per factory scope.
    return tmp_path_factory.mktemp("simulated_files")
import numpy
def dist_eucl(val_x, val_y):
    """
    Euclidean distance between two points, computed with numpy.
    """
    difference = val_x - val_y
    return numpy.linalg.norm(difference)
def slash_at_the_end(path, slash=0):
    """
    slash_at_the_end: make sure there is (or is not) a slash at the end
    of a path name.
    :param path: path string to adjust
    :param slash: 1 to force a trailing slash, 0 to strip it
    :return: adjusted path
    """
    has_slash = path.endswith('/')
    if slash == 0 and has_slash:
        return path[:-1]
    if slash == 1 and not has_slash:
        return path + '/'
    return path
def _indent(msg, tab=' '):
"""Add indentation to a message line per line"""
res = ''
for line in msg.split('\n'):
res += tab+line+'\n'
return res | af871f6b9478ed4a21a0d8c8437b23942002c781 | 36,709 |
from numpy.linalg import svd, det
from numpy import dot
def scale_and_fit(X, Y, check_mirror_image=False):
    """
    Return the translation vector, the rotation matrix and a
    global scaling factor minimizing the RMSD between two sets
    of d-dimensional vectors, i.e. if
    >>> R, t, s = scale_and_fit(X, Y)
    then
    >>> Y = s * (dot(Y, R.T) + t)
    will be the fitted configuration.
    @param X: (n, d) input vector
    @type X: numpy array
    @param Y: (n, d) input vector
    @type Y: numpy array
    @param check_mirror_image: if True, force a proper rotation
        (det(R) > 0) by flipping the smallest singular direction
    @return: (d, d) rotation matrix and (d,) translation vector
    @rtype: tuple
    """
    ## centers
    x, y = X.mean(0), Y.mean(0)
    ## SVD of correlation matrix
    V, L, U = svd(dot((X - x).T, Y - y))
    ## calculate rotation, scale and translation
    R = dot(V, U)
    if check_mirror_image and det(R) < 0:
        # Reflection detected: flip the last right-singular vector (and
        # its singular value) to obtain a proper rotation instead.
        U[-1] *= -1
        L[-1] *= -1
        R = dot(V, U)
    s = (L.sum() / ((Y-y)**2).sum())
    t = x / s - dot(R, y)
    return R, t, s | fc91392609a8d4cf23d3ed85844de1e6fd499b7d | 36,711 |
def create_payment_msg(identifier, status):
    """Create a payment payload for the paymentToken."""
    token = {'id': identifier, 'statusCode': status}
    return {'paymentToken': token}
def addElements(record, recordInfo, featureList):
    """
    Input: record - node from XML File,
           recordInfo - dict of info about record,
           featureList - list of features to parse
    Output: Updated recordInfo with text of features in featureList
    """
    for entry in featureList:
        try:
            # Locate the feature element and store its stripped text
            # under its tag name.
            node = record.find(entry)
            recordInfo[node.tag] = node.text.strip()
        except AttributeError:
            # find() returned None (or text was missing) — report and continue.
            print(entry,'of type',type(entry),'in record',record,'is missing text method')
    return recordInfo
from typing import List
import os
import pkg_resources
import shutil
def get_flat_cfg_file(path: str = "~/.edapy/pdf_ignore_keys.csv") -> List[str]:
    """
    Read a config file as a list of stripped lines.

    The file (and its directory) is created from the copy bundled with
    the ``edapy`` package if it doesn't exist yet.

    Parameters
    ----------
    path : str

    Returns
    -------
    ignore_keys : List[str]
    """
    resolved = os.path.abspath(os.path.expanduser(path))
    directory, filename = os.path.split(resolved)
    if not os.path.exists(directory):
        os.makedirs(directory)
    if not os.path.isfile(resolved):
        # Seed the user config from the file shipped inside the package.
        bundled = pkg_resources.resource_filename("edapy", "/".join(["static", filename]))
        shutil.copyfile(bundled, resolved)
    with open(resolved) as handle:
        return [line.strip() for line in handle.readlines()]
def path_to_str(pathP: list) -> str:
    """Render a path (list of node labels) as a string like "a -> b -> c".

    Returns "" for an empty path. Each label is converted with str(),
    so a single-element path now also honors the declared ``-> str``
    return type (the old code returned the raw element unconverted).
    """
    return " -> ".join(str(node) for node in pathP)
def truncate_int(tval, time_int):
    """Truncate tval down to the nearest multiple of time_int.

    Uses floor division: under Python 3 the original ``/`` was true
    division, which cancelled the multiply and returned tval (as a
    float) unchanged instead of truncating it.
    """
    return (int(tval) // time_int) * time_int
def get_cid_from_cert_lot_result(result):
    """
    Get CID from a certificate-by-location query result.

    The query result's 'machine' field looks like:
        '[hardware_api]/201404-14986/'
    This parses out the next-to-last path segment, e.g. 201404-14986.

    :param result: element of results queried from C3 certificate API
    :return: string, CID
    """
    segments = result['machine'].split('/')
    return segments[-2]
def _create_expr(symbols, prefix='B', suffix='NK'):
"""Create einsum expr with prefix and suffix."""
return prefix + ''.join(symbols) + suffix | d5e0aff866f375d611b4a4fa82665a0a5447bf02 | 36,720 |
def unify_common_space(memory_state):
    """
    Merge consecutive fragments of the common space (devices_id == 0).

    Each device may not report the same size for the shared space even
    though they all seem to access the whole of it, so adjacent fragments
    with ``devices_id == 0`` are combined into one entry whose size is
    the sum of the fragment sizes.

    Input:
        - memory_state : list of fragment dicts, each with at least the
          keys ``devices_id`` and ``size``.
    Output:
        A new list with the common space merged. Unlike the previous
        version, the input list and its dicts are never mutated, and an
        empty input yields [] instead of raising IndexError.
    """
    if not memory_state:
        return []
    # Copy each fragment so merging never writes into the caller's dicts.
    merged = [dict(memory_state[0])]
    for fragment in memory_state[1:]:
        if fragment["devices_id"] == 0 and merged[-1]["devices_id"] == 0:
            merged[-1]["size"] += fragment["size"]
        else:
            merged.append(dict(fragment))
    return merged
import random
def make_batches(train_set, train_labs, batch_size):
    """
    Generates the training batches. Performs random shuffling.

    :param train_set: All training pairs (must support indexing by a
        list of ints, e.g. a numpy array).
    :param train_labs: All training labels.
    :param batch_size: Batch size.
    :return: A list of [batch_data, batch_labels] pairs.
    """
    # Shuffle all indices, then drop the tail so that every batch is full.
    shuffled = random.sample(range(len(train_set)), len(train_set))
    usable = len(shuffled) // batch_size * batch_size
    shuffled = shuffled[:usable]
    batches = []
    for start in range(0, usable, batch_size):
        idx = shuffled[start:start + batch_size]
        batches.append([train_set[idx], train_labs[idx]])
    return batches
def evaluate(formula, operators, numbers):
    """Evaluate a formula represented as a tree into a single numeric result.

    A formula is ``[left, op_index, right]``: each side is either a nested
    formula (a list) or an index into ``numbers``, and ``op_index`` indexes
    into ``operators`` (0: +, 1: -, 2: *, 3: exact integer /, 4: take left,
    5: take right). A non-integer division yields NaN.
    """
    def resolve(side):
        # A list is a sub-formula; anything else indexes the numbers table.
        if isinstance(side, list):
            return evaluate(side, operators, numbers)
        return numbers[side]

    left_val = resolve(formula[0])
    right_val = resolve(formula[2])
    op = operators[formula[1]]
    if op == 0:
        return left_val + right_val
    if op == 1:
        return left_val - right_val
    if op == 2:
        return left_val * right_val
    if op == 3:
        # Only exact (remainder-free) integer divisions are allowed.
        if right_val != 0 and left_val % right_val == 0:
            return left_val / right_val
        return float("NaN")
    if op == 4:
        return left_val
    if op == 5:
        return right_val
    # Unknown operator code: same NaN default as before.
    return float("NaN")
def arrange_gff_data(features):
    """Assign a display row to each feature so overlapping features never share one.

    Features are sorted by ``start`` (in place); each is placed on the first
    existing row whose last feature ends at or before this feature's start,
    otherwise on a new row. The row index is stored under ``row_cnt`` and
    the features are returned row by row.
    """
    features.sort(key=lambda f: f["start"])
    rows = []
    for feature in features:
        placed = False
        for row_idx, row in enumerate(rows):
            if row[-1]["end"] <= feature["start"]:
                feature["row_cnt"] = row_idx
                row.append(feature)
                placed = True
                break
        if not placed:
            # No row has space: open a fresh one.
            feature["row_cnt"] = len(rows)
            rows.append([feature])
    return [feat for row in rows for feat in row]
def SDL_EVENTMASK(x):
    """Return the bit mask for event type ``x`` (used for predefining event masks)."""
    return 2 ** x
def raw_to_regular(exitcode):
    """
    Decode a raw (os.wait-style) exitcode into a plain format:
    - regular exits map to a value between 0 and 127;
    - signals map to the negative signal number (-1 through -127);
    - failures (exitcode < 0) map to the special value -128.
    Non-integer inputs are returned unchanged.
    """
    if not isinstance(exitcode, int):
        return exitcode
    if exitcode < 0:
        # Failure marker.
        return -128
    signal_number = exitcode & 127
    if signal_number:
        return -signal_number
    # The high byte holds the regular exit status.
    return exitcode >> 8
def get_trigger_severity(code):
    """Get the human-readable trigger severity for a numeric code."""
    severities = {
        0: "Not classified",
        1: "Information",
        2: "Warning",
        3: "Average",
        4: "High",
        5: "Disaster",
    }
    return severities.get(code, "Unknown ({})".format(code))
def cember_cevresi_hesapla(r, pi=3.14):
    """
    Compute the circumference of a circle.

    input (parameters): r (radius), pi (approximation of pi)
    output: the circumference, 2 * pi * r
    """
    return 2 * pi * r
def create_timestamp_subdirectory_Structure(timestamp: str):
    """
    Derive a "YYYY/MM/DD/" sub-directory path string from a timestamp
    formatted like ``2020-10-05_020600UTC``.

    Note: despite the name, nothing is created on disk; only the path
    string is built and returned (the old docstring claimed otherwise).
    """
    date, _time = timestamp.split("_")  # split date from time-of-day
    yy, mm, dd = date.split("-")
    return f"{yy}/{mm}/{dd}/"
from os.path import exists
def StartedButUnfinishedExtrapolations(TopLevelOutputDir, SubdirectoriesAndDataFiles):
    """Find directories with extrapolations that started but didn't finish.

    A run counts as unfinished when its ``.started_*`` marker exists while
    both the ``.error_*`` and ``.finished_*`` markers are absent.
    """
    unfinished = []
    for subdirectory, data_file in SubdirectoriesAndDataFiles:
        base = "{}/{}/".format(TopLevelOutputDir, subdirectory)
        started = exists(base + ".started_" + data_file)
        errored = exists(base + ".error_" + data_file)
        finished = exists(base + ".finished_" + data_file)
        if started and not errored and not finished:
            unfinished.append([subdirectory, data_file])
    return unfinished
def iou_distance(gt_obj, hp_obj):
    """Calculate the IoU distance 'intersection over union (IoU)' of one obj.

    The distance is computed as
        IoU(a, b) = 1. - isect(a, b) / union(a, b)
    where isect(a, b) is the area of intersection of the two rectangles and
    union(a, b) = area(a) + area(b) - isect(a, b). The result is bounded
    between 0 (perfect overlap) and 1 (no overlap). A matching is normally
    considered qualified when the distance < 0.5.

    Fixes vs. the previous version: the denominator used
    ``hp_width * hp_width`` (a typo for hp_width * hp_height) and never
    subtracted the intersection, so it was not the IoU described above.

    :param gt_obj: sequence whose entries 2..5 are (left x, width, top y,
        height) as ints or numeric strings
    :param hp_obj: same layout as gt_obj
    :return: the IoU distance as a float
    """
    gt_left = int(gt_obj[2])
    gt_width = int(gt_obj[3])
    gt_top = int(gt_obj[4])
    gt_height = int(gt_obj[5])
    hp_left = int(hp_obj[2])
    hp_width = int(hp_obj[3])
    hp_top = int(hp_obj[4])
    hp_height = int(hp_obj[5])

    # Overlap along an axis = min(far edges) - max(near edges), floored at 0.
    overlap_w = max(0, min(gt_left + gt_width, hp_left + hp_width)
                    - max(gt_left, hp_left))
    overlap_h = max(0, min(gt_top + gt_height, hp_top + hp_height)
                    - max(gt_top, hp_top))
    intersection = overlap_w * overlap_h
    # Union counts the overlapping region only once.
    union = gt_width * gt_height + hp_width * hp_height - intersection
    if union == 0:
        # Degenerate (zero-area) rectangles: treat as maximally distant.
        return 1.0
    return 1 - intersection / union
def goldstein_price(phenome):
    """The bare-bones Goldstein-Price function (2-D benchmark; minimum 3 at (0, -1))."""
    x, y = phenome[0], phenome[1]
    poly1 = (19.0 - 14.0 * x + 3.0 * x ** 2 - 14.0 * y) + (6.0 * x * y + 3.0 * y ** 2)
    factor1 = 1.0 + (x + y + 1.0) ** 2 * poly1
    poly2 = (18.0 - 32.0 * x + 12.0 * x ** 2) + (48.0 * y - 36.0 * x * y + 27.0 * y ** 2)
    factor2 = 30.0 + (2.0 * x - 3.0 * y) ** 2 * poly2
    return factor1 * factor2
def maximum_diff_brute_force(A):
    """Brute-force search, Θ(n^2).

    Return ``(left, right, max_diff)`` for the index pair ``left < right``
    maximizing ``A[right] - A[left]``; the first such pair wins ties, and
    ``(0, 0, 0)`` is returned when no positive difference exists.
    """
    best = (0, 0, 0)
    for buy in range(len(A) - 1):
        for sell in range(buy + 1, len(A)):
            diff = A[sell] - A[buy]
            # Strict comparison keeps the earliest maximizing pair.
            if diff > best[2]:
                best = (buy, sell, diff)
    return best
from typing import Optional
from typing import Union
def concat_nas_address(ip_address: Optional[str] = None,
                       port: Union[None, str, int] = None,
                       drive_prefix: Optional[str] = None,
                       https: bool = True) -> str:
    """
    Build the base URL of a NAS from either an IP address (plus port) or
    a drive prefix.

    :param ip_address: such as 192.168.1.51
    :param port: port number; defaults to 5001 for https, 5000 for http
    :param drive_prefix: such as drive.xxxx.com, hehe.com/drive
    :param https: select the https:// or http:// scheme
    :return: the full address, e.g. ``https://192.168.1.51:5001``
    """
    scheme = 'https://' if https else 'http://'
    if ip_address is None:
        # Prefix mode: a drive prefix must be supplied instead of an IP.
        assert isinstance(drive_prefix, str)
        host = ':'.join(filter(None, [drive_prefix]))
    else:
        assert drive_prefix is None
        resolved_port = ('5001' if https else '5000') if port is None else str(port)
        host = ':'.join(filter(None, [ip_address, resolved_port]))
    return scheme + host
import functools
import warnings
def raise_warnings(func):
    """Function decorator that causes all Python warnings to be raised as
    exceptions in the wrapped function.

    Example:
        >>> @raise_warnings
        >>> def foo():
        >>>     warnings.warn("this will raise an exception")
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            # Escalate every warning to an error for the duration of the call.
            warnings.simplefilter('error')
            return func(*args, **kwargs)
    return wrapper
import os
def _get_filename(_file, _fext):
""" Ensures an output file has a certain extension """
if _file is None or _fext is None:
return _file
else:
if _fext[0] != ".":
_fext = f".{_fext}"
if not _file.endswith(_fext):
return os.path.splitext(_file)[0] + _fext
return _file | 86bd2486ee0fb13d306f543f2f532ea43ec57503 | 36,740 |
def get_arg_default_issues(func_el, *, get_issue_status_func, include_kw=True):
    """
    Look at this function's arguments. Any issues?

    Walks the default-value expressions of a function-definition element
    and reports each argument whose default triggers the supplied checker.

    :param func_el: element for a function definition. NOTE(review): it is
        queried via ``.xpath(...)`` and ``.getchildren()``, which suggests
        an lxml element over an AST-as-XML tree — confirm against callers.
    :param get_issue_status_func: callable mapping a default-value child
        element to an issue status; falsy means "no issue".
    :param include_kw: also inspect keyword-only arguments and their
        defaults.
    :return: list of (argument name, issue status) pairs, in left-to-right
        argument order, restricted to arguments with a truthy status.
    """
    posonly_arg_els = func_el.xpath('args/arguments/posonlyargs/arg')
    arg_els = func_el.xpath('args/arguments/args/arg')
    all_arg_els = posonly_arg_els + arg_els ## order matters
    if include_kw:
        kwonly_arg_els = func_el.xpath('args/arguments/kwonlyargs/arg')
        all_arg_els += kwonly_arg_els
    arg_names = [arg_el.get('arg') for arg_el in all_arg_els]
    arg_default_els = func_el.xpath('args/arguments/defaults')
    default_els = arg_default_els ## order matters
    if include_kw:
        kw_default_els = func_el.xpath('args/arguments/kw_defaults')
        default_els += kw_default_els
    # Collect one status per default expression, in document order.
    issue_statuses = []
    for default_el in default_els:
        for child_el in default_el.getchildren():
            issue_status = get_issue_status_func(child_el)
            issue_statuses.append(issue_status)
    ## reversed because defaults are filled in rightwards e.g. a, b=1, c=2
    ## args = a,b,c and defaults=1,2 -> reversed c,b,a and 2,1 -> c: 2, b: 1
    arg_names_reversed = reversed(arg_names)
    issue_statuses_reversed = reversed(issue_statuses)
    args_and_issue_statuses = reversed(list(
        zip(arg_names_reversed, issue_statuses_reversed))) ## back to left-to-right order
    # Keep only the arguments whose default actually raised an issue.
    args_with_issues = [(arg, issue_status)
                        for arg, issue_status in args_and_issue_statuses
                        if issue_status]
    return args_with_issues
def data_response(data, code=0, msg='ok'):
    """Build the response envelope for a single piece of data."""
    return {'code': code, 'msg': msg, 'data': data}
import re
def clean_ar_text(sentence: str) -> str:
    """
    Strip a sentence down to Arabic-block characters, removing
    punctuation and collapsing repeated spaces.

    reference
    https://jrgraphix.net/r/Unicode/0600-06FF
    https://en.wikipedia.org/wiki/Arabic_alphabet
    """
    # Keep only characters of the Arabic Unicode block (U+0600-U+06FF),
    # plus spaces, commas and newlines. The previous character class had
    # lost the start of the intended range (it read "[^-ۿ ,\n]"), so it
    # stripped the Arabic letters it was meant to keep.
    sentence = re.sub("[^\u0600-\u06FF ,\n]", '', sentence)
    # Drop remaining punctuation (ASCII and Arabic variants).
    sentence = re.sub("['.,؟?!،]", '', sentence)
    # Collapse runs of spaces.
    sentence = re.sub(' +', ' ', sentence)
    return sentence
def format_section(section_dict):
    """Group a flat list of question dicts by aspect.

    Returns a list of aspect dicts sorted by ``id``, each carrying its
    ``id``, ``name``, ``eu_requirement`` and its questions sorted by
    ``order``.
    """
    unique_aspects = {(q['aspect_id'], q['name'], q['eu_requirement'])
                      for q in section_dict}
    aspects = []
    for aspect_id, name, eu_requirement in unique_aspects:
        questions = sorted(
            (q for q in section_dict if q['aspect_id'] == aspect_id),
            key=lambda item: item['order'])
        aspects.append({
            'id': aspect_id,
            'name': name,
            'eu_requirement': eu_requirement,
            'questions': questions,
        })
    return sorted(aspects, key=lambda item: item['id'])
from typing import Union
from typing import Iterable
def vector_dot_product(vector: tuple[Union[int, float], ...],
                       matrix: Iterable[Iterable[Union[int, float]]]) -> tuple[Union[int, float], ...]:
    """Multiply a matrix by a vector (matrix @ vector).

    Each element of the result is the dot product of one matrix row with
    the vector — usable e.g. to transform a cell address through a
    rotation matrix. (The old docstring mislabelled the parameters and
    return value; the commented-out alternatives have been removed.)

    :param vector: coordinates, one per dimension
    :type vector: tuple of numbers
    :param matrix: iterable of rows, each with one entry per dimension
    :type matrix: iterable of iterables of numbers
    :return: the transformed coordinates
    :rtype: tuple
    :raises: TypeError when entries are not numeric
    """
    # Works with 1-dimensional data and up.
    return tuple(sum(x * y for x, y in zip(row, vector)) for row in matrix)
def cli_billing_get_invoice(client, name=None):
    """Retrieve the invoice with the given name, or the latest one when no name is given."""
    return client.get(name) if name else client.get_latest()
def _get_pattern_list(testdata, global_obj, pattern="start"):
"""
Get the pattern attribute from either the current testdata block
or from the global section
"""
global_var_pattern = global_obj.find("variable_pattern") if global_obj is \
not None else None
if pattern == "start":
resultant = "${"
else:
resultant = "}"
if testdata.get(pattern+"_pattern") is not None:
resultant = testdata.get(pattern+"_pattern")
elif global_var_pattern is not None and global_var_pattern.\
get(pattern+"_pattern") is not None:
resultant = global_var_pattern.get(pattern+"_pattern")
return resultant | 8650065c70eb27da519ff2d215c82ab1abdd5848 | 36,749 |
import os
def _get_file_size(file_path):
"""
Get the file size.
Args:
file_path (str): The file path.
Returns:
int, the file size. If file is not existed, then return 0.
"""
try:
file_size = os.path.getsize(file_path)
except FileNotFoundError:
file_size = 0
return file_size | 03c2efaf1f165864187f55c5d08b5dbd9f2916d0 | 36,750 |
def get_attributevalue_from_directchildnode(parentnode, childname, attribute):
    """Look up an attribute on a direct child of an XML node.

    Searches for the first child named ``childname`` under ``parentnode``.
    Returns the child's attribute value (None when the child lacks that
    attribute), or False when no such child exists.
    """
    child = parentnode.find(childname)
    if child is None:
        return False
    return child.get(attribute)
from functools import reduce
def _stop_buses(json):
"""
Helper function for enumerating buses going through the stop
:param json: HSL API bus stop code
"""
# lines = json[0]["lines"]
lines = reduce(lambda x, y: x + y, [x["lines"] for x in json])
return [x.split(":")[0] for x in lines] | 800061157602a8744e5e4406e885717be5cb2651 | 36,753 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.