content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def unpack(iter):
    """
    Input: List of multiple comma seperated values as string.
    Returns: list of unique values from the string.
    """
    # Accumulate into a set directly; final order is unspecified, as before.
    seen = set()
    for csv_chunk in iter:
        seen.update(csv_chunk.split(','))
    return list(seen)
import torch


def get_device() -> torch.device:
    """Return the preferred torch device.

    Uses CUDA when it is available, otherwise falls back to the CPU.
    """
    backend = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.device(backend)
def _sqlite_strata_conditions(df, dtypes, n=1):
    """Build SQLite conditional clauses from aggregation strata.

    Given a dataframe whose columns are merge_cols and whose rows are unique
    value combinations appearing as aggregation strata, return a list of
    strings that constitute valid SQLite conditional statements.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        Columns are merge_cols, rows are unique aggregation strata.
    dtypes : Dict[str, str]
        Lookup of SQLite datatype by column name ("text" values get quoted).
    n : int
        Number of input rows merged (with "or") into each output string.

    Returns
    -------
    List[str]
        One valid SQLite conditional per group of n input rows, e.g.
        "(TableNumber in (1) and ImageNumber in (1, 2, 3))".
    """
    per_row = []
    for _, series in df.iterrows():
        clauses = []
        for col, vals in zip(series.index, series.values):
            # Quote entries of text-typed columns.
            if dtypes[col] == "text":
                vals = [f"'{v}'" for v in vals]
            if len(vals) > 1:
                rendered = ", ".join(str(v) for v in vals)
            else:
                rendered = vals[0]
            clauses.append(f"{col} in ({rendered})")
        per_row.append(f"({' and '.join(clauses)})")
    return [
        " or ".join(per_row[i:i + n]) for i in range(0, len(per_row), n)
    ]
def build_quota_table(old_quotas, updated_quotas):
    """Construct the table of updated quotas for insertion into the email"""
    row_fmt = "{quota:>25} |{old_value:>10} |{new_value:>10}"
    rows = [
        row_fmt.format(quota='QUOTA ', old_value='OLD VALUE',
                       new_value='NEW VALUE'),
        row_fmt.format(quota='-' * 20, old_value='-' * 10,
                       new_value='-' * 10),
    ]
    rows.extend(
        row_fmt.format(quota=name, old_value=old_quotas[name],
                       new_value=updated_quotas[name])
        for name in updated_quotas
    )
    return "\n".join(rows)
def generate_power_sets(sample_list):
    """Generate all possible subsets from the `sample_list`.

    Parameters
    ----------
    sample_list : [int]

    Returns
    -------
    [[int]]
        All possible subsets, enumerated by bitmask (empty set first).
    """
    total = 2 ** len(sample_list)
    subsets = []
    for mask in range(total):
        # Bit i of `mask` selects sample_list[i] for this subset.
        subsets.append(
            [item for bit, item in enumerate(sample_list) if mask & (1 << bit)]
        )
    return subsets
from collections import Counter


def ransom_note(magazine, ransom):
    """Determine if the magazine contains the words to write the ransom note.

    :param magazine: iterable of words available in the magazine
    :param ransom: iterable of words required for the note
    :return: True when every word occurs in `magazine` at least as many
        times as it occurs in `ransom`.
    """
    # Multiset subtraction keeps only positive counts, i.e. shortfalls.
    # (typing.Counter was only a deprecated generic alias; instantiate the
    # real collections.Counter instead.)
    missing = Counter(ransom) - Counter(magazine)
    return len(missing) == 0
def coord_to_index_(coord, map_obj):
    """
    Converts atomic residue coordinates into map indices
    :param coord: coordinate in angstrom
    :param map_obj: map object
    :return: float indices (z, y, x order, i.e. axes reversed vs. coord)
    """
    indices = []
    for axis in (2, 1, 0):
        voxel = map_obj.voxel_size[axis]
        indices.append(
            coord[axis] / voxel
            - map_obj.origin[axis] / voxel
            - map_obj.n_start[axis]
        )
    return tuple(indices)
import torch


def compute_angles(
    conformer: torch.Tensor,
    atom_indices: torch.Tensor,
) -> torch.Tensor:
    """Computes the angles [rad] between each atom triplet specified by the
    ``atom_indices``.

    Returns:
        A tensor of the valence angles (empty tensor for empty input).
    """
    if len(atom_indices) == 0:
        return torch.tensor([])
    # Vectors from the central atom (column 1) to its two neighbours.
    ba = conformer[atom_indices[:, 1]] - conformer[atom_indices[:, 0]]
    bc = conformer[atom_indices[:, 1]] - conformer[atom_indices[:, 2]]
    # theta = atan2(||ba x bc||, ba . bc); numerically stabler than acos
    # of the normalized dot product.
    sin_term = torch.norm(torch.cross(ba, bc, dim=-1), dim=-1)
    cos_term = (ba * bc).sum(dim=-1)
    return torch.atan2(sin_term, cos_term)
def isSequence(sequence):
    """**Determine whether sequence is a sequence**."""
    # Duck-typed check: anything sliceable counts as a sequence.
    try:
        sequence[0:-1]
    except TypeError:
        return False
    return True
import json


def read(filename):
    """Import json TOUGH input file.

    Parameters
    ----------
    filename : str
        Input file name.

    Returns
    -------
    dict
        TOUGH input parameters; keys of the sections listed below are
        converted from strings to integers.
    """
    with open(filename, "r") as f:
        parameters = json.load(f)
    # JSON object keys are always strings; these sections use integer keys.
    for key in ("extra_options", "more_options", "selections"):
        if key in parameters:
            section = parameters[key]
            parameters[key] = {int(k): section[k] for k in sorted(section)}
    return parameters
import re
def node_name_from_match(match):
    """ Takes a match identifier (which includes the matched annotation name)
    and returns the node name. This can take a single string or a list of
    strings as argument; any other input yields None.

    The node name is everything after the last "::" separator. (The previous
    implementation called re.split with a positional maxsplit of 3, which is
    deprecated and returned the *first* element whenever more than two
    separators were present.)

    >>> m = node_name_from_match("tiger::cat::topcorpus/subcorpus/doc1#n2")
    >>> m == "topcorpus/subcorpus/doc1#n2"
    True
    """
    if isinstance(match, str):
        return match.rsplit('::', 1)[-1]
    elif isinstance(match, list):
        return [node_name_from_match(m) for m in match]
    else:
        return None
def from_pb_tags(pb_tags):
    """Creates a ``dict`` from ``ProtoTags``.

    Args:
        pb_tags (ProtoTags): The ``ProtoTags`` instance to be converted.

    Returns:
        dict: Mapping of tag key to tag value (empty when there are no tags).
    """
    result = {}
    for tag in pb_tags.tags:
        result[tag.key] = tag.value
    return result
def inch2point(inch, dpi=72.0):
    """
    Inch to point converter.
    :param inch: the length in unit inch
    :type inch: double
    :param dpi: dots per inch (default 72.0)
    :return: number of points corresponding to :param:`inch` (truncated)
    :rtype: integer
    """
    points = dpi * inch
    return int(points)
def get_relationships_between(subj, obj):
    """Get the set of relationships between two cuds objects.

    :param subj: The subject
    :type subj: Cuds
    :param obj: The object
    :type obj: Cuds
    :return: The set of relationships between subject and object.
    :rtype: Set[Type[Relationship]]
    """
    return {
        rel
        for rel, neighbor_uids in subj._neighbors.items()
        if obj.uid in neighbor_uids
    }
def split_group(group):
    """
    Converts a list of objects splitted by "|" into a list. The complication
    comes from the fact that we do not want to use other group's "|" to split
    this one. Meaning (a|(b|c)|e) should be splitted into ['a', '(b|c)', 'e'].
    Warning, this function supposes that there is no parenthesis around the
    given group (it must be under the form "a|(b|c)|e").
    """
    depth = 0
    pieces = []
    current = []
    for char in group:
        if char == "(":
            depth += 1
        elif char == ")":
            depth -= 1
        # Only a top-level "|" separates items of this group.
        if char == "|" and depth == 0:
            pieces.append("".join(current))
            current = []
        else:
            current.append(char)
    pieces.append("".join(current))
    return pieces
def trim_item(item):
    """Trims unnecessary fields in the track item"""
    album_keys = ['album_type', 'available_markets', 'external_urls', 'href',
                  'images', 'uri']
    track_keys = ['available_markets', 'external_ids', 'external_urls', 'href',
                  'is_local', 'preview_url', 'uri']
    track = item['track']
    # dict.pop with a default silently ignores missing keys.
    if 'album' in track:
        for key in album_keys:
            track['album'].pop(key, None)
    for key in track_keys:
        track.pop(key, None)
    return item
import requests
import json


def get_market(id):
    """
    Query the PredictIt API to get the details of a particular market given the market's ID.
    """
    url = "https://www.predictit.org/api/marketdata/markets/%d" % id
    response = requests.request(
        'GET', url)
    return json.loads(response.content)
import hashlib


def hexdigest(strval: str) -> str:
    """hexdigest of a string, using blake2b (20-byte digest)"""
    digest = hashlib.blake2b(strval.encode("utf-8"), digest_size=20)
    return digest.hexdigest()
import math


def arg(number: complex) -> float:
    """Calculate arg function of a complex number."""
    imaginary_part = number.imag
    real_part = number.real
    return math.atan2(imaginary_part, real_part)
def _format_links(link_x):
    """
    Format links part
    :param link_x: list, which has several dicts in it
    :return: formatted string, or the input unchanged if it cannot be
        formatted
    """
    try:
        lines = [entry['rel'] + '-> ' + entry['href'] for entry in link_x]
    except Exception:
        # Unformattable input is passed through untouched.
        return link_x
    return "\n".join(lines)
def default_grouping(datasets, date_field=None):
    """
    Heuristic for default boxplot grouping
    """
    if datasets > 20 and date_field:
        return "year"            # many datasets with dates: group by year
    if datasets > 10 or not date_field:
        return "series"          # compare series, no yearly breakdown
    return "series-year"         # few datasets: break down by both
import re


def read_cmake_cache(cache_path):
    """ Read a CMakeCache.txt file, returning a dict
        name -> value

    Comment lines ("//", "#") and blank lines are skipped; each remaining
    line matching ``NAME:TYPE=value`` contributes an entry.
    """
    res = dict()
    with open(cache_path, "r") as fp:
        for line in fp:
            if line.startswith("//") or line.startswith("#"):
                continue
            # The original ``if not line`` never fired because lines keep
            # their trailing newline; strip before testing for blankness.
            if not line.strip():
                continue
            match = re.match(r"([a-zA-Z0-9-_]+):(\w+)=(.*)", line)
            if not match:
                continue
            (key, _type, value) = match.groups()
            res[key] = value
    return res
def genera_info(nom_classe: str, nom_funcio: str, params: dict) -> dict:
    """
    Return a dictionary with information for debugging errors.

    Args:
        nom_classe (str): Class name.
        nom_funcio (str): Function name.
        params (dict): Parameters.

    Returns:
        dict: Dictionary with information for debugging errors.
    """
    return {
        "className": nom_classe,
        "functionName": nom_funcio,
        "params": params,
    }
def parse_summary_string(summary_string):
    """
    Parses the summary string from vvtest output, such as
        Summary: pass=0, fail=1, diff=0, timeout=0, notdone=0, notrun=1, skip=0
    Returns dictionary of these names to their values.
    """
    counts = {}
    for token in summary_string.split():
        token = token.strip(',')
        if '=' not in token:
            continue
        parts = token.split('=')
        assert len(parts) == 2
        counts[parts[0]] = int(parts[1])
    return counts
def find_word_boundaries(string):
    """
    Given a string, such as "my lights are off", return a tuple::
        0: a list containing all word boundaries in tuples
           (start(inclusive), end(exclusive)):
           [(0, 2), (3, 9), (10, 13), (14, 17)]
        1: a set of all start positions: set[(0, 3, 10, 14)]
        2: a set of all end positions: set[(2, 9, 13, 17)]
    """
    boundaries = []
    word_start = 0
    # Every space closes the word that began at word_start (consecutive
    # spaces therefore produce empty (i, i) boundaries, as before).
    for position, char in enumerate(string):
        if char == " ":
            boundaries.append((word_start, position))
            word_start = position + 1
    if len(string) > word_start:
        boundaries.append((word_start, len(string)))
    if boundaries:
        starts = set(b[0] for b in boundaries)
        ends = set(b[1] for b in boundaries)
    else:
        starts, ends = set(), set()
    return boundaries, starts, ends
import re


def text_reformat(text):
    """lowercase without punctuation"""
    lowered = text.lower()
    # Drop everything that is neither a word character nor whitespace.
    return re.sub(r'[^\w\s]', '', lowered)
def without(s, w):
    """Remove every occurrence of each character of ``w`` from string ``s``.

    :param s: the input string
    :param w: characters to remove
    :return: ``s`` with all characters of ``w`` removed

    The previous implementation used ``list.remove``, which only removed the
    first occurrence of each character and raised ``ValueError`` when a
    character of ``w`` was absent from ``s``.
    """
    unwanted = set(w)
    return "".join(ch for ch in s if ch not in unwanted)
def get_labels(decode_steps):
    """Returns labels dict given DecodeSteps."""
    return {
        "target_action_types": decode_steps.action_types,
        "target_action_ids": decode_steps.action_ids,
    }
import shlex


def format_cli_args(cli_args):
    """
    Formats cli_args as list if they're string.
    """
    if not isinstance(cli_args, str):
        return cli_args
    return shlex.split(cli_args)
def get_hostgroups_id(cfg):
    """Get writer and reader hostgroup id's """
    section = 'galera'
    writer_id = int(cfg.get(section, 'writer_hostgroup_id'))
    reader_id = int(cfg.get(section, 'reader_hostgroup_id'))
    return writer_id, reader_id
def bowers(v, obp, u, start_idx, a, b, vmax, end_idx=None):
    """
    Compute pore pressure using Bowers' equation, with an unloading
    (flushed) interval.

    Parameters
    ----------
    v : 1-d ndarray
        Velocity array, unit: m/s.
    obp : 1-d ndarray
        Overburden pressure, unit: Pa.
    u : float
        Unloading exponent (u=1 reduces to the virgin-curve equation).
    start_idx : int
        Index at which the unloading interval starts.
    a : float
        Coefficient a.
    b : float
        Coefficient b.
    vmax : float
        Velocity at the onset of unloading, unit: m/s.
    end_idx : int, optional
        Index at which the unloading interval ends (default: end of array).

    Returns
    -------
    ndarray
        Pore pressure, unit: Pa.

    Notes
    -----
    .. math:: P = S - \\left[\\frac{(V-V_{0})}{a}\\right]^{\\frac{1}{b}}

    with V0 = 1524 m/s (5000 ft/s).
    [3]_
    .. [3] Bowers, G. L. (1994). Pore pressure estimation from velocity data:
           accounting from overpressure mechanisms besides undercompaction:
           Proceedings of the IADC/SPE drilling conference, Dallas, 1994,
           (IADC/SPE), 1994, pp 515-530. In International Journal of Rock
           Mechanics and Mining Sciences & Geomechanics Abstracts (Vol. 31,
           p. 276). Pergamon.
    """
    # Virgin-curve effective stress (computed once; the original
    # recomputed the same power expression inside ves_fe).
    ves = ((v - 1524) / a) ** (1.0 / b)
    sigma_max = ((vmax - 1524) / a) ** (1.0 / b)
    # Unloading branch: scale the virgin effective stress down by
    # (ves / sigma_max) ** u over [start_idx:end_idx].
    ves_unloading = sigma_max * (ves / sigma_max) ** u
    ves[start_idx:end_idx] = ves_unloading[start_idx:end_idx]
    return obp - ves
def filter_hosts_by_initiators(hosts, initiators):
    """Filter hosts by given list of initiators.

    :param hosts: list of PowerStore host objects
    :param initiators: list of initiators
    :return: PowerStore hosts list (original order preserved)
    """
    matching_names = {
        host["name"]
        for host in hosts
        if any(ini["port_name"] in initiators
               for ini in host["host_initiators"])
    }
    return [host for host in hosts if host["name"] in matching_names]
def is_sorted(lyst):
    """Return True if ``lyst`` is in non-decreasing order.

    Empty and single-element lists are considered sorted.
    """
    # all() short-circuits at the first out-of-order pair; the original
    # always scanned the entire list.
    return all(lyst[i - 1] <= lyst[i] for i in range(1, len(lyst)))
def close_under_modulus(a, b, abs_diff, modulus):
    """ Determines whether the absolute difference between a and b is less than
        abs_diff under modulus. For example, if a = -5, b = 13, and modulus =
        12, their difference is 6 under the modulus, so this function will
        return True when abs_diff >= 6, False otherwise.
        Functionally, this checks whether b falls in the modular range
        [a - abs_diff, a + abs_diff]. Note that if abs_diff >= modulus/2,
        the function will always return True.
        This function will work with numpy arrays, as long as a and b can be
        broadcasted together.
    """
    # Shift so the valid window starts at 0, then test window membership.
    offset = (b - a + abs_diff) % modulus
    return offset <= 2 * abs_diff
def CommaJoin(names):
    """Nicely join a set of identifiers.

    @param names: set, list or tuple
    @return: a string with the formatted results
    """
    return ", ".join(map(str, names))
def load_wav_scp(wav_scp_file):
    """ return dictionary { rec: wav_rxfilename }

    Each line of the scp file holds a record id followed by the
    wav rxfilename (which may itself contain spaces, e.g. a pipe command).
    """
    # Context manager closes the file deterministically; the original
    # leaked the handle returned by open().
    with open(wav_scp_file) as scp:
        pairs = [line.strip().split(None, 1) for line in scp]
    return {x[0]: x[1] for x in pairs}
def check_game_state(current_word: str, hangman_state: int) -> bool:
    """Check if there are any _ left in the word or if hangman_state >= 9.

    Args:
        current_word (str): The current state of the word that the user sees.
        hangman_state (int): The state of the hangman.

    Returns:
        bool: True if the game may continue, False if the game is over.
    """
    # Game continues only while the hangman is alive and letters remain.
    return hangman_state < 9 and '_' in current_word
def manu_year_cisco(value: str):
    """Calculates manufacture date from Cisco SN"""
    # Characters 3-4 of the serial encode the number of years since 1996.
    year_code = int(value[3:5])
    return str(1996 + year_code)
def A11_2_d11(A11, SRM_ratio=4.04367):
"""
Convert Abundance to Delta notation.
Default SRM_ratio is NIST951 11B/10B
"""
return ((A11 / (1 - A11)) / SRM_ratio - 1) * 1000 | ad257038b862d3260fa12cd2c8ce9966a9fa2dbd | 96,846 |
def chunk(L, n):
    """Split an iterable into n-item chunks.

    A trailing partial chunk is dropped (zip stops at the shortest input).
    """
    # One shared iterator repeated n times advances n steps per output tuple.
    shared = iter(L)
    return zip(*((shared,) * n))
import logging


def _infill_variable(cruncher_i, req_variable, leader_i, to_fill_i, **kwargs):
    """
    A function used to iterate the actual crunching if the data doesn't already
    exist.

    Parameters
    ----------
    cruncher_i : :obj: silicone cruncher
        the initiated silicone cruncher to use for the infilling
    req_variable : str
        The follower variable to infill.
    leader_i : list[str]
        The leader variable to guide the infilling.
    to_fill_i : IamDataFrame
        The dataframe to infill.
    kwargs : Dict
        Any key word arguments to include in the cruncher calculation

    Returns
    -------
    :obj:IamDataFrame
        The infilled component of the dataframe (or None if no infilling done)
    """
    filler = cruncher_i.derive_relationship(req_variable, leader_i, **kwargs)
    # Quieten logging about empty dataframes while filtering; restore the
    # level on every exit path (the original left it at CRITICAL whenever
    # infilling actually happened, because it returned before the reset).
    pyam_logger = logging.getLogger("pyam.core")
    pyam_logger.setLevel(logging.CRITICAL)
    try:
        # Only fill for scenarios that don't already report the variable.
        not_to_fill = to_fill_i.filter(variable=req_variable)
        to_fill_var = to_fill_i.copy()
        if not not_to_fill.data.empty:
            for (model, scenario), _ in not_to_fill.data.groupby(["model", "scenario"]):
                to_fill_var = to_fill_var.filter(
                    model=model, scenario=scenario, keep=False)
        if not to_fill_var.data.empty:
            return filler(to_fill_var)
        return None
    finally:
        pyam_logger.setLevel(logging.WARNING)
from datetime import datetime


def datetime_to_str(dt: datetime) -> str:
    """
    Converts a date in datetime format to string format.

    Parameters
    ----------
    dt : datetime
        Represents a datetime in datetime format.

    Returns
    -------
    str
        The datetime rendered as "%Y-%m-%d %H:%M:%S".

    Example
    -------
    >>> from datetime import datetime
    >>> datetime_to_str(datetime(2021, 4, 29, 14, 15, 29))
    '2021-04-29 14:15:29'
    """
    # format(dt, spec) delegates to dt.__format__, i.e. strftime.
    return format(dt, '%Y-%m-%d %H:%M:%S')
def bytes_to_text(s, encoding):
    """
    Convert bytes objects to strings, using the given encoding. Illegally
    encoded input characters are replaced with Unicode "unknown" codepoint
    (\ufffd).
    Return any non-bytes objects without change.
    """
    if not isinstance(s, bytes):
        return s
    return s.decode(encoding, 'replace')
from typing import Optional


def _filter_key(k: str) -> Optional[str]:
    """Returns None if key k should be omitted; otherwise returns the
    (possibly modified/abbreviated) key."""
    if k.startswith("return_") or k.endswith(("_max", "_min")):
        return None
    # Abbreviate the long prefixes for compact display.
    return k.replace("monitor_return", "mr").replace("wrapped_return", "wr")
def read_counter(path):
    """
    Read the value of a counter
    :param path: filesystem path to the counter
    :return: the current count
    """
    with open(path, "r") as counter_file:
        contents = counter_file.read()
    return int(contents)
import random
import string


def make_random_string(length=10):
    """
    Makes a random alphanumeric string of some length

    Args:
        length (int, optional): Length of the random string to return.
            Defaults to 10.

    Returns:
        str: Random alphanumeric (lowercase letters + digits) string of the
            specified length.
    """
    alphabet = string.ascii_lowercase + string.digits
    chars = [random.choice(alphabet) for _ in range(length)]
    return "".join(chars)
def get_split_oris(comp="DoriA"):
    """
    get_split_oris()
    Returns Gabor frames values split for orientation comparisons, if
    applicable.

    Optional args:
        - comp (str): type of comparison
                      default: "DoriA"

    Returns
        - split_oris (bool or list): List of Gabor frames for each split, or
                                     False if splitting orientation comparison
                                     is not applicable.
    """
    if "ori" not in comp:
        return False
    letters = [part.upper() for part in comp.split("ori") if part]
    # Exactly two surrounding Gabor letters are needed for a split.
    return letters if len(letters) == 2 else False
from typing import Union
from pathlib import Path
import re


def package_version(path: Union[str, Path]) -> str:
    """
    Parse the version from specified file, assumed to be a versioner module.

    Args:
        path: The path of the file to parse.

    Returns:
        The parsed version string.

    Raises:
        ValueError: If the file is deemed not clear enough to determine
            version information.
    """
    contents = Path(path).read_text()
    # A simple regex check is sufficient for this internal use case.
    found = re.search('__version__\\s*=\\s*[\'"]((\\d+\\.)+(\\d+))[\'"]', contents)
    if found is None:
        raise ValueError(
            'There is no version string identified in the file contents.')
    return found.group(1)
def _extract_file_info(git_commit_files):
    """ Given the raw file paths payload from the raw Git commit payload, this
    function produces a list of modified file paths object

    :param git_commit_files: raw file paths payload from the raw Git commit
        payload
    :return: a list of processed file path dict objects
    """
    # Map each git status to its Jenkins-style edit type; anything
    # unrecognised becomes 'others'.
    edit_types = {'added': 'add', 'modified': 'edit', 'deleted': 'delete'}
    paths = []
    for commit_file in git_commit_files:
        status = commit_file['status']
        filename = commit_file['filename']
        if status == 'renamed':
            # A rename is represented as an add of the new path plus a
            # delete of the previous path.
            paths.append({'editType': 'add', 'path': filename})
            paths.append({'editType': 'delete',
                          'path': commit_file['previous_filename']})
        else:
            paths.append({'editType': edit_types.get(status, 'others'),
                          'path': filename})
    return paths
import time
def query(query, database, workgroup, athena_client, max_checks=30):
    """Executes an Athena query and polls until it succeeds or fails.

    NOTE(review): contrary to the original description, this returns the
    QueryExecutionId, not the first page of results; fetch rows separately
    via ``get_query_results``.

    Waits up to max_checks * 10 seconds for the query to complete before
    raising.

    :param query: SQL string to execute.
    :param database: database name used for the execution context.
    :param workgroup: Athena workgroup to run under.
    :param athena_client: boto3 Athena client.
    :param max_checks: number of 10-second polls before timing out.
    :raises RuntimeError: if the query execution reaches the FAILED state.
    :raises TimeoutError: if the query has not finished after max_checks
        polls.
    :return: the QueryExecutionId of the started query.
    """
    resp = athena_client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={"Database": database},
        WorkGroup=workgroup,
    )
    qid = resp["QueryExecutionId"]
    for i in range(max_checks):
        resp = athena_client.get_query_execution(QueryExecutionId=qid)
        state = resp["QueryExecution"]["Status"]["State"]
        if state == "SUCCEEDED":
            return qid
        elif state == "FAILED":
            raise RuntimeError(f"Failed query execution: {query}")
        # Continue to wait
        time.sleep(10)
    else:
        # for/else: only reached when the loop exhausts without returning.
        raise TimeoutError("Reached max_checks")
import itertools


def create_segfrac(connector, segfrac, seasons, hours):
    """
    Generates the "SegFrac" table for the Temoa database.
    This table defines what fraction of a year is represented
    by each time slice.

    Parameters
    ----------
    connector : sqlite3 connection object
        Used to connect to and write to an sqlite database.
    segfrac : float
        The fraction-of-a-year represented by each time slice.
    seasons : list
        The list of seasons in the simulation (sequences; the first
        element of each entry is used as the season name).
    hours : list
        The list of hours in the simulation (sequences; the first
        element of each entry is used as the time-of-day name).

    Returns
    -------
    table_command : string
        The command for generating the "SegFrac" table.
    """
    table_command = """CREATE TABLE "SegFrac" (
    "season_name" text,
    "time_of_day_name" text,
    "segfrac" real CHECK("segfrac" >= 0 AND "segfrac" <= 1),
    "segfrac_notes" text,
    PRIMARY KEY("season_name","time_of_day_name"),
    FOREIGN KEY("season_name") REFERENCES "time_season"("t_season"),
    FOREIGN KEY("time_of_day_name") REFERENCES "time_of_day"("t_day")
);"""
    insert_command = """
            INSERT INTO "SegFrac" VALUES (?,?,?,?)
            """
    # One row per (season, hour) pair, all sharing the same fraction.
    entries = [
        (season[0], hour[0], segfrac, 'fraction of year')
        for season, hour in itertools.product(seasons, hours)
    ]
    cur = connector.cursor()
    cur.execute(table_command)
    cur.executemany(insert_command, entries)
    connector.commit()
    return table_command
import json


def load_dict_from_json(filename):
    """
    Loads a python dictionary from a json file

    :param filename: full filename to json file from which to load the config
    :return: dictionary object with content from the json file
    """
    with open(filename, 'r') as json_file:
        return json.load(json_file)
import re


def wrap_long_text(text: str, length: int = 70) -> str:
    """Word-wrap plain text to maximum of length columns.

    :param text: text to wrap
    :param length: maximum columns
    :return: wrapped text
    """
    # Each match grabs up to `length` characters ending at whitespace or
    # the end of the string; surrounding whitespace is then stripped.
    pattern = r".{1,%d}(?:\s+|$)" % length
    pieces = re.findall(pattern, text)
    return "\n".join(piece.strip() for piece in pieces)
def count_sightings(json_obj):
    """ Returns a count of the number of sightings per word in corpus

    Args:
        json_obj (dict).

    Returns:
        int: The return value (0 when the key is absent).
    """
    if "number_of_sightings" not in json_obj:
        return 0
    return int(json_obj["number_of_sightings"])
def verify_newp_info(in_dict):
    """
    Verify in_dict contains the correct keys and data
    {"patient_id": 1,  # usually this would be the patient MRN
     "attending_email": "dr_user_id@yourdomain.com",
     "patient_age": 50,  # in years}

    Returns True on success, otherwise a string describing the first
    problem found.
    """
    required = ("patient_id", "attending_email", "patient_age")
    for key in required:
        if key not in in_dict:
            return "Key {} not found".format(key)
        value = in_dict[key]
        if key == "attending_email":
            if type(value) is not str:
                return "Key attending_email not correct type"
        else:
            # Numeric keys must be convertible to int (e.g. "50" is fine).
            try:
                int(value)
            except ValueError:
                return "Key {} is not integer".format(key)
    return True
def get_next_url(soup):
    """Get the url for retrieving the next articles.

    :param soup: parsed page (BeautifulSoup-like object with ``select``)
    :return: the 'data-url' of the "load more" button, or None when the
        button (or its attribute) is missing.
    """
    try:
        return soup.select('.load-more button')[0]['data-url']
    # Name the expected failures instead of a bare except, which would
    # also swallow SystemExit/KeyboardInterrupt.
    except (IndexError, KeyError, TypeError, AttributeError):
        return None
def get_list_item(list, index):
    """Get item of list without the risk of an error being thrown"""
    # NOTE(review): parameter name shadows the builtin ``list``; kept
    # unchanged for backward compatibility with keyword callers.
    return list[index] if 0 <= index < len(list) else None
def _vector_table(point_set):
    """Computes a vector table for given point set.

    Vector table tells the distance of each point in the point set to
    all other points in the point set.

    Args:
        point_set: The point set to compute the vector table from

    Returns:
        A dictionary with origin point as key, and the list of translation
        vectors to all points in the point set as value.
    """
    table = {}
    for origin in point_set:
        ox, oy = origin[0], origin[1]
        table[origin] = [(px - ox, py - oy) for px, py in point_set]
    return table
async def create_access_token(
    app, identity, user_claims=None, role=None, fresh=False, expires_delta=None
):
    """
    Create a new access token.

    :param app: A Sanic application from request object
    :param identity: The identity of this token, which can be any data that is
                     json serializable. It can also be a python object
    :param user_claims: User made claims that will be added to this token. it
                        should be dictionary.
    :param role: A role field for RBAC
    :param fresh: If this token should be marked as fresh, and can thus access
                  :func:`~sanic_jwt_extended.fresh_jwt_required` endpoints.
                  Defaults to `False`. This value can also be a
                  `datetime.timedelta` in which case it will indicate how long
                  this token will be considered fresh.
    :param expires_delta: A `datetime.timedelta` for how long this token should
                          last before it expires. Set to False to disable
                          expiration. If this is None, it will use the
                          'JWT_ACCESS_TOKEN_EXPIRES` config value
    :return: An encoded access token
    """
    # Delegate to the JWT manager instance attached to the Sanic app
    # (presumably set up by the extension at init time -- confirm against
    # the app initialization code).
    return await app.jwt._create_access_token(
        app, identity, user_claims, role, fresh, expires_delta
    )
def get_maximum_length(bounds):
    """
    Calculate the maximum length of the side bounding box.

    ``bounds`` is expected as 6 values in (min, max) pairs per axis;
    returns None for any other length.
    """
    if len(bounds) != 6:
        return None
    side_lengths = (bounds[i + 1] - bounds[i] for i in range(0, 6, 2))
    return max(side_lengths)
def _normalize_number(number, intmax):
    """ Return 0.0 <= number <= 1.0 or None if the number is invalid. """
    if isinstance(number, float):
        # Floats are already expected in [0, 1].
        return number if 0.0 <= number <= 1.0 else None
    if isinstance(number, int):
        # Ints are scaled down by intmax.
        return number / intmax if 0 <= number <= intmax else None
    return None
def divmult(n: int, m: int = 2) -> int:
    """
    Checks how many times n is divisible by m. Returns -1 if n=0
    @author = Joel

    :param n: Numerator
    :param m: Denominator
    :return: Multiplicity
    """
    if n < 0:
        raise ValueError('Only non-negative integers are supported for n.')
    if n == 0:
        return -1
    multiplicity = 0
    quotient = n
    while True:
        quotient, remainder = divmod(quotient, m)
        if remainder:
            return multiplicity
        multiplicity += 1
import math


def rotationsmatrix(phi):
    """ rotation matrix. phi is degrees, positive phi rotates CW """
    rad = math.radians(phi)
    cos_phi = math.cos(rad)
    sin_phi = math.sin(rad)
    return [[cos_phi, sin_phi], [-sin_phi, cos_phi]]
def harris(f, sigma2, **kwargs):
    """ one-side Harris spectra, adopted in Austrilia code

    Args:
        f (1d-ndarray): freqency, unit: Hz
        sigma2 (float): variance, (Iu * vz) ** 2
        **kwargs: spectrum propeteries
            v10 (float): mean wind speed at 10m height, unit: m/s
    """
    v10 = kwargs["v10"]
    # Dimensionless reduced frequency.
    reduced_freq = f * 1800 / v10
    shape = 0.6 * reduced_freq / ((2 + reduced_freq * reduced_freq) ** (5.0 / 6))
    return sigma2 / f * shape
def list_to_commas(list_of_args) -> str:
    """
    Converts a list of items to a comma separated list. If ``list_of_args`` is
    not a list, just return it back

    :param list_of_args: List of items
    :return: A string representing a comma separated list.
    """
    if not isinstance(list_of_args, list):
        return list_of_args
    return ",".join(list_of_args)
def _uuid_for_part(model, part_name, is_rels = None):
"""Returns the uuid for the named part."""
if part_name is None:
return None
if is_rels is None:
is_rels = part_name.endswith('.rels')
if is_rels:
return model.rels_forest[part_name][0]
return model.parts_forest[part_name][1] | 3b2ab9236f64a66d36fc84e1ea663038ab0675a2 | 96,920 |
def default_str(expr):
    """The default printer for expressions: delegate to the expression's
    own ``to_string`` method.

    Arguments:
    - `expr`: an expression
    """
    return expr.to_string()
def get_time_weights(cube):
    """Compute the weighting of the time axis.
    Parameters
    ----------
    cube: iris.cube.Cube
        input cube.
    Returns
    -------
    numpy.array
        Array of time weights for averaging.
    """
    time_coord = cube.coord('time')
    # Multidimensional time coordinates are not supported: weights cannot
    # simply be computed as the difference between the bounds.
    n_dims = len(cube.coord_dims('time'))
    if n_dims > 1:
        raise ValueError(
            f"Weighted statistical operations are not supported for "
            f"{n_dims:d}D time coordinates, expected "
            f"0D or 1D")
    # Each weight is the length of its time interval (upper - lower bound).
    bounds = time_coord.core_bounds()
    return bounds[:, 1] - bounds[:, 0]
def isprime(i):
    """
    Return True if i is a prime number, False otherwise.

    :param i: a positive integer, i > 1 (values <= 1 return False)
    :return: bool

    Uses trial division by 2 and odd candidates up to sqrt(i), instead of
    testing every value up to i.
    """
    if i <= 1:
        # Not prime by definition (the original implicitly returned None here).
        return False
    if i < 4:
        return True  # 2 and 3 are prime
    if i % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= i:
        if i % divisor == 0:
            return False
        divisor += 2
    return True
def lyambda_fact_top(m_distrib, R):
    """
    Calculates the factor of masstransfer.
    Parameters
    ----------
    m_distrib : float
        The distribution coefficient, [dimensionless]
    R : float
        The reflux number, [dimensionless]
    Returns
    -------
    lyambda_fact_top : float
        The factor of masstransfer, [dimensionless]
    References
    ----------
    Dytnersky, p. 239, eq. 6.35
    """
    reflux_ratio = (R + 1) / R
    return m_distrib * reflux_ratio
def clip(x, lowest, highest):
    """Return x clipped to the range [lowest..highest]."""
    # Clamp to the upper bound first, then the lower bound wins
    # (matches max(lowest, min(x, highest)) even for inverted bounds).
    value = x if x < highest else highest
    return value if value > lowest else lowest
import hashlib
def hash_file(dgst, path):
    """Generate a dgst hash from file path"""
    hasher = hashlib.new(dgst)
    # Stream the file in 4 KiB chunks to keep memory use flat.
    with open(path, 'rb') as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.digest()
def parse_line(line):
    """
    Parse a line in the auth event dataset into a dict.
    This data represents authentication events collected from individual Windows-based desktop computers,
    servers, and Active Directory servers. Each event is on a separate line in the form of "time,
    source user@domain,destination user@domain,source computer,destination computer,authentication type,
    logon type,authentication orientation,success/failure" and represents an authentication event at the
    given time. The values are comma delimited and any fields that do not have a valid value are represented
    as a question mark ('?').
    Example: 244,C1$@DOM1,C1$@DOM1,C1,C529,Kerberos,Network,LogOn,Success
    """
    fields = line.split(',')
    assert len(fields) == 9
    # The first seven fields are copied verbatim as strings.
    keys = ("timestamp", "src_user", "dest_user", "src_comp", "dest_comp",
            "auth_type", "logon_type")
    record = dict(zip(keys, fields))
    # The last two fields become booleans.
    record["auth_orientation"] = fields[7] == 'LogOn'
    record["success"] = fields[8] == 'Success'
    return record
def find(cond, iterator):
    """
    Return the first value in iterator for which cond is truthy,
    otherwise return None.
    """
    return next((item for item in iterator if cond(item)), None)
from typing import Union
def atmospeheres_to_bars(atm: float, unit: str) -> Union[float, str]:
    """
    Convert a pressure from atmospheres (atm) to bars.
    Wikipedia reference: https://en.wikipedia.org/wiki/Standard_atmosphere_(unit)
    Wikipedia reference: https://en.wikipedia.org/wiki/Bar_(unit)
    >>> atmospeheres_to_bars(2.5, "atm")
    2.533125
    >>> atmospeheres_to_bars("12", "atm")
    12.158999999999999
    >>> atmospeheres_to_bars(0, "atm")
    0.0
    >>> atmospeheres_to_bars(35, "mmHg")
    'Invalid unit'
    >>> atmospeheres_to_bars("atmospheres", "atm")
    Traceback (most recent call last):
      ...
    ValueError: could not convert string to float: 'atmospheres'
    """
    if unit != "atm":
        # Only atmospheres are supported as the source unit.
        return "Invalid unit"
    return float(atm) * 1.01325
def example_func(param1, param2):
    """Example function with types documented in the docstring
    Args:
        param1 (int): The first parameter
        param2 (str): The second parameter
    Returns:
        bool: The return value. True for success, False otherwise.
    """
    for value in (param1, param2):
        print(value)
    return True
import logging
def calc_duration(time):
    """This calculates the time duration of the ECG data.
    The duration is the difference between the last and first
    time values.
    Args:
        time (list): list of time values for the ECG data
    Returns:
        float : duration of ECG data in seconds
    """
    logging.info('Calculating ECG duration')
    return time[-1] - time[0]
import itertools
def CreateTextOutputPermutations(text, inputs):
    """Creates permutations of |text| filled with the contents of |inputs|.
    Some output ordering is not guaranteed, so this acts as a way to generate
    all possible outputs instead of manually listing them.
    Args:
      text: A string containing a single string field to format.
      inputs: An iterable of strings to permute.
    Returns:
      A set of unique permutations of |text| filled with |inputs|. E.g. if |text|
      is '1%s2' and |inputs| is ['a', 'b'], the return value will be
      set(['1ab2', '1ba2']).
    """
    return {text % ''.join(order) for order in itertools.permutations(inputs)}
def _check(isdsAppliance, serverID, action):
"""
Check if serverID one of these acceptable values:
directoryserver
directoryadminserver
directorywat
directoryintegrator
directoryintegratorscimtarget
scimservice
Note: only directoryserver supports "startconfig" action
"""
if serverID == 'directoryserver':
if action == 'startconfig':
return True
return True
elif serverID == 'directoryadminserver':
if action == 'startconfig':
return False
return True
elif serverID == 'directorywat':
if action == 'startconfig':
return False
return True
elif serverID == 'directoryintegrator':
if action == 'startconfig':
return False
return True
elif serverID == 'directoryintegratorscimtarget':
if action == 'startconfig':
return False
return True
elif serverID == 'scimservice':
if action == 'startconfig':
return False
return True
else:
return False | f2e84dbaa4e50cfdfa14f91d2031234b8814ae10 | 96,957 |
def parse_midi_input(midi_key_presses):
    """Takes in a list of MidiKeyPress objects, returns the notes, ordered by
    time pressed. Key releases (velocity == 0) are dropped.

    Note: sorts the input list in place.
    """
    midi_key_presses.sort(key=lambda press: press.time)
    return [press.note for press in midi_key_presses if press.velocity > 0]
def state_to_str(state, n_qubits=4, ket=True):
    """ takes a state as a number, and returns the string
    specifying ket=True draws angle brackts around the string """
    # "state" is not the right term here ...
    bits = bin(state)[2:].zfill(n_qubits)
    return f'|{bits}>' if ket else bits
def get_component_aliases(*args):
    """
    Returns aliases for variables in the properties of Components (TendencyComponent,
    DiagnosticComponent, Stepper, and ImplicitTendencyComponent objects).
    If multiple aliases are present for the same variable, the following
    properties have priority in descending order: input, output, diagnostic,
    tendency. If multiple components give different aliases at the same priority
    level, one is chosen arbitrarily.
    Args
    ----
    *args : Component
        Components from which to fetch variable aliases from the input_properties,
        output_properties, diagnostic_properties, and tendency_properties dictionaries
    Returns
    -------
    aliases : dict
        A dictionary mapping quantity names to aliases
    """
    aliases = {}
    # Lowest priority first, so later (higher-priority) entries overwrite.
    for property_type in (
            'tendency_properties', 'diagnostic_properties', 'output_properties',
            'input_properties'):
        for component in args:
            if not hasattr(component, property_type):
                continue
            for name, properties in getattr(component, property_type).items():
                if 'alias' in properties:
                    aliases[name] = properties['alias']
    return aliases
from typing import List
def get_fillings(sandwich: List) -> List:
    """Given a sandwich, return the ingredients (everything between the
    first and last elements)."""
    return sandwich[1:len(sandwich) - 1]
import math
def get_velocities(pitch_vel_l, pitch_vel_r, roll_vel_l, roll_vel_r, pitch, roll):
    """
    Returns an estimated bot velocity
    :param pitch_vel_l: Left pitch motor velocity
    :param pitch_vel_r: Right pitch motor velocity
    :param roll_vel_l: Left roll motor velocity
    :param roll_vel_r: Right roll motor velocity
    :param pitch: Bot pitch angle (degrees)
    :param roll: Bot roll angle (degrees)
    :return: Velocities in the x and y axis
    """
    # Average each motor pair, then project through the tilt angle.
    v_x = .5 * (pitch_vel_l + pitch_vel_r) * math.cos(math.radians(pitch))
    v_y = .5 * (roll_vel_l + roll_vel_r) * math.cos(math.radians(roll))
    return v_x, v_y
def resource_str(resource_obj):
    """
    Return a human readable string identifying the resource object, for
    messages.
    """
    res_class = resource_obj.properties['class']
    if res_class == 'cpc':
        return "CPC '{}'".format(resource_obj.name)
    if res_class in ('partition', 'logical-partition'):
        cpc_name = resource_obj.manager.parent.name
        return "partition '{}' on CPC '{}'".format(resource_obj.name, cpc_name)
    raise ValueError("Resource class {} is not supported".format(res_class))
import re
def findDat(paths):
    """Guesses the .dat file from a list of filenames (first path whose
    basename matches FirmwareData_*.dat). Raises IndexError if none match."""
    pattern = re.compile(r'/FirmwareData_([^/]+)\.dat$')
    matches = [path for path in paths if pattern.search(path)]
    return matches[0]
from typing import Optional
from typing import Union
def parse_positive_integer(s: 'Optional[Union[str, int]]') -> 'Optional[int]':
    """Parse a positive integer from a string representation.
    Args:
        s: string representation of a positive integer, or just an integer
    Returns:
        the parsed integer result, return :data:`None` if input is :data:`None` or empty string
    Raises:
        TypeError: if ``s`` is not :obj:`str` or :obj:`int`
        ValueError: if ``s`` is an invalid positive integer value
    """
    # Missing input (None or empty string) means "no value".
    if s is None or s == '':
        return None
    if not isinstance(s, (str, int)):
        raise TypeError('expect str or int, got {!r}'.format(s))
    try:
        parsed = int(s)
    except ValueError:
        raise ValueError('expect an integer value, got {!r}'.format(s)) from None
    if parsed <= 0:
        raise ValueError('expect integer value to be positive, got {!r}'.format(parsed))
    return parsed
def define_parameters(**parameters):
    """Get a list of parameters to pass to AWS boto call."""
    return [
        {'ParameterKey': key, 'ParameterValue': value}
        for key, value in parameters.items()
    ]
def collect_summary(cols):
    """Select the summary sentences that are matched with a given section into an array of sentences"""
    target_idx = cols.section_idx
    return [sentence for (sentence, idx) in cols.matched_summaries if idx == target_idx]
def get_aws_partition(context):
    """
    Get aws partition (the second ':'-delimited field of the function ARN,
    e.g. 'aws' in 'arn:aws:lambda:...')
    @param context: the context of the event
    @return: current account partition
    """
    return context.invoked_function_arn.split(":", 2)[1]
def cut_up_url(url):
    """
    Extract the unique meeting ID from a runbritain.com results URL.

    Input: A URL string consistent w/ runbritain.com. Here is an ex)
    https://www.runbritainrankings.com/results/results.aspx?meetingid=201320&pagenum=1

    Output: Unique ID int for a particular race, which is how
    I cross-reference GPS info with race info
    """
    flag = 'meetingid='
    start = url.find(flag) + len(flag)
    # Search for the '&' delimiter *after* the id (the old code searched
    # from the start of the URL); if there is no trailing parameter, the
    # id runs to the end of the string.
    end = url.find('&', start)
    if end == -1:
        end = len(url)
    return int(url[start:end])
def ft_to_toplevel(fasttext_lbl):
    """Example: '__label__STEM.Technology' -> 'STEM'"""
    stripped = fasttext_lbl.replace('__label__', '')
    return stripped.partition('.')[0]
def default_authority(request):
    """
    Return the value of the h.authority config setting.
    Falls back on returning request.domain if h.authority isn't set.
    """
    settings = request.registry.settings
    return settings.get("h.authority", request.domain)
def get_icon(event_type):
    """
    Return an icon path based on event_type.

    :param event_type: the event type used to specify an icon
    :return: marker icon path; unknown types get the brown "U" marker
    """
    # Dispatch table replaces the long if/elif ladder.
    icons = {
        'AUTOMATIC ALARM': 'Google Maps Markers/blue_MarkerA.png',
        'EMS': 'Google Maps Markers/red_MarkerE.png',
        'FIRE': 'Google Maps Markers/orange_MarkerF.png',
        'HAZMAT': 'Google Maps Markers/purple_MarkerH.png',
        'MUTUAL AID': 'Google Maps Markers/green_MarkerM.png',
        'PUBLIC SERVICE': 'Google Maps Markers/pink_MarkerP.png',
        'TRAFFIC': 'Google Maps Markers/darkgreen_MarkerT.png',
    }
    return icons.get(event_type, 'Google Maps Markers/brown_MarkerU.png')
import math
def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest
    residual errors (difference between the prediction
    and the actual net worth).
    Return a list of tuples named cleaned_data where
    each tuple is of the form (age, net_worth, error).
    """
    # Squared residual for every point.
    cleaned_data = [
        (age, net_worth, (prediction - net_worth) ** 2)
        for prediction, age, net_worth in zip(predictions, ages, net_worths)
    ]
    # Sort largest error first, then drop the top ceil(10%).
    cleaned_data.sort(key=lambda entry: entry[2], reverse=True)
    nb_removed = int(math.ceil(len(predictions) * 0.1))
    return cleaned_data[nb_removed:]
def get_model_attribute(model, attribute_name, cuda_device):
    """
    Getter function for both CPU and GPU.
    Parameters
    ____________________
    model: MultiTaskModel object,
    attribute_name: str
    Returns
    --------------------
    The attribute object from the model.
    """
    # A list of devices implies a wrapped model (attributes live on
    # model.module); otherwise read the attribute off the model directly.
    target = model.module if isinstance(cuda_device, list) else model
    return getattr(target, attribute_name)
import functools
def lazy_property(function):
    """Decorator which adds lazy evaluation to the function and cashing the result.
    Parameters
    ----------
    function : callable
        The function that should be evaluated only once and providing the
        result that gets cached.
    Returns
    -------
    return type of callable
        The cached result from the first and only evaluation.
    """
    cache_name = '_cache_' + function.__name__
    @property
    @functools.wraps(function)
    def wrapper(self):
        # EAFP: compute and store on first access, return the cache after.
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = function(self)
            setattr(self, cache_name, value)
            return value
    return wrapper
def compute_checksum(byte_seq):
    """Compute and return the checksum of the `byte_seq` packet.
    The checksum is the value of the last byte of each packet. It is used
    to prevent transmission errors of packets. Checksums are computed as
    follow::
        Checksum = ~(dynamixel_id + length + data1 + ... + dataN)
    where ~ represent the NOT logic operation.
    If the computed value is larger than 255, then only its lower byte is
    defined as the checksum value.
    :param bytes byte_seq: a byte sequence containing the packet's bytes
    involved in the computation of the checksum (i.e. from the third to the
    penultimate byte of the "full packet" considered).
    """
    # Normalize to bytes. tuple() rejects integers and other non-iterables
    # (bytes(3) would otherwise silently produce b'\x00\x00\x00'), while the
    # bytes constructor raises TypeError/ValueError for out-of-range items.
    packet = bytes(tuple(byte_seq))
    if len(packet) < 3:
        raise ValueError("At least three bytes are required.")
    # First byte must be a valid dynamixel id.
    if not 0x00 <= packet[0] <= 0xfe:
        raise ValueError(
            "Wrong dynamixel_id, a byte in range(0x00, 0xfe) is required.")
    # Second byte is the declared length; it must cover the rest of the packet.
    if packet[1] != len(packet) - 1:
        raise ValueError('Wrong length, at least 3 bytes are required.')
    return ~sum(packet) & 0xff
import requests
def fetch_latest_pypi_version(project):
    """
    Return the latest version of the given project from PyPi
    (empty string when the JSON payload has no version info).
    """
    response = requests.get("https://pypi.python.org/pypi/%s/json" % project)
    return response.json().get("info", {}).get("version", "")
def url(path):
    """Generate a Markdown link for a file/folder"""
    if path.is_dir():
        return f"`{path.name}`"
    # Anchor: posix path lowercased with '/' and '.' stripped out
    # (presumably matching the renderer's heading-anchor scheme).
    anchor = path.as_posix().replace("/", "").replace(".", "").lower()
    return f"[`{path.name}`](#{anchor})"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.