content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def task_11_list_customers_starting_from_11th(cur):
    """
    List all customers starting from the 11th record.

    Bug fix: the previous filter ``customerid > 11`` excluded the 11th
    record itself; ``>= 11`` includes it (with 21 customers this yields
    the documented 11 records). Assumes customerid is 1-indexed with no
    gaps -- TODO confirm against the schema.

    Args:
        cur: psycopg cursor
    Returns:
        list of tuple: all customer rows from the 11th record onward
    """
    cur.execute("""SELECT * FROM Customers WHERE customerid >= 11""")
    return cur.fetchall()
|
d85c7d28c172b8d84118160efaf67ad5225fe220
| 82,779
|
from typing import Union
from typing import List
def get_attribute(object, name: Union[List[str], str]):
    """Resolve a possibly dotted attribute path on *object*.

    ``name`` may be a dotted string such as ``"a.b.c"`` or an already
    split sequence of attribute names; each part is looked up in turn.
    """
    if isinstance(name, str):
        name = name.split(".")
    target = object
    for part in name:
        target = getattr(target, part)
    return target
|
95ad52ba6614449fc2968f43317a9b2f5f181fec
| 82,781
|
def calcPv(D, H):
"""
Calculate geometric albedo given asteroid diameter and H magnitude.
Parameters
----------
D : float or `~numpy.ndarray` (N)
Asteroid diameter in meters.
H : float or `~numpy.ndarray` (N)
Absolute H magnitude.
Returns
-------
float or `~numpy.ndarray` (N)
Geometric albedo.
"""
return (1.329e6 / (D * 10**(H / 5)))**2
|
fb91ab6be9f389f1754514b940fc96b4c999a9f8
| 82,786
|
def get_machines_by_vnet_interface_name(config, ifname):
    """
    Return the machines that use a particular VNet interface.

    :param dict config: The config generated by get_config()
    :param str ifname: The interface to check for (e.g. ``"vnet12"``)
    :return: list of VNet machine names using that interface
    """
    # Bug fix: previously only the last character was used
    # (int(ifname[-1])), which broke for bridge numbers >= 10.
    # Use the full trailing digit run instead.
    suffix = ifname[len(ifname.rstrip("0123456789")):]
    bridge_num = int(suffix)
    machines = []
    for m_name, m_data in config["machines"].items():
        for int_data in m_data["interfaces"].values():
            if int(int_data["bridge"]) == bridge_num:
                machines.append(m_name)
    return machines
|
9f26b01243664f0af596db2eaf0d067d481076e6
| 82,788
|
def _parse_results(results):
    """ Compute, for each search-result key, the sorted data keys and the
    date range those keys cover.

    Args:
        results (dict): Dictionary of search results; each value is an
            iterable of key strings whose last path segment embeds dates,
            e.g. ".../<start-date>_<end-date>".
    Returns:
        dict: Maps each search-result key to
            {"dates": "<start>_<end>", "keys": sorted list of data keys}.
            (Note: returns a dict, not the list of tuples the original
            docstring claimed.)
    """
    date_keys = {}
    for key in results.keys():
        keys = sorted(results[key])
        start_key = keys[0]
        end_key = keys[-1]
        # Get the first and last dates from the keys in the search results
        start_date = start_key.split("/")[-1].split("_")[0]
        end_date = end_key.split("/")[-1].split("_")[-1]
        dates_covered = start_date + "_" + end_date
        date_keys[key] = {"dates": dates_covered, "keys": keys}
    return date_keys
|
4da151d517b2d60445c97cb6dd1414a142d02c55
| 82,790
|
def figure_format(fig_width='3.4', fig_height='2.1'):
    """
    Build publication-style matplotlib rc parameters.

    Parameters
    ----------
    fig_width : float or str
        Figure width in inches (default 3.4, the width of one column in
        a double-column paper). Strings are converted to float.
    fig_height : float or str
        Figure height in inches (default 2.1).

    Returns
    -------
    tuple
        (fig_width, fig_height, params): the sizes as floats and an
        rcParams-style dict (fontsize, tick settings, etc.).
    """
    # Bug fix: the defaults were strings and went straight into
    # 'figure.figsize', which matplotlib requires to be numeric.
    # (Also removed the unused golden_ratio local.)
    fig_width = float(fig_width)
    fig_height = float(fig_height)
    fig_size = [fig_width, fig_height]
    fontsize = 10
    linewidth = 0.5
    params = {'backend': 'ps',
              'axes.labelsize': fontsize,
              'axes.titlesize': fontsize,
              'font.size': fontsize,
              'legend.frameon': False,
              'legend.fontsize': fontsize,
              'legend.loc': 'best',
              'lines.linewidth': linewidth,
              'xtick.labelsize': fontsize,
              'ytick.labelsize': fontsize,
              'xtick.direction': 'in',
              'ytick.direction': 'in',
              'xtick.top': True,
              'ytick.right': True,
              'xtick.major.size': linewidth*4,
              'xtick.major.top': True,
              'xtick.major.width': 0.5,
              'xtick.minor.size': linewidth*2,
              'xtick.minor.top': True,
              'xtick.minor.width': 0.5,
              'ytick.major.size': linewidth*4,
              'ytick.major.width': 0.5,
              'ytick.minor.size': linewidth*2,
              'ytick.minor.width': 0.5,
              'figure.figsize': fig_size,
              'pgf.texsystem': 'pdflatex',
              'font.family': 'serif',
              'text.usetex': True,
              'pgf.rcfonts': False,
              'ps.usedistiller': 'xpdf'}
    return (fig_width, fig_height, params)
|
0ee9cba83111ef335e76ca3d39e5ce1aa8b20fcf
| 82,792
|
def get_label(audio_config):
    """Return a label naming the features enabled in audio_config.
    e.g:
    audio_config = {'mfcc': True, 'chroma': True, 'contrast': False, 'tonnetz': False, 'mel': False}
    get_label(audio_config): 'mfcc-chroma'
    """
    # Fixed ordering so the label is deterministic regardless of dict order.
    feature_order = ["mfcc", "chroma", "mel", "contrast", "tonnetz"]
    enabled = [name for name in feature_order if audio_config[name]]
    return "-".join(enabled)
|
5f8b0bbe9966fd50e34e5bdb23cf915970f2170f
| 82,793
|
def parse_hhmm_field_from_form(data_dict, key_root):
    """ Build an HH:MM time-of-day string from a pair of posted select
    fields named ``<key_root>_hours`` and ``<key_root>_minutes``.

    Parameters
    ----------
    data_dict: dict
        Dictionary of posted form data.
    key_root: string
        Shared name prefix of the two inputs, e.g. 'issue_time' reads
        'issue_time_hours' and 'issue_time_minutes'.

    Returns
    -------
    string
        The time value in zero-padded HH:MM format.
    """
    hour_value = int(data_dict[key_root + '_hours'])
    minute_value = int(data_dict[key_root + '_minutes'])
    return '{:02d}:{:02d}'.format(hour_value, minute_value)
|
fd4a29cf52ddeeafd076ea5a53bf33e7e4cebc60
| 82,797
|
def _model_columns(db_model):
"""Return the columns names from a given DB model.
"""
return [c.name for c in db_model.__table__.columns]
|
4e26f97ba7ee993c3034d5d185080fb7f6ba17d3
| 82,814
|
import torch
def adam(parameters, args):
    """Construct a torch Adam optimizer from argparse-style args.

    Reads ``lr``, ``weight_decay``, ``beta1`` and ``beta2`` off *args*.
    """
    optimizer = torch.optim.Adam(
        parameters,
        lr=args.lr,
        betas=(args.beta1, args.beta2),
        weight_decay=args.weight_decay,
    )
    return optimizer
|
e51ca56e31cd8facd226398f4042853534d59969
| 82,819
|
def normalize_rect(rect):
    """
    Make rectangle a rotated tall rectangle, which means y-size > x-size
    and the angle indicates the "tilt": rotation of the longer axis from vertical.

    :param rect: OpenCV's rectangle tuple:
        rect[0] - coordinates of the center,
        rect[1] - tuple of the sizes (x, y),
        rect[2] - the tilt angle.
    :return: rectangle tuple normalized as described.

    If width > height the rectangle is turned 90 degrees: sizes are
    swapped and the angle is rotated cw or ccw depending on its sign.
    """
    size_x, size_y = rect[1]
    if size_x > size_y:
        # Bug fix: the old expression `rect[2] + 90.0 if rect[2] < 0.0
        # else -90.0` discarded any non-negative angle and collapsed it
        # to -90.0; subtract 90 instead. For OpenCV's minAreaRect angle
        # range (-90, 0] the two forms coincide, so existing callers see
        # no change.
        new_angle = rect[2] + 90.0 if rect[2] < 0.0 else rect[2] - 90.0
        # incoming rect can be a tuple so reassign the whole thing
        rect = (rect[0], (size_y, size_x), new_angle)
    return rect
|
4796f4e14b16c3e78098fa74df868f18a684e229
| 82,826
|
def daterange_to_str(daterange):
    """ Convert a pandas date_range-style index into a string of the form
    2019-01-01-00:00:00_2019-03-16-00:00:00 (first and last entries joined
    by an underscore, spaces replaced with dashes).

    Args:
        daterange (pandas.DatetimeIndex)
    Returns:
        str: Daterange in string format
    """
    first = str(daterange[0]).replace(" ", "-")
    last = str(daterange[-1]).replace(" ", "-")
    return f"{first}_{last}"
|
a57be32e5adf96ec44cfb80d0d3830d645c9817b
| 82,827
|
def resistivity_index(rt, ro):
    """
    Archie Resistivity Index: I = Rt / Ro.

    Parameters
    ----------
    rt : float
        True formation resistivity (ohm.m)
    ro : float
        Resistivity of water saturated formation (ohm.m)

    Returns
    -------
    float
        Archie resistivity index (I)
    """
    index = rt / ro
    return index
|
163cea9edeb51d319fe7756858296fc04409a481
| 82,829
|
def instance_get_uuid(instance):
    """
    Return the instance uuid (the ``'id'`` entry of the mapping).
    Raises KeyError when the entry is absent.
    """
    return instance["id"]
|
70d97fdcd56d344225d9d4294f2ab077e18a9db1
| 82,830
|
def conversion(amount, rates, output_currency=None):
    """
    Convert *amount* using the given rates, rounding to 2 decimals.

    When *output_currency* is given only that currency's conversion is
    returned; otherwise conversions for every supported currency.

    Parameters
    ----------
    amount : float
        Amount of money to be converted.
    rates : dict
        Conversion rates.
    output_currency : str, optional
        Currency for which conversion is made.

    Returns
    -------
    dict
        Converted rates.
    """
    converted = {}
    for currency, rate in rates.items():
        converted[currency] = round(rate * amount, 2)
    if output_currency:
        return {output_currency: converted[output_currency]}
    return converted
|
ad9f506d3597b22d54e612fe0ee572ee019732ef
| 82,835
|
def get_modified_files_list(diff):
    """Return the names of files that were modified or newly added in *diff*."""
    return [
        entry["filename"]
        for entry in diff["files"]
        if entry["status"] in ("modified", "added")
    ]
|
9219d0e4f41ee9aa9b64e0a9867a6a90c679dc02
| 82,836
|
def constant_stiffness(x, y):
    """Spatially constant stiffness coefficient lambda(x, y) = 1.0 for the wave equation.

    The (x, y) position is accepted for interface compatibility but ignored.
    """
    return 1.0
|
304312c8679350bc60c6be40d9c10735ad6a4bf8
| 82,837
|
import unicodedata
def remove_accents(s):
    """Remove accents from a piece of text.

    Decomposes to NFD and drops combining marks (category 'Mn').

    Examples:
        >>> remove_accents('âbĉ')
        'abc'
    """
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(c for c in decomposed if unicodedata.category(c) != 'Mn')
|
839c0f8f7ad08850cbc6080095d1230c680ea414
| 82,840
|
def get_min_max_range(arr_1d, std_factor=2):
    """Get min/max range for an array, defined as std_factor standard deviations +/- from the mean.
    Args:
        arr_1d (numpy.array): Numpy array over which values are calculated.
        std_factor (int, optional): The min / max range returned is this many std +/- the mean.
            Defaults to 2 (the original docstring incorrectly said 3).
    Returns:
        double, double: min and max calculated values
    """
    arr_mean = arr_1d.mean()
    arr_std = arr_1d.std()
    min_val = arr_mean - arr_std * std_factor
    max_val = arr_mean + arr_std * std_factor
    return min_val, max_val
|
f5561d39f9f5f94045a9979ca0c391a9d29a4218
| 82,845
|
def encontrar_mayor(entrada: list) -> int:
    """ Find the largest element of a list.

    Parameters:
        entrada (list): the list of numbers to search
    Returns:
        int: the largest number in the list, or -1 if the list is empty.
    """
    # Bug fix: the old accumulator started at -1, so a non-empty list
    # containing only numbers below -1 (e.g. [-5, -2]) wrongly returned -1.
    if not entrada:
        return -1
    return max(entrada)
|
e7be0b9183316db876681096acfbffefd876fb13
| 82,848
|
def remove_unwanted_files(workdir, files):
    """
    Filter out files that the looping job algorithm should ignore.

    :param workdir: working directory (string). Needed in case the find command includes the workdir in the list of
    recently touched files.
    :param files: list of recently touched files (file names).
    :return: filtered files list.
    """
    ignored_markers = ("pilotlog", ".lib.tgz", ".py", "pandaJob")
    kept = []
    for path in files:
        if path == workdir:
            continue
        if any(marker in path for marker in ignored_markers):
            continue
        kept.append(path)
    return kept
|
ddb70a98de34ef162d56e1569cf2580ee5a38907
| 82,849
|
def extract_type_from_nnlm(data):
    """
    Extract the version for the NNLM collection of Language Models.
    https://tfhub.dev/google/collections/nnlm/1
    """
    lm_column = data['lm']
    return lm_column.str.extract(r'nnlm-(e.-.*)-w.*')
|
5a7da10bfb32551107056052bb2aac49d39f01ea
| 82,850
|
from typing import Tuple
from typing import List
from typing import Any
def _deduplicate_labels(
handles_labels: Tuple[List[Any],
List[str]]) -> Tuple[List[Any], List[str]]:
"""Deduplicate legend handles and labels.
Parameters
----------
handles_labels : Tuple[List[Any], List[str]]
Legend handles and labels.
Returns
-------
(new_handles, new_labels) : Tuple[List[Any], List[str]]
Deduplicated legend handles and labels
"""
new_handles = []
new_labels = []
for handle, label in zip(*handles_labels):
if label not in new_labels:
new_handles.append(handle)
new_labels.append(label)
return (new_handles, new_labels)
|
37133c689aa6bccde7f56ac550b7b6a3c73cba23
| 82,853
|
def get_variants_from_log(trace_log, attribute_key="concept:name"):
    """
    Group traces by variant (the comma-joined sequence of event attributes).

    Parameters
    ----------
    trace_log
        Trace log.
    attribute_key
        Field that identifies the attribute (provide if different from concept:name).

    Returns
    ----------
    dict
        Variant string mapped to the list of traces sharing that variant.
    """
    variants = {}
    for trace in trace_log:
        labels = [event[attribute_key] for event in trace if attribute_key in event]
        variant = ",".join(labels)
        variants.setdefault(variant, []).append(trace)
    return variants
|
02ef93e06f29bf9c8b3c9c8f88975a9a584ebc74
| 82,854
|
def qte(lne):
    """Wrap the given string in single quotes."""
    return "'{}'".format(lne)
|
309b462ab5eff1ea3b8e8b949cbc06ee3e884247
| 82,856
|
def speak_excitedly(message, num_exclamations=1, enthusiasm=False):
    """Return *message* with exclamation points appended, uppercased when enthusiastic."""
    result = message + '!' * num_exclamations
    return result.upper() if enthusiasm else result
|
0360a668eaa8fbab9863d32cccbd2145ce8da32b
| 82,860
|
def is_post_code(tag):
    """
    Usage: if is_post_code(tag): ...
    Return True when the tag element's key attribute denotes a postal code.
    """
    post_code_keys = ("addr:postcode", "postcode", "postal_code")
    return tag.attrib['k'] in post_code_keys
|
3f903d7a227f1870be3567f401cfba2fae8f96b4
| 82,861
|
def split_iativer(version_str):
    """Split an IATIver-format version number into its numeric components.

    Args:
        version_str (string): An IATIver-format string.

    Returns:
        list of int: numeric Integer and Decimal components.
    """
    parts = version_str.split('.')
    return [int(parts[0]), int(parts[1])]
|
bcdfe21de01dea172bdf19c4c9af01c58e2aede5
| 82,867
|
from typing import Tuple
def convert_tile_string(tile_pos_string: str) -> Tuple[int, int]:
    """
    Parse an "x,y" tile position string into an (x, y) tuple of ints.
    """
    x_str, y_str = tile_pos_string.split(",")
    return int(x_str), int(y_str)
|
1911cc90b7ca935de3e3180e49b93d6949e72dcd
| 82,875
|
def image_rescale(I, im_size, n_uv):
    """
    Rescale the summed-up image by im_size^2 / n_uv.

    Args:
        I (np.narray): summed up image
        im_size (int): the image size (before cropping)
        n_uv (int): the number of visibility data points
    """
    # Same multiplication order as the accumulation convention expects.
    rescaled = I * im_size * im_size / n_uv
    return rescaled
|
94d0478d8e33fc65331498d278f669e112a8a857
| 82,876
|
def create_window_array(window, season_lag=None):
    """
    Create a sliding-window mask (list of 1/0 flags selecting which past
    values are used to forecast the next value).

    Args:
        window: Integer whose binary representation is the window mask,
            e.g. 9 -> [1, 0, 0, 1].
        season_lag: When given, three values surrounding the lag are
            appended; e.g. window=1 with season_lag=5 -> [1, 0, 0, 1, 1, 1].

    Returns:
        The sliding window mask as a list of ints.
    """
    bits = [int(ch) for ch in format(window, 'b')]
    if season_lag:
        padding = [0] * (season_lag - len(bits) - 2)
        bits = bits + padding + [1, 1, 1]
    return bits
|
1075578a6ade128a2ce3fcb64f7548de02557908
| 82,877
|
def check_recent_timestamps(times, ref_time):
    """Check whether the newest recorded time is at least as recent as ref_time.

    Args:
        times (list): recording times in seconds (floats); the last entry
            is taken as the most recent.
        ref_time (float): reference time in seconds.

    Returns:
        bool: True when the last recording is >= ref_time.
    """
    return times[-1] >= ref_time
|
089a71a564ed0d2d300f33fd32b30da24cbca784
| 82,884
|
def interpreted_fact(n):
    """Compute n! iteratively (returns 1 for any n <= 1, like the recursive form)."""
    result = 1
    while n > 1:
        result *= n
        n -= 1
    return result
|
900de74b5d279635df5bb85ef289e374bc2d24e8
| 82,886
|
def kw(**kwargs):
    """Collect the provided keyword arguments into a dictionary and return it."""
    return kwargs
|
23ccaa71533c8bcad784ab60cdd600416734a35b
| 82,891
|
def get_template(path_to_template):
    """
    Return the contents of a template file as a single string.

    Fixes: the previous version left the file handle open if iteration
    raised, and rebuilt the string line by line; a context manager plus a
    single read() is safer and produces the identical string.
    """
    with open(path_to_template, 'r') as f:
        return f.read()
|
c2a20944d7590c4d78f06bb35e9818abe2c72a86
| 82,893
|
def is_fermat_probable_prime(n, base=2):
    """is_fermat_probable_prime(n [, base]) -> 0|1|2

    Fermat primality test: a three-state flag for integer ``n``.

    Arguments
    ---------
    n       Integer to be tested for primality.
    base    A single integer witness or a tuple of witnesses (default 2).
            Each must be in 1..n-1.

    Return result
    -------------
    0   Number is definitely non-prime (fails some witness, is even > 2,
        or is below 2).
    1   Number is definitely prime (only n == 2).
    2   Number is a weak probable prime or pseudoprime to every witness.

    The test is probabilistic: composites can pass for some bases
    ("Fermat liars"), and Carmichael numbers pass for all bases, so a
    result of 2 is only evidence, not proof, of primality.

    >>> is_fermat_probable_prime(99, 7)
    0
    >>> is_fermat_probable_prime(29, 7)  # 29 actually is prime.
    2
    >>> is_fermat_probable_prime(33, (10, 7))
    0
    """
    witnesses = base if isinstance(base, tuple) else (base,)
    # Simple deterministic cases first.
    if n < 2:
        return 0  # Certainly composite (or unity, or zero).
    if n == 2:
        return 1  # Certainly prime.
    if n % 2 == 0:
        return 0
    # Fermat test proper: a**(n-1) must be 1 (mod n) for every witness.
    if any(pow(a, n - 1, n) != 1 for a in witnesses):
        return 0  # n is certainly composite.
    return 2
|
eef79614d3884ef18bce24eceddffc7c3c72c5eb
| 82,894
|
def format_step(step, zero_prefix=False):
    """Format a step value (int or (epoch, step) tuple) for display.

    With zero_prefix, pads to fixed widths. Falls through to None for any
    other type, as before.
    """
    if isinstance(step, int):
        template = "{:06}" if zero_prefix else "{}"
        return template.format(step)
    if isinstance(step, tuple):
        template = "{:04}:{:06}" if zero_prefix else "{}:{}"
        return template.format(*step)
|
ac0018b3aeb32fd61672c8e835e35692f2a1cc87
| 82,897
|
def str2bool(v):
    """Decode a string-encoded bool into a Python object.

    Returns True / False for the recognized spellings below, and None for
    anything else.
    Based on: https://stackoverflow.com/a/715468"""
    normalized = str(v).lower()
    if normalized in {"yes", "true", "t", "1"}:
        return True
    if normalized in {"no", "false", "f", "0"}:
        return False
    return None
|
562ba633f52bd845653b25c60bedb8e014027179
| 82,898
|
def get_samples(data, srate, idxes):
    """
    Sample the waveform at the given indexes.
    Returns a list of {"dt": <time in seconds>, "val": <amplitude>} dicts.
    """
    samples = []
    for idx in idxes:
        samples.append({"dt": idx / srate, "val": data[idx]})
    return samples
|
fc270f64c613c7842fe0233cc1f1eba2692d2e8b
| 82,905
|
def _sorted_photon_data(data_dict):
"""Return a sorted list of keys "photon_dataN", sorted by N.
If there is only one "photon_data" key (with no N) it returns the list
['photon_data'].
"""
prefix = 'photon_data'
keys = [k for k in data_dict.keys() if k.startswith(prefix)]
if len(keys) > 1:
sorted_channels = sorted([int(k[len(prefix):]) for k in keys])
keys = ['%s%d' % (prefix, ch) for ch in sorted_channels]
return keys
|
461d84221aed991e72439f9d1bd3a7c8c7464f92
| 82,906
|
def stock_prices_1_brute_force(stock_prices):
    """
    Solution: Brute force iterative solution compares each stock price with
    all subsequent stock prices and returns the highest buy-then-sell profit
    (may be negative when prices only fall).

    Complexity:
        Time: O(n^2)
        Space: O(1)

    Raises:
        ValueError: if fewer than 2 prices are given.
    """
    if len(stock_prices) < 2:
        raise ValueError('stock price list must be at least 2 items long')
    highest_profit = None
    for i, stock_price_purchased in enumerate(stock_prices):
        for stock_price_sold in stock_prices[i + 1:]:
            profit = stock_price_sold - stock_price_purchased
            # Bug fix: `if not highest_profit` treated a stored profit of
            # 0 as "unset", letting a later worse (negative) profit
            # overwrite it. Compare against None explicitly.
            if highest_profit is None or profit > highest_profit:
                highest_profit = profit
    return highest_profit
|
e56d7018885fe27e488b1d1557e18013048a310a
| 82,912
|
def listify(obj):
    """
    Coerce the given object into a list.

    :param obj: None, a list of objects, or a single object
    :return: [] for None; obj itself when it is already a list;
        otherwise a one-element list containing obj
    """
    if obj is None:
        return []
    return obj if isinstance(obj, list) else [obj]
|
357e6448175129d0036f8a27659ad00bf6130538
| 82,914
|
def get_first_content(el_list, alt=None, strip=True):
    """
    Return the content of the first element in `el_list`, or `alt` when the
    list is empty or the (optionally stripped) content is blank.

    Args:
        el_list (list): List of HTMLElement objects.
        alt (default None): Value returned when list or content is blank.
        strip (bool, default True): Call .strip() on the content.

    Returns:
        str or alt: content string of the first element, or `alt`.
    """
    if not el_list:
        return alt
    text = el_list[0].getContent()
    if strip:
        text = text.strip()
    return text if text else alt
|
652e2bb89f87db25bec39cc832b37bfcadab8089
| 82,918
|
def div(format, data):  # HTML formatting utility function
    """Wrap *data* in an HTML div whose class attribute is *format*."""
    return '<div class="{}">{}</div>'.format(format, data)
|
0596217cabfeb13b0b77a91697ebcfc42888e6b0
| 82,919
|
def parent(a, b):
    """
    Test whether `a` is a parent of `b` in the Williams & Beer lattice of antichains.

    Parameters
    ----------
    a : iterable of iterables
        One antichain.
    b : iterable of iterables
        Another antichain.

    Returns
    -------
    parent : bool
        True if, for each set in `b`, some set in `a` is a subset of it.
    """
    a_sets = [frozenset(aa) for aa in a]
    for bb in b:
        bb_set = frozenset(bb)
        if not any(a_set <= bb_set for a_set in a_sets):
            return False
    return True
|
70751848734ae89dfbca8969b083fd4814b876b6
| 82,921
|
def remove_decorator(srccode: str, decorator: str) -> str:
    """Remove a leading decorator from the return value of `inspect.getsource`.

    :param srccode: return value of `inspect.getsource`
    :param decorator: decorator text to remove, e.g. '@snippet'
    :return srccode_without_decorator: srccode with the decorator (and its
        optional parenthesized argument list) stripped off the front
    """
    # Source does not start with the decorator: nothing to strip.
    if srccode.find(decorator) != 0:
        return srccode.strip()
    len_deco = len(decorator)
    # Decorator with no argument list, e.g. '@snippet\ndef f(): ...'.
    # NOTE(review): raises IndexError when srccode is exactly the decorator
    # with nothing after it -- confirm inputs always include the def line.
    if srccode[len_deco] != '(':
        return srccode[len_deco:].strip()
    # Scan past the balanced '(...)' argument list with a small state
    # stack, so parentheses inside quoted strings are ignored.
    # NOTE(review): backslash escapes inside quotes are not handled -- an
    # escaped quote would close the string early; assumes simple arguments.
    stack = []
    stack.append('(')
    i = len_deco + 1
    while stack:
        top = stack[-1]
        nchr = srccode[i]
        if top == '(':
            if nchr == ')':
                stack.pop()
            elif nchr == "'" or nchr == '"':
                stack.append(nchr)
        elif top == "'":
            if nchr == "'":
                stack.pop()
        elif top == '"':
            if nchr == '"':
                stack.pop()
        i += 1
    # i now points just past the closing ')' of the decorator call.
    return srccode[i:].strip()
|
95565a2467f4e615bfe32c8e3060e5279ce3212f
| 82,922
|
import torch
def create_edge_index_attribute(adj_matrix):
    """
    Build the edge_index and edge_attr tensors (PyTorch Geometric layout)
    for the fully dense graph described by an adjacency matrix.

    Returns (edge_index, edge_attr, rows, cols).
    """
    rows, cols = adj_matrix.shape[0], adj_matrix.shape[1]
    num_edges = rows * cols
    edge_index = torch.zeros((2, num_edges), dtype=torch.long)
    edge_attr = torch.zeros((num_edges, 1), dtype=torch.float)
    # One entry per (src, dest) pair, row-major over the matrix.
    position = 0
    for src, attrs in enumerate(adj_matrix):
        for dest, attr in enumerate(attrs):
            edge_index[0][position] = src
            edge_index[1][position] = dest
            edge_attr[position] = attr
            position += 1
    return edge_index, edge_attr, rows, cols
|
0d7b38b8fafee06221683e6fa1fa62596f6fa468
| 82,926
|
def str_grep(S, strs):
    """Return the strings from *strs* that contain the substring *S*."""
    return [candidate for candidate in strs if S in candidate]
|
4976f13595b71680d7991e0b4ec7d53e23bdd90e
| 82,929
|
def find_next_unused_field_name(field_name, start_index, existing_collisions):
    """
    Find an unused field name of the form ``{field_name}_{X}``.

    NOTE(review): contrary to the original docstring, the bare
    ``field_name`` is never returned -- a ``_X`` suffix (X >= start_index)
    is always appended, even when field_name itself is free. Confirm
    callers depend on this before changing it.

    :param existing_collisions: A set of existing field names to skip over when finding
        the next free field name.
    :param start_index: The number to start looking for suffixes from.
    :param field_name: The base field name to find an unused name for.
    :return: Tuple of (free field name ``{field_name}_{X}``, the index one
        past the X that was used -- suitable as start_index for a
        subsequent call).
    """
    original_field_name = field_name
    i = start_index
    while True:
        field_name = f"{original_field_name}_{i}"
        i += 1
        if field_name not in existing_collisions:
            break
    return field_name, i
|
daf104f12a251d0cf7a8988b9d06b34f532b5afa
| 82,932
|
def read_stream_and_display(stream, display):
    """Read from stream line by line until EOF, display, and capture the lines.

    Pre-async/await (generator-based) coroutine style: ``yield from``
    delegates to the stream's ``readline()`` coroutine, so this must be
    driven by an asyncio event loop. ``stream`` is presumably an
    ``asyncio.StreamReader`` -- confirm at the call site.

    Returns:
        bytes: all lines read, concatenated.
    """
    output = []
    while True:
        line = yield from stream.readline()
        if not line:
            # Empty read signals EOF.
            break
        output.append(line)
        display(line)  # assume it doesn't block
    return b''.join(output)
|
b8aa6b3199e62608330c2c94652fef201f65afbd
| 82,937
|
def get_atom_table(topology):
    """Map each atom's id (its first field) to the full atom record.

    Returns None when the topology has no 'atoms' entry.
    """
    if 'atoms' not in topology:
        return None
    return {atom[0]: atom for atom in topology['atoms']}
|
1cd079db4e249b45d9297212acf7b913793c356f
| 82,938
|
import re
def yes(text):
    """True only if the given text expresses affirmative.

    Matches a case-insensitive yes/true/ok prefix; returns the match
    object (truthy) or None, exactly like re.match.
    """
    affirmative = re.compile('yes|true|ok', re.IGNORECASE)
    return affirmative.match(text)
|
2996a7cb038ead7b10c1c424f381a12d0f63bd0c
| 82,940
|
def get_tip_labels(tree_or_node):
    """Returns a `set` of tip labels for a node or tree.

    Presumably dendropy objects -- confirm: trees expose
    ``leaf_node_iter()`` while nodes expose ``leaf_iter()``; the
    AttributeError fallback (EAFP) handles both cases.
    """
    try:
        return {x.taxon.label for x in tree_or_node.leaf_node_iter()}
    except AttributeError:
        return {x.taxon.label for x in tree_or_node.leaf_iter()}
|
2d6edfb1e7bf9671cd3b78b73aa006339b21ee0c
| 82,942
|
def germano_tau(f, g, operator, scale=45, mask=False):
    """Small-scale variance per M. Germano's 1990 paper:
    <tau>_ss = <f * g> - <f> * <g>, where the bracket terms are the inputs
    convolved by *operator*. Scale is analogous to sigma for a gaussian kernel.
    """
    filtered_product = operator(f * g, scale=scale, mask=mask)
    filtered_f = operator(f, scale=scale, mask=mask)
    filtered_g = operator(g, scale=scale, mask=mask)
    return filtered_product - filtered_f * filtered_g
|
30adcda446817df1e235102e733b21d71257a23d
| 82,943
|
def load_text(file_name):
    """Read text from a file.

    Parameters
    ----------
    file_name : string
        Path to file containing the subject text.

    Returns
    -------
    string
        File text as a string.

    Examples
    --------
    >>> load_text("text.txt")
    """
    with open(file_name, "r") as handle:
        return handle.read()
|
e792ee3206fef3f6ef4bcddd903e9bf3d94ab7e9
| 82,951
|
def split(line):
    """Parse an "a,b;c,d"-style input string into two sets of ints."""
    parts = line.split(';')
    first = {int(token) for token in parts[0].split(',')}
    second = {int(token) for token in parts[1].split(',')}
    return first, second
|
53bbebb0a64d91ee2014baebed8b2c62f4a55341
| 82,953
|
from typing import Union
def clamp(value: Union[int, float], min_: Union[int, float], max_: Union[int, float]) -> Union[int, float]:
    """
    Clamp *value* to the inclusive range [min_, max_].

    :param value: number to clamp
    :param min_: minimum value
    :param max_: maximum value
    :return: clamped number
    """
    # In range: return the value unchanged; otherwise snap to the
    # violated boundary.
    if min_ <= value <= max_:
        return value
    return min_ if value < min_ else max_
|
dbd79a8e27d79486e0a05827462ae5fd71fc6818
| 82,960
|
import inspect
def get_classes(module, base_cls, include_base_cls=True):
    """Collect classes from *module* that subclass *base_cls*.

    Args:
        module (module): Where to find classes.
        base_cls (type): The base class.
        include_base_cls (bool, optional): Defaults to True.
            Whether to include the base class itself.

    Returns:
        list: The matching classes.
    """
    def _matches(candidate):
        # getmembers hands us arbitrary attributes; keep real subclasses only.
        if not inspect.isclass(candidate) or not issubclass(candidate, base_cls):
            return False
        return include_base_cls or candidate != base_cls
    return [member for _, member in inspect.getmembers(module, _matches)]
|
91f9ce7ecdc5491a153ad3e7b7bba1453c6a4f3f
| 82,961
|
def _normalize_percent_rgb(value):
"""
Internal normalization function for clipping percent values into
the permitted range (0%-100%, inclusive).
"""
percent = value.split(u'%')[0]
percent = float(percent) if u'.' in percent else int(percent)
return u'0%' if percent < 0 \
else u'100%' if percent > 100 \
else u'{}%'.format(percent)
|
7e5dffccd94a95d491ab945aeb5e3d8313208897
| 82,963
|
def option_dict(options):
    """ Map each Option instance's name to the instance itself.
    """
    return {option.name: option for option in options}
|
32bd0b6085b491c3d4517ebdaf8abe7055b8f1d4
| 82,970
|
def agg_var_num(dataframe, group_var, dict_agg, prefix):
    """
    Aggregates the numeric values in a dataframe.
    This can be used to create features for each instance of the grouping variable.

    Parameters
    --------
        dataframe (dataframe): the dataframe to calculate the statistics on
        group_var (string): the variable by which to group the dataframe
        dict_agg (dict): mapping of column -> aggregation(s), passed to
            DataFrame.agg
        prefix (string): prefix prepended to the generated column names
            (tracks which source file the features came from)

    Return
    --------
        agg (dataframe):
            a dataframe with the statistics aggregated for
            all numeric columns. Each instance of the grouping variable will have
            some statistics (mean, min, max, sum ...) calculated.
            The columns are also renamed to keep track of features created.
    """
    # Remove id variables other than grouping variable
    for col in dataframe:
        if col != group_var and 'SK_ID' in col:
            dataframe = dataframe.drop(columns=col)
    group_ids = dataframe[group_var]
    numeric_df = dataframe.select_dtypes('number')
    numeric_df[group_var] = group_ids
    # Group by the specified variable and calculate the statistics
    agg = numeric_df.groupby(group_var).agg(dict_agg)
    # Append the statistic suffix (mean, sum, ...) to each column name
    agg.columns = ['_'.join(tup).strip().upper()
                   for tup in agg.columns.values]
    # Prepend the file prefix (e.g. bureau_balance) to track feature origin
    agg.columns = [prefix + '_' + col
                   if col != group_var else col
                   for col in agg.columns]
    agg.reset_index(inplace=True)
    return agg
|
9f5d92d932ce966160825a5c9a9dfd678a162645
| 82,972
|
def get_factory_name(factory):
    """ Return a display name for the factory, falling back to its id when
    no (non-blank) name was specified, so something is always shown. """
    display_name = factory.name.strip()
    return display_name if display_name else factory.id
|
e3c626097c77f34dacab947072186582596bab0a
| 82,973
|
from typing import Dict
from typing import Tuple
def add_ascii_keys(data) -> Dict[Tuple[int, ...], str]:
    """Update *data* with single-byte ascii key mappings and return it.

    >>> data = add_ascii_keys({})
    >>> assert data[(48,)] == '0'
    >>> assert data[(66,)] == 'B'
    >>> assert data[(99,)] == 'c'
    """
    # Codes 1..32 are the control characters Ctrl-A .. Ctrl-Z etc.
    for offset in range(32):
        data[(offset + 1,)] = f"Ctrl {chr(ord('A') + offset)}"
    # Codes 32..126 map to their printable ascii characters.
    for code in range(32, 127):
        data[(code,)] = chr(code)
    return data
|
1adb6da5e184f82f258eb3b58698e61b420a8516
| 82,974
|
import yaml
def read_params(config_path: str = 'config/params.yaml') -> dict:
    """Read a YAML configuration file.

    Args:
        config_path (str): Path of the YAML file. Defaults to 'config/params.yaml'.

    Returns:
        dict: Parsed contents of the YAML file.
    """
    with open(config_path, 'r') as config_file:
        return yaml.safe_load(config_file)
|
2c5c7ceaaca0d44f3f68481e4940b71d2df10c72
| 82,975
|
def symRatio(m1, m2):
    """Symmetric mass ratio eta = m1*m2 / (m1+m2)^2 from component masses."""
    total_mass = m1 + m2
    # Divide twice (same op order as before) rather than squaring, to keep
    # identical floating-point results.
    return m1 * m2 / total_mass / total_mass
|
9eb56c46bf8ba3c149a759d85e5f8e030d5dfe1d
| 82,978
|
def create_headers(bearer_token):
    """Build the Twitter API authorization header dict.

    Args:
        bearer_token (string): bearer token from the Twitter API.

    Returns:
        dict: headers containing the Authorization entry.
    """
    return {"Authorization": "Bearer {}".format(bearer_token)}
|
6850559a1d44b85a0b7f8dba01b32aef1710ac05
| 82,982
|
def put_allowed_class_names(doc):
    """
    Collect the titles of parsed-class endpoints in a HydraDoc object that
    support the PUT operation (one entry per PUT operation found).
    """
    allowed_classes = list()
    for parsed_class in doc.parsed_classes.values():
        hydra_class = parsed_class['class']
        for operation in hydra_class.supportedOperation:
            if operation.method == 'PUT':
                allowed_classes.append(hydra_class.title)
    return allowed_classes
|
8ee38aea161894b664e1db4e392b1cd78be48ff9
| 82,983
|
import collections
def wer_details_by_speaker(details_by_utterance, utt2spk):
    """Compute word error rate and another salient info grouping by speakers.
    Arguments
    ---------
    details_by_utterance : list
        See the output of wer_details_by_utterance
    utt2spk : dict
        Map from utterance id to speaker id
    Returns
    -------
    dict
        Maps speaker id to a dictionary of the statistics, with keys:
        * "speaker": Speaker id,
        * "num_edits": (int) Number of edits in total by this speaker.
        * "insertions": (int) Number insertions by this speaker.
        * "dels": (int) Number of deletions by this speaker.
        * "subs": (int) Number of substitutions by this speaker.
        * "num_scored_tokens": (int) Number of scored reference
            tokens by this speaker (a missing hypothesis might still
            have been scored with 'all' scoring mode).
        * "num_scored_sents": (int) number of scored utterances
            by this speaker.
        * "num_erraneous_sents": (int) number of utterance with at least
            one error, by this speaker.
        * "num_absent_sents": (int) number of utterances for which no
            hypotheses was found, by this speaker.
        * "num_ref_sents": (int) number of utterances by this speaker
            in total.
    """
    # Build the speakerwise details:
    details_by_speaker = {}
    for dets in details_by_utterance:
        speaker = utt2spk[dets["key"]]
        # Fetch (or lazily create) the running Counter of statistics
        # for this utterance's speaker.
        spk_dets = details_by_speaker.setdefault(
            speaker,
            collections.Counter(
                {
                    "speaker": speaker,
                    "insertions": 0,
                    "dels": 0,
                    "subs": 0,
                    "num_scored_tokens": 0,
                    "num_scored_sents": 0,
                    "num_edits": 0,
                    "num_erraneous_sents": 0,
                    "num_absent_sents": 0,
                    "num_ref_sents": 0,
                }
            ),
        )
        # Accumulate this utterance's contribution separately, then merge.
        # Every utterance counts towards the speaker's total, regardless of
        # whether it was scored or the hypothesis was absent.
        # (Fix: previously "num_ref_sents" was never incremented and stayed 0.)
        utt_stats = collections.Counter({"num_ref_sents": 1})
        if dets["hyp_absent"]:
            utt_stats.update({"num_absent_sents": 1})
        if dets["scored"]:
            utt_stats.update(
                {
                    "num_scored_sents": 1,
                    "num_scored_tokens": dets["num_ref_tokens"],
                    "insertions": dets["insertions"],
                    "dels": dets["deletions"],
                    "subs": dets["substitutions"],
                    "num_edits": dets["num_edits"],
                }
            )
            if dets["num_edits"] > 0:
                utt_stats.update({"num_erraneous_sents": 1})
        spk_dets.update(utt_stats)
    # We will in the end return a list of normal dicts
    # We want the output to be sortable
    details_by_speaker_dicts = []
    # Now compute speakerwise summary details
    for speaker, spk_dets in details_by_speaker.items():
        # Re-assert the non-numeric "speaker" field so it survives
        # any Counter arithmetic above.
        spk_dets["speaker"] = speaker
        if spk_dets["num_scored_sents"] > 0:
            spk_dets["WER"] = (
                100.0 * spk_dets["num_edits"] / spk_dets["num_scored_tokens"]
            )
            spk_dets["SER"] = (
                100.0
                * spk_dets["num_erraneous_sents"]
                / spk_dets["num_scored_sents"]
            )
        else:
            # No scored utterances for this speaker: rates are undefined.
            spk_dets["WER"] = None
            spk_dets["SER"] = None
        details_by_speaker_dicts.append(spk_dets)
    return details_by_speaker_dicts
|
442cf123e07cb9163bb29e1c09179fa41ca837c5
| 82,985
|
def sign_of_sequence_fast(sequence: list) -> int:
    """
    Sign of sequence equals to (-1)**(sequence inversions).
    Sequence must contain elements from 0 to len(sequence).
    """
    # Decompose the permutation into cycles; a cycle of length k
    # contributes k-1 sign flips.
    sign = 1
    unvisited = [True] * len(sequence)
    for start in range(len(sequence)):
        if not unvisited[start]:
            continue
        node = sequence[start]
        while node != start:
            unvisited[node] = False
            sign = -sign
            node = sequence[node]
    return sign
|
d0eb0907da09f6909633c247b4614b7057f1b428
| 82,987
|
import json
import base64
def to_base64_encoded_json(obj) -> str:
    """Encode a Python object as a base64-encoded JSON string.

    When embedding JSON inline inside HTML, serialize it to a JSON string in
    Python and base64 encode that string to escape it so that it's safe to
    render inside HTML. On the JS side, base64 decode it and parse it as JSON.

    Args:
        obj: any Python object serializable to JSON

    Returns:
        base64-encoded string of JSON
    """
    serialized = json.dumps(obj).encode('utf-8')
    return base64.b64encode(serialized).decode('utf-8')
|
c977bf3ed634a967fccde2e7989f1a565ff45161
| 82,993
|
def process_data(piece):
    """Parse one pipe-separated metadata line of the KSS dataset.

    Example input:
        1/1_0000.wav|raw text|normalized text|decomposed text|3.5|English text

    Returns:
        tuple: (audio file path under ./datasets/kss/audio/, the 4th field
        of the line, i.e. the decomposed transcript).
    """
    fields = piece.split("|")
    audio_path = "./datasets/kss/audio/" + fields[0]
    return audio_path, fields[3]
|
32283168245a8ba4407832d75208366ec434bd51
| 82,994
|
def agg_concat(group):
    """Concatenate the truthy values of *group* into a '|'-joined string of
    unique values, preserving first-occurrence order.

    Fix: the previous implementation joined a ``set``, whose iteration order
    is non-deterministic across interpreter runs (hash randomization);
    ``dict.fromkeys`` deduplicates while keeping a stable, reproducible order.
    """
    return '|'.join(dict.fromkeys(g for g in group if g))
|
bb899af80de172a926e500686d9acef780c5b808
| 83,001
|
def is_better(ori_value, comparing_value, is_greater):
    """
    Compare two values based on what we are looking for (Min or Max).

    :param ori_value: Original value.
    :type ori_value: number
    :param comparing_value: New value to compare with.
    :type comparing_value: number
    :param is_greater: True if you want to know b > a, else False if you want
        to check b < a.
    :return: If b is better than a or not.
    :rtype: bool
    """
    # With no original value yet, any candidate is an improvement.
    if ori_value is None:
        return True
    if is_greater:
        return comparing_value > ori_value
    return comparing_value < ori_value
|
753da70ed60302fd431755def5e760c0ffb85678
| 83,003
|
import torch
def random_z(num: int, min_height: float, max_height: float, device: str) -> torch.Tensor:
    """Sample `num` uniform goal-object heights in [min_height, max_height) on `device`."""
    # Scale uniform [0, 1) samples into the requested height interval.
    uniform = torch.rand(num, dtype=torch.float, device=device)
    return min_height + (max_height - min_height) * uniform
|
d24b40cbbe08c1753fc4b9304fba7ba2e26373ec
| 83,010
|
def clip(number, min_nb, max_nb):
    """Clip a number between min and max inclusively."""
    # Equivalent to max(min_nb, min(number, max_nb)), spelled out explicitly.
    upper_clipped = number if number <= max_nb else max_nb
    return upper_clipped if upper_clipped > min_nb else min_nb
|
83a3c1d8904e1bbabebf65fa1c99337659feb8ed
| 83,012
|
import re
def format_mile_info(n_str):
    """Format the mileage information extracted from the description.

    A trailing 'k'/'K' means thousands; the value before it may itself be
    decimal (e.g. "12.5k" -> "12500"). Plain values have thousand
    separators stripped.

    Fix: the previous version appended "000" before stripping '.', so
    "12.5k" became "125000" instead of "12500".

    Args:
        n_str (str): extracted miles information from description in one line
            of the dataframe.

    Returns:
        str: formatted string to be converted in float
    """
    if n_str[-1] in ('k', 'K'):
        # Interpret the prefix (commas allowed) as a number of thousands.
        thousands = float(n_str[:-1].replace(',', ''))
        return str(int(thousands * 1000))
    return re.sub('[,.]', '', n_str)
|
5e68acdb7fe7605fa40a02ac95d28190220b678e
| 83,020
|
def parse_indices(in_: str) -> list:
    """Parse a comma-separated index string, expanding inclusive 'a-b'
    ranges, and return the unique indices sorted ascending."""
    collected = set()
    for token in in_.split(','):
        bounds = token.split('-')
        if len(bounds) == 2:
            # Inclusive range "low-high".
            collected.update(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            collected.add(int(token))
    return sorted(collected)
|
eb0fd9daf3ea705f250f7164176522a7d2c269d1
| 83,021
|
def sorted_dict(d, key=None, reverse=False):
    """
    Return a new dict whose items are sorted using *key*; when no key is
    provided, sort by the dict's keys.
    """
    sort_key = (lambda item: item[0]) if key is None else key
    return dict(sorted(d.items(), key=sort_key, reverse=reverse))
|
97b08901c1cd39a2fb17ae6db0cfd088e8366e70
| 83,022
|
def lookup(obj, key, default=None):
    """Look up a property within an object using a dotted path as key.

    If the property isn't found — including when an intermediate value is
    not a mapping — return the default value. (Fix: previously a path
    traversing a non-dict raised AttributeError instead of falling back.)

    Args:
        obj: dict-like object to search (must support ``.get``).
        key: dotted path, e.g. "a.b.c".
        default: value returned when the path cannot be resolved.
    """
    value = default
    for part in key.split("."):
        try:
            value = obj.get(part)
        except AttributeError:
            # Intermediate value has no .get: the path cannot continue.
            return default
        if value is None:
            return default
        obj = value
    return value
|
7b5841b9d092a3c68fd6343fccd523353bf650fe
| 83,037
|
import re
def keyword_found_in_text(keyword, line_text):
    """Return True if *keyword* (treated as a regex pattern) matches anywhere
    in *line_text*, case-insensitively.

    Idiom fix: return the boolean test directly instead of the
    ``if ... is not None: return True / return False`` pattern.
    """
    return re.search(keyword, line_text, re.IGNORECASE) is not None
|
7718b896ff38980cde59b583c44672ceb5aa5e24
| 83,039
|
def p_n(nnx, nn):
    """
    Compute the probability that term x occurs in NEGATIVE news.

    Args:
        nnx (int): number of NEGATIVE news containing the term x
        nn (int): total number of NEGATIVE news

    Returns:
        float: p(x|not(Y)) = nnx / nn
    """
    return nnx / float(nn)
|
d4596f836eaac0f173ab4109ecac1fc0791645aa
| 83,042
|
def url_tabele(n: int) -> str:
    """Return the scoreboard URL for the n-th EGMO."""
    base = "https://www.egmo.org/egmos/egmo{}/scoreboard/"
    return base.format(n)
|
3b35eff6ab0ab55a7fc0b1600887c61d6e83dca5
| 83,045
|
def _decimal_year_to_mjd2000_simple(decimal_year):
""" Covert decimal year to Modified Julian Date 2000.
"""
return (decimal_year - 2000.0) * 365.25
|
d616b2bf8bae92118d69ef84b195162f43ecd684
| 83,046
|
def find_str_in_dict(term, class_mapping):
    """Return (value, key) pairs for every key in *class_mapping* that is a
    substring of the lowercased *term*."""
    lowered_term = term.lower()
    return [(v, k) for k, v in class_mapping.items() if k in lowered_term]
|
68137a0f5f1df7bce282ff32b0f10728534e734c
| 83,048
|
import csv
def load_settings(filename):
    """Load a tab-separated settings file.

    The first row is a header; each following row of matching length becomes
    one entry keyed by its first column, mapping the remaining header names
    to the row's values. Rows with a different column count are skipped.

    Fixes: the 'rU' open mode was removed in Python 3.11, and the file
    handle was never closed; use ``newline=''`` (the csv module's
    recommended mode) inside a context manager instead.

    Args:
        filename (str): path of the settings file.

    Returns:
        dict: sample name -> {column name: value}.
    """
    settings = {}
    with open(filename, newline='') as handle:
        data_reader = csv.reader(handle, delimiter='\t')
        # First row is the header; it names the per-sample columns.
        header = next(data_reader)
        for row in data_reader:
            if len(row) == len(header):
                sample = row[0]
                settings[sample] = dict(zip(header[1:], row[1:]))
    return settings
|
e454ec658316468bc97d24adcf927721d09bd863
| 83,049
|
def _get_deep(properties, *keys):
"""Get a final key among a list of keys (each with its own sub-dict)."""
for key in keys:
properties = properties[key]
return properties
|
cde02079deb2f6f744739e337993bb658a8a1be6
| 83,052
|
def latlon_decimaldegrees(nmealat, latchar, nmealon, lonchar):
    """
    Convert NMEA (d)ddmm.mmmm latitude/longitude into signed decimal degrees.

    Note:
        West (of prime meridian) longitude is negative,
        East (of prime meridian) longitude is positive,
        North (of the equator) latitude is positive,
        South (of the equator) latitude is negative.

    Args:
        nmealat(str): latitude
        latchar(str): N or S
        nmealon(str): longitude
        lonchar(str): E or W

    Returns:
        latdeg(float): the latitude in decimal degrees
        londeg(float): the longitude in decimal degrees
    """
    def _to_degrees(nmea_value, hemisphere, negative_hemisphere):
        # NMEA packs whole degrees and minutes as ddmm.mmmm.
        nmea_value = float(nmea_value)
        whole_degrees = int(nmea_value / 100)
        minutes_part = (nmea_value - whole_degrees * 100) / 60
        degrees = whole_degrees + minutes_part
        if hemisphere == negative_hemisphere:
            degrees = (-1) * degrees
        return degrees

    return (_to_degrees(nmealat, latchar, 'S'),
            _to_degrees(nmealon, lonchar, 'W'))
|
423bb3cb50e9eb4485adf564b66161167dc47496
| 83,053
|
def int_enum_to_int(context, builder, fromty, toty, val):
    """
    Convert an IntEnum member to its raw integer value.

    Args:
        context: lowering context providing ``cast`` (presumably a Numba
            target context — TODO confirm).
        builder: IR builder passed through to the cast.
        fromty: source IntEnum type; ``fromty.dtype`` is its underlying
            integer type.
        toty: destination integer type.
        val: the value being converted.

    Returns:
        The result of casting ``val`` from the enum's underlying integer
        type to ``toty``.
    """
    # The enum member is already represented by its underlying integer
    # (fromty.dtype), so delegating to a plain integer cast suffices.
    return context.cast(builder, val, fromty.dtype, toty)
|
98637f77d357b3c9660ac7a9abc51b3e6bdb84fb
| 83,054
|
def _column_tup_to_str(ind):
"""
Convert tuple of MultiIndex to string.
Parameters
----------
ind : tuple
ind[0]: either 'sleep' or 'activity'
ind[1]: int that is the day number
ind[2]: bool, True being light, False being dark
Returns
-------
output : str
Conversion to a single string represnting info in tuple.
E.g., ('activity', 6, True) gets converted to
'total seconds of activity in day 6'.
"""
if ind[0] == 'activity':
string = 'total seconds of activity in '
elif ind[0] == 'sleep':
string = 'total minutes of sleep in '
elif ind[0] == 'latency':
string = 'minutes of sleep latency in '
else:
raise RuntimeError('%s is invalid MultiIndex' % ind[0])
if ind[2]:
return string + 'day ' + str(ind[1])
else:
return string + 'night ' + str(ind[1])
|
088b5c341a4c7e520e445d359d985a180e96a4da
| 83,055
|
def get_bamboo_plan_shortname(client, plan_id):
    """Return the shortName of a Bamboo plan, falling back to the master
    plan's shortName when the plan's own is empty/falsy."""
    plan = client.get_plan(plan_id)
    short_name = plan["shortName"]
    if not short_name:
        short_name = plan["master"]["shortName"]
    return short_name
|
9ce0d0734ae8053b0ed2f69222fd23d13296bf66
| 83,056
|
import click
def split_issues(ctx, param, value):
    """
    Split issue arguments into (number, URL) tuples.

    Each element of *value* is either 'issue_number' or
    'issue_number:issue_url'; a missing URL is filled with the
    'ISSUE_URL_HERE' placeholder.
    """
    def _parse(raw):
        pieces = [piece.strip() for piece in raw.split(':', 1) if piece.strip()]
        if not pieces:
            raise click.BadParameter(
                'Invalid issue format, should be issue_number or '
                'issue_number:issue_url')
        if len(pieces) == 1:
            pieces.append('ISSUE_URL_HERE')
        return tuple(pieces)
    return [_parse(item) for item in value]
|
bade19c0992b9a051bf36a686a469e7bd94e4838
| 83,057
|
def initialize_engine_object(engine, endpoint, apikey=None, username=None, password=None):
    """
    Initialize a DatasetEngine object from a dotted-path string naming the
    engine class (e.g. 'package.module.EngineClass').
    """
    # Split "pkg.mod.Class" into the module path and the class name.
    module_path, _, class_name = engine.rpartition('.')
    module = __import__(module_path, fromlist=[class_name])
    engine_cls = getattr(module, class_name)
    return engine_cls(endpoint=endpoint,
                      apikey=apikey,
                      username=username,
                      password=password)
|
4e9dfdfc8d282ff0aec2d70cf17cf1ea75499b0a
| 83,058
|
def wrapPos(angDeg):
    """Return the angle (in degrees) wrapped into the half-open range [0, 360)."""
    wrapped = angDeg % 360.0
    # Floating-point modulo of a tiny negative angle can round to exactly
    # 360.0; fold that back to 0 so the result stays inside [0, 360).
    return 0.0 if wrapped == 360.0 else wrapped
|
c87d73d9155cc6b65ff64f92f635162d9c25c913
| 83,062
|
from typing import Callable
from typing import Any
import locale
def get_currency_format() -> Callable[[Any], str]:
    """Return the currency-formatting function for the current locale.

    Returns:
        Callable[[Any], str]: ``locale.currency`` from the active locale.
    """
    currency_formatter = locale.currency
    return currency_formatter
|
9ae4f50e48b8c13465f76e95a459f24e5cf300be
| 83,064
|
def scale_range(x, x_min, x_max, y_min, y_max):
    """Linearly map x from the range [x_min, x_max] onto [y_min, y_max].

    Uses y = a*x + b with a = deltaY/deltaX and b = y_min - a*x_min.
    """
    slope = (y_max - y_min) / (x_max - x_min)
    intercept = (y_min * x_max - y_max * x_min) / (x_max - x_min)
    return slope * x + intercept
|
88d1b9b98c4e5f59ac93f4efd322e584ebf1c08d
| 83,065
|
from urllib.request import urlopen
def read_file_from_url(url, return_type="list", encoding="utf-8"):
    """Reads a file from a URL.

    Args:
        url (str): The URL of the file.
        return_type (str, optional): The return type, can either be string or list. Defaults to "list".
        encoding (str, optional): The encoding of the file. Defaults to "utf-8".

    Raises:
        ValueError: The return type must be either list or string.

    Returns:
        str | list: The contents of the file.
    """
    # Validate the argument before touching the network so bad input fails fast.
    if return_type not in ("list", "string"):
        raise ValueError("The return type must be either list or string.")
    # Close the response deterministically instead of leaking the connection.
    with urlopen(url) as response:
        if return_type == "list":
            # Iterating the response yields lines like readlines() did.
            return [line.decode(encoding).rstrip() for line in response]
        return response.read().decode(encoding)
|
3f35ff64103081b6ed1bc921ffc055f9321f8474
| 83,067
|
def parse_section(section):
    """
    Work out the component and section from the "Section" field.

    Sections like `python` or `libdevel` are in main; sections with a
    prefix separated by a forward-slash (e.g. `non-free/python`) carry
    their component explicitly. Returns [component, section].

    ``section``
        Section name to parse.
    """
    if '/' not in section:
        return ['main', section]
    return section.split('/')
|
ce868969488ad007c14e909378a58c0a2bc4e7fb
| 83,069
|
def bvid2aid(bvid: str):
    """
    Convert a bilibili BV id to the corresponding AV id.

    Args:
        bvid (str): BV id.

    Returns:
        int: AV id.
    """
    alphabet = 'fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'
    # Map each base-58 digit character back to its numeric value.
    char_value = {ch: pos for pos, ch in enumerate(alphabet)}
    # Positions within the BV string that carry the encoded digits,
    # least-significant first.
    digit_positions = [11, 10, 3, 8, 4, 6]
    xor_mask = 177451812
    offset = 8728348608
    encoded = sum(char_value[bvid[digit_positions[i]]] * 58 ** i
                  for i in range(6))
    return (encoded - offset) ^ xor_mask
|
71ec7e7ee26408120b8b6cc61c25be1bb68a2de9
| 83,072
|
def make_link(base, url):
    """Make URL from absolute or relative `url`: an absolute URL (one
    containing '://') is returned unchanged, otherwise it is joined to `base`."""
    return url if '://' in url else base + url
|
af5c5811915c4d57bbd0529de2179fa1e1b6278c
| 83,075
|
def simplify_fasttext_label(label: str) -> str:
    """Simplify a fasttext-like label representation.

    Strips the ``__label__`` prefix and lowercases the remainder; labels
    without the prefix are returned unchanged (and NOT lowercased).

    Examples:
        >>> simplify_fasttext_label('__label__NORMAL')
        'normal'

    Args:
        label (str): Raw fasttext-like label (with `__label__` prefix).

    Returns:
        str: Simplified label.
    """
    marker = '__label__'
    if not label.startswith(marker):
        return label
    return label[len(marker):].lower()
|
a5654ba52b27bb7bce34aff31883463d391f0f93
| 83,077
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.