content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def _apply_function(args):
"""
Calls an apply function on a DataFrame with a set of parameters
Parameters
----------
args: dict
Parameters for functon
Returns
-------
data.apply : Function
The result of the apply function
"""
df_data, function, kwargs = args
return df_data.apply(function, **kwargs)
|
ec473ad36bde641fe97cb2bc363abcb9121b6e66
| 72,025
|
def parsedefaultmarker(text):
    """Parse a trailing ``(DEFAULT: ...)`` marker from byte string *text*.

    Given ``b'abc (DEFAULT: def.ghi)'`` returns ``(b'abc', [b'def', b'ghi'])``.
    Returns None when the marker is absent.
    """
    if not text.endswith(b')'):
        return None
    marker = b' (DEFAULT: '
    idx = text.find(marker)
    if idx < 0:
        return None
    payload = text[idx + len(marker) : -1]
    return text[:idx], payload.split(b'.', 2)
|
4fdf8a96715105b7eb456ea50d996ad6e64ff7a7
| 72,026
|
def table_data() -> str:
    """Return fake CSV table data with mean/std columns, units, and a non-numeric column."""
    csv_text = (
        "x~Mean (lbs),y~Mean (lbs),z~Mean (lbs),z~Std (lbs), pets~Mean\n"
        "1.0,0.0,10.0,1.0,dog\n"
        "0.0,1.0,5.0,0.5,cat"
    )
    return csv_text
|
cf42bfd21bfb2b23d5625d30205477e8a7ec4e01
| 72,029
|
def reversemap(obj):
    """Invert a mapping, preserving its concrete type and entry order."""
    swapped_pairs = (reversed(pair) for pair in obj.items())
    return obj.__class__(swapped_pairs)
|
d33347646de75cd883dbf642049970e78b5a2f74
| 72,031
|
import json
def json_from_file(file_name):
    """Read data from a JSON file and return it as a dict.

    If any exception occurs (missing file, bad encoding, invalid JSON),
    an empty dict is returned — the previous implementation documented
    this behavior but let every exception propagate.

    :param file_name: path of the json file to read
    :type file_name: str
    :return: dictionary with data from the json file, or {} on error
    :rtype: dict
    """
    try:
        with open(file_name, "r", encoding="utf-8") as f_in:
            return json.load(f_in)
    except Exception:
        # Best-effort read: any I/O or parse failure yields an empty dict.
        return {}
|
34a23560cffc4e8ea2c77d3baaca005f01690613
| 72,035
|
def extract_name(data):
    """Extract the full, first, and last name from a LinkedIn profile dict.

    Parameters
    ----------
    data: dict representation of a LinkedIn profile; the name is read from
        data['personal_info']['name'].

    Returns
    -------
    (full_name, first_name, last_name); first/last are empty strings when
    the full name contains no space.
    """
    full_name = data['personal_info']['name']
    if ' ' not in full_name:
        return full_name, '', ''
    first_name = full_name.split(' ')[0]        # text before the first space
    last_name = full_name.rsplit(' ', 1)[1]     # text after the last space
    return full_name, first_name, last_name
|
d1cd55fed4c6e307d44ee71596d199d68c5a3473
| 72,047
|
def _update_partition(this: dict, other: dict):
""" given two dicts of lists (this and other),
extends the list of `this` with the contents of `other`
NOTE: they must have exactly the same keys or will raise an assertion error
NOTE: not done in place (returns a copy of the dict)
"""
this = dict(this)
for key in this:
assert key in other
assert isinstance(this[key], list)
assert isinstance(other[key], list)
# this[key].update(other[key])
this[key].extend(other[key])
return this
|
edf5151a1c8e4c2a65c1a45a075a46deffd60e55
| 72,048
|
def retriever(cont, tag):
    """Retrieve the entry stored under *tag* in the container *cont* (a mapping)."""
    value = cont[tag]
    return value
|
8688e07131a249d53139a277fb69dc1c608f72cd
| 72,053
|
def prediction_to_vad_label(
    prediction,
    frame_size: float = 0.032,
    frame_shift: float = 0.008,
    threshold: float = 0.5,
):
    """Convert model prediction to VAD labels.
    Args:
        prediction (List[float]): predicted speech activity of each **frame** in one sample
            e.g. [0.01, 0.03, 0.48, 0.66, 0.89, 0.87, ..., 0.72, 0.55, 0.20, 0.18, 0.07]
        frame_size (float): frame size (in seconds) that is used when
            extracting spectral features
        frame_shift (float): frame shift / hop length (in seconds) that
            is used when extracting spectral features
        threshold (float): prediction values that are higher than `threshold` are set to 1,
            and those lower than or equal to `threshold` are set to 0
    Returns:
        vad_label (str): converted VAD label, space-separated "start,end" pairs
            e.g. "0.31,2.56 2.6,3.89 4.62,7.99 8.85,11.06"
    NOTE: Each frame is converted to the timestamp according to its center time point.
    Thus the converted labels may not exactly coincide with the original VAD label, depending
    on the specified `frame_size` and `frame_shift`.
    See the following example for more detailed explanation.
    Examples:
        >>> label = parse_vad_label("0.31,0.52 0.75,0.92")
        >>> prediction_to_vad_label(label)
        '0.31,0.53 0.75,0.92'
    """
    # Map a frame index to the timestamp of that frame's center point.
    frame2time = lambda n: n * frame_shift + frame_size / 2
    speech_frames = []
    # State machine over thresholded frames: track 0->1 (segment start)
    # and 1->0 (segment end) transitions.
    prev_state = False
    start, end = 0, 0
    end_prediction = len(prediction) - 1
    for i, pred in enumerate(prediction):
        state = pred > threshold
        if not prev_state and state:
            # 0 -> 1: open a new speech segment at this frame.
            start = i
        elif not state and prev_state:
            # 1 -> 0: close the segment and emit it.
            end = i
            speech_frames.append(
                "{:.2f},{:.2f}".format(frame2time(start), frame2time(end))
            )
        elif i == end_prediction and state:
            # 1 -> 1 (end): prediction ends while still in speech; close here.
            # NOTE(review): a segment that both starts AND ends on the final
            # frame takes the first branch instead and is silently dropped —
            # confirm whether one-frame trailing segments should be emitted.
            end = i
            speech_frames.append(
                "{:.2f},{:.2f}".format(frame2time(start), frame2time(end))
            )
        prev_state = state
    return " ".join(speech_frames)
|
993491314fd1b92402701996ce94c10f8dcf59e2
| 72,059
|
def lt_pre_release(self, other):
    """
    Return True when this SemVer's pre_release sorts before *other*'s.

    Per the SemVer spec, pre-release versions have lower precedence than
    the associated normal version, so a version with a pre_release is
    "less than" one without; two versions without pre_releases compare False.
    """
    if self.pre_release:
        if other.pre_release:
            return self.pre_release < other.pre_release
        return True
    return False
|
314a12ad9681fa517fdb5a61c845d90cdaa16ab1
| 72,060
|
import csv
def get_X_y(data_file):
    """Parse the driving-log CSV into image paths X and steering angles y.

    Left images get +0.4 added to the steering angle and right images -0.4;
    rows with speed below 20 are discarded as low-speed samples.
    """
    X, y = [], []
    steering_offset = 0.4
    with open(data_file) as fin:
        for _, left, right, angle, _, _, speed in csv.reader(fin):
            if float(speed) < 20:
                continue  # throw away low-speed samples
            angle = float(angle)
            X.extend([left.strip(), right.strip()])
            y.extend([angle + steering_offset, angle - steering_offset])
    return X, y
|
82329371eaa6100d99aed599707bf28cb83f025e
| 72,062
|
def listify(to_select):
    """Wrap *to_select* in a list unless it already is one (handles both
    single values and list inputs)."""
    return to_select if isinstance(to_select, list) else [to_select]
|
8f476b1c48272d209408df86ccb47d259be72738
| 72,064
|
def get_all_dict_words(dict_):
    """
    Collect every word appearing in the dictionary's meaning entries.

    :param dict_: mapping of words to {level: iterable of meaning words}.
    :type dict_: dict
    :return: unique word entries (order unspecified).
    :rtype: list
    """
    collected = set()
    for entry in dict_.values():
        for meaning_words in entry.values():
            collected.update(meaning_words)
    return list(collected)
|
ea0e336f20a2d51b72ca13c77f5696e3bf896039
| 72,071
|
def update_parameters(params, grads, alpha):
    """
    One gradient-descent step over all layer weights and biases.

    Arguments:
    params -- dict of model parameters keyed 'w1', 'b1', 'w2', ...
    grads -- dict of gradients keyed 'dw1', 'db1', ...
    alpha -- learning rate
    Returns:
    params -- the same dict with every 'w*' and 'b*' entry stepped by
        -alpha times its gradient
    """
    n_layers = len(params) // 2
    for layer in range(1, n_layers + 1):
        w_key = 'w%s' % layer
        b_key = 'b%s' % layer
        params[w_key] = params[w_key] - alpha * grads['d' + w_key]
        params[b_key] = params[b_key] - alpha * grads['d' + b_key]
    return params
|
45acda1cf5b69c289c4e124a827749f2966c98c8
| 72,073
|
def get_value_from_dict(key_path, input_dict):
    """
    Return the value at a dotted *key_path* inside *input_dict*.

    Example: "result.dir" looks up input_dict["result"]["dir"].
    Returns None for invalid argument types, missing keys, falsy
    intermediate values, or when an intermediate value is not a dict
    (the previous version raised AttributeError in that last case).
    """
    if not isinstance(key_path, str) or not isinstance(input_dict, dict):
        return None
    current = input_dict
    for key in key_path.split('.'):
        if not isinstance(current, dict):
            # Path descends into a non-dict value, e.g. "a.b" with {"a": "x"}.
            return None
        current = current.get(key)
        if not current:
            return None
    return current
|
c76ed21bcf4aa8a9884d6660a235ff92ebeae1ac
| 72,076
|
def update_rbac_assignment(
    self,
    username: str,
    roles: str = "null",
    asset: str = "null",
) -> bool:
    """Create or update an RBAC assignment (POST /rbac/assignment).

    :param username: Username for assignment
    :type username: str
    :param roles: Comma separated list of roles, e.g. "role1, role2".
        Required if asset is not provided; defaults to "null"
    :type roles: str, optional
    :param asset: Appliance access group or asset name, e.g. "group1".
        Required if roles is not provided; defaults to "null"
    :type asset: str, optional
    :return: True/False based on successful call
    :rtype: bool
    :raises ValueError: When both optional parameters are left as "null"
    """
    if roles == asset == "null":
        raise ValueError("Roles and asset variables cannot both be 'null' ")
    payload = {
        "username": username,
        "roles": roles,
        "asset": asset,
    }
    return self._post(
        "/rbac/assignment",
        data=payload,
        return_type="bool",
    )
|
53bfa4adb35ba8c50762b06b614de97f59aca63e
| 72,080
|
def get_relative_freqs(type2freq):
    """
    Normalize a frequency dict into relative proportions.

    Parameters
    ----------
    type2freq: dict
        Keys are types of a system, values are frequencies of those types

    Returns
    -------
    dict mapping each type to its frequency divided by the total frequency
    """
    total = sum(type2freq.values())
    return {kind: freq / total for kind, freq in type2freq.items()}
|
15c33e0ed235543430e9c1925593916135d96615
| 72,084
|
def mat311mod(a, b):
    """
    Elementwise modulus of a 3x1 matrix using truncated division.

    Parameters
    ----------
    a : tuple of float
        3x1 matrix
    b : float
        modulus

    Returns
    -------
    list of int
        int(a[i] - trunc(a[i]/b)*b) for each component (truncation toward
        zero, so results keep the sign of a[i])
    """
    return [int(x - int(x / b) * b) for x in a]
|
275a509a4f53785c4702151811c63e5009d999c4
| 72,087
|
from typing import Union
import math
def convert_heading(degree: Union[int, float]) -> Union[int, float]:
    """
    Convert a compass heading to the mathematical convention and unit.

    Args:
        degree: Heading in degrees, north = 0, increasing clockwise
    Returns:
        Heading angle in radians, east = 0, increasing anti-clockwise
    Raises:
        TypeError: when the input is not a float or int
    """
    if not isinstance(degree, (float, int)):
        raise TypeError("input must be a float or int")
    as_float = float(degree)
    # Flip the rotation direction (unless exactly 0), then shift origin
    # from north to east before converting to radians.
    flipped = 0.0 if as_float == 0 else 360.0 - as_float
    return math.radians(flipped + 90)
|
5be4925fa9316377a6b873e3f05601eda0e0211a
| 72,088
|
import io
def slurp(path, encoding='UTF-8'):
    """
    Read the whole file at *path* and return its contents as a unicode string.

    Parameters
    ----------
    path : str
        File path to a file on disk
    encoding : str, default `UTF-8`, optional
        Encoding of the file

    Returns
    -------
    The text read from the file as a unicode string
    """
    with io.open(path, 'r', encoding=encoding) as handle:
        contents = handle.read()
    return contents
|
1a0697b7c2858308a2ad4844d92620d3e7206a02
| 72,098
|
def earliest_bus(notes):
    """
    Return (minutes to wait) * (bus ID) for the next departing bus.

    Every bus with ID ``b`` departs at each multiple of ``b`` minutes, so
    ``b - time % b`` minutes remain until its next departure. The bus with
    the smallest remaining wait wins (first one listed breaks ties).
    """
    time = notes['time']
    buses = notes['buses']
    best_bus = min(buses, key=lambda bus: bus - time % bus)
    wait_time = best_bus - time % best_bus
    return wait_time * best_bus
|
736e5b1b124fb9acd1f825becec14c3958be1249
| 72,101
|
import threading
def makeThreadSafe(function, lock=None):
    """Wrap *function* so that all calls are serialized by a lock.

    Call without a lock to give the function its own private lock; pass an
    existing lock object to make several functions share one lock, e.g. all
    functions that change the same data structure.

    @param function: function to make thread safe
    @param lock: threading.Lock instance or None
    @rtype: function
    @return: function decorated with locking
    """
    guard = lock if lock is not None else threading.Lock()
    def decorated(*args, **kw):
        with guard:
            return function(*args, **kw)
    return decorated
|
dde480d751b867e442fcef4263d6d5e5b9da1f37
| 72,102
|
def check_items(items, expiry):
    """
    Prune old deployments and collect files that can be deleted.

    All deployments (distinct item times) older than *expiry* are found and
    only the newest of them is kept. Returns (remaining_items,
    files_to_delete); files belonging exclusively to dropped deployments are
    listed for deletion. If at most one deployment is older than the expiry,
    everything is kept.
    """
    stale_times = sorted({item.time for item in items if item.time < expiry})
    if len(stale_times) <= 1:
        return items, []
    cutoff = stale_times[-1]
    survivors = [item for item in items if item.time >= cutoff]
    surviving_names = {item.filename for item in survivors}
    doomed_names = {
        item.filename
        for item in items
        if item.time < cutoff and item.filename not in surviving_names
    }
    return survivors, list(doomed_names)
|
95fcae0d130d80f17623ee556ca7e933e7e5681b
| 72,104
|
def roundf(x, precision=0):
    """Round *x* to *precision* significant digits; unlike round(), this
    works with large exponents in floats and high precision.

    Based on http://stackoverflow.com/a/6539677/623735

    >>> roundf(234042163./(2**24), 5)
    13.95
    >>> roundf(1234.1234e-123, 5)
    1.2341e-120
    >>> roundf(1234.1234e-123, 3)
    1.23e-120
    >>> roundf(1234123.1234, 3)
    1230000.0
    """
    if precision <= 0:
        # precision 0 (or negative) means no rounding at all.
        return x
    formatted = "{:.{}e}".format(x, precision - 1)
    return float(formatted)
|
53899bf858c95ef3d7041d5eb29301b8c1aec311
| 72,110
|
def get_cell(sheet, index):
    """Return the cell object at *index* (of the form "A1") in *sheet*."""
    cell = sheet[index]
    return cell
|
1f44e2d17eb17bd336e80e553be9476e4f797405
| 72,112
|
def ports_from_output_port_acts(output_port_acts):
    """Collect the unique port numbers from OFPActionOutput actions.

    Args:
        output_port_acts: iterable of output-to-port actions, each carrying
            a ``port`` attribute.
    Returns:
        set of port number ints.
    """
    return set(act.port for act in output_port_acts)
|
0548a57f9cdf8a0ddef863e82ce5d44bbbf8cb3f
| 72,118
|
def parse_int(val, name):
    """Coerce *val* to an integer; None passes through unchanged.

    Raises ValueError mentioning *name* when the value is not numeric.
    """
    if val is None:
        return None
    try:
        return int(val)
    except ValueError:
        raise ValueError(
            f"Please provide a number for {name}, instead of {val}"
        )
|
fa6e50de45e92df0c75a37910e55a17dda57588f
| 72,120
|
def read_kaldi_mapfile(path):
    """Read a Kaldi mapping file (text, .scp, etc.) into a dict.

    Each line has the form ``<key> <value...>``: the key is everything
    before the first space, the value everything after it. Lines with no
    space map the whole line to an empty value, and blank lines are
    skipped. (The previous version mishandled both cases: a space-less
    line lost the key's last character and kept the entire line as the
    value, and blank lines produced a junk '' entry.)
    """
    mapping = {}
    with open(path, 'r') as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue
            key, _, val = line.partition(' ')
            mapping[key] = val
    return mapping
|
f966d81a757d49ee72f2a4e50c64221e574f2e05
| 72,122
|
import functools
import operator
def product(seq):
    """Return the product of all elements of *seq* (1 for an empty sequence)."""
    result = 1
    for value in seq:
        result = operator.mul(result, value)
    return result
|
f9a0cf3c699537632e8e9917f69ef37c393af456
| 72,124
|
import itertools
def interleave(a, b):
    """Interleave two iterables pairwise: a0, b0, a1, b1, ... (stops with
    the shorter input)."""
    paired = zip(a, b)
    return itertools.chain.from_iterable(paired)
|
2e378307880aebb51fd15b335b92ec5ee0764a22
| 72,130
|
from typing import Iterable
def build_vocab(items, add_unk=True, add_pad=True):
    """Build a frequency vocabulary from an item list.

    Args:
        items: iterable of items to count.
        add_unk: bool, whether to add an '<UNK>' entry (count 1e10).
        add_pad: bool, whether to add a '<PAD>' entry (count 1e20).

    >>> build_vocab(["a","b","b"], add_unk=False, add_pad=True)
    {'a': 1, 'b': 2, '<PAD>': 1e+20}
    """
    assert isinstance(items, Iterable), "input 'items' is not iterable"
    vocab = {}
    for item in items:
        if item in vocab:
            vocab[item] += 1
        else:
            vocab[item] = 1
    if add_pad:
        vocab['<PAD>'] = 1e20
    if add_unk:
        vocab['<UNK>'] = 1e10
    return vocab
|
1a2e6df1ba4b4066073e3bc39cd6b7e28a9c50e9
| 72,131
|
def to_list(a, diag_val=None):
    """Convert an array to a list of lists, like np's .tolist(), optionally
    overwriting the diagonal with *diag_val* as a convenience."""
    rows = a.tolist()
    if diag_val is not None:
        # Diagonal length is taken from the column count, as before.
        for i in range(len(rows[0])):
            rows[i][i] = diag_val
    return rows
|
1f6d5188e6e597d15a89be8a0cbc19fa35df9a24
| 72,134
|
def BFS_search(gr, u, v):
    """
    Find the shortest path between two nodes (source and target) using
    the Breadth-First Search (BFS) algorithm.
    @type gr: tlp.Graph
    @param gr: Tulip graph
    @type u: tlp.node
    @param u: first node of interest (source)
    @type v: tlp.node
    @param v: Second node of interest (target)
    @return: the list of *intermediate* nodes on a shortest path from u
        to v (both endpoints are stripped before returning), or None
        when no path is found.
    """
    explored = []
    queue = []
    # BFS frontier holds whole paths, each a list of nodes ending at the
    # node to expand next.
    queue.append([u])
    while queue :
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            for n in gr.getInOutNodes(node):
                # Extend the current path by one neighbor and enqueue it.
                # NOTE(review): neighbors are enqueued before the explored
                # check, so duplicate paths can sit in the queue; they are
                # discarded when popped, which keeps results correct at the
                # cost of extra memory.
                new_path = list(path)
                new_path.append(n)
                queue.append(new_path)
                if n == v:
                    # Target reached: drop the target and source endpoints,
                    # returning only the interior of the path.
                    new_path.pop()
                    del new_path[0]
                    return new_path
            explored.append(node)
|
9349ee5d4869b572d360644b4895ca6acc47f3d0
| 72,135
|
def _long_to_wide(data, data_type, date_col="date", other_data_types_to_drop=[], sort_by=None):
    """Convert a dataframe from long format to wide format.
    Parameters:
        data (pandas.DataFrame): The dataframe to convert.
        data_type (str): The name of the data type to keep when we pivot.
        date_col (str, optional): The name of the column with the dates in it. Default "date".
        other_data_types_to_drop (list of str, optional): A list of other data_type columns that may exist in the table, which will be dropped. Note that if data_type is included in this list, it will actually not be dropped.
            NOTE(review): the mutable [] default is harmless here because the
            list is only read, never mutated, but a None default would be the
            conventional spelling.
        sort_by (str, optional): The name of one of the indexing columns to sort the dataframe by before returning it. Default of None causes no extra sorting to be performed.
    Returns:
        pandas.DataFrame: The dataframe in wide format.
    """
    # If there are multiple data type columns, only keep the one specified
    cols_to_drop = [col for col in other_data_types_to_drop if col != data_type and col in data.columns]
    data = data.drop(columns=cols_to_drop)
    # Spread the table, a la tidyr: every column except the value column
    # becomes part of the (multi-)index, then the date level is pivoted out.
    id_cols = [col for col in data.columns if col != data_type]
    data = data.set_index(id_cols) # Putting these in the index keeps them from being spread
    # assumes date_col is one of id_cols so it exists as an index level -- TODO confirm
    data = data.unstack(level=date_col, fill_value=0)
    # Drop the residual data_type level so the columns are plain date labels.
    data.columns = data.columns.droplevel(0)
    data.columns.name = None
    if sort_by is not None:
        data = data.sort_index(level=sort_by)
    data = data.reset_index() # Take the saved columns out of the index
    return data
|
ed26fca4c96d95063492e7b3e5fc27dbbb982a52
| 72,136
|
from datetime import datetime
def convert_date_to_fns_format(date_db: datetime) -> str:
    """Format a database datetime in FNS format, e.g. '2020-01-31T23:59'."""
    fns_format = '%Y-%m-%dT%H:%M'
    return date_db.strftime(fns_format)
|
60445e51032efe4548b6cd59fe53baf6c53c6f1c
| 72,139
|
def component_unit_dimension(h5):
    """Return the 'unitDimension' attribute of the h5 object as a tuple."""
    unit_dim = h5.attrs['unitDimension']
    return tuple(unit_dim)
|
36f239667dd1c75f0312bb1fb7dd9ac8e57c720f
| 72,141
|
def compute_max_vapor_velocity(C_sbf, sigma, rho_L, rho_V, F_F, A_ha):
    """
    Return the maximum allowable vapor velocity U_f [m/s] through the net
    area of flow before flooding.

    Parameters
    ----------
    C_sbf :
        Maximum Capacity Parameter (m/s)
    sigma :
        Liquid surface tension (dyn/cm)
    rho_L :
        Liquid density
    rho_V :
        Vapor density
    F_F :
        Foaming factor
        NOTE(review): F_F is accepted but never used in the formula below —
        confirm whether the foaming factor should multiply the result.
    A_ha :
        Ratio of open area, A_h, to active area, A_a (must be >= 0.06)

    Raises
    ------
    ValueError when A_ha is below 0.06.
    """
    surface_tension_factor = (sigma / 20) ** 0.2
    # Working area factor, piecewise in the open/active area ratio.
    if 0.1 <= A_ha <= 1:
        working_area_factor = 1
    elif A_ha >= 0.06:
        working_area_factor = 5 * A_ha + 0.5
    else:
        raise ValueError("ratio of open to active area, 'A', must be between 0.06 and 1")
    density_term = ((rho_L - rho_V) / rho_V) ** 0.5
    return C_sbf * working_area_factor * surface_tension_factor * density_term
|
50fa395f7a094103940dbfc46996e9aba8c8e8ab
| 72,144
|
def has_any_in(chances, possibilities):
    """
    Return True when any element of *chances* is also in *possibilities*.

    >>> has_any_in(range(5), range(3, 6))
    True
    >>> has_any_in(range(5), range(6, 10))
    False
    >>> has_any_in(range(5), range(5))
    True
    """
    # Generator lets any() short-circuit on the first hit; the previous
    # version materialized a full list before testing.
    return any(x in possibilities for x in chances)
|
98990dad0bb91dfdd56d9d2f653d00429ca04b86
| 72,148
|
def pipe_to_underscore(pipelist):
    """Convert an AFF pipe-separated list to a CEDSCI underscore-separated list."""
    return "_".join(pipelist.split("|"))
|
94d29e07d2c01be5afffab71cc26f9e91646bd9b
| 72,155
|
def write_file(file_to_write, text):
    """Write *text* to the file at *file_to_write*.

    Returns (True, None) on success or (False, <error message>) on failure.
    A context manager guarantees the handle is closed even when the write
    itself raises — the previous version leaked the open handle on error.
    """
    try:
        with open(file_to_write, 'w') as f:
            f.write(text)
    except Exception as e:
        return (False, str(e))
    return (True, None)
|
5bed72cf333eb908ecfa4951ac4ed1da91ee842c
| 72,156
|
def get_index_bucket(oval_id):
    """Determine the subdirectory bucket for an OVALID.

    The repository schema protects against having too many files in a
    single directory by bucketing on the numeric index at the end of the
    OVALID, limiting each directory to 1000 files.

    @type oval_id: string
    @param oval_id: The OVALID
    @rtype: str or None
    @return: the bucket as a string — "0000" for indices below 1000,
        otherwise the index rounded down to a multiple of 1000 — or None
        when the OVALID is empty, has no ':', or its index cannot be
        parsed. (The previous docstring claimed an int / 0 return, which
        never matched the code.)
    """
    if not oval_id:  # covers both None and empty string
        return None
    # The numeric index follows the last ':' of the OVAL ID.
    separator = oval_id.rfind(':')
    if separator < 0:
        return None
    try:
        index = int(oval_id[separator + 1:])
    except ValueError:
        # Non-numeric tail; no bucket can be computed.
        return None
    if index < 1000:
        return "0000"
    # Round down to the containing multiple of 1000.
    return str((index // 1000) * 1000)
|
15cdd00dba8c3cc1d19f3159d6d9d9559ada778e
| 72,157
|
def _str_formatter(x):
"""Format a string"""
return repr(x)
|
8c469a0ba078348448c32d9f6d18cbf508b8781c
| 72,158
|
def github_split_owner_project(url):
    """
    Parse the owner and project name out of a GitHub URL.

    Examples
    --------
    >>> github_split_owner_project("https://github.com/intel/dffml")
    {'owner': 'intel', 'project': 'dffml'}
    """
    # The last two path components are <owner>/<project>. The previous
    # version's doctest showed a tuple, but the function has always
    # returned a dict; the doctest is fixed to match. The join/split
    # roundtrip on already-split components was a no-op and is removed.
    parts = url.split("/")[-2:]
    return dict(zip(("owner", "project"), parts))
|
ea3d788f92c0936babf48000c51eba3ec2c1ac73
| 72,159
|
def get_subs_tes_chp(chp_ratio, v_tes, tes_invest, p_nom):
    """
    Calculate the KWKG subsidy for a TES combined with CHP.

    Parameters
    ----------
    chp_ratio : chp_heat / total_heat per year
    v_tes : TES volume in liter
    tes_invest : TES investment cost
    p_nom : el. power in kW

    Returns
    -------
    Amount of subsidy (0 when no rule applies).
    """
    volume_m3 = v_tes / 1000  # liter -> m3
    if chp_ratio >= 0.5:
        # Main rule: at least half the yearly heat comes from the CHP.
        if 1.0 <= volume_m3 <= 50.0:
            return 250 * volume_m3
        if volume_m3 > 50:
            return 0.3 * tes_invest
        return 0
    # Fallback rule keyed on volume relative to electrical power.
    if volume_m3 >= 0.3 * p_nom:
        return 250 * volume_m3
    return 0
|
e4c5c675cc7bda27a25610b3907e33aafaff94a3
| 72,160
|
import json
import logging
def get_message_from_json(body):
    """Parse a JSON request body into a dict.

    Returns the parsed object when it is a dictionary; otherwise logs a
    warning and returns None (also on invalid JSON).
    """
    try:
        parsed = json.loads(body)
    except Exception as e:
        logging.warning('JSON load error from BindPage: error=' + str(e)
                        + ', request=' + body)
        return None
    if isinstance(parsed, dict):
        return parsed
    logging.warning('Expected dictionary message'
                    + ', request=' + body)
    return None
|
f6e45f5ac3b2a85260de2b0819bb83fc1e9a141c
| 72,161
|
import requests
from bs4 import BeautifulSoup
import re
def pr_names(link):
    """
    Return all the contributors involved in a pull request.

    Parameters:
        link (url): URL of the pull request page to scrape.
    Returns:
        list: contributor names taken from the participation section.
    """
    contributors = []
    response = requests.get(url=link)
    soup = BeautifulSoup(response.content, 'html.parser')
    for participation_div in soup.findAll('div', class_="participation"):
        for anchor in participation_div.findAll('a'):
            contributors.append(re.sub('/', "", anchor['href']))
    return contributors
|
20916134dfe3b4addd7f28abbb7ed7f7e2bf19c8
| 72,163
|
from pathlib import Path
from typing import Any
import hashlib
def recursive_sha256(path: Path, hashsum: Any = None) -> str:
    """
    Calculate the sha256 hash of the file contents recursively.

    Directory entries are visited in sorted order so the digest is
    deterministic — the previous version used raw ``iterdir()`` order,
    which is OS/filesystem dependent and made directory digests unstable
    across machines.

    Args:
        path (Path): Parent path of contents
        hashsum (Optional[hashlib._HASH]): Current checksum of files if any
    Returns:
        str: Accumulated digest hex number string with lowercase letters like
        "03e93aae89012a2b06b77d5684f34bc2e27cd64e42108175338f20bec11c770a"
    Raises:
        ValueError: When `path` does not exist in the system
    """
    if not path.exists():
        raise ValueError("Path does not exist")
    hashsum = hashlib.sha256() if not hashsum else hashsum
    if path.is_dir():
        # Sorted traversal keeps the accumulated digest reproducible.
        for item in sorted(path.iterdir()):
            recursive_sha256(item, hashsum)
    else:
        hashsum.update(path.read_bytes())
    return str(hashsum.hexdigest())
|
d8388793f50419536bba864bd24f54fc2f361e2c
| 72,169
|
from typing import Type
def is_subclass(obj, base_cls: Type) -> bool:
    """Check if `obj` — a class, or an instance whose type is used — is a
    sub-class of `base_cls`."""
    if isinstance(obj, type):
        return issubclass(obj, base_cls)
    return issubclass(type(obj), base_cls)
|
ad149693020d487c95ae688fd971623207a7e322
| 72,172
|
def markup_line(text, offset, marker='>>!<<'):
    """Insert `marker` at `offset` into `text`, and return the marked
    line.

    .. code-block:: python

       >>> markup_line('0\\n1234\\n56', 3)
       '1>>!<<234'

    (The previous doctest omitted the quotes around the string result.)
    """
    # Start of the line containing `offset`; rfind yields -1 at the very
    # start, which +1 correctly turns into index 0.
    begin = text.rfind('\n', 0, offset) + 1
    end = text.find('\n', offset)
    if end == -1:
        end = len(text)
    return text[begin:offset] + marker + text[offset:end]
|
40910a2f686e48465fa9d52e0a7c028ce359780b
| 72,176
|
def approx(f, eps=0.001):
    """Quantize `f` to the nearest multiple of `eps` (round half up).

    This is useful if you want to quantize floats into bins. The previous
    implementation used int() truncation, which rounds toward zero and
    therefore mis-binned negative values (e.g. approx(-1.0006) gave -1.0
    instead of -1.001); math.floor applies the half-up rule consistently
    for both signs.
    """
    import math
    return math.floor(f / eps + 0.5) * eps
|
93e0ed857e0637ea84d52d42c951fdebec2c79e7
| 72,179
|
def get_set_of_tuples(df):
    """
    Convert a DataFrame's rows to a set of tuples.

    The set conversion ensures that row order no longer matters.
    """
    return {tuple(row) for row in df.values}
|
c5fb16aa5f9ecc87eeb873b5d33d78cb151795da
| 72,183
|
def _make_params_string(params):
"""Convert list of parameters to string"""
p_str = '&'.join(['{}={}'.format(f, params[f]) for f in params.keys()])
return p_str
|
db94c55492d506a364b6064bda3638995caf546f
| 72,185
|
def get_section_from_chunk(chunk, sectionname):
    """Extract the lines of the named section from *chunk*.

    The section starts after a line equal to *sectionname* and ends at the
    first empty line (or the end of the chunk). Returns [] when the section
    header never appears.
    """
    section = []
    lines = iter(chunk)
    for line in lines:
        if line == sectionname:
            break
    else:
        return section  # header never found
    for line in lines:
        if line == sectionname:
            continue  # repeated header lines are skipped, as before
        if line == "":
            break  # empty line terminates the section
        section.append(line)
    return section
|
d2e40c599545f5c770a50f260ce0ac858814a80e
| 72,187
|
def first_of(value, arg=10):
    """
    Return only the first `arg` items of the sequence `value`.

    `arg` may be anything int() accepts — e.g. the string "3" from a
    template filter. The previous version converted it to `count` but then
    compared and sliced with the raw argument, raising TypeError for
    string arguments.
    """
    if not value:
        return value
    count = int(arg)
    if len(value) > count:
        return value[:count]
    return value
|
7d41904612a8c772ddab87e2f808c14ae899bab1
| 72,189
|
def aviso_tll(tll):
    """Build the (Spanish) arrival-time message for the next customer."""
    mensaje = 'El tiempo de llegada del siguiente cliente es: ' + str(tll)
    return mensaje
|
adfe7664d2af82afb61a380b2a34f9b0ed75b5b5
| 72,193
|
import requests
def get_cover_path(title):
    """
    Return the cover (poster) URL of the given movie, from TMDB search.

    :param title: the title of the movie
    :return: the cover path URL of the first search result

    NOTE(review): the API key is hard-coded in source; consider moving it
    to configuration so it is not committed with the code.
    """
    base_url = "https://api.themoviedb.org/3/search/movie?api_key={}&query={}"
    apikey = "4ca2b1ac7501d2a20234b56f7edcfe88"
    payload = requests.get(base_url.format(apikey, title)).json()
    first_hit = payload['results'][0]
    return "http://image.tmdb.org/t/p/w500/" + first_hit['poster_path']
|
6ee156fe8d67b890559fbaa347c0c3161d621f31
| 72,201
|
def color_percent(p):
    """Return an (r, g, b) color for `p` percent achievement: dark red at
    0 percent, dark green at 100, linearly interpolated in between with a
    luminosity boost peaking at 50.
    TESTS:
    >>> color_percent(0)
    (180, 0, 0)
    >>> color_percent(50)
    (180, 180, 0)
    >>> color_percent(83)
    (41, 200, 0)
    >>> color_percent(100)
    (0, 180, 0)
    """
    # Boost factor: 1.0 at the extremes (0 / 100), 2.0 at exactly 50.
    boost = 2.0 - (abs(p - 50) / 50.0)
    fraction = p / 100.0
    red = boost * 180 * (1 - fraction)
    green = boost * 180 * fraction
    return (int(red), int(green), 0)
|
4b7f5f7bad4ea16bb40f2ed902b0f7ad4256c1a6
| 72,206
|
def trained_unit(object_ids, before_tech):
    """Returns matches where only *1* of the object ids is played prior to before_tech.

    Builds (but does not execute) a parameterized SQL query selecting, per
    match and player, the cases where exactly one distinct unit from
    `object_ids` was created after 00:01:00 and before that player finished
    researching `before_tech`.

    @return: a (query, params) tuple — the SQL text plus its bind
        parameters — for the caller to execute. The ``{sq}`` placeholder
        in the query is presumably filled in by the caller's subquery
        formatting; confirm against call sites.
    """
    query = """
    select oi.match_id, oi.initial_player_number as number, (case when count(distinct initial_object_id) = 1 then true else false end) as value
    from object_instances as oi join ({sq}) as sq on oi.match_id=sq.id
    where initial_class_id=70 and initial_object_id=any(:object_ids) and created > '00:01:00'
    and created < (select finished from research where match_id=oi.match_id and player_number=oi.initial_player_number and technology_id=:before_tech)
    group by oi.match_id, oi.initial_player_number
    having (case when count(distinct initial_object_id) = 1 then true else false end)=true
    """
    return (
        query,
        dict(object_ids=object_ids, before_tech=before_tech)
    )
|
71b897ab5652ca7b4d6cbf4aa12866b91c1386e6
| 72,207
|
def _taxa_prefix_to_taxa(taxa_prefix: str) -> str:
"""Turns the taxa prefix letter into the taxa
Args:
taxa_prefix (str): The taxa prefix that will be converted to taxa.
Returns:
(str): The Taxa
"""
taxa_dic = {"A": "Alveolata", "B": "Bryophyta", "C": "Bacillariophyta", "D": "Amoebozoa", "E": "Euglenozoa",
"F": "Fungi", "G": "Chlorophyta", "H": "Rhodophyta", "I": "Phaeophyceae", "L": "Marchantiophyta",
"M": "Metazoa", "O": "Oomycota", "P": "Haptophyceae", "Q": "Raphidophyceae", "R": "Rhizaria",
"S": "Synurophyceae", "T": "Tracheophyta", "U": "Eustigmatophyceae", "ALL": "All"}
taxa_choice = taxa_dic[taxa_prefix]
return taxa_choice
|
ac76688c5e2fbefeba7280057cb88fe09e8d0fda
| 72,208
|
def get_cdxj_line_closest_to(datetime_target, cdxj_lines):
    """Return the CDXJ line whose datetime (second space-separated field)
    is closest to the target; ties keep the earliest line. Returns None
    for an empty input."""
    target = int(datetime_target)
    best_line = None
    smallest_diff = float('inf')  # math.inf is only py3
    for line in cdxj_lines:
        delta = abs(int(line.split(' ')[1]) - target)
        if delta < smallest_diff:
            smallest_diff, best_line = delta, line
    return best_line
|
35c6b1a374e153982abe6edf2d2e6625cf8e8937
| 72,211
|
import requests
def init_session(proxy=None):
    """Create a new requests session with a browser User-Agent and an
    optional proxy applied to both schemes."""
    session = requests.Session()
    user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36"
    session.headers["User-Agent"] = user_agent
    if proxy is not None:
        # Route both http and https traffic through the same proxy.
        session.proxies = {"http": proxy, "https": proxy}
    return session
|
c7ce8ff7b690ca29fb29bbf637ff6146dd539797
| 72,212
|
def _is_int(string):
"""Returns true or false depending on whether or not the passed in string can be converted to an int"""
result = False
try:
value = int(string)
result = True
except ValueError:
result = False
return result
|
6e46f8eccc1a9ec60c7ab3d276e9b84fd2cf591c
| 72,214
|
def reverse_hex(hex_str):
    """Reverse a hex string pairwise (byte-wise), turning a foreground
    string into its background version."""
    byte_pairs = [hex_str[i:i + 2] for i in range(0, len(hex_str), 2)]
    return "".join(reversed(byte_pairs))
|
370ef63d7c1e2d78397b593cdfbdfc72440b3c69
| 72,217
|
def dummy(inp):
    """Identity function, used to test process_map."""
    return inp
|
b36a6cc1f5f9cb16580331363892bfaf95fcd247
| 72,226
|
import re
def validate_title(title):
    """Return True if the whitespace-normalized title contains only
    letters, digits, hyphens, apostrophes, and spaces."""
    normalized = re.sub(' +', ' ', title.strip())
    allowed = re.compile(r"^[a-zA-Z0-9-' ]*$")
    return bool(allowed.search(normalized))
|
5ed3598a78585c531f832a901bea82cc86da141a
| 72,227
|
def constant(s):
    """Constant pumping: return the reciprocal of *s*."""
    reciprocal = 1.0 / s
    return reciprocal
|
7b494ad1d38e38b1daafed7fc7d81f0b7588a69d
| 72,228
|
def format_val(val: float) -> str:
    """Format float and return as str. Rules are round to four decimal
    places (via the ``.4f`` format), then remove trailing 0s and the
    decimal point if necessary.

    (The previous docstring said "two decimal places", which never
    matched the ``.4f`` the code applies.)

    Args:
        val (float): Number to format.
    Returns:
        str: Number rounded to four decimal places with trailing '0' and '.'
        removed, e.g. 1.5 -> '1.5', 2.0 -> '2'.
    """
    # The two separate rstrip calls matter: rstrip('0.') on e.g. "10.0000"
    # would also strip the integer's trailing zero, giving "1".
    return f'{val:.4f}'.rstrip('0').rstrip('.')
|
0337db588992018c5af73ce00504e565f2eaca60
| 72,229
|
def poly_mul(coefs1, coefs2, zero):
    """
    Multiply two polynomials given by their coefficient lists coefs1 and
    coefs2.

    The ring's zero value must be supplied as `zero`; it seeds the result
    so that the ring's own addition and multiplication are used throughout.
    """
    result = [zero] * (len(coefs1) + len(coefs2) - 1)
    for i, a in enumerate(coefs1):
        for j, b in enumerate(coefs2):
            result[i + j] += a * b
    return result
|
e9ff21a914e8a6546d7a7f9e336c43e8d5d7d845
| 72,230
|
def rotations(t):
    """Return the list of all cyclic rotations of the input string t."""
    doubled = t + t
    return [doubled[start:start + len(t)] for start in range(len(t))]
|
6240e3fb1e06057958eff7cf529115e28afc08ce
| 72,233
|
def find_boundaries(boundaries, ecs, monomer, multimer, symmetric):
    """
    Identify axis boundaries for contact map plots

    Parameters
    ----------
    boundaries : {"union", "intersection", "ecs", "structure"} or tuple
        or list(tuple, tuple)
        Set axis range (min/max) of contact map as follows:

        * "union": Positions either in ECs or 3D structure
        * "intersection": Positions both in ECs and 3D structure
        * "ecs": Positions in ECs
        * "structure": Positions in 3D structure
        * tuple(float, float): Specify upper/lower bound manually
          (the same range is applied to both axes)
        * [(float, float), (float, float)]: Specify upper/lower bounds
          for both x-axis (first tuple) and y-axis (second tuple)
    ecs : pandas.DataFrame
        Table of evolutionary couplings to plot (using columns
        "i" and "j")
    monomer : evcouplings.compare.distances.DistanceMap
        Monomer distance map (intra-chain distances)
    multimer : evcouplings.compare.distances.DistanceMap
        Multimer distance map (multimeric inter-chain distances)
    symmetric : bool
        Sets if distance maps and ECs are symmetric (intra-chain or homomultimer),
        or not (inter-chain).

    Returns
    -------
    (min_x, max_x) : (float, float)
        First and last position on x-axis
    (min_y, max_y) : (float, float)
        First and last position on y-axis

    Raises
    ------
    ValueError
        If ``boundaries`` is not one of the accepted strings, or a
        manually specified tuple/list has the wrong shape.
    """
    def _find_pos(axis):
        """
        Find first and last index along a single contact map axis
        (axis is "i" for x or "j" for y).
        """
        # determine what sets of positions are for ECs/contact maps
        ec_pos = set()
        monomer_pos = set()
        multimer_pos = set()
        # need to merge i and j here if symmetric
        if ecs is not None:
            if symmetric:
                ec_pos = set(ecs.i.astype(int)).union(ecs.j.astype(int))
            else:
                ec_pos = set(getattr(ecs, axis).astype(int))
        if monomer is not None:
            monomer_pos = set(
                getattr(monomer, "residues_" + axis).id.astype(int)
            )
        if multimer is not None:
            multimer_pos = set(
                getattr(multimer, "residues_" + axis).id.astype(int)
            )
        structure_pos = monomer_pos.union(multimer_pos)
        # maximum ranges spanned by structure or ECs
        # if any of the sets is not given, revert to
        # the other set of positions in else case
        # (in these cases, union and intersection will
        # be trivially the one set that is actually defined)
        # NOTE(review): if ECs and both structures are all missing/empty,
        # min()/max() below raise ValueError on an empty set — callers are
        # presumably expected to supply at least one source. TODO confirm.
        if len(ec_pos) > 0:
            min_ec, max_ec = min(ec_pos), max(ec_pos)
        else:
            min_ec, max_ec = min(structure_pos), max(structure_pos)
        if len(structure_pos) > 0:
            min_struct, max_struct = min(structure_pos), max(structure_pos)
        else:
            min_struct, max_struct = min(ec_pos), max(ec_pos)
        # determine and set plot boundaries
        if boundaries == "union":
            min_val = min(min_ec, min_struct)
            max_val = max(max_ec, max_struct)
        elif boundaries == "intersection":
            min_val = max(min_ec, min_struct)
            max_val = min(max_ec, max_struct)
        elif boundaries == "ecs":
            min_val = min_ec
            max_val = max_ec
        elif boundaries == "structure":
            min_val = min_struct
            max_val = max_struct
        else:
            raise ValueError(
                "Not a valid value for boundaries: {}".format(
                    boundaries
                )
            )
        return min_val, max_val
    # check first if range is specified manually
    if isinstance(boundaries, tuple):
        if len(boundaries) != 2:
            raise ValueError(
                "boundaries must be a tuple with 2 elements (min, max)."
            )
        # a single (min, max) tuple sets the same range for both axes
        min_x, max_x = boundaries
        min_y, max_y = boundaries
    elif isinstance(boundaries, list):
        if len(boundaries) != 2 or len(boundaries[0]) != 2 or len(boundaries[1]) != 2:
            raise ValueError(
                "boundaries must be a list of 2 tuples with 2 elements "
                "[(min_x, max_x), (min_y, max_y)]."
            )
        min_x, max_x = boundaries[0]
        min_y, max_y = boundaries[1]
    else:
        # derive ranges from the data, per-axis
        min_x, max_x = _find_pos("i")
        min_y, max_y = _find_pos("j")
    return (min_x, max_x), (min_y, max_y)
|
c4490a7236e107c4a32659bc31d6bdb3f6058a52
| 72,234
|
from datetime import datetime
def get_year(mat_date):
    """
    Compute the calendar year from MATLAB's datenum format.

    Parameters
    ----------
    mat_date : matlab's datenum
        Date to be converted

    Returns
    -------
    year : int
        Year corresponding to the MATLAB datenum
    """
    # MATLAB datenums are offset 366 days from Python's proleptic ordinal;
    # clamp at ordinal 1 (datetime.fromordinal rejects values below 1).
    ordinal = max(int(mat_date) - 366, 1)
    return datetime.fromordinal(ordinal).year
|
bd2ce95b391c4ece79e501a00e122148926eda45
| 72,236
|
def get_titgroup_type_from_titgroup(group):
    """Given a titratable group unique id e.g. (A:0112:CTERM), return the titgroup type (CTERM)."""
    # The type is everything after the last ':' (the whole id if there is none).
    return group.rsplit(':', 1)[-1]
|
02402efd882cf47e2d757214dcca5928096f9f1f
| 72,242
|
def _strigify_val(val):
"""
Stringify a value. Here stringify means:
* If it's a string, surround it with double quotes (so '"' + val '"')
* If it's a callable or a type (class, etc.), then get it's __name__
* If not, just return "{}".format(val)
See an example of use in enhanced_docstr function.
:param val: value to be stringified
:return:
"""
if isinstance(val, str):
return '"' + val + '"'
elif callable(val) or isinstance(val, type):
return val.__name__
else:
return "{}".format(val)
|
64a41c7bbad1fb2dd7053e96eaf1014546842a4e
| 72,250
|
def format_minsep(minsep):
    """
    Return a string representation of the minsep mapping.

    Each entry renders as ``key=value`` with two decimals; list/tuple
    values render as ``key=low-high``. Every entry is followed by a
    single trailing space (including the last one).
    """
    parts = []
    for key, value in minsep.items():
        if isinstance(value, (list, tuple)):
            parts.append(f'{key}={value[0]:.2f}-{value[1]:.2f} ')
        else:
            parts.append(f'{key}={value:.2f} ')
    return ''.join(parts)
|
4f406c18d3658bb553a116fa0c6ebb2a37473386
| 72,252
|
def get_mers(seq, size):
    """
    Get the set of all X-mers (length-``size`` substrings) of seq.

    A sequence of length L yields L - size + 1 windows; if size > L the
    result is the empty set.

    Example
    -------
    >>> get_mers('MEAIKHD', 3) == {'MEA', 'EAI', 'AIK', 'IKH', 'KHD'}
    True

    (The previous docstring example omitted 'IKH'.)
    """
    return {seq[i:i + size] for i in range(len(seq) - size + 1)}
|
516b9afa3ba38ff918e2025a4aed709c18711e3a
| 72,253
|
import requests
def enumerate_a_by_view(cookies, url_root, view_name):
    """Given a view name, return all A records that are contained in that view.

    Returns the decoded JSON payload on success, or False on any
    communication or HTTP error (details are printed).
    """
    url = url_root + "/wapi/v2.9.1/record:a"
    params = {"view": view_name}
    try:
        # NOTE(review): verify=False disables TLS certificate checking.
        response = requests.get(url, params=params, cookies=cookies, verify=False)
    except requests.exceptions.RequestException:
        # A serious problem happened, like an SSLError or InvalidURL
        print("WARN: Unable to communicate with Infoblox.")
        return False
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        # Whoops it wasn't a 200
        print("ERROR: Infoblox reported an error %s." %(response.status_code))
        print(response.text)
        return False
    return response.json()
|
58842f84c495a8b9ad00699444c12c80bbe51b2a
| 72,256
|
def _greedily_extract(a, s):
"""Greedily calculate extractive fragments.
Exactly follows figure 3 in https://aclweb.org/anthology/N18-1065.
Args:
a: tokenized documents.
s: tokenized summary.
Returns:
extractive fragments.
"""
fs = []
i = j = 0
while i < len(s):
f = []
while j < len(a):
if s[i] == a[j]:
ii, jj = i, j
while ii < len(s) and jj < len(a) and s[ii] == a[jj]:
ii, jj = ii + 1, jj + 1
if len(f) < (ii - i):
f = s[i:ii]
j = jj
else:
j += 1
i, j = i + max(len(f), 1), 0
if f:
fs.append(f)
return fs
|
da41a72830fb0486f746f36c18f985550a6ad58a
| 72,258
|
import random
def _PickRandomBuilder(builders):
"""Randomly select one builder to do a full build for.
This selects one of the builders and uses the relative age of the newest build
(in revisions) for each cache as its weight for random selection, favoring the
caches that are older.
"""
for b in builders:
b['newest_build'] = max(
b['cache_stats'].full_build_commit_positions.values() or [0])
# Do `or 1000` to avoid zero weight choices when the cache is new. Any
# number greater than zero would work.
newest_build = max([b['newest_build'] for b in builders]) or 1000
for b in builders:
b['cache_age'] = newest_build - b['newest_build']
# Weighted random selection
weight_sum = sum(b['cache_age'] for b in builders)
r = random.uniform(0, weight_sum)
for b in builders: # pragma: no branch
if r <= b['cache_age']:
return b
r -= b['cache_age']
|
923bf7e1079d978e9a7405ddaf341eb503beff39
| 72,265
|
from typing import Callable
def change_pokemons_to_utf8(jsonify: Callable, pokemons: list) -> dict:
    """Serialize a list of pokemons and run the result through ``jsonify``.

    Args:
        jsonify (function): serializer applied to the collected data
        pokemons (list): objects whose ``serialize()`` output is collected

    Returns:
        dict: whatever ``jsonify`` produces for the list of serialized pokemons
    """
    serialized = [pokemon.serialize() for pokemon in pokemons]
    return jsonify(serialized)
|
5594d321e1767fdf715bbf22137fd1f5847c943d
| 72,268
|
def lagopus_job_id(name, driver, time):
    """
    Generate a unique job ID based on the job name.

    The ID conforms to DNS-1123 restrictions so it can be used directly as
    the name of k8s resources.

    :name: job name as provided by user
    :driver: fuzzing driver (lowercased in the ID)
    :time: creation-time value; its timestamp disambiguates jobs that
           share the same name
    :rtype: str
    :return: job ID of the form ``<name>.<driver>.<YYYY-mm-dd-HH-MM-SS>``
    """
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    return f"{name}.{driver.lower()}.{timestamp}"
|
75befd29bbf1a375ad5d8f6714138d16724c8ed3
| 72,270
|
def resolve_task_field(task, field):
    """Get a task field from the root dict, or from its 'data' sub-dict
    when the field name carries a 'data.' prefix. Missing fields yield None.
    """
    prefix = 'data.'
    if field.startswith(prefix):
        return task['data'].get(field[len(prefix):])
    return task.get(field)
|
569fc153f83b08c8428b9b74ef79c94e7389c164
| 72,271
|
def parse_forum_response(resp):
    """
    Build the forum summary dict from a raw forum response.

    :param resp: forum response (dict with optional 'name'/'hostname' and a
        required 'tags' list of {'name': ...} entries)
    :return: dict with 'Name', 'Hostname' and comma-joined 'Tags'
    """
    tags_value = ', '.join(tag['name'] for tag in resp['tags'])
    return {
        'Name': resp.get('name', ''),
        'Hostname': resp.get('hostname', ''),
        'Tags': tags_value,
    }
|
0df711647f94f3c5ad632e39fc687bc8547b0c67
| 72,273
|
def quote_string(prop):
"""
RedisGraph strings must be quoted,
quote_string wraps given prop with quotes incase
prop is a string.
"""
if not isinstance(prop, str):
return prop
if prop[0] != '"':
prop = '"' + prop
if prop[-1] != '"':
prop = prop + '"'
return prop
|
19cd16de31568ebdf140196c5b6d0db41cc2f260
| 72,274
|
def hot1_get(seqs_1hot, pos):
    """ hot1_get

    Decode the one-hot row at index ``pos`` of the Lx4 array ``seqs_1hot``
    back to its nucleotide. Channels are checked in A, C, G, T order;
    'N' is returned when none of the four channels is set.
    """
    for channel, nt in enumerate('ACGT'):
        if seqs_1hot[pos, channel] == 1:
            return nt
    return 'N'
|
488db94f3a99aea51392a61bf19b130c88397187
| 72,275
|
def cp_cmtsolution2structure(py, cmtsolution_directory, base_directory):
    """
    Build the shell line that copies CMTSOLUTION files from
    ``cmtsolution_directory`` into the simulation structure at
    ``base_directory`` via the seisflow helper module, run with ``py``.
    """
    return (
        f"ibrun -n 1 {py} -m seisflow.scripts.source_inversion.cp_cmtsolution2structure "
        f"--cmtsolution_directory {cmtsolution_directory} "
        f"--base_directory {base_directory}; \n"
    )
|
1291dc1b30fded8baf0492412ba3a813c002d867
| 72,282
|
def get_dns_stream(flows_stream):
    """
    Filter to get only flows containing DNS information.

    :param flows_stream: Input flows
    :return: Flows with DNS information
    """
    def has_dns(flow_json):
        # Keep only records that carry a DNS name field.
        return "ipfix.DNSName" in flow_json.keys()
    return flows_stream.filter(has_dns)
|
610e104d6683d9bba59ec5c92e9e856ea0f041aa
| 72,283
|
def check_dict_has_kv(dictionary: dict, key, value) -> bool:
    """
    Check if `dictionary` has entry `key` with value `value`.

    Uses a direct membership test instead of materializing
    ``list(dictionary.keys())`` as the original did (O(n) list build
    for an O(1) lookup).
    """
    return bool(key in dictionary and dictionary[key] == value)
|
fdbb5f064476c84966a2924dd67a48b51cb94ca7
| 72,285
|
def remove_additional_whitespace(tokens):
    """
    Remove all space characters from every token.

    Note: despite the name, this strips *every* ASCII space from each
    token (``token.replace(' ', '')``), not just duplicated whitespace —
    the docstring now matches the actual behavior.

    :param tokens: A list of tokens
    :return: A new list with the same tokens, all spaces removed
    """
    return [token.replace(' ', '') for token in tokens]
|
096f0ae0d88d21159d4bc7349fd1b7d8eb5ebfe7
| 72,286
|
def set_bit(number: int, position: int, value: bool) -> int:
    """
    Return ``number`` with its ``position``-th bit forced to ``value``.
    Position 0 refers to the LSB, aka the 1s place.
    """
    mask = 1 << position
    if value:
        return number | mask
    return number & ~mask
|
7bc15dae4031536f9bc3e41b640b0dbdd2f55e04
| 72,291
|
def create_points_list(lists):
    """Combine the i-th value of every coordinate list into the i-th point.

    E.g. ``[[x0, x1], [y0, y1]]`` becomes ``[[x0, y0], [x1, y1]]``. The
    number of points is taken from the first list.
    """
    points = []
    for index in range(len(lists[0])):
        points.append([coords[index] for coords in lists])
    return points
|
3527c4f86cda19935ff48da94bda3de9a0bbe894
| 72,296
|
import math
def earth_thickness(distance):
    """Return the thickness of earth for a given distance.

    Bug fix: the original assigned ``msg = print(...)``, so the returned
    message was always None. The message string is now built, printed,
    and returned (printed text is unchanged, including its spacing).

    :param distance: surface distance (km)
    :return: tuple ``(h, msg)`` — thickness in km and the printed message
    """
    EARTH_RADIUS = 6371  # km
    theta = distance / EARTH_RADIUS
    h = EARTH_RADIUS * (1 - math.cos(theta / 2))
    # Double space after "is" reproduces the original print() output.
    msg = "For {} km, the earth thickness is  {} km .".format(distance, round(h, 3))
    print(msg)
    return h, msg
|
61a19420803c089b1267c176bb605d7dbba88485
| 72,304
|
def predict_epslogvar_from_xlogvar(*, x_logvar, logsnr):
    """Scale Var[x] by (1+exp(logsnr)) / (1+exp(-logsnr)) = exp(logsnr).

    In log-variance space this scaling is simply an addition of logsnr.
    """
    return logsnr + x_logvar
|
05a20247a0e1f53e965111c44ebba5e0a21d96b2
| 72,307
|
def satoshi_to_tbtc(tbtc):
    """Convert the token's smallest unit to tBTC.

    Multiplies by 1e-18 (18-decimal token) and rounds to 5 places.
    """
    return round(tbtc * 1e-18, 5)
|
d7a3f251c729c2496d8bbbf4b67d050a710db7d6
| 72,310
|
def BET_radius(surface: float, surface_err: float, density: float):
    """
    Estimate the radius of a nanoparticle based on the measurement of
    BET specific surface area analysis, assuming a spherical morphology
    of the nanoparticle.

    :param density: Density of the material (g/cm³).
    :param surface: BET specific surface area (m²/g).
    :param surface_err: ± Error in measurement of surface (m²/g).
    :return: Tuple ``(radius, radius_err)`` — the estimated radius (nm)
        and its propagated ± error (nm). (The original docstring omitted
        the returned error term.)
    """
    def bet_formula(surface, density):
        # Sphere: specific surface = 3 / (density * r), solved for r.
        return 3 / (density * surface * 1e-3)
    # A larger surface area implies a smaller radius, so the two bounds
    # bracket the estimate from opposite sides (locals renamed to say so;
    # the original "radius_high" actually came from the *high* surface,
    # i.e. the smaller radius).
    r_from_low_surface = bet_formula(surface - surface_err, density)
    r_from_high_surface = bet_formula(surface + surface_err, density)
    radius = (r_from_low_surface + r_from_high_surface) / 2.0
    radius_err = abs(r_from_high_surface - radius)
    return radius, radius_err
|
5fb972e8412def6d4d6bdbf30613c86474135892
| 72,311
|
from typing import Union
import torch
def convert_dict_entries_to_tensors(
    loss_params: dict, device: Union[str, torch.device]
) -> dict:
    """Set scalars in loss to torch tensors for use with unsupervised losses.

    The exact ``type(...)`` checks are deliberate: booleans (a subclass of
    int) and everything that is not precisely a float or int are left
    untouched. The dict is modified in place and also returned.

    Args:
        loss_params: loss dictionary to loop over
        device: device to send floats and ints to

    Returns:
        dict with updated values
    """
    for loss_name, params in loss_params.items():
        for param_name, param_val in params.items():
            if type(param_val) is float:
                loss_params[loss_name][param_name] = torch.tensor(
                    param_val, dtype=torch.float, device=device
                )
            elif type(param_val) is int:
                loss_params[loss_name][param_name] = torch.tensor(
                    param_val, dtype=torch.int, device=device
                )
    return loss_params
|
3620a7a9858d14c0bb01319a92a406a6cf525946
| 72,317
|
def parse_addr(addr, port=20000):
    """ Parse IP addresses and ports.

    Works with:
    IPv6 address with and without port;
    IPv4 address with and without port.

    Returns a ``(host, port, family)`` tuple where family is 6 for IPv6,
    4 for IPv4 and 0 when no address was given. The ``port`` argument is
    the fallback used when ``addr`` does not carry its own port.

    Note: the branch order matters — bracketed-with-port ("[::1]:80") must
    be tested before bracketed-without-port, which must precede the bare
    multi-colon IPv6 form, which must precede the IPv4 host:port form.
    """
    if addr == '':
        # no address given (default: localhost IPv4 or IPv6)
        return "", port, 0
    elif ']:' in addr:
        # IPv6 address with port, e.g. "[::1]:8080"; split on last ':'
        ip, port = addr.rsplit(':', 1)
        return ip.strip('[]'), int(port), 6
    elif ']' in addr:
        # IPv6 address without port, bracketed form "[::1]"
        return addr.strip('[]'), port, 6
    elif addr.count(':') > 1:
        # IPv6 address without port (bare form; >1 colon cannot be IPv4:port)
        return addr, port, 6
    elif ':' in addr:
        # IPv4 address with port, e.g. "10.0.0.1:8080"
        ip, port = addr.split(':')
        return ip, int(port), 4
    else:
        # IPv4 address without port
        return addr, port, 4
|
29fc5901907f81ed144c07532670077e11f69bb7
| 72,319
|
def team_name_to_group_name(team_name):
    """Return the Keycloak group name corresponding to `team_name` (prefix 'TEAM-')."""
    return "TEAM-{}".format(team_name)
|
4996a3ee8dc092850a2031103cb85a137f748778
| 72,325
|
def parse_id_uri(uri):
    """Split a URI whose identifier is the last path segment into components.

    :param str uri: URI
    :returns: prefix (ex: http://rdf.wikipathways.org/...)
    :returns: prefix_namespaces: if there are many namespaces, until the penultimate
    :returns: namespace: the penultimate segment (ex: .../Interaction/)
    :returns: identifier (ex: .../c562c/)
    :rtype: tuple[str,str,str,str]
    """
    parts = uri.split('/')
    # scheme + '//' + host occupy the first three '/'-separated fields
    prefix = '/'.join(parts[:3])
    prefix_namespaces = '/'.join(parts[3:-2])
    namespace, identifier = parts[-2], parts[-1]
    return prefix, prefix_namespaces, namespace, identifier
|
a128377708e0455e360bdcc83a3e9c1dcf2b6214
| 72,327
|
def get_filesystem(node):
    """
    Get the file system information about this node: the filesystem and
    logging-subsystem entries of its hardware status, each converted to a
    dict via ``_asdict()``.

    :param Node node: engine node
    :rtype: list(dict)
    """
    hw = node.hardware_status
    all_status = [fs._asdict() for fs in hw.filesystem]
    all_status.extend(lg._asdict() for lg in hw.logging_subsystem)
    return all_status
|
82c8c6b615a72254b0b04b707ebf7703df6838e7
| 72,328
|
def reverse_match_odds(match, odds):
    """
    Reverse match opponents and odds (away - home -> home - away).

    Note: ``odds`` is reversed in place, so the caller's list is mutated.
    """
    opponents = match.split(" - ")
    opponents.reverse()
    match = " - ".join(opponents)
    odds.reverse()
    return match, odds
|
a7e77f2c0a4fbbbcf2fa1e68a930c9886f27eea3
| 72,329
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.