content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import struct
def read_uint32(stream):
    """Read a big-endian unsigned 32-bit integer from *stream*."""
    (value,) = struct.unpack('>I', stream.read(4))
    return value
|
7703b3bca828a76af708a7a46d608c15357f8ea1
| 87,886
|
def time_string(seconds):
    """Format *seconds* as e.g. ``"1h 2m 3s"``, omitting zero components.

    Fixes over the original: a zero duration now yields ``"0s"`` instead
    of an empty string, and no trailing whitespace is left behind when the
    seconds component is zero (the original returned e.g. ``"1h "``).
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    if hours > 0:
        parts.append("%dh" % hours)
    if minutes > 0:
        parts.append("%dm" % minutes)
    if seconds > 0:
        parts.append("%ds" % seconds)
    return " ".join(parts) if parts else "0s"
|
e12b875c488217b79463b7dccf6c6a863fa574bb
| 87,887
|
def ps_collate(module, ps_data, result):
    """Collate PS output data
    Given Process Show data (ps_data), collate results according to
    module.parameters.
    These module parameters affect the results returned:
    * module.params['sort_key']: (e.g, "%CPU")
    * module.params['reverse']: Boolean to reverse sort (e.g., True)
    * module.params['number_of_results']: How many results to return
    Note: `result` is passed into function so more data is available in
    output if a module.exit_json is requred.
    """
    # First row of ps output is the column headers; remaining rows are values.
    headers = ps_data.pop(0)
    # Turn each non-empty row into a {header: value} dict.
    ps_data = [dict(zip(headers, row)) for row in ps_data if row]
    if module.params['sort_key'] in headers:
        # Sort by the requested column (values are strings, so this is a
        # lexicographic sort), then trim to the requested result count.
        ps_data = sorted(ps_data,
                         key=lambda k: k[module.params['sort_key']],
                         reverse=module.params['reverse'])
        ps_data = ps_data[:module.params['number_of_results']]
    else:
        # fail_json() normally terminates the module run here on a bad key.
        module.fail_json(msg='Sort key %s is not in ps headers: %s' % (
            module.params['sort_key'], headers))
    # NOTE(review): exit_json() also terminates the module in a real Ansible
    # run, which would make the following return unreachable — presumably the
    # return exists for unit-testing with a mocked module; confirm with callers.
    module.exit_json(**result)
    return ps_data
|
c6f1bd03b29b82f31ac5178b9748a705a49659f4
| 87,893
|
def Lewis(**kwargs):
    """
    Compute the Lewis number from either of two input variable sets.

    First method:  Le(Sc=Schmidt number, Pr=Prandtl number)
    Second method: Le(D=mass diffusivity, alpha=thermal diffusivity)

    Raises KeyError when neither complete pair is supplied.
    """
    if 'Sc' in kwargs and 'Pr' in kwargs:
        return kwargs['Sc'] / kwargs['Pr']
    if 'alpha' in kwargs and 'D' in kwargs:
        return kwargs['alpha'] / kwargs['D']
    raise KeyError('Incorrect variable assignment')
|
52b7b8515770e232de7910653e5e44e3f67cdc27
| 87,898
|
def hist_range(array, bins):
    """Compute the histogram range of the values in the array.

    The data range is padded on each side by half a bin width so that the
    extreme values fall inside the outermost bins.

    Parameters
    ----------
    array: array
        the input data.
    bins: int
        the number of histogram bins.

    Returns
    -------
    range: 2-uplet
        the histogram range.
    """
    lo = array.min()
    hi = array.max()
    pad = 0.5 * (hi - lo) / float(bins - 1)
    return (lo - pad, hi + pad)
|
937df240f2e2c91aa8eb4f4fe6d5075a30f0ff76
| 87,904
|
def dc_average_from_dc_values(
    dc_electrical_value: float, dc_thermal_value: float
) -> float:
    """
    Return the un-weighted mean of the electrical and thermal
    demand-covered values.

    :param dc_electrical_value:
        The value for the electrical demand covered.
    :param dc_thermal_value:
        The value for the thermal demand covered.
    :return:
        The average, as a value between 0 (0% demand covered) and
        1 (100% demand covered).
    """
    total = dc_electrical_value + dc_thermal_value
    return 0.5 * total
|
cdba663bcecc8b80f437e85a60383202e9dc2442
| 87,906
|
def get_map_url_for_geolocation(latitude, longitude):
    """Build the Google Maps place URL for `latitude`, `longitude`."""
    coords = '%s,%s' % (latitude, longitude)
    return 'https://www.google.com/maps/place/' + coords + '/@' + coords + ',17z'
|
d51607354ca6c07ea56521930f7409a6a16ee30b
| 87,909
|
def bootstrap_stage(args, stage):
    """
    Return True when this is stage 1 of a multistage build.

    :param args: The args variable generated by parse_parameters
    :param stage: What stage we are at
    :return: True if doing a multistage build and on stage 1, false if not
    """
    if args.build_stage1_only:
        return False
    return stage == 1
|
5209c258192f07b8809dbba13b19865939efcc06
| 87,919
|
import csv
def getdata(fn, colnum, delim=";"):
    """
    Read a column of data from a CSV file.

    Arguments:
        fn: Path of the file to read.
        colnum: Index of the column to read.
        delim: Delimiter to use (defaults to ';').

    Returns:
        A list of (row number, value) tuples, one for every row that has
        the requested column.
    """
    with open(fn) as handle:
        reader = csv.reader(handle, delimiter=delim)
        return [(num, row[colnum])
                for num, row in enumerate(reader)
                if len(row) > colnum]
|
c89efc1775bfb938d0ca90d42801cdc904ae30e9
| 87,923
|
def mock_purge_bad_creds(url, request):
    """
    Mock the response to a purge issued with invalid credentials.

    The service answers such requests with an HTML body even when a JSON
    content-type header is sent, so the text/html content type below is
    intentional, not a typo.
    """
    body = ('<html><head><title>401 Unauthorized</title></head>'
            '<body><p>401 Unauthorized</p><p>You are not authorized'
            ' to access that resource</p></body></html>')
    response = {
        'status_code': 401,
        'content-type': 'text/html;charset=utf-8',
        'server': 'Apache',
        'www-authenticate': 'Basic realm="Luna Control Center"',
        'content': body,
    }
    return response
|
bcea27bda58d9aa1550796ee3802b889e9cf1d31
| 87,924
|
def pick(d, *args):
    """
    Return a new dict containing only the requested keys of *d*.

    Uses ``d.getlist`` when the mapping provides it (e.g. multi-value
    dicts), falling back to plain item access.

    :param d: the dict
    :param args: the keys to pick
    """
    fetch = d.getlist if hasattr(d, "getlist") else d.__getitem__
    return {key: fetch(key) for key in args if key in d}
|
db040245574ef05faa596fcf6accbd50587f834f
| 87,929
|
import re
def decode_range(s):
    """Decode a range string into a list of integers: 8-10,12,14 --> [8, 9, 10, 12, 14]

    Returns None when a comma-separated piece is neither a plain integer
    nor a "lo-hi" range.
    """
    values = []
    for piece in s.split(','):
        bounds = re.search(r'(\d+)-(\d+)', piece)
        if bounds:
            lo = int(bounds.group(1))
            hi = int(bounds.group(2))
            values += range(lo, hi + 1)
        elif piece.isdigit():
            values.append(int(piece))
        else:
            return None
    return values
|
5b39fd9819c0b9055e3254c8cdf86ad8aff2ab6c
| 87,931
|
def _idx_to_conv(idx, conv_width, anchors_per_loc):
"""
Converts an anchor box index in a 1-d numpy array to its corresponding 3-d index representing its convolution
position and anchor index.
:param idx: non-negative integer, the position in a 1-d numpy array of anchors.
:param conv_width: the number of possible horizontal positions the convolutional layer's filters can occupy, i.e.
close to the width in pixels divided by the cumulative stride at that layer.
:param anchors_per_loc: positive integer, the number of anchors at each convolutional filter position.
:return: tuple of the row, column, and anchor index of the convolutional filter position for this index.
"""
divisor = conv_width * anchors_per_loc
y, remainder = idx // divisor, idx % divisor
x, anchor_idx = remainder // anchors_per_loc, remainder % anchors_per_loc
return y, x, anchor_idx
|
89d3936a0a7b01daa7be1e0dc8c805092873117c
| 87,933
|
from typing import List
def parse_notes(
    listNotes: List,
) -> List:
    """Given a list of music21 notes, rests and/or chords,
    group the tuplets as a single list, using the tuplets.type attribute.
    """
    result = []
    i = 0
    while i < len(listNotes):
        el = listNotes[i]
        tups = el.duration.tuplets
        if len(tups) == 0:
            # Not part of a tuplet: keep the element as-is.
            result.append(el)
            i += 1
        else:
            # Start of a tuplet group: keep appending following elements
            # until the element whose first tuplet is typed "stop" has been
            # added to the group.
            t_group = [el]
            j = 1
            # NOTE(review): assumes every started group eventually hits a
            # "stop"-typed element and that each element in it has a
            # non-empty `tuplets` — otherwise this raises IndexError.
            while el.duration.tuplets[0].type != "stop":
                el = listNotes[i + j]
                t_group.append(el)
                j += 1
            result.append(t_group)
            # Skip the outer scan past the whole collected group.
            i += len(t_group)
    return result
|
ed96f55e63ee0e0c209f957bff141d8afd14ca20
| 87,938
|
def none_as_unknown(text, number):
    """
    Return *text* unless it is None or empty, otherwise 'unknown(number)'.

    :type text: str
    :param text: string that we want to format
    :type number: int
    :param number: number used in the fallback text
    """
    return text if text else "unknown({0})".format(number)
|
47ae633b3206b03f39b52f9e327fd279b2f9d69b
| 87,942
|
import base64
def b64safe_encode(payload):
    """
    URL-safe base64-encode *payload* and strip the trailing '=' padding.
    """
    encoded = base64.urlsafe_b64encode(payload)
    return encoded.rstrip(b'=')
|
cae1f293800c3ebfba0b37513c7f5e390bfabe85
| 87,945
|
def calc_IW(P, T):
    """
    Fe-FeO (Iron-Wustite) buffer value at pressure P.

    log fO2 = (a0 + a1*P) + (b0 + b1*P + b2*P^2 + b3*P^3) / T
    with a0=6.54106, a1=0.0012324,
         b0=-28163.6, b1=546.32, b2=-1.13412, b3=0.0019274

    References
    ----------
    Campbell et al. (2009) High-pressure effects on the iron-iron oxide
    and nickel-nickel oxide oxygen fugacity buffers.

    Parameters
    ----------
    P: float
        Pressure in GPa
    T: float or numpy array
        Temperature in degrees K

    Returns
    -------
    float or numpy array
        log_fO2
    """
    a_term = 6.54106 + 0.0012324 * P
    b_term = -28163.6 + 546.32 * P - 1.13412 * P**2 + 0.0019274 * P**3
    return a_term + b_term / T
|
23ef09753f0ddd8e73af6b494cb131c888521b52
| 87,946
|
def fis_gbellmf(x: float, a: float, b: float, c: float):
    """Generalized Bell membership function: 1 / (1 + |(x - c) / a| ** b).

    Fix: the distance term is now an absolute value.  The original raised
    a possibly negative base to a (possibly fractional) power, yielding
    complex numbers or negative "memberships" for x < c, while the bell MF
    is symmetric about c by definition.  (The textbook form uses exponent
    2*b; the original's single-b exponent is kept so results for
    x >= c are unchanged.)
    """
    t = abs((x - c) / a)
    if t == 0:
        if b == 0:
            return 0.5  # 0**0 convention, kept from the original
        if b < 0:
            return 0    # avoid 0 ** negative -> ZeroDivisionError
    return 1.0 / (1.0 + (t ** b))
|
abc39b10f6bb3bbd02a8b2d5c25cdb167c1bbe8b
| 87,953
|
def normalize_protocol(raw_protocol):
    """
    Normalize a route-protocol token across IOS and NXOS outputs.

    IOS route tables use single-letter codes (e.g. 'C') while NXOS spells
    some out (e.g. 'direct'); both map to one canonical name here.
    Unrecognized values are returned unchanged.

    :param raw_protocol: <str> The protocol value found in the route table output
    :return: A normalized name for that type of route.
    """
    first = raw_protocol[0]
    if first == 'S' or "static" in raw_protocol:
        return 'static'
    if first == 'C' or 'direct' in raw_protocol:
        return 'connected'
    if first == 'L' or 'local' in raw_protocol:
        return 'local'
    by_letter = {
        'D': 'eigrp',
        'O': 'ospf',
        'B': 'bgp',
        'i': 'isis',
        'R': 'rip',
    }
    return by_letter.get(first, raw_protocol)
|
140ccd3bb7f7ba18a36dec91cebe6ce6db03a8de
| 87,954
|
def sum_bbox(bbox1, bbox2):
    """Combine two bounding boxes into the smallest box containing both.

    When either box is empty, the other is returned (via list
    concatenation, as in the original).

    :type bbox1: list
    :param bbox1: first bounding box
    :type bbox2: list
    :param bbox2: second bounding box
    :rtype: list
    :return: new bounding box
    """
    if not (bbox1 and bbox2):
        return bbox1 + bbox2
    all_x = (bbox1[0], bbox1[2], bbox2[0], bbox2[2])
    all_y = (bbox1[1], bbox1[3], bbox2[1], bbox2[3])
    return [min(all_x), min(all_y), max(all_x), max(all_y)]
|
83c0eddfe03e66843cbbc6c7de21be6688905087
| 87,956
|
def vhdl_register_address(name, address_offset):
    """
    Render a VHDL integer-constant declaration for the supplied register.
    """
    return f"constant {name} : integer := {address_offset};\n"
|
219c621eafa01618bcb342b85b35072ff63ed7fa
| 87,957
|
def recorded_views(label):
    """
    Look up the recorded view limits for a label.

    Unknown labels fall back to the 'default' view.

    Parameters
    ----------
    label: string
        Dictionary key for the view.

    Returns
    -------
    view: 4-tuple of floats
        The view ('xmin', 'xmax', 'ymin', 'ymax').
    """
    views = {
        'snake': (-1.0, 2.0, -1.5, 1.5),
        'wake': (-1.0, 15.0, -4.0, 4.0),
        'domain': (-15.0, 15.0, -15.0, 15.0),
        'default': (-1.0, 1.0, -1.0, 1.0),
    }
    return views.get(label, views['default'])
|
c1a192a1c2333ad5308e9eeafc22dc80600bbe9a
| 87,962
|
def _calculate_key(name):
"""Generate a Redis key with the given name.
Args:
name: The name of the named actor.
Returns:
The key to use for storing a named actor in Redis.
"""
return b"Actor:" + name.encode("ascii")
|
2272bd2c6acb23e8d44c3aa2ba43815d0dc9398d
| 87,967
|
import json
def button_postback(title, payload):
    """
    Build a postback-button dict for use with send_buttons.

    :param title: Button title
    :param payload: Button payload; dicts and lists are JSON-serialized
    :return: dict
    """
    serialized = json.dumps(payload) if isinstance(payload, (dict, list)) else payload
    return {
        'type': 'postback',
        'title': title,
        'payload': serialized,
    }
|
26a15d9efa762cb3f45fb79969f436dbec733de2
| 87,970
|
def add_years(date, years):
    """Shift *date* by `years` years (Feb 29 becomes Feb 28 off-leap)."""
    target_year = date.year + years
    try:
        return date.replace(year=target_year)
    except ValueError:
        # Only Feb 29 can fail here: clamp to Feb 28 in the target year.
        return date.replace(year=target_year, day=date.day - 1)
|
4ca7df7b7b3ce96cd6d74ccc7212cd7afcd2db64
| 87,971
|
import torch
def ortho_reg_fn(V, ortho_lambda=1.):
    """Penalty encouraging the columns of V to be orthonormal.

    Parameters
    ----------
    V : shape (hidden, fdim)
        Projection layer.
    ortho_lambda : float
        Regularization hyperparameter.

    Returns
    -------
    reg_val : float
        Squared Frobenius norm of (V^T V - I), scaled by ortho_lambda.
    """
    gram = torch.mm(V.t(), V)
    identity = torch.eye(V.shape[1], device=V.device, dtype=V.dtype)
    return ortho_lambda * torch.sum((gram - identity) ** 2)
|
dc9d2f493689255a3744a18e61b08917d28df7a3
| 87,972
|
import six
def list2str(s):
    """Join the list of lines `s` into a single newline-separated string;
    strings pass through unchanged.

    Fix: uses a plain ``str`` check instead of ``six.string_types``.  The
    file targets Python 3 (it uses f-strings and builtin generics
    elsewhere), where the two are equivalent and the ``six`` shim is dead
    weight.
    """
    return s if isinstance(s, str) else '\n'.join(s)
|
c4c51626879a2f0b60b7e3f9f9d802b8faf06e85
| 87,975
|
def load_linelist(filename):
    """Load standard wavelength line list from a given file.

    Blank lines and comment lines (starting with '#', '%', '!' or '@') are
    skipped; each remaining line is "<wavelength> [species]".

    Args:
        filename (str): Name of the wavelength standard list file.
    Returns:
        *list*: A list of (wavelength, species) tuples; species is '' when
        a line has no second column.
    """
    linelist = []
    # 'with' guarantees the handle is closed even when float() raises on a
    # malformed line (the original leaked the file object in that case).
    with open(filename) as infile:
        for row in infile:
            row = row.strip()
            if len(row) == 0 or row[0] in '#%!@':
                continue
            fields = row.split()
            wl = float(fields[0])
            species = fields[1] if len(fields) > 1 else ''
            linelist.append((wl, species))
    return linelist
|
396d7c3d51d011ff85e3b387eae9403a3ae2f767
| 87,979
|
from typing import List
from typing import Pattern
def _is_in_ignore_list_re(element: str, ignore_list_re: List[Pattern]) -> bool:
"""Determines if the element is matched in a regex ignore-list."""
return any(file_pattern.match(element) for file_pattern in ignore_list_re)
|
65c1f1d55a88b64b98d60c4ae861eb49d6c6300d
| 87,983
|
import re
def get_wiki_links(text: str) -> list:
    """
    Get all ``[[...]]`` wiki links from the given text.

    Each list item is a dictionary with the following keys:
    - wiki_link: the exact match
    - link: the extracted link (a trailing `_index` marker is stripped)
    - text: the display text (the link itself when no '|' is present)

    Fix: the '|' split now uses maxsplit=1, so a link whose display text
    itself contains '|' no longer raises ValueError.
    """
    wiki_links = []
    for match in re.finditer(r'\[\[(.*?)\]\]', text):
        inner = match.group(1)
        out = {
            "wiki_link": match.group(),
        }
        if '|' in inner:
            out['link'], out['text'] = inner.split('|', 1)
        else:
            out['link'] = inner
            out['text'] = inner
        # if the link ends with `_index` remove it
        if out['link'].endswith('_index'):
            out['link'] = out['link'][:-len('_index')]
        wiki_links.append(out)
    return wiki_links
|
133c94d532cde1ba95ecf310c2ec64923a3cc9b9
| 87,988
|
def find_node(g, id):
    """Return the node of graph *g* with the given node id."""
    node = g.get_node(id)
    return node
|
72c2b96194bbd518e15cabdeb60edc5b6acfc69c
| 87,989
|
def mix(x, y, a):
    """
    Re-implementation of OpenGL's mix():
    https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/mix.xhtml
    The return value is computed as x*(1 - a) + y*a.
    """
    weight_x = 1.0 - a
    return x * weight_x + y * a
|
d95f7026b47273e91891dfdc5f39905e99533e50
| 87,998
|
from typing import Union
from pathlib import Path
def find_test_file(filename: Union[str, Path]) -> Path:
    """Resolve *filename* against the directory containing this test suite."""
    suite_dir = Path(__file__).parent
    return suite_dir / Path(filename)
|
32ca3745d79b86df2f44cac5fd87c26af22736fe
| 88,000
|
def monthly_to_annual(monthly):
    """Convert a monthly amount to an annual cost.

    Args:
        monthly (float): Monthly cost.
    Returns:
        (float): Annual cost (12 times the monthly cost).
    """
    months_per_year = 12
    return months_per_year * monthly
|
300a892340e7c2903d06c0ef7fd057dc789bdd89
| 88,003
|
def compute_label_weights_using_sample_size(label_dict, label_sample_size):
    """Compute per-class weights inversely proportional to sample size.

    Args:
        label_dict: a dictionary with class labels as keys (strings) and
            encoded label index as values (ints).
        label_sample_size: a dictionary with class labels as keys (strings)
            and sample size as values (ints).

    Returns:
        label_weights: list of 1/sample_size weights, ordered by encoded
        label index.
    """
    # Re-key the sample sizes by encoded class index.
    label_code_sample_size = {
        label_dict[label]: size for label, size in label_sample_size.items()
    }
    print('label_code_sample_size={}'.format(label_code_sample_size))
    # Weight each class by the reciprocal of its sample count.
    return [
        1 / float(label_code_sample_size[idx])
        for idx in range(len(label_code_sample_size))
    ]
|
15a2363f08fbab034ee1f833809d35ea5a5b5f73
| 88,004
|
def _calculate_zone(point, zones):
"""
Given a point (x, y), and a list of Zones (df´s)
Return in which Zone the placemark is
"""
for index, zone in zones.iterrows():
if point.within(zone['geometry']):
return zone['Name']
return 'Sin Zona'
|
b00e98bcd7a36c6f649fff059fbc7782345720d8
| 88,007
|
def slope(x1, y1, x2, y2):
    """Return the slope of the line through (x1, y1) and (x2, y2)."""
    rise = y2 - y1
    run = x2 - x1
    return rise / run
|
76ecb294d8bcf08c9fd1a88d7403d3445b10b8c5
| 88,011
|
def _bit_storing_size(n):
"""Returns the number of bytes needed for storing n bits.
>>> _bit_storing_size(16)
2
>>> _bit_storing_size(17)
3
"""
return -((-n) // 8)
|
07dda7adc1f196aed6cb153a9d9898d7fa6ab875
| 88,016
|
from typing import Any
def to_lower(value: Any) -> str:
    """Convert value to lower case string. Handles cases where value is
    not a string (the value is then returned unchanged).

    Parameters
    ----------
    value: any
        Value that is converted to lower case string.

    Returns
    -------
    string
    """
    if isinstance(value, str):
        return str.lower(value)
    return value
|
5148c5d2ea10acc202b3bf67e26cb64066b2e834
| 88,021
|
import pathlib
def get_path_from_file(file_path):
    """Return the parent directory of *file_path* as a string.

    e.g. file_path = 'path/to/file.txt' returns 'path/to'
    """
    return str(pathlib.Path(file_path).parent)
|
7312d175540322bf1b37480c0e26c2e1fdaa5d14
| 88,027
|
import re
def alphanum_key(key):
    """
    Natural numeric sort key: digit runs become ints, so
    ("p10" => ["p", 10, ""]) sorts after ("p1" => ["p", 1, ""]).
    Adapted from https://stackoverflow.com/a/2669120/240553
    """
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'([0-9]+)', key)]
|
16a0f085b2051dbd951b801dd6713845eae9c09c
| 88,034
|
def pitch_to_str(pitch):
    """
    Return the note-name string for a MIDI pitch number.

    Args:
        pitch (int): MIDI pitch number
    Returns:
        str: corresponding note name (octave is discarded, e.g. 60 -> 'C')
    """
    note_names = ('C', 'C#', 'D', 'D#', 'E', 'F',
                  'F#', 'G', 'G#', 'A', 'A#', 'B')
    return note_names[pitch % 12]
|
3ce4fcb8f6906cc2adaf40adcc933e5fff834e93
| 88,039
|
import ast
def ast_contains_node(ast_node, nodetypes):
    """
    Recursively check whether *ast_node* (a node or list of nodes) contains
    a child of type *nodetypes* (a type or a tuple of types).
    """
    roots = ast_node if type(ast_node) is list else [ast_node]
    return any(
        isinstance(child, nodetypes)
        for root in roots
        for child in ast.walk(root)
    )
|
ce6c1f6f85fe1452b82a6dfb8144b8e518c76344
| 88,042
|
def listOflistsToString(listOfLists):
    """Convert a list of lists of strings to a single string.

    Elements within each sub-list are joined with spaces; the resulting
    lines are separated by newlines.

    Args:
        listOfLists: a list of lists, containing strings in the lowest level
    Returns:
        listOfLists as a string, each top level list separated by a newline
    """
    # The original carried a redundant `line = line` no-op and an explicit
    # accumulator list; a generator expression performs the same join.
    return "\n".join(" ".join(element) for element in listOfLists)
|
9312aaa2101632e1193ca59301a0862a1ee8e5fa
| 88,043
|
def is_location_href(href: str) -> bool:
    """
    Tell whether this href is an encoded location reference.
    """
    prefix = "location:"
    return href[:len(prefix)] == prefix
|
59b67d0beda6321b84a8488cc724335bde127e76
| 88,046
|
def attempt(func, *args, **kargs):
    """Call *func* with the given arguments, returning either its result
    or the caught exception object.

    Args:
        func (function): The function to attempt.

    Returns:
        mixed: Returns the `func` result or error object.

    Example:

        >>> results = attempt(lambda x: x/0, 1)
        >>> assert isinstance(results, ZeroDivisionError)

    .. versionadded:: 1.1.0
    """
    try:
        return func(*args, **kargs)
    except Exception as caught:  # pylint: disable=broad-except
        return caught
|
93e22f01bd0c8f086160002f4db7ccbcd282a1df
| 88,049
|
def format_theta(theta_topic_row, limit=10):
    """
    Compact a full theta row into its top *limit* (index, probability)
    pairs, sorted by probability from high to low.
    """
    indexed = list(enumerate(theta_topic_row))
    indexed.sort(key=lambda pair: pair[1], reverse=True)
    return indexed[:limit]
|
05c30a16c40d1c474aabb4e054ff8d9cba6e1cd5
| 88,050
|
import requests
def scrape(address):
    """Get the HTML page for the given URL.

    Parameters: address: the address to fetch the HTML-page from
    Returns: requests-object if successful, else None
    Algorithm: Simple wrapper for requests.get(address)
    """
    try:
        response = requests.get(address)
        response.raise_for_status()
        return response
    except Exception as e:
        print("An exception occured: {}".format(e))
        return None
|
ff449d56ca792b65d2b2e36c25337fc09303c9c1
| 88,051
|
def constant_outfile_iterator(outfiles, infiles, arggroups):
    """Iterate over all output files, pairing each one with the single
    input file and the single argument group."""
    assert len(infiles) == 1
    assert len(arggroups) == 1
    infile, arggroup = infiles[0], arggroups[0]
    return ((outfile, infile, arggroup) for outfile in outfiles)
|
1e022ef1b3f41dc3252d8b49994bd60831023ed9
| 88,055
|
def check_if_two_nodes_in_same_building(i, j, nf):
    """
    :param i: node id 1
    :param j: node id 2
    :param nf: number of floors in each building
    :return: True, if they are in same building, else False
    """
    # Building index = node id divided by floors-per-building (truncated).
    return int(i / nf) == int(j / nf)
|
b8eac4de8ca43686fcc641121b379cc8884b7efb
| 88,067
|
from typing import Any
import hashlib
def md5(item: Any):
    """Return the hex MD5 digest of *item*, UTF-8 encoded first."""
    digest = hashlib.md5(item.encode('utf-8'))
    return digest.hexdigest()
|
ea056ee6a56463681008fa0ac688029cc982946b
| 88,068
|
import aiohttp
from typing import Union
from typing import Any
import json
async def json_or_text(response: aiohttp.ClientResponse) -> Union[dict[str, Any], str]:
    """Parse an `aiohttp.ClientResponse` body as JSON when its content-type
    header says "application/json"; fall back to the raw text otherwise
    (including when the JSON is malformed or the header is absent)."""
    text = await response.text(encoding="utf-8")
    content_type = response.headers.get("content-type")
    if content_type == "application/json":
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            pass
    return text
|
ec138dd8b412ac7c10d695b5d170c6cee9d65ea5
| 88,070
|
def curry(fun, *args):
    """Curry *fun*: calls accumulate positional arguments until enough
    have been collected to invoke the function.

    Fix: the original read ``fun.func_code``, the Python 2 attribute name;
    on Python 3 (which this file targets) the code object is
    ``fun.__code__``, so every curried call raised AttributeError.

    >>> @curry
    ... def F(a,b,c):
    ...     return a+b+c
    >>> F(1)(2)(3)
    6
    >>> F(4,5)(6)
    15
    >>> F(7)(8,9)
    24
    >>> F(10,11,12)
    33
    """
    def curried(*moreArgs):
        A = args + moreArgs
        if len(A) < fun.__code__.co_argcount:
            return curry(fun, *A)
        else:
            return fun(*A)
    return curried
|
0b28eb78240c69047e987a33cec6e92198807d9c
| 88,071
|
def _normalize_screen_name(screen_name: str) -> str:
"""Normalize the screen name, or raise an exception if invalid."""
normalized = screen_name.strip()
if not normalized or (' ' in normalized) or ('@' in normalized):
raise ValueError(f"Invalid screen name: '{screen_name}'")
return normalized
|
d3ca22e4c83dc784f2ec3674a320ec43901b9b48
| 88,083
|
import collections
def tf_namedtuple(name, fieldnames_and_docs):
"""A `namedtuple` class factory that supports field-docstrings.
```
cls = tf_namedtuple("MyNamedTuple",[("a", "Docs for a"),
("b", "Docs for b")])
cls.a.__doc__ # ==> "Docs for a"
```
Args:
name: The name of the new class.
fieldnames_and_docs: A sequence of `(fieldname, docstring)` pairs. The
fieldnames are passed to `collections.namedtuple`.
Returns:
A namedtuple class.
"""
fieldnames_and_docs = list(fieldnames_and_docs)
fieldnames = [fieldname for fieldname, doc in fieldnames_and_docs]
cls = collections.namedtuple(name, fieldnames)
for fieldname, doc in fieldnames_and_docs:
old_prop = getattr(cls, fieldname)
new_prop = property(fget=old_prop.fget, fset=old_prop.fset,
fdel=old_prop.fdel, doc=doc)
setattr(cls, fieldname, new_prop)
return cls
|
fcb7bcb58f2207e809ad8a851d02aaa9417b12d4
| 88,099
|
def merge_value_dicts(old_value_dict, new_value_dict, zero_missing=False):
    """
    Merge an old and new value dict, returning the merged value dict.

    Value dicts map from label-values tuple -> metric value.  New values
    take precedence.  Old entries absent from the new dict are carried
    over, but reset to zero when *zero_missing* is set.
    """
    merged = new_value_dict.copy()
    for label_values, old_value in old_value_dict.items():
        if label_values not in new_value_dict:
            merged[label_values] = 0 if zero_missing else old_value
    return merged
|
18c2cb84e89605f189fca21fecf6f3a65144f9a8
| 88,101
|
def is_an_account_settings_page(context, account):
    """
    Return `true` if the current path is for one of the account "settings"
    pages.  Used to determine whether or not to show the settings submenu.
    """
    path = context["request"].path
    settings_prefixes = (
        "/me/",
        f"/{account.name}/profile",
        f"/{account.name}/publishing",
    )
    return path.startswith(settings_prefixes)
|
acafc68d3b2faaa93061766c7990c0606d18ae0e
| 88,106
|
def ensure_operators_are_strings(value, criteria_pattern):
    """
    Ensure both arguments are unicode (string) values, decoding UTF-8
    bytes where needed.

    This way all the operators expecting string / unicode values work even
    when one of the inputs is bytes (e.g. trigger payloads not controlled
    by the end user under Python 3 deployments).

    :return: tuple(value, criteria_pattern)
    """
    def _as_text(item):
        # Only bytes are converted; everything else passes through.
        return item.decode('utf-8') if isinstance(item, bytes) else item

    return _as_text(value), _as_text(criteria_pattern)
|
210dc4f3e1b92dbb441f32d00e6e9b64b1e1f215
| 88,111
|
def cria_copia_posicao(pos):
    """
    Return a (lower-cased) copy of the given position.

    :param pos: position
    :return: position
    """
    copia = pos.lower()
    return copia
|
af87283c888f1710b0141cfd6bd7ebc2cc58df9f
| 88,118
|
def is_prime(n):
    """Check whether a given number is prime.

    A prime number is an integer greater than 1 which is not a product of
    two strictly smaller positive integers.  Non-int inputs return False
    (the exact ``type`` check also rejects bools, as in the original).

    Args:
        n (int): integer to check
    Returns:
        bool: whether or not n is a prime number

    Improvement: trial division now stops at sqrt(n) and skips even
    divisors, turning the original O(n) scan into O(sqrt(n)).
    """
    if type(n) is not int or n <= 1:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
|
36e24fb289d61ec7bf5651fd96a1fc0004942192
| 88,120
|
def gen_HttpApiLog(source, action, target):
    """Generate a Http Api Log object from action and target."""
    return {
        "@type": "HttpApiLog",
        "Subject": source,
        "Predicate": action,
        "Object": target,
    }
|
a3b5757b15121daa5a0b74f2a74994314677d345
| 88,121
|
def sort_012(input_list):
    """
    Sort a list containing only 0s, 1s and 2s in a single traversal
    (Dutch national flag partitioning).

    Args:
        input_list(list): List to be sorted
    Returns:
        The same list, sorted in place; an error string for non-list input.
    """
    if type(input_list) != list:
        return "Inappropriate input type"
    low = 0                      # next slot for a 0
    high = len(input_list) - 1   # next slot for a 2
    cursor = 0
    while cursor <= high:
        current = input_list[cursor]
        if current == 0:
            # Swap the 0 down to the low boundary; both indices advance.
            input_list[cursor] = input_list[low]
            input_list[low] = 0
            low += 1
            cursor += 1
        elif current == 2:
            # Swap the 2 up to the high boundary; the cursor stays put so
            # the swapped-in value is re-examined next iteration.
            input_list[cursor] = input_list[high]
            input_list[high] = 2
            high -= 1
        else:
            # A 1 stays where it is; placing 0s and 2s sorts the 1s too.
            cursor += 1
    return input_list
|
677857a5323e519dff97ae88a06047a0d06189ec
| 88,127
|
import math
def get_bounding_box(location, radius):
    """
    Return the lat/lon bounding box around *location* (lat, lon) for the
    given *radius* in kilometers.
    """
    equator_len = 111  # approx. km per degree of latitude
    lat, lon = location[0], location[1]
    # Kilometers per degree of longitude shrink with the cosine of latitude.
    lon_degree_km = math.cos(lat * math.pi / 180) * equator_len
    lat_delta = radius / equator_len
    lon_delta = radius / lon_degree_km
    return {
        "lat_min": lat - lat_delta,
        "lat_max": lat + lat_delta,
        "lon_min": lon - lon_delta,
        "lon_max": lon + lon_delta,
    }
|
d3f6e18d5a29f62ab216c79671664cec66ed29cf
| 88,132
|
def set_radec(hdu_hdr, ra, dec, x, y):
    """
    Copy the Header and force the RA/DEC value at pixel position (x, y)
    by rewriting the WCS reference keywords.

    Args:
        hdu_hdr: Header
        ra: float (deg)
        dec: float (deg)
        x: float
            x pixel position in the image
        y: float
            y pixel position in the image
    Returns:
        new_hdr: Header
    """
    new_hdr = hdu_hdr.copy()
    reference = {'CRVAL1': ra, 'CRVAL2': dec, 'CRPIX1': x, 'CRPIX2': y}
    for keyword, val in reference.items():
        new_hdr[keyword] = val
    return new_hdr
|
1ad2636be19691034ba3d344c6bb714330d5954a
| 88,135
|
from typing import Dict
from typing import Any
def build_ddo_dict(
    did: str,
    nft_address: str,
    chain_id: int,
    metadata: Dict[str, Any],
    services: Dict[str, Any],
    credentials: Dict[str, Any],
) -> dict:
    """Build a ddo dict, used for testing. See for details:
    https://github.com/oceanprotocol/docs/blob/v4main/content/concepts/did-ddo.md#ddo
    """
    ddo = {
        "@context": ["https://w3id.org/did/v1"],
        "id": did,
        "version": "4.0.0",
    }
    ddo.update(
        nftAddress=nft_address,
        chainId=chain_id,
        metadata=metadata,
        services=services,
        credentials=credentials,
    )
    return ddo
|
94829feb602b708b93f3d924266c0cc96b6142b5
| 88,140
|
def _is_kron(tt_a):
"""Returns True if the argument is a Kronecker product matrix.
Args:
t_a: `TensorTrain` or `TensorTrainBatch` object.
Returns:
bool
"""
if tt_a.is_tt_matrix():
return max(tt_a.get_tt_ranks()) == 1
return False
|
20f5253c6be3c2c05643cc24da996e1753418ddf
| 88,142
|
import re
import requests
def check_github(repository: str) -> str:
    """Check that *repository* is valid and exists, returning the full URL.

    Accepts either a full ``https://`` URL or a ``username/repository``
    short form, which is expanded to a GitHub URL before a HEAD probe.

    :raises ValueError: on a malformed name or a non-200 HEAD response.
    """
    if not repository.startswith("https://"):
        if not re.match(r"\w+/\w+", repository):
            # Fix: "userame" typo in the original message.
            raise ValueError("Invalid repository name, format should be full repository URL or username/repository")
        repository = f"https://github.com/{repository}"
    resp = requests.head(repository)
    if resp.status_code != 200:
        # Fix: the original message opened "(url:" without ever closing it.
        raise ValueError(
            f"Repository does not exist or access denied (url: {repository}, status: {resp.status_code})")
    return repository
|
ec6583863f2c67e2ca9f8dae683dc87da78fa583
| 88,145
|
import json
def load_json(string):
    """
    Parse *string* as JSON and return the resulting object.
    Return None if the string isn't valid JSON.
    """
    try:
        return json.loads(string)
    except ValueError:
        return None
|
af4722b73fb0f1a5fc401fe5b5e40f2b90f3f2e6
| 88,146
|
def format_item(item, xmlns, key):
    """
    Extract the content of child *key* from an rss *item*, honoring the
    feed's XML namespace.  For 'link' elements in a namespaced feed the
    'href' attribute is returned; otherwise the joined element text.

    :param item: A rss item
    :param xmlns: XML namespace of the rss feed
    :param key: The key to format
    :type item: WebDriver
    :type xmlns: str
    :type key: str
    :return: The item content
    :rtype: str
    """
    qualified_key = (xmlns + key) if xmlns else key
    data = item.find(qualified_key)
    if xmlns and key == 'link':
        return data.attrib.get('href')
    return ''.join(data.itertext())
|
10b50932eb71977b76bf4b968227b9d642ee89d5
| 88,152
|
def unpack_vis(vis_vec, shape):
    """Unpack a packed real visibility vector into complex unpolarised data.

    Parameters
    ----------
    vis_vec : np.ndarray[:]
        Packed visibility data.
    shape : tuple
        Shape of the data (baseline, time, freq).

    Returns
    -------
    vis_vec : np.ndarray[baseline, time, freq]
        Unpolarised visibility data.
    """
    # Frequency-major layout with real/imag pairs in the second axis.
    packed = vis_vec.reshape((shape[-1], 2) + shape[:-1])
    real_part = packed[:, 0]
    imag_part = packed[:, 1]
    return (real_part + 1.0J * imag_part).transpose(1, 2, 0)
|
2ad1de8ed8e5b5bcbd6a3de921597239d13cd20d
| 88,153
|
def IsEven(val):
    """
    Return whether the supplied value (rounded to the nearest integer)
    is an even number.

    Fix: the original tested ``2 * (val / 2) == val``, which under
    Python 3's true division is (approximately) true for every integer,
    making the function always return True.  Parity is now tested with
    the modulo operator.
    """
    val = int(round(val))
    return val % 2 == 0
|
579979549a6fdafa38768b375965bc77a57e591d
| 88,155
|
import torch
def _get_label_batch(label,
                     num_timesteps=0,
                     num_classes=0,
                     num_batches=0,
                     timesteps_noise=False):
    """Get label batch. Supports getting a sequence of labels along timesteps.
    We support the following use cases ('bz' denotes ``num_batches`` and 'n'
    denotes ``num_timesteps``):

    If ``num_classes`` is 0, return None (and ``label`` must be None).

    If timesteps_noise is True, we output a label whose dimension is 2.
    - Input is [bz, ]: Expand to [n, bz]
    - Input is [n, ]: Expand to [n, bz]
    - Input is [n*bz, ]: View to [n, bz]
    - Dim of the input is 2: Return the input, ignore ``num_batches`` and
      ``num_timesteps``
    - Callable or None: Generate label shape as [n, bz]
    - Otherwise: Raise error

    If timesteps_noise is False, we output a label whose dimension is 1 and
    ignore ``num_timesteps``.
    - Dim of the input is 0: Unsqueeze to [1, ], ignore ``num_batches``
    - Dim of the input is 1: Return the input, ignore ``num_batches``
    - Callable or None: Generate label shape as [bz, ]
    - Otherwise: Raise error

    It's to be noted that we do not move the generated label to a target
    device in this function because we can not know which device the label
    should move to.

    Args:
        label (torch.Tensor | callable | None): You can directly give a
            batch of labels through a ``torch.Tensor`` or offer a callable
            function to sample a batch of label data. Otherwise, ``None``
            indicates to use the default label sampler
            (``torch.randint`` over ``[0, num_classes)``).
        num_timesteps (int, optional): Total timesteps of the diffusion and
            denoising process. Defaults to 0.
        num_classes (int, optional): Number of classes; 0 means an
            unconditional model (returns None). Defaults to 0.
        num_batches (int, optional): The number of batch size. To be noted that
            this argument only works when the input ``label`` is callable or
            ``None``. Defaults to 0.
        timesteps_noise (bool, optional): If True, returned label will be
            shaped as [n, bz], otherwise shaped as [bz, ].
            Defaults to False.

    Returns:
        torch.Tensor | None: Generated label with desired shape, or None
        for the unconditional (``num_classes == 0``) case.
    """
    # no labels output if num_classes is 0
    if num_classes == 0:
        assert label is None, ('\'label\' should be None '
                               'if \'num_classes == 0\'.')
        return None
    # receive label and conduct sanity check.
    if isinstance(label, torch.Tensor):
        if timesteps_noise:
            if label.ndim == 1:
                assert num_batches > 0 and num_timesteps > 0
                # [n, ] to [n, bz]
                if label.shape[0] == num_timesteps:
                    label_batch = label.view(num_timesteps, 1)
                    label_batch = label_batch.expand(-1, num_batches)
                # [bz, ] to [n, bz]
                elif label.shape[0] == num_batches:
                    label_batch = label.view(1, num_batches)
                    label_batch = label_batch.expand(num_timesteps, -1)
                # [n*bz, ] to [n, bz]
                elif label.shape[0] == num_timesteps * num_batches:
                    label_batch = label.view(num_timesteps, -1)
                else:
                    raise ValueError(
                        'The timesteps label should be in shape of '
                        '(n, ), (bz,), (n*bz, ) or (n, bz, ). But receive '
                        f'{label.shape}.')
            elif label.ndim == 2:
                # dimension is 2, direct return
                label_batch = label
            else:
                raise ValueError(
                    'The timesteps label should be in shape of '
                    '(n, ), (bz,), (n*bz, ) or (n, bz, ). But receive '
                    f'{label.shape}.')
        else:
            # dimension is 0, expand to [1, ]
            if label.ndim == 0:
                label_batch = label[None, ...]
            # dimension is 1, do nothing
            elif label.ndim == 1:
                label_batch = label
            else:
                raise ValueError(
                    'The label should be in shape of (bz, ) or'
                    f'zero-dimension tensor, but got {label.shape}')
    # receive a noise generator and sample noise.
    elif callable(label):
        assert num_batches > 0
        label_generator = label
        if timesteps_noise:
            assert num_timesteps > 0
            # generate label shape as [n, bz]
            label_batch = label_generator((num_timesteps, num_batches))
        else:
            # generate label shape as [bz, ]
            label_batch = label_generator((num_batches, ))
    # otherwise, we will adopt default label sampler.
    else:
        assert num_batches > 0
        if timesteps_noise:
            assert num_timesteps > 0
            # generate label shape as [n, bz]
            label_batch = torch.randint(0, num_classes,
                                        (num_timesteps, num_batches))
        else:
            # generate label shape as [bz, ]
            label_batch = torch.randint(0, num_classes, (num_batches, ))
    return label_batch
|
b8dd998bc924aaee7172c87464b64e1fa565c202
| 88,157
|
def create_receipt(wo_id, wo_response):
    """
    Create a work order receipt corresponding to a work order id.

    Parameters:
        wo_id: Work order id (currently unused; kept so the signature is
            stable for future receipt formats that embed the id).
        wo_response: The work order response to store as the receipt.

    Returns:
        The receipt. Currently this is the wo_response stored verbatim.
    """
    # Storing wo-response as receipt as of now. Receipt structure may get modified in future.
    return wo_response
|
58172e17be3f7f58ff9c82b50001889096d9dc63
| 88,160
|
def preceding_token(mention):
    """Compute the token preceding a mention.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('preceding', TOKEN), where TOKEN is the (lowercased)
        token preceding the mention. If no such token exists, TOKEN is
        'NONE'.
    """
    context = mention.get_context(-1)
    # An empty (or None) context means nothing precedes the mention.
    token = context[0].lower() if context else "NONE"
    return "preceding", token
|
23b8ab6c8ab03dadc84413a1dee4c273a9459d06
| 88,162
|
async def get_cache_exist(cache, key: str):
    """Check whether a cached entry exists for the given key.

    The backend's ``ttl`` call reports:
      -2 or None: the key does not exist
      -1 or False: the key exists but has no expiry set

    :param cache: cache client
    :param key: key to look up
    :return: True if the key exists in the cache, False otherwise
    """
    ttl = await cache.ttl(key)
    # Anything other than -2/None (including -1, False, or a positive TTL)
    # means the key is present.
    return ttl != -2 and ttl is not None
|
c57d177aebb2d0827e7aaf4855384812cfbccc4c
| 88,167
|
import random
def create_hex_decimal_string(string_len=10):
    """
    Create a string of a random hexadecimal value.

    Args:
        string_len:
            Length of the hexadecimal string.

    Returns:
        A zero-padded, lowercase hexadecimal string of ``string_len``
        characters.
    """
    # Draw a uniform value in [0, 16**string_len) and left-pad with zeros.
    value = random.randrange(16 ** string_len)
    return format(value, '0%dx' % string_len)
|
3eed2e339d06ef8c3a8d4bce8b7d3be1ff5cc486
| 88,168
|
def term2str(term):
    """Convert a term argument to string.

    :param term: the term to convert
    :type term: Term | None | int
    :return: string representation of the given term where None is
        converted to '_', a non-negative int k to 'A<k+1>' and a negative
        int -k to 'X<k>'.
    :rtype: str
    """
    if term is None:
        return '_'
    if type(term) is not int:
        # Anything that is not exactly an int (including bool) is
        # stringified directly.
        return str(term)
    # Non-negative indices name arguments, negative ones name variables.
    return 'A%s' % (term + 1) if term >= 0 else 'X%s' % (-term)
|
2e44f23302e961a0efb7d2104b6e43f9c4d33e4c
| 88,172
|
def get_file_extension(filename: str) -> str:
    """
    Return the file extension of *filename*.

    The extension is the text after the last ``.``; a name with no dot is
    returned whole (matching ``filename.split('.')[-1]``).

    :param filename: the file name to inspect
    :return: the extension without the leading dot
    """
    head, sep, tail = filename.rpartition(".")
    # No separator found -> the whole name is the "extension".
    return tail if sep else filename
|
f424f8cbf0069ebd1b16668bbfad76e0d805b738
| 88,175
|
def get_id_lists(zen_tickets, zen_fieldid):
    """Get the Zendesk user IDs and GitHub issue numbers associated with
    the passed list of Zendesk tickets.

    Parameters:
        zen_tickets - A list of Zendesk tickets whose associated GitHub
            issue numbers and Zendesk user IDs are desired.
        zen_fieldid - The ID number of the custom field in Zendesk tickets
            that holds its associated GitHub issue number.

    Returns a tuple (zen_user_ids, git_issue_numbers) of de-duplicated
    lists.
    """
    # De-duplicate requester IDs by collecting them into a set first.
    requester_ids = {ticket['requester_id'] for ticket in zen_tickets}

    issue_numbers = set()
    for ticket in zen_tickets:
        # Locate the custom field holding the association data.
        association_data = ''
        for field in ticket['fields']:
            if field['id'] == zen_fieldid:
                if field['value'] is not None:
                    association_data = field['value'].split('-')
                break
        # Only 'gh-<number>' values refer to GitHub issues.
        if association_data and association_data[0] == 'gh':
            issue_numbers.add(int(association_data[1]))

    return (list(requester_ids), list(issue_numbers))
|
63f7b6f4ef4a10b6a4f462de49eb5b239fd3b9f3
| 88,176
|
def choose_color_by_layertype(layertype):
    """Define colors for nodes based on the layer type.

    Unknown layer types get the default color '#6495ED'.
    """
    palette = {
        'Conv': '#FF5050',
        'Embedding': '#FF9900',
        'FullConnect': '#CC33FF',
        'MaxPooling': '#66CC66',
        'AvgPooling': '#66CC66',
        'DynamicPooling': '#66CC66',
        'Lstm': '#B5E61D',
        'Gru': '#B5E61D',
    }
    return palette.get(layertype, '#6495ED')
|
7cf32790a0c4b51c3d24cd77ae2d1d0a52661517
| 88,182
|
import hmac
import hashlib
def make_hmac_hash(message: str, key: str) -> str:
    """Return an HMAC-SHA256 signature of *message* keyed by *key*.

    Both inputs are UTF-8 encoded; the digest is returned as a lowercase
    hex (base16) string.
    """
    mac = hmac.new(
        key.encode("UTF-8"),
        message.encode("UTF-8"),
        hashlib.sha256,
    )
    return mac.hexdigest()
|
852046383a583968fcb374de5ca2369a2c1ce161
| 88,188
|
def partiallyVerifyRootGeneration(p: int, q: int, g: int) -> bool:
    """Check partial validity of DSA parameters per FIPS 186-4, Appendix A.2.2.

    Note that this verifies correctness, not security. As the standard
    states, the non-existence of a potentially exploitable relationship of
    g to another generator g' (known to whoever generated g, but possibly
    not to other entities) cannot be checked.

    Parameters:
        p: int
            Bigger prime
        q: int
            Smaller prime
        g: int
            Primitive root

    Returns:
        status: bool
            True if the parameters are partially valid,
            False if they are definitely not valid.
    """
    # g must lie in (1, p) and satisfy g^q = 1 (mod p).
    in_range = 2 <= g <= p - 1
    return in_range and pow(g, q, p) == 1
|
b141b87662344fc63e820a6afa23254bf532e3a6
| 88,189
|
import re
def bestresult(musixmatch_searchresult_page):
    """Take a musixmatch result page and extract the best result link.

    Args:
        musixmatch_searchresult_page (stream_object): The html musixmatch
            searchresult page

    Returns:
        str: the 'best result' link

    Raises:
        ValueError: If the regex didnt match anything
    """
    html = musixmatch_searchresult_page.read()
    # The first "track_share_url" entry on the page is the best result.
    match = re.search(rb'"track_share_url":"(\S+?)",', html)
    if match is None:
        raise ValueError('No result found')
    return str(match.group(1), 'unicode_escape')
|
371a4b2f7e450ec279c853c7ea2ad1c9e92f435a
| 88,190
|
def poptrailing(lst):
    """Remove trailing empty-string items from *lst* in place.

    Returns the same (mutated) list for convenience.
    """
    while lst:
        if lst[-1] != "":
            break
        lst.pop()
    return lst
|
48b1a57be2afb0fd8cc2aa8c71558c060704a05e
| 88,191
|
def num_to_emoji(num: int):
    """Retrieve the emoji shortcode for a single digit.

    Parameters
    ----------
    num: int
        A digit in the range 0-9.

    Returns
    -------
    str
        Emoji shortcode that represents the digit.
    """
    words = ('zero', 'one', 'two', 'three', 'four',
             'five', 'six', 'seven', 'eight', 'nine')
    # Build the digit -> ':word:' table; dict lookup preserves the
    # original KeyError behaviour for out-of-range input.
    table = {digit: ':%s:' % word for digit, word in enumerate(words)}
    return table[num]
|
4a6c5889b5c3a7469f7643e5b25e7a7d6a739908
| 88,192
|
import time
def get_date_string_or(string):
    """Return an ISO date parsed from the start of *string*, or today.

    If the string starts with 8 digits they are interpreted as YYYYMMDD
    and returned as 'YYYY-MM-DD'. Otherwise today's date in ISO format is
    returned.
    """
    # The previous check only tested the first 5 characters while slicing
    # 8, so inputs like '12345abc' produced malformed dates; require all
    # 8 digits to be present.
    if len(string) >= 8 and string[:8].isdigit():
        return '{}-{}-{}'.format(string[:4], string[4:6], string[6:8])
    return time.strftime("%Y-%m-%d")
|
61648763a25bf5d7237fc7da6ace836be161ea48
| 88,193
|
import itertools
from typing import Tuple
def itertools_product(arr_1: Tuple[int, ...],
                      arr_2: Tuple[int, ...]) -> Tuple[Tuple[int, ...], ...]:
    """Return the Cartesian product of two tuples as a tuple of pairs.

    >>> itertools_product((1, 2), (3, 4))
    ((1, 3), (1, 4), (2, 3), (2, 4))
    """
    # Same ordering as itertools.product: arr_1 varies slowest.
    return tuple((a, b) for a in arr_1 for b in arr_2)
|
8568036274ba0ec4ce3deda12e8a1aef18a9ae5d
| 88,194
|
def bswap_claim_rewards(self, **kwargs):
    """Claim rewards (TRADE)

    Claim swap rewards or liquidity rewards.

    POST /sapi/v1/bswap/claimRewards

    https://binance-docs.github.io/apidocs/spot/en/#claim-rewards-trade

    Keyword Args:
        type (int, optional): 0: Swap rewards, 1: Liquidity rewards, default to 0
        recvWindow (int, optional): The value cannot be greater than 60000

    Returns:
        Whatever ``self.sign_request`` returns for the signed POST request
        (presumably the parsed API response — confirm against the client's
        sign_request implementation).
    """
    return self.sign_request("POST", "/sapi/v1/bswap/claimRewards", kwargs)
|
c34f48b42bf335242b93c63878f92efc409cd965
| 88,196
|
def line_intersect(Ax1, Ay1, Ax2, Ay2, Bx1, By1, Bx2, By2):
    """Intersect segment A (Ax1,Ay1)-(Ax2,Ay2) with segment B (Bx1,By1)-(Bx2,By2).

    Returns an (x, y) tuple, or None if the segments are parallel or do
    not overlap.
    """
    adx = Ax2 - Ax1
    ady = Ay2 - Ay1
    bdx = Bx2 - Bx1
    bdy = By2 - By1
    denom = bdy * adx - bdx * ady
    # Parallel (or degenerate) segments have a zero denominator.
    if not denom:
        return None
    uA = (bdx * (Ay1 - By1) - bdy * (Ax1 - Bx1)) / denom
    uB = (adx * (Ay1 - By1) - ady * (Ax1 - Bx1)) / denom
    # Both line parameters must fall inside [0, 1] for the segments to touch.
    if 0 <= uA <= 1 and 0 <= uB <= 1:
        return Ax1 + uA * adx, Ay1 + uA * ady
    return None
|
c888b1430a4b10f6673594745b608947ddb74a92
| 88,198
|
def unlistify(x):
    """Unwrap a single-item sequence and return the item itself.

    A sequence of any other length is returned unchanged (similar to a
    function that may return either a list or a bare item).
    """
    if len(x) == 1:
        return x[0]
    return x
|
d638809d0d24c6a2687d12b9a59cc893c86c6872
| 88,202
|
def GetCanonicalLineItem(line_item):
    """Simplify product and sku names by dropping the service prefix."""
    prefix = 'com.google.cloud/services/'
    return line_item.replace(prefix, '')
|
9efa5961ab32bbf391652a6c79d8577197842907
| 88,206
|
from typing import Any
def _flatten(lst: list[Any]) -> list[Any]:
"""
The helper to flatten a list of lists.
"""
return [item for sublst in lst for item in sublst]
|
4036bca76a08448d4ceea6dd6086426136bee092
| 88,207
|
def rl_to_vswr(rl):
    """Calculate VSWR from return loss RL (in dB)."""
    # Convert the return loss to a linear reflection-coefficient magnitude.
    gamma = pow(10, -(rl / 20))
    return (1 + gamma) / (1 - gamma)
|
ab2f760c58da1c755867494a022f26812a1137c3
| 88,209
|
def minimax_for_game(game):
    """
    Returns the relevant Minimax agent name for the given game, keyed on
    the game's class name; 'unknown' for unrecognised games.
    """
    agents = {
        "Latrunculi": "Minimax",
        "Connect_Four": "Minimax_CF",
        "Othello": "Minimax_Othello",
    }
    return agents.get(type(game).__name__, "unknown")
|
f25c7ae5c06e225c35ea21bd6e916e0b28bfad95
| 88,212
|
def index2taxid(label_file):
    """Turn the label info file into a dict {label: taxid}.

    Each line of *label_file* is tab-separated with the label id in the
    first column and the taxon id in the second.
    """
    mapping = {}
    with open(label_file) as handle:
        for line in handle:
            fields = line.rstrip().split('\t')
            mapping[int(fields[0])] = int(fields[1])
    return mapping
|
830cbfac2eef5c8647da79f6c84533ae455e34c1
| 88,214
|
def _pd_to_hdf(pd_to_hdf, lock, args, kwargs=None):
""" A wrapper function around pd_to_hdf that enables locking"""
if lock:
lock.acquire()
try:
pd_to_hdf(*args, **kwargs)
finally:
if lock:
lock.release()
return None
|
dbacf2f2e06301e5c9feefe5d95c5709419054dd
| 88,217
|
def rmvsuffix(subject):
    """
    Remove the suffix (file extension) from *subject*.

    Only a dot occurring after the last path separator ('/' or '\\')
    counts as a suffix; otherwise the string is returned unchanged.
    """
    dot = subject.rfind('.')
    # Position of the last separator of either flavour.
    sep = max(subject.rfind('/'), subject.rfind('\\'))
    if dot > sep:
        return subject[:dot]
    return subject
|
dd1050bcaa449174d62cfeaf8d1cb052fa242582
| 88,219
|
def _merge_nics(management_network_id, *nics_sources):
"""Merge nics_sources into a single nics list, insert mgmt network if
needed.
nics_sources are lists of networks received from several sources
(server properties, relationships to networks, relationships to ports).
Merge them into a single list, and if the management network isn't present
there, prepend it as the first network.
"""
merged = []
for nics in nics_sources:
merged.extend(nics)
if management_network_id is not None and \
not any(nic['net-id'] == management_network_id for nic in merged):
merged.insert(0, {'net-id': management_network_id})
return merged
|
2db12ae4c5285d07ee57e7fe30f1f3b93e2ef771
| 88,220
|
def qbytearray_to_str(qba):
    """Convert a QByteArray object to str in a way compatible with
    Python 2/3.

    The bytes are rendered as their hex representation and decoded with
    the default codec.
    """
    hex_data = qba.toHex().data()
    return bytes(hex_data).decode()
|
7b1a016d3b7469cfad287a646c3f2e0aa61c2552
| 88,235
|
import pathlib
def get_data_filepath(path: pathlib.Path) -> pathlib.Path:
    """Return the path of the data download file inside *path*."""
    return path.joinpath("data.csv")
|
13485172d097a5d9a77366f30131078564b83e51
| 88,238
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.