content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from typing import Tuple
from typing import Dict
def load_vocabulary(vocabulary_path: str) -> Tuple[Dict[str, int], Dict[int, str]]:
    """
    Read a vocabulary file (one token per line) and build lookup tables.

    Blank lines are skipped but still consume an index, so ids keep the
    original line numbering of the file.

    :param vocabulary_path: path to the vocabulary text file
    :return: (token -> id, id -> token) mappings
    """
    id_to_token: Dict[int, str] = {}
    token_to_id: Dict[str, int] = {}
    with open(vocabulary_path, "r") as handle:
        for line_no, raw in enumerate(handle):
            stripped = raw.strip()
            if not stripped:
                continue
            id_to_token[line_no] = stripped
            token_to_id[stripped] = line_no
    return (token_to_id, id_to_token)
|
dccd51bee2427f98f29919533fa687e6820d9cff
| 67,077
|
def load(filename):
    """Read a two dimensional Conway Cubes puzzle input and return a
    mapping of all active coordinates, i.e. ``active[coord] = True``.
    """
    active = {}
    with open(filename) as handle:
        for row, text in enumerate(handle):
            # '#' marks an active cube; coordinates are (x, y).
            for col, char in enumerate(text.strip()):
                if char == '#':
                    active[(col, row)] = True
    return active
|
e183d7d741a98d3ff6c0d7613f63180ac7f5af56
| 67,078
|
def convert_row_to_sql_tuple(row):
    """
    Take an amended source CSV row:
    ['1', '01001', '2008-01-01', '268', '260', '4', '1', '0', '3', '2891']
    and turn it into an SQL load tuple:
    "(1,'01001','2008-01-01',268,260,4,1,0,3,2891)"

    Fields 2 and 3 (FIPS code and date) are single-quoted; the rest are
    emitted bare.
    """
    fields = [str(row[0]), "'%s'" % row[1], "'%s'" % row[2]]
    fields.extend(str(value) for value in row[3:])
    return "(%s)" % ",".join(fields)
|
124bc6a60704659fa7e0474b679ab1dce54c1137
| 67,083
|
def get_triangle_number(n: int) -> int:
    """Return the n-th triangle number ``T_n = n(n+1)/2``."""
    product = n * (n + 1)
    return product // 2
|
825ccf96ae767392134b32eb847b8b2367e28322
| 67,087
|
def sub_kernel(kernel, dim1, dim2):
    """
    Slice a rectangular sub-block out of a kernel matrix.

    Args:
        kernel (tensor) : kernel matrix
        dim1 (tuple) : (start, end) row range
        dim2 (tuple) : (start, end) column range

    Returns:
        The ``kernel[dim1[0]:dim1[1], dim2[0]:dim2[1]]`` sub-block.
    """
    row_start, row_end = dim1
    col_start, col_end = dim2
    return kernel[row_start:row_end, col_start:col_end]
|
519f2ad90577ff0e6a04b0e3ebc1a26c1a214af5
| 67,090
|
def percentile(seq, q):
    """
    Return the q-percentile (q in [0, 1]) of a sequence, using the
    nearest-rank element of the sorted copy.
    """
    ordered = sorted(seq)
    index = int((len(ordered) - 1) * q)
    return ordered[index]
|
d2608e4e4bfe5ef4692d23eaae6d1a74ef4f935c
| 67,091
|
from typing import Counter
from functools import reduce
def create_maps(words, tags, min_word_freq=5, min_char_freq=1):
    """
    Creates word, char, tag maps.

    :param words: word sequences
    :param tags: tag sequences
    :param min_word_freq: words that occur fewer times than this threshold are binned as <unk>s
    :param min_char_freq: characters that occur fewer times than this threshold are binned as <unk>s
    :return: word, char, tag maps
    """
    word_freq = Counter()
    char_freq = Counter()
    tag_map = set()
    for w, t in zip(words, tags):
        word_freq.update(w)
        # Join the sequence's words with single-space separators and count
        # the individual characters of the joined result.
        char_freq.update(list(reduce(lambda x, y: list(x) + [' '] + list(y), w)))
        tag_map.update(t)
    # NOTE(review): the filters keep items whose count is STRICTLY greater
    # than the threshold, so items occurring exactly `min_*_freq` times are
    # also binned as <unk> -- slightly stricter than the docstring states.
    word_map = {k: v + 1 for v, k in enumerate([w for w in word_freq.keys() if word_freq[w] > min_word_freq])}
    char_map = {k: v + 1 for v, k in enumerate([c for c in char_freq.keys() if char_freq[c] > min_char_freq])}
    tag_map = {k: v + 1 for v, k in enumerate(tag_map)}
    # Index 0 is reserved for padding in every map; special symbols are
    # appended after the regular vocabulary.
    word_map['<pad>'] = 0
    word_map['<end>'] = len(word_map)
    word_map['<unk>'] = len(word_map)
    char_map['<pad>'] = 0
    char_map['<end>'] = len(char_map)
    char_map['<unk>'] = len(char_map)
    tag_map['<pad>'] = 0
    tag_map['<start>'] = len(tag_map)
    tag_map['<end>'] = len(tag_map)
    return word_map, char_map, tag_map
|
f873ebc6447eb2024f3abac4aec0a0feccd08470
| 67,097
|
def get_client_region(client):
    """Gets the region from a :class:`boto3.client.Client` object.

    Args:
        client (:class:`boto3.client.Client`): The client to get the region
            from.

    Returns:
        string: AWS region string.
    """
    # NOTE(review): reaches into the private `_client_config` attribute of
    # the botocore client (no public accessor exposes the configured
    # region); may break across boto3/botocore versions.
    return client._client_config.region_name
|
6727f8e21fee77a2d869c1c4f3aa077a22145f1d
| 67,098
|
import math
def rotatePoint(x, y, z, ax, ay, az):
    """Rotate the point (x, y, z) around the origin by the angles ax, ay,
    az (in radians) about the x, y and z axes, applied in that order.

    Axis directions:
         -y
          |
          +-- +x
         /
        +z
    """
    # X-axis rotation (acts on the y/z plane):
    cos_a, sin_a = math.cos(ax), math.sin(ax)
    x, y, z = x, y * cos_a - z * sin_a, y * sin_a + z * cos_a
    # Y-axis rotation (acts on the z/x plane):
    cos_a, sin_a = math.cos(ay), math.sin(ay)
    x, y, z = z * sin_a + x * cos_a, y, z * cos_a - x * sin_a
    # Z-axis rotation (acts on the x/y plane):
    cos_a, sin_a = math.cos(az), math.sin(az)
    x, y, z = x * cos_a - y * sin_a, x * sin_a + y * cos_a, z
    return (x, y, z)
|
39a2f6491ac8df4780f9f39be863d1b46433825c
| 67,099
|
def select_db(cli, dbname):
    """
    Select a database by name.

    :param cli: Client instance
    :param dbname: Database name
    :return: A database object
    """
    return cli[dbname]
|
a1149ba33128980cb52ea1e84e45a6eaeaa4aeaa
| 67,103
|
def login_exempt(view):
    """Decorator marking a view as exempt from authentication."""
    setattr(view, 'login_exempt', True)
    return view
|
d337b3ab4add24dcf2314bf261fa2fb9b4d3fda0
| 67,112
|
def parse_aws_tags(tags):
    """
    Flatten an AWS-style tag list into a plain dict.

    AWS APIs report resource tags as
    [{"key": "KEY1", "value": "VALUE1"},
     {"key": "KEY2", "value": "VALUE2"}, ...]
    which this function converts to {"KEY1": "VALUE1", ...}.
    """
    result = {}
    for entry in tags:
        # Validate the exact AWS shape and reject duplicate keys.
        assert isinstance(entry, dict)
        assert entry.keys() == {"key", "value"}
        assert entry["key"] not in result, f"Duplicate key in tags: {entry['key']}"
        result[entry["key"]] = entry["value"]
    return result
|
d3d2b9a2b4e0484d95bc149b4cb855b1ba88ee9b
| 67,113
|
def _hsnalgomac(group, switch, port):
""" Returns the string representation of an algorithmic mac address """
# A mac address is 48 bits
# [ padding ][ group ][ switch ][ port ]
# [ 28 bits ][ 9 bits ][ 5 bits ][ 6 bits ]
# Bit 41 (in the padding) must be set to indicate "locally assigned"
mac = (1 << 41) + (group << 11) + (switch << 6) + port
macstr = "%012x" % mac
return "%s:%s:%s:%s:%s:%s" % (macstr[0:2], macstr[2:4], macstr[4:6], macstr[6:8], macstr[8:10], macstr[10:12])
|
f3482f3eaa40488609fe463138e507f35d63c795
| 67,114
|
import collections
def _asdict(self):
"""
Return a new ``collections.OrderedDict`` which maps fieldnames to their
values.
"""
return collections.OrderedDict(zip(self._fieldnames, self))
|
1398a28a1404391c3818943032ac49fb1a49b11f
| 67,119
|
def create_plot_data(convert_data: list, batch_size: int, smooth_window: int = 1):
    """
    Convert metric data into x, y graph data.

    :param convert_data: list of metric dicts with keys [batch, epoch, value]
    :param batch_size: maximum number of batches in one epoch
    :param smooth_window: values are averaged over the size of smooth_window
    :return: (x_values, y_values) tuple of x/y values for a scatter plot
    """
    x_values = []
    y_values = []
    window_total = 0.0
    window_count = 0
    last_index = len(convert_data) - 1
    for idx, entry in enumerate(convert_data):
        window_total += float(entry["value"])
        window_count += 1
        # Emit a point when the window is full, or at the very end so a
        # partially filled window is not dropped.
        if window_count == smooth_window or idx == last_index:
            fraction = float(entry["batch"]) / batch_size
            x_values.append(float(entry["epoch"]) + fraction)
            y_values.append(window_total / window_count)
            window_total = 0.0
            window_count = 0
    return x_values, y_values
|
544f28786031894bb0354afdd7144554edbebc83
| 67,127
|
def to_fix(*args):
    """Join a series of strings into a FIX binary message: a field list
    separated (and terminated) by the SOH character ``\\x01``.
    """
    body = '\x01'.join(args)
    return body + '\x01'
|
b88c794f395d07768048a83b3bd2903039fba4e3
| 67,131
|
def factorial(num: int) -> int:
    """
    Compute the factorial of a number.

    :param num: the number as `int`
    :return: ``num!`` (1 for num <= 1, matching the original behaviour)
    """
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
|
38d70efafaf8a311a8ee0d710cecfcac0679df81
| 67,133
|
def pooling_output_shape(dimension_size, pool_size, padding, stride,
                         ignore_border=True):
    """
    Computes output shape for pooling operation.

    Parameters
    ----------
    dimension_size : int
        Size of the dimension. Typically it's image's
        weight or height.
    pool_size : int
        Size of the pooling filter.
    padding : int
        Size of the zero-padding.
    stride : int
        Stride size.
    ignore_border : bool
        When ``True``, partial pooling windows at the border are
        discarded. Defaults to ``True``.

    Returns
    -------
    int
        Size of the pooled output dimension, or ``None`` when the input
        size is unknown.
    """
    if dimension_size is None:
        # Unknown (symbolic) dimension stays unknown.
        return None
    if ignore_border:
        # Count full windows over the padded input, ceiling-divided by the
        # stride: ceil((size + 2p - pool + 1) / stride).
        output_size = dimension_size + 2 * padding - pool_size + 1
        output_size = (output_size + stride - 1) // stride
    elif stride >= pool_size:
        # Non-overlapping windows: every `stride` positions starts one.
        output_size = (dimension_size + stride - 1) // stride
    else:
        # Overlapping windows with the border kept: one extra partial
        # window appears at the end (hence the `+ 1`, clamped to >= 1).
        output_size = (dimension_size - pool_size + stride - 1) // stride
        output_size = max(1, output_size + 1)
    return output_size
|
3726fdc72ddc8813957db5f38754119a47256282
| 67,137
|
def check_type(lst, t):
    """Checks if all elements of list ``lst`` are of one of the types in ``t``.

    Note: compares with ``type(el)``, so instances of subclasses of the
    listed types do NOT count as matches.

    :param lst: list to check
    :param t: collection of types the elements may have
    :return: Bool
    """
    return all(type(el) in t for el in lst)
|
6fc39d180e177991d11659fb5c8099c87e3453d8
| 67,147
|
import yaml
def load_config(path: str) -> dict:
    """
    Load a config from a yaml file.

    Args:
        path: The path to the config file.

    Returns:
        The loaded config dictionary.
    """
    with open(path, 'r') as f:
        # FullLoader resolves the full YAML language but not arbitrary
        # Python objects. NOTE(review): prefer yaml.safe_load for config
        # files that may come from untrusted sources.
        return yaml.load(f, Loader=yaml.FullLoader)
|
e701f41a954804edafb0d9f2888910a03a480019
| 67,148
|
import re
def sanitise(string: str) -> str:
    """Sanitise string for use as a group/directory name.

    Runs of the characters `` @&()/`` act as separators and are replaced
    by single underscores.
    """
    pieces = re.findall("[^ @&()/]+", string)
    return "_".join(pieces)
|
6529968687dd90a0189bb67162e8a6e3ec105dce
| 67,149
|
def _coerce_to_integer(value):
"""
Attempts to correctly coerce a value to an integer. For the case of an integer or a float,
this will essentially either NOOP or return a truncated value. If the parameter is a string,
then it will first attempt to be coerced from a integer, and failing that, a float.
:param value: the value to coerce
:return: the coerced value as an integer
"""
try:
return int(value)
except ValueError:
return int(float(value))
|
1cb8594b0c16651ae212d4da649500057f09f35b
| 67,160
|
def emulator_uses_kvm(emulator):
    """Return the 'uses_kvm' flag from the emulator mapping."""
    uses_kvm = emulator["uses_kvm"]
    return uses_kvm
|
654ec1207180e0ac90af71d9333acf3ef431add6
| 67,164
|
import configparser
import itertools
def get_imputation_featureset_combis(imputations: list, featuresets: list, target_column: str) -> list:
    """
    Function delivering imputation and featureset combinations which make sense

    :param imputations: imputations to use
    :param featuresets: featuresets to use
    :param target_column: target_column for retrieving info from config file
    :return: list of combinations which make sense
    """
    # Reads a dataset-specific flag from an ini file relative to the
    # current working directory. NOTE(review): config.read() silently
    # ignores a missing file, which would then raise KeyError on the
    # section lookup -- confirm 'Configs/...' is reachable from all callers.
    config = configparser.ConfigParser()
    config.read('Configs/dataset_specific_config.ini')
    multiple_nans_raw_set = config[target_column].getboolean('multiple_nans_raw_set')
    # Start from the full cartesian product of (imputation, featureset).
    imp_feat_combis = list(itertools.product(*[imputations, featuresets]))
    if not multiple_nans_raw_set:
        # Iterate over a copy so removal during iteration is safe: the
        # iterative/knn imputers are pointless on featuresets without
        # multiple NaNs.
        for (imp, feat) in imp_feat_combis.copy():
            if (feat == 'cal' or feat == 'none') and (imp == 'iterative' or imp == 'knn'):
                imp_feat_combis.remove((imp, feat))
    return imp_feat_combis
|
14a02e872449c6fd88be50c0da453e0bee4e4035
| 67,165
|
import random
def calc_next_exponential_backoff_delay(
    current_delay_secs,
    backoff_factor,
    max_delay_secs,
    with_jitter=False
):
    """Compute the delay (in seconds) before the next retry using
    exponential backoff, with optional full jitter.

    See http://www.awsarchitectureblog.com/2015/03/backoff.html for
    information about exponential backoff and jitter.

    Args:
        current_delay_secs (float): current backoff delay in seconds.
        backoff_factor (int): multiplier applied for exponential growth.
        max_delay_secs (float): cap on the backoff delay in seconds.
        with_jitter (Optional[bool]): whether to use jitter. Default False.

    Returns (float):
        Next backoff delay in seconds.
    """
    candidate = current_delay_secs * backoff_factor
    next_delay_secs = candidate if candidate < max_delay_secs else max_delay_secs
    if with_jitter:
        # Full jitter: draw uniformly from [0, next_delay_secs].
        next_delay_secs = random.uniform(0, next_delay_secs)
    return next_delay_secs
|
57196a4901a61a389a7bdc55278ee0a1386454bb
| 67,168
|
import pathlib
from typing import List
def get_nested_directories(directory: pathlib.Path) -> List[pathlib.Path]:
    """Return all directories inside *directory*, recursively, in
    breadth-first order.

    The previous implementation appended to the very list it was
    iterating over -- which happens to traverse the appended items in
    CPython but is a fragile modify-while-iterating pattern. This version
    uses an explicit index-based worklist with identical output order.

    :param directory: root directory to scan
    :return: every directory found beneath the root, at any depth
    """
    found: List[pathlib.Path] = []
    to_scan = [directory]
    index = 0
    while index < len(to_scan):
        for item in to_scan[index].iterdir():
            if item.is_dir():
                found.append(item)
                to_scan.append(item)
        index += 1
    return found
|
7fda096919a58ce1bf5a0f97bcc2fe8a3754c8db
| 67,169
|
def fibList(n):
    """
    Return the first ``n`` Fibonacci numbers as a list.

    Fixes the original behaviour for small n: it always returned the seed
    list ``[1, 1]`` even for n of 0 or 1. Now ``fibList(0) == []`` and
    ``fibList(1) == [1]``; results for n >= 2 are unchanged.

    :param n: how many Fibonacci numbers to produce
    :return: list of the first n Fibonacci numbers
    """
    fibs = [1, 1]
    for _ in range(2, n):
        fibs.append(fibs[-1] + fibs[-2])
    # Trim the seed values when fewer than two entries were requested.
    return fibs[:max(n, 0)]
|
b7c303bae7d2a4ae1df4a339e24a27d207e8333d
| 67,171
|
import csv
def ReadCVS(filename):
    """
    Read a CSV file and return its rows as a list of lists of strings.
    """
    with open(filename, 'r') as handle:
        return [row for row in csv.reader(handle)]
|
0f6c01ade8acdaaa5116a8491aad20af2396fa3f
| 67,173
|
import six
def get_process_signature(process, input_parameters):
    """ Generate the process signature.

    Parameters
    ----------
    process: Process
        a capsul process object
    input_parameters: dict
        the process input_parameters.

    Returns
    -------
    signature: string
        the process signature, e.g. ``proc.id(a=1, b=2)``.
    """
    # six.iteritems(d) is just d.items() on Python 3; drop the six
    # dependency -- iteration order and formatting are unchanged.
    kwargs = ["{0}={1}".format(name, value)
              for name, value in input_parameters.items()]
    return "{0}({1})".format(process.id, ", ".join(kwargs))
|
0004870baaf3a00c35eae815dfcbf4c6965cec51
| 67,174
|
def set_to_list(obj):
    """
    JSON-encoder helper that turns sets into lists and floats into
    strings (15 significant digits). Raises TypeError for any other type
    so the encoder can fall back to its default handling.
    """
    if isinstance(obj, set):
        return [item for item in obj]
    if isinstance(obj, float):
        return '%.15g' % obj
    raise TypeError
|
d9db10d550e23d02e4a1ae89e5a2203202eae8aa
| 67,176
|
import re
def extract_waveunit(header):
    """
    Attempt to read the wavelength unit from a given FITS header.

    Parameters
    ----------
    header : `sunpy.io.header.FileHeader`
        One `~sunpy.io.header.FileHeader` instance which was created by
        reading a FITS file. For example, `sunpy.io.fits.get_header` returns a list of
        such instances.

    Returns
    -------
    waveunit : `str`
        The wavelength unit that could be found or ``None`` otherwise.

    Examples
    --------
    The goal of this function is to return a string that can be used in
    conjunction with the astropy.units module so that the return value can be
    directly passed to `astropy.units.Unit`.

    >>> import astropy.units
    >>> header = {'WAVEUNIT': 'Angstrom', 'KEYCOMMENTS': {}}
    >>> waveunit = extract_waveunit(header)
    >>> if waveunit is not None:
    ...     unit = astropy.units.Unit(waveunit)
    """
    # algorithm: try the following procedures in the following order and return
    # as soon as a waveunit could be detected
    # 1. read header('WAVEUNIT'). If None, go to step 2.
    # 1.1 -9 -> 'nm'
    # 1.2 -10 -> 'angstrom'
    # 1.3 0 -> go to step 2
    # 1.4 if neither of the above, return the value itself in lowercase
    # 2. parse waveunit_comment
    # 2.1 'in meters' -> 'm'
    # 3. parse wavelnth_comment
    # 3.1 "[$UNIT] ..." -> $UNIT
    # 3.2 "Observed wavelength ($UNIT)" -> $UNIT
    def parse_waveunit_comment(waveunit_comment):
        # Only the exact phrase 'in meters' is recognised; any other
        # comment yields None via the implicit return.
        if waveunit_comment == 'in meters':
            return 'm'
    waveunit_comment = header['KEYCOMMENTS'].get('WAVEUNIT')
    wavelnth_comment = header['KEYCOMMENTS'].get('WAVELNTH')
    waveunit = header.get('WAVEUNIT')
    if waveunit is not None:
        # Integer WAVEUNIT values encode powers of ten of a metre; the 0
        # entry ("unspecified") falls back to parsing the comment. Any
        # non-integer value is returned lowercased as-is (step 1.4).
        metre_submultiples = {
            0: parse_waveunit_comment(waveunit_comment),
            -1: 'dm',
            -2: 'cm',
            -3: 'mm',
            -6: 'um',
            -9: 'nm',
            -10: 'angstrom',
            -12: 'pm',
            -15: 'fm',
            -18: 'am',
            -21: 'zm',
            -24: 'ym'}
        waveunit = metre_submultiples.get(waveunit, str(waveunit).lower())
    elif waveunit_comment is not None:
        waveunit = parse_waveunit_comment(waveunit_comment)
    elif wavelnth_comment is not None:
        # supported formats (where $UNIT is the unit like "nm" or "Angstrom"):
        #   "Observed wavelength ($UNIT)"
        #   "[$UNIT] ..."
        parentheses_pattern = r'Observed wavelength \((\w+?)\)$'
        brackets_pattern = r'^\[(\w+?)\]'
        for pattern in [parentheses_pattern, brackets_pattern]:
            m = re.search(pattern, wavelnth_comment)
            if m is not None:
                waveunit = m.group(1)
                break
    if waveunit == '':
        return None  # To fix problems associated with HMI FITS.
    return waveunit
|
2574b7b2e8d8a5b76a94d3b025688e11b968e702
| 67,182
|
def is_end_word(word):
    """
    Determine whether *word* ends a sentence (last character is one of
    ``! ? .``), treating the honorifics 'Mr.' and 'Mrs.' as non-terminal.
    """
    if word in ('Mr.', 'Mrs.'):
        return False
    return word[-1] in "!?."
|
9165fede070654b3d0b10b2bb02855307f9ab0c5
| 67,187
|
def norm(angle):
    """ Normalize an angle into the range [0, 360). """
    wrapped = angle % 360
    return wrapped
|
04acc963b226e31c8892a7bb56394eab52436eae
| 67,190
|
def area(l, w):
    """ Return the area of a rectangle (or square) with length ``l`` and width ``w``. """
    return l * w
|
2b4ad1363ada89db784a8dcafb63f7cc17139df0
| 67,192
|
def _wildcarded_except(exclude=[]):
"""
Function factory for :mod:`re` ``repl`` functions used in :func:`re.sub``,
replacing all format string place holders with ``*`` wildcards, except
named fields as specified in ``exclude``.
"""
def _wildcarded(match):
if match.group(1) in exclude:
return match.group(0)
return "*"
return _wildcarded
|
5de34232ea0c0b2e1baf893dbf0bf8d20c6128a1
| 67,197
|
def text2caesar(text, shift=3):
    """
    Encrypt *text* with a Caesar cipher.

    Parameters:
        text (str): The text that needs to be encrypted in Caesar's cipher
        shift (int): The shift that should be used to encrypt the text

    Returns:
        result (str): The encrypted text
    """
    encrypted = []
    for char in text:
        if char.isupper():
            base = 65
        elif char.islower():
            base = 97
        else:
            # Non-letters pass through untouched.
            encrypted.append(char)
            continue
        encrypted.append(chr((ord(char) + shift - base) % 26 + base))
    return "".join(encrypted)
|
84dc3307224c3773132b5aa33d4e9f31bd83dd64
| 67,201
|
import logging
def _add(num1, num2):
"""Add two numbers"""
result = float(num1) + float(num2)
logging.info("%s + %s = %s", num1, num2, result)
return result
|
7f7d405d5735662296e5cf5beea6f75b86a56623
| 67,203
|
def get_previous_timestep(timesteps, timestep):
    """Get the timestamp for the timestep previous to the input timestep.

    :param timesteps: container supporting integer indexing and exposing an
        ``order_dict`` mapping each timestep to its zero-based position --
        presumably a project-specific type; confirm against callers.
    :param timestep: the timestep whose predecessor is wanted
    :return: the previous timestep
    """
    # order_dict starts numbering at zero, timesteps is one-indexed, so we do not need
    # to subtract 1 to get to previous_step -- it happens "automagically"
    return timesteps[timesteps.order_dict[timestep]]
|
da897b2df0192ec4678fe468a6901df72c0f8d1f
| 67,204
|
from typing import Union
def _find_assign_op(token_line) -> Union[int, None]:
"""Get the index of the first assignment in the line ('=' not inside brackets)
Note: We don't try to support multiple special assignment (a = b = %foo)
"""
paren_level = 0
for i, ti in enumerate(token_line):
s = ti.string
if s == '=' and paren_level == 0:
return i
if s in {'(','[','{'}:
paren_level += 1
elif s in {')', ']', '}'}:
if paren_level > 0:
paren_level -= 1
|
fb0770f31c54ed29239c70cfad7070495314cb80
| 67,207
|
import importlib
def load_migration_module(package_name):
    """Import the named module at run-time.

    :param package_name: dotted module path to import
    :return: the imported module, or ``None`` when the import fails
        (missing module or malformed name)
    """
    try:
        return importlib.import_module(package_name)
    except (ImportError, ValueError):
        # Best-effort lookup: a missing migration module is not fatal.
        # (The unused `as e` binding from the original has been removed.)
        return None
|
49d9c82a777a18052f37e0466361df09bcc402ef
| 67,209
|
import time
def _waitfn(ind, func, wait, *args, **kwds):
"""waits based on ind and then runs func"""
time.sleep(wait * ind)
return func(*args, **kwds)
|
c9a100210f84795e91a7e536c1187350d2f03aeb
| 67,210
|
def _repl_args(args, key, replacement):
"""Replaces occurrences of key in args with replacement."""
ret = []
for arg in args:
if arg == key:
ret.extend(replacement)
else:
ret.append(arg)
return ret
|
504616e94b6aa3368868a97827e41a5fea85dea9
| 67,211
|
def tidy_input(cmd_line_args):
    """
    Normalize parsed command line arguments.

    :param cmd_line_args: vars(args())
    :return: input arguments, with all string values made uppercase
    """
    out_args = {}
    for arg in cmd_line_args:
        # Falsy values (None, '', 0, False) are dropped entirely here.
        if cmd_line_args[arg]:
            if isinstance(cmd_line_args[arg], str):
                out_args[arg] = cmd_line_args[arg].upper()
            else:
                out_args[arg] = cmd_line_args[arg]
    # 'codon_usage' is always carried over verbatim -- raises KeyError if
    # absent, so it is assumed to always be present in the input.
    out_args['codon_usage'] = cmd_line_args['codon_usage']
    # 'name' is preserved in its ORIGINAL case (overriding the uppercased
    # copy made by the loop above) and defaults to the empty string.
    if cmd_line_args['name']:
        out_args['name'] = cmd_line_args['name']
    else:
        out_args['name'] = ''
    return out_args
|
f519091849f7ba48e9e0d1e2eab658c4d897ef6a
| 67,212
|
def _get_template_module(name, req, **context):
    """Get template module for a request email notification template.

    :param name: template name
    :param req: :class:`Request` instance
    :param context: data passed to the template
    :return: whatever ``req.definition.get_notification_template`` returns
        -- presumably a rendered template module; confirm against the
        Request definition API.
    """
    # Make the request itself available to the template as `req`.
    context['req'] = req
    return req.definition.get_notification_template(name, **context)
|
5cfff3a9f76259b4033e1f448842f40e18291c3c
| 67,215
|
def fits(bounds_inside, bounds_around):
    """Return True when the first bounding box lies entirely within the second.

    Both arguments are (x_min, y_min, x_max, y_max) tuples.
    """
    in_xmin, in_ymin, in_xmax, in_ymax = bounds_inside
    out_xmin, out_ymin, out_xmax, out_ymax = bounds_around
    horizontal = out_xmin <= in_xmin and in_xmax <= out_xmax
    vertical = out_ymin <= in_ymin and in_ymax <= out_ymax
    return horizontal and vertical
|
35b7e075bd2246d13979d69bc3893006e738623b
| 67,218
|
import re
def parseRange(text):
    """
    Convert a range string like "2-3", "10-20", "4-", "-9", or "2" to a
    two-element list of its endpoints. A missing endpoint is set to None.

    :param text: the range string
    :return: [low, high] with None for an open end
    """
    def toNumeric(elt):
        # An empty side of the range means "open ended".
        if elt == "":
            return None
        return int(elt)
    if re.search(r'-', text):
        # BUG FIX: the endpoints are separated by '-', but the original
        # split on ':' and therefore raised ValueError on every genuine
        # range string such as "2-3".
        rng = [toNumeric(elt) for elt in text.split('-')]
    else:
        # A bare number means a degenerate range [v, v].
        v = int(text)
        rng = [v, v]
    return rng
|
bec19909188ffdb1de1de233a5f33675fa1ea48b
| 67,221
|
def logged_client(django_user_model, client):
    """A Django test client instance with a new user authenticated.

    Pytest-django fixture-style helper: creates a fresh user named
    'user test' and logs it into the provided test client.

    :param django_user_model: active Django user model (pytest-django fixture)
    :param client: Django test client (pytest-django fixture)
    :return: the same client, now authenticated
    """
    user = django_user_model.objects.create_user('user test')
    # force_login bypasses the password flow -- appropriate for tests.
    client.force_login(user)
    return client
|
0cd72b462969fad577ffd3a9a38968d8bb00c80c
| 67,223
|
def compute_nyquist(fs):
    """Compute the Nyquist frequency.

    Parameters
    ----------
    fs : float
        Sampling rate, in Hz.

    Returns
    -------
    float
        The Nyquist frequency (half the sampling rate), in Hz.

    Examples
    --------
    Compute the Nyquist frequency for a 500 Hz sampling rate:

    >>> compute_nyquist(fs=500)
    250.0
    """
    return fs * 0.5
|
febf380a892dd1ad9958d69ded10dd9e36583b8c
| 67,224
|
def get_crashreport_key(group_id):
    """
    Return the ``django.core.cache`` key under which the "crash report
    limit exceeded" marker for a group is stored.
    """
    cache_key = u"cr:%s" % (group_id,)
    return cache_key
|
484fcd0140fdadadebd5faea01e420d39ed66992
| 67,229
|
def translate(x: str, d: dict) -> str:
    """
    Translate characters of *x* according to the mapping *d* (e.g.
    English digits to Persian digits).

    :param x: string to translate
    :param d: mapping used for the translation
    :return: translated string
    :raises TypeError: if x is not a str or d is not a dict
    """
    if not isinstance(x, str):
        raise TypeError("x is not string")
    if not isinstance(d, dict):
        raise TypeError("d is not dict")
    return x.translate(str.maketrans(d))
|
826f12cc6713e7da814f547cc8de565004910fec
| 67,234
|
def check_path_exists(path, search_space):
    """Return True if the non-empty key sequence *path* exists in the
    nested dict *search_space*."""
    if not isinstance(search_space, dict):
        return False
    head, rest = path[0], path[1:]
    if head not in search_space:
        return False
    if rest:
        # Descend one level and keep checking the remaining keys.
        return check_path_exists(rest, search_space[head])
    return True
|
4cfe64e4b7b627a41317a9b5f0d20552a52cc753
| 67,235
|
def convertNumber(t):
    """Convert a matched number token group into a one-element list
    holding a Python int or float.

    :param t: parsed token group; ``t.float1``/``t.float2``/``t.float3``
        are presumably named results set when one of the float forms
        matched -- confirm against the grammar that defines them
        (pyparsing-style access).
    """
    if t.float1 or t.float2 or t.float3 : return [float(t[0])]
    else : return [int(t[0]) ]
|
9fde8c0813e684b702fd9d6bb3c8d2df1967d134
| 67,237
|
def add_title(df):
    """
    Add a 'Title' column (Mr., Miss., ...) extracted from the 'Name' column.

    :param df: input Titanic DataFrame (with Name column)
    :return: df with a new ['Title'] column
    """
    # "Braund, Mr. Owen Harris" -> "Mr. Owen Harris" -> "Mr."
    after_comma = df['Name'].str.split(', ').str[-1]
    df['Title'] = after_comma.str.split(' ').str[0]
    return df
|
6856200ab24434d54f38b764b49320cd165b1e35
| 67,240
|
def isEmpty(text: str = "") -> bool:
    """
    Check whether *text* is empty: None, the empty string, or a string
    containing only spaces.
    """
    # BUG FIX: check None FIRST. The original called text.replace()
    # before its None test, so isEmpty(None) raised AttributeError
    # instead of returning True (the None branch was unreachable).
    if text is None:
        return True
    return text.replace(" ", "") == ""
|
ab0e56e30c0045e364e8c55a658e0096a4e51513
| 67,243
|
def apply_to_one(f):
    """Apply the callable *f* to the argument 1 and return the result."""
    result = f(1)
    return result
|
1bc58df883901e7651be50219bd8beec438d49d4
| 67,249
|
def extract_qword(question):
    """
    Extract the question word ('who' or 'which') from a question
    sentence, or return None when neither occurs.
    """
    lowered = question.lower()
    # 'who' takes priority over 'which', matching the original ordering.
    for qword in ('who', 'which'):
        if qword in lowered:
            return qword
    return None
|
e44c86249afa4c83f596d8b8e56ce93fe73e0343
| 67,250
|
from bs4 import BeautifulSoup
def get_script_by_id(source_html: str, script_id: int):
    """
    Find the script tag at a specific index in the page source html.

    @param source_html: The source html of the page in which we want to find the script tag.
    @type source_html: str
    @param script_id: The id of the script tag (its index from the top).
    @type script_id: int
    @return: The script tag at that index, or None when out of range.
    @rtype: Tag
    """
    scripts = BeautifulSoup(source_html, "html.parser").find_all("script")
    if 0 <= script_id < len(scripts):
        return scripts[script_id]
    return None
|
1befdcaf59ec85dc53ea839e58a16911331648e4
| 67,252
|
def get_node_info(node, ws):
    """
    Args:
        node (Node):
        ws (list(int)): knowledge weights. [cdscore, rdscore, asscore, stscore]

    Returns:
        return node information for a searched tree analysis
        node information: self node, parent node, depth, score, RDScore, CDScore, STScore, ASScore
    """
    # Tab-separated record; id() of the node objects acts as a unique
    # (per-process) identifier linking children to their parents.
    # NOTE(review): the weighted columns use ws[0] (cdscore), ws[3]
    # (stscore) and ws[2] (asscore) while rdscore is emitted unweighted --
    # confirm this asymmetry is intentional.
    return (f"{id(node)}\t"
            f"{id(node.parent_node)}\t"
            f"{node.depth}\t"
            f"{node.total_scores / node.visits}\t"
            f"{node.state.rdscore}\t"
            f"{node.state.cdscore * ws[0]}\t"
            f"{node.state.stscore * ws[3]}\t"
            f"{node.state.asscore * ws[2]}")
|
d17ce544acbedc6171b1af11a27b1a3356ea3a0e
| 67,258
|
def _error_with_fields(message, fields):
"""
Helper function to create an error message with a list of quoted, comma
separated field names. The `message` argument should be a format string
with a single `%s` placeholder.
"""
return message % ', '.join('"%s"' % f for f in fields)
|
b29466d41fe8b83a73330710469dfdcafa729714
| 67,259
|
def find_vm_interface(ports=[],
                      vnic_type='normal'):
    """find vm interface

    The function receives a ports API response and returns the
    [port_id, ip_address] of the first port matching the requested
    vnic_type.

    :param ports: response holding a 'ports' list of port dicts --
        NOTE(review): documented as "ports connected to specific server"
        but indexed like a dict (``ports['ports']``); confirm the expected
        shape with callers.
    :param vnic_type: vnic_type normal/direct/direct_physical
    :return: [port_id, ip_address] of the first matching port
        (IndexError when nothing matches).
    """
    # The mutable default `ports=[]` is never mutated, so the shared
    # default pitfall does not bite; the assert just rejects empty input.
    assert len(ports), 'ports is empty or None'
    return [[port['id'], port['fixed_ips'][0]['ip_address']]
            for port in ports['ports']
            if port['binding:vnic_type'] == vnic_type][0]
|
981b698fd6eebe4b53d6e74d8ac3c1a1ee1d1919
| 67,263
|
from typing import OrderedDict
def _split_table(table: OrderedDict):
"""Splits an OrderedDict into a list of tuples that can be turned into a
HTML-table with pandas DataFrame
Parameters
----------
table: OrderedDict
table that is to be split into two columns
Returns
-------
table_split: List[tuple(key, value, key, value)]
list with two key-value pairs per entry that can be used by pandas
df.to_html()
"""
table_split = []
keys = list(table.keys())
half_size = len(keys) // 2
for i in range(half_size):
j = i + half_size
table_split.append(("<b>" + keys[i] + "</b>", table[keys[i]],
"<b>" + keys[j] + "</b>", table[keys[j]]))
if len(keys) % 2 == 1:
table_split.append(("<b>"+keys[-1]+"</b>", table[keys[-1]], '', ''))
return table_split
|
a1083ea842202dda4c75b1a0ba880dcfd0a76942
| 67,264
|
import re
def unindent(string):
    """
    Return an unindented copy of the passed `string`.

    Replace each occurrence in `string` of a newline followed by spaces with a
    newline followed by the number of spaces by which said occurrence exceeds the
    minimum number of spaces in any such occurrence. Further strip all whitespace
    from both ends of the resulting string before returning it.
    """
    # NOTE(review): the comment below promises 4-space tab expansion but
    # the replacement literal appears to be a single space -- confirm
    # against the repository copy (whitespace may have been mangled).
    # Expand tabs to be 4 spaces long
    string = string.replace("\t", " ")
    # Find minimum indentation distance: the shortest run of spaces that
    # follows a newline and precedes a non-whitespace character.
    indents = [len(match) for match in re.findall("\n( *)\\S", string)]
    if indents:
        minIndent = min(indents)
        # Unindent by that much
        string = string.replace("\n" + " " * minIndent, "\n")
        string = string.strip()
        return string
    else:
        # No indented lines found: nothing to unindent.
        return string
|
abca940c9ab95f93c23199f1a20a14e2e24a4860
| 67,266
|
import random
def get_rand_index(n, exclude=None, has_byes=False):
    """
    Return a random integer in range(n), given constraints.

    The ``exclude`` parameter excludes a single integer value. The ``has_byes``
    parameter indicates that every fourth integer should be skipped as those are
    occupied by bye slots.

    :param n: Integer max value to return (exclusive)
    :param exclude: Optional integer value to specifically exclude from output
    :param has_byes: Optional Boolean value indicating that every fourth integer
        should be disallowed
    :returns: Random integer that meets all the constraints specified
    """
    if has_byes:
        # Only 3 of every 4 slots are selectable, so shrink the draw space.
        # NOTE(review): this makes n a float; random.randrange rejects
        # non-integral floats on modern Pythons -- confirm n is always a
        # multiple of 4 (or that an older Python is targeted).
        n *= 0.75
    if exclude is not None:
        # One fewer candidate when a value is excluded.
        n -= 1
    i = random.randrange(n)
    if exclude is not None:
        if has_byes:
            # Translate the excluded index into the shrunken (bye-free)
            # numbering before comparing.
            exclude -= exclude // 4
        if i >= exclude:
            # Shift past the excluded slot.
            i += 1
    if has_byes:
        # Map the draw back into the numbering that includes the bye
        # slots (every fourth index).
        i += i // 3
    return i
|
1078b853a315cada9dc9affee92f0cd720768c92
| 67,267
|
def compress(v, slen):
    """
    Encode the integer list *v* into a bytestring of exactly *slen*
    bytes, or return False when the encoding does not fit.

    Per coefficient:
    - the sign is encoded on 1 bit
    - the 7 lower magnitude bits are encoded naively (binary)
    - the high magnitude bits are encoded in unary, terminated by '1'
    """
    pieces = []
    bit_count = 0
    for coef in v:
        magnitude = abs(coef)
        encoded = "1" if coef < 0 else "0"
        encoded += format(magnitude % (1 << 7), '#09b')[2:]
        encoded += "0" * (magnitude >> 7) + "1"
        pieces.append(encoded)
        bit_count += len(encoded)
        # Bail out as soon as the bit budget is exceeded.
        if bit_count > 8 * slen:
            return False
    stream = "".join(pieces)
    # Zero-pad up to exactly slen bytes.
    stream += "0" * (8 * slen - len(stream))
    return bytes(int(stream[pos:pos + 8], 2) for pos in range(0, len(stream), 8))
|
ce45538933efa74e2673d713eae14a9b47e5c020
| 67,274
|
def dedup_list(list):
    """
    Return a copy of *list* with duplicates removed, keeping the first
    occurrence of each item. Works for unhashable items too (uses
    membership tests, not a set), at O(n^2) cost.
    """
    # NOTE: the parameter name shadows the builtin `list`; kept for
    # backward compatibility with keyword callers.
    deduped = []
    for element in list:
        if element not in deduped:
            deduped.append(element)
    return deduped
|
712e236576d1dbfde1a56914aefcd78dcfbd6acc
| 67,275
|
def magic_to_dict(kwargs, separator="_") -> dict:
    """decomposes recursively a dictionary with keys with underscores into a nested dictionary
    example : {'magnet_color':'blue'} -> {'magnet': {'color':'blue'}}
    see: https://plotly.com/python/creating-and-updating-figures/#magic-underscore-notation

    Parameters
    ----------
    kwargs : dict
        dictionary of keys to be decomposed into a nested dictionary
    separator: str, default='_'
        defines the separator to apply the magic parsing with

    Returns
    -------
    dict
        nested dictionary
    """
    assert isinstance(kwargs, dict), "kwargs must be a dictionary"
    assert isinstance(separator, str), "separator must be a string"
    new_kwargs = {}
    for k, v in kwargs.items():
        keys = k.split(separator)
        if len(keys) == 1:
            new_kwargs[keys[0]] = v
        else:
            # Re-join the tail so deeper levels are handled by recursion.
            val = {separator.join(keys[1:]): v}
            if keys[0] in new_kwargs and isinstance(new_kwargs[keys[0]], dict):
                new_kwargs[keys[0]].update(val)
            else:
                new_kwargs[keys[0]] = val
    for k, v in new_kwargs.items():
        if isinstance(v, dict):
            # BUG FIX: propagate the custom separator into the recursion;
            # previously the default '_' was always used, so nested keys
            # with a non-default separator were never decomposed.
            new_kwargs[k] = magic_to_dict(v, separator)
    return new_kwargs
|
108f574d912cca379a458781317554a093ce9f85
| 67,276
|
def _get_eth_link(vif, ifc_num):
"""Get a VIF or physical NIC representation.
:param vif: Neutron VIF
:param ifc_num: Interface index for generating name if the VIF's
'devname' isn't defined.
:return: A dict with 'id', 'vif_id', 'type', 'mtu' and
'ethernet_mac_address' as keys
"""
link_id = vif.get('devname')
if not link_id:
link_id = 'interface%d' % ifc_num
# Use 'phy' for physical links. Ethernet can be confusing
if vif.get('type') == 'ethernet':
nic_type = 'phy'
else:
nic_type = vif.get('type')
link = {
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
'mtu': vif['network']['meta'].get('mtu'),
'ethernet_mac_address': vif.get('address'),
}
return link
|
1e3d0fe2dbe09991f6bd2929dfb5c7d60978013b
| 67,278
|
def apply_penalty(tensor_or_tensors, penalty, **kwargs):
    """
    Computes the total cost for applying a specified penalty
    to a tensor or group of tensors.

    Parameters
    ----------
    tensor_or_tensors : Theano tensor or list of tensors
    penalty : callable
    **kwargs
        keyword arguments passed to penalty.

    Returns
    -------
    Theano scalar
        a scalar expression for the total penalty cost
    """
    try:
        # EAFP: treat the argument as an iterable of tensors first.
        return sum(penalty(x, **kwargs) for x in tensor_or_tensors)
    except (TypeError, ValueError):
        # Not iterable -> a single tensor: apply the penalty directly.
        # NOTE(review): a TypeError/ValueError raised from inside
        # `penalty` itself is also caught here and silently retried on
        # the whole argument -- confirm that is acceptable.
        return penalty(tensor_or_tensors, **kwargs)
|
ad4c6ac9102f96ff7b1e27a5700051f5ad70bfd6
| 67,286
|
def tree_in_path(map_line, map_x_coord):
    """
    Check whether a tree ('#') occupies the given x coordinate of the
    map line, wrapping around when x exceeds the line length.

    returns: True if a tree is in the path, False otherwise
    rtype: Bool
    """
    wrapped_x = map_x_coord % len(map_line)  # modulo handles the rollover
    return map_line[wrapped_x] == '#'
|
b71f805dcb92d61bc3911ab9c1eee17973b89198
| 67,289
|
def get_neighbours(position, width, height, grown, up, down):
    """
    Return connected neighbours of ``position`` that are not already
    "grown" (i.e. not yet added to the region).

    ``grown`` is indexed as ``grown[y, x]`` (e.g. a 2-D numpy array).
    Vertical neighbours are only considered when ``up`` / ``down`` are
    truthy.  Order of the result is fixed: left, up, right, down.
    """
    x, y = position
    # (in-bounds-and-enabled flag, candidate coordinate) in fixed order.
    candidates = [
        (x > 0, (x - 1, y)),                        # left
        (up and y > 0, (x, y - 1)),                 # up
        (x < width - 1, (x + 1, y)),                # right
        (down and y < height - 1, (x, y + 1)),      # down
    ]
    # The grown-check only runs when the bounds flag passed (short-circuit),
    # so out-of-range cells are never indexed.
    return [(nx, ny) for ok, (nx, ny) in candidates
            if ok and not grown[ny, nx]]
|
a3a7c5b70191e1cbbf51776dc05918ff1dc3c876
| 67,297
|
import logging
def get_logger(name):
    """
    Gets a logger with the given name, configured with a stream handler
    and INFO level.

    The handler is attached only if the logger has none yet: because
    ``logging.getLogger`` returns the same logger object for the same
    name, the original version added a new handler on every call, which
    produced duplicate log lines.
    """
    log = logging.getLogger(name)
    if not log.handlers:
        # First configuration of this logger: attach a single stream handler.
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s/%(module)s: %(message)s')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        log.addHandler(handler)
    log.setLevel(logging.INFO)
    return log
|
c74abdbb3f5c3dc16de49cf019f064732533ace8
| 67,301
|
def decode_reply(reply_code):
    """
    Returns True if the RETS request was successful, otherwise False

    Intended to fill the response dict's 'ok' field as an alternative to
    the RETS specification's wonky reply code bullshit.

    :param reply_code: a RETS reply code
    :type reply_code: str
    :rtype: bool
    :return: True if the reply code was 0 (success), False otherwise
    """
    # '0' is the only success code; everything else is a failure.
    return reply_code == '0'
|
a3a8aac63f3eee5a88f587801731aad8576e5b02
| 67,303
|
import yaml
def _parse_yaml_file(path):
    """Load and parse a local YAML file.

    Args:
        path: either a local file system path or a GCS path

    Returns:
        a Python object representing the parsed YAML data, or None when
        parsing fails (the YAML error is printed to stdout)
    """
    with open(path, 'r') as stream:
        try:
            parsed = yaml.safe_load(stream)
        except yaml.YAMLError as ex:
            # Best-effort: report the parse error and return None.
            print(ex)
            return None
        return parsed
|
f3f96c50122bafa36a301353a3b9dd4167987062
| 67,305
|
def helper_stationary_value(val):
    """
    Helper for creating a parameter which is stationary (not changing
    based on t) in a stationary fuzzy set.  Returns the static
    perturbation function.

    Parameters:
    -----------
    val : the stationary value
    """
    def perturbation(_t):
        # The time argument is ignored: the value never changes.
        return val
    return perturbation
|
1228b3a85a6026c18ebaf57b87841372e73566c6
| 67,306
|
def _CreateGroupLabel(messages, assignment_group_labels):
    """Create guest policy group labels.

    Args:
      messages: os config guest policy api messages.
      assignment_group_labels: List of dict of key: value pair.

    Returns:
      group_labels in guest policy.
    """
    group_labels = []
    # A None/empty input simply yields an empty result.
    for group_label in assignment_group_labels or []:
        additional = []
        for key, value in group_label.items():
            additional.append(
                messages.AssignmentGroupLabel.LabelsValue.AdditionalProperty(
                    key=key, value=value))
        labels_value = messages.AssignmentGroupLabel.LabelsValue(
            additionalProperties=additional)
        group_labels.append(messages.AssignmentGroupLabel(labels=labels_value))
    return group_labels
|
80c721f300e8b08fb8b53109e3632057589bf5e6
| 67,307
|
def impedance(vp, rho):
    """
    Compute acoustic impedance Z = vp * rho (velocity times density).
    """
    z = rho * vp
    return z
|
6be115c3d92b394a5b98cbf502d2bc753522a8a6
| 67,309
|
def print_id(id):
    """Return the id formatted as a dotted string, e.g. n1.n2.n3."""
    return ".".join(map(str, id))
|
0cc7a1c999e6aabae9256599df59060d6a407a4c
| 67,311
|
def dict_of_lists_to_list_of_dicts(dl):
    """
    Convert a dict of equal-length lists into a list of dicts, pairing
    the i-th element of every list under its key.

    Thanks to Andrew Floren from https://stackoverflow.com/a/33046935/142712
    :param dl: dict of lists
    :return: list of dicts
    """
    keys = list(dl)
    # zip over the value lists walks all of them in lockstep; zip stops at
    # the shortest list, matching the original behaviour.
    return [dict(zip(keys, combo)) for combo in zip(*(dl[k] for k in keys))]
|
c6d90a463c31f2f0d2dc9e4d72c8b09bdba6ccf5
| 67,317
|
def get_sentences_from_files(files):
    """Read the given files line by line into a single list (newlines kept)."""
    sentences = []
    for file_name in files:
        with open(file_name, 'r') as fp:
            # Iterating the file yields the same lines (with trailing
            # newlines) as readlines().
            for line in fp:
                sentences.append(line)
    return sentences
|
54212df80ee2dc71c66f39c4c9075831e2403a25
| 67,321
|
def get_columns_from_data_range_rows(data, start_row, end_row, columns):
    """
    Thin wrapper around pandas label-based indexing: select the rows from
    start_row to end_row (both inclusive, per ``DataFrame.loc`` semantics)
    restricted to the given columns.

    :param data: Pandas data frame
    :param start_row: row label to start from (inclusive)
    :param end_row: row label to end at (inclusive)
    :param columns: the columns which are to be selected
    :return: Pandas data frame
    """
    row_slice = slice(start_row, end_row)
    return data.loc[row_slice, columns]
|
550a2ec731d6d7ff817263fd01940f1948752001
| 67,324
|
import json
def is_valid_json(file_path: str) -> bool:
    """Return True if the file contains valid JSON, False otherwise.

    I/O errors (e.g. a missing file) still propagate; only JSON parse
    failures are converted to False.
    """
    try:
        with open(file_path) as fil:
            json.load(fil)
            return True
    except json.JSONDecodeError:
        return False
|
bc46fa011caa82a80daeb7e765e2185f5b08d51d
| 67,332
|
def snr(signal, bg):
    """
    Return the signal-to-noise ratio computed as (signal - bg) / bg.
    """
    excess = signal - bg
    return excess / bg
|
9bdf25734bbb27f56854731f869b31fcdc6800a9
| 67,334
|
def world_to_pixel(header, axis, value):
    """
    Calculate the pixel value for the provided world value using the WCS
    keywords on the specified axis.  The axis must be linear.

    :param header: The FITS header describing the axes.
    :param axis: The number of the target axis.
    :param value: The world value to be converted.
    :return: The pixel value (truncated to int).
    """
    suffix = str(axis)
    crpix = header['CRPIX' + suffix]
    crval = header['CRVAL' + suffix]
    cdelt = header['CDELT' + suffix]
    # Linear WCS: pixel = reference pixel + offset in world units / step.
    return int(crpix + (value - crval) / cdelt)
|
d436616cde7993f8dce8c142ba5623c7de5d1564
| 67,335
|
def find_cuds_object(criterion, root, rel, find_all, max_depth=float("inf"),
                     current_depth=0, visited=None):
    """
    Recursively finds an element inside a container
    by considering the given relationship.

    :param criterion: function that returns True on the Cuds object
        that is searched.
    :type criterion: Callable
    :param root: Starting point of search
    :type root: Cuds
    :param rel: The relationship (incl. subrelationships) to consider
    :type rel: Type[Relationship]
    :param find_all: Whether to find all cuds_objects with satisfying
        the criterion.
    :type find_all: bool
    :param max_depth: The maximum depth for the search.
    :type max_depth: Union(float, int)
    :param current_depth: Recursion depth of the current call. Internal;
        callers should leave it at 0.
    :param visited: Set of uids already inspected, shared across the whole
        recursion to avoid revisiting objects in cyclic graphs. Internal;
        created on the first call.
    :return: the element if found. With find_all=True a list of all
        matches is returned; with find_all=False the first match or None.
    :rtype: Union[Cuds, List[Cuds]]
    """
    # Shared across all recursive calls: marks this object as inspected.
    visited = visited or set()
    visited.add(root.uid)
    # The root itself may satisfy the criterion.
    output = [root] if criterion(root) else []
    if output and not find_all:
        # First match wins when only one result is requested.
        return output[0]
    if current_depth < max_depth:
        for sub in root.iter(rel=rel):
            if sub.uid not in visited:  # skip cycles / already-seen objects
                result = find_cuds_object(criterion=criterion,
                                          root=sub,
                                          rel=rel,
                                          find_all=find_all,
                                          max_depth=max_depth,
                                          current_depth=current_depth + 1,
                                          visited=visited)
                # find_all=False: the recursive call returned either a single
                # match or None — propagate a match immediately.
                if not find_all and result is not None:
                    return result
                # find_all=True: the recursive call returned a list of
                # matches — accumulate them.
                if result is not None:
                    output += result
    return output if find_all else None
|
b8432d8831c78ccaaf147d131b1314e0dc32c7e2
| 67,337
|
from typing import List
def dedupe_loops(loops: List[List]) -> List:
    """
    Deduplication of loops with same members: only the first loop seen for
    each set of members is retained.

    For example: for nodes 1, 2, 3 in the graph below
    [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    Loops of length 3 are 0->1->2 and 0->2->1. We only retain 1 of these
    loops after dedupe.

    Example 2: for a fully connected 4-node graph, loops of length 4 are
    [[0, 1, 2, 3], [0, 1, 3, 2], [0, 2, 1, 3], [0, 2, 3, 1],
     [0, 3, 1, 2], [0, 3, 2, 1]]
    After deduplication we will only retain [0, 1, 2, 3].

    Note: unlike the previous version, the caller's lists are NOT mutated
    (the old code called ``l.sort()`` on the input lists in place).

    :param loops: list of loops (each a list of node ids)
    :return: list of loops with duplicate member-sets removed
    """
    seen_member_sets = set()
    filtered = []
    for loop in loops:
        # sorted() works on a copy, so the input loop keeps its order.
        key = tuple(sorted(loop))
        if key not in seen_member_sets:
            seen_member_sets.add(key)
            filtered.append(loop)
    return filtered
|
5576ca769e7c592e8c10f6066f7cefaba83f5e62
| 67,338
|
import textwrap
def compact_simple_list(match):
    """Callback function. Given a simple list match, compact it and ensure
    that it wraps around by 80 characters.

    Params:
        match   The regular expression match (group 1 = prefix whose length
                sets the initial indent, group 2 = the list body)
    Returns:
        The string to replace the expression with
    """
    prefix = match.group(1)
    raw_lines = match.group(2).splitlines()
    # The hanging indent is taken from the second line of the body, if any.
    if len(raw_lines) > 1:
        second = raw_lines[1]
        hang_indent = len(second) - len(second.lstrip())
    else:
        hang_indent = 0
    # Collapse the body into a single space-separated string.
    collapsed = " ".join(line.strip() for line in raw_lines)
    wrapped = textwrap.wrap(
        collapsed,
        80,
        initial_indent=" " * len(prefix),
        subsequent_indent=" " * hang_indent,
    )
    # The leading indent of the first wrapped line is dropped because the
    # prefix itself is re-attached below.
    return prefix + "\n".join(wrapped).lstrip()
|
5889affc8dc10f747c25eece663cb07aa44c95c2
| 67,341
|
import re
import urllib.parse
def parse_stdout(stdout: str):
    """
    Parses stdout to determine remote_hostname, port, token, url

    Parameters
    ----------
    stdout : str
        Contents of the log file/stdout

    Returns
    -------
    dict
        A dictionary containing hostname, port, token, and url.  All values
        are None when no usable (non-127.0.0.1) URL is found; port is None
        when the URL carries no explicit port.
    """
    hostname, port, token, url = None, None, None, None
    # sorted() makes the choice deterministic: iterating a raw set picked an
    # arbitrary URL when several distinct URLs were present.
    urls = sorted(set(
        re.findall(
            r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
            stdout,
        )
    ))
    for url in urls:
        url = url.strip()
        if '127.0.0.1' in url:
            continue
        result = urllib.parse.urlparse(url)
        # partition tolerates a missing ':port' (split(':') raised
        # ValueError on URLs without an explicit port).
        hostname, _, port = result.netloc.partition(':')
        port = port or None
        if 'token' in result.query:
            token = result.query.split('token=')[-1].strip()
        break
    return {'hostname': hostname, 'port': port, 'token': token, 'url': url}
|
7a27362369a54ba32e8e0eec4482150318b40608
| 67,342
|
def get_study_period_ranges(data_length: int, test_period_length: int, study_period_length: int, index_name: str,
                            reverse=True,
                            verbose=1) -> dict:
    """
    Get study period ranges, going backwards in time, as a dict.

    :param data_length: Number of total dates in index data
    :param test_period_length: Length of test period
    :param study_period_length: Length of study period
    :param index_name: Index name
    :param reverse: Reverse ranges so the most recent period has the
        largest key
    :param verbose: Verbosity (1 prints a summary)
    :return: Dict of study period ranges, mapping period number to a
        (start, end) tuple of negative offsets from the end of the data
    """
    # Days usable once the non-test part of one study period is reserved.
    usable_days = data_length - (study_period_length - test_period_length)
    n_full_periods = usable_days // test_period_length
    remaining_days = usable_days % test_period_length

    if verbose == 1:
        print(f'Data set contains {data_length} individual dates.')
        print(f'Available index history for {index_name} allows for {n_full_periods} overlapping study periods.')
        print(f'{remaining_days} days are excluded from study data.')

    study_period_ranges = {}
    for period in range(n_full_periods):
        start = -(period * test_period_length + study_period_length)
        end = -(period * test_period_length + 1)
        study_period_ranges[period + 1] = (start, end)

    if reverse:
        # Reverse dict such that most recent period has largest index
        n_keys = len(study_period_ranges)
        study_period_ranges = {n_keys - key + 1: value
                               for key, value in study_period_ranges.items()}
    return study_period_ranges
|
19d1560278ade920b7063ecd0a2c89ded68b1216
| 67,345
|
from typing import Sized
import random
import binascii
def generate_pad(message: Sized) -> bytes:
    """
    Generates the One Time Pad (OTP) to be used for encrypting the message.

    Arguments:
        message: bytes - the message to generate a pad for
    Returns:
        bytes - the randomly generated pad as a hex string (two hex
        characters per message byte), e.g. a pad of [0x01, 0x02, 0xfe]
        becomes b"0102fe".  This makes printing the pad / encrypted data
        easier.
    """
    # One random byte (0-255) per character of the message.
    pad_bytes = bytes(random.randint(0, 255) for _ in range(len(message)))
    # hexlify converts the raw bytes to their hex representation.
    return binascii.hexlify(pad_bytes)
|
1100477eb34045052cf69f89d8702d775259026d
| 67,351
|
def label_date(ax, label, date, df):
    """Helper function to annotate a date.

    ``date`` is assumed to be in the index of ``df``; the y coordinate is
    taken from the "mean" column of that row.

    Parameters
    ----------
    ax : Axes
        The axes to draw to
    label : str
        The text of the label
    date : object in index of df
        The x coordinate
    df : DataFrame
        The data source

    Returns
    -------
    The Annotation created by ``ax.annotate``.
    """
    y_value = df.loc[date]["mean"]
    annotation_style = dict(
        ha="right",
        xytext=(-10, -30),
        textcoords="offset points",
        arrowprops={"arrowstyle": "->"},
    )
    return ax.annotate(label, (date, y_value), **annotation_style)
|
d70e104b0e00b590daaa00a156db54908e7461a2
| 67,353
|
def _ul_subvoxel_overlap(xs, x1, x2):
    """For an interval [x1, x2], return the index of the upper limit of the
    overlapping subvoxels whose borders are defined by the elements of xs.

    xs is assumed to be sorted in ascending order (borders of the
    subvoxels).  The result is clamped to [0, len(xs) - 1].
    """
    xmax = max(x1, x2)
    # Interval extends past the last border: clamp to the last index.
    if xmax >= xs[-1]:
        return len(xs) - 1
    # Interval lies entirely before the first border: clamp to 0.
    if xmax <= xs[0]:
        return 0
    # Otherwise return the first border index at or beyond xmax.
    for index, border in enumerate(xs):
        if not border < xmax:
            return index
    return len(xs) - 1
|
21a6ed3e6b7d718250aff829ceb5d863677f3e3d
| 67,358
|
def encodewithdict(unencoded, encodedict):
    """Encode certain characters in the string using an encode dictionary.

    :param unencoded: the input string
    :param encodedict: mapping of substring -> replacement
    :return: the string with every dictionary key replaced by its value
    """
    encoded = unencoded
    # .iteritems() was Python 2-only and raises AttributeError on Python 3;
    # .items() is the Python 3 equivalent.
    for key, value in encodedict.items():
        if key in encoded:
            encoded = encoded.replace(key, value)
    return encoded
|
730966b84a596cc9352f0af651d153381b115019
| 67,360
|
def round_to_factor(value, factor):
    """
    Round value to the nearest multiple of factor.  Factor can be a float.

    :param value: float
    :param factor: float
    :return: float
    """
    # How many whole factors fit (banker's rounding, as with built-in round).
    multiples = round(value / factor)
    return multiples * factor
|
ab23b290dddacec5d4bb4f349851845109fc1b8e
| 67,361
|
def keywords_mapper(keywords, package):
    """
    Update package keywords and return package.

    This is supposed to be an array of strings, but sometimes this is a
    string: a comma-separated string is split into stripped, non-empty
    parts; any other string becomes a single-element list.
    https://docs.npmjs.com/files/package.json#keywords
    """
    if isinstance(keywords, str):
        if ',' in keywords:
            keywords = [part.strip()
                        for part in keywords.split(',')
                        if part.strip()]
        else:
            keywords = [keywords]
    package.keywords = keywords
    return package
|
841a7fafe5f51fe3359e39d4015f94be7626f6bc
| 67,363
|
import hashlib
def md5(file_paths, chunk_size=1024*1024*1024):
    """ Calculate a md5 of lists of files.

    Args:
        file_paths: an iterable object contains files. Files will be
            concatenated orderly if there are more than one file
        chunk_size: unit is byte, default value is 1GB

    Returns:
        md5 hex digest string
    """
    digest = hashlib.md5()
    for path in file_paths:
        with open(path, 'rb') as fin:
            # iter() with a sentinel reads chunk_size bytes at a time
            # until an empty read signals end of file.
            for data in iter(lambda: fin.read(chunk_size), b''):
                digest.update(data)
    return digest.hexdigest()
|
79b12ac941a761ee15b243a72b0e5b04d6a61980
| 67,364
|
import mimetypes
def guess_extension(content_type):
    """
    Guess a file extension for a MIME type.

    Parameters
    -----------
    content_type: str
        MIME type

    Returns
    --------
    str
        Extension or None if not found.  "text/plain" is pinned to ".txt",
        and mimetypes' ".htm"/".jpe" answers are normalized to
        ".html"/".jpg".
    """
    if content_type == "text/plain":
        return ".txt"
    ext = mimetypes.guess_extension(content_type)
    # Normalize the less common spellings mimetypes may return.
    preferred = {".htm": ".html", ".jpe": ".jpg"}
    return preferred.get(ext, ext)
|
6a8d91cd33ea00aaf74584aa2124154e50216bca
| 67,366
|
def _read_text(file):
    """Return the entire contents of a UTF-8 encoded text file."""
    with open(file, mode='r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents
|
4bc7e9a83a02bdc6dee250958e67e99e7aea4950
| 67,369
|
def _list_of_command_args(command_args, conf_args_dict):
    """
    Creates a reduced list of argument-only commands from the namespace args
    dictionary by removing both non-argument commands and None arguments
    from the namespace args.

    Parameters
    ----------
    command_args(dict): A dictionary object that contains parsed args
        namespace of a command.
    conf_args_dict(dict): A dictionary object formed with argument as a key
        and a set of its non-conflicting args as value.

    Returns
    -------
    command_arg_keys_as_list(list): Modified list of command keys in the
        namespace.
    """
    # Keep only keys known to the conflict table whose value was supplied.
    return [key for key, value in command_args.items()
            if key in conf_args_dict and value is not None]
|
8ccce893396cabd41661ec13b20f1551ffc7c4ce
| 67,374
|
def dns_label_count(rows, args):
    """Return the number of labels in a domain (e.g. www.example.com = 3),
    as a (count_string, '') tuple.  A trailing dot (www.example.com.) does
    not add to the count."""
    domain = rows[args[0]]
    labels = domain.split(".")
    count = len(labels)
    # A trailing dot produces an empty final label; don't count it.
    if labels[-1] == "":
        count -= 1
    return (str(count), '')
|
f0ef232277dc7908449d799d2156dbedbadb21d0
| 67,375
|
from typing import Dict
def remove_answers(squad_dict: Dict) -> Dict:
    """Strip all answers from a SQuAD dev or test structure.

    Mutates the structure in place (every qa's 'answers' list is emptied)
    and returns the same object.
    """
    for datapoint in squad_dict:
        for paragraph in datapoint['paragraphs']:
            for question in paragraph['qas']:
                question['answers'] = []
    return squad_dict
|
149b23b07dfab32a3d5a5c57f8967d2b059b66c6
| 67,376
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.