| content (stringlengths 35–416k) | sha1 (stringlengths 40) | id (int64 0–710k) |
|---|---|---|
def get_frame_IDs(objects_archive, start, end, every):
"""
Returns list of ID numbers of the objects identified in each frame.
Parameters
----------
objects_archive : dictionary
Dictionary of objects identified in a video labeled by ID number
start, end, every : ints
start = index of first frame to analyze; end = index of last frame
        to analyze; every = analyze every `every`-th frame (e.g., if
        every = 3, analyzes every 3rd frame)
Returns
-------
frame_IDs : dictionary
Dictionary indexed by frame number in the video. Each entry is
a list of the ID numbers of the objects identified in that frame.
"""
# initializes dictionary of IDs for each frame
frame_IDs = {}
for f in range(start, end, every):
frame_IDs[f] = []
# loads IDs of objects found in each frame
for ID in objects_archive.keys():
obj = objects_archive[ID]
frames = obj.get_props('frame')
for f in frames:
frame_IDs[f] += [ID]
return frame_IDs
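
# A minimal usage sketch (not part of the original snippet), with a hypothetical
# stand-in for the tracked objects, which only need a get_props('frame') method:
class _FakeObj:
    def __init__(self, frames):
        self._frames = frames
    def get_props(self, prop):
        return self._frames  # only 'frame' is queried by get_frame_IDs

archive = {7: _FakeObj([0, 2]), 8: _FakeObj([2])}
print(get_frame_IDs(archive, start=0, end=4, every=2))  # -> {0: [7], 2: [7, 8]}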
|
4826a4d226460e07fbd015e59a052d9792e8ac6a
| 28,875
|
def plan_exists(plan):
"""This function can be used to check if a plan trajectory was computed.
Parameters
----------
plan : :py:obj:`!moveit_msgs.msg.RobotTrajectory`
The computed robot trajectory.
Returns
-------
:py:obj:`bool`
Bool specifying if a trajectory is present
"""
    # A trajectory is present if either the joint trajectory or the
    # multi-DOF joint trajectory contains at least one point
    return (
        len(plan.joint_trajectory.points) >= 1
        or len(plan.multi_dof_joint_trajectory.points) >= 1
    )
|
e2eb384119ed55cb3561e9485bddc3a221a03f92
| 28,876
|
def format_message_pagination(text, max_characters=1500):
"""Splits text with lines > the max_characters length into chunks.
:param text:
:returns:
"""
chunks = str()
messages = []
def is_newline_character(char): # noqa: WPS430
if char == "\n":
return True
return False
for char in text:
chunks += char
if len(chunks) > max_characters:
if is_newline_character(char):
messages.append(chunks)
chunks = str()
messages.append(chunks)
return messages
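
# A minimal usage sketch (hypothetical input), assuming the function above:
long_text = "a fairly long line of chat output\n" * 100
parts = format_message_pagination(long_text, max_characters=1500)
print(len(parts), [len(p) for p in parts])  # each chunk ends at a newline just past 1500 chars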
|
0ffa685ff084251f405f498e77490db0b300bfb6
| 28,878
|
import os
def mock_env_settings_file(mock_os_environ, mock_settings_file):
"""Set the settings file env variable to a temporary settings file."""
os.environ["TEST_STUFF_SETTINGS_FILE"] = mock_settings_file[0]
return mock_settings_file
|
0983959a9a57511f619e1f8f0f508bbf21fb0e45
| 28,879
|
def ISO_datestring(dt, cl):
""" Convert a DateTime object to an ISO datestring.
    Also fixes a conversion error: .isoformat() returns 12:00:00 for both
    noon and midnight.
    Also trims the result to report only date, hours and minutes.
"""
isodatestr = dt.isoformat()
if cl[4] == "12:00AM": # reset time to 00:00 since it incorrectly gets set to 12:00
isodatestr = isodatestr[0:11] + "00:00"
else:
isodatestr = isodatestr[0:16] # just slice off seconds and timezone
return isodatestr
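
# A hedged usage sketch (hypothetical calendar row `cl`, whose index 4 holds the
# original time string), assuming the function above:
from datetime import datetime
cl = ["", "", "", "", "12:00AM"]
# the upstream conversion incorrectly produced 12:00 for this midnight entry
print(ISO_datestring(datetime(2023, 5, 1, 12, 0), cl))  # -> '2023-05-01T00:00'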
|
01ab02d2aa05405cd955670362bddf4a7467f3b1
| 28,880
|
import numpy
def convert_ndarray_to_bytes(a):
"""Convert numpy array to numpy bytes array"""
return a.view(numpy.uint8)
|
1c5a688bfca4bc58ff31e409a133070accb7eea6
| 28,881
|
def suggest_patience(epochs: int) -> int:
"""Current implementation: 10% of total epochs, but can't be less than 5."""
assert isinstance(epochs, int)
return max(5, round(.1 * epochs))
|
e1631576d63dc3b62df636fb555739158035a25a
| 28,882
|
def offset_types(request):
"""
Fixture for all the datetime offsets available for a time series.
"""
return request.param
|
6ae26e175699dc3a304bff8755ce0fc6aecf0a32
| 28,883
|
def measure_diff(fakes, preds):
"""Measures difference between ground truth and prediction
fakes (float array): generated "true" global scores
preds (list list): list of [video_id: int, criteria_name: str,
score: float, uncertainty: float]
in same order
Returns:
(float): 100 times mean squared distance
between ground truth and predicted score
"""
diff = 0
for fake, pred in zip(fakes, preds):
f, p = round(fake, 2), pred[2]
diff += 100 * abs(f - p) ** 2
return diff / len(preds)
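
# A minimal usage sketch (hypothetical scores), assuming the function above:
fakes = [0.5, 1.0]
preds = [[0, "reliability", 0.4, 0.1], [1, "reliability", 1.2, 0.1]]
print(measure_diff(fakes, preds))  # 100 * mean of (0.1**2, 0.2**2) -> 2.5 (up to float rounding)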
|
824052778fe02620d52ad0ff5987e1f62363d7fc
| 28,884
|
def stringify(vals):
"""Return a string version of vals (a list of object implementing __str__)
Args:
vals (List[any]): List of object that implements __str__
Returns:
str: A string representation
"""
if type(vals) == list:
return '_'.join([str(e) for e in vals])
else:
return str(vals)
|
cbd681b2435a919a91a7eeb905ca3279a9baeea6
| 28,885
|
import logging
def get_stream_handler(
formatter: logging.Formatter, level: int
) -> logging.StreamHandler:
"""
Create a ready-to-go stream handler for a Logger.
Parameters
----------
    formatter : logging.Formatter
Formatter to apply to the handler.
level : int
Level to apply to the stream handler.
Returns
-------
logging.StreamHandler
"""
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(level)
return handler
|
2c6de5bec9fbcb3f7f76c1c21c0db2091b649c96
| 28,886
|
def print_to_log(message, log_file):
"""Append a line to a log file
:param message: The message to be appended.
:type message: ``str``
:param log_file: The log file to write the message to.
:type log_file: ``Path``
"""
    with open(log_file, "a") as file_handle:
        message = message.rstrip()  # rstrip() returns a new string; assign it
        file_handle.write(message + "\n")
    return 0
|
cc79449ac082e3e1053e43bfe6d87c7b617c1923
| 28,887
|
def OneWay(transform, name='unnamed'):
# language=rst
"""
Wrapper for staxplusplus networks.
:param transform: spp network
"""
_init_fun, _apply_fun = transform
def init_fun(key, input_shape, condition_shape):
transform_input_shape = input_shape if len(condition_shape) == 0 else (input_shape,) + condition_shape
return _init_fun(key, transform_input_shape)
def forward(params, state, log_px, x, condition, **kwargs):
network_input = x if len(condition) == 0 else (x, *condition)
z, updated_state = _apply_fun(params, state, network_input, **kwargs)
return log_px, z, updated_state
def inverse(params, state, log_pz, z, condition, **kwargs):
assert 0, 'Not invertible'
return init_fun, forward, inverse
|
7cc3ee9264395f7e01d1481ce869d319e3e2002f
| 28,888
|
def can_replace(func, n):
"""
Write a higher-order function that takes in a function func
    and a value n. It returns a function that takes in another
    function func2 and returns True if func could be replaced
    by func2 at value n; False otherwise.
>>> can_replace(lambda x: x, 1)(lambda x: x**2)
True
>>> can_replace(square, -1)(cube)
False
>>> can_replace(identity, -1)(identity)
True
"""
def inner(func2):
return func2(n) == func(n)
return inner
|
5c550700015194682e4aa0c32212c55df648dd93
| 28,890
|
def get_prefix_repr(prefix, activities):
"""
Gets the numeric representation (as vector) of a prefix
Parameters
-------------
prefix
Prefix
activities
Activities
Returns
-------------
prefix_repr
Representation of a prefix
"""
this_pref_repr = [0] * len(activities)
for act in prefix:
i = activities.index(act)
this_pref_repr[i] = this_pref_repr[i] + 1
return tuple(this_pref_repr)
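
# A minimal usage sketch (hypothetical activity log), assuming the function above:
activities = ["a", "b", "c"]
print(get_prefix_repr(["a", "b", "a"], activities))  # -> (2, 1, 0)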
|
3a194ba988ed5d1889e64df011cd7437e0354ce7
| 28,892
|
def sanitizeFilename(filename: str) -> str:
"""
Remove invalid characters <>:"/\\|?* from the filename.
"""
result = ''
for c in filename:
if c not in '<>:"/\\|?*':
result += c
return result
|
90068166ee5ca26cbe4713f2d4edb96be92b961c
| 28,893
|
def _os_symbol_file_path_prefix():
"""
    Path prefix for iOS system-related symbol files.
"""
return '~/Library/Developer/Xcode/iOS DeviceSupport'
|
046bb85a563ebb5c69539f7075fbe975f94583bf
| 28,895
|
import os
def get_filename(path):
""" Get the filename without extension
input/a.txt -> a
"""
head, tail = os.path.split(path)
return os.path.splitext(tail or os.path.basename(head))[0]
|
e7c3ec7899803ea701662aa3e57f7f576c6e5971
| 28,898
|
def format_time_delta(delta):
"""
Given a time delta object, convert it into a nice, human readable string.
For example, given 290 seconds, return 4m 50s.
"""
d = delta.days
    h = delta.seconds // 3600  # integer division, so Python 3 prints whole hours
    m = (delta.seconds % 3600) // 60
s = delta.seconds % 60
if d > 0:
return '%sd %sh' % (d, h)
elif h > 0:
return '%sh %sm' % (h, m)
else:
return '%sm %ss' % (m, s)
|
8b795cdb716f78dc7c0be5b7454a9d63cd703463
| 28,899
|
import re
def check_url(url):
""" Check if url is actually a valid YouTube link using regexp.
:param url: string url that is to be checked by regexp
:return: boolean True if the url matches the regexp (it is a valid YouTube page), otherwise False
"""
    youtube = re.compile(r'(https?://)?(w{3}\.)?(youtube\.com/watch\?v=|youtu\.be/).{11}')
    return bool(youtube.match(url))
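
# A minimal usage sketch, assuming the function above:
print(check_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ'))  # True
print(check_url('https://example.com/watch?v=dQw4w9WgXcQ'))      # False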
|
ab0874af82ebe4d7e9435a57a96feb8339da9e70
| 28,901
|
def eval_args(parser, dval_iter=500, show_val_batch_size=False,
dval_batch_size=256, show_val_set_size=False, dval_set_size=0):
"""This is a helper method of the method `parse_cmd_arguments` to add
an argument group for validation and testing options.
Arguments specified in this function:
- `val_iter`
- `val_batch_size`
- `val_set_size`
Args:
parser: Object of class :class:`argparse.ArgumentParser`.
dval_iter (int): Default value of argument `val_iter`.
show_val_batch_size (bool): Whether the `val_batch_size` argument should
be shown.
dval_batch_size (int): Default value of argument `val_batch_size`.
show_val_set_size (bool): Whether the `val_set_size` argument should be
shown.
dval_set_size (int): Default value of argument `val_set_size`.
Returns:
The created argument group, in case more options should be added.
"""
### Eval arguments
agroup = parser.add_argument_group('Evaluation options')
agroup.add_argument('--val_iter', type=int, metavar='N', default=dval_iter,
help='How often the validation should be performed ' +
'during training. Default: %(default)s.')
if show_val_batch_size:
agroup.add_argument('--val_batch_size', type=int, metavar='N',
default=dval_batch_size,
help='Batch size during validation/testing. ' +
'Default: %(default)s.')
if show_val_set_size:
agroup.add_argument('--val_set_size', type=int, metavar='N',
default=dval_set_size,
help='If unequal "0", a validation set will be ' +
'extracted from the training set (hence, ' +
'reducing the size of the training set). ' +
'This can be useful for efficiency reasons ' +
'if the validation set is smaller than the ' +
'test set. If the training is influenced by ' +
'generalization measures on the data (e.g., ' +
'a learning rate schedule), then it is good ' +
'practice to use a validation set for this. ' +
'It is also desirable to select ' +
'hyperparameters based on a validation set, ' +
'if possible. Default: %(default)s.')
return agroup
|
66cc6e1862ca1db18d69c7826049605f21916835
| 28,902
|
def backends_mapping(custom_backend, private_base_url):
"""
Creates four echo-api backends with the private paths being the keys in
the dict
"""
return {"/bar": custom_backend("backend1", endpoint=f"{private_base_url('echo_api')}/backend1"),
"/foo/boo": custom_backend("backend2", endpoint=f"{private_base_url('echo_api')}/backend2")}
|
30167e3165cc372d4812258a1bcb5e3973780642
| 28,903
|
def get_url_prefix(configdict, options):
"""
Get the url prefix based on options
"""
if 'servers' in options:
urlprefix = configdict['servers']['http'][0]
else:
urlprefix = configdict['url_prefix']
return urlprefix
|
4f7065e9dd10f0f99dc9953fd8f22c0ec34746d2
| 28,904
|
def termcap_distance(ucs, cap, unit, term):
"""termcap_distance(S, cap, unit, term) -> int
Match horizontal distance by simple ``cap`` capability name, ``cub1`` or
``cuf1``, with string matching the sequences identified by Terminal
instance ``term`` and a distance of ``unit`` *1* or *-1*, for right and
left, respectively.
Otherwise, by regular expression (using dynamic regular expressions built
    using ``cub(n)`` and ``cuf(n)``). Failing that, any of the standard SGR
sequences (``\033[C``, ``\033[D``, ``\033[nC``, ``\033[nD``).
Returns 0 if unmatched.
"""
assert cap in ('cuf', 'cub')
# match cub1(left), cuf1(right)
one = getattr(term, '_%s1' % (cap,))
if one and ucs.startswith(one):
return unit
# match cub(n), cuf(n) using regular expressions
re_pattern = getattr(term, '_re_%s' % (cap,))
_dist = re_pattern and re_pattern.match(ucs)
if _dist:
return unit * int(_dist.group(1))
return 0
|
731c4e02378a626c6a3c3dd7ee30848d88cc2f8a
| 28,905
|
def create_subnet(ec2_c, vpc, rt_table, networks, lab_tag):
""" Must have a least one subnet to provision EC2 instances. """
subnet = ec2_c.create_subnet(CidrBlock=networks, VpcId=vpc.id)
rt_table.associate_with_subnet(SubnetId=subnet.id)
return subnet
|
05fb3cce87e4134d1a0c149b72381f5d6bf45375
| 28,906
|
def _bytes_to_string(value: bytes) -> str:
"""Decode bytes to a UTF-8 string.
Args:
value (bytes): The bytes to decode
Returns:
str: UTF-8 representation of bytes
Raises:
UnicodeDecodeError
"""
return value.decode(encoding="utf-8")
|
b33508db0184f21854c734bbef126ec39d95c7b1
| 28,907
|
def merge(intervals):
""" Turns `intervals` [[0,2],[1,5],[7,8]] to [[0,5],[7,8]].
"""
    out = []
    for i in sorted(intervals, key=lambda i: i[0]):
        if out and i[0] <= out[-1][1]:
            # overlap: extend the previous interval in place
            out[-1][1] = max(out[-1][1], i[1])
        else:
            out.append(i)
    return out
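
# A minimal usage sketch matching the docstring example:
print(merge([[0, 2], [1, 5], [7, 8]]))  # -> [[0, 5], [7, 8]]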
|
b0a9eb30f81132f0af568f442465bdd9ce19ab83
| 28,908
|
def date_day_of_week(date):
"""Return the day of the week on which the given date occurred."""
day_of_week = date.strftime('%A')
return day_of_week
|
14e5a60b3089f2ec1aed18f71929c73ea1b50731
| 28,909
|
import torch
def scatter_embs(input_embs, inputs):
"""
For inputs that have 'input_embs' field passed in, replace the entry in input_embs[i] with the entry
from inputs[i]['input_embs']. This is useful for the Integrated Gradients - for which the predict is
called with inputs with 'input_embs' field which is an interpolation between the baseline and the real calculated
input embeddings for the sample.
:param input_embs: tensor of shape B x S x h of input embeddings according to the input sentences.
    :param inputs: list of dictionaries (samples), for which the 'input_embs' field might be specified
:return: tensor of shape B x S x h with embeddings (if passed) from inputs inserted to input_embs
"""
interp_embeds = [(ind, ex.get('input_embs')) for ind, ex in enumerate(inputs)]
for ind, embed in interp_embeds:
if embed is not None:
input_embs[ind] = torch.tensor(embed)
return input_embs
|
d0966c15f51b1b692995cd25076e455bea8a2b8a
| 28,910
|
def load_modules(agent_classes):
"""Each agent class must be in module class_name.lower().
Returns a dictionary class_name->class"""
def load(class_name):
module_name = class_name.lower() # by convention / fiat
module = __import__(module_name)
agent_class = module.__dict__[class_name]
return (class_name, agent_class)
return dict(map(load, agent_classes))
|
958a11ceb8f9442af6dd0a1e210f8efda04d5461
| 28,911
|
import os
import json
def json_from_file(string: str) -> dict:
"""load a `string` JSON file
Args:
string: a path to a JSON file
Return:
An `output` dictionary composed of:
- `output['input_file']` is the path of the JSON file
- `output['json']` is the contents of the JSON file as a Python object
(`json.load()`).
Raises:
FileNotFoundError: If the `string` file does not exist.
"""
if not os.path.isfile(string):
raise FileNotFoundError(string)
# read the file
with open(string, 'r', encoding='utf-8') as read_file:
data = json.load(read_file)
# return the content of the file
return {'input_file': string, 'json': data}
|
bf3efe8027767a4ec6728cc222ead468721428e2
| 28,912
|
def sanitizeStr(data):
"""
    Escape all characters that would trigger an error.
Parameters
----------
data: str
the str to sanitize
Returns
-------
str
The sanitized data.
"""
data = " ".join(data.split())
new_msg = []
for letter in data:
if letter in ['"',"\\"]:
new_msg.append("\\")
new_msg.append(letter)
return "".join(new_msg)
|
6fcd1455a01997d526cfd178d98ee3e9eca3c888
| 28,915
|
from collections import OrderedDict
def group_unique_values(items):
"""group items (pairs) into dict of lists.
Values in each group stay in the original order and must be unique
Args:
items: iterable of key-value pairs
Returns:
dict of key -> lists (of unique values)
Raises:
ValueError if value in a group is a duplicate
"""
result_lists = OrderedDict()
result_sets = OrderedDict()
for key, val in items:
if key not in result_lists:
result_lists[key] = []
result_sets[key] = set()
if val in result_sets[key]:
raise ValueError("Duplicate value: %s" % val)
result_sets[key].add(val)
result_lists[key].append(val)
return result_lists
|
cd25d657117b34fe408c27149ddf034f3956383d
| 28,917
|
import io
import csv
def scanlist_csv() -> io.StringIO:
"""
Generates a placeholder ScanList.csv
"""
header = [
"No.",
"Scan List Name",
"Scan Channel Member",
"Scan Channel Member RX Frequency",
"Scan Channel Member TX Frequency",
"Scan Mode",
"Priority Channel Select",
"Priority Channel 1",
"Priority Channel 1 RX Frequency",
"Priority Channel 1 TX Frequency",
"Priority Channel 2",
"Priority Channel 2 RX Frequency",
"Priority Channel 2 TX Frequency",
"Revert Channel",
"Look Back Time A[s]",
"Look Back Time B[s]",
"Dropout Delay Time[s]",
"Dwell Time[s]",
]
sio = io.StringIO()
writer = csv.writer(sio, dialect="d878uvii")
writer.writerow(header)
return sio
|
c5c4a8e6d860c3984c7c4af4ab869ca521b1c8cd
| 28,918
|
def standard_program_songs():
"""
Standard program songs.
"""
standard_program_songs = [
"song1",
"song2",
"song3",
"offering",
"response",
]
return standard_program_songs
|
b8e4a997541794894d875aa80ea11bad32abbdae
| 28,919
|
def getTableRADecKeys(tab):
"""Returns the column names in the table in which RA, dec coords are stored, after trying a few possible
name variations.
Args:
tab (:obj:`astropy.table.Table`): The table to search.
Returns:
Name of the RA column, name of the dec. column
"""
RAKeysToTry=['ra', 'RA', 'RADeg']
decKeysToTry=['dec', 'DEC', 'decDeg', 'Dec']
RAKey, decKey=None, None
for key in RAKeysToTry:
if key in tab.keys():
RAKey=key
break
for key in decKeysToTry:
if key in tab.keys():
decKey=key
break
if RAKey is None or decKey is None:
raise Exception("Couldn't identify RA, dec columns in the supplied table.")
return RAKey, decKey
|
93807fe56826ac680605b0494af60ddc6a5014a8
| 28,920
|
def convert_price(price):
"""Convert a price"""
if price[0] == u'\xa3':
return float(price[1:])
return float(price)
|
d5875bd87a1e772bac1aed1e7dc78928f1fb81fe
| 28,921
|
import re
def generate_filename_chart(series, args):
"""returns the generated basename for files for this series"""
s = str(series.get("title")).strip().replace(' ', '_')
title_slug = re.sub(r'(?u)[^-\w.]', '', s)
return f'tv_show_ratings_{series["movie_id"]}_{title_slug}.{args.format}'
|
d08451aa3fc10129bf811c28e63ce322d88efd5c
| 28,923
|
import typing
def to_camel(d: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
"""Converts dictionary keys from snake case to camel case."""
def convert(key: str) -> str:
parts = key.split("_")
return parts[0] + "".join(part.capitalize() for part in parts[1:])
return {convert(k): v for k, v in d.items()}
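
# A minimal usage sketch, assuming the function above:
print(to_camel({"user_id": 1, "created_at_time": "now"}))
# -> {'userId': 1, 'createdAtTime': 'now'}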
|
799d295aef19fbf4017eba477ce2f0f3d6a6061e
| 28,924
|
import os
def pid_list(ext_name):
""" Return list of PIDs for program name and arguments
Whitespace output is compressed to single space
"""
# Just grep up to first space in command line. It was failing on !
prg_name = ext_name.split(' ', 1)[0]
    all_lines = os.popen("ps aux | grep -v grep | grep " +
                         "'" + prg_name + "'").read().strip().splitlines()
    PID = []
    for line in all_lines:
        line = ' '.join(line.split())  # compress whitespace to single space
        PID.append(int(line.split(' ', 2)[1]))  # second field is the PID
    return PID
|
09ff713b66c93a1fe0db59cf8cbb35b4b2732a6f
| 28,925
|
def create_astropy_time(py_obj, h_group, name, **kwargs):
""" dumps an astropy Time object
Args:
        py_obj: python object to dump; should be an astropy Time object.
        h_group (h5.File.group): group to dump data into.
        name (str): name of the dataset to create within the group.
"""
data = py_obj.value
dtype = str(py_obj.value.dtype)
# Need to catch string times
if '<U' in dtype:
dtype = dtype.replace('<U', '|S')
data = []
for item in py_obj.value:
data.append(str(item).encode('ascii'))
d = h_group.create_dataset(name, data=data, dtype=dtype, **kwargs)
fmt = str(py_obj.format).encode('ascii')
scale = str(py_obj.scale).encode('ascii')
d.attrs['format'] = fmt
d.attrs['scale'] = scale
    return d
|
c11fc0e3737913363ae4f60df3dfe52c744b48e3
| 28,926
|
def format_channel_link(name: str, channel_id: str):
"""
Formats a channel name and ID as a channel link using slack control sequences
https://api.slack.com/docs/message-formatting#linking_to_channels_and_users
>>> format_channel_link('general', 'C024BE7LR')
'<#C024BE7LR|general>'
"""
return '<#{}|{}>'.format(channel_id, name)
|
2745a21960c7a2200e07e28bc5644a09f609b8ba
| 28,927
|
import pickle
def load_pickle(filepath_str):
"""
Load pickled results.
Inputs:
filepath_str: path to the pickle file to load
Returns:
loaded pickle file
"""
with open(filepath_str, 'rb') as pickle_file:
return pickle.load(pickle_file)
|
a7cd817327e928a8d1f3ff3290923e9b878d2a06
| 28,928
|
import os
def find_recording_dir(test_file):
""" Find the directory containing the recording of given test file based on current profile. """
return os.path.join(os.path.dirname(test_file), 'recordings')
|
4a0071a1cc87792f4cf8ad17b93ab0d443b71801
| 28,929
|
def local_ij_delta_to_class(local_ij_delta):
"""
:param local_ij_delta: tuple (i, j) returned from local_ij_delta
    :return: a value 0-5 for each of the possible adjacent hexagons, or -1 if
        the (i,j) tuple is representing a non-adjacent hexagon coordinate
"""
if (local_ij_delta == (0, 1)):
return 0
elif (local_ij_delta == (1, 0)):
return 1
elif (local_ij_delta == (0, -1)):
return 2
elif (local_ij_delta == (-1, 0)):
return 3
elif (local_ij_delta == (-1, -1)):
return 4
elif (local_ij_delta == (1, 1)):
return 5
else:
return -1
|
ed16645346353304c62b4c6bd7e73ee7b3fb2ead
| 28,930
|
import torch
def get_devices(cuda_device="cuda:0", seed=1):
"""Gets cuda devices
"""
device = torch.device(cuda_device)
torch.manual_seed(seed)
# Multi GPU?
num_gpus = torch.cuda.device_count()
if device.type != 'cpu':
print('\033[93m' + 'Using CUDA,', num_gpus, 'GPUs\033[0m')
torch.cuda.manual_seed(seed)
return device, num_gpus
|
c2115ed5409d0e73da25ab91f33dc8d479cd1034
| 28,933
|
def prior_creator(vector, priors_lowbounds, priors_highbounds):
"""
Generates flat priors between *priors_lowbounds and *priors_highbounds for parameters in *vector
:param vector: array containing parameters optimized within flat priors
:param priors_lowbounds: array containing lower bound of flat priors
    :param priors_highbounds: array containing upper bound of flat priors
    :return: selection. selection = True if all *vector entries are within their flat prior. Otherwise selection = False
    """
    selection = True
    for i, entry in enumerate(vector):
        if not (priors_lowbounds[i] < entry < priors_highbounds[i]):
            selection = False
    return selection
|
6351b1946daf2f956b45dc181d7192aa2e70fbbf
| 28,934
|
def read_dataset_file_metadata(client, dataset_name, filename):
"""Return metadata from dataset's YAML file."""
with client.with_dataset(dataset_name) as dataset:
assert client.get_dataset_path(dataset.name).exists()
for file_ in dataset.files:
if file_.path.endswith(filename):
return file_
|
7bf9079614b02d984f722a593fbb81a3555705f2
| 28,935
|
import json
def format_json(d):
"""
    Serialize a dictionary to a JSON string
"""
return json.dumps(d)
|
1223ee362d7f5461bc9878eff5a57b84feafbd67
| 28,937
|
def to_string(pkt):
"""Pretty-prints a packet."""
name = pkt._name
detail = ''
if name == 'AppleMIDIExchangePacket':
detail = '[command={} ssrc={} name={}]'.format(
pkt.command.decode('utf-8'), pkt.ssrc, pkt.name
)
elif name == 'MIDIPacket':
items = []
for entry in pkt.command.midi_list:
command = entry.command
if command in ('note_on', 'note_off'):
items.append('{} {} {}'.format(command, entry.params.key, entry.params.velocity))
elif command == 'control_mode_change':
items.append(
'{} {} {}'.format(command, entry.params.controller, entry.params.value)
)
else:
items.append(command)
detail = ' '.join(('[{}]'.format(i) for i in items))
return '{} {}'.format(name, detail)
|
ecdd0dfe3dd9bb2cb24351567520fd821619e19c
| 28,939
|
def overflow_error():
"""Value doesn't fit in object."""
try:
int(float('inf'))
except OverflowError:
return "infinite is too big"
|
ea07b420001e57d6efd1ab9fa4afb335f81c69fa
| 28,940
|
def parse_args(parser):
""" Takes ArgumentParser and parses arguments """
parser.add_argument(
"-p",
"--path",
required=True,
help="Path to database",
action="store",
type=str)
parser.add_argument(
"-s",
"--scema",
required=True,
help="Path to database scema script",
action="store",
type=str)
parser.add_argument(
"-b",
"--bot",
required=True,
help="Which bot should be used",
action="store",
type=str)
parser.add_argument(
"-head",
"--headless",
required=False,
help="Should browser be run headlessly",
action="count")
parser.add_argument(
"-bl",
"--block",
required=False,
help="Path to ad blocking extension",
action="store",
type=str)
return parser.parse_args()
|
7978aca0fd9fd1a6659e03d4c8c5aa814e45c840
| 28,941
|
import copy
import numpy
def _determine_stream_spread_single(sigomatrixEig,
thetasTrack,
sigOmega,
sigAngle,
allinvjacsTrack):
"""sigAngle input may either be a function that returns the dispersion in
perpendicular angle as a function of parallel angle, or a value"""
#Estimate the spread in all frequencies and angles
sigObig2= sigOmega(thetasTrack)**2.
tsigOdiag= copy.copy(sigomatrixEig[0])
tsigOdiag[numpy.argmax(tsigOdiag)]= sigObig2
tsigO= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigOdiag),
numpy.linalg.inv(sigomatrixEig[1])))
#angles
if hasattr(sigAngle,'__call__'):
sigangle2= sigAngle(thetasTrack)**2.
else:
sigangle2= sigAngle**2.
tsigadiag= numpy.ones(3)*sigangle2
tsigadiag[numpy.argmax(tsigOdiag)]= 1.
tsiga= numpy.dot(sigomatrixEig[1],
numpy.dot(numpy.diag(tsigadiag),
numpy.linalg.inv(sigomatrixEig[1])))
#correlations, assume half correlated for now (can be calculated)
correlations= numpy.diag(0.5*numpy.ones(3))*numpy.sqrt(tsigOdiag*tsigadiag)
correlations[numpy.argmax(tsigOdiag),numpy.argmax(tsigOdiag)]= 0.
correlations= numpy.dot(sigomatrixEig[1],
numpy.dot(correlations,
numpy.linalg.inv(sigomatrixEig[1])))
#Now convert
fullMatrix= numpy.empty((6,6))
fullMatrix[:3,:3]= tsigO
fullMatrix[3:,3:]= tsiga
fullMatrix[3:,:3]= correlations
fullMatrix[:3,3:]= correlations.T
return numpy.dot(allinvjacsTrack,numpy.dot(fullMatrix,allinvjacsTrack.T))
|
d6e5e101d327b0fef714ae5d6d67abc94f92b069
| 28,942
|
from typing import List
def get_input_size_with_dependencies(
combiner_output_size: int, dependencies: List[str], other_output_features # Dict[str, "OutputFeature"]
):
"""Returns the input size for the first layer of this output feature's FC stack, accounting for dependencies on
other output features.
In the forward pass, the hidden states of any dependent output features get concatenated with the combiner's output.
If this output feature depends on other output features, then the input size for this feature's FCStack is the sum
of the output sizes of other output features + the combiner's output size.
"""
input_size_with_dependencies = combiner_output_size
for feature_name in dependencies:
if other_output_features[feature_name].num_fc_layers:
input_size_with_dependencies += other_output_features[feature_name].fc_stack.output_shape[-1]
else:
# 0-layer FCStack. Use the output feature's input size.
input_size_with_dependencies += other_output_features[feature_name].input_size
return input_size_with_dependencies
|
bb4ad34718829a7e545f81b3b83089cb3d11eaaa
| 28,944
|
def delete_after(list, key):
"""
Return a list with the item after the first occurrence of the key
(if any) deleted.
"""
if list == ():
return ()
else:
head1, tail1 = list
if head1 == key:
# Leave out the next item, if any
if tail1 == ():
return list
else:
head2, tail2 = tail1
return (head1, tail2)
else:
return (head1, delete_after(tail1, key))
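
# A minimal usage sketch: lists here are nested (head, tail) pairs ending in ():
lst = (1, (2, (3, ())))
print(delete_after(lst, 1))  # -> (1, (3, ())): the item after 1 is removed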
|
047ad50c25c1c6a2f2d25c5fba1d813b45bd1e17
| 28,945
|
def repeat(a, repeats, axis=None):
"""Repeat arrays along an axis.
Args:
a (cupy.ndarray): Array to transform.
repeats (int, list or tuple): The number of repeats.
axis (int): The axis to repeat.
Returns:
cupy.ndarray: Transformed array with repeats.
.. seealso:: :func:`numpy.repeat`
"""
return a.repeat(repeats, axis)
|
4a7a9382b9aa125dc66c2aaf8da0567c49d9aaf3
| 28,948
|
def resolve_variables(template_configuration: dict) -> dict:
"""
The first step of the lifecycle is to collect the variables provided in the configuration.
This will add the name and value of each variable to the run-configuration.
:param template_configuration:
:return:
"""
run_configuration = {}
variables = template_configuration.get('variables')
if variables:
for variable in variables:
if variable['name']:
run_configuration[variable['name']] = variable['value']
else:
                raise AttributeError("variable entry is missing a 'name'")
return run_configuration
|
60fb49eaa7ffac3b677bac450402c778a6a8832f
| 28,950
|
def read_gr(filename):
"""Reas a single graph from a file."""
f = open(filename, "r")
string = f.readline().split()
num_nodes = int(string[0])
num_edges = int(string[1])
graph = {}
indeg_graph = {}
g_map = {}
inv_map = {}
g_list = []
counter = 0
for line in f:
line = line.split()
u = int(line[0])
v = int(line[1])
weight = int(float(line[2]))
label = line[3]
if u not in graph:
graph[u] = [(v, weight, label)]
g_map[u] = counter
inv_map[counter] = u
counter += 1
else:
graph[u].append((v, weight, label))
#######
if v not in graph:
graph[v] = []
g_map[v] = counter
inv_map[counter] = v
counter += 1
if v not in indeg_graph:
indeg_graph[v] = [(u, weight, label)]
else:
indeg_graph[v].append((u, weight, label))
if u not in indeg_graph:
indeg_graph[u] = []
#######
# g_list = [i for i in range(len(g_map))]
f.close()
return graph, g_list, g_map, inv_map, indeg_graph
|
f9cd04b93977732cae46ec8f814f04bb1f21bbf0
| 28,951
|
import pickle
def loadlists():
"""Function to load all data"""
with open("./data/members.txt", "rb") as file:
members = pickle.load(file)
with open("./data/signups.txt", "rb") as file:
signups = pickle.load(file)
with open("./data/joinrequests.txt", "rb") as file:
joinrequests = pickle.load(file)
with open("./data/feedback.txt", "rb") as file:
feedback = pickle.load(file)
with open("./data/videostarspickle.txt", "rb") as file:
videostars = pickle.load(file)
lists = {
"members": members,
"joinrequests": joinrequests,
"feedback": feedback,
"videostars": videostars,
"signups": signups,
}
return lists
|
24a348d122fe11baa5075e84447a7c23746cac82
| 28,952
|
import json
import base64
def extract_async_task_result_json_values(result_data):
"""Companion function to perform the inverse of `build_async_task_result()`
"""
payload = json.loads(result_data)
content = base64.b64decode(payload['content'])
content_type = payload['content_type']
filename = payload['filename']
return (content, content_type, filename,)
|
460d992af67f5b31508449f622af3d413b24f37f
| 28,953
|
def mini_max_sum(array):
"""
    Return the minimum and maximum sums obtainable by summing all but
    one element of the array.
"""
array.sort()
min_sum = sum(array[:-1])
max_sum = sum(array[1:])
return min_sum, max_sum
|
5d54fe230c96cb155adbf950b0b4ca71502dbcbe
| 28,954
|
def getsupportedcommands(qhlp, dostrip=True):
"""Parse qHLP answer and return list of available command names.
@param qhlp : Answer of qHLP() as string.
@param dostrip : If True strip first and last line from 'qhlp'.
@return : List of supported command names (not function names).
"""
qhlp = qhlp.splitlines()
if dostrip:
qhlp = qhlp[1:-1]
cmds = []
for line in qhlp:
line = line.upper()
cmds.append(line.split()[0].strip())
return cmds
|
90cce0dd689f836b57788632681d33f3f717d239
| 28,955
|
def text_to_json(f):
"""Performs basic parsing of an AFDO text-based profile.
This parsing expects an input file object with contents of the form generated
by bin/llvm-profdata (within an LLVM build).
"""
results = {}
curr_func = None
curr_data = []
for line in f:
if not line.startswith(' '):
if curr_func:
results[curr_func] = ''.join(curr_data)
curr_data = []
curr_func, rest = line.split(':', 1)
curr_func = curr_func.strip()
curr_data.append(':' + rest)
else:
curr_data.append(line)
if curr_func:
results[curr_func] = ''.join(curr_data)
return results
|
efdf3cbba8fbacba78e05cf726dd619b54863866
| 28,956
|
def to_rational(s):
"""Convert a raw mpf to a rational number. Return integers (p, q)
such that s = p/q exactly."""
sign, man, exp, bc = s
if sign:
man = -man
if bc == -1:
raise ValueError("cannot convert %s to a rational number" % man)
if exp >= 0:
return man * (1<<exp), 1
else:
return man, 1<<(-exp)
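
# A minimal usage sketch: a raw mpf is a (sign, mantissa, exponent, bitcount)
# tuple in mpmath's internal representation:
print(to_rational((0, 3, -1, 2)))  # 3 * 2**-1 = 3/2 -> (3, 2)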
|
3dccd2acc324d8b748fe95290c49d961f1c636e1
| 28,957
|
def int2ascii(i: int) -> str:
"""Convert an integer to an ASCII character.
Args:
i (int): Integer value to be converted to ASCII text.
Note:
The passed integer value must be <= 127.
Raises:
ValueError: If the passed integer is > 127.
Returns:
str: The ASCII character associated to the passed integer.
"""
if i > 127:
raise ValueError('The passed integer value must be <= 127.')
return chr(i)
|
f46ed05d425f9277ea6c97a0f8bafb070b15091c
| 28,958
|
def null_count():
"""cleans Pandas Dataframes"""
return 'this is a function'
|
4325a260f03c6e2de1d4e5cb56f0623628a53c4b
| 28,959
|
def strip_dollars(text):
"""
    Strip leading and trailing dollar symbols from text
Parameters
----------
text : str
Text to remove dollars from
"""
return text.strip('$')
|
ca084d64fb928f2374cbbf0a453bead52a2682a0
| 28,962
|
def get_from_dict_or_default(key, dict, default):
"""Returns value for `key` in `dict` otherwise returns `default`"""
if key in dict:
return dict[key]
else:
return default
|
14ad53c4cac7f554cfa537edeaf7a11e1e8ecac3
| 28,963
|
from typing import Mapping
def genericDictValidator(value, prototype):
"""
Generic. (Added at version 3.)
"""
# not a dict
if not isinstance(value, Mapping):
return False
# missing required keys
for key, (typ, required) in prototype.items():
if not required:
continue
if key not in value:
return False
# unknown keys
for key in value.keys():
if key not in prototype:
return False
# incorrect types
for key, v in value.items():
prototypeType, required = prototype[key]
if v is None and not required:
continue
if not isinstance(v, prototypeType):
return False
return True
|
0ad002c33c38a50f06041f133e98fc79a2307d36
| 28,964
|
import math
def distance_formula(x1: float, y1: float, x2: float, y2: float) -> float:
"""
Distance between two points is defined as the square root of (x2 - x1)^2 + (y2 - y1) ^ 2
:raises TypeError: Any of the values are non-numeric or None.
"""
return math.sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))
|
5c1a4706365a2347bc23d7efcc74caa003405c0e
| 28,965
|
import os
def _is_path_in(path, base):
"""Returns true if **path** is located under the **base** directory."""
if not path or not base: # empty path may happen, base too
return False
rp = os.path.relpath(path, base)
return (not rp.startswith(os.path.pardir)) and (not rp == os.path.curdir)
|
c583bab64daae496973e6ea75738f5740d14b2b0
| 28,966
|
def validate_asn(asn):
"""
Validate the format of a 2-byte or 4-byte autonomous system number
:param asn: User input of AS number
:return: Boolean: True if valid format, False if invalid format
"""
try:
if "." in str(asn):
left_asn, right_asn = str(asn).split(".")
asn_ok = (0 <= int(left_asn) <= 65535) and \
(0 <= int(right_asn) <= 65535)
else:
asn_ok = 0 <= int(asn) <= 4294967295
except ValueError:
asn_ok = False
return asn_ok
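
# A minimal usage sketch covering plain and dotted (asdot) notation:
print(validate_asn("64512"))    # True  (2-byte private ASN)
print(validate_asn("1.10"))     # True  (asdot 4-byte notation)
print(validate_asn("70000.1"))  # False (left half exceeds 65535)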
|
14806fc04132c06dbb75abb2ceefd0340b7845e6
| 28,967
|
def make_incrementtor(n):
"""Do thing , but document it
jiushiwo
"""
return lambda x:x+n
|
92f82f8d17b587305c4e486cc826b7ddd3156de7
| 28,968
|
def bloch_sphere_plot_check(function):
"""Decorator to check the arguments of plotting bloch sphere function.
Arguments:
function {} -- The tested function
"""
def wrapper(u, v, w, xfigsize=15, yfigsize=7.5, frame_on=False, tight_layout_on=False, \
style='dark_background', surface_on=True, wireframe_on=True, surface_cmap='Blues_r', \
surface_alpha=0.3, wireframe_color='#d3d3d3', wireframe_linewidth=0.075, \
quiver_color='#ffffff', quiver_linewidth=1.5, quiver_ratio=0.1, line_color='#d3d3d3', \
line_linewidth=0.3, circle_edgecolor='#d3d3d3', circle_facecolor='none', \
circle_linewidth=0.3):
"""This function visualizes the qubit using its bloch coordinates and the matplotlib module.
Arguments:
u {int, float} -- 1st coordinate of Bloch representation
v {int, float} -- 2nd coordinate of Bloch representation
w {int, float} -- 3rd coordinate of Bloch representation
Keyword Arguments:
xfigsize {int, float} -- X size of figure (default: {15})
yfigsize {int, float} -- Y size of figure (default: {7.5})
frame_on {bool} -- Frame (default: {False})
tight_layout_on {bool} -- Tight layout (default: {False})
style {str} -- Style (default: {'dark_background'})
surface_on {bool} -- Surface (default: {True})
wireframe_on {bool} -- Wireframe (default: {True})
surface_cmap {str} -- Surface cmap (default: {'Blues_r'})
surface_alpha {int, float} -- Surface alpha (default: {0.3})
wireframe_color {str} -- Wireframe color (default: {'#d3d3d3'})
wireframe_linewidth {int, float} -- Width of wireframe line (default: {0.075})
quiver_color {str} -- Quiver color (default: {'#ffffff'})
quiver_linewidth {int, float} -- Width of quiver line (default: {1.5})
quiver_ratio {int, float} -- Quiver ratio (default: {0.1})
line_color {str} -- Line color (default: {'#d3d3d3'})
line_linewidth {int, float} -- Width of line (default: {0.3})
circle_edgecolor {str} -- Edge color of circle (default: {'#d3d3d3'})
circle_facecolor {str} -- Face color of circle (default: {'none'})
circle_linewidth {int, float} -- Width of circle line (default: {0.3})
Raises:
ValueError, TypeError
Examples:
>>> import qvantum
>>>
>>> q = qvantum.Random_Qubit()
>>> q.show()
'|Ψ> = (0.6257-0.4027i)|0> + (-0.5114+0.4299i)|1>'
>>> u = qvantum.bloch_coords(q)[0]
>>> v = qvantum.bloch_coords(q)[1]
>>> w = qvantum.bloch_coords(q)[2]
>>> qvantum.bloch_sphere_plot(u, v, w)
"""
if all(isinstance(elem, (int, float)) for elem in [u, v, w]):
if round(u ** 2 + v ** 2 + w ** 2 - 1, 10) == 0:
return function(u, v, w, xfigsize, yfigsize, frame_on, tight_layout_on, \
style, surface_on, wireframe_on, surface_cmap, surface_alpha, \
wireframe_color, wireframe_linewidth, quiver_color, quiver_linewidth, \
quiver_ratio, line_color, line_linewidth, circle_edgecolor, \
circle_facecolor, circle_linewidth)
else:
raise ValueError('Invalid input! u, v and w must satisfy: ' +\
'u\u00b2 + v\u00b2 + w\u00b2 = 1.')
else:
raise TypeError('Invalid input! u, v, and w must be integer or float.')
return wrapper
|
a377a8b426ae004e0c22999c35b1a12dfb25e371
| 28,969
|
import math
def get_points_distance(point1, point2):
"""
Gets the distance between two points
:param point1: tuple with point 1
:param point2: tuple with point 2
:return: int distance
"""
return int(math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2))
|
40af84836715b49ba531fe5113e40230110d49b9
| 28,971
|
from typing import Tuple
import unicodedata
def validate_ae(value: str) -> Tuple[bool, str]:
"""Return ``True`` if `value` is a conformant **AE** value.
An **AE** value:
* Must be no more than 16 characters
* Leading and trailing spaces are not significant
* May only use ASCII characters, excluding ``0x5C`` (backslash) and all
control characters
Parameters
----------
value : str
The **AE** value to check.
Returns
-------
Tuple[bool, str]
A tuple of (bool, str), with the first item being ``True`` if the
value is conformant to the DICOM Standard and ``False`` otherwise and
the second item being a short description of why the validation failed
or ``''`` if validation was successful.
"""
if not isinstance(value, str):
return False, "must be str"
if len(value) > 16:
return False, "must not exceed 16 characters"
# All characters use ASCII
if not value.isascii():
return False, "must only contain ASCII characters"
# Unicode category: 'Cc' is control characters
invalid = [c for c in value if unicodedata.category(c)[0] == 'C']
if invalid or '\\' in value:
return False, "must not contain control characters or backslashes"
return True, ''
|
aee3ec59ea1965bd745f1527368053c5c04e5c4b
| 28,972
|
def state2bin(s, num_bins, limits):
"""
:param s: a state. (possibly multidimensional) ndarray, with dimension d =
dimensionality of state space.
:param num_bins: the total number of bins in the discretization
:param limits: 2 x d ndarray, where row[0] is a row vector of the lower
limit of each discrete dimension, and row[1] are corresponding upper
limits.
Returns the bin number (index) corresponding to state s given a
discretization num_bins between each column of limits[0] and limits[1].
    The return value has the same dimensionality as ``s``.
    Note that ``s`` may be continuous.
    Examples:
        s = 0,    limits = [-1, 5],   num_bins = 6 => 1
        s = .001, limits = [-1, 5],   num_bins = 6 => 1
        s = .4,   limits = [-.5, .5], num_bins = 3 => 2
"""
if s == limits[1]:
return num_bins - 1
width = limits[1] - limits[0]
if s > limits[1]:
print(
"Tools.py: WARNING: ", s, " > ", limits[1], ". Using the chopped value of s"
)
print("Ignoring", limits[1] - s)
s = limits[1]
elif s < limits[0]:
print(
"Tools.py: WARNING: ", s, " < ", limits[0], ". Using the chopped value of s"
)
s = limits[0]
return int((s - limits[0]) * num_bins / (width * 1.0))
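
# A minimal usage sketch matching the docstring examples:
print(state2bin(0, 6, [-1, 5]))     # -> 1
print(state2bin(.4, 3, [-.5, .5]))  # -> 2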
|
c2a0cb48864e58481681e8c72074e618dda7ddb8
| 28,973
|
import pickle
def load_db(pathname):
"""
returns the stored database from a pickle file
Parameters
----------
pathname: string
Returns
-------
database: dictionary mapping names to profiles
"""
with open(pathname, mode="rb") as opened_file:
database = pickle.load(opened_file)
return database
|
2ca73cfaf500fd6a841cfb9dc12c3b21d320ac69
| 28,974
|
import numpy
def _gauss_legendre(order, composite=1):
"""Backend function."""
inner = numpy.ones(order+1)*0.5
outer = numpy.arange(order+1)**2
outer = outer/(16*outer-4.)
banded = numpy.diag(numpy.sqrt(outer[1:]), k=-1) + numpy.diag(inner) + \
numpy.diag(numpy.sqrt(outer[1:]), k=1)
vals, vecs = numpy.linalg.eig(banded)
abscis, weight = vals.real, vecs[0, :]**2
indices = numpy.argsort(abscis)
abscis, weight = abscis[indices], weight[indices]
n_abscis = len(abscis)
composite = numpy.array(composite).flatten()
composite = list(set(composite))
composite = [comp for comp in composite if (comp < 1) and (comp > 0)]
composite.sort()
composite = [0]+composite+[1]
abscissas = numpy.empty(n_abscis*(len(composite)-1))
weights = numpy.empty(n_abscis*(len(composite)-1))
for dim in range(len(composite)-1):
abscissas[dim*n_abscis:(dim+1)*n_abscis] = \
abscis*(composite[dim+1]-composite[dim]) + composite[dim]
weights[dim*n_abscis:(dim+1)*n_abscis] = \
weight*(composite[dim+1]-composite[dim])
return abscissas, weights
|
70689e5644355f8c0c8658b59fca1c5bf4fcb0e1
| 28,976
|
def Read_Finished(filename):
"""Extracts sequences from a .seqs file"""
f = open(filename,'r')
lines = f.readlines()
f.close()
sequences = {}
strands = {}
for line in lines:
# Check for comment
if '#' in line:
line = line.split('#')[0]
parts = line.split(' ')
if len(parts) == 4:
name = parts[1]
namseq = parts[3]
namlen = len(namseq)
if parts[0] == 'sequence':
sequences[name] = namseq[0:namlen-1]
elif parts[0] == 'strand':
strands[name] = namseq[0:namlen-1]
return (sequences, strands)
|
30f16fd03e6a62454f3a01fe3fe3fb1ed6f792bb
| 28,978
|
import struct
def stringarray(string):
"""Output array of binary values in string.
"""
arrstr = ""
if (len(string) != 0):
for i in range(0,len(string)):
arrstr += "%x " % struct.unpack("=B",string[i])[0]
return arrstr
|
1c96a78f11bc1ea781658c3b23ce801a37dbde25
| 28,979
|
import os
def parent(path, level=1, sep="/", realpath=False):
"""
Get the parent of a file or a directory
:param path:
:param level:
:param sep:
:return list:
"""
path = os.path.realpath(path) if realpath else path
def dir_parent(path, level=1):
return sep.join(path.split(sep)[:-level])
    return [dir_parent(_path, level) for _path in path] if isinstance(path, list) else dir_parent(path, level)
|
aababa89db18c2a99c7c19053e75ac038829b7a1
| 28,981
|
def _is_user_author_or_privileged(cc_content, context):
"""
Check if the user is the author of a content object or a privileged user.
Returns:
Boolean
"""
return (
context["is_requester_privileged"] or
context["cc_requester"]["id"] == cc_content["user_id"]
)
|
7711d8fa31d3b6d590595ed0612cd19f6c1becc2
| 28,985
|
def gen_test_data(request):
""" Generate test data. """
return request.param
|
18fcb668c1e46226289924f8f8a4f7d1d3588c36
| 28,988
|
def mkcol_mock(url, request):
"""Simulate collection creation."""
return {"status_code": 201}
|
313fb40302c282d102d6846980ce09ea3600c50c
| 28,989
|
def dict2bibtex(d):
""" Simple function to return a bibtex entry from a python dictionary """
outstring = '@' + d.get('TYPE','UNKNOWN') + '{' + d.get('KEY','') + ',\n'
kill_keys = ['TYPE','KEY','ORDER']
top_keys = ['AUTHOR','TITLE','YEAR']
top_items = []
rest_items = []
for k in d.keys():
if k in top_keys:
top_items = top_items + [ ( k , d[k] )]
elif not k in kill_keys:
rest_items = rest_items + [ ( k , d[k] )]
rest_items.sort()
for k in top_items + rest_items:
outstring = outstring + '\t' + k[0] + ' = {' + k[1] + '},\n'
outstring = outstring + '}\n\n'
return outstring
|
1ffbc9ec9754acf9904c959be05e0585302435a3
| 28,990
|
def random_permutation(df_list):
"""
Run permutations in the dataset
Parameters
---------
df_list: list
list of pandas DataFrames, each DataFrames containing one traffic type
Return
------
reordered_df_list: array
Resulting array of pandas DataFrames
"""
df_list_size = len(df_list)
    reordered_df_list = df_list.copy()  # shallow copy so the caller's list is not mutated
for idx in range(df_list_size):
# Shuffle rows with a given seed to reproduce always same result
reordered_df_list[idx] = df_list[idx].sample(frac=1, replace=False,
random_state=0)
return reordered_df_list
|
397328f9fe12a64d1cd4b552d680f4eea0fbbd89
| 28,991
|
def dict_as_args(input_dict: dict) -> str:
"""
Разложить словарь на последовательность аргументов будто это ключевые слова
>>> dict_as_args(dict(a=1, b=2, c="test"))
"a=1, b=2, c='test'"
"""
return ', '.join(f'{key}={value!r}' for key, value in input_dict.items())
|
9508c0517259056de8cf7570ab413d12eb4a65be
| 28,993
|
def generate_system_redaction_list_entry(newValue):
"""Create an entry for the redaction list for a redaction performed by the system."""
return {
'value': newValue,
'reason': 'System Redacted',
}
|
22d7831106c6dd5350a3c86b51facc273835a1e6
| 28,994
|
from typing import Union
def _print_result(
start_quantity: Union[int, float],
start_unit: str,
end_unit: str,
end_quantity: Union[int, float],
) -> str:
"""
Function to create final output string for conversion.
:param start_quantity: Integer or float starting quantity which needed conversion.
:param start_unit: Initial unit type of integer or float starting quantity.
:param end_unit: Ending unit type of integer or float quantity.
:param end_quantity: Integer or float of converted starting quantity from start unit to end unit.
:return: String of values concatenated in user friendly message.
"""
if end_quantity < 0.000001:
output = "Value smaller than decimal precision 6. Cannot output."
else:
output = f"```{start_quantity} {start_unit} = {end_quantity} {end_unit}```"
return output
|
26228ea4c3064894748e5352ea117393031ee79b
| 28,995
|
import importlib
def get_version(version_module_name):
"""Load currently declared package version."""
version_module = importlib.import_module(version_module_name)
# always reload
importlib.reload(version_module)
version = f"{version_module.__version__}"
print(f"version is {version}")
return version
|
156d7e18e96ea0716011e0ca0536d38eaa078b9e
| 28,996
|
def _expand_task_collection(factory):
"""Parse task collection task factory object, and return task list.
:param dict factory: A loaded JSON task factory object.
"""
return factory.tasks
|
1d2c0b2a763b9e3c78b4bea8d012575038a5804c
| 28,999
|
import platform
def IsLinux():
"""
Returns True if platform is Linux.
Generic utility function.
"""
return (platform.uname()[0] == "Linux")
|
06ed6068d5914c25c5f24faf89979ca3e93620ca
| 29,002
|
from textwrap import dedent
def dedent_docstr(s, n=1):
"""Dedent all lines except first n lines
Args:
s (type): some text to dedent
n (int): number of lines to skip, (n == 0 is a normal dedent,
n == 1 is useful for whole docstrings)
"""
lines = s.splitlines(keepends=True)
if lines:
first_n_lines = "".join([l.lstrip(' \t') for l in lines[:n]])
dedented = dedent("".join(lines[n:]))
return first_n_lines + dedented
else:
return ""
|
ce9d89bbba7ef6784e707fcae0ea6b127ee3cdcd
| 29,003
|
def _split_path(loc):
""" Split S3 path into bucket and prefix strings """
bucket = loc.split("s3://")[1].split("/")[0]
prefix = "/".join(loc.split("s3://")[1].split("/")[1:])
return bucket, prefix
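
# A minimal usage sketch, assuming the function above:
print(_split_path("s3://my-bucket/path/to/key"))  # -> ('my-bucket', 'path/to/key')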
|
46ace1084e7688847d60fe34e4b3958c89cfbd31
| 29,004
|
def strip_white_space_from_columns_of_dtype_str(df):
""" Takes one parameter, a dataframe. """
for i in df.select_dtypes([object]).columns:
s = df[i].copy()
s.update(s.dropna().astype(str).str.strip())
df.loc[: ,i] = s.copy()
del i, s
return df
|
4857ab2938567c99770889be16356b92272c7ecc
| 29,006
|
def GetStepStartAndEndTime(build, full_step_name):
"""Gets a step's start_time and end_time from Build.
Returns:
(start_time, end_time)
"""
for step in build.steps or []:
if step.name == full_step_name:
return step.start_time.ToDatetime(), step.end_time.ToDatetime()
return None, None
|
ef3d2a017ad6aa1e0b5c9c974a64d715ae62d1c1
| 29,008
|
def part1(adapters):
""" Find adapters with jolt diff of 1 and 3 """
adapters.sort()
adapters.insert(0, 0) # add 0 at start for the wall jolt
adapters.append(max(adapters) + 3) # adding phone's inbuilt adapter
diff_of_one = 0
diff_of_three = 0
for i in range(len(adapters) - 1):
diff = adapters[i + 1] - adapters[i]
if diff == 1:
diff_of_one += 1
elif diff == 3:
diff_of_three += 1
return diff_of_one * diff_of_three
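
# A minimal usage sketch (hand-checked small input): with the wall at 0 jolts and
# the device at max+3, there are 7 one-jolt and 5 three-jolt differences:
adapters = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
print(part1(adapters))  # -> 35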
|
639b0d46c3621a55f27fe4702688e8ae353060b0
| 29,009
|
import os
def does_exist(file_with_path):
"""Checks if a file exists """
return os.path.isfile(file_with_path)
|
2e5266ca650b794a8039752817dbaca9602eed9c
| 29,010
|
import os
def convert_audio_to_mono_wav(decoded_data):
"""
    Since the vosk api accepts audio only in a specific format (mono wav), while the user may send data
    with a different extension, e.g. mp3, mp4a, mp4 etc., the data must be converted to the required format.
    input: decoded_data - audio data in its original extension
    output: audio data in mono wav format
"""
os.system(f'ffmpeg -i {decoded_data} -acodec pcm_s16le -ac 1 -ar 8000 {decoded_data}.wav')
os.remove(decoded_data)
return f'{decoded_data}.wav'
|
e27f0981eeca30b85bbb92e9e930bff6922c391d
| 29,011
|