content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_email_settings(settings):
    """Group the email-related properties of *settings* into a dict.

    Args:
        settings: object exposing the email configuration attributes.

    Returns:
        dict: mapping of email setting names to their values.
    """
    keys = (
        'smtp_host_for_outbound_mail',
        'redcap_support_sender_email',
        'redcap_uri',
        'smtp_port_for_outbound_mail',
        'redcap_support_receiver_email',
        'batch_warning_days',
    )
    return {key: getattr(settings, key) for key in keys}
def natural_key_fixed_names_order(names_order):
    """Create a natural-order sort key with a custom ordering of names.

    Consider a QAOA ansatz in which parameters are naturally ordered as:
    gamma_0 < beta_0 < gamma_1 < beta_1 < ...
    That ordering is obtained by passing names_order with 'gamma'
    preceding 'beta'.

    Note that unlike natural_key and natural_key_revlex, this function
    returns a key, i.e. it is a key factory.
    """
    # Rank of each name in the requested ordering.
    weight_of = {name: rank for rank, name in enumerate(names_order)}

    def _key(symbol):
        prefix, suffix = symbol.name.split("_")
        # Sort primarily by the numeric index, then by the name's rank.
        return int(suffix), weight_of[prefix]

    return _key
def circuit_path_string_to_int(circuit_rpp):
    """
    Convert 'path' values in an RPP circuit from strings to lists.

    Args:
        circuit_rpp (list): rpp circuit generated by
            postman_problems.solver.rpp; each edge is a tuple whose
            fourth element is an attribute dict with a 'path' entry.

    Returns:
        circuit_rpp (list): the same list, with string paths parsed
        in place.
    """
    from ast import literal_eval  # safe literal parsing, replaces exec()

    for edge in circuit_rpp:
        attrs = edge[3]
        if isinstance(attrs['path'], str):
            # literal_eval parses the list literal without executing
            # arbitrary code, unlike the previous exec()-based approach.
            attrs['path'] = literal_eval(attrs['path'])
    return circuit_rpp
def update_upload_api(requests_pathname_prefix, upload_api):
    """Join the requests pathname prefix with the upload API path.

    This is a private method, and should not be exposed to users.
    """
    if requests_pathname_prefix == "/":
        return upload_api
    # Avoid a doubled slash at the join point.
    prefix = requests_pathname_prefix.rstrip("/")
    suffix = upload_api.lstrip("/")
    return f"{prefix}/{suffix}"
def find_set(x):
    """Return the representative of x's set, compressing the path as we go."""
    root = x
    if root.parent is not None:
        root = find_set(root.parent)
        x.parent = root  # path compression: point directly at the root
    return root
def to_list(x):
    """Wrap *x* in a list unless it already is one.

    :param x: any value.
    :return: ``x`` itself if it is a list, otherwise ``[x]``.
    """
    # isinstance replaces the `type(x) == list` anti-pattern; list
    # subclasses are now also returned unchanged.
    return x if isinstance(x, list) else [x]
def not_submitted_count(drafts):
    """
    Return the number of not-submitted services.

    Defaults to 0 when no not-submitted services are recorded.

    :param drafts: mapping of service states to counts.
    :return: count of not-submitted services.
    """
    try:
        return drafts['not-submitted']
    except KeyError:
        return 0
import math
import numpy
def bdsnr(metric_set1, metric_set2):
    """
    BJONTEGAARD Bjontegaard metric calculation.

    Bjontegaard's metric computes the average gain in psnr between two
    rate-distortion curves [1].

    Args:
        metric_set1: (rate, psnr) points for curve 1.
        metric_set2: (rate, psnr) points for curve 2.

    Returns:
        The calculated Bjontegaard metric 'dsnr' (0.0 for a degenerate
        integration interval).

    Code adapted from code written by: (c) 2010 Giuseppe Valenzise
    http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
    """
    # pylint: disable=too-many-locals
    # numpy seems to do tricks with its exports.
    # pylint: disable=no-member
    rates_a = [point[0] for point in metric_set1]
    psnrs_a = [point[1] for point in metric_set1]
    rates_b = [point[0] for point in metric_set2]
    psnrs_b = [point[1] for point in metric_set2]
    # Work in log-rate space, as the metric prescribes.
    log_rates_a = [math.log(rate) for rate in rates_a]
    log_rates_b = [math.log(rate) for rate in rates_b]
    # Best cubic polynomial fits of PSNR as a function of log-rate.
    fit_a = numpy.polyfit(log_rates_a, psnrs_a, 3)
    fit_b = numpy.polyfit(log_rates_b, psnrs_b, 3)
    # Overlapping integration interval of the two curves.
    lo = max(min(log_rates_a), min(log_rates_b))
    hi = min(max(log_rates_a), max(log_rates_b))
    # Antiderivatives of the fitted polynomials.
    antideriv_a = numpy.polyint(fit_a)
    antideriv_b = numpy.polyint(fit_b)
    # Definite integrals over [lo, hi].
    area_a = numpy.polyval(antideriv_a, hi) - numpy.polyval(antideriv_a, lo)
    area_b = numpy.polyval(antideriv_b, hi) - numpy.polyval(antideriv_b, lo)
    # Average PSNR improvement over the shared interval.
    if hi == lo:
        return 0.0
    return (area_b - area_a) / (hi - lo)
def unpack_args(args, num):
    """
    Extract exactly *num* arguments from a tuple, padding with ``None``
    if the tuple is too short.

    Args:
        args (Tuple[object]): The tuple of arguments.
        num (int): The number of elements desired.

    Returns:
        Tuple[object]: A tuple of ``num`` arguments, padded with ``None``
        if ``len(args) < num``.
    """
    padding = (None,) * max(0, num - len(args))
    padded = tuple(args) + padding
    return padded[:num]
from typing import Iterable
def _variable_or_iterable_to_set(x):
"""
Convert variable or iterable x to a frozenset.
If x is None, returns the empty set.
Arguments
---------
x: None, str or Iterable[str]
Returns
-------
x: frozenset[str]
"""
if x is None:
return frozenset([])
if isinstance(x, str):
return frozenset([x])
if not isinstance(x, Iterable) or not all(isinstance(xx, str) for xx in x):
raise ValueError(
"{} is expected to be either a string or an iterable of strings"
.format(x))
return frozenset(x) | 36ab763a3a4341c49fefb6cb3b10d88bef040fa8 | 47,123 |
import os
def list_dir_files(folder_path, full_path=False):
    """
    List the regular files in a directory, sorted alphabetically.

    Args:
        folder_path (str): directory to scan.
        full_path (bool): return full file paths instead of bare names.

    Returns:
        list (list): sorted files list.
    """
    entries = []
    for name in os.listdir(folder_path):
        candidate = os.path.join(folder_path, name)
        if not os.path.isfile(candidate):
            continue  # skip sub-directories and other non-file entries
        entries.append(candidate if full_path else name)
    return sorted(entries)
import re
def workdir_from_dockerfile(dockerfile):
    """Return the first WORKDIR value in the Dockerfile, or None."""
    workdir_regex = re.compile(r'\s*WORKDIR\s*([^\s]+)')
    with open(dockerfile) as handle:
        for line in handle:
            match = workdir_regex.match(line)
            if not match:
                continue
            # Escape '$' since it is used for substitutions in Container
            # Builder builds.
            return match.group(1).replace('$', '$$')
    return None
import re
def CellStartsWith(regex, cell):
    """Check if the first source line of a notebook cell matches regex.

    Args:
        regex (str): A regexp string, applied with re.match.
        cell (dict): A JSON object representing a cell in the IPython
            notebook.

    Returns:
        False when the cell has no source lines; otherwise the result of
        re.match on the first line (a match object, or None on no match).
    """
    source = cell['source']
    if not source:
        return False
    return re.match(regex, source[0])
def count_one_four_seven_eight(data):
    """Count output values whose length identifies a 1, 4, 7 or 8.

    Digits 1, 7, 4 and 8 are the only ones rendered with a unique number
    of segments (2, 3, 4 and 7 respectively), so a value's length alone
    identifies them.

    :param data: iterable of (digits, values) pairs.
    :return: total number of values recognised as 1, 4, 7 or 8.
    """
    unique_lengths = {2, 3, 4, 7}
    total = 0
    for _digits, values in data:
        total += sum(1 for value in values if len(value) in unique_lengths)
    return total
import six
def _get_program_cmd(name, pconfig, config, default):
"""Retrieve commandline of a program.
"""
if pconfig is None:
return name
elif isinstance(pconfig, six.string_types):
return pconfig
elif "cmd" in pconfig:
return pconfig["cmd"]
elif default is not None:
return default
else:
return name | 55f04c1fc3598afc410cdda7d27e6cd4b15c57a5 | 47,129 |
def openstack_connection_kwargs(self):
    """Collect the truthy ex_* connection settings into a kwargs dict.

    :rtype: ``dict``
    """
    # Attribute names on self; the returned key drops the leading '_'.
    attributes = (
        '_ex_force_base_url',
        '_ex_force_auth_token',
        '_ex_force_auth_url',
        '_ex_force_auth_version',
        '_ex_token_scope',
        '_ex_domain_name',
        '_ex_tenant_name',
        '_ex_force_service_type',
        '_ex_force_service_name',
        '_ex_force_service_region',
    )
    kwargs = {}
    for attribute in attributes:
        value = getattr(self, attribute)
        if value:  # falsy values are omitted, matching the original checks
            kwargs[attribute[1:]] = value
    return kwargs
import pickle
def load_database(filename):
    """Read in a pickled database file and return its contents.

    :param filename: path of the pickle file to read.
    :return: the unpickled database object.
    """
    with open(filename, 'rb') as handle:
        database = pickle.load(handle)
    return database
def get_beshp_xml(
    map_file,
    people,
    person_path_weight,
    people_speed,
    display_path_cost_p,
    add_person_spacing_p,
    people_wait_p,
    equal_diagonal_weight_p,
    people_move_rates,
    set_fire_p,
    fire_speed,
):
    """Build a NetLogo BehaviorSpace experiment XML document for FireSim.

    Every argument is interpolated verbatim into the experiment body, so
    each one is presumably a pre-rendered ``<enumeratedValueSet>`` XML
    fragment for the matching NetLogo variable -- TODO confirm against
    the callers.

    Example of the intended document shape (fragments abridged):

    <experiments>
    <experiment name="FireSim" repetitions="1" runMetricsEveryStep="false">
    <setup>setup</setup>
    <go>go</go>
    <exitCondition>not any? turtles</exitCondition>
    <metric>mean-escape-time</metric>
    <enumeratedValueSet variable="map-file"> <value value=""blank.map""/> </enumeratedValueSet>
    <enumeratedValueSet variable="People"> <value value="500"/> </enumeratedValueSet>
    <enumeratedValueSet variable="Fire_Speed"> <value value="50"/> </enumeratedValueSet>
    <enumeratedValueSet variable="set-fire?"> <value value="false"/> </enumeratedValueSet>
    </experiment>
    </experiments>

    :return: the experiment document as one XML string.
    """
    return f"""<experiments>
    <experiment name="FireSim" repetitions="1" runMetricsEveryStep="false">
    <setup>setup</setup>
    <go>go</go>
    <exitCondition>not any? turtles</exitCondition>
    <metric>mean-escape-time</metric>
    {map_file}{people}{person_path_weight}{people_speed}{display_path_cost_p}{add_person_spacing_p}{people_wait_p}{equal_diagonal_weight_p}{people_move_rates}{set_fire_p}{fire_speed}
    </experiment> </experiments>
    """
def loadstastic(file):
    """
    Build a word-frequency dictionary from an already scrubbed text.

    :param file: a string containing an ALREADY SCRUBBED file (chunk).
    :return: a dictionary mapping each word in the chunk to the number
        of times it appears.
    """
    word_counts = {}
    for word in file.split():
        # dict.get replaces the per-word try/bare-except pattern, which
        # could mask unrelated errors.
        word_counts[word] = word_counts.get(word, 0) + 1
    return word_counts
import logging
def get_video_bitrate(dict_inf, stream_video):
    """Bitrate search: the value may live in one of two possible places.

    Args:
        dict_inf (dict): video metadata.
        stream_video (dict): video stream data.

    Raises:
        NameError: if the bitrate is found in neither place.

    Returns:
        int: video bitrate.
    """
    try:
        bitrate = stream_video['bit_rate']
    except Exception:
        # Fall back to the container-level metadata.
        try:
            bitrate = dict_inf['format']['bit_rate']
        except Exception as error:
            print(f'{error}\n{dict_inf}')
            file = dict_inf['format']['filename']
            msg_err = ("File bellow don't have 'bit_rate' in "
                       f'detail file:\n{file}')
            logging.error(msg_err)
            raise NameError(msg_err)
    return int(bitrate)
def _clean_state(state):
"""Return purged state of values so only wanted values can be modified.
Args:
state(dict): device state dictionary. Original won't be modified.
"""
out = {}
for k, v in state.items():
if isinstance(v, dict): # recurse nested dicts
out[k] = _clean_state(v)
elif k == "type": # type values are kept
out[k] = v
else: # null out the rest
out[k] = None
return out | 43fbcf5d4e9554ff604aebe991bc19a9c3503530 | 47,136 |
import os
from pathlib import Path
def _get_xdg_cache_dir():
"""
Return the XDG cache directory.
See https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
"""
cache_dir = os.environ.get('XDG_CACHE_HOME')
if not cache_dir:
cache_dir = os.path.expanduser('~/.cache')
if cache_dir.startswith('~/'): # Expansion failed.
return None
return Path(cache_dir, 'matplotlib') | 70236c9830687feb0d45f817f56e1d31b189a4f4 | 47,137 |
import argparse
def get_args():
    """Build the argparse setup and parse the command-line arguments."""
    cli = argparse.ArgumentParser(description='Pixel for Pixel websites clones using BeEF')
    # BeEF password (required positional).
    cli.add_argument('password', metavar='password', help='Password for BeEF server instance')
    # Site to be cloned (required positional).
    cli.add_argument('site', help='Site you wish to clone. e.g test.com')
    # Mount point of the clone on the BeEF host.
    cli.add_argument('-m', metavar='MOUNTPOINT', default='/',
                     help='Mount point of cloned site on your BeEF host')
    # Host IP address.
    cli.add_argument('-i', metavar='IP', default='127.0.0.1', help='IP address of your BeEF host')
    # Host port.
    cli.add_argument('-p', metavar='PORT', default='3000', help='Port number BeEF is running on')
    # BeEF username.
    cli.add_argument('-u', metavar='USERNAME', default='beef', help='Username for beef')
    # Edit mode: a find/replace string pair.
    cli.add_argument('-e', metavar=('FIND', 'REPLACE'), nargs=2,
                     help='Enables edit mode. E.g. -e string_to_replace string_replacement')
    return cli.parse_args()
import torch
def raw_morlets(grid_or_shape, wave_vectors, gaussian_bases, morlet=True, ifftshift=True, fft=True):
    """ Helper function for creating morlet filters

    Parameters:
        grid_or_shape -- a grid of the size of the filter (a tensor whose
                         first dimension indexes the spatial axes, as used
                         in the tuple branch below) or a tuple that
                         indicates its shape
        wave_vectors -- directions of the wave part of the morlet wavelet,
                        shape (n_filters, n_dim)
        gaussian_bases -- bases of the gaussian part of the morlet wavelet,
                          shape (n_filters, n_dim, n_dim)
        morlet -- boolean for morlet or gabor wavelet
        ifftshift -- boolean for the ifftshift (inverse fast fourier transform shift)
        fft -- boolean for the fft (fast fourier transform)

    Returns:
        filters -- the wavelet filters before normalization; a complex
                   tensor of shape (n_filters,) + spatial shape
    """
    n_filters, n_dim = wave_vectors.shape
    assert gaussian_bases.shape == (n_filters, n_dim, n_dim)
    device = wave_vectors.device
    if isinstance(grid_or_shape, tuple):
        # Build a centered integer grid of the requested shape.
        shape = grid_or_shape
        ranges = [torch.arange(-(s // 2), -(s // 2) + s, device=device, dtype=torch.float) for s in shape]
        grid = torch.stack(torch.meshgrid(*ranges), 0)
    else:
        shape = grid_or_shape.shape[1:]
        grid = grid_or_shape
    # Plane-wave (complex exponential) part, one wave per filter.
    waves = torch.exp(1.0j * torch.matmul(grid.T, wave_vectors.T).T)
    # Project the grid onto each filter's gaussian basis vectors.
    gaussian_directions = torch.matmul(grid.T, gaussian_bases.T.reshape(n_dim, n_dim * n_filters)).T
    gaussian_directions = gaussian_directions.reshape((n_dim, n_filters) + shape)
    radii = torch.norm(gaussian_directions, dim=0)
    # Gaussian envelope in the projected coordinates.
    gaussians = torch.exp(-0.5 * radii ** 2)
    signal_dims = list(range(1, n_dim + 1))
    gabors = gaussians * waves
    if morlet:
        # Subtract a scaled gaussian so each filter integrates to zero.
        gaussian_sums = gaussians.sum(dim=signal_dims, keepdim=True)
        gabor_sums = gabors.sum(dim=signal_dims, keepdim=True).real
        morlets = gabors - gabor_sums / gaussian_sums * gaussians
        filters = morlets
    else:
        filters = gabors
    if ifftshift:
        filters = torch.fft.ifftshift(filters, dim=signal_dims)
    if fft:
        filters = torch.fft.fftn(filters, dim=signal_dims)
    return filters
def Rgb2Hex(rgb=(255, 255, 255)):
    """
    Convert an RGB colour triple to a hexadecimal colour string.

    e.g. (255, 255, 0) -> #ffff00
    Reference: https://blog.csdn.net/hepu8/article/details/88630979
    """
    red, green, blue = rgb
    return f"#{red:02x}{green:02x}{blue:02x}"
import os
def get_upload_path(instance, filename):
    """
    Return the storage path for an uploaded attachment.

    Attachments are stored in a separate directory per post
    ("post-<id>/<filename>").
    """
    post_directory = "post-%d" % instance.post.id
    return os.path.join(post_directory, filename)
def logic(number: int) -> int:
    """Perform one even-odd transformation step.

    Each even-odd transformation:
      * Adds two (+2) to each odd integer.
      * Subtracts two (-2) from each even integer.
    """
    if number % 2 == 0:
        return number - 2
    return number + 2
from datetime import datetime
def inject_date():
    """
    Expose the current year (e.g. for easy use in a template footer).
    """
    return {"year": datetime.now().year}
def get_label(timestamp, commercials):
    """Label a frame given its timestamp and the commercials list.

    Returns 'ad' when the timestamp falls inside any commercial span,
    otherwise the video's class label.
    """
    in_commercial = any(
        span['start'] <= timestamp <= span['end']
        for span in commercials['commercials']
    )
    return 'ad' if in_commercial else commercials['class']
import re
def _parse_multiplicity(strings, substance_keys=None):
"""
Examples
--------
>>> _parse_multiplicity(['2 H2O2', 'O2']) == {'H2O2': 2, 'O2': 1}
True
>>> _parse_multiplicity(['2 * H2O2', 'O2']) == {'H2O2': 2, 'O2': 1}
True
>>> _parse_multiplicity(['']) == {}
True
>>> _parse_multiplicity(['H2O', 'H2O']) == {'H2O': 2}
True
"""
result = {}
for items in [re.split(" \\* | ", s) for s in strings]:
items = [x for x in items if x != ""]
if len(items) == 0:
continue
elif len(items) == 1:
if items[0] not in result:
result[items[0]] = 0
result[items[0]] += 1
elif len(items) == 2:
if items[1] not in result:
result[items[1]] = 0
result[items[1]] += (
float(items[0]) if "." in items[0] or "e" in items[0] else int(items[0])
)
else:
raise ValueError("To many parts in substring")
if substance_keys is not None:
for k in result:
if k not in substance_keys:
raise ValueError("Unkown substance_key: %s" % k)
return result | ef1a2729b657ab6463372866609a241c2aced370 | 47,148 |
import io
def config_to_string(config):
    """
    Serialize a ConfigParser object to a string in INI format.

    Args:
        config (obj): ConfigParser object.

    Returns:
        str: the whole configuration as one string.
    """
    buffer = io.StringIO()
    config.write(buffer, space_around_delimiters=False)
    return buffer.getvalue()
def count_fn(true_fn_flags, ground_truth_classes, class_code):
    """
    Count how many true FN remain in true_fn_flags for one class.

    Args:
        true_fn_flags: flags left as 1 when a ground truth was never detected.
        ground_truth_classes: classes parallel to true_fn_flags.
        class_code: code of the class of interest.

    Returns:
        number of 1s in true_fn_flags whose class equals class_code.
    """
    total = 0
    for idx, flag in enumerate(true_fn_flags):
        if flag == 1 and ground_truth_classes[idx] == class_code:
            total += 1
    return total
def read_IMDB(file_name):
    """
    Read the IMDB dataset file.

    :param file_name: the IMDB dataset file name as a string.
    :return: a list of (sentence, target) pairs where target is 0 for
        'negative' and 1 for 'positive'; the surrounding quote characters
        are stripped from each sentence.
    """
    label_to_int = {'negative': 0, 'positive': 1}
    samples = []
    with open(file_name, 'r', encoding="latin-1") as text_file:
        for line in text_file.readlines()[1:]:  # skip the header row
            sentence, label = line.rstrip().rsplit(',', 1)
            # sentence[1:-1] drops the quotes around the review text.
            samples.append((sentence[1:-1], label_to_int[label]))
    return samples
import argparse
def parse_args():
    """Parse command-line arguments for the FoldX post-processing script."""
    cli = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positionals: mutation list, wild-type file, FoldX output dir.
    cli.add_argument('mutations', metavar='M', help="Individual list file")
    cli.add_argument('wt', metavar='W', help="WT FoldX AC File")
    cli.add_argument('foldx', metavar='F', help="FoldX output directory")
    return cli.parse_args()
def toHexByte(n):
    """
    Convert a numeric value to a hex byte.

    Arguments:
        n - the value to convert (max 255)
    Return:
        A string representing the value in hex (1 byte, uppercase).
    """
    return format(n, "02X")
def conj(coll, to_add):
    """
    Clojure-style conj: return a new collection with to_add 'added'.
    See https://clojuredocs.org/clojure.core/conj for more reading

    conj(None, item) returns item. Where the items land depends on the
    concrete type of coll:
      [] - appends           [1, 2, 3, 4] == conj([1, 2], [3, 4])
      () - prepends reversed (4, 3, 1, 2) == conj((1, 2), (3, 4))
      {} - appends           {'a': 'A', 'b': 'B'} == conj({'a':'A'}, {'b':'B'})
    Any other type of coll is returned unchanged.

    Parameters:
        coll: collection to add items to
        to_add: items to be added to coll
    Return:
        object of the same type as coll but with to_add items added
    """
    if coll is None:
        return to_add
    if isinstance(coll, list):
        return coll + to_add
    if isinstance(coll, tuple):
        # New items go to the front, in reverse order.
        return tuple(reversed(tuple(to_add))) + tuple(coll)
    if isinstance(coll, dict):
        merged = dict(coll)
        for key in to_add:
            # Existing keys keep coll's value.
            if key not in merged:
                merged[key] = to_add[key]
        return merged
    return coll
import re
def split_host_and_port(netloc):
    """Returns ``(host, port)`` tuple from ``netloc``.

    Returned ``port`` will be ``None`` if not present.

    .. versionadded:: 4.1
    """
    match = re.match(r'^(.+):(\d+)$', netloc)
    if match is None:
        return (netloc, None)
    return (match.group(1), int(match.group(2)))
import collections
def _format_expand_payload(payload, new_key, must_exist=[]):
""" Formats expand payloads into dicts from dcids to lists of values."""
# Create the results dictionary from payload
results = collections.defaultdict(set)
for entry in payload:
if 'dcid' in entry and new_key in entry:
dcid = entry['dcid']
results[dcid].add(entry[new_key])
# Ensure all dcids in must_exist have some entry in results.
for dcid in must_exist:
results[dcid]
return {k: sorted(list(v)) for k, v in results.items()} | ae9af5400f0bf38954c99afca62914e30f42ec32 | 47,158 |
def get_dominant_letter_count(word_list, valid_word):
    """
    Sum, over the words accepted by valid_word, the count of each word's
    most frequent character.

    :param word_list: a list of words composing a sentence.
    :param valid_word: a compiled regex whose .match defines acceptance.
    :return: the total of dominant characters across all matching words.
    """
    total = 0
    for word in word_list:
        if not valid_word.match(word):
            continue
        # Count of the single most frequent character in this word.
        total += max(word.count(char) for char in word)
    return total
def to_float_list(a):
    """
    Given an iterable, return a list of its contents converted to floats.

    :param a: iterable
    :return: list of floats
    """
    return list(map(float, a))
from pathlib import Path
def join_legacy_read_path(sample_path: Path, suffix: int) -> Path:
    """
    Create a path for a sample read file using the old file naming
    convention (eg. reads_1.fastq).

    :param sample_path: the path to the sample directory
    :param suffix: the read file suffix
    :return: the read path
    """
    file_name = f"reads_{suffix}.fastq"
    return sample_path / file_name
from typing import Counter
import re
def get_word_counts(txt):
    """
    Count word occurrences in txt (lower-cased, split on whitespace and
    common punctuation).

    Unknown-word handling: the token stream is split 90/10; tokens from
    the last 10% that never appeared in the first 90% are tallied under
    the 'UNK' key instead of their own.

    :param txt: input text.
    :return: a {'word': integer} Counter of occurrences.
    """
    tokens = [t for t in re.split(r'[\s\,\.\?\!\;\:"]', txt.lower()) if t]
    cutoff = int(len(tokens) * 0.9)
    known, tail = tokens[:cutoff], tokens[cutoff:]
    counts = Counter(known)
    for token in tail:
        counts[token if token in counts else 'UNK'] += 1
    return counts
def remove_hidden(names):
    """Remove (in-place) all strings starting with a '.' in the given list.

    Returns the same list object for convenience.
    """
    names[:] = [name for name in names if not name.startswith('.')]
    return names
def normalize(rngs):
    """Function: normalize

    Description: Normalizes a list of ranges by sorting them, merging
        overlapping or adjacent ranges, and turning single-position
        ranges into (start, end) tuples.

    Arguments:
        (input) rngs -> List of range sets (1- or 2-tuples).
        (output) result -> List of ordered, merged range tuples
            (empty for empty input).
    """
    result = []
    last = None
    for rng in sorted(rngs):
        if len(rng) == 1:
            # Promote single positions to degenerate (x, x) ranges.
            rng = (rng[0], rng[0])
        if last is None:
            last = rng
        elif rng[1] <= last[1]:
            # Fully contained in the previous range.
            continue
        elif rng[0] <= last[1] + 1:
            # Overlapping or adjacent: extend the previous range.
            last = (last[0], max(rng[1], last[1]))
        else:
            result.append(last)
            last = rng
    # Bug fix: guard the trailing append so empty input yields []
    # instead of [None].
    if last is not None:
        result.append(last)
    return result
import re
import operator
def is_valid(policy: str, password: str) -> bool:
    """
    Given a policy (e.g. `1-3 a`) and a password (e.g. `abcde`),
    determine if the password complies with the policy: the policy
    character must appear at exactly one of the two 1-based positions.
    """
    char = policy[-1:]
    # Scan the policy once instead of running findall twice.
    positions = re.findall("[0-9]+", policy)
    first = int(positions[0])
    second = int(positions[1])
    # XOR of the two position checks: exactly one must match.
    return (password[first - 1] == char) != (password[second - 1] == char)
def _append_docs(cls):
"""
Add a note to documentation on parent expansion process.
.. Note::
This function assumes the class is defined at module-level scope.
If not the indentation may be funny.
"""
new_doc = (cls.__doc__ + "\n") if cls.__doc__ else ""
new_doc += """\n .. Note::\n This node has parent expansion, following these procedures.\n\n"""
for form_handler in cls._parent_expander:
add = form_handler.add_class_doc()
if add:
new_doc += f" #. {add}\n"
else:
raise ValueError("No documentation for transformation!")
new_doc += "\n"
return new_doc | 00f6da32a1d27b93a51f522226a24b42f05d1897 | 47,169 |
def _convert2dict(file_content_lines):
    """
    Convert the corosync configuration file to a dictionary.

    Recursive: a '{' opens a nested section parsed by a recursive call,
    and a '}' closes the current one.

    :param file_content_lines: list of raw lines of the configuration
        (sub)section being parsed.
    :return: (corodict, index) tuple -- the parsed dict, and the line
        index the caller should resume after (presumably: i + 2 to step
        past the closing brace; verify against the callers).
    """
    corodict = {}
    index = 0
    for i, line in enumerate(file_content_lines):
        stripped_line = line.strip()
        # Skip blank lines and comments.
        if not stripped_line or stripped_line[0] == '#':
            continue
        # Skip lines already consumed by a nested recursive call.
        if index > i:
            continue
        line_items = stripped_line.split()
        if '{' in stripped_line:
            # Nested section: recurse on the remaining lines and record
            # how far the nested parse advanced.
            corodict[line_items[0]], new_index = _convert2dict(file_content_lines[i+1:])
            index = i + new_index
        elif line_items[0][-1] == ':':
            # "key: value" entry; the trailing ':' is stripped.
            corodict[line_items[0][:-1]] = line_items[-1]
        elif '}' in stripped_line:
            # End of this section: report how many lines were consumed.
            return corodict, i+2
    return corodict, index
import requests
def get_seed(pulse_url):
    """
    Fetch a pulse from the Random UChile API and return its seed value.

    :param pulse_url: String representing the URL of the pulse.
    :return: A 512-bit random string that can be used as a seed by a
        pseudo random generator.
    """
    payload = requests.get(pulse_url).json()
    return payload['pulse']['outputValue']
def map_zero_one(A):
    """
    Min-max normalize an array to the interval [0, 1].

    :param A: array-like exposing .min() and .max(); max and min must
        differ (otherwise this divides by zero).
    :return: (A - A.min()) / (A.max() - A.min())
    """
    # The previous implementation also computed an unused re-scaled
    # copy (`retval`) and carried several commented-out alternative
    # returns; that dead code is removed. The returned value is
    # unchanged.
    return (A - A.min()) / (A.max() - A.min())
import argparse
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: A list of command line arguments.

    Returns:
        The parsed arguments returned by argparse.ArgumentParser.
    """
    cli = argparse.ArgumentParser(
        description='Runs preprocessing for input raw data.')
    # Execution environment.
    cli.add_argument(
        '--runner',
        help='Runner option where to run the transformation.',
        choices=['DirectRunner', 'DataflowRunner'],
        default='DirectRunner')
    cli.add_argument('--project', help='GCP project id.')
    # Input locations.
    cli.add_argument(
        '--all-data',
        help='Path to CSV file (local or Cloud Storage) containing all data.')
    cli.add_argument(
        '--train-data',
        help='Path to CSV file (local or Cloud Storage) or BigQuery table'
        ' containing training data.')
    cli.add_argument(
        '--eval-data',
        help='Path to CSV file (local or Cloud Storage) or BigQuery table '
        ' containing evaluation data.')
    cli.add_argument(
        '--predict-data',
        help='Path to CSV file (local or Cloud Storage) or BigQuery table '
        ' containing prediction data.')
    cli.add_argument(
        '--data-source',
        help='Type of data source: CSV file or BigQuery table (csv|bigquery).',
        choices=['csv', 'bigquery'],
        default='bigquery',
        required=True)
    cli.add_argument(
        '--mode',
        choices=['train', 'predict'],
        default='train',
        help='If train, do transformation for all data (train, eval, predict). '
        'Otherwise, transform only predict data.')
    # Output locations.
    cli.add_argument(
        '--transform-dir', help='Directory to store transformer model.')
    cli.add_argument(
        '--output-dir', help='Directory to store transformed data.')
    cli.add_argument(
        '--job_name',
        help='Dataflow Runner job name dynamically created by Airflow.')
    # parse_known_args: unrecognised flags are ignored rather than fatal.
    args, _ = cli.parse_known_args(args=argv[1:])
    return args
import re
def _is_pattern_match(re_pattern, s):
"""Check if a re pattern expression matches an entire string."""
match = re.match(re_pattern, s, re.I)
return match.group() == s if match else False | 30b65a696c27b2141e50672775333642a1fb2b57 | 47,175 |
import torch
def double_features(f: torch.Tensor) -> torch.Tensor:
    """Double a feature batch as (A, B, C, D) --> (A, A, B, B, C, C, D, D).

    Args:
        f: Feature vectors (n_batch, n_features)
    Returns:
        Feature vectors (2*n_batch, n_features)
    """
    return f.repeat_interleave(2, dim=0)
def flatten_dictionary(dictionary):
    """
    Flatten one level of nesting: {"k1": {"k2": v}} -> {"k1.k2": v}.

    Input: a request's JSON dictionary output with nested dictionaries.
    Output: a flattened dictionary (format: key1.key2 = value2).
    """
    flat = {}
    for key, value in dictionary.items():
        if not isinstance(value, dict):
            flat[key] = value
            continue
        for inner_key, inner_value in value.items():
            flat[f"{key}.{inner_key}"] = inner_value
    return flat
def five_uneven_peak_trap(x=None):
    """
    F1: Five-Uneven-Peak Trap.

    Variable range: x in [0, 30].
    No. of global peaks: 2; no. of local peaks: 3.
    Returns None for x=None or x outside [0, 30].
    """
    if x is None:
        return None
    # Piecewise-linear segments as (low, high, slope, anchor);
    # the value on [low, high) is slope * (x - anchor).
    segments = (
        (0.0, 2.5, -80, 2.5),
        (2.5, 5.0, 64, 2.5),
        (5.0, 7.5, -64, 7.5),
        (7.5, 12.5, 28, 7.5),
        (12.5, 17.5, -28, 17.5),
        (17.5, 22.5, 32, 17.5),
        (22.5, 27.5, -32, 27.5),
    )
    for low, high, slope, anchor in segments:
        if low <= x < high:
            return slope * (x - anchor)
    # Final segment is closed on the right at 30.
    if 27.5 <= x <= 30:
        return 80 * (x - 27.5)
    return None
import numpy as np
def get_normals(space, npoints):
    """Get the (sign-corrected) normal vectors on the quadrature points.

    Each element's normal, multiplied by its normal multiplier, is repeated
    once per quadrature point.

    :param space: function space providing ``grid`` and ``normal_multipliers``
    :param npoints: number of quadrature points per element
    :return: float64 array of shape (npoints * number_of_elements, 3)
    """
    grid = space.grid
    nelem = grid.number_of_elements
    # Vectorized form of the original per-element Python loop: scale each
    # element normal by its multiplier, then repeat it npoints times.
    scaled = (
        np.asarray(grid.normals, dtype="float64")[:nelem]
        * np.asarray(space.normal_multipliers)[:nelem, np.newaxis]
    )
    return np.repeat(scaled, npoints, axis=0)
import torch
def _to_real(x: torch.Tensor) -> torch.Tensor:
"""View complex tensor as real."""
x = torch.view_as_real(x)
return x.view(*x.shape[:-2], -1) | 8bb7b8db208bcfd433976236fa56ce42560b8adf | 47,181 |
def compute_path_cost(nxobject, path):
    """Sum the edge weights along *path* in the graph *nxobject*."""
    return sum(
        nxobject[u][v]["weight"]
        for u, v in zip(path, path[1:])
    )
from typing import Any
import numpy
def should_sanitize(indexing_element: Any) -> bool:
    """Decide whether to sanitize an indexing element or not.

    Sanitizing in this context means converting supported numpy values into
    python values: numpy integer scalars and zero-dimensional integer arrays.

    Args:
        indexing_element (Any): the indexing element to decide sanitization.

    Returns:
        bool: True if the indexing element should be sanitized, else False.
    """
    if isinstance(indexing_element, numpy.integer):
        return True
    return (
        isinstance(indexing_element, numpy.ndarray)
        and indexing_element.shape == ()
        and issubclass(indexing_element.dtype.type, numpy.integer)
    )
def parse_request(event):
    """
    Parse an API Gateway event and return the product id.

    Expects the event to contain a 'pathParameters' dict with a
    'productId' key/value pair.

    :param event: api gateway event
    :return: a dict containing the product_id key/value
    :raises Exception: if 'pathParameters' or 'productId' is missing
    """
    if 'pathParameters' not in event:
        raise Exception("Invalid event. Missing 'pathParameters'")
    path_parameters = event["pathParameters"]
    # BUG FIX: API Gateway sends "pathParameters": null when no path
    # parameters are present; the original crashed with a TypeError here.
    if not path_parameters or 'productId' not in path_parameters:
        raise Exception("Invalid event. Missing 'productId' in 'pathParameters'")
    return {
        "product_id": path_parameters['productId']
    }
def listify(l):
    """Wrap *l* in a list unless it already is one."""
    return l if isinstance(l, list) else [l]
def paste(x, sep=", "):
    """Join the strings in *x* with *sep*, trimming any separator
    characters from both ends of the result."""
    joined = "".join(item + sep for item in x)
    return joined.strip(sep)
def getContainerName(ctx, containerFlavor):
    """Look up the configured container name for the given flavor."""
    containers = ctx.cf.ocp.containers
    return getattr(containers, containerFlavor).name
def clean_non_ascii(str):
    """Replace every non-ASCII character in the input string with a space.

    NOTE(review): the parameter shadows the builtin ``str``; kept unchanged
    for backward compatibility with keyword callers.
    """
    return ''.join(ch if ch.isascii() else ' ' for ch in str)
import csv
def tab_to_csv(txtfile, csvfile):
    """Convert a file from tab delimited to comma delimited (CSV) format.

    :param txtfile: path of the tab-delimited input file
    :param csvfile: path of the CSV output file
    :return: True on completion
    """
    # newline='' lets the csv module control line endings itself
    # (avoids blank rows on Windows).
    with open(txtfile, 'r') as infile, open(csvfile, 'w', newline='') as outfile:
        stripped = (line.strip() for line in infile)
        # BUG FIX: the original split on "," — a no-op for tab-delimited
        # input; split on tabs as the function name and docstring promise.
        rows = (line.split("\t") for line in stripped if line)
        csv.writer(outfile).writerows(rows)
    return True
def check_image_in_supercell(site1, site2, supercell_size):
    """
    Check whether site1 and site2 are periodic images of each other in the
    super cell structure, given the size of the super cell.

    :param site1: (Site) site in super cell
    :param site2: (Site) site in super cell
    :param supercell_size: (integer) side length of super cell (in unit cells)
    :return: (boolean) whether site1 and site2 are periodic images of each other
    """
    # Images iff, along every axis, the fractional-coordinate difference
    # scaled by the super cell size is a whole number (rounded to 5
    # decimals to absorb floating-point noise).
    for coord1, coord2 in zip(site1.frac_coords, site2.frac_coords):
        if not round((coord1 - coord2) * supercell_size, 5).is_integer():
            return False
    return True
def get_n_week(dfi, n=1):
    """Get data for week -N as a {column: total} dict, keeping only
    positive totals, sorted in descending order."""
    weekly_totals = dfi.resample("W-MON", closed="left").sum()
    week_row = weekly_totals.iloc[-n].T
    positive = week_row[week_row > 0]
    return positive.sort_values(ascending=False).to_dict()
def formatAbn(abn):
    """Format an 11-character string of digits as an ABN ('NN NNN NNN NNN').

    Input of any other length is returned unchanged.
    """
    if len(abn) != 11:
        return abn
    return ' '.join((abn[0:2], abn[2:5], abn[5:8], abn[8:11]))
def login_elements(tag):
    """A filter to find CAS login form elements.

    Matches tags carrying both a 'name' and a 'value' attribute.
    """
    # BUG FIX: Tag.has_key() was removed from BeautifulSoup 4;
    # Tag.has_attr() is the supported way to test for an attribute.
    return tag.has_attr('name') and tag.has_attr('value')
def disjoint_bounds(bounds1, bounds2):
    """Return True if the two bounding boxes do not overlap.

    Parameters
    ----------
    bounds1: rasterio bounds tuple (xmin, ymin, xmax, ymax)
    bounds2: rasterio bounds tuple
    """
    xmin1, ymin1, xmax1, ymax1 = bounds1[0], bounds1[1], bounds1[2], bounds1[3]
    xmin2, ymin2, xmax2, ymax2 = bounds2[0], bounds2[1], bounds2[2], bounds2[3]
    return (xmin1 > xmax2 or xmax1 < xmin2 or
            ymin1 > ymax2 or ymax1 < ymin2)
def text_to_be_spellchecked(request):
    """Represents the text fields that can be spellchecked, including:
    - tactic and technique names and descriptions
    - case study names and summaries, procedure step descriptions

    Parametrized pytest-fixture helper: forwards the current parameter
    supplied via ``request.param``.
    """
    return request.param
import pkgutil
import importlib
import sys
def list_modules(package, exclude=None):
    """
    Recursively list the fully-qualified names of all modules in a package.

    Each submodule is imported to discover nested packages, then removed
    from ``sys.modules`` again so the scan leaves the interpreter's module
    cache unchanged.

    :param package: object, a module object
    :param exclude: set, a set of module names to
        exclude from the resulting list
    :return: list, a list of fully-qualified module names of a package
    """
    if exclude is None:
        exclude = set()
    res = []
    # Plain modules (no __path__) contain no submodules to walk.
    if not hasattr(package, '__path__'):
        return res
    base_name = package.__name__
    for _, m, _ in pkgutil.iter_modules(package.__path__):
        # Skip dunder/private entries such as __main__.
        if m.startswith('__'):
            continue
        fullname = '%s.%s' % (base_name, m)
        # Import to discover nested submodules ...
        mod = importlib.import_module(fullname)
        # ... then drop it from the module cache again to avoid side effects.
        del sys.modules[fullname]
        res.append(fullname)
        res += list_modules(mod)
    return [m for m in res if m not in exclude]
import codecs
def encoding(argument):
    """
    Verify the encoding argument by lookup.
    (Directive option conversion function.)

    Raises ValueError for unknown encodings.
    """
    try:
        codecs.lookup(argument)
    except LookupError as error:
        # Chain the original lookup failure for easier debugging.
        raise ValueError('unknown encoding: "%s"' % argument) from error
    return argument
def get_dependencies(cells):
    """
    Retrieve the 'pip install' commands from notebook cells.

    :param cells: the cells to iterate
    :type cells: list
    :return: the list of dependencies (instances of "pip install ...")
    :rtype: list
    """
    return [
        line[line.index("%") + 1:]
        for cell in cells
        if "cell_type" in cell and cell["cell_type"] == "code" and "source" in cell
        for line in cell["source"]
        if "%" in line and "pip " in line
    ]
import os
def _input_to_bed(theta_input, work_dir, get_coords, headers):
"""Convert input file to a BED file for comparisons
"""
theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0])
with open(theta_input) as in_handle:
with open(theta_bed, "w") as out_handle:
for line in in_handle:
if not line.startswith(headers):
chrom, start, end = get_coords(line)
out_handle.write("\t".join([chrom, start, end]) + "\n")
return theta_bed | b67fe99d143697a1d236bda4315f4c3a52018633 | 47,206 |
def qvarToString(qv):
    """Convert a QVariant to a Python string via its str() representation."""
    return str(qv)
def has_linear_regression(args):
    """Return a truthy value when any linear-regression flag is set.

    Mirrors ``a or b or c``: yields the first truthy flag, or the last
    flag's value when none are set.
    """
    return (
        args.linear_regression
        or args.linear_regressions
        or args.linear_regression_tag
    )
import os
def find_file_name_without_suffix(file_full_path):
    """
    Return the file name without its extension.

    :param file_full_path: full path of the file
    :return: base name with the final extension stripped
    """
    file_name = os.path.basename(file_full_path)
    # BUG FIX: the original used str.rfind('.'), which returns -1 for names
    # without a dot and silently chopped the last character (e.g. 'README'
    # became 'READM'). os.path.splitext handles no-dot and leading-dot
    # names correctly.
    return os.path.splitext(file_name)[0]
import os
def checkFileIsWritable(filename, create=False):
    """
    Check that the input filename is writable. That is, the path
    leading up to it exists (or can be created) and is not itself a
    directory.

    :param filename: File to check
    :param create: If True then needed directories are created
    :return: True if the file exists or could be created, else False
    """
    if filename is None:
        return False
    try:
        # Don't bother if the file already exists.
        if os.path.isfile(filename):
            return True
        # An existing directory can never be created as a file.
        if os.path.isdir(filename):
            return False
        filedir = os.path.dirname(filename)
        # If the directory exists we can just create the file later.
        if os.path.isdir(filedir):
            return True
        # NOTE(review): when create is False and the parent directory is
        # missing, this still returns True — preserved from the original;
        # confirm callers rely on it before tightening.
        if create:
            os.makedirs(filedir)
    except (OSError, TypeError):
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; narrow to the failures we actually expect.
        return False
    return True
def _is_spark_step_type(step_type):
"""Does the given step type indicate that it uses Spark?"""
return step_type.split('_')[0] == 'spark' | 31d367c44f1a856e21c25f03e78a4eab48fc3af8 | 47,211 |
def is_message(line, msg_number):
    """Return True if *line* contains the message identified by *msg_number*.

    Parameters
    ----------
    line: A single line from the NEST CI build log file.
    msg_number: Message number string.

    Returns
    -------
    True or False
    """
    return msg_number in line
def _clean_header_str(value: bytes) -> str:
"""Null-terminates, strips, and removes trailing underscores."""
return value.split(b'\x00')[0].decode().strip().rstrip('_') | e5776a36eeb36320aec8085f094d6d00f3c3a718 | 47,213 |
def factors(number):
    """Return the proper divisors of a natural number (excluding the number itself)."""
    if number == 1:
        return []
    if number < 1:
        raise ValueError("Requires a Natural number")
    return [candidate for candidate in range(1, number) if number % candidate == 0]
def _get_query_sample(query):
"""Format query sample."""
return f"""
<p><b>Example</b></p>
<p>{{QueryProvider}}[.QueryPath].QueryName(params...)</p>
<pre>qry_prov.{query}(start=start, end=end, hostname=host)</pre>
""" | f32740e77d4d6916d52d04c2a6e4efc33aa83a29 | 47,215 |
def binData(data, new_shape):
    """Bin a 2-D time-series by summing equal-sized blocks.

    Parameters
    -----------
    data : nd array
        time-series (numTrials * time-points).
    new_shape : 1d array
        [numTrials, numBin]

    Returns
    -------
    binned_data : nd array
        binned time-series (numTrials * numBin).
    """
    n_trials, n_bins = new_shape[0], new_shape[1]
    blocks = data.reshape(n_trials, data.shape[0] // n_trials,
                          n_bins, data.shape[1] // n_bins)
    # Collapse the within-block axes (time samples first, then trials).
    return blocks.sum(axis=-1).sum(axis=1)
import os
def checkCudaInstalation(version):
    """
    Check whether the CUDA driver library is installed for the given
    architecture triplet prefix (e.g. 'x86_64').

    Returns False when the library — or the whole lib directory — is missing.
    """
    lib_dir = '/usr/lib/{}-linux-gnu'.format(version)
    try:
        return 'libcuda.so' in os.listdir(lib_dir)
    except FileNotFoundError:
        # BUG FIX: the original raised when the directory did not exist.
        return False
def compose_batch_command_of_script(
    source, destination, script, particle, wait_jobs, suffix
):
    """
    Create the slurm batch command for the given utility script.

    Parameters
    ----------
    source: str
        Source directory
    destination: str
        Destination directory
    script: str
        Script to be used in the slurm command. Either
        'lstmcpipe_utils_move_dir' or 'lstmcpipe_utils_cp_config'
    particle: str
        Particle type for slurm job-naming
    wait_jobs: str
        Job-id to be used as dependency in the batched slurm command
    suffix: str
        Suffix to indicate the kind of job

    Returns
    -------
    batch_cmd: str
        Full slurm batch command ready to batch the script argument
    """
    job_label = "{}_{}".format(particle, suffix)
    wrapped_cmd = "{} -s {} -d {}".format(script, source, destination)
    return (
        "sbatch --parsable -p short -J {label} -e slurm-{label}.e -o slurm-{label}.o "
        '--dependency=afterok:{deps} --wrap="{cmd}"'
    ).format(label=job_label, deps=wait_jobs, cmd=wrapped_cmd)
def fib(num):
    """Walk the Fibonacci sequence, printing num // 2 successive pairs
    and returning the final (previous, current) pair."""
    prev, curr = 0, 1
    for _ in range(num // 2):
        print(prev, curr)
        # Same update as the original: new prev = prev + curr,
        # new curr = curr + new prev.
        prev, curr = prev + curr, prev + 2 * curr
    return prev, curr
def check_greenlist_positions(curword: str, grn: list) -> bool:
    """
    Check that *curword* carries every confirmed (green) letter in its slot.

    :param curword: The current word from the word pool
    :param grn: Array of 5 entries; "0" marks positions with no confirmed letter
    :return: False if any green letter is contradicted, True otherwise
    """
    return all(
        grn[i] == "0" or curword[i] == grn[i]
        for i in range(5)
    )
def find_klt_for_frame(klt, klt_frames, i):
    """ Finds all KLT tracks appearing in a given frame """
    if i not in klt_frames:
        return []
    return [klt[track_id] for track_id in klt_frames[i]]
def properGetitem(f):
    """
    Decorator for __getitem__/__setitem__ methods that guarantees the
    index argument is always delivered to *f* as a tuple.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped method's name/docstring
    def proper(self, mbt, *args):
        if not isinstance(mbt, tuple):
            mbt = (mbt,)
        return f(self, mbt, *args)
    return proper
import re
def process_reddit(comment):
    """
    Pre-process a comment: drop non-ASCII characters, then keep only
    letters, commas, periods and spaces.

    Parameters
    ----------
    comment : str
        Given sentence

    Returns
    -------
    Processed sentence
    """
    ascii_only = comment.encode("ascii", errors="ignore").decode()
    return re.sub(r'[^A-Za-z,. ]+', '', ascii_only)
def divisors(num):
    """Return all positive divisors of *num*, including *num* itself.

    Runs in O(n) by trial division over [1, num].
    """
    assert isinstance(num, int)
    return [candidate for candidate in range(1, num + 1) if num % candidate == 0]
import math
def translate_point(point, angle, distance):
    """Translate a point a distance in a direction (angle, radians)."""
    x, y = point
    dx = math.cos(angle) * distance
    dy = math.sin(angle) * distance
    return (x + dx, y + dy)
def dict_in_list_always(main, sub):
    """
    Return True iff every dict in *main* equals *sub*.

    >>> main = [{'c': 'c', 'a': 'a', 'b': 'b'}, {'c': 'c', 'd': 'd'}]
    >>> dict_in_list_always(main, {'a': 'a', 'b': 'b', 'c': 'c'})
    False
    >>> dict_in_list_always(main, {'c': 'c', 'd': 'd'})
    False
    >>> dict_in_list_always(main, {})
    False
    >>> main = [{'c': 'c', 'a': 'a', 'b': 'b'}, {'c': 'c', 'a': 'a', 'b': 'b'}]
    >>> dict_in_list_always(main, {'c': 'c', 'a': 'a', 'b': 'b'})
    True
    >>> main = [{'c': 'c', 'a': 'a', 'b': 'b'}]
    >>> dict_in_list_always(main, {'c': 'c', 'a': 'a', 'b': 'b'})
    True
    """
    return all(entry == sub for entry in main)
def compute_Ia_fr(stretch, stretchVel, muscActivation, species):
    """ Compute the firing rate of the Ia afferent fibers.

    Uses the experimentally derived model developed by Prochazka
    (1999, p. 136).

    Keyword arguments:
    stretch -- muscle stretch in mm.
    stretchVel -- muscle stretch velocity in mm/s.
    muscActivation -- normalized muscle activation [0, 1].
    species -- one of "rat", "mouse" or "human".
    """
    # Signed |velocity|**0.6 term; the sign of zero is treated as positive.
    direction = 1.0 if stretchVel >= 0 else -1.0
    velocity_term = direction * abs(stretchVel) ** 0.6
    # bias + position gain * stretch + velocity gain * term
    # + muscle-activation gain * activation, clamped at zero.
    firing_rate = 50 + 2 * stretch + 4.3 * velocity_term + 50 * muscActivation
    if firing_rate < 0:
        firing_rate = 0
    if species in ("rat", "mouse"):
        return firing_rate
    if species == "human":
        return firing_rate * 0.25
    raise Exception("Invalid species")
def get_U_d_windowdoor(U_d_W, U_d_D, A_d_W, A_d_D):
    """Area-weighted U-value of an opening that combines a window part and a
    door part in one frame (e.g. a door with a transom or side window) — Eq. (10).

    Args:
        U_d_W (float): thermal transmittance of the window part
        U_d_D (float): thermal transmittance of the door part
        A_d_W (float): area of the window part (input: WindowPart > Area)
        A_d_D (float): area of the door part (input: DoorPart > Area)

    Returns:
        float: combined thermal transmittance U_d of the opening
    """
    weighted_sum = A_d_W * U_d_W + A_d_D * U_d_D
    total_area = A_d_W + A_d_D
    return weighted_sum / total_area
import collections
def decode_attribute_idx_data(submissions):
    """Return a list of dicts representing the decoded data.

    Some of the form data returned from MTurk is encoded as
    ``"attribute-idx": value`` mappings, where attribute is the encoded
    attribute and idx is the index of the problem instance. This function
    separates those mappings back out into per-instance dictionaries.

    Parameters
    ----------
    submissions : List[Dict[str, str]]
        The data to decode, formatted in the attribute-idx style.

    Returns
    -------
    List[Dict[str, str]]
        A list of dictionaries with each instance separated out individually.
    """
    rows = []
    for submission in submissions:
        grouped = {}
        for field_name, field_value in submission.items():
            attribute, idx = field_name.rsplit('-', 1)
            grouped.setdefault(idx, {})[attribute] = field_value
        rows.extend(grouped.values())
    return rows
def get_index_with_default(header, column_name, default_value=None):
    """Helper function to extract the index of a column, falling back to
    *default_value* when the column is absent."""
    if column_name in header:
        return header.index(column_name)
    return default_value
def concat(arrays):
    """
    Concatenate an array of arrays into a 1 dimensional array.

    :param arrays: non-empty list whose elements are all lists
    :raises ValueError: if any element of *arrays* is not a list
    """
    first = arrays[0]
    if not isinstance(first, list):
        raise ValueError(
            "Each element in the concatenation must be an instance "
            "of `list`."
        )
    # BUG FIX: copy the first list instead of aliasing it, so the
    # caller's input is no longer mutated by the += below.
    concatenated = list(first)
    for array in arrays[1:]:
        if not isinstance(array, list):
            raise ValueError(
                "Each element in the concatenation must be an instance "
                "of `list`."
            )
        concatenated += array
    return concatenated
import argparse
def parse_args(argv=None):
    """Build and evaluate the command-line interface.

    :param argv: argument list to parse, or None to fall back to
        ``sys.argv[1:]`` (argparse's default behaviour).
    :return: the populated ``argparse.Namespace``
    """
    parser = argparse.ArgumentParser(
        description="partitioning of small sets with 25 or less members")
    # -q and -v (countable verbosity) are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    # group.add_argument("-v", "--verbose", action="store_true")
    group.add_argument("-q", "--quiet", action="store_true")
    group.add_argument(
        "-v", "--verbosity", action="count", default=0,
        help="increase output verbosity")
    parser.add_argument(
        '-o', '--out-filename', action='store', nargs=1,
        help='out file name if specified, else all sent to stdout',
        required=False)
    parser.add_argument(
        "-T", "--type", type=str, choices=['text', 'csv', 'json'],
        default="text",
        help="type of output (format), defaults to text")
    parser.add_argument("-b", "--bell-numbers", action="store_true",
                        help="export the Bell numbers known by package")
    parser.add_argument(
        "-m", "--multi-set", action="store_true",
        help="handle elements as being part of a multiset or bag")
    # At least one positional element is required.
    parser.add_argument(
        "element", nargs="+",
        help="define set as list of elements separated by spaces")
    return parser.parse_args(argv)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.