content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def find_fibre_table(hdulist):
    """Return the extension number of the first fibre-table extension found.

    Tries, in order: 'FIBRES', 'MORE.FIBRES', 'FIBRES_IFU', 'MORE.FIBRES_IFU'.
    Modified from the SAMI version that only uses FIBRES_IFU.

    Raises:
        KeyError: if none of the candidate extensions is present.
    """
    # The four repeated try/except blocks of the original collapse to a loop.
    candidates = ('FIBRES', 'MORE.FIBRES', 'FIBRES_IFU', 'MORE.FIBRES_IFU')
    for name in candidates:
        try:
            return hdulist.index_of(name)
        except KeyError:
            pass
    raise KeyError("Extensions 'FIBRES_IFU' and "
                   "'MORE.FIBRES_IFU' both not found")
def get_attributes(attrs):
    """Return an attribute string for the input dict.

    format: key="value" key="value" ...
    List values are joined with single spaces; surrounding spaces are stripped.
    """
    parts = []
    for key, value in attrs.items():
        # Caution! take care when value is list: join items with spaces
        # instead of concatenating manually.
        if isinstance(value, list):
            value = ' '.join(value).strip()
        parts.append('%s="%s"' % (key, value))
    return ' '.join(parts).strip()
def join(s1, s2):
    # type: (str, str) -> str
    """Join pathnames.

    Ignore the previous parts if a part is absolute. Insert a '/' unless the
    first part is empty or already ends in '/'.

    Special case of os.path.join() which avoids varargs.
    """
    if s2.startswith('/') or not s1:
        # s2 is absolute, or there is nothing to prepend.
        return s2
    separator = '' if s1.endswith('/') else '/'
    return s1 + separator + s2
def full_overlap(aIntervalA, aIntervalB):
    """Return True if interval A falls completely within interval B, else False.

    Each interval is (chrom/id, start, end). Intervals with different first
    fields never overlap.

    Raises:
        Exception: if either input is not a 3-column interval.
    """
    # Check that both inputs are 3-column intervals
    if not len(aIntervalA) == len(aIntervalB) == 3:
        raise Exception("Regions could not be overlapped")
    # Bug fix: the original returned None (not False) on several branches,
    # e.g. different chromosomes or A starting before B.
    return (aIntervalA[0] == aIntervalB[0]
            and aIntervalA[1] >= aIntervalB[1]
            and aIntervalA[2] <= aIntervalB[2])
def uniquify_names(names):
    """
    Takes a list of strings. If there are repeated values, appends some suffixes
    to make them unique.

    Every occurrence of a repeated name gets an "[i]" suffix (including the
    first occurrence), and the suffix index keeps incrementing if a candidate
    itself collides with a name already emitted.
    """
    # First pass: count how many times each name occurs.
    count = {}
    for name in names:
        count.setdefault(name, 0)
        count[name] += 1
    seen = set()
    res = []
    for name in names:
        unique = count[name] == 1
        new_name = name
        i = 0
        # If a name is not unique, append suffix to all occurrences.
        # E.g. turn ["a", "a"] into ["a[0]", "a[1]"], not ["a", "a[0]"]
        while new_name in seen or (not unique and i == 0):
            new_name = name + "[" + str(i) + "]"
            i += 1
        seen.add(new_name)
        res.append(new_name)
    return res
def game_of_life(board):
    """
    Returns a new board with the next generation of Conway's Game of Life.
    """
    rows, cols = len(board), len(board[0])

    def live_neighbors(r, c):
        # Count live cells in the 8 surrounding positions, clipped to the grid.
        total = 0
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr == 0 and dc == 0:
                    continue
                rr, cc = r + dr, c + dc
                if 0 <= rr < rows and 0 <= cc < cols:
                    total += board[rr][cc]
        return total

    next_gen = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        for c in range(cols):
            n = live_neighbors(r, c)
            if board[r][c] == 1:
                # Survival requires exactly 2 or 3 live neighbors.
                next_gen[r][c] = 1 if n in (2, 3) else 0
            else:
                # Reproduction requires exactly 3 live neighbors.
                next_gen[r][c] = 1 if n == 3 else 0
    return next_gen
import re
def is_valid_hair_color(value):
    """Validate a # followed by exactly six characters 0-9 or a-f.

    >>> is_valid_hair_color("#123abc")
    True
    >>> is_valid_hair_color("#123abz")
    False
    >>> is_valid_hair_color("123abc")
    False
    """
    # fullmatch avoids the re.match + '$' pitfall where a trailing
    # newline ("#123abc\n") would still be accepted.
    return re.fullmatch(r"#[0-9a-f]{6}", value) is not None
def _get_column_name(x):
    """Get the name of a column from Altair specification."""
    shorthand = x.shorthand
    # A shorthand like "field:Q" carries a one-letter type code; strip it.
    if len(shorthand) > 1 and shorthand[-2] == ':':
        return shorthand[:-2]
    return shorthand
def triggers_to_event_id(triggers):
    """Convert list or dict of triggers to MNE-style event_id"""
    if isinstance(triggers, list):
        # A bare list becomes a dict keyed by each trigger's string form.
        triggers = {str(trigger): trigger for trigger in triggers}
    else:
        assert isinstance(triggers, dict), \
            '`triggers` must be either list or dict'
    # Coerce trigger values to integers (R would pass them as floats).
    return {name: int(code) for name, code in triggers.items()}
def silent_progress_bar(iterable):
    """
    Dummy progress bar: displays nothing and simply returns an iterator
    over the given iterable.

    :param iterable: the iterable to turn into an iterable
    :type iterable: iterable
    :return: iterable
    :rtype: iterable

    >>> next(silent_progress_bar([1, 2, 3]))
    1
    """
    iterator = iter(iterable)
    return iterator
from typing import OrderedDict
def create_results_dict(results_per_test, linked_problems):
    """
    Generates a dictionary used to create HTML and RST tables.

    :param results_per_test : results nested array of objects
    :type results_per_test : list[list[list]]
    :param linked_problems : rst links for supporting pages
    :type linked_problems : list[str]

    :return : tuple(acc_results, time_results, html_links)
              dictionary of accuracy and timing results and
              html links for rendering
    :rtype : tuple(dict, dict, list)
    """
    template = '<a target="_blank" href="{0}">{1}</a>'
    acc_results = OrderedDict()
    time_results = OrderedDict()
    html_links = []
    occurrence = 1
    previous = ''
    for prob_results, link in zip(results_per_test, linked_problems):
        name = prob_results[0].problem.name
        # Number repeated problem names: "name 1", "name 2", ...
        if name == previous:
            occurrence += 1
        else:
            occurrence = 1
            previous = name
        prob_name = '{} {}'.format(name, occurrence)
        # The rst link looks like '`text <url>`__'; pull out the url part.
        url = link.split('<')[1].split('>')[0]
        html_links.append(template.format(url, prob_name))
        acc_results[prob_name] = [r.chi_sq for r in prob_results]
        time_results[prob_name] = [r.runtime for r in prob_results]
    return acc_results, time_results, html_links
def count_if(predicate, iterable):
    """Count the number of items of an iterable that satisfy predicate."""
    total = 0
    for item in iterable:
        if predicate(item):
            total += 1
    return total
def byte_to_snorm(x):
    """Map integer x in [0, 255] linearly to a float in [-1, 1]."""
    return x / 127.5 - 1.0
def get_fields(this_line):
    """
    Takes a line from semcor, parses it and returns it as a dictionary
    For <wf id=a lemma=b>hi</wf> would created m['id']=a m['lemma']=b m['text']=hi
    @type this_line: string
    @param this_line: line from SemCor
    @rtype: map
    @return: map with the attributes
    """
    ret = {}
    # Split "<wf id=a lemma=b>hi</wf>" at '>' into the tag part and the rest.
    fields = this_line.strip().split('>')
    # fields[0] is "<wf id=a lemma=b"; attributes are space-separated.
    attribs = fields[0].split(' ')
    # fields[1] is "hi</wf"; the token text ends at the closing tag.
    text = fields[1].split('<')[0]
    ret['text'] = text
    for at in attribs:
        pair = at.split('=')
        if len(pair) == 2: #To avoid <wf
            ret[pair[0]] = pair[1]
    return ret
import re
def _remove_chars(chars, s):
    """
    Remove every occurrence of the given characters from s.

    :param chars: string of characters to remove.
    :param s: input string
    :rtype: str

    >>> _remove_chars(string.punctuation, 'OLI_TIRS+')
    'OLITIRS'
    >>> _remove_chars('_', 'A_B_C')
    'ABC'
    >>> _remove_chars(string.punctuation, None)
    """
    if not s:
        return s
    # Build a character class from the (escaped) characters and drop them all.
    pattern = '[' + re.escape(''.join(chars)) + ']'
    return re.sub(pattern, '', s)
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a
    maximum length."""
    decoded = text.decode('utf-8')
    if len(decoded) > maxlen:
        keep = max(0, maxlen - 3)  # leave room for the ellipsis
        decoded = decoded[:keep] + '...'
    return decoded.encode('utf8')
def pad(data, bs):
    """ Pad data to the given blocksize, PKCS-5 style.

    :param data: the data
    :param bs: the blocksize """
    pad_len = bs - len(data) % bs
    # PKCS#5: append pad_len copies of the character whose value is pad_len.
    return data + chr(pad_len) * pad_len
from typing import List
from typing import Dict
from typing import Set
import collections
import json
def load_entities_fn(file_name: str, types: List[str]) -> Dict[str, Set[str]]:
    """Given a json-lines file from Pubtator, loads the entities.

    Args:
        file_name: Path to the json-lines files from Pubtator.
        types: List of entity types to be loaded (compared lower-cased).

    Returns:
        type_entities_dict: A dictionary where key is an entity type and
            value is a set of entities of that type.
    """
    type_entities_dict = collections.defaultdict(set)
    with open(file_name) as f:
        for line in f:
            json_obj = json.loads(line)
            for entity in json_obj['entity_list']:
                if entity['type'].lower() in types:
                    # Entity names may hold several '|'-separated synonyms;
                    # store each one individually, lower-cased and stripped.
                    for entity_i in entity['name'].lower().split('|'):
                        type_entities_dict[entity['type'].lower()].add(
                            entity_i.strip())
    return type_entities_dict
def split_to_chunks(x, y, max_chunk_size, overlapping_size):
    """
    We split input data into `max_training_size` chunks with each chunk overlapping `overlapping_size` points
    with the previous and next chunks.

    :param x: unscaled feature matrix of size n x 1
    :param y: unscaled label matrix of size n x 2
    :param max_chunk_size: the max size of each chunks
    :param overlapping_size: the #points overlapping between each consecutive chunks
    :return: list of tuples where each tuple contains (x_i, y_i) of i-th chunk
    """
    chunks = []
    total = len(x)
    start = 0
    while True:
        end = min(start + max_chunk_size, total)
        chunks.append((x[start:end], y[start:end]))
        if end >= total:
            break
        # Back up so consecutive chunks share `overlapping_size` points.
        start = end - overlapping_size
    return chunks
def getClusterMembers(Z):
    """Generate dict of lists where each key is a cluster ID from the results
    of linkage-based hierarchical clustering with scipy.cluster.hierarchy.linkage (Z)

    Parameters
    ----------
    Z : linkage matrix [clusters, 4]

    Returns
    -------
    members : dict of lists
        Each element has a cluster ID (key) and a list of
        cluster members (indices into the original data matrix)"""
    clusters = {}
    for i, merge in enumerate(Z):
        # scipy convention: merge row i creates cluster id n_leaves + i, where
        # n_leaves == Z.shape[0] + 1; ids <= Z.shape[0] refer to single leaves.
        cid = 1 + i + Z.shape[0]
        clusters[cid] = [merge[0], merge[1]]
    def _getIndices(clusters, i):
        # Recursively expand a cluster id into the leaf indices it contains.
        if i <= Z.shape[0]:
            return [int(i)]
        else:
            return _getIndices(clusters, clusters[i][0]) + _getIndices(clusters, clusters[i][1])
    members = {i:_getIndices(clusters, i) for i in range(Z.shape[0] + 1, max(clusters.keys()) + 1)}
    return members
def _remove_missing_resource_ids(config_records, resource_ids):
    """
    Return the subset of `resource_ids` that appear in the given config
    compliance records (i.e. filter out ids that have no matching record).

    NOTE(review): despite the name, nothing is removed in place — the
    function returns the ids that WERE found in `config_records`.

    :param config_records: config compliance records
    :param resource_ids: list of resource ids
    :returns: list of resource IDs found in compliance records
    """
    resources_in_config = []
    for config_record in config_records:
        config_record_id = config_record['EvaluationResultIdentifier'][
            'EvaluationResultQualifier']['ResourceId']
        if config_record_id in resource_ids:
            resources_in_config.append(config_record_id)
    return resources_in_config
def yesno(ans):
    """Convert user input (Yes or No) to a boolean"""
    # Accept 'y'/'yes' in any capitalization; everything else is False.
    return ans.lower() in ('y', 'yes')
def text_extend(text, width, padchar=" "):
    """
    Pad string `text` to width `width` using char `padchar`.

    Extend a string 'smartly': if `text` is already longer than `width`,
    shorten it to `width` characters by keeping the tail and prefixing an
    ellipsis; otherwise left-justify and pad.
    """
    out = text.ljust(width, padchar)
    if len(out) > width:
        # Bug fix: the original sliced [...:-1], silently dropping the
        # final character of the text. Keep the true tail instead.
        return "..." + out[-(width - 3):]
    return out
def is_data_format_channel_last(data_format):
    """True if data_format puts channel last."""
    # None means the default layout, which is channels-last.
    return data_format is None or data_format.endswith("C")
import re
def squash_underscores(_s: str) -> str:
    """squash consecutive underscores into singular ones"""
    # Only runs of two or more need replacing; singles already comply.
    return re.sub(r"_{2,}", "_", _s)
def to_kwh(m):
    """
    Convert a steam mass flow rate in klbs/hr to thermal energy in kWh.

    Known values are currently hard coded.

    Parameters:
    -----------
    m : float
        This is the mass of Abbott steam in klbs/hr

    Returns:
    --------
    kwh : float
        The energy equivalent in kWh thermal.
    """
    # Unused locals cp (specific heat) and dT (temperature change) removed;
    # the conversion only depends on the enthalpy difference.
    h_in = 196    # inlet enthalpy [BTU/lb]
    h_out = 1368  # outlet enthalpy [BTU/lb]
    # times 0.29307107 to convert from BTU/hr to kilowatts
    kwh = m * (h_out - h_in) * 0.29307107
    return kwh
from functools import reduce
from operator import mul
def product(numbers):
    """
    Return the result of multiplying the given numbers together.

    Return 1 if numbers is empty.
    """
    result = 1
    for number in numbers:
        result *= number
    return result
def keras_fit(model, X_train, Y_train, X_test, Y_test, batch_size=128,
              nb_classes=None, epochs=12, fLOG=None):
    """
    Fits a :epkg:`keras` model.

    @param      model       :epkg:`keras` model
    @param      X_train     training features
    @param      Y_train     training target (one-hot; column count gives nb_classes)
    @param      X_test      test features
    @param      Y_test      test target
    @param      batch_size  batch size
    @param      nb_classes  nb_classes; inferred from Y_train when None
    @param      epochs      number of iterations
    @param      fLOG        logging function
    @return                 model
    """
    # numpy.random.seed(1337) # for reproducibility
    if nb_classes is None:
        nb_classes = Y_train.shape[1]
    if fLOG:
        fLOG("[keras_fit] nb_classes=%d" % nb_classes)
    try:
        model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
                  verbose=1, validation_data=(X_test, Y_test))
    except Exception:  # pylint: disable=W0703
        # Older keras releases spell the epoch count `nb_epoch`; retry with
        # the legacy keyword when the modern `epochs` signature fails.
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=epochs,
                  verbose=1, validation_data=(X_test, Y_test))
    return model
def convert_calendar(ds, calendar, time_dim="time"):
    """
    Convert calendar, dropping invalid/surplus dates or inserting missing dates

    Parameters
    ----------
    ds : xarray Dataset
        A dataset with a time dimension
    calendar : str
        Name of the target calendar, passed through to
        ``xarray.Dataset.convert_calendar``
    time_dim : str, optional
        The name of the time dimension
    """
    # use_cftime=True forces cftime objects so non-standard calendars work.
    return ds.convert_calendar(calendar=calendar, dim=time_dim, use_cftime=True)
import aiohttp
async def fetch(url):
    """Execute an http call async

    Args:
        url: URL to call
    Return:
        responses: A dict like object containing http response
    """
    # A fresh session per call keeps this self-contained (no connection reuse).
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            # content_type=None skips aiohttp's content-type check, so
            # JSON served as e.g. text/plain still parses.
            resp = await response.json(content_type=None)
    return resp
import random
def SplitGaps(zs):
    """Splits zs into xs and ys.

    zs: sequence of gaps

    Returns: tuple of sequences (xs, ys)
    """
    xs = []
    ys = []
    for z in zs:
        # Choose a uniformly random split point inside each gap.
        x = random.uniform(0, z)
        xs.append(x)
        ys.append(z - x)
    return xs, ys
def get_duration(days=0, hours=0, minutes=0, seconds=0, millis=0):
    """
    Get the given duration in seconds

    :param float|int days:
    :param float|int hours:
    :param float|int minutes:
    :param float|int seconds:
    :param float|int millis:
    :return float: Number of seconds in the duration, incl. fractions
    """
    # Sum each component converted to seconds, largest unit first.
    return (float(days) * 24 * 60 * 60
            + float(hours) * 60 * 60
            + float(minutes) * 60
            + float(seconds)
            + millis / 1000.0)
def get_lr(T, s):
    """
    Returns the left and right neighbors (branches) of s in T.
    """
    def _lookup(fn):
        # A missing neighbor is reported as None rather than raising.
        try:
            return fn(s)
        except KeyError:
            return None
    return _lookup(T.floor_key), _lookup(T.ceiling_key)
def getBundleKey(bundlePath):
    """
    Return all parts of a bundle's "key" as used in a timestamp file,
    given its full filename.

    >>> getBundleKey("/bundleinfo/tor-browser/win32/some-file-name.txt")
    '/bundleinfo/tor-browser/win32/'
    """
    # We can't use os.path.dirname/split here: those are OS-dependent,
    # and all of our paths are in Unix format.
    lastSlash = bundlePath.rindex("/")
    return bundlePath[:lastSlash + 1]
def base_subtraction(x, y, b):
    """
    Subtract two digit strings in base b exactly as done by hand on paper,
    so there's no need to convert to base 10 and back.

    :param x: number x (digit string)
    :param y: number y (digit string)
    :param b: base b
    :return: the subtraction of the 2 numbers in base b as string
    """
    borrow = 0  # carried when the bottom digit exceeds the top digit
    digits = []
    # Walk the digits right-to-left, propagating the borrow.
    for pos in range(len(x) - 1, -1, -1):
        top = int(x[pos])
        bottom = int(y[pos]) + borrow
        # Modulo keeps the digit inside base b.
        digits.append(str((top - bottom) % b))
        borrow = 1 if bottom > top else 0
    return ''.join(reversed(digits))
def FindDuplicates(seq):
    """Identifies duplicates in a list.

    Does not preserve element order.

    @type seq: sequence
    @param seq: Sequence with source elements

    @rtype: list
    @return: List of duplicate elements from seq
    """
    seen = set()
    duplicates = set()
    for element in seq:
        # Anything encountered a second time goes into the duplicates set.
        (duplicates if element in seen else seen).add(element)
    return list(duplicates)
def TypeOrNothing(dart_type, comment=None, nullable=False):
    """Returns string for declaring something with |dart_type| in a context
    where a type may be omitted.
    The string is empty or has a trailing space.
    """
    if dart_type == 'dynamic':
        # foo(x) looks nicer than foo(var|dynamic x); keep a comment if given,
        # producing foo(/*T*/ x).
        return '/*%s*/ ' % comment if comment else ''
    suffix = '?' if nullable else ''
    return dart_type + suffix + ' '
from pathlib import Path
def ensure_abspath(p):
    """Return the fully expanded path for `p`."""
    # Expand '~' first, then resolve to an absolute, symlink-free path.
    expanded = Path(p).expanduser()
    return expanded.resolve()
def build_str_closures(s, lexid, iterator):
    """Builds the pair of closures for recognizing exact strings.
    """
    length = len(s)
    def match_str(iterator):
        # True when the source contains s exactly at the current position.
        return iterator.source.startswith(s, iterator.pos)
    def result_str(iterator):
        # Token tuple: (lexeme id, matched text, position after the match).
        return lexid, s, iterator.pos + length
    return match_str, result_str
def count(values):
    """
    Returns a dict of counts for each value in the iterable.
    """
    counts = dict()
    for v in values:
        # get() collapses the membership test and initialization into one step.
        counts[v] = counts.get(v, 0) + 1
    return counts
def is_falsy(x):
    """
    Check if argument is falsy.

    This is the same as calling ``not bool(x)``
    """
    if x:
        return False
    return True
import typing
def get_percentage(now: int, needed: int, /, *, save_float: bool = False) -> typing.Union[int, float]:
    """``function``

    Return the completion percentage of ``now`` against ``needed``.

    Parameters:
    -----------
    now: :class:`int` [Positional only]
        Current progress parameter.

    needed: :class:`int` [Positional only]
        Needed progress parameter.

    save_float: :class:`bool` = False [Keyword only]
        If True, return the exact :class:`float` percentage.

    Returns:
    --------
    percentage: :class:`typing.Union[int, float]`
        Percentage of progress.
    """
    ratio = (now / needed) * 100
    # Truncate (not round) to int unless the caller asks for the exact float.
    return ratio if save_float else int(ratio)
def string_to_list(string):
    """Takes a string and splits into a list of strings separated by semicolons. In
    effect, the inverse operation of list_to_string().

    e.g.
    string_to_list('John; Alice')
    #=> ['John', 'Alice']
    """
    separator = "; "
    return string.split(separator)
import torch
def SelectDevice(max_gpu_num: int = 0):
    """
    Determine whether a CUDA GPU is available; if so, collect the device
    indices to use, otherwise fall back to the CPU.

    How to query GPU info and count devices:
    REF: https://note.nkmk.me/python-pytorch-cuda-is-available-device-count/

    Arg:
        max_gpu_num(int): Maximum number of GPUs to use
            (0 <= n <= max_gpu_count). Default to 0.

    Returns:
        device_name(str): usable device name ("cpu" or "cuda").
        num_devices(List[int]): usable device indices.
            `device_name="cpu"` : `num_devices=[]`.
            With a single GPU, `device_name="cuda"` : `num_devices=[0]`.
            With multiple GPUs, `device_name="cuda"` : `num_devices=[0, 1, ...]`.
    """
    if torch.cuda.is_available():  # a GPU can be used
        num_devices = torch.cuda.device_count()
        if num_devices == 1:  # only one GPU is installed
            return "cuda", [0]
        else:  # two or more GPUs are installed
            gpu_num = []
            for i in range(num_devices):
                gpu_num.append(i)
                # NOTE(review): this condition breaks after the FIRST append
                # whenever num_devices < max_gpu_num, and otherwise collects
                # ALL devices regardless of max_gpu_num. Likely intended to be
                # `if len(gpu_num) >= max_gpu_num: break` — confirm intent.
                if num_devices < max_gpu_num:
                    break
            return "cuda", gpu_num
    else:  # no GPU available
        return "cpu", []
import hashlib
import json
def get_data_checksum(proc_input, proc_slug, proc_version):
    """Compute checksum of processor inputs, name and version."""
    digest = hashlib.sha256()
    # Canonical JSON (sorted keys) keeps the checksum stable across runs.
    for chunk in (json.dumps(proc_input, sort_keys=True),
                  proc_slug,
                  str(proc_version)):
        digest.update(chunk.encode("utf-8"))
    return digest.hexdigest()
def get_confirm_token(session, url):
    """
    Google may include a confirm dialog if the file is too big. This retrieves
    the confirmation token and uses it to complete the download.

    :param aiohttp.ClientSession session: used to get the cookies from the response
    :param str url : the url is used to filter out the correct cookies from the session
    :return the cookie if found or None if not found
    :rtype str
    """
    cookies = session.cookie_jar.filter_cookies(url)
    for key, cookie in cookies.items():
        # The large-file warning cookie is named 'download_warning_...';
        # any cookie with that prefix carries the confirm token.
        if key.startswith('download_warning'):
            return cookie
    return None
def panelParentShortcut(panel, parentShortcut):
    """
    return the first panelParent of panel that has the provided parentShortcut.
    Returns None if no panelParent with shortcut is found.
    """
    # Walk up the chain of panel parents; presumably these are host-UI panel
    # objects exposing panelParent()/par — confirm against the host API.
    while hasattr(panel, 'panelParent'):
        panel = panel.panelParent()
        # Stop at the top of the chain (None) or when the shortcut matches.
        # NOTE(review): `panel.par.parentshortcut` is compared directly to a
        # string; if it is a parameter object this relies on its __eq__.
        if panel is None or panel.par.parentshortcut == parentShortcut:
            return panel
def sampler_base(s):
    """Given the full name of sampler, e.g. blog.sample.MHSampler, returns
    its base, i.e. MHSampler
    """
    # rsplit from the right grabs the final dotted component directly.
    return s.rsplit(".", 1)[-1]
import math
def discounted_cumulative_gain(relevance_list, p):
    """
    Compute the discounted cumulative gain of `relevance_list` at position p:

        dcg_p = sum(from i=1, to p) rel_i / log2(i + 1)

    For an effective measure the final score should be normalized
    (dividing by the ideal DCG).

    :param relevance_list: list of elements to consider
    :param p: position to which we want to calculate the score
    """
    total = 0
    for rank, relevance in enumerate(relevance_list, start=1):
        if rank > p:
            break
        total += relevance / math.log2(rank + 1)
    return total
def get_story_memcache_key(story_id, version=None):
    """Returns a memcache key for the story.

    Args:
        story_id: str. ID of the story.
        version: str. Schema version of the story.

    Returns:
        str. The memcache key of the story.
    """
    # Versioned and unversioned stories live in distinct key namespaces.
    if not version:
        return 'story:%s' % story_id
    return 'story-version:%s:%s' % (story_id, version)
def poly_lr_scheduler(optimizer, init_lr, step, end_learning_rate=0.0001,
                      lr_decay_step=1, max_step=100000, power=1):
    """
    Polynomial decay of learning rate.

    Input:
        - optimizer : initial optimizer
        - init_lr : initial learning rate
        - step : current iteration
        - end_learning_rate : terminal learning rate
        - lr_decay_step : how frequently decay occurs, default is 1
        - max_step : number of maximum iterations
        - power : polymomial power

    Returns:
        - updated optimizer
    """
    # Only decay every `lr_decay_step` steps, and never past `max_step`.
    if step % lr_decay_step or step > max_step:
        return optimizer
    decay = (1 - step / max_step) ** power
    new_lr = (init_lr - end_learning_rate) * decay + end_learning_rate
    # Push the new rate into every parameter group.
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return optimizer
def list_diff(list1, list2):
    """Return the items of list1 that are not in list2 (order preserved)."""
    # Build the exclusion set ONCE; the original rebuilt set(list2) on every
    # iteration of the comprehension, making it O(len(list1) * len(list2)).
    excluded = set(list2)
    return [x for x in list1 if x not in excluded]
def check_format_rules(lineno, line):
    """
    Given a line number and a line, compare them against a set of rules. If it
    it fails a given rule, return an error message. If it passes all rules
    then return false.
    """
    real_lineno = lineno + 1  # enumerate() starts at 0
    if lineno == 0 and len(line) > 50:
        return "E%d: First line should be less than 50 characters in " \
               "length." % (real_lineno,)
    if lineno == 1 and line:
        return "E%d: Second line should be empty." % (real_lineno,)
    # Comment lines (starting with '#') are exempt from the length rule.
    if not line.startswith('#') and len(line) > 72:
        return "E%d: No line should be over 72 characters long." % (
            real_lineno,)
    return False
def defineParameter(dictionary, key, fallback = None):
    """
    Define a variable if it is contained in the dictionary, if not use a fallback

    Fairly self explanatory, this gets used a *lot*
    """
    # dict.get does the membership check and fallback in a single step.
    return dictionary.get(key, fallback)
import json
def load_json(msg):
    """
    Load the message value to json.
    """
    decoded = json.loads(msg)
    return decoded
def show_num_bins(autoValue, slider_value):
    """ Display the number of bins. """
    if "Auto" in autoValue:
        return "# of Bins: Auto"
    # Truncate the slider value to a whole bin count.
    return "# of Bins: %d" % int(slider_value)
def flatten(l):
    """Flatten 2D lists"""
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
def select(partition, model, train):
    """Select the group with the largest error from the current partition.

    Groups whose `all_same` flag is True are skipped. Returns None when no
    splittable group remains. (`model` and `train` are unused here but kept
    for interface compatibility with callers.)
    """
    candidates = [g for g in partition if g.all_same is False]
    if not candidates:
        return None
    # max() replaces the full reverse sort: only the top-error group is
    # needed, and ties resolve to the first occurrence either way.
    return max(candidates, key=lambda g: g.err)
def __scale_gpumd_tc(vol, T):
    """
    Used to scale the thermal conductivity when converting GPUMD heat-flux correlations
    to thermal conductivity.

    Args:
        vol (float):
            Volume in angstroms^3

        T (float):
            Temperature in K

    Returns:
        float: Converted value
    """
    # Unit conversion factors, in order: eV^3/amu -> Jm^2/s^2*eV,
    # fs -> s, and K/(eV*Ang^3) -> K/(eV*m^3) with Boltzmann folded in.
    energy_factor = 1.602176634e-19 * 9.651599e7
    time_factor = 1. / 1.e15
    boltzmann_factor = 1.e30 / 8.617333262145e-5
    return energy_factor * time_factor * boltzmann_factor / (T * T * vol)
def handle_difficulty_string(song, links):
    """
    Helper function. Returns a string formatted with Discord's hyperlink
    syntax, i.e. [text here](url here), joining available difficulties
    with ' | '.

    :param song: song to return difficulty information of (reads the
        Diff_E, Diff_H, Diff_C, Diff_G attributes)
    :param links: URL(s) containing links to view each difficulty's chart,
        ordered (easy, hard, chaos, glitch); None means "no chart".
    """
    # Note: the original wrapped each level in '"".join(str(int(...)))',
    # which is a no-op around a single string; str(int(...)) suffices.
    parts = []
    if links[0] is not None:
        parts.append(f'[EASY {int(song.Diff_E)}]({links[0]})')
    if links[1] is not None:
        parts.append(f'[HARD {int(song.Diff_H)}]({links[1]})')
    # CHAOS is always present.
    parts.append(f'[CHAOS {int(song.Diff_C)}]({links[2]})')
    if links[3] is not None:
        parts.append(f'[GLITCH {int(song.Diff_G)}]({links[3]})')
    return " | ".join(parts)
def human_bytes(byte: int, precision: int = 2) -> str:
    """Return a human readable version of the byte amount"""
    # Decimal (SI) units, largest first; fall through to plain bytes.
    units = (
        (10 ** 12, "TB"),
        (10 ** 9, "GB"),
        (10 ** 6, "MB"),
        (10 ** 3, "kB"),
    )
    for factor, suffix in units:
        if byte >= factor:
            return f"{round(byte / factor, precision)} {suffix}"
    return f"{byte} B"
def pick(mapping, keys):
    """Return a new dict using only the given keys."""
    result = {}
    for key, value in mapping.items():
        if key in keys:
            result[key] = value
    return result
import colorsys
def get_N_colors(N, s=0.8, v=0.9):
    """ get N distinct colors as a list of hex strings """
    colors = []
    for k in range(N):
        # Evenly spaced hues around the wheel give maximally distinct colors.
        r, g, b = colorsys.hsv_to_rgb(k / N, s, v)
        channels = (int(r * 255), int(g * 255), int(b * 255))
        colors.append("#" + "".join(format(c, '02x') for c in channels))
    return colors
def darglint_check(arg):
    """
    Used to trigger DAR101.

    Returns:
        Just a value to trigger the check.
    """
    result = 'check trigger'
    return result
def pick_single_fn(heads, fn_name):
    """ Iterates over heads and returns a static method called fn_name
    if and only if one head has a method of that name. If no heads have such a method, None is returned.
    If more than one head has such a method, an Exception is thrown"""
    # Collect every head's attribute of that name, discarding misses.
    found = [fn for fn in (getattr(h, fn_name, None) for h in heads)
             if fn is not None]
    if not found:
        return None
    if len(found) > 1:
        raise Exception(f"More than one of the prediction heads have a {fn_name}() function")
    return found[0]
def dummy_lambda_event_get() -> dict:
    """
    Return dummy GET request data.
    """
    # Fixed fixture payload mimicking an API Gateway GET event.
    return {
        'year': '2011',
        'month': '11',
        'http_method': 'GET',
    }
import re
def _remove_prefix(line, prefix):
    """Remove the prefix and eventually one '=' with surrounding spaces.

    Bug fix: the pattern is now anchored with '^'. The unanchored original
    stripped every space and '=' ANYWHERE in the remainder (re.sub replaces
    all matches), mangling values containing spaces, e.g.
    'key = a b' -> 'ab' instead of 'a b'.
    """
    return re.sub(r'^\s*=?\s*', '', line[len(prefix):])
return re.sub(r'\s*=?\s*', '', line[len(prefix):]) | 1e5db8dcaf930510cd242eb1aa85d90013ba4773 | 114,866 |
def invert_dictionary(dict_map):
    """
    Invert a dictionary-mapping such that values point to keys.

    When several keys share a value, the inverted entry becomes a list of
    those keys; a value with a single key stays a bare key.

    Parameters
    ----------
    dict_map : dict
        dictionary, that shall be inverted

    Returns
    -------
    dict_map_inv : dict
        inverted dictionary
    """
    inverted = dict()
    for key, value in dict_map.items():
        if value not in inverted:
            inverted[value] = key
        elif isinstance(inverted[value], list):
            inverted[value].append(key)
        else:
            # Second key for this value: promote the entry to a list.
            inverted[value] = [inverted[value], key]
    return inverted
def lex_cache_key(key):
    """
    Returns the language and site ID a cache key is related to.
    """
    # Keys look like '<prefix>_<language>_<site_id>'; take the last two parts.
    parts = key.rsplit('_', 2)
    return parts[1:]
def estimate_phi(x):
    """ Estimate proportion of genotypable reads.

    Args:
        x (numpy.array): major, minor, and total read counts

    Returns:
        numpy.array: estimate of proportion of genotypable reads.
    """
    # (major + minor) / (total + 1); the +1 avoids division by zero.
    genotyped = x[:, 0:2].sum(axis=1).astype(float)
    total = x[:, 2].astype(float) + 1.0
    return genotyped / total
def split_data(data, train_len):
    """Split a dataframe into train and test data

    Parameters
    ----------
    data : pandas.core.frame.DataFrame
        Dataframe to split
    train_len : float
        Percentage of data for training

    Returns
    -------
    pandas.core.frame.DataFrame
        a dataframe with the train data
    pandas.core.frame.DataFrame
        a dataframe with the test data
    """
    # Calculate the index on which to split the data.
    train_split = int(len(data) * train_len)
    train_data = data[0:train_split]
    # Bug fix: the original used data[train_split+1:], which silently
    # dropped the row at index train_split from BOTH sets.
    test_data = data[train_split:len(data)]
    return train_data, test_data
import uuid
def mock_user_pool(mock_cognitoidp_client):
    """Create a mocked Cognito user pool used to instantiate a mock AWS Connector.

    Args:
        mock_cognitoidp_client: a mocked Cognito IDP client

    Returns:
        dict: containing a mocked user pool id (key: UserPoolId) and a
        mocked user pool client id (key: UserPoolClientId)
    """
    pool_name = str(uuid.uuid4())
    trigger_arn = str(uuid.uuid4())
    pool = mock_cognitoidp_client.create_user_pool(
        PoolName=pool_name, LambdaConfig={"PreSignUp": trigger_arn}
    )
    pool_id = pool["UserPool"]["Id"]
    app_client = mock_cognitoidp_client.create_user_pool_client(
        UserPoolId=pool_id,
        ClientName=str(uuid.uuid4()),
        CallbackURLs=[str(uuid.uuid4())],
    )
    return {
        "UserPoolId": pool_id,
        "UserPoolClientId": app_client["UserPoolClient"]["ClientId"],
    }
def encode_event(timestamp, name, type):
    """Return a string-encoded event representation (comma-separated).

    Example: '2021-03-27 12:21:50.624783+01:00,prepare,start'"""
    return "{},{},{}".format(timestamp, name, type)
def insert_nth_char(string: str, n: int, char):
    """
    Insert `char` into `string` at zero-indexed position `n`.

    Example:
        >>> insert_nth_char(string='strng', n=3, char='I')
        'strIng'
    """
    head, tail = string[:n], string[n:]
    return head + str(char) + tail
def read_file_contents(filename):
    """
    Load all the information in a given file. (remember to include .txt)

    :param filename: a full file path to the document to open (string)
    :return: list of lines, each keeping its trailing newline
    """
    with open(filename) as handle:
        return handle.readlines()
def add_versions(nodes, versions):
    """
    Annotate each node with version counts from Dutch rechtspraak.nl data;
    a version can additionally carry an annotation ('met annotatie').

    :param nodes: list of node objects (dicts with an 'id' key)
    :param versions: list of versions (dicts with 'id' and 'hasVersion')
    :return: the same list of nodes, each extended with 'count_version'
             and 'count_annotation'
    """
    version_totals = {}
    annotation_totals = {}
    for version in versions:
        node_id = version['id']
        version_totals[node_id] = version_totals.get(node_id, 0) + 1
        if 'met annotatie' in version['hasVersion'].lower():
            annotation_totals[node_id] = annotation_totals.get(node_id, 0) + 1
    for node in nodes:
        node['count_version'] = version_totals.get(node['id'], 0)
        node['count_annotation'] = annotation_totals.get(node['id'], 0)
    return nodes
import string
def whitelisted(
    s, whitelist="_-" + string.ascii_letters + string.digits, substitute="_"
):
    """Replace every character of `s` not in `whitelist` with `substitute`.

    >>> whitelisted("ab/cd#ef(gh")
    'ab_cd_ef_gh'
    >>> whitelisted("ab/cd#ef(gh", substitute='')
    'abcdefgh'
    """
    # Set lookup keeps the per-character membership test O(1).
    allowed = set(whitelist)
    pieces = [c if c in allowed else substitute for c in s]
    return "".join(pieces)
def new_line_pad(string):
    """
    Return `string` with a newline prepended and appended.

    str -> str
    """
    return "\n" + string + "\n"
def getInt(prompt):
    """Prompt until the user types a non-negative integer, then return it.

    The original version recursed on every invalid entry, which could
    exhaust the call stack after many bad inputs; a loop is equivalent
    and safe.
    """
    while True:
        n = input(prompt)
        if n.isdigit():
            return int(n)
def accuracy_score(y_true, y_pred):
    """
    Classification performance metric that computes the accuracy of y_true
    and y_pred.

    :param numpy.array y_true: array-like of shape (n_samples,) Ground truth correct labels.
    :param numpy.array y_pred: array-like of shape (n_samples,) Estimated target values.
    :returns: C (float) Accuracy score.
    """
    n_correct = sum(1 for truth, guess in zip(y_true, y_pred) if truth == guess)
    return n_correct / len(y_true)
def multi_endpoint(mocker):
    """Create a multi network manager endpoint mock (a pair of stubs)"""
    return [mocker.stub() for _ in range(2)]
def dustratio_to_dust1(dust2=0.0, dust_ratio=0.0, **extras):
    """Set the value of dust1 from the value of dust2 and dust_ratio.

    Parameters
    ----------
    dust2 : float
        The diffuse dust V-band optical depth (the FSPS ``dust2`` parameter.)
    dust_ratio : float
        The ratio of the extra optical depth towards young stars to the
        diffuse optical depth affecting all stars.

    Returns
    -------
    dust1 : float
        The extra optical depth towards young stars (the FSPS ``dust1``
        parameter.)
    """
    dust1 = dust_ratio * dust2
    return dust1
from typing import List
def fizz_buzz_encode(x: int) -> List[int]:
    """ One-hot encode an integer according to it's FizzBuzz class """
    # Slots: [number, fizz (3), buzz (5), fizzbuzz (15)]
    encoding = [0, 0, 0, 0]
    if x % 15 == 0:
        encoding[3] = 1
    elif x % 5 == 0:
        encoding[2] = 1
    elif x % 3 == 0:
        encoding[1] = 1
    else:
        encoding[0] = 1
    return encoding
import re
def collapse_spaces(string=None, markdown_compatible=False):
    """Strip outer whitespace and, unless markdown-compatible, collapse
    every internal run of whitespace (including newlines) to one space.

    Args:
        string (str): The string from which to remove whitespace.
        markdown_compatible (bool): If True, only strip the outer ends so
                                    markdown constructs such as trailing
                                    spaces (meaning <br>) stay intact.

    Returns:
        str: The input string with excess whitespace removed; an empty or
             None input is returned unchanged.
    """
    if not string:
        return string
    stripped = string.strip()
    if markdown_compatible:
        # Don't touch inner whitespace in case the user intended a <br>.
        return stripped
    return re.sub(r'\s+', ' ', stripped)
def read_line_with_filter(
        fp,
        filter_length=None,
        preprocessing_fn=None):
    """ Read one line from `fp`, optionally preprocess it, and drop it
    when it is longer than `filter_length`.

    Args:
        fp: A file identifier.
        filter_length: An integer, the maximum length of one line.
        preprocessing_fn: A callable applied to the stripped line.

    Returns: "" at EOF, None when the (preprocessed) line exceeds
        `filter_length`, otherwise the stripped/preprocessed line.
    """
    raw = fp.readline()
    if raw == "":
        return ""
    tokens = raw.strip()
    if preprocessing_fn:
        tokens = preprocessing_fn(tokens)
    # Truthiness check kept on purpose: filter_length=0 disables filtering.
    too_long = filter_length and len(tokens) > filter_length
    if too_long:
        return None
    return tokens
def str_to_bin_array(number, array_length=None):
    """Create a binary (0/1) array for the given number, left-padded with
    zeros to `array_length` when one is given.

    Args:
        number: the number to be represented as a binary array.
        array_length: (optional) the length of the binary array to be
            created. (Default value = None)

    Returns:
        the binary array representation of `number`.
    """
    bits = format(number, "b")
    if array_length:
        bits = bits.zfill(array_length)
    return [int(bit) for bit in bits]
def quote_cmdline(seq):
    """Quotes the strings in seq for feeding to shell.

    This is a severe protection to prevent:
    - variable, command, or other substitutions
    - shell expansions (parameter, wildcard)
    - word splitting
    - invocation of shell builtin (!!!)

    Each word is wrapped in double quotes after backslash-escaping the
    characters that remain special inside double quotes: \\ " $ `
    (backslash first, so later escapes are not double-escaped).
    The reverse operation can be done via the shlex standard module.
    """
    quoted = []
    for word in seq:
        escaped = word.replace("\\", "\\\\")
        for special in ('"', '$', '`'):
            escaped = escaped.replace(special, "\\" + special)
        quoted.append('"' + escaped + '"')
    return " ".join(quoted)
import __main__ as m
def is_running_interactive() -> bool:
    """Check if we're running in a REPL: interactive sessions have no
    ``__file__`` attribute on the ``__main__`` module."""
    try:
        interactive = not hasattr(m, "__file__")
    except Exception as exc:
        print(exc)
        return False
    return interactive
from typing import List
from typing import Any
def ls_del_empty_elements(ls_elements: List[Any]) -> List[Any]:
    """
    Drop every falsy element ('', None, 0, ...) from the list.

    >>> ls_del_empty_elements([])
    []
    >>> ls_del_empty_elements(['',''])
    []
    >>> ls_del_empty_elements(['','','a',None,'b'])
    ['a', 'b']
    >>> ls_del_empty_elements([' ','','a',None,'b',0])
    [' ', 'a', 'b']
    """
    return [element for element in ls_elements if element]
def has_edge(e, f):
    """Check if an edge `e` is in a face `f`, in either direction; the
    face wraps around from its last vertex back to its first.

    Example:
        >>> e = (0, 1)
        >>> f = (1, 2, 3, 0)
        >>> has_edge(e, f)
        >>> True  # because 0, 1 are adjacent in f (first and last)
    """
    n = len(f)
    return any(
        e == (f[i], f[(i + 1) % n]) or e == (f[(i + 1) % n], f[i])
        for i in range(n)
    )
def make_cutout(image, start_i, start_j, h_monster, w_monster):
    """
    Cut an h_monster x w_monster window out of the final image, starting
    at coordinates (start_i, start_j); each output row keeps a trailing
    newline.
    """
    rows = image.split("\n")
    return "".join(
        rows[r][start_j:start_j + w_monster] + "\n"
        for r in range(start_i, start_i + h_monster)
    )
def conv2d_size_out(size, kernel_size, stride):
    """
    Output size of one dimension after a (no-padding, no-dilation) conv2d.

    common use case:
    cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride)
    cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride)
    to understand the shape for dense layer's input
    """
    # (size - (kernel_size - 1) - 1) simplifies to (size - kernel_size).
    return (size - kernel_size) // stride + 1
def _valid_char_in_line(char: bytes, line: bytes) -> bool:
"""Return True if a char appears in the line and is not commented."""
comment_index = line.find(b"#")
char_index = line.find(char)
valid_char_in_line = char_index >= 0 and (
comment_index > char_index or comment_index < 0
)
return valid_char_in_line | fc9c371171d19b012b1d157b85274a9dd3c6cc13 | 114,949 |
import dataclasses
def make_filename(metadata: dict) -> str:
    """
    Make a filename from `metadata` that is returned by `download(onmemory=True)`.

    Parameters
    ----------
    metadata
        A metadata dictionary (the "rect" entry is a dataclass whose
        fields include "type" and a "name" format template).

    Returns
    -------
    filename
        File name, always with a ".fits" suffix.
    """
    params = dataclasses.asdict(metadata["rect"])
    rect_type = params["type"]
    if rect_type == "warp":
        # Warps are additionally keyed by the zero-padded visit number.
        params["name"] = "{}_{:06d}".format(params["name"], metadata["visit"])
    if rect_type == "coadd/bg":
        params["type"] = "coadd+bg"
    template = params.pop("name")
    return template.format(**params) + ".fits"
import logging
def extract_clas(sicd):
    """
    Extract the classification string from a SICD as appropriate for NITF
    Security tags CLAS attribute.

    Parameters
    ----------
    sicd : SICDType

    Returns
    -------
    str
        One of 'U', 'C', 'T', 'S', 'R'; defaults to 'U' when the
        classification is missing or unrecognized.
    """
    info = sicd.CollectionInfo
    if info is None or info.Classification is None:
        return 'U'
    # c_str is upper-cased once here; later checks rely on that.
    c_str = info.Classification.upper().strip()
    if 'UNCLASS' in c_str or c_str == 'U':
        return 'U'
    if 'CONFIDENTIAL' in c_str or c_str == 'C' or c_str.startswith('C/'):
        return 'C'
    # 'TOP SECRET' must be tested before the plain 'SECRET' substring.
    if 'TOP SECRET' in c_str or c_str == 'TS' or c_str.startswith('TS/'):
        return 'T'
    if 'SECRET' in c_str or c_str == 'S' or c_str.startswith('S/'):
        return 'S'
    if 'FOUO' in c_str or 'RESTRICTED' in c_str:
        return 'R'
    logging.critical('Unclear how to extract CLAS for classification string {}. '
                     'Should be set appropriately.'.format(c_str))
    return 'U'
def format_line(line):
    """
    Format a line of Matlab into either a markdown line or a code line.

    Parameters
    ----------
    line : str
        The line of code to be formatted. Formatting occurs according to the
        following rules:

        - If the line starts with (at least) two %% signs, a new markdown
          cell is started.
        - If the line starts with a single '%', it continues the current
          markdown cell.
        - Otherwise it is assumed to be legit matlab code and is passed
          through unchanged.

    Returns
    -------
    tuple of (new_cell : bool, md : bool, source : str)
    """
    if line.startswith('%%'):
        # Line-breaks in markdown require a line gap, hence the '\n'.
        return True, True, line.split('%%')[1] + '\n'
    if line.startswith('%'):
        return False, True, line.split('%')[1] + '\n'
    return False, False, line
import codecs
def loadPOSTaggedReviewData(POSDataPath):
    """
    Module to load POS tagged review data.

    The file handle is now closed deterministically via a context manager
    (the original left it open until garbage collection).

    Args:
        POSDataPath: POS tagged data path

    Returns:
        reviewList: list of pos tagged reviews (one stripped line each)
    """
    with codecs.open(POSDataPath, encoding='utf-8') as posReview:
        return [line.strip() for line in posReview.readlines()]
import string
def is_palindrome(s):
    """
    Check whether a string is a palindrome, ignoring case, punctuation
    and whitespace.

    Returns a boolean value.

    Note: the original called the Python 2 ``str.translate(None, chars)``
    API, which raises TypeError on Python 3; ``str.maketrans('', '',
    chars)`` builds the equivalent deletion table.
    """
    s = s.lower()  # convert the whole string into lowercase letters
    # Remove punctuation via a translation table mapping it to None.
    s = s.translate(str.maketrans('', '', string.punctuation))
    s = s.replace(" ", "")  # remove whitespace from the string
    return s == s[::-1]
def all_list_element_combinations_as_pairs(l):
    """Returns a list of tuples that contains element pairs from `l` in all
    possible combinations without repeats irrespective of order (each
    element is also paired with itself).

    :param list l: list to parse into pairs
    :return: element_pairs, list of tuples for each pair
    :rtype: list
    """
    element_pairs = []
    for i, first in enumerate(l):
        element_pairs.extend((first, second) for second in l[i:])
    return element_pairs
def _prepare_pylint_args(args):
"""
Filter and extend Pylint command line arguments.
--output-format=parsable is required for us to parse the output.
:param args: list of Pylint arguments.
:returns extended list of Pylint arguments.
"""
# Drop an already specified output format, as we need 'parseable'
args = [a for a in args if not a.startswith("--output-format=")]
args.append("--output-format=parseable")
return args | 0b25072e4b203ccd3a320d7d60178d6d51a456b9 | 114,965 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.