content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def fatorial(n, show=False):
    """
    Compute the factorial of a number, optionally printing the process.

    :param n: number whose factorial is computed
    :param show: if True, print each factor as it is multiplied in
    :return: the factorial of ``n``
    """
    f = 1
    # Single loop replaces the original's two duplicated branches.
    for valor in range(n, 0, -1):
        if show:
            print(f'{valor} ', end='')
        f *= valor
    if show:
        # Close the printed expression with an equals sign.
        print('= ', end='')
    return f
from typing import Dict
from typing import List
def get_blocks_or_data(json_object: Dict, name: str) -> List[int]:
    """Extract the 'Blocks' or 'Data' value array from a schematic json.

    Arguments:
        json_object {Dict} -- Schematic json
        name {str} -- 'Blocks' or 'Data'
    Returns:
        List[int] -- Array of values
    """
    matches = [entry['value']
               for entry in json_object['nbt'][0]['value']
               if entry['name'] == name]
    return matches[0]
def preprocess(text):
    """
    Strip '#' comments and join continuation lines ('\\') in makefile text.

    Returns the list of resulting non-empty, stripped lines.
    """
    result = []
    carry = ''
    for raw in text:
        # Prepend any pending continuation, drop comments, trim whitespace.
        line = (carry + raw).split('#')[0].strip()
        if not line:
            carry = ''
            continue
        if line.endswith('\\'):
            # Hold this fragment and glue the next line onto it.
            carry = line[:-1]
        else:
            carry = ''
            result.append(line)
    return result
def time_delta_to_millis(delta):
    """Convert a time delta into its total number of milliseconds."""
    seconds_total = delta.days * 24 * 60 * 60 + delta.seconds
    return seconds_total * 1000 + delta.microseconds / 1000
def pp_hex(raw, reverse=True):
    """Pretty-print a binary string in hex style.

    Args:
        raw (bytes): any sequence of bytes
        reverse (bool): True if output should be in reverse byte order.
    Returns:
        Hex string corresponding to the input byte sequence.
    """
    pairs = ['{:02x}'.format(b) for b in bytearray(raw)]
    if reverse:
        pairs.reverse()
    return ''.join(pairs)
def hash_file(method, filepath):
    """Calculate a hash over a file's contents.

    @param method: callable returning a new hash object (e.g. hashlib.sha1)
    @param filepath: path of the file to hash
    @return: hex digest string of the computed hash
    """
    h = method()
    # Read in 1 MiB chunks so arbitrarily large files fit in memory; the
    # context manager guarantees the handle is closed (the original leaked it).
    with open(filepath, "rb") as f:
        while True:
            buf = f.read(1024 * 1024)
            if not buf:
                break
            h.update(buf)
    return h.hexdigest()
def _run(runner, parsed_args):
"""Run function/method and return the result."""
if type(parsed_args) is tuple: # list and keyword arguments
res = runner(*parsed_args[0], **parsed_args[1])
if "q" not in parsed_args[1]:
print(res)
else: # only keyword arguments
res = runner(**parsed_args)
if "q" not in parsed_args:
print(res)
return res | 2bfbf5642300790f8f3f8ef10f49c51ed9125efe | 113,845 |
def add_feature_rate_overall(profile_updated_df):
    """
    Add overall offer-count and rate features to a profile dataframe.

    New columns:
      - offer_received_total / offer_viewed_total / offer_completed_total
      - rate_offer_viewed_overall = viewed total / received total
      - rate_offer_completed_received_overall = completed total / received total
      - rate_offer_completed_viewed_overall = completed total / viewed total
    NaN rates (0/0 divisions) are replaced with 0.  The input dataframe is
    not modified; an augmented copy is returned.
    """
    df = profile_updated_df.copy()
    df['offer_received_total'] = (
        df.bogo_received + df.discount_received + df.informational_received
    )
    df['offer_viewed_total'] = (
        df.bogo_viewed + df.discount_viewed + df.informational_viewed
    )
    # Only bogo and discount offers can be "completed".
    df['offer_completed_total'] = df.bogo_completed + df.discount_completed
    df['rate_offer_viewed_overall'] = (
        df['offer_viewed_total'] / df['offer_received_total']
    ).fillna(0)
    df['rate_offer_completed_received_overall'] = (
        df['offer_completed_total'] / df['offer_received_total']
    ).fillna(0)
    df['rate_offer_completed_viewed_overall'] = (
        df['offer_completed_total'] / df['offer_viewed_total']
    ).fillna(0)
    return df
def nearest(ts, img_list):
    """Return the timestamp in ``img_list`` closest to ``ts``.

    Entries are compared as integers; on a tie the earliest entry in the
    list wins (``min`` is stable, matching the original strict-< scan).
    """
    # min() with a key replaces the hand-rolled scan and its arbitrary
    # "large number" initialiser.
    return min(img_list, key=lambda t: abs(int(t) - ts))
import torch
import typing
def mixup(
    inputs: torch.Tensor, targets: torch.Tensor, gamma: float
) -> typing.Tuple[torch.Tensor, torch.Tensor]:
    """Perform per-batch mixup on images.

    See `mixup: Beyond Empirical Risk Minimization
    <https://arxiv.org/abs/1710.09412>`__ for an explanation of the method.
    Inputs and targets become ``gamma * original + (1 - gamma) * permuted``,
    where the permutation shuffles the batch dimension.

    Example::

        class TrainStep(tt.steps.Train):
            def forward(self, module, sample):
                images, labels = sample
                images, labels = tt.functional.inputs.mixup(images, labels)
                # Calculate what you want below, say loss
                ...
                return loss

        step = TrainStep(criterion, device)

    .. note::
        **IMPORTANT**: Examples are modified in-place!

    Arguments
    ---------
    inputs: torch.Tensor
        `torch.Tensor` of shape :math:`(N, *)` and numerical `dtype`.
    targets: torch.Tensor
        `torch.Tensor` of shape :math:`(N, *)` and numerical `dtype`.
    gamma: float
        Mixing strength; ``gamma`` weights the original example and
        ``1 - gamma`` weights its randomly-chosen partner.

    Returns
    -------
    Tuple(torch.Tensor, torch.Tensor)
        Inputs and labels after mixup (the same tensors, mutated in-place).

    Raises
    ------
    ValueError
        When the batch dimensions of ``inputs`` and ``targets`` differ.
    """
    batch = inputs.shape[0]
    if batch != targets.shape[0]:
        raise ValueError(
            "inputs and labels 0 dimension (batch) has to be equal, "
            "got {} for inputs and {} for labels".format(
                inputs.shape[0], targets.shape[0]
            )
        )
    # Pair each example with a random partner from the same batch.
    indices = torch.randperm(batch)
    shuffled_inputs = inputs[indices]
    shuffled_targets = targets[indices]
    mixed_inputs = inputs.mul_(gamma).add_(shuffled_inputs, alpha=1 - gamma)
    mixed_targets = targets.mul_(gamma).add_(shuffled_targets, alpha=1 - gamma)
    return mixed_inputs, mixed_targets
def get_extent(gtws):
    """Return the bounding box (minx, miny, maxx, maxy) of gateways.

    ``gtws`` maps gateway ids to coordinate pairs indexed [0]=x, [1]=y.
    For an empty mapping the infinite initial bounds are returned
    unchanged, matching the original behaviour.
    """
    minx = miny = float("inf")
    maxx = maxy = float("-inf")
    # Builtin min/max replace the four manual comparison chains.
    for coords in gtws.values():
        minx = min(minx, coords[0])
        maxx = max(maxx, coords[0])
        miny = min(miny, coords[1])
        maxy = max(maxy, coords[1])
    return minx, miny, maxx, maxy
def process(obj, ops):
    """
    Thread ``obj`` through a sequence of operations (unary callables).

    Each operation receives the previous result; the final value is
    returned.
    """
    result = obj
    for operation in ops:
        result = operation(result)
    return result
def clear_selected_indices(value):
    """
    Reset the table row selection whenever the cluster changes.

    Parameters
    ----------
    value : str
        Cluster name (unused; only the change event matters).

    Returns
    -------
    list
        Always an empty list, meaning none of the entries in the table
        are selected.
    """
    return []
def _is_branch_el8(pkg):
""" Is this a branch el8 pacakge. Eg. foo-1-2.el8_3.noarch """
return 'el8_' in pkg.release | ff485c5655bfac5aeb4899b120612154b6707e2e | 113,860 |
def camelcase(name):
    """
    Converts lower_case to LowerCase
    >>> camelcase('camel_case_this')
    u'CamelCaseThis'
    >>>
    """
    return u''.join(part.capitalize() for part in name.split('_'))
def _safe_snr_calculation(s, n):
"""
Helper used in this module for all snr calculations. snr is
always defined as a ratio of signal amplitude divided by noise amplitude.
An issue is that with simulation data it is very common to have a noise
window that is pure zeros. If computed naively snr would then be
normally be returned as NaN. NaNs can cause a lot of mysterious errors
so we handle that differently here. When noise amplitude is 0 we
then test the signal amplitude. If it is nonzero we return large
number defined inside this function as 999999.9 (just under 1 million).
If both amplitudes are zero we return -1.0 which can be properly treated
as an error or data with low snr.
"""
if n == 0.0:
if s > 0.0:
return 999999.9
else:
return -1.0
else:
return s / n | 9d903853ed3753a187c9efb4b935518d47eaf93b | 113,862 |
def get_columns_by_type(df, req_type):
    """
    Get all columns of a dataframe with the provided dtype.

    Parameters:
        df : Pandas dataframe
        req_type: dtype name (str)
    Returns:
        Index of matching column labels, or None when no column matches.
    """
    grouped = df.columns.to_series().groupby(df.dtypes).groups
    by_name = {dtype.name: cols for dtype, cols in grouped.items()}
    return by_name.get(req_type)
def num_knots_curve_lsq(k, num_internal_knots):
    """
    Total number of knots created by ``curve_lsq_fixed_knots``.

    Each of the two boundaries contributes ``k + 1`` repeated knots, plus
    the internal knots.
    """
    boundary_knots = 2 * (k + 1)
    return boundary_knots + num_internal_knots
def merge_two_dicts(A, B):
    """
    Merge two dictionaries into a new one.

    Values from ``B`` win on key collisions; neither input is modified.

    :param A: Dictionary A
    :param B: Dictionary B
    :return: The merged dictionary
    """
    return {**A, **B}
def _count_headers(csv_data: str) -> int:
"""Return the number of header rows in the CSV string data.
Header rows are defined by having a number in their second column because
the first column contains the sample (test) name which could be a number.
Raises:
ValueError: if cell is empty or cannot be converted to a float.
"""
for line_num, line in enumerate(csv_data.splitlines()):
try:
# Try converting the second column to a float
float(line.split(',')[1])
return line_num
except ValueError:
# Empty cell, or cannot be converted to a float
continue
# Raise a ValueError if no end to the headers was found
raise ValueError('Line starting with integer not found') | 1865124b3f5ab6fd936161cecf01bdd66ec65e45 | 113,873 |
def max_class_score(class_scores, return_total=False):
    """
    Get the class with the highest score (and optionally the score total)
    from a mapping of classes to scores.

    Parameters
    ----------
    :param class_scores: dict mapping class -> score
    :param return_total: whether the total score should also be returned

    Return
    ------
    :return: the ``(class, score)`` pair with the highest score; when
        ``return_total`` is True, a tuple ``((class, score), total)``.
    """
    best = (None, -1)
    total = float(0)
    for item_score in class_scores.items():
        total += item_score[1]
        if best[1] < item_score[1]:
            best = item_score
    # BUG FIX: the original `return best if not return_total else best, total`
    # parsed as `return (best if ... else best), total` -- the conditional
    # chose between `best` and `best`, so the tuple was returned always.
    return (best, total) if return_total else best
def key_klifs_residues(numbering):
    """
    Retrieve PDB residue indices relevant to key kinase conformations,
    mapped via KLIFS.

    The residues feed 12 collective variables describing kinase
    conformational changes: the aC/aE helix angle, the K-E salt bridge,
    DFG-Phe conformation (two distances), the X-DFG / DFG-Asp / DFG-Phe
    backbone and chi1 dihedrals, and the FRET L-S distance.  All features
    use the current numbering of the provided structure.

    Parameters
    ----------
    numbering : list of int
        numbering[klifs_index] is the residue number for the given PDB file
        corresponding to KLIFS residue index 'klifs_index'

    Returns
    -------
    key_res : list of int
        Key residue indices
    """
    # KLIFS indices (0-based) of the directly mapped residues, in order:
    #   aC/aE helix angle: res21, res29 (aC) and res61, res63 (aE)
    #   K-E salt bridge:   res17 (beta3 K) and res24 (aC E)
    #   DFG / Phe:         res28 (ExxxX) and res82 (DFG-Phe)
    #   X-DFG dihedrals:   res80;  DFG-Asp dihedrals: res81
    klifs_indices = [20, 28, 60, 62, 16, 23, 27, 81, 79, 80]
    key_res = [numbering[i] for i in klifs_indices]
    # FRET distance residues are not among the 85 KLIFS positions; anchor
    # them to the 100% conserved beta-III K (res17) via fixed offsets
    # (equivalent to Aurora "S284" and "L225" respectively).
    key_res.append(numbering[16] + 120)
    key_res.append(numbering[16] + 61)
    return key_res
def get_operation_id_number(value: str) -> str:
    """
    Extract the trailing id component from a PAPI operation name.

    eg:
    papiv1: 'operations/EMj9o52aLhj78ZLxzunkiHcg0e2BmaAdKg9wcm9kdWN0aW9uUXVldWU -> EMj9o52aLhj78ZLxzunkiHcg0e2BmaAdKg9wcm9kdWN0aW9uUXVldWU'
    papiv2alpha1: 'projects/project_name/operations/01234567891011121314' -> '01234567891011121314'
    """
    return value.rsplit('/', 1)[-1]
def _singleton_violation(pat, interpretation):
"""
Checks if an abstraction pattern violates the singleton constraints in a
given interpretation, that is, if the interpretation already contains an
observation of the same type of the hypothesis of the pattern, and that
hypothesis is a singleton.
Parameters
----------
pat:
PatternAutomata.
interpretation:
Interpretation.
"""
return issubclass(pat.Hypothesis, tuple(interpretation.singletons)) | 69bc252e86d443768d9a076cc1883a227b6e9185 | 113,890 |
def _is_ignored(server):
"""Return True if the server should be ignored for test infra prod alerts."""
if server.hostname.startswith('chromeos1-'):
return True
if server.hostname.startswith('chromeos15-'):
return True
return False | 2eba4ac56529615f72180969d2226635779bdbbe | 113,893 |
def infer_type_from_keys(keys: list):
    """Infer the ddf data type from the primary key columns."""
    if len(keys) == 1:
        # A single key is either the concept table itself or an entity set.
        return 'concepts' if keys[0] == 'concept' else 'entities'
    return 'synonyms' if 'synonym' in keys else 'datapoints'
def rebuild_optional(matched_group: str) -> str:
    """
    Rebuild `Union[T, None]` as `Optional[T]`.

    Arguments:
        matched_group: The matched group when matching against a regular expression (by the parent caller).

    Returns:
        The rebuilt type string.
    """
    depth = 0
    for character in matched_group:
        if character == "[":
            depth += 1
        elif character == "]":
            depth -= 1
        elif character == "," and depth == 0:
            # A top-level comma means several member types: wrap in Union.
            return f"Union[{matched_group}]"
    return matched_group
import re
def strip_internal_spaces(text):
"""
Removes spaces between digits, digit and dot,
dot and digit; after opening brackets and parentheses
and before closing ones; spaces around ^ symbol.
"""
text = text.replace("{ ", "{")
text = text.replace(" }", "}")
text = text.replace("( ", "(")
text = text.replace(" )", ")")
text = text.replace(" ^ ", "^")
return re.sub(r'(?<=[\d.]) (?=[\d.])', '', text) | 0e4c72884e8a22e0b1138429ea21aff50220fda1 | 113,898 |
def split_from(string, sep, pos):
    """
    Split ``string`` on ``sep``, but keep the first ``pos`` fields joined.

    Args:
        string (str): The supplied string that will be splitted.
        sep (str): The desired separator to use for the split.
        pos (int): The desired first occurence of the defined separator
            within the supplied string. This will be the position of
            the first split performed.

    Returns:
        list: A list of the splitted string

    Examples:
        >>> isc_string = 'failover peer "dhcpd-failover" state'
        >>> shared.utils.split_from(isc_string, ' ', 2)
        ['failover peer', '"dhcpd-failover"', 'state']
    """
    fields = string.split(sep)
    head = sep.join(fields[:pos])
    return [head] + fields[pos:]
def Namer(name):
    """Convert the input name into a '-' delimited slug."""
    name = name.strip(' ')
    name = name.replace(' : ', '-')
    # Replace each separator spelling with a dash.  (The original also
    # called name.strip(ch) here and discarded the result -- dead code,
    # removed.)
    for ch in (': ', ' :', '# ', '. ', '- ', ' ', '_', '#', '+',):
        name = name.replace(ch, '-')
    name = name.strip('-')  # -naruto- isnt hot.
    name = name.replace('--', '-')  # Boruto--Naruto isn't sexy.
    name = name.replace('---', '-')  # Second pass
    return name
def is_css(syntax: str):
    """
    Check if the given syntax is a CSS dialect. Note that it's not the same
    as stylesheet syntax: for example, SASS is a stylesheet but not a CSS
    dialect (but SCSS is).
    """
    return syntax in {'css', 'scss', 'less'}
def data_bits(n: int = 2):
    """
    Calculate the data bits in one hamming data block.

    :param n: The dimension of the hamming data block is specified by
        2 ** n or n << 1
    :return: The number of valid data bits carrying information in one
        hamming data block: block size minus the 2n parity bits minus the
        overall-parity bit.
    """
    block_size = 1 << (2 * n)
    parity_bits = 2 * n
    return block_size - parity_bits - 1
def day_of_week(day, month, year):
    """
    Determine the day of the week for a given date using Zeller's Rule
    (see http://mathforum.org/dr.math/faq/faq.calendar.html).

    Args:
        day (int): The specified day of the month (1-31).
        month (int): The specified month (1-12).
        year (int): The specified year (including the century, ie. '2019' not '19').

    Returns:
        int: The day of the week: 0 (Monday) to 6 (Sunday).
    """
    # Zeller counts March as month 1, so shift January/February.
    month -= 2
    if month < 1:
        month += 12
    # Split the year into century and two-digit year parts.
    century = int(str(year)[:2])
    year = int(str(year)[2:])
    if month > 10:
        # Jan/Feb belong to the previous Zeller year.
        year = year - 1
    # NOTE: int(a / b) truncation (not floor division) is intentional here;
    # it matters when year goes negative.
    dow = day + int((13 * month - 1) / 5) + year + int(year / 4) + int(century / 4) - (2 * century)
    dow = dow % 7
    if dow < 0:
        dow += 7
    return dow
def noProcessing(img, **parameter):
    """Identity processing step: return the original image untouched.

    Used as the default function in :func:`parallelProcessStack` and
    :func:`sequentiallyProcessStack`.

    Arguments:
        img (array): image

    Returns:
        (array): the original image
    """
    return img
def get_matching_points(requested_file_names, all_file_names, object_points, image_points):
    """
    Select the object points and image points of a requested set of files.

    :param requested_file_names: files to look through
    :param all_file_names: the list of file names (parallel to the points)
    :param object_points: object points of the images, aligned with all_file_names
    :param image_points: image points of the images, aligned with all_file_names
    :return: the requested object points and image points
    """
    wanted = set(requested_file_names)
    selected = [
        (object_points[i], image_points[i])
        for i, fname in enumerate(all_file_names)
        if fname in wanted
    ]
    if not selected:
        return [], []
    obj_pts, img_pts = zip(*selected)
    return list(obj_pts), list(img_pts)
def pad_to_size(text, x, y):
    """
    Center ``text`` inside an x-by-y whitespace frame.

    The frame grows to fit the text if it is larger than the requested
    dimensions; every output line is exactly ``x`` characters wide and
    newline-terminated.
    """
    lines = text.rstrip().split("\n")
    width = max(len(line) for line in lines)
    height = len(lines)
    # The frame can never be smaller than the text itself.
    x = max(x, width)
    y = max(y, height)
    top = (y - height) // 2
    bottom = y - height - top
    left = (x - width) // 2
    blank_row = " " * x + "\n"
    rows = [blank_row] * top
    for line in lines:
        rows.append(" " * left + line + " " * (x - left - len(line)) + "\n")
    rows.extend([blank_row] * bottom)
    return "".join(rows)
def complement_strand(sequence):
    """
    Build the second (complementary) DNA strand: Ts complement As and Cs
    complement Gs.  Given a bad input, returns "Sequencing Error".

    :param sequence: A DNA sequence
    :return: the complement string for the DNA sequence
    """
    pairing = {"A": "T", "C": "G", "T": "A", "G": "C"}
    try:
        return "".join(pairing[base] for base in sequence)
    except KeyError:
        # A letter outside A/C/G/T was encountered.
        return "Sequencing Error"
from enum import Enum
def deep_enum_to_str(d):
    """
    Replace enums with their plain string equivalents, i.e. ``str(Enum.name)``.

    Dicts and lists are walked recursively and mutated in place; anything
    else is returned unchanged.
    """
    if isinstance(d, Enum):
        return str(d.name)
    if isinstance(d, dict):
        for key in d:
            d[key] = deep_enum_to_str(d[key])
    if isinstance(d, list):
        for index, item in enumerate(d):
            d[index] = deep_enum_to_str(item)
    return d
import re
def parse_fst_iface_event(ev):
    """Parse an FST iface event that comes as a string, e.g.
    "<3>FST-EVENT-IFACE attached ifname=wlan9 group=fstg0"

    Returns a dictionary with parsed "event_type", "ifname", and "group";
    or None if not an FST event or it can't be parsed."""
    if "FST-EVENT-IFACE" not in ev:
        return None
    event = {}
    # Determine the event type; "attached" is checked first, as before.
    for kind in ("attached", "detached"):
        if kind in ev:
            event['event_type'] = kind
            break
    else:
        return None
    for field in ("ifname", "group"):
        match = re.search(field + r"=(\S+)", ev)
        if match is not None:
            event[field] = match.group(1)
    return event
def get_count(labels):
    """
    Count each label type.

    :param labels: items to count
    :return: {label_type: freq}
    """
    counts = dict()
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    return counts
from pathlib import Path
import typing
def resolve_path(filename: Path) -> typing.Union[Path, None]:
    """Find a file by walking up parent directories until the file is found.
    Return the absolute path of the file, or None when not found.

    The search starts at the current working directory and stops at the
    parent of the home directory -- or at the filesystem root.  (The
    original looped forever when the cwd was not under the home directory,
    because the root is its own parent and the sentinel was never reached.)
    """
    current = Path.cwd()
    # Stop search at home directory
    sentinel_dir = Path.home().parent.resolve()
    while current != sentinel_dir:
        target = current / Path(filename)
        if target.exists():
            return target.resolve()
        parent = current.parent.resolve()
        if parent == current:
            # Reached the filesystem root without finding the file.
            break
        current = parent
    return None
def format_keys(keys):
    """
    Convert each key's ``.text`` like findHighlightForeground to
    find_highlight_foreground.
    """
    for key in keys:
        pieces = [f"_{ch.lower()}" if ch.isupper() else ch for ch in key.text]
        key.text = "".join(pieces)
    return keys
def admitted_to_hospital(
    on_or_before=None,
    on_or_after=None,
    between=None,
    returning="binary_flag",
    find_first_match_in_period=None,
    find_last_match_in_period=None,
    date_format=None,
    with_these_diagnoses=None,
    with_these_primary_diagnoses=None,
    with_these_procedures=None,
    return_expectations=None,
):
    """Return information about admission to hospital.

    Options for `returning` are:

        binary_flag: Whether patient was admitted to hospital
        primary_diagnosis: ICD-10 code of primary diagnosis
        date_admitted: Date patient was admitted to hospital
        date_discharged: Date patient was discharged from hospital

    `with_these_diagnoses` is optional, and is a list of ICD-10 codes
    `with_these_primary_diagnoses` is optional, and is a list of ICD-10 codes
    `with_these_procedures` is optional, and is a list of OPCS-4 codes

    See https://github.com/opensafely/cohort-extractor/issues/186 for discussion.
    """
    # locals() captures exactly the caller-supplied query options; nothing
    # may be assigned in this body before the return, or it would leak
    # into the returned mapping.
    return "admitted_to_hospital", locals()
def hill(x, parameters):
    """Hill function: POI = (B * x^n) / (1 + x^n).

    Parameters
    ----------
    x: float or array of floats
        variable
    parameters: dict
        dictionary containing 'hill_B' and 'hill_N'

    Returns
    -------
    float or array of floats:
        function result
    """
    amplitude = parameters['hill_B']
    exponent = parameters['hill_N']
    xn = x ** exponent
    return amplitude * xn / (1. + xn)
from typing import Iterable
def make_indices_str(indices: Iterable[int]) -> str:
    """Generate a string representation of an iterable of indices.

    If the indices are contiguous, returns '<min_idx> - <max_idx>';
    otherwise returns the sorted list formatted as '[idx_1, ..., idx_n]'.
    """
    ordered = sorted(indices)
    is_contiguous = (
        len(ordered) > 1 and ordered[-1] - ordered[0] == len(ordered) - 1
    )
    if is_contiguous:
        return f"{ordered[0]} - {ordered[-1]}"
    return f"{ordered}"
def pad_to_multiple_of(n, mult):
    """Round ``n`` up to the next multiple of ``mult``."""
    remainder = n % mult
    return n if remainder == 0 else n + mult - remainder
from typing import Any
from typing import Optional
def get_str_or_none(value: Any) -> Optional[str]:
    """
    Return the string representation of a value, or None when the value is
    falsy (None, 0, '', empty containers, ...).

    :param value: Input value
    :return: String value of input or None
    """
    return str(value) if value else None
def get_savename_from_varname(
        varname, varname_prefix=None,
        savename_prefix=None):
    """
    Map a graph variable name to the name used for saving it.

    Args:
        varname(str): a variable name in the graph
        varname_prefix(str): an optional prefix that may need to be removed in varname
        savename_prefix(str): an optional prefix to append to all savename
    Returns:
        str: the name used to save the variable
    """
    name = varname
    # Strip "<prefix>/" from the front when present.
    if varname_prefix is not None and name.startswith(varname_prefix):
        name = name[len(varname_prefix) + 1:]
    # Prepend the save prefix as a path component.
    if savename_prefix is not None:
        name = savename_prefix + '/' + name
    return name
def CaptureHDRStack(camera, exposure_min, exposure_max, nimages):
    """CaptureHDRStack(exposure_min, exposure_max, nimages)

    Capture a set of ``nimages`` from the Pi's camera with (brightness)
    exposures evenly stepped from ``exposure_min`` to ``exposure_max``.

    Returns a list of filenames of the images saved.
    """
    # Integer division: under Python 3 the original "/" produced a float
    # step, and range() then raised TypeError.
    exp_step = (exposure_max - exposure_min) // (nimages - 1)
    fnames = []
    for step in range(exposure_min, exposure_max + 1, exp_step):
        # Set filename based on exposure
        fname = 'e%d.jpg' % (step)
        fnames.append(fname)
        # Set camera properties and capture
        camera.brightness = step
        camera.capture(fname)
    return fnames
def format_line(line):
    """
    Format a line to the required url-encoded representation:
    '"' -> %22, newline -> %0a, space -> '+'.

    Returns None for a falsy (empty) line.

    :param line:
    :return: string url encoded.
    """
    if not line:
        return None
    return line.replace("\"", "%22").replace("\n", "%0a").replace(" ", "+")
import pathlib
def norm_vahadane(remote_sample) -> pathlib.Path:
    """Sample pytest fixture for the norm_vahadane image.

    Downloads the norm_vahadane target image for pytest via the
    ``remote_sample`` fixture.
    """
    return remote_sample("stainnorm-target-vahadane")
def merge_two_dicts(dict1, dict2):
    """
    Helper function for merging two dictionaries into a new dictionary as
    a shallow copy.  Values from ``dict2`` win on key collisions; neither
    input is modified.

    :param dict1: (dict) First of two dictonaries to merge
    :param dict2: (dict) Second dictionary
    :returns: Merged dictionary
    :rtype: dict
    """
    return {**dict1, **dict2}
from typing import Union
def convert_to_int_if_possible(s: str) -> Union[int, str]:
    """
    Convert the given string to an int when it consists purely of digits.
    Otherwise, return the original string unchanged.
    """
    if s.isdigit():
        return int(s)
    return s
def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.
    See comment in Future for more details.
    """
    if not hasattr(obj.__class__, '_asyncio_future_blocking'):
        return False
    return obj._asyncio_future_blocking is not None
def is_palindrome(some_str: str) -> bool:
    """
    Report whether the string reads the same forwards and backwards.

    :param some_str: string to test
    :return: True for a palindrome (including the empty string)
    """
    reversed_str = some_str[::-1]
    return some_str == reversed_str
def set_count(items):
    """
    Count occurrences of each truthy item, most frequent first.

    Similar to "set", but produces (value, count) pairs ordered from most
    frequent down; falsy items are skipped.  Count ties are ordered by the
    item itself, descending (matching the original tuple sort).

    Example:
        >>> inventory = ['apple', 'lemon', 'apple', 'orange', 'lemon', 'lemon']
        >>> set_count(inventory)
        [('lemon', 3), ('apple', 2), ('orange', 1)]
    """
    tally = {}
    for item in items:
        if item:
            tally[item] = tally.get(item, 0) + 1
    ranked = sorted(((count, item) for item, count in tally.items()), reverse=True)
    return [(item, count) for count, item in ranked]
def round_(number, ndigits=0):
    """:yaql:round

    Round ``number`` to ``ndigits`` digits after the decimal point,
    using Python's builtin banker's rounding.

    :signature: round(number, ndigits => 0)
    :arg number: input value
    :argType number: number
    :arg ndigits: with how many digits after decimal point to round.
        0 by default
    :argType ndigits: integer
    :returnType: number

    .. code::

        yaql> round(12.52)
        13
        yaql> round(12.52, 1)
        12.5
    """
    return round(number, ndigits)
from csv import reader
def getChannelNames(fname):
    """
    Read names of electrodes from a text file.

    Input:
    ------
    -- fname - str - tab-delimited text file whose rows hold an electrode
       name followed by its number, e.g.::

           C1<TAB>1
           C3<TAB>2

    Output:
    -------
    -- elecnames - list of electrode names (upper-cased), in ascending
       order as determined by the electrode numbers in fname

    Example:
    --------
    >>> getChannelNames(_path.join(_path.join(_packdir, 'test_data'), \
    'elecnames.txt'))
    ['C1', 'C3']
    """
    with open(fname, 'r') as fh:
        rows = reader(fh, delimiter='\t', quotechar='"')
        number_by_name = {row[0].upper(): int(row[1]) for row in rows}
    ordered = sorted(number_by_name.items(), key=lambda pair: pair[1])
    return [name for name, _ in ordered]
def data_row(gen):
    """Generate a single row of fake data.

    Arguments
    ---------
    gen : faker.Faker
        A generator for fake data.

    Returns
    -------
    list
        A list of fake values: id, date, name, phone, email, street,
        city, postcode, state, and two money amounts.
    """
    # Commas are stripped from free-text fields so the row stays CSV-safe.
    row = [
        gen.uuid4(),
        gen.date_this_year().isoformat(),
        gen.name().replace(",", " "),
        gen.phone_number(),
        gen.company_email(),
        gen.street_name().replace(",", " "),
        gen.city(),
        gen.postcode(),
        gen.state(),
    ]
    # Two money-like amounts in dollars-and-cents form.
    row.append(gen.random.randint(1e4, 1e7) / 100.0)
    row.append(gen.random.randint(1e3, 1e6) / 100.0)
    return row
import requests
def find_mime_type(url: str) -> str:
    """Determine the mime type of the file at the given url via a HEAD request.

    Args:
        url: remote url to check

    Returns:
        The Content-Type reported by the server for a 2xx response;
        otherwise the default 'audio/mpeg'.
    """
    response = requests.Session().head(url, allow_redirects=True)
    if 200 <= response.status_code < 300:
        return response.headers['content-type']
    return 'audio/mpeg'
def get_top_node(data):
    """Return the node name(s) that appear in no other node's children list.

    ``data`` maps node names to node objects exposing a ``children``
    iterable; the top node is whichever name is never listed as a child.
    """
    child_nodes = set()
    for node in data.values():
        # set.update replaces the original list-comprehension-for-side-
        # effects anti-pattern.
        child_nodes.update(node.children)
    return [name for name in data.keys() if name not in child_nodes]
def split_data(x, y, test_data_ratio):
    """Split descriptive (x) and target (y) features into train/test sets.

    The leading rows become training data and the trailing
    ``test_data_ratio`` fraction becomes test data.  The number of x's
    rows must equal the number of y's rows.

    Arguments
    ---------
    x : np.array
        Descriptive features in 2d array,
        shape (num of data, num of feature).
    y : np.array
        Target features in 2d array,
        shape (num of data, num of feature).
    test_data_ratio : float
        Desired ratio of test data, in the open range (0.0, 1.0).
        Values outside that range put everything into the training set.

    Returns
    -------
    np.array, np.array, np.array, np.array
        x_train, y_train (training data) followed by
        x_test, y_test (test data).
    """
    n_rows = x.shape[0]
    n_train = n_rows
    if 0. < test_data_ratio < 1.:
        n_train = int(n_rows * (1. - test_data_ratio))
    return (x[:n_train, :], y[:n_train, :],
            x[n_train:, :], y[n_train:, :])
def fetch_bases(fasta, contig, start, length):
    """
    Return ``length`` bases from ``contig`` of the given FASTA object,
    beginning at the 1-based coordinate ``start``.
    """
    offset = start - 1  # convert 1-based coordinate to 0-based
    return fasta.fetch(reference=contig, start=offset, end=offset + length)
import yaml
def load(fd):
    """
    Parse a YAML document from the given file-like object.
    """
    # full_load() is the documented shorthand for load(fd, Loader=FullLoader)
    # and ships in the same PyYAML release that introduced FullLoader.
    return yaml.full_load(fd)
def required_passes(bytes_per_pass, share_sizes):
    """
    Calculate the number of passes that are required to store shares of the
    given sizes for one lease period.

    :param int bytes_per_pass: The number of bytes the storage of which for
        one lease period one pass covers.
    :param list[int] share_sizes: The sizes of the shares which will be stored.
    :return int: The number of passes required to cover the storage cost.
    :raises TypeError: if ``share_sizes`` is not a list.
    """
    if not isinstance(share_sizes, list):
        raise TypeError(
            "Share sizes must be a list of integers, got {!r} instead".format(
                share_sizes,
            ),
        )
    total_bytes = sum(share_sizes)
    full_passes, remainder = divmod(total_bytes, bytes_per_pass)
    # Any partial pass still costs a whole pass.
    return full_passes + 1 if remainder else full_passes
import decimal
def intround(value):
    """Round a float to the nearest int using banker's rounding
    (half-to-even), giving the same result on both Py2/3.
    """
    exact = decimal.Decimal.from_float(value)
    return int(exact.to_integral_value(rounding=decimal.ROUND_HALF_EVEN))
def _to_positive_step(orig_slice, N):
    """
    Convert a slice object with a negative step to one with a positive step.
    Accessing an iterable with the positive-stepped slice, followed by flipping
    the result, should be equivalent to accessing the tensor with the original
    slice. Computing positive-step slice requires using N, the length of the
    iterable being sliced. This is because PyTorch currently does not support
    slicing a tensor with a negative step.
    """
    # slice.indices(N) normalizes None fields and negative indices exactly
    # like built-in sequence slicing would.
    start, stop, step = orig_slice.indices(N)
    # n = number of whole steps from start towards stop; r = leftover distance.
    n, r = divmod(stop - start, step)
    if n < 0 or (n == 0 and r == 0):
        # The slice selects nothing; return a canonical empty slice.
        return slice(0, 0, 1)
    if r != 0: # a "stop" index, not a last index
        n += 1
    if step < 0:
        # Flip direction: new start is the last element the original slice
        # would visit, new stop is one (now positive) step past the original
        # start, and the step becomes positive.
        start, stop, step = start + (n - 1) * step, start - step, -step
    else: # step > 0, step == 0 is not allowed
        stop = start + n * step
    # Clamp so the resulting slice never reaches past the end of the iterable.
    stop = min(stop, N)
    return slice(start, stop, step)
def weave_lists(tables, non_tables, text_first):
    """
    Takes a list of tables, non-tables and a boolean indicating which
    should come first and returns a single list of lines.

    Blocks are taken alternately from the two lists (both lists are consumed
    in place). When one list runs out, the remaining blocks of the other are
    appended in order instead of raising IndexError as the previous version
    did.
    """
    new_list = []
    prefer_text = text_first
    while tables or non_tables:
        primary, secondary = (non_tables, tables) if prefer_text else (tables, non_tables)
        # Fall back to the other list once the preferred one is exhausted.
        source = primary if primary else secondary
        new_list.extend(source.pop(0))
        prefer_text = not prefer_text
    return new_list
def options_to_str(options_list):
    """
    Render a list of choice options as numbered lines ("1 - first", ...),
    joined with newlines.
    """
    return "\n".join(
        f"{number} - {option}"
        for number, option in enumerate(options_list, start=1)
    )
def add_zero(value):
    """Return *value* as a string, prepended with '0' when it is
    numerically less than 10.

    The previous version returned values >= 10 unchanged (possibly as an
    int) even though the docstring promised a string; a string is now
    returned in all cases.
    """
    if int(value) < 10:
        return '0' + str(value)
    return str(value)
from typing import List
from typing import Tuple
from typing import Any
from typing import Optional
def judge(
    expected: List[Tuple[Any, ...]],
    answered: List[Tuple[Any, ...]],
    order_strict: bool
) -> Tuple[bool, Optional[int]]:
    """
    Compare a query result against the reference answer.

    :param expected: the reference (correct) result rows
    :param answered: the submitted result rows
    :param order_strict: whether row order must match (e.g. ORDER BY queries)
    :return: (is_correct, 1-based index of the first offending row in
             ``answered`` when incorrect, else None)
    """
    if order_strict:  # row order must match exactly
        if expected == answered:
            return True, None
        for idx, (expected_record, answered_record) in enumerate(zip(expected, answered), start=1):
            if expected_record != answered_record:
                return False, idx  # first position where the rows differ
    # NOTE(review): when order_strict is True and the zipped prefixes all
    # match but the lengths differ, control falls through to the unordered
    # comparison below -- confirm that is intentional.
    expected_set = set(expected)
    for idx, answered_record in enumerate(answered, start=1):  # check answered row by row
        if answered_record in expected_set:
            expected_set.remove(answered_record)
        else:
            return False, idx  # row not present in the expected result
    if len(expected_set) != 0:  # some expected rows are missing from answered
        return False, len(expected_set) + 1
    return True, None
def color_to_rgba(color):
    """Converts a color (tuple or string) to an RGBA tuple.

    This function does not validate the input, so if the input format
    does not match one of the formats listed below, the output format
    is not guaranteed.

    Args:
        color: The color to be converted. It can be:
            An RGB tuple of 3 ints/floats in [0.0, 1.0], e.g. (1, 0, 0).
            An RGBA tuple of 4 ints/floats in [0.0, 1.0] (returned as is),
                e.g. (1, 0, 0, 0.5).
            An RGB hex string, e.g. #ff0000, ff0000, #f00, f00.
            An RGBA hex string, e.g. #ff0000cc, #f00c, ff0000cc, f00c.

    Returns:
        A tuple of 4 ints/floats in [0.0, 1.0].

    Raises:
        ValueError: if the input matches none of the supported formats.
    """
    if isinstance(color, tuple):
        if len(color) == 4:
            return color
        if len(color) == 3:
            return (*color, 1)
    if isinstance(color, str):
        hex_part = color.lstrip('#')
        if len(hex_part) in (3, 4):
            # Short form: double every hex digit (#f00 -> ff0000, #f00c -> ff0000cc).
            hex_part = ''.join(digit * 2 for digit in hex_part)
        if len(hex_part) == 6:
            hex_part += 'ff'  # opaque alpha by default
        if len(hex_part) == 8:
            return tuple(int(hex_part[i : i + 2], 16) / 255 for i in range(0, 8, 2))
    raise ValueError(f'Invalid color: {color}')
import configparser
def readConfigFile(filePath):
    """
    Parse an INI-style config file.

    Args:
        filePath ([str]): path to config file

    Returns:
        [Obj]: populated ``configparser.ConfigParser`` instance
    """
    parser = configparser.ConfigParser()
    parser.read(filePath)
    return parser
def flat_key(*keys):
    """Join a sequence of identifiers into one ':'-separated key string."""
    return ':'.join(map(str, keys))
def datetime_to_ts(date_time):
    """Convert a DateTime object into a WARC 1.0 timestamp."""
    # datetime.__format__ delegates a non-empty spec to strftime.
    return f"{date_time:%Y-%m-%dT%H:%M:%SZ}"
import re
def is_parial_pattern_of_number_expression(re_match: re.Match, processed_text: str) -> bool:
    """Check whether a matched pattern is only part of a larger number expression.

    Depending on how a regex is written, it may capture only part of a
    number expression. This helper detects that case by checking whether the
    character immediately before the match is a digit.

    e.g. pattern "3/13" matched inside "これは13/13です" -> True
    e.g. pattern "3/13" matched inside "これは3/13です"  -> False

    Args:
        re_match (re.Match): the regex match to inspect
        processed_text (str): the full input string

    Returns:
        bool: True when the match is part of a larger number expression
    """
    match_start = re_match.span()[0]
    return match_start != 0 and re.match("[0-9]", processed_text[match_start - 1]) is not None
import requests
import json
def get_invoice(amt: int, memo: str) -> str:
    """
    Return a Lightning invoice (BOLT11 payment request) from the Tallycoin API.

    :param amt: amount in satoshis
    :param memo: message attached to the payment request
    :return: the 'lightning_pay_request' string from the API response
    """
    tallydata = {'type': 'fundraiser', 'id': 'zfxqtu', 'satoshi_amount': str(amt), 'payment_method': 'ln',
                 'message': memo}
    response = requests.post('https://api.tallyco.in/v1/payment/request/', data=tallydata).text
    # 'payload' instead of 'dict' -- the previous name shadowed the builtin.
    payload = json.loads(response)
    return payload["lightning_pay_request"]
import ast
def _is_call_to_format(call: ast.Call) -> bool:
"""
Check if a call is a call to `str.format`, like '{0}'.format(1).
"""
if not isinstance(call.func, ast.Attribute):
return False
if not isinstance(call.func.value, ast.Constant):
return False
if not isinstance(call.func.value.value, str):
return False
return call.func.attr == "format" | 75cf2b8d99ec76aad05ac23baadd901c27bd5346 | 114,070 |
def staff_client(client, staff_user):
    """Return *client* after authenticating it as *staff_user*."""
    client.force_login(staff_user)
    return client
def round_elements_keeping_sum(float_list, benchmark_list):
    """Helper function for chi2test.

    Round each float while keeping the total equal to the sum of
    *benchmark_list*; any difference is absorbed by the largest element.

    Parameters
    ----------
    float_list : list (float)
        Decimal numbers to round off.
    benchmark_list : list
        The sum of its elements is the benchmark for the rounded total.

    Returns
    -------
    list (int)
        Rounded list whose sum matches the benchmark.
    """
    rounded = [round(value) for value in float_list]
    diff = sum(benchmark_list) - sum(rounded)
    if diff != 0:
        # Put the whole adjustment on the biggest element.
        rounded[rounded.index(max(rounded))] += diff
    return rounded
def distance_to_spirale_center(size):
    """
    Gives the distance from one layer to the first layer.
    ex: 3 (first layer)  => 1
    ex: 5 (second layer) => 2
    """
    layer, _ = divmod(size - 1, 2)
    return layer
def convertTimeStringToTime(timeStr):
    """
    Parse a timestamp of the form ``[hh:mm:ss.ssss]`` into seconds.

    Returns -1.0 when the string cannot be parsed.
    """
    try:
        if not (timeStr.startswith('[') and timeStr.endswith(']')):
            return -1.0
        fields = timeStr[1:-1].split(':')  # drop the surrounding brackets
        return float(fields[0]) * 3600 + float(fields[1]) * 60 + float(fields[2])
    except Exception:
        return -1.0
def sami_params(php, os):
    """Configuration parameters for sami with their default values."""
    return dict(
        KRW_CODE='/code',
        SAMI_CONFIG='/etc/sami/sami.conf.php',
        SAMI_PROJECT_TITLE='API Documentation',
        SAMI_SOURCE_DIR='src',
        SAMI_BUILD_DIR='docs/build/html/api',
        SAMI_CACHE_DIR='docs/cache/html/api',
        SAMI_FLAGS='-v --force',
        SAMI_SERVER_PORT=8001,
        SAMI_SOURCE_REGEX=r'\.\(php\|txt\|rst\)$',
        SAMI_THEME='default',
    )
def _create_widget(widget_type, **kwargs):
"""
Creates a widget of the given type with the given parameters.
Widget can be with/without units dropdown.
Parameters
----------
widget_type: `any`
Type of the widget to be created
**kwargs: `dict`
Parameters specific to the widget
Returns
-------
`~ipywidgets.widgets.Widget or [~ipywidgets.widgets.Widget, ~ipywidgets.widgets.Widget]`
widget or [widget, units_dropdown]
"""
unit = None
placeholder = None
opts = None
if "unit" in kwargs:
unit = kwargs["unit"]
del kwargs["unit"]
if "placeholder" in kwargs:
placeholder = kwargs["placeholder"]
del kwargs["placeholder"]
if "opts" in kwargs:
opts = kwargs["opts"]
del kwargs["opts"]
widget_element = widget_type(**kwargs)
widget_element.create_widget()
if unit:
widget_element.set_unit(unit)
if placeholder:
widget_element.set_place_holder(placeholder)
if opts:
widget_element.attach_units_dropdown(opts)
return [widget_element.get_widget(), widget_element.get_dropdown_widget()]
else:
return widget_element.get_widget() | e8d538084dc3517fa59ec1dc41f9670ea3107246 | 114,084 |
import re
def clean(text):
    """Uppercase *text* and collapse every run of punctuation/symbol
    characters (plus the letters Q, W, X, Y) into a single space."""
    pattern = r'[~`!@#$%^&*()-=_+\{\}\[\]\\:\;\"\'\|\?\>\<\,\.\/QWXY]+'
    return re.sub(pattern, " ", text.upper())
def _build_workhour_lookup(schedule, lunch_hour):
"""Build a lookup dict to determine whether a given hour of a given day of
week is a work hour.
"""
res = {d: [False] * 24 for d in range(7)}
for dow in res:
if len(schedule[dow]) == 0: # off day
continue
start_h, end_h = schedule[dow][0], schedule[dow][1]
for wh in range(start_h, end_h):
res[dow][wh] = True
res[dow][lunch_hour] = False
return res | 8aa9e76f27c4a3ffdbcb5ea1062be97fdc8b70ab | 114,086 |
import re
def ignore_spaces(string, must_begin=True):
    """Take a non-regex string and return a regex string which ignores extra
    spaces in it (and newlines after it).

    must_begin : if True, the provided string must be at the beginning of
    the code / cell.
    """
    if len(string) == 0:
        raise ValueError("Expect a non-empty string !")
    # Escape each token individually so whitespace itself is never escaped
    # (re.escape on the whole string would escape the spaces too; see
    # https://stackoverflow.com/questions/32419837/why-re-escape-escapes-space).
    escaped_tokens = [re.escape(token) for token in string.split()]
    body = r'\s+'.join(escaped_tokens)
    prefix = r'^' if must_begin else ''
    return r"(?s)(%s\s*%s)(.*)" % (prefix, body)
def steps_f12(j=None, Xs=None):
    """Stepsize for f update given current state of Xs."""
    lipschitz_const = 2  # Lipschitz constant is always 2
    slack = 0.1
    return slack / lipschitz_const
import re
def clean_tag(tag):
    """
    Replace characters that are invalid in tensorboard image tags
    (anything outside [-/\\w.]) with underscores.
    """
    return re.sub(r'[^-/\w\.]', '_', tag)
def is_cover(set_of_sets, alphabet):
    """
    Determine whether `set_of_sets` is a cover of `alphabet`; that is,
    is every element of `alphabet` represented somewhere in `set_of_sets`
    (and nothing else)?

    Parameters
    ----------
    set_of_sets : a (frozen)set of (frozen)sets
        The potential covering.
    alphabet : set
        The full alphabet.

    Returns
    -------
    cover : bool
        Whether set_of_sets is a cover or not.
    """
    union_of_all = set().union(*set_of_sets)
    return union_of_all == set(alphabet)
def repeat_string(source, size):
    """Repeat or truncate <source> string, so it has length <size>."""
    current = len(source)
    if size <= current:
        return source[:size]
    repeats = (size + current - 1) // current  # ceil(size / current)
    return (source * repeats)[:size]
import re
def sort_correctly(iterable):
    """
    Sort the given iterable the way humans expect (natural / alphanumeric
    order, case-insensitive), with title-cased items placed after the rest.

    How to sort alpha numeric set in python
    https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
    """
    def natural_key(item):
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split('([0-9]+)', str(item).lower())]

    by_natural_order = sorted(iterable, key=natural_key)
    # Stable second sort: non-title-cased entries first.
    return sorted(by_natural_order, key=lambda item: str(item).istitle())
def calculate_grid_points(size, buffer, bars_per_line, lines_per_page):
    """
    Calculates and returns two lists.

    The first list consists of x-coordinates of all bar lines.
    The second list consists of y-coordinates of all center staff lines.

    Parameters
    ----------
    size : 2-tuple of ints
        Pixel size of the output image (X, Y).
    buffer : int
        Size of white space on all sides of the output image, in pixels.
    bars_per_line : int
    lines_per_page : int
    """
    usable_w = size[0] - 2 * buffer
    usable_h = size[1] - 2 * buffer
    x_list = [buffer + i * usable_w / bars_per_line
              for i in range(bars_per_line + 1)]
    y_list = [buffer + (usable_h / lines_per_page) / 2 + i * usable_h / lines_per_page
              for i in range(lines_per_page)]
    return x_list, y_list
def snake_to_camel(word):
    """
    Convert a snake_case word to CamelCase.
    example: my_plan -> MyPlan
    """
    pieces = word.split('_')
    # An empty piece (from a doubled underscore) is kept as '_'.
    return ''.join(piece.capitalize() if piece else '_' for piece in pieces)
import pickle
def to_pkl(self, path_pkl=None):
    """
    Save the WaterFrame into a pickle file.

    Parameters
    ----------
    path_pkl: str
        Location of the pickle file. If path_pkl is None, the path will be the metadata['id'].

    Returns
    -------
    path_pkl: str
        Location of the pickle file.
    """
    if path_pkl is None:
        path_pkl = self.metadata['id'] + '.pkl'
    # Context-manage the file so the handle is always closed (the previous
    # version leaked the handle returned by open()).
    with open(path_pkl, "wb") as handle:
        pickle.dump(self.__dict__, handle)
    return path_pkl
import requests
def is_shopify_shop(shop_url: str):
    """Return True when the website at 'shop_url' has been built with Shopify.

    Fetches the page and looks for the 'var Shopify' marker in its HTML.
    """
    page = requests.get(shop_url, timeout=2)
    html = page.content.decode("utf-8")
    return 'var Shopify' in html
def td_to_s(td):
    """
    Convert timedelta to seconds since start of day.

    NOTE(review): ``timedelta.seconds`` ignores the ``days`` component
    (the original commented-out code hints full-day handling was considered
    and dropped) -- confirm callers expect that.
    """
    return td.seconds
def mention_to_tokens(mention, token_type="words", lowercase=False):
    """
    Extract tokens from the mention.

    :param mention: mention object.
    :param token_type: attribute name on the sentence holding the tokens.
    :type token_type: str
    :param lowercase: whether to lowercase the tokens.
    :type lowercase: bool
    :return: The token list (a new list in both cases).
    :rtype: list
    """
    tokens = mention.context.sentence.__dict__[token_type]
    if lowercase:
        return [token.lower() for token in tokens]
    return list(tokens)
def some(predicate, iterable):
    """
    Return True if `predicate` is truthy for at least one element of
    `iterable`; False otherwise (including for an empty iterable).

    @param predicate: Predicate function.
    @param iterable: Iterable containing the elements to test.
    @return: True as soon as any element satisfies the predicate.
    """
    return any(predicate(element) for element in iterable)
def get_local_username(pamh):
    """
    Returns the local user name wanting to authenticate.

    :param pamh: PAM handle from python_pam module
    :return: local username, or empty string if it cannot be determined
    """
    try:
        user = pamh.get_user(None)
    # Narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        user = ''
    return user
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.