content | sha1 | id |
|---|---|---|
import logging
import hashlib
def verify(checksums):
"""
Verify if all given files are present and their SHA256
checksum is correct. Any files not explicitly listed are deleted.
checksums is a dict of file => checksum, with file a file relative to dir.
Returns a boolean indicating that all checksums are correct, and all files
are present.
Any warnings and errors are printer to logger.
Errors or exceptions result in a return value of False.
"""
logger = logging.getLogger("nbt.tests.downloadsample")
success = True
    for path, check in checksums.items():
        try:
            if check is None:
                continue  # Skip folders
            h = hashlib.sha256()
            chunksize = 524288  # 0.5 MiB
            # Use a context manager so the file is closed even if reading fails.
            with open(path, 'rb') as localfile:
                while True:
                    data = localfile.read(chunksize)
                    if not data:
                        break
                    h.update(data)
            calc = h.hexdigest()
            if calc != check:
                logger.error("Checksum failed %s: %s found, %s expected" % (path, calc, check))
                success = False
        except IOError as e:
            if e.errno == 2:  # errno.ENOENT: no such file or directory
                logger.error('Checksum verification failed: file %s not found' % e.filename)
            else:
                logger.error('Checksum verification of %s failed: errno %d: %s' %
                             (e.filename, e.errno, e.strerror))
            return False
logger.info("Checksum of %d files verified" % len(checksums))
return success | ed59409d9ca41fac7fd05fd9435bcda3dfe3163e | 99,239 |
from pathlib import Path
def get_parent(p: Path, name: str, offset: int = 0) -> Path:
"""Get Path parent by name
- protects for max/min of parents length
Parameters
----------
p : Path
name : str
parent name to find
offset : int
offset from found parent
Returns
-------
Path
parent of original Path
"""
parents = p.parents
m = {k.name.lower(): i for i, k in enumerate(parents)}
i = min(max(m[name.lower()] - offset, 0), len(parents) - 1)
return p.parents[i] | 477a382aff3a09bdb0480a61afd4da4d7d7cc324 | 99,243 |
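A minimal usage sketch (the path below is illustrative):
    p = Path('/home/user/project/src/main.py')
    get_parent(p, 'project')             # Path('/home/user/project')
    get_parent(p, 'project', offset=1)   # Path('/home/user/project/src')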
def name_matrix(vma):
""" Obtain the name matrix of the V-Matrix that contains the
coordinate names by row and column.
:param vma: V-Matrix
:type vma: automol V-Matrix data structure
:rtype: tuple(tuple(str))
"""
if vma:
name_mat = tuple(zip(*vma))[2]
else:
name_mat = ()
name_mat = [list(row) + [None]*(3-len(row)) for row in name_mat]
return tuple(map(tuple, name_mat)) | d36672f5c8b38ac2225d71daed338e3a815cea52 | 99,248 |
import itertools
def pairs(a):
"""creates pairs of elements of a single list (no redundancies)"""
return [X for X in list(itertools.product(a, a)) if X[0] != X[1]] | f2b9885663247ff81b023c79a53a1197a11177fa | 99,249 |
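For example:
    pairs([1, 2, 3])
    # [(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]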
def _title(c, index):
"""Generic image title if not provided.
Parameters
-----------
c: pyvims.VIMS
Cube to plot.
index: int, float, str or tuple
VIMS band or wavelength to plot.
Returns
-------
str
Default title.
"""
if isinstance(index, int):
return f'{c} on band {index}'
if isinstance(index, float):
return f'{c} at {index:.2f} µm'
if isinstance(index, str):
if 'um' in index:
return f'{c} | {index.replace("um", " µm")}'
return f'{c} | {index.title()}'
if isinstance(index, tuple):
if isinstance(index[0], float):
return f'{c} at ({index[0]:.2f}, {index[1]:.2f}, {index[2]:.2f}) µm'
else:
return f'{c} on bands {index}'
return None | 331637b23b1ec8bdc10c18e1a438eee75e5b7b49 | 99,253 |
def adapt_decimal(value):
"""Convert decimal.Decimal to string."""
return str(value) | 8dea9c750787b8b54adf75c76d7cd001622a01bb | 99,256 |
def evaluate_condition(backend, condition):
"""
Evaluates a condition such as
``StrictVersion(onnxruntime.__version__) <= StrictVersion('0.1.3')``
"""
if backend == "onnxruntime":
return eval(condition)
else:
raise NotImplementedError("Not implemented for backend '{0}'".format(backend)) | 5e86f112ff25d1fabb786d7412a22d5b7f7399a3 | 99,258 |
import torch
def MatPower(mat_m, p):
"""Computes mat_m^p, for p a positive integer.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
if p in [1, 2, 4, 8, 16, 32]:
p_done = 1
res = mat_m
while p_done < p:
res = torch.matmul(res, res)
p_done *= 2
return res
power = None
while p > 0:
if p % 2 == 1:
power = torch.matmul(mat_m, power) if power is not None else mat_m
p //= 2
mat_m = torch.matmul(mat_m, mat_m)
return power | dfb7eb45f030fb3b156a7270b7790d83d57e1660 | 99,259 |
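The general branch is exponentiation by squaring: the low bit of p decides whether the
current squared factor is multiplied into the accumulator. A small usage sketch (values
are illustrative):
    m = torch.tensor([[2.0, 0.0], [0.0, 3.0]])
    MatPower(m, 3)
    # tensor([[ 8.,  0.],
    #         [ 0., 27.]])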
def _make_notification(
category, title, body=None, content_available=False, mutable_content=False,
additional=None
):
"""Summary
For full documentation of remote notifications, see:
https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification
Parameters
----------
category : str
APNs category
title : str
Notification title
body : str, optional
Notification body
content_available : bool, optional
If True, client will perform background app refresh
mutable_content : bool, optional
If True, client will pass notification to notification service app
extension before delivery
additional : dict, optional
Additional, client-specific info
Returns
-------
    dict
        The assembled APNs notification payload
""" # noqa E501
aps = {
'category': category,
'sound': 'bingbong.aiff'
}
# Alert
if body:
aps.update(alert={'title': title, 'body': body})
else:
aps.update(alert=title)
# Mutable-Content
if mutable_content:
aps.update({'mutable-content': 1})
# Content-Available
if content_available:
aps.update({'content-available': 1})
apns_dict = {'aps': aps}
# Client-specific info
if additional:
apns_dict.update(additional)
return apns_dict | 9ee9edafb549b1c329554ea82ca4c5804747059d | 99,260 |
def fileToStringList(FName):
""" Returns a list of strings L with the property that
L[k] is line k in the file with name FName.
PreC: FName is a string that names the file that is to be read.
It should include the filename's suffix, e.g., for file
MyFile.txt, FName must be 'MyFile.txt', not 'MyFile'.
The file must be in the current working directory.
"""
L = []
with open(FName,"r") as F:
for s in F:
# Remove trailing newline character...
s1=s.rstrip("\n")
# Remove trailing carriage return...
s2 = s1.rstrip("\r")
# Append this "cleaned up" file line...
L.append(s2)
return L | 454a857d726716ffb95df5b0c6559a98f47ba0f5 | 99,265 |
def get_word2vec_related_words_from_word(word, model, threshold, max_qty):
"""
Method to generate a list of words that are found to be related to the input word through
assessing similarity to other words in a word2vec model of word embeddings. The model can
be learned from relevant text data or can be pre-trained on an existing source. All words
that satisfy the threshold provided up to the quantity specified as the maximum are added.
Args:
word (str): The word for which we want to find other related words.
model (Word2Vec): The actual model object that has already been loaded.
threshold (float): Similarity threshold that must be satisfied to add a word as related.
max_qty (int): Maximum number of related words to accept.
Returns:
list: The list of related words that were found, could be empty if nothing was found.
"""
related_words = []
try:
matches = model.most_similar(word, topn=max_qty)
except KeyError:
matches = []
for match in matches:
word_in_model = match[0]
similarity = match[1]
        if similarity >= threshold:
            related_words.append(word_in_model)
    return related_words | 15451b3528407d2c8056e2a057ed91ed30b8dabd | 99,267 |
def matrix_transpose(m):
""" Transposes the input matrix.
The input matrix :math:`m` is a 2-dimensional array.
:param m: input matrix with dimensions :math:`(n \\times m)`
:type m: list, tuple
:return: transpose matrix with dimensions :math:`(m \\times n)`
:rtype: list
"""
num_cols = len(m)
num_rows = len(m[0])
m_t = []
for i in range(num_rows):
temp = []
for j in range(num_cols):
temp.append(m[j][i])
m_t.append(temp)
return m_t | d2fdfa4371bdfc4fd91cbc56e7aed930f31fa196 | 99,272 |
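For example:
    matrix_transpose([[1, 2, 3], [4, 5, 6]])
    # [[1, 4], [2, 5], [3, 6]]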
def parse_idx(img_name):
"""
    Simple helper function that takes an image name and returns the index position of the image.
"""
bk = 0
    # Strip the extension and the 3-character name prefix (e.g. 'img'), keeping the digits
    prefix = img_name.split('.')[0][3:]
    # Find where the first significant (non-zero) digit appears
for idx,alpha in enumerate(prefix):
if int(alpha) == 0:
continue
else:
bk = idx
break
num = int(prefix[bk:]) - 1 #Since image names start from 1
return num | f94942a346083e7860321a6c3c65769da1b58b99 | 99,278 |
def create_ticket(session, subject, description, return_type=None, **kwargs):
"""
Creates a support ticket for the VPSA. This ticket will be assigned to a
member of the support staff.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type subject: str
:param subject: The subject for the ticket (analogous to an e-mail
subject). For example: 'Help With Expanding Pool'. Required.
:type description: str
:param description: The full body of the ticket (analogous to an e-mail
body). For example: 'I would like more information on best practices
for expanding my "pool1" storage pool.' Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
if subject is None or description is None:
raise ValueError('A ticket subject and description must be provided.')
body_values = {'subject': subject, 'description': description}
path = '/api/tickets.json'
return session.post_api(path=path, body=body_values,
return_type=return_type, **kwargs) | 156571f2744a4b88b7b54912274563a9b12bff6f | 99,279 |
def get_y_pred(task_name, pred_data_dir):
"""
Read file to obtain y_pred and scores.
"""
    pred = []
    score = []
    if task_name in ["sentitel_NLI_M"]:
        with open(pred_data_dir, "r", encoding="utf-8") as f:
            s = f.readline().strip().split()
            while s:
                pred.append(int(s[0]))
                score.append([float(s[1]), float(s[2]), float(s[3])])
                s = f.readline().strip().split()
return pred, score | 4ca5e9f73aa67e3e9136b308812457e69203c74e | 99,286 |
def get_core_identifier(cpu_identifier, sensor_identifier):
"""
a cpu has many sensors. some of them for cores. cores themselves don't have identifiers.
thus, we use the identifier of the sensor to infer to which core it belongs.
:param cpu_identifier: e.g. /intelcpu/0
:param sensor_identifier: e.g. /intelcpu/0/load/1 - i.e. the load sensor for core #1 of CPU #0.
:return: a core identifier e.g. /intelcpu/0/1
"""
core_number = sensor_identifier.split('/')[-1]
return cpu_identifier + "/" + core_number | e501c36da4c9d597a8020a8e9b47b3a0c198267a | 99,290 |
import statistics
def median(values):
"""Get the median of the provided values.
:param values: An iterable of values to be evaluated. They may be of different number types.
.. note::
This is just a convenience wrapper around the statistics module, but captures ``StatisticsError`` and returns
``None`` if ``values`` is empty.
"""
try:
return statistics.median(values)
except statistics.StatisticsError:
return None | 920ef0a158a54031650be296d0e94dbd922dbd95 | 99,291 |
def reduce_graph_dataset(graphs, graphs_to_keep):
"""
graphs_to_keep must only have graphids that are valid
"""
if graphs_to_keep is None:
return graphs
else:
reduced_dataset = dict()
for graphId in graphs_to_keep:
reduced_dataset[graphId] = graphs[graphId]
return reduced_dataset | 2cb82388e17e410d4e932d5efe361f63c1846e55 | 99,293 |
def space_tokenize(text):
"""
Tokenizes a piece of text by splitting it up based on single spaces (" ").
Args:
text (str): input text as a single string
Returns:
list(str): list of tokens obtained by splitting the text on single spaces
"""
return text.split(" ") | 207174d7dcd52edc38248bf59b48abcabb10a293 | 99,296 |
def limit_value(value: float, lower: float, higher: float) -> float:
"""
Binds a value between two others.
"""
return lower if value < lower else higher if value > higher else value | 9d653274c86378d56d7effc75b7f718778c3dfdf | 99,297 |
from typing import Union
def as_str(o: Union[str, None], base_type=str, raise_=True):
"""
    Return `o` if already a str, otherwise return the string value for `o`.
    If `o` is None, return an empty instance of `base_type` instead.
    If `o` cannot be converted to a str, re-raise the error if `raise_` is true,
    otherwise return an empty instance of `base_type` instead.
"""
if isinstance(o, base_type):
return o
if o is None:
return base_type()
try:
return base_type(o)
except ValueError:
if raise_:
raise
return base_type() | 3ac0c73b4f18b9b86aa568896cf5cc0204f14b0d | 99,303 |
def player_has_missed_games(game_being_processed, cur_pfr_game):
"""
Function that checks if the player has missing games
:param game_being_processed: Game (by week) we are processing
:param cur_pfr_game: Game (by week) we are on for the player
:return: Bool
"""
    return game_being_processed != cur_pfr_game | f4f1f9b23a82477b1910717fc3f7e9e3df5946f9 | 99,306 |
def case_insensitive_dict_get(d, key, default=None):
"""
Searches a dict for the first key matching case insensitively. If there is
an exact match by case for the key, that match is given preference.
Args:
d: dict to search
key: key name to retrieve case-insensitively
Returns: value or default
"""
if not key:
return default
if key in d:
return d[key]
if isinstance(key, str):
key_lower = key.lower()
for k in d:
if k.lower() == key_lower:
return d[k]
return default | d01524292ef8fb7d8f4248c9f526dbef1a580280 | 99,310 |
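A quick usage sketch (an exact-case match takes precedence; otherwise the first
case-insensitive hit in insertion order wins):
    d = {'Content-Type': 'text/html', 'content-type': 'application/json'}
    case_insensitive_dict_get(d, 'content-type')    # 'application/json' (exact match)
    case_insensitive_dict_get(d, 'CONTENT-TYPE')    # 'text/html' (first insensitive hit)
    case_insensitive_dict_get(d, 'missing', 'n/a')  # 'n/a'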
import re
def active_page(url, regex):
"""Check the current url with regex and return 'active' if matched.
Avoid using this filter: better is to make use of CSS inheritance.
"""
if re.search(regex, url):
return 'active'
else:
return '' | 00ccbbe06abbe35ea74d198113427e8f036ab991 | 99,314 |
def getMaxSize(size=None, maxDefault=4096):
"""
Get the maximum width and height that we allow for an image.
:param size: the requested maximum size. This is either a number to use
for both width and height, or an object with {'width': (width),
'height': height} in pixels. If None, the default max size is used.
:param maxDefault: a default value to use for width and height.
:returns: maxWidth, maxHeight in pixels. 0 means no images are allowed.
"""
maxWidth = maxHeight = maxDefault
if size is not None:
if isinstance(size, dict):
maxWidth = size.get('width', maxWidth)
maxHeight = size.get('height', maxHeight)
else:
maxWidth = maxHeight = size
# We may want to put an upper limit on what is requested so it can't be
# completely overridden.
return maxWidth, maxHeight | 53310e721fbc769dcb1d4873580d9d0a93ba69bd | 99,316 |
def t_bold(s):
"""
Returns string s wrapped in terminal bold font code
"""
return '\033[1m' + s + '\033[0m' | 36e28e79500f1c394fd4439eddf41844248acb11 | 99,324 |
def longest_common_prefix(seq1, seq2):
"""
taken from https://www.quora.com/What-is-the-easiest-way-to-find-the-longest-common-prefix-or-suffix-of-two-sequences-in-Python
:param seq1: first string
:param seq2: second string
:return: the prefix
"""
start = 0
while start < min(len(seq1), len(seq2)):
if seq1[start] != seq2[start]:
break
start += 1
return seq1[:start] | 79dbaf1e0ef4859bb7b0e525c09d0273b4dbf590 | 99,325 |
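For example:
    longest_common_prefix('interstellar', 'internet')  # 'inter'
    longest_common_prefix('abc', 'xyz')                # ''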
def _step_1_only(step_n):
"""Returns true for step 1 only."""
return step_n == 1 | 7e2dcf74c7a274a6e8300baf789448024a130189 | 99,327 |
def split(input_list):
"""
Splits a list into two pieces
:param input_list: list
:return: left and right lists (list, list)
"""
input_list_len = len(input_list)
midpoint = input_list_len // 2
return input_list[:midpoint], input_list[midpoint:] | 1c4a94bafb3a3e43b3536161f11755630ad726e0 | 99,329 |
def listify(config, key, sep=','):
""" Create a list from a string containing list elements separated by
sep.
"""
return [i.strip() for i in config[key].split(sep)] | 4bc3f3ae45a49e89457508ba6404f994d28c18e8 | 99,339 |
def lorentzian_function(x_L, sigma_L, amplitude_L):
"""Compute a Lorentzian function used for fitting data."""
return amplitude_L * sigma_L**2 / (sigma_L**2 + x_L ** 2) | e26fa0e2213ff999e6691fc780fbf1f6dc48cb32 | 99,340 |
def _get_sheet_from_workbook(workbook, sheet_index=None):
"""Returns a sheet from a Workbook; default active or at the given index
By default returns the active worksheet from the given openpyxl.Workbook
instance, but if given a sheet_index argument, the sheet at that index
will instead be returned
Args:
workbook (openpyxl.Workbook): The workbook to select a sheet from.
sheet_index (int, optional): The index of the worksheet to return.
A value of None returns the active worksheet. Defaults to None.
Returns:
openpyxl.worksheet.Worksheet: The active or selected worksheet.
"""
if sheet_index is None:
return workbook.active
return workbook.worksheets[sheet_index] | cff78917208097d261a8758ea6987f6420d661b8 | 99,342 |
def normalize_location_cot(location_str):
"""
When geocoding intersections, the City of Toronto geocoder maintained by GCC expects input of
the form "Street 1 and Street 2", but some Ped Delay files use "at" and "over" (the latter for
overpasses).
We normalize these to use "and".
"""
return location_str.replace(' at ', ' and ').replace(' over ', ' and ') | a19f605aa22a7b3ebc6694a74e8ed13d441f63f5 | 99,348 |
def getstr255(r):
    """Get string from str255 resource"""
    if not r.data:
        return ''
    # The first byte is the string length; handle both bytes (int) and str (char) data
    first = r.data[0]
    length = first if isinstance(first, int) else ord(first)
    return r.data[1:1 + length] | 597b56ac9f6b67ed37b3676e7c98468abeb7e9a0 | 99,349 |
import re
def normalize_file_name(fname):
"""
Remove/replace characters in the string so that the string is suitable
to be used as a file name.
"""
t = fname
    t = re.sub('[^0-9A-Za-z_-]', '_', t)
t = re.sub('_+', '_', t)
return t | d4d988b2f7b0cd2acf74f113df6c6091481d7b0c | 99,351 |
def choose_state_to_del(G, StatesLeft):
"""Helper for del_gnfa_states
---
Given a GNFA G and a list StatesLeft,
choose first eligible state to delete, and return it
plus the set of non-deleted entries.
Called only if there is an eligible state to be deleted.
"""
for q in StatesLeft:
if (q not in G["Q0"] | G["F"]):
# There is one eligible state to delete
return ( q, [x for x in StatesLeft if x != q] ) | 001b4a22262393e91f8cbf7e3e643a9ae7681f74 | 99,355 |
def get_expressions_out(
raw_expressions, descriptors, source, expression_type, group_by
):
"""Return expressions output."""
return {
"raw_expressions": raw_expressions,
"descriptors": descriptors,
"source": source,
"expression_type": expression_type,
"group_by": group_by,
} | bd159f198a2b17ab584d66fa832b16280825f0b2 | 99,358 |
def get_items(obj):
"""Get items in obj."""
return list(obj.items()) | 7a0f6090b9253ca4f7dcd7f35b74cb460aa89daf | 99,361 |
import torch
def tracebatch(mat):
"""
Trace of of batch of matrices.
Defaults to last two dimensions.
"""
return torch.diagonal(mat, dim1=-2, dim2=-1).sum(dim=-1) | d8c94b1914655943132fea598077a89e1c78fef8 | 99,370 |
def id(x):
"""Returns the argument without modification."""
return x | f353b2f5790dee6f8b2deb4ef215c1ca591e7bc0 | 99,374 |
import io
import functools
def blockiter(fd, blocksize=io.DEFAULT_BUFFER_SIZE):
"""Iterate on file-like objects reading blocks of the specified size.
The `fd` parameter must be a binary or text file-like object opened
for reading.
The `blocksize` parameter defaults to `io.DEFAULT_BUFFER_SIZE`.
"""
guard = '' if isinstance(fd, io.TextIOBase) else b''
return iter(functools.partial(fd.read, blocksize), guard) | 2e833397cf5055713b8c8ec2846c6c356da611ab | 99,375 |
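A usage sketch (the filename and the process() handler are hypothetical):
    with open('data.bin', 'rb') as fd:
        for block in blockiter(fd, blocksize=4096):
            process(block)  # each block is at most 4096 bytes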
import socket
def hostToIP(host):
""" Returns ip of host, or None on failure"""
try:
return socket.gethostbyname(host)
    except OSError:  # socket.error is an alias of OSError
        return None | 6c539350bdc47b82c5a71e85b2ba07538bb0234a | 99,381 |
def printret(obj):
"""Print then return _obj_."""
print(obj)
return obj | 74df88905c1b38c386fc29d52a0f521c5b550a51 | 99,385 |
def interface_is_untagged_member(db, interface_name):
""" Check if interface is already untagged member"""
vlan_member_table = db.get_table('VLAN_MEMBER')
    for key, val in vlan_member_table.items():
        if key[1] == interface_name:
            if val['tagging_mode'] == 'untagged':
return True
return False | d9bd23e172d94ea647ef098e46f87b365beff035 | 99,387 |
from pathlib import Path
def get_benchmark_directory(base_directory, executable, runtime_sec, rmw, create=False):
"""
Return the directory to place measurements and reports for the given experiment.
If `create` is True, the directory is created if it does not exist yet.
"""
# Note: memory_usage.py and std_latency.py make assumptions about the directory format.
# Do not change this without also changing these other files.
directory = Path(base_directory)/f'{runtime_sec}s/{rmw}/{executable}/'
if create:
directory.mkdir(parents=True, exist_ok=True)
return directory | 960d8c709c11d2ca625c10a065b10e082f671069 | 99,391 |
def format_resolve_template(template_in, format_variables):
"""Renders the given Resolve template with the given Python dict. The
result is always Resolve safe. Meaning that variables like "%{Clip Type}"
will be preserved if the given ``format_variables`` doesn't alter it.
:param str template_in: A string
:param dict format_variables:
:return:
"""
return template_in.replace("{", "(").replace("}", ")s") % format_variables | 949ad3c804a8fb129a0ff130b52c59fdfbae9cfe | 99,402 |
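The replace calls turn Resolve-style "%{Name}" variables into Python "%(Name)s"
placeholders before interpolating. For example (values are illustrative):
    format_resolve_template('%{Clip Name}_v%{Version}',
                            {'Clip Name': 'shot010', 'Version': '003'})
    # 'shot010_v003'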
import threading
def start_daemon(target, args=(), kwargs=None):
"""Start and return daemon thread."""
t = threading.Thread(target=target, args=args, kwargs=kwargs, daemon=True)
t.start()
return t | f3f82bf60bb075d824ce016e464b13866f0b74e7 | 99,404 |
import pprint
def _message_to_javadoc(message_definition):
""" Converts JSON message definition to javadoc """
formatted_message = pprint.pformat(message_definition, indent=4, width=120, depth=None)
return " * " + formatted_message.replace("\n", "\n * ") | 9de196864172894883903f651680bd040dc6e40b | 99,407 |
def _strip_unsafe_kubernetes_special_chars(string: str) -> str:
"""
    Kubernetes only supports lowercase alphanumeric characters, "-" and "."
    in the pod name.
    However, there are special rules about how "-" and "." can be used, so
    let's only keep alphanumeric chars; see here for details:
    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
:param string: The requested Pod name
:return: Pod name stripped of any unsafe characters
"""
    return ''.join(ch.lower() for ch in string if ch.isalnum()) | f5a0e0f61aec408acfc4eee1745053e36a483a6a | 99,410 |
def convert_data_types(df):
"""
Takes a pandas Dataframe (part of the black_friday_data_hack project) as input
and convert the data types of some of its features.
Arguments: data - DataFrame
Returns: same DataFrame with converted data types
"""
# Convert categorical features into numeric type
df['Age'] = df['Age'].map({'0-17': 15, '18-25': 21, '26-35': 30, '36-45': 40, '46-50': 48, '51-55': 53, '55+': 55})
df['Stay_In_Current_City_Years'] = df['Stay_In_Current_City_Years'].map({'0': 0, '1': 1, '2': 2, '3': 3, '4+': 4})
# Convert numeric features into categorical type
df['Occupation'] = df['Occupation'].astype('category')
df['Product_Category_1'] = df['Product_Category_1'].astype('category')
df['Product_Category_2'] = df['Product_Category_2'].astype('category')
df['Product_Category_3'] = df['Product_Category_3'].astype('category')
df['Marital_Status'] = df['Marital_Status'].astype('category')
# Convert Product_ID to numerical type by discarding the 1st letter 'P'
df['Product_ID'] = df['Product_ID'].map(lambda x: x[1:])
df['Product_ID'] = df['Product_ID'].astype('int64')
# Convert Purchase to numerical type
df['Purchase'] = df['Purchase'].astype('int64')
return df | 1a24fe16bdf270c3bad2d56722269f4355cb3b96 | 99,412 |
def v_first_option(options):
"""
Return the first value in a menu-select options structure. This is useful when
you create an options structure and you want the first item as the placeholder or
the default selected value.
Parameters
----------
options : list[dict]
The menu select option list
Returns
-------
str
The text of the first option
"""
return options[0]['text']['text'] | d66c8ad525b3e36091b2bb3641ee4c9ff536e21f | 99,417 |
def _write_slurm_script(args, cmd_str, cmd_folder_name):
"""Write a slurm job script for the given command string.
The bash script will be dumped in the current folder.
Args:
args (argparse.Namespace): Command-line arguments.
cmd_str (str): The actual command that should be executed by the job
job scheduler (independent of slurm).
cmd_folder_name (str): The folder name of the command ``cmd_str`` within
the hpsearch output folder. This is used to determine a filename.
Returns:
(str):
Bash script filename.
"""
script_fn = '%s_script.sh' % cmd_folder_name
with open(script_fn, 'w') as f:
f.write('#!/bin/bash\n')
f.write('#SBATCH --job-name %s\n' % cmd_folder_name)
f.write('#SBATCH --output %s_' % cmd_folder_name + '%j.out\n')
f.write('#SBATCH --error %s_' % cmd_folder_name + '%j.err\n')
f.write('#SBATCH --time %d:00:00\n' % args.num_hours)
if len(args.slurm_mem) > 0:
f.write('#SBATCH --mem %s\n' % args.slurm_mem)
if len(args.slurm_gres) > 0:
f.write('#SBATCH --gres %s\n' % args.slurm_gres)
if len(args.slurm_partition) > 0:
f.write('#SBATCH --partition %s\n' % args.slurm_partition)
if len(args.slurm_qos) > 0:
f.write('#SBATCH --qos %s\n' % args.slurm_qos)
if len(args.slurm_constraint) > 0:
f.write('#SBATCH --constraint %s\n' % args.slurm_constraint)
f.write(cmd_str)
return script_fn | 0b7848696a7d59bec4297218b1682dd31ee9d173 | 99,420 |
import platform
import re
def sanitize_app_name(name):
"""
App names can contain characters that are invalid for file system paths.
This function attempts to sanitize them depending on the platform.
"""
if platform.system() == "Windows":
return re.sub(r'[<>:"/\\|?*]', '', name)
else:
# assume unix-like if platform is not windows
return re.sub(r'[/]', '', name) | d585aef340ee2c5c263f555ebe5496dd19be5530 | 99,422 |
def get_totients(max_one: int) -> list[int]:
"""
Calculates a list of totients from 0 to max_one exclusive, using the
definition of Euler's product formula.
>>> get_totients(5)
[0, 1, 1, 2, 2]
>>> get_totients(10)
[0, 1, 1, 2, 2, 4, 2, 6, 4, 6]
"""
totients = [0] * max_one
for i in range(0, max_one):
totients[i] = i
for i in range(2, max_one):
if totients[i] == i:
for j in range(i, max_one, i):
totients[j] -= totients[j] // i
return totients | 779f9ac1730898dbcdc627577cc8cef3771ea25f | 99,424 |
import re
def author_to_query(author):
"""Helper function for template macro to convert an author string into a
search query.
:param author: unicode string from creator or contributor field of gutenberg index
:returns: whoosh query string to search creator/contributor fields for given author.
"""
    # contributor field provides extra details about the contributor's role in brackets -- strip that off so we can search for author in any role.
author = re.sub(r'\[[^\]]+\]', '', author).strip()
return u'creator:"{0}" OR contributor:"{0}"'.format(author) | 04c8d06323dbcf149786ab98fa003261979a57fd | 99,432 |
def chartoi(c):
"""
convert a single character to an integer
:param str c:
:return int:
"""
if len(c) == 1 and c.isdigit():
return int(c)
return 0 | c08f95ac67c59df123850ac17a158663da0b7915 | 99,433 |
def is_prefixed_with(string, prefix):
"""
Checks if the given string is prefixed with prefix
Parameters:
-----------------------------------
string: str
An input word / sub-word
prefix: str
A prefix to check in the word / sub-word
Returns:
-----------------------------------
bool : True if prefix, else False
"""
return string.find(prefix) == 0 | a95ed0b46e01f81c8267f8cbd2807f46b7b2c81f | 99,434 |
import random
def make2D_grid_random_fill(rows, cols):
"""Returns a 2d grid filled with RANDOM zeros and ones"""
grid = []
for x in range(0, cols):
grid.append([])
for y in range(0, rows):
            if random.randint(0, 2) == 0:  # 33% chance of filling square
grid[x].append(1)
else:
grid[x].append(0)
return grid | 431efe2155daa63bf3146000d96ff90f042a4e25 | 99,436 |
def ra_to_set(row):
"""Convert the ra columns into a set.
"""
return (row['ra_hrs'],row['ra_minutes'],row['ra_seconds']) | d96e3113a946e526d1823f65a31324093f4f2966 | 99,437 |
def find_div_highlight(value):
"""Look for <div class=highlight-<LANG>> blocks and return LANG """
[[_, classes, _], _] = value
for x in classes:
xs = x.split("-")
if len(xs) == 2 and xs[0] == "highlight":
return xs[1] | b4a6964a9c3cce1509642eeb2ebcb41f9c76e80b | 99,443 |
def improve_data(data_list: list, non_integers: list) -> list:
"""Takes a list of dictionaries containting the data, make sure all
dictionaries have the correct data, 0's all missing entries
Args:
data_list (list): list of dictionaries containing the data
non_integers (list): list of headers which should not be converted to
an integer
Returns:
list: improved list of data
"""
headers = list(data_list[0].keys())
for data in data_list:
for header in headers:
if data[header] == '':
data[header] = '0'
for data in data_list:
for header in headers:
if header not in non_integers:
data[header] = str(data[header])
data[header] = data[header].replace('*', '')
data[header] = float(data[header])
return data_list | c9345ebfb172a76d63c88f231fba058538a9ec77 | 99,446 |
def coerce_date_to_string(d):
"""
Force a datetime-like object to an ISO date format string yyyy-mm-dd.
Not too highly recommended, but used by SQLite, which uses strings for date.
If there are no year, month, day members, just force to a string. (For things like integer time axes from
models.)
:param d: datetime.date
:return: str
"""
try:
return '{0}-{1:02}-{2:02}'.format(d.year, d.month, d.day)
    except AttributeError:
        # No year/month/day members; tough luck, just force to a string
        return str(d) | b7c3eb043f59e6059476359d2f56a092a1a3ac50 | 99,447 |
import torch
def convert_boxes_to_locations(
boxes: torch.Tensor,
priors: torch.Tensor,
center_variance: float,
size_variance: float,
) -> torch.Tensor:
"""Convert boxes (x, y, w, h) to regressional location results of SSD
$$hat{center} * center_variance = \frac {center - center_prior} {hw_prior}$$
$$exp(hat{hw} * size_variance) = \frac {hw} {hw_prior}$$
:param boxes: center form boxes
:param priors: center form priors
:param center_variance: changes the scale of center
:param size_variance: changes scale of size
:return: locations for training SSD
"""
if priors.dim() + 1 == boxes.dim():
priors = priors.unsqueeze(0)
centers = (boxes[..., :2] - priors[..., :2]) / priors[..., 2:] / center_variance
hws = torch.log(boxes[..., 2:] / priors[..., 2:]) / size_variance
return torch.cat([centers, hws], dim=boxes.dim() - 1) | 253e2786552728ad23f1037de27b3bc8b65e54f9 | 99,451 |
def get_paramvals_percentile(table, percentile, chi2_arr):
"""
    Isolates the lowest chi^2 values within the given percentile and takes a
    random 100-row sample
    Parameters
    ----------
    table: pandas dataframe
        Mcmc chain dataframe
    percentile: int
        Percentile to use
    chi2_arr: array
        Array of chi^2 values
    Returns
    ---------
    mcmc_table_pctl: pandas dataframe
        Random 100-row sample of the lowest chi^2 values within the percentile
    bf_params: array
        Best-fit parameters, i.e. those corresponding to the smallest chi^2
"""
percentile = percentile/100
table['chi2'] = chi2_arr
table = table.sort_values('chi2').reset_index(drop=True)
slice_end = int(percentile*len(table))
mcmc_table_pctl = table[:slice_end]
# Best fit params are the parameters that correspond to the smallest chi2
bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\
values[0][:5]
# Sample random 1000 of lowest chi2
mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(100)
return mcmc_table_pctl, bf_params | 0e1d6675b04d5eef3e052af00d2a53196205b9b3 | 99,457 |
def make_diff_prop_header(path):
"""Return a property diff sub-header, as a list of newline-terminated
strings."""
return [
"\n",
"Property changes on: " + path.replace('\\', '/') + "\n",
"___________________________________________________________________\n"
] | 996b624e8aa93c44fb67915d8a49907a999804f6 | 99,467 |
def _mock_queryresult() -> str:
"""Create a mock catalog collection response."""
response = """
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix vcard: <http://www.w3.org/2006/vcard/ns#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://dataservice-publisher:8080/catalogs/1> a dcat:Catalog .
"""
return response | a2923f2447d568e53d4325e7b49ef8147bdfc5b8 | 99,476 |
def upperhexstr(buff):
"""Buffer -> Upper Human Readable Hex String"""
return ' '.join([("%.2x" % ord(c)).upper() for c in buff]) | e4e988b4f2422191d5fe16a5a9c8326594814cf0 | 99,482 |
def add_to_tuple(var, *args, **kw):
"""
Append new items inside a tuple.
This utility method should be used to modify settings tuples and lists.
Features:
* Avoid duplicates by checking whether the items are already there;
* Add many items at once;
* Allow to add the items before some other items, when order is important
(by default it appends). If the before item does not exist, just insert
the value at the end;
Example:
INSTALLED_APPS = add_to_tuple(INSTALLED_APPS, 'foo')
INSTALLED_APPS = add_to_tuple(INSTALLED_APPS, 'foo', 'bar')
    INSTALLED_APPS = add_to_tuple(INSTALLED_APPS, 'first', before='second')
INSTALLED_APPS = add_to_tuple(INSTALLED_APPS, 'second', after='first')
"""
before = kw.get('before')
after = kw.get('after')
var = list(var)
for arg in args:
if arg not in var:
if before and before in var:
var.insert(var.index(before), arg)
elif after and after in var:
var.insert(var.index(after)+1, arg)
else:
var.append(arg)
return tuple(var) | 048764f01f7eada6526c95e4fd41521f24c73b54 | 99,487 |
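A usage sketch with hypothetical app names:
    INSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth')
    add_to_tuple(INSTALLED_APPS, 'myapp')
    # ('django.contrib.admin', 'django.contrib.auth', 'myapp')
    add_to_tuple(INSTALLED_APPS, 'myapp', before='django.contrib.auth')
    # ('django.contrib.admin', 'myapp', 'django.contrib.auth')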
def linear_interpolation(start: float, stop: float, percent: float) -> float:
"""
Linear interpolation function
:param start: starting value for interpolation
:param stop: ending value for interpolation
:param percent: proportion of the way through interpolation (0.0 -> 1.0)
:return: the interpolated value
"""
return (1 - percent) * start + percent * stop | 681d2142cbbd86ea616c8872a31f2c74ce11fc36 | 99,488 |
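For example:
    linear_interpolation(0.0, 10.0, 0.25)  # 2.5
    linear_interpolation(5.0, 15.0, 0.5)   # 10.0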
def _target_to_aabb_polygons(target):
""" Transforms target AABB into 4 point polygons
The vehicle AABB representation is transformed into:
((xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax))
Parameters
----------
target : dict
single image label as provided by Boxy dataset
    Returns
    -------
        list of AABB as polygons defined by 4 points
"""
boxes = []
for box in target['vehicles']:
box = box['AABB']
aabb = [
(box['x1'], box['y1']),
(box['x2'], box['y1']),
(box['x2'], box['y2']),
(box['x1'], box['y2'])
]
boxes.append(aabb)
return boxes | b67189d3595e1d39707632f7322f38db5e0e273a | 99,489 |
def RobustL2Loss(output, log_std, target):
    """Robust L2 loss using a Gaussian prior with aleatoric uncertainty estimation.
    Returns the mean loss as a scalar tensor."""
loss = 0.5 * (output - target) ** 2 / (2 * log_std).exp() + log_std
return loss.mean() | 53a47a5c81b768271fee6bc4f45a26e047f9d438 | 99,491 |
def next_chunk(current: int,
length: int,
desired: int):
"""
This is a helper function that can be used whenever you want to access a large sequence
of data in chunks. It simply carries out the calculation that returns the extents of the
next chunk taking into account the ``length`` of the sequence. The sequence itself is not
required here, only the length.
:param current: the starting point of the chunk
:param length: the length of the sequence being chunked
:param desired: the requested length of the chunk
:return: A tuple of the chunk extents. The first value is inclusive; the second is exclusive
"""
    return current, min(current + desired, length) | 0b1a834e433b60a80a8433e320a1f0517423a0f8 | 99,498 |
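A typical driver loop (the chunk size of 4 is illustrative):
    start, length = 0, 10
    while start < length:
        start, end = next_chunk(start, length, 4)
        # yields (0, 4), then (4, 8), then (8, 10)
        start = end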
def solution(power: int = 1000) -> int:
"""
Returns the sum of the digits of the number `2 ^ power`.
>>> solution(1000)
1366
>>> solution(50)
76
>>> solution(20)
31
>>> solution(15)
26
"""
if not isinstance(power, int):
raise TypeError("The parameter `power` should be of type int.")
if power < 0:
raise ValueError("The value of `power` should be greater than or equal to 0.")
n = 2 ** power
digits = [int(digit) for digit in str(n)]
return sum(digits) | be99bb9aa22acfee37cd0998cab83044d8ae21b2 | 99,499 |
def is_version_dev_valid(dotted_ver_dev):
"""
Checks if the development marker is valid.
Args:
dotted_ver_dev (str): The dev marker portion of the version string to
check. This should have already stripped the plus (`+`) sign that needs
to immediately precede it.
Returns:
(bool or None): True if the dev marker is present and is the correct
format; False if there is text but it is the wrong format; None if the
string is empty.
"""
if not dotted_ver_dev:
return None
if dotted_ver_dev.lower() == 'dev':
return True
return False | 442006ee6b4ef578858be795d0d8464eba10be69 | 99,505 |
def url_join(parts):
""" Take various parts of a url and join them """
return "/".join(map(lambda part: part.strip('/'), parts)) | 778377572592a814913f734000f57fc080c586f9 | 99,506 |
def in_obstacle(obstacles, px, py):
"""
Check if query point is within any of the rectangular obstacles
:param obstacles: list of rectangular obstacles [(xmin, xmax, ymin, ymax)]
:param px: query point x coordinate
:param py: query point y coordinate
    :return: True if (px, py) lies inside any obstacle, False otherwise
"""
for (xmin, xmax, ymin, ymax) in obstacles:
if xmin <= px <= xmax and ymin <= py <= ymax:
return True
return False | d2dcd07cd8a6b04d0a00956218cb61d1e31ea5ce | 99,509 |
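For example:
    obstacles = [(0, 2, 0, 2), (5, 6, 5, 6)]
    in_obstacle(obstacles, 1, 1)  # True
    in_obstacle(obstacles, 3, 3)  # False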
import math
def tan(theNumber):
"""Returns math.tan(theNumber)."""
return math.tan(theNumber) | 478a9b228c5e9f97f8980b67375932bb9f0ebc02 | 99,515 |
def column_aspect_ratio(row):
"""Find aspect ratio of column."""
return row.column_length / row.column_width | 64dd5e018efc72ddb54f3ecc3b0a64fbaa51c6b8 | 99,519 |
def precedes(layers, n1, n2):
"""
Helper function to determine whether node n1 is in
a layer that immediately precedes the layer of node n2.
"""
i = [k for k in range(len(layers)) if n1 in layers[k]][0]
j = [k for k in range(len(layers)) if n2 in layers[k]][0]
return i + 1 == j | 350bdf2d12b8851933434f31312d8ddc6618b91f | 99,528 |
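For example:
    layers = [{'a', 'b'}, {'c'}, {'d'}]
    precedes(layers, 'a', 'c')  # True  (layer 0 immediately precedes layer 1)
    precedes(layers, 'a', 'd')  # False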
def concat_to_pair_list(pair_list, prefix='', suffix=''):
""" Add prefix and suffix to the strings in a a list of 2-tuple str.
Parameters
----------
pair_list: list of 2-tuples of str
prefix: str
suffix: str
Returns
-------
formatted_pair_list: list of 2-tuples of str
"""
return [(prefix + s[0] + suffix, prefix + s[1] + suffix) for s in pair_list] | 591b95f5ee4337f52ded8eb79399e32f9015e5ab | 99,538 |
from typing import List
from typing import Any
def _filter_trainable(param_list: List[Any]) -> List[Any]:
"""
    Keep only the trainable parameters of the model and return the list of
    trainable params.
"""
# Keep only the trainable params
return list(filter(lambda x: x.requires_grad, param_list)) | d942539d71f973c588f0797e72b69602931bdc31 | 99,545 |
def redfish_alias_missing(metadata):
"""
Utility function to check if RedfishExtensions.v1_0_0 is present but missing the Redfish alias
:param metadata: the metadata document
:type metadata: schema.Edmx
:return: True if Redfish alias is missing from include for RedfishExtensions.v1_0_0, False otherwise
"""
for reference in metadata.References:
for include in reference.Includes:
if include.Namespace == 'RedfishExtensions.v1_0_0':
if include.Alias != 'Redfish':
return True
return False | e5b8f95babc69fa367b7b6a0ddc29820a28a8d1e | 99,546 |
def check_Ns(seq):
"""Check whether DNA string contains Ns"""
valid = ['A', 'T', 'C', 'G']
for i in seq:
if i not in valid:
return False
return True | 906c8273e37c068dd1b9952f8cf2aa05632c47d3 | 99,547 |
import base64
import binascii
def get_payment_data(token):
"""Build a string of the concatenated payment
data provided in the apple pay token, including
the ephemeral public key, payload, transaction id
and the optional application data.
This assumes the provided token contains an ECDSA
signature. RSA is not supported.
Args:
token (dict): the decoded apple pay token
Returns:
        bytes: the concatenated payment data
"""
ephemeral_public_key = token['header']['ephemeralPublicKey']
payload = token['data']
transaction_id = token['header']['transactionId']
application_data = token['header'].get('applicationData') # optional
concatenated_data = base64.b64decode(ephemeral_public_key)
concatenated_data += base64.b64decode(payload)
concatenated_data += binascii.unhexlify(transaction_id)
if application_data:
concatenated_data += binascii.unhexlify(application_data)
return concatenated_data | b333a3a4baf786a13eecc32950f4e415c1ba8cd2 | 99,552 |
import socket
import errno
def ignore_eagain(func):
"""
errno.EAGAIN is generated in some platforms when the socket is
non-blocking. This decorator ignores this exception, as it's not an error.
"""
def decorate(ret, *kargs):
"""
Decorator.
"""
try:
return func(*kargs)
except socket.error as error:
if error.errno == errno.EAGAIN:
return ret
raise error
return decorate | 6b277c14e4c1e733bb64b277ba928325e96ec4d8 | 99,556 |
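Note the wrapped function's first positional argument becomes the fallback return
value. A usage sketch with a hypothetical non-blocking socket:
    sock.setblocking(False)
    recv = ignore_eagain(sock.recv)
    data = recv(b'', 4096)  # returns b'' instead of raising when no data is ready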
import random
def generateNum(start, finish):
"""Generate a random number between a start and finish value
:param start: the smallest possible number generated
:type start: int
:param finish: the largest possible number generated
:type finish: int
:returns: random number between start and finish parameter values
:rtype: int
    >>> generateNum(1, 50)
    any value between 1 and 50 inclusive
    >>> generateNum(1, 10)
    any value between 1 and 10 inclusive
STRATEGY: random method from the random library
"""
return random.randint(start, finish) | 55d122bae0ce2ea0c16092639027c8da3caddfc6 | 99,560 |
def MMFMultipleFiles(rootName, fileIndex):
""" Returns the name of the MMF file 'fileIndex' when a result is saved in multiple binary files.
It takes 2 arguments: root name of the files and the file number whose name is requested (from 0 to MMFHeader.noDensityFiles-1)."""
return rootName + ".%i" % fileIndex | c6864dd539e157d18b7b8a29489bb70f45c7d790 | 99,564 |
def partition_probes_and_true_rel_by_split(probes, true_relatives, fid_splits):
"""
Separate probes and true relatives by split.
Parameters
---------------
    probes:
A dictionary mapping from FID to a probe MID.
true_relatives:
A dictionary mapping from FID to a list of true relatives.
fid_splits:
A dataframe with columns [FID, set], which indicates the split each
FID belongs to.
Returns
----------------
probes_by_split:
A dictionary with keys being splits, and the values being the split original probe dict.
        As a concrete example, we turn probes = {"F0002": "F0002/MID1", "F0003": "F0003/MID4", ...}
        into {"train": {"F0002": "F0002/MID1"}, "test": {"F0003": "F0003/MID4"}}.
true_relatives_by_split:
A dictionary with keys being splits, and the values being the split original true relatives
dict. Exactly what happens to probes_by_split.
"""
split_names = fid_splits["set"].unique()
probes_by_split = {}
true_relatives_by_split = {}
for split_name in split_names:
fids_in_split = fid_splits[fid_splits["set"].eq(split_name)].FID.values
probes_by_split[split_name] = {fid: probes[fid] for fid in fids_in_split}
true_relatives_by_split[split_name] = {
fid: true_relatives[fid] for fid in fids_in_split
}
return probes_by_split, true_relatives_by_split | 86fe1c1b5a376adaa217e001ed50c7574f06606a | 99,565 |
def format_counts(counts):
"""Formats the contents of the counts dictionary into a string suitable
for output.
Args:
counts - dictionary containing the attendance statistics
Returns:
string containing the counts formatted for output
"""
fstr = ("Attendance Figures:\n"
" Registrations: {0}\n"
" Total Attendees: {1}\n"
" Registered No Shows: {2}\n"
" Non-registered Attendees: {3}\n")
return fstr.format(counts['registrants']
,counts['attendees']
,counts['reg_no_attend']
,counts['attend_no_reg']) | 9161a38ea65ddf99a4141b6528342ffd9b1cb67b | 99,572 |
def extract_content(messages, msg_type):
"""Extract content from messages received from a kernel."""
return [
message['content']
for message
in messages
if message['header']['msg_type'] == msg_type] | 9ad6de8195f4250f8662e3d0a1df7b9066690912 | 99,573 |
import requests
def post_request(url, payload):
"""
Gets a url and arguments (key, value) and creates and submits a POST request
:param url: string
:param payload: python dict
:return: post request object
"""
return requests.post(url, data=payload) | 40f7bbc2cb5b7c93357451e2fb7c2ed748fa2b27 | 99,578 |
import uuid
import re
def identifier_str(value=None, allow_prefix=False):
"""Generate or validate a unique identifier.
If generating, you will get a UUID in hex format
.. testsetup:: idstr
from idaes.core.dmf.resource import identifier_str
.. doctest:: idstr
>>> identifier_str() #doctest: +ELLIPSIS
'...'
If validating, anything that is not 32 lowercase letters
or digits will fail.
.. doctest:: idstr
>>> identifier_str('A' * 32) #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
    ValueError: bad format for identifier "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA":
must match regular expression "[0-9a-f]{32}"
    Args:
        value (str): If given, validate that it is a 32-byte str.
            If not given or None, set new random value.
        allow_prefix (bool): If True, also accept a 1-32 character hex
            prefix as valid.
    Raises:
        ValueError, if a value is given, and it is invalid.
    """
# regular expression for identifier: hex string len=32
if allow_prefix:
id_expr = "[0-9a-f]{1,32}"
else:
id_expr = "[0-9a-f]{32}"
if value is None:
value = uuid.uuid4().hex
    elif not re.fullmatch(id_expr, value):
raise ValueError(
'bad format for identifier "{}": must match '
'regular expression "{}"'.format(value, id_expr)
)
return value | 6707e209b3c840b092dfab4f23b8d0b0b2f79aef | 99,593 |
def make_recommendation(inner_knee_angle, ideal_angle=145, buffer=5):
"""Returns a recommendation based on the difference from the ideal angle
Args:
inner_knee_angle: actual angle of the user
ideal_angle: target angle
buffer: accepted range above and below ideal_angle
Returns:
str: 'UP', 'DOWN', 'NOOP'
"""
if inner_knee_angle < ideal_angle - buffer:
return "UP"
elif inner_knee_angle > ideal_angle + buffer:
return "DOWN"
return "NOOP" | 05025b98201c1dae210d9c745fb03e051f6fcc37 | 99,596 |
def generate_x_y_fov_pairs_rhombus(top_left, top_right, bottom_left, bottom_right,
num_row, num_col):
"""Generates coordinates (in microns) of FOVs as defined by corners of a rhombus
Args:
top_left (XYCoord): coordinate of top left corner
top_right (XYCoord): coordinate of top right corner
bottom_left (XYCoord): coordinate of bottom right corner
        bottom_right (XYCoord): coordinate of bottom right corner
num_row (int): number of fovs on row dimension
num_col (int): number of fovs on column dimension
Returns:
list: coordinates for all FOVs defined by region"""
# compute the vertical shift in the top and bottom row of the TMA
top_row_shift = top_right.y - top_left.y
bottom_row_shift = bottom_right.y - bottom_left.y
# average between the two will be used to increment indices
avg_row_shift = (top_row_shift + bottom_row_shift) / 2
# compute horizontal shift in the left and right column of the TMA
left_col_shift = bottom_left.x - top_left.x
right_col_shift = bottom_right.x - top_right.x
# average between the two will be used to increment indices
avg_col_shift = (left_col_shift + right_col_shift) / 2
# compute per-FOV adjustment
row_increment = avg_row_shift / (num_col - 1)
col_increment = avg_col_shift / (num_row - 1)
# compute baseline indices for a rectangle with same coords
row_dif = bottom_left.y - top_left.y
col_dif = top_right.x - top_left.x
row_baseline = row_dif / (num_row - 1)
col_baseline = col_dif / (num_col - 1)
pairs = []
for i in range(num_col):
for j in range(num_row):
x_coord = top_left.x + col_baseline * i + col_increment * j
y_coord = top_left.y + row_baseline * j + row_increment * i
pairs.append((int(x_coord), int(y_coord)))
return pairs | 54b564249e89e62b037bbf1c52150695e2510f8e | 99,597 |
def _kind(kind, expr):
"""
Build a `kind(...)` type query expression.
`kind` is a regex that matches on the rule type.
    `expr` is a query expression of any supported type.
"""
return "kind('{}', {})".format(kind, expr) | 698d7aec042e2d859bdc3a3ebc6367c65d2d9054 | 99,600 |
from pathlib import Path
def extract_filename(input_data: str) -> "tuple[str, str, str]":
"""Extract the component of the filename
Args:
input_data (str): Filename with extension
Returns:
Python tuple with path, filename and extension
"""
_filename = Path(input_data.strip('"'))
path: str = str(_filename.parent) if str(_filename.parent) != "." else ""
filename: str = _filename.stem
extension: str = _filename.suffix
return path, filename, extension | 55fc09e51e0cbe154fc44bc9ff5743f67772f473 | 99,601 |
def exif_offset_to_seconds(offset: str) -> int:
"""Convert timezone offset from UTC in exiftool format (+/-hh:mm) to seconds"""
sign = 1 if offset[0] == "+" else -1
hours, minutes = offset[1:].split(":")
return sign * (int(hours) * 3600 + int(minutes) * 60) | 58a26bd28efb56f61b9122e976ea934a27be5c18 | 99,607 |
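For example:
    exif_offset_to_seconds('+02:00')  # 7200
    exif_offset_to_seconds('-05:30')  # -19800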
def determine_steps(n_samples, n_optimizations):
"""Determine the number and type of steps for the multistart optimization.
This is mainly used to write them to the log. The number of steps is also
used if logging is False.
Args:
n_samples (int): Number of exploration points for the multistart optimization.
n_optimizations (int): Number of local optimizations.
Returns:
list: List of dictionaries with information on each step.
"""
exploration_step = {
"type": "exploration",
"status": "running",
"name": "exploration",
"n_iterations": n_samples,
}
steps = [exploration_step]
for i in range(n_optimizations):
optimization_step = {
"type": "optimization",
"status": "scheduled",
"name": f"optimization_{i}",
}
steps.append(optimization_step)
return steps | 82a6c160e4da4df1595fd4c216cdbd1772049eb1 | 99,608 |
def read_text_file( file_path: str, mode = 'r' ) -> str:
"""reads text file at the given file_path, returns the contents"""
    with open(file_path, mode) as f:
        return f.read() | c174a9865f00c564921793f76bc21f6b701c237f | 99,609 |
import random
import string
def random_string(length: int = 16) -> str:
"""Returns a random string of readable characters."""
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length)) | cf1ba900c37007823d05b1a0d74e825fbb484f59 | 99,611 |