content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def timeToMicrosecondFormat(time_str, nzeros=6):
    """Normalize the fractional part of a time string to `nzeros` digits.

    Args:
        time_str: Time string, e.g. "12:00:01" or "12:00:01.5".
        nzeros: Number of fractional digits desired (default 6 = microseconds).

    Returns:
        The time string with exactly `nzeros` fractional digits (padded with
        zeros, or truncated when the input carries more precision).
    """
    dot = time_str.rfind(".")
    if dot < 0:
        # No fractional part at all: append one made entirely of zeros.
        return time_str + "." + "0" * nzeros
    n_frac = len(time_str) - dot - 1
    if n_frac > nzeros:
        # The original code produced "0" * negative == "" here, silently
        # returning a string with MORE than nzeros digits; truncate instead.
        return time_str[:dot + 1 + nzeros]
    return time_str + "0" * (nzeros - n_frac)
from typing import Tuple
from typing import List
def partition(condition, collection) -> Tuple[List, List]:
    """Split `collection` into (matching, non-matching) lists by `condition`."""
    matched: List = []
    unmatched: List = []
    for element in collection:
        target = matched if condition(element) else unmatched
        target.append(element)
    return matched, unmatched
def first_of(iterable, function):
    """
    Return the first element of `iterable` for which `function` is truthy,
    or None when nothing matches. Useful when you already know there is at
    most one matching element.
    """
    return next((candidate for candidate in iterable if function(candidate)), None)
def is_not_integer(input_messages):
    """Return True when at least one element is NOT an int, False otherwise."""
    return any(not isinstance(message, int) for message in input_messages)
def clean_plan_name(plan_name):
    """Standardize a plan name: spaces become underscores, letters lowercased."""
    underscored = plan_name.replace(' ', '_')
    return underscored.lower()
def refractivity_dry_nondispersive(ν, θ, pd, e):
    """Complex refractivity due to dry air, non-dispersive term.
    ν GHz frequency at which refractivity is evaluated
    θ - reciprocal temperature
    pd hPa pressure of dry air
    e hPa pressure of water vapor
    Liebe et al. (1993).
    """
    # NOTE(review): ν and e are unused here — the non-dispersive dry term
    # depends only on dry-air pressure and reciprocal temperature; the extra
    # parameters presumably keep a uniform signature across refractivity
    # terms — confirm against the sibling term functions.
    # 0.2598e-6 is the MPM model coefficient from Liebe et al. (1993).
    return 0.2598e-6 * pd * θ
def is_xblock_an_assignment(xblock):
    """
    Return whether a leaf xblock counts as an assignment: it must be graded,
    carry a score, and have a positive (or unset) weight.
    """
    is_graded = getattr(xblock, 'graded', False)
    weight = getattr(xblock, 'weight', 1)
    weight_counts = weight is None or weight > 0
    is_scored = getattr(xblock, 'has_score', False) and weight_counts
    return is_graded and is_scored
def pids_value_dict(profDict, value):
    """Map each prof entry's 'pid' to that entry's field named by `value`.

    Later entries sharing a pid overwrite earlier ones.
    """
    return {prof['pid']: prof[value] for prof in profDict}
def withquerytype(query, is_function=False):
    """
    Parse query type from query component
    >>> withquerytype('{a: 5, ...}')
    ('update', '{a: 5}')
    >>> withquerytype('{a: 5}')
    ('map', '{a: 5}')
    >>> withquerytype('(.a > 5)')
    ('filter', '(.a > 5)')
    >>> withquerytype('count()', True)
    ('function', 'count()')
    >>> withquerytype('sorted(.a)', True)
    ('function', 'sorted(lambda x: (.a))')
    """
    if is_function:
        # Known higher-order helpers: strip the 6-char name ('unique'/'sorted')
        # or the 'yield from ' prefix and wrap the remaining '(...)' argument
        # in a lambda for later evaluation.
        if query.startswith("unique"):
            query = query[6:]
            return "function", f"unique(lambda x: {query})"
        elif query.startswith("sorted"):
            query = query[6:]
            return "function", f"sorted(lambda x: {query})"
        elif query.startswith("yield from "):
            query = query[11:]
            return "function", f"yield_from(lambda x: {query})"
        else:
            # Attribute access on each element becomes a mapped lambda.
            if query.startswith("x."):
                return "function", "lambda y: map(lambda x: " + query + ", y)"
            # Split a call of the form 'name(args)' at the first '('.
            parts = query.split("(", 1)
            if len(parts) == 1:
                # Bare name with no call: map it over the input.
                return "function", "lambda y: map(lambda x: " + parts[0] + ", y)"
            if len(parts[1]) < 2:
                # Zero-argument call such as 'count()': keep it verbatim.
                return "function", "(".join(parts)
            # Call with arguments: wrap the argument in a lambda, e.g.
            # 'sorted(.a)' -> 'sorted(lambda x: .a)'.
            ret = "function", "(lambda x: ".join(parts)
            return ret
    elif query[-1] == ")":
        # A parenthesised predicate acts as a filter.
        return "filter", query
    elif query[-6:] == ", ...}":
        # '{..., ...}' marks a partial update; drop the trailing ellipsis.
        return "update", query[:-6] + "}"
    elif query[-5:] == ",...}":
        # Same, without the space before the ellipsis.
        return "update", query[:-5] + "}"
    return "map", query
def hex2rgb(hex):
    """
    Convert a hex color string such as '#00AAFF' to an (R, G, B) tuple.
    """
    digits = hex.strip('#')
    channels = []
    for offset in (0, 2, 4):
        channels.append(int(digits[offset:offset + 2], 16))
    return tuple(channels)
def new_size_by_croping_ratio(original_size, target_size, crop_type='center'):
    """Return a tuple of top-left and bottom-right points (x1, y1, x2, y2) coresponding
    to a crop of the original size keeping the same aspect ratio of the target size.
    Note: target_size is only used to calculate aspect ratio, the returned coordinates
    doesn't fit to it.
    The position of the rectangle can be defined by the crop_type parameter:
    * top-left
    * top-center
    * top-right
    * center-left
    * center
    * center-right
    * bottom-left
    * bottom-center
    * bottom-right
    """
    # Get current and desired ratio for the images (width / height).
    img_ratio = original_size[0] / float(original_size[1])
    ratio = target_size[0] / float(target_size[1])
    # Start with the full original size; shrink exactly one dimension so the
    # crop window (tx, ty) matches the target aspect ratio.
    tx, ty = original_size
    if ratio > img_ratio:
        # crop on constant width
        ty = int(original_size[0] / ratio)
    elif ratio < img_ratio:
        # crop on constant height
        tx = int(ratio * original_size[1])
    # Position the (tx, ty) window inside the original according to crop_type:
    # the suffix picks the horizontal anchor, the prefix the vertical one.
    # 'center' matches both endswith('center') and startswith('center').
    x, y = 0, 0
    if crop_type.endswith('left'):
        x = 0
    elif crop_type.endswith('center'):
        x = (original_size[0] - tx) // 2
    elif crop_type.endswith('right'):
        x = original_size[0] - tx
    if crop_type.startswith('top'):
        y = 0
    elif crop_type.startswith('center'):
        y = (original_size[1] - ty) // 2
    elif crop_type.startswith('bottom'):
        y = original_size[1] - ty
    return (x, y, tx + x, ty + y)
def get_profile_item_image_upload_path(item, filename):
    """
    Build the storage path for a profile item's uploaded image.
    Args:
        item:
            The profile item whose image is being uploaded.
        filename:
            The original name of the image being uploaded.
    Returns:
        The upload path, namespaced by the owning km_user's id.
    """
    km_user_id = item.topic.profile.km_user.id
    return f"know-me/users/{km_user_id}/profile-images/{filename}"
def _select_fields_for_receive(transcript_group):
"""Selects only the fields for receive model training
Args:
transcript_group (pandas.DataFrame): The transcript dataset
Returns:
pandas.DataFrame: The modified transcript with the fields for receive model training
"""
cols = [
"purchased",
"gender",
"age",
"income",
"membership_year",
"membership_month",
"membership_day",
"gen_z",
"millenials",
"gen_x",
"boomers",
"silent",
"young",
"adult",
"middle_age",
"old"
]
transcript_group = transcript_group[cols]
return transcript_group | 72b99c2f07df6b501d8516f3eddb752cba2adea6 | 107,894 |
def get_width(image):
    """get_width(image) -> integer width of the image (number of columns).

    The image must be a rectangular list of lists; the width is the length
    of the first row. An empty image has width 0.
    """
    return len(image[0]) if image else 0
import re
import io
def get_version(filename):
    """
    Trivial parser to extract a __version__ variable from a source file.
    :param str filename: the file to extract __version__ from
    :returns str: the version string for the package
    :raises Exception: when the __version__ line is malformed or missing
    """
    # Accept multi-digit components (e.g. '2.10', '10.0.1'); the original
    # pattern r'(\d\.\d(\.\d+)?)' only matched single-digit major/minor.
    version_re = re.compile(r'(\d+\.\d+(\.\d+)?)')
    with io.open(filename, 'r') as source:
        for line_num, line in enumerate(source):
            if line.startswith('__version__'):
                match = version_re.search(line)
                if not match:
                    raise Exception(
                        'Invalid __version__ string found on '
                        'line %d of %s' % (line_num + 1, filename))
                return match.group(1)
    raise Exception('No __version__ line found in %s' % filename)
def read_enabled_tests(filename):
    """Return the enabled tests listed one per (stripped) line of `filename`."""
    with open(filename) as fin:
        return [line.strip() for line in fin]
def print_failed(failed):
    """Render failure counts as an indented report string.

    Entries under the 'failed-cutoff' key are skipped. Despite the name,
    nothing is printed — the report string is returned.
    """
    report = []
    for key in sorted(failed):
        if key == 'failed-cutoff':
            continue
        title = key.replace('-', ' ').title()
        report.append(f'  {title}: {len(failed[key])}')
    return '\n'.join(report)
import torch
def t_normalize_adj(t_adj):
    """Symmetrically normalize adjacency matrix.

    Computes D^-0.5 . A . D^-0.5, where D is the diagonal degree matrix of
    `t_adj`; zero-degree rows get a zero inverse-sqrt degree instead of inf.
    """
    rowsum = t_adj.sum(1)  # node degrees: the diagonal of D
    d_inv_sqrt = 1 / torch.sqrt(rowsum)
    # Isolated nodes have degree 0 -> 1/sqrt(0) = inf; zero those out.
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.0
    d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
    # (A . D^-0.5)^T . D^-0.5 == D^-0.5 . A . D^-0.5 for symmetric A
    # (translated from the original Chinese comment).
    return t_adj.mm(d_mat_inv_sqrt).t().mm(d_mat_inv_sqrt)
def postprocess(context_words, answer):
    """Post-process inference results to extract the chosen answer span.
    Parameters
    --------
    context_words : numpy array
        Original context words
    answer : InferResult
        Triton inference result containing start and end positions of the
        desired answer
    Returns
    --------
    List of encoded words from the context that answer the given query.
    """
    start = answer.as_numpy("start_pos")[0]
    end = answer.as_numpy("end_pos")[0]
    print(f"start is {start}, end is {end}")
    span = context_words[start : end + 1].reshape(-1)
    return [word.encode() for word in span]
def order(name: str):
    """
    Extract the image index embedded in a file name (poor naming scheme).
    :param name: name from which to extract index
    :return: integer index of name
    """
    parts = name.split('_')
    if name.startswith('pred'):
        # New file format carries a >10-char hash as the second-to-last part.
        if len(str(parts[-2])) > 10:
            return int(parts[-3])
        return int(parts[-2])
    stem = parts[-1].split('.')[0]
    return int(stem)
def format_version(version):
    """Normalize a Demisto server version string to the X.X.X form.
    Args:
        version (string): string representing Demisto version
    Returns:
        string. The version padded with '.0' components as needed.
    """
    dot_count = version.count('.')
    if dot_count == 0:
        return f'{version}.0.0'
    if dot_count == 1:
        return f'{version}.0'
    return version
def get_indent(text):
    """
    Return the indentation of `text`.
    :param text: String, can be multi-line. Only the first non-empty line
        determines the indentation.
    :return: Number of leading whitespace characters (0 for empty text)
    """
    for line in text.split('\n'):
        if line == '':
            continue
        return len(line) - len(line.lstrip())
    return 0
from typing import Tuple
from typing import Optional
import re
def parse_ansible_version(stdout: str) -> Tuple[str, Optional[str]]:
    """Parse 'ansible --version' output into (version, error_message)."""
    patterns = (
        # ansible-core 2.11+: 'ansible [core 2.11.3]'
        r"^ansible \[(?:core|base) ([^\]]+)\]",
        # ansible-base 2.10 and Ansible 2.9: 'ansible 2.x.y'
        r"^ansible ([^\s]+)",
    )
    for pattern in patterns:
        found = re.match(pattern, stdout)
        if found:
            return found.group(1), None
    return "", "FATAL: Unable parse ansible cli version: %s" % stdout
def _get_gmb_feed_mapping(client, customer_id, feed_resource_name):
    """Gets a Google My Business Feed mapping.
    Args:
        client: An initialized Google Ads client.
        customer_id: The customer ID for which the call is made.
        feed_resource_name: The string Google My Business feed resource name.
    Returns:
        The requested FeedMapping, or None if it is not available.
    """
    googleads_service = client.get_service("GoogleAdsService")
    # GAQL query for the enabled LOCATION feed mapping of the given feed.
    # NOTE(review): feed_resource_name is interpolated directly into the
    # query string; presumably it is an internally generated resource name,
    # not user input — confirm at the call sites.
    query = f"""
        SELECT feed_mapping.resource_name, feed_mapping.status
        FROM feed_mapping
        WHERE
        feed_mapping.feed = '{feed_resource_name}'
        AND feed_mapping.status = ENABLED
        AND feed_mapping.placeholder_type = LOCATION
        LIMIT 1"""
    result = googleads_service.search(customer_id=customer_id, query=query)
    # The search stream yields at most one row (LIMIT 1); an empty stream
    # raises StopIteration, which we map to None.
    try:
        return next(iter(result)).feed_mapping
    except StopIteration:
        return None
import logging
def exists_handler_with_name(name: str) -> bool:
    """
    Return True when the root logger has a handler whose ``name`` attribute
    equals `name`, False otherwise.
    """
    root_handlers = logging.getLogger().handlers
    return any(
        hasattr(handler, 'name') and handler.name == name
        for handler in root_handlers
    )
def name2env(name):
    """
    Convert a dotted name of the form 'radical.pilot' to an env-var base
    name like 'RADICAL_PILOT'.
    """
    return name.upper().replace('.', '_')
def file_is_gamess(file):
    """Return True when the file's first line mentions 'rungms' (GAMESS output)."""
    with open(file, "r") as handle:
        first_line = handle.readline()
    return "rungms" in first_line
def source_file_filter(input_api):
    """Returns a filter callable that selects source code files only."""
    # Extend the defaults: skip generated Go files, accept hand-written .go.
    black_list = list(input_api.DEFAULT_BLACK_LIST) + [
        r'.+\.pb\.go$',
        r'.+_string\.go$',
    ]
    white_list = list(input_api.DEFAULT_WHITE_LIST) + [
        r'.+\.go$',
    ]
    def accepts(affected_file):
        return input_api.FilterSourceFile(
            affected_file, white_list=white_list, black_list=black_list)
    return accepts
def getPortFromUrl(url):
    """Return the TCP port for the given url.

    Falls back to the scheme default (80 for http, 443 for https) when no
    explicit port is present; returns None for unix domain sockets.
    Raises ValueError for an empty or unsupported url.
    """
    if not url:
        raise ValueError("url undefined")
    if url.startswith("http://"):
        default_port = 80
    elif url.startswith("https://"):
        default_port = 443
    elif url.startswith("http+unix://"):
        # unix domain socket — no TCP port
        return None
    else:
        raise ValueError(f"Invalid Url: {url}")
    # Everything from '//' onward; look for an explicit ':port'.
    authority = url[url.find('//'):]
    colon = authority.find(':')
    if colon <= 0:
        return default_port
    digits = []
    for ch in authority[colon + 1:]:
        if not ch.isdigit():
            break
        digits.append(ch)
    if not digits:
        return default_port
    return int(''.join(digits))
def list_inventory(inventory):
    """
    Return the inventory as (item, count) tuples, skipping non-positive counts.
    :param inventory: dict - an inventory dictionary.
    :return: list of tuples - key, value pairs with value > 0.
    """
    return [(name, count) for name, count in inventory.items() if count > 0]
def clear_messages(request, key=None):
    """Reset the session message list (optionally namespaced by `key`).

    Meant to be called from a template after rendering messages; returns an
    empty string so it renders as nothing.
    """
    session_key = f'messages__{key}' if key else 'messages'
    request.session[session_key] = []
    return ''
import re
def ok_wallet_reft(token: str) -> bool:
    """
    Whether the input token looks like a valid wallet credential identifier
    (aka wallet referent, wallet cred id, wallet cred uuid): a lowercase
    hex UUID of the 8-4-4-4-12 form.
    :param token: candidate string (None is treated as invalid)
    :return: True when the token matches, False otherwise
    """
    uuid_pattern = r'[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$'
    return re.match(uuid_pattern, token or '') is not None
def top_ten(df, col_na):
    """Return the country names of the 10 largest rows by `col_na`.
    keyword arguments
    df - dataframe with a 'Country_Name' column and the metric column
    col_na - metric column to rank by (e.g. medals)
    returns - list of up to 10 country names
    """
    leaders = df.nlargest(10, col_na)
    return list(leaders['Country_Name'])
def pu_score(y, yhat):
    """
    Returns a score used for PU learning as introduced in [LEE2003]_.
    :param y: true function values
    :param yhat: predicted function values
    :returns:
    .. math:: \\frac{P(\\hat{y}=1 | y=1)^2}{P(\\hat{y}=1)}
    y and yhat must be boolean vectors.
    Higher is better. Returns 0.0 when there are no predicted positives or
    no true positives.
    .. [LEE2003] Wee Sun Lee and Bing Liu. Learning with positive and unlabeled examples
        using weighted logistic regression. In Proceedings of the Twentieth
        International Conference on Machine Learning (ICML), 2003.
    """
    num_pos = sum(y)
    p_pred_pos = float(sum(yhat)) / len(y)
    # Guard BOTH degenerate cases: the original only checked p_pred_pos and
    # raised ZeroDivisionError when y contained no positives.
    if p_pred_pos == 0 or num_pos == 0:
        return 0.0
    tp = sum(all(pair) for pair in zip(y, yhat))
    return tp * tp / (num_pos * num_pos * p_pred_pos)
def add_url_parameters(url, **parameters):
    """ Add url parameters to the base url.
    :param url: base url.
    :type url: str
    :kwargs: key-value pairs of url parameters; pairs whose value is None
        are dropped.
    :return: url string.
    :rtype: str
    """
    kept = [(k, v) for k, v in parameters.items() if v is not None]
    if not kept:
        return url
    query = "&".join(f"{k}={v}" for k, v in kept)
    return f"{url}?{query}"
import itertools
def grouper(iterable, n, fillvalue=None):
    """ Iterate over fixed-size chunks of `iterable`.
    The last chunk is padded with `fillvalue` when the length is not a
    multiple of n. From: http://stackoverflow.com/a/434411/6079076
    """
    # n references to ONE iterator: zip_longest pulls n items per chunk.
    iterators = (iter(iterable),) * n
    return itertools.zip_longest(*iterators, fillvalue=fillvalue)
import heapq
def dijkstra(graph):
    """
    Dijkstra's shortest-path algorithm from vertex 0.

    `graph` is an adjacency list: graph[v] is a list of (neighbor, weight)
    pairs with 1-indexed neighbors. Uses a binary min-heap as the priority
    queue: O(E log E), plus O(V) to initialize the distance table.
    """
    n = len(graph)
    done = [False] * n
    dist = [float("inf")] * n
    dist[0] = 0
    heap = [(0, 0)]
    while heap:
        _, vertex = heapq.heappop(heap)
        if done[vertex]:
            continue
        done[vertex] = True
        for neighbor, weight in graph[vertex]:
            target = neighbor - 1  # edges store 1-indexed vertex ids
            candidate = dist[vertex] + weight
            if candidate < dist[target]:
                dist[target] = candidate
                heapq.heappush(heap, (candidate, target))
    return dist
def get_unique_name(collection, name, spacer='_'):
    """
    Create a unique key for a collection by appending a number when the
    name already exists.
    :param collection: A list, collection, array, ...
    :param name: Name (for instance 'Points')
    :param spacer: Spacer char
    :return: New name, for instance 'Points_4'
    """
    if name not in collection:
        return name
    similar_ones = []
    max_value = 0
    for key in collection:
        if name in key:
            similar_ones.append(key)
            if spacer in key:
                # Only numeric suffixes contribute to max_value; the
                # original called int() unconditionally and crashed on
                # keys like 'Points_extra'.
                suffix = key.split(spacer)[1]
                if suffix.isdigit():
                    max_value = max(int(suffix), max_value)
    value = max(len(similar_ones), max_value)
    return f'{name}{spacer}{value}'
def concatenate_path_pk(path, *args):
    """
    Join an endpoint path with additional components for an object query.

    Components must be supplied in the order the Bexio API expects so the
    resulting endpoint is valid — check the official docs for the final
    path's shape.
    :param: str, endpoint path to be used (must be first arg)
    :param: list, *args that come after
    :return: str, concatenated path
    """
    pieces = [str(path)]
    pieces.extend(str(arg) for arg in args)
    return '/'.join(pieces)
def nested_dict_get_path(key, var):
    """Search a nested dictionary for `key` and return the list of keys
    forming the complete path to it.

    Returns an empty list if the key is not found.
    Warning: if the key occurs at multiple nest levels, only one of those
    paths is returned.
    """
    # The original tested hasattr(var, 'iteritems') — a Python 2 attribute
    # no Python 3 dict has — so it always returned []. It also extended the
    # path with intermediate keys even when the recursion found nothing.
    if hasattr(var, 'items'):
        for k, v in var.items():
            if k == key:
                return [k]
            if isinstance(v, dict):
                sub_path = nested_dict_get_path(key, v)
                if sub_path:
                    return [k] + sub_path
    return []
def is_letter_correct(letter_guessed, hidden_word):
    """
    letter_guessed: string, letter the user guessed
    hidden_word: string, the hidden word to guess
    returns: True if letter_guessed does NOT occur in hidden_word, else False
    """
    return letter_guessed not in hidden_word
def get_points_tally_tab_url(sheet, tab_name: str = 'Tallies By Category'):
    """Build the direct URL to a worksheet tab (default 'Tallies By Category')."""
    tab_id = sheet.worksheet(tab_name).id
    return f"{sheet.url}/edit#gid={tab_id}"
import random
def generate_number() -> int:
    """Generate a random number from 1 to 10 inclusive.
    Returns:
        int: Random number.
    """
    return random.choice(range(1, 11))
def temp_from_voltage(V: float) -> float:
    """Convert a TI LM61 sensor voltage (V) to temperature in °C.

    The LM61 transfer function is Vout = 0.6 V + 10 mV/°C, i.e. T = 100*V - 60.
    Datasheet: https://datasheet.lcsc.com/szlcsc/Texas-Instruments-
    TI-LM61BIM3-NOPB_C132073.pdf"""
    slope_per_volt = 100.
    return slope_per_volt * V - 60.
def get_user_names(group):
    """
    Extract the member nicknames from a Groupy group.
    :param group: Groupy group to extract names from
    :return: List of user names
    """
    return [member.nickname for member in group.members()]
import fnmatch
def _match(name, inlist):
"""Return True if the given name matches any of the
contents of the list of glob patterns inlist.
"""
for pat in inlist:
if fnmatch.fnmatchcase(name, pat):
return True
return False | d70c5373259b24ad12ed6de7c593663640a7be1e | 107,991 |
def trans_array(data, _axes):
    """Calculate the axis order for a transpose that moves `_axes` to the end
    (used by anFFT).
    :param data: array whose rank (ndim) determines the axis count
    :type data: numpy array
    :param _axes: axis index or iterable of axes to FFT over
    :type _axes: int or list
    :return: list of axis indices with the FFT axes moved to the end
    """
    order = list(range(data.ndim))
    try:
        fft_axes = list(_axes)
    except Exception:  # a single integer axis
        fft_axes = [_axes]
    # Remove in descending order so earlier pops don't shift later indices.
    fft_axes.sort(reverse=True)
    for axis in fft_axes:
        order.pop(axis)
    order.extend(fft_axes)
    return order
def get_sdf_type(type):
    """
    Map a timing keyword to its SDF name plus a flag telling whether the
    check is sequential. Returns (None, None) for an unknown keyword.
    """
    # keyword -> (SDF name, is_sequential)
    sdf_map = {
        "Setup": ("setup", True),
        "Hold": ("hold", True),
        "Recov": ("recovery", True),
        "Remov": ("removal", True),
        "Prop": ("iopath", False),
    }
    return sdf_map.get(type, (None, None))
import torch
def get_random_cuda_device() -> str:
    """
    Pick a random GPU device string from the available devices. Useful for
    testing that custom cuda kernels support inputs on any device without
    setting the device explicitly.
    """
    num_devices = torch.cuda.device_count()
    if num_devices > 1:
        device_id = torch.randint(high=num_devices, size=(1,)).item()
    else:
        device_id = 0
    return "cuda:%d" % device_id
def get_attr(node, attrname, default=None):
    """Return the content of a node's attr, or `default` when absent."""
    if attrname not in node.attrs:
        return default
    return node.attrs[attrname].content
async def health() -> dict:
    """
    Report service health status.
    :return: Empty JSON Object.
    :rtype: `python:dict`
    """
    return dict()
def starting_guess(mini, estimate_sigma=True):
    """
    Build sampler starting values from a best fit.

    Takes the value of every varying parameter of `mini`. When
    `estimate_sigma` is true, the residual's standard deviation is appended
    as an extra sampled parameter (for the case where no per-point sigmas
    are given and all points share one uncertainty).
    """
    guesses = [param.value for param in mini.params.values() if param.vary]
    if estimate_sigma:
        guesses.append(mini.residual.std())
    return guesses
def set_attribute(library, session, attribute, attribute_state):
    """Sets the state of an attribute.
    Corresponds to viSetAttribute function of the VISA library.
    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param attribute: Attribute for which the state is to be modified. (Attributes.*)
    :param attribute_state: The state of the attribute to be set for the specified object.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Thin pass-through to the ctypes-wrapped VISA entry point; no argument
    # validation or status checking happens here.
    return library.viSetAttribute(session, attribute, attribute_state)
def get_scalar_column(df, column):
    """
    Extract the single scalar value of `column`, asserting that every row
    holds that same value (or that the column is entirely null).

    :param df: pandas DataFrame
    :param column: column name
    :return: the scalar (the first entry of the column)
    """
    col = df[column]
    val = col.iloc[0]
    # The original failure message used df[col != val, :], which is invalid
    # DataFrame indexing and raised TypeError instead of showing the
    # offending rows; df.loc[...] selects them correctly.
    assert col.isnull().all() or (col == val).all(), \
        (column, val, df.loc[col != val])
    return val
import time
def set_computer_policy_check_rate_limit(api, configuration, api_version, api_exception, computer_ids, policy_id):
    """ Sets the policy for a number of computers. On each call to Deep Security Manager, checks whether the API rate limits are exceeded and if so retries the call.
    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param computer_ids: A list of IDs of the computers to modify.
    :param policy_id: The ID of the policy to assign.
    :return: The list of modified computer IDs on success; on a
        non-rate-limit failure or exhausted retries, a string describing the
        exception. NOTE(review): the mixed return type (list vs str) is
        awkward for callers — consider raising instead.
    """
    computers_api = api.ComputersApi(api.ApiClient(configuration))
    # IDs of modified computers
    modified_computer_ids = []
    # Count the number of computers that have been modified -- also used as the index for computer_ids
    change_count = 0
    # Count retries, and set a maximum
    retries = 0
    MAX_RETRIES = 12
    while True:
        # Create a computer object and set the policy ID
        computer = api.Computer()
        computer.policy_id = policy_id
        try:
            # Modify the computer on Deep Security Manager and store the ID of the returned computer
            computer = computers_api.modify_computer(computer_ids[change_count], computer, api_version, overrides=False)
            modified_computer_ids.append(computer.id)
            # A successful call resets the retry budget for the next computer.
            retries = 0
            # Increment the count and return if all computers are modified
            change_count += 1
            if change_count == len(computer_ids):
                return modified_computer_ids
        except api_exception as e:
            if e.status == 429 and retries < MAX_RETRIES:
                # The error is due to exceeding an API rate limit
                retries += 1
                # Exponential backoff: 2^(retries+3) ms, i.e. 16 ms doubling
                # up to ~32 s at MAX_RETRIES.
                exp_backoff = (2 ** (retries +3)) / 1000
                print("API rate limit is exceeded. Retry in {} s.".format(exp_backoff))
                time.sleep(exp_backoff)
            else:
                # Return all other exception causes or when max retries is exceeded
                return "Exception: " + str(e)
def sorted_committees(committees):
    """
    Convert each committee to a set and return them sorted by their string
    representation.
    """
    as_sets = [set(committee) for committee in committees]
    as_sets.sort(key=str)
    return as_sets
def get_default_geofence_id(resource_name):
    """ Helper to ensure consistency when referring to default Geofence by id (name) """
    return "{}-default-geofence".format(resource_name)
from typing import Union
import pathlib
import yaml
def read_yaml(file_path: Union[pathlib.Path, str]) -> dict:
    """Load a YAML file and return its parsed contents."""
    with open(file_path, encoding='utf8') as input_file:
        parsed = yaml.load(input_file, Loader=yaml.FullLoader)
    return parsed
def tokens(text, tok_size=3):
    """ Return every substring of `text` of length `tok_size`.
    :param text: (string) text to tokenize
    :param tok_size: length of substrings
    :return: (list) tokens of 'text'
    """
    n_tokens = len(text) - tok_size + 1
    return [text[start:start + tok_size] for start in range(n_tokens)]
def estimate_beta_parameters(dataset, meanfactor=2):
    """Estimate Beta-distribution alpha/beta parameters from feature means.

    alpha = meanfactor * mean(feature) and beta = meanfactor - alpha, so
    alpha / (alpha + beta) equals each feature's mean.
    """
    feature_means = dataset.mean(0)
    alpha = meanfactor * feature_means
    return alpha, meanfactor - alpha
def withdraw(exchange, coin: str, amount: float, address: str) -> bool:
    """
    Withdraw {amount} of {coin} to {address} from binance.
    :param exchange: ccxt binance obj instance
    :param coin: coin symbol to withdraw
    :param amount: amount of coin to withdraw
    :param address: address for coin to withdraw to
    :return: true if successful, false otherwise (the error is printed)
    """
    try:
        exchange.withdraw(coin, amount, address)
    except Exception as error:
        print(error)
        return False
    return True
def _get_node_properties(node):
"""Get the properties from a neo4j.v1.types.graph.Node object."""
# 1.6.x and newer have it as `_properties`
if hasattr(node, '_properties'):
return node._properties
# 1.5.x and older have it as `properties`
else:
return node.properties | 332506870910e58a379cf9dc63aacbcbafe3ef7a | 108,035 |
def any_match(match_fn, seq, patterns):
    """Return True if match_fn(s, pat) is truthy for some (s, pat) in seq x patterns.

    match_fn: (item, pattern) -> bool
    seq, patterns: iterables or generators. Note that generator arguments
    are consumed: a second call with the same spent generator finds nothing.

    Equality:
    >>> any_match(lambda x, y: x == y, (1, 2, 3), (5, 6, 1))
    True
    >>> any_match(lambda x, y: x == y, (1, 2, 3), (5, 6))
    False
    Regexp:
    >>> import re
    >>> matcher = lambda s, pat: re.match(pat, s)
    >>> any_match(matcher, ('aaaab',), (r'a.*b', r'[0-9]{2,2}[a-z]'))
    True
    """
    for item in seq:
        for pattern in patterns:
            if match_fn(item, pattern):
                return True
    return False
def bool_to_string(value):
    """
    Render a bool as its string representation ('True' / 'False').
    """
    return "{}".format(value)
from typing import Collection
def read_profiles(files: Collection[str]) -> Collection[str]:
    """Read profile HMMs: one whole-file string per path in `files`."""
    contents = []
    for path in files:
        with open(path) as handle:
            contents.append(handle.read())
    return contents
def freezept(S, P=0):
    """Compute the freezing temperature of sea water.
    Usage: freezept(S, [P])
    Input:
        S = Salinity, [psu]
        P = Pressure, [dbar] (optional, default 0)
    Output:
        T = Freezing point, [°C]
    Algorithm: UNESCO 1983
    """
    # Polynomial coefficients from the UNESCO 1983 formula.
    a0, a1, a2 = -0.0575, 1.710523e-3, -2.154996e-4
    b = -7.53e-4
    return a0 * S + a1 * S ** 1.5 + a2 * S ** 2 + b * P
from datetime import datetime
def getUniqueTaskId(prefix=None):
    """
    Unique id generator for various DLS tasks.
    :param prefix: prefix for task type
    :return: unique string index (lists of indexes sort chronologically)
    """
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    return stamp if prefix is None else '%s-%s' % (prefix, stamp)
def calc_bounds(conformer):
    """
    Calculate the X,Y bounds of a molecule: the minimum and maximum X and Y
    coordinates of any atom. It is assumed that the molecule already has 2D
    coordinates.
    :param conformer: conformer holding the atom positions
    :return: X and Y bounds as [min, max] lists; ([0, 0], [0, 0]) for an
        empty molecule
    """
    num = conformer.GetOwningMol().GetNumAtoms()
    if num == 0:
        return [0, 0], [0, 0]
    # Seed the bounds from the first atom. The original seeded them at 0,
    # which clamped the bounds to include the origin even when every atom
    # lies on one side of it.
    first = conformer.GetAtomPosition(0)
    x = [first.x, first.x]
    y = [first.y, first.y]
    for i in range(1, num):
        pos = conformer.GetAtomPosition(i)
        x[0] = min(x[0], pos.x)
        x[1] = max(x[1], pos.x)
        y[0] = min(y[0], pos.y)
        y[1] = max(y[1], pos.y)
    return x, y
def hosting_provider_with_sample_user(hosting_provider, sample_hoster_user):
    """
    Return a hosting provider that's been persisted to the database,
    and has a user associated with it
    """
    # Persist the provider first so it exists (and has a primary key) before
    # the user row references it.
    hosting_provider.save()
    sample_hoster_user.hostingprovider = hosting_provider
    sample_hoster_user.save()
    return hosting_provider
def validate_token_sequence(token_sequence: str) -> bool:
    """Return True iff `token_sequence` is properly formed.

    Token sequences are strings or words which are separated by single
    blanks, with no leading or trailing blank.
    """
    if token_sequence.startswith(' ') or token_sequence.endswith(' '):
        return False
    return '  ' not in token_sequence
def calculate_age(year1: int, year2: int) -> int:
    """Return the absolute difference between two years (the age)."""
    return abs(year2 - year1)
def cmp(x: int, y: int) -> bool:
    """
    Compare two integers for demo purposes.

    Returns True when the integers are equal, False otherwise.
    """
    # `x == y` already evaluates to a bool; the conditional expression
    # `True if ... else False` was redundant.
    return x == y
def lazy_if2(*args):
    """Call the given zero-argument callables in order and return the first
    truthy result; return '' when none is truthy (or no callables given)."""
    # The inner generator keeps evaluation lazy: callables after the first
    # truthy result are never invoked.
    return next((result for result in (fn() for fn in args) if result), '')
def get_time(t):
    """Format a run time as "M min:S sec".

    :param t: Run time in seconds
    :type t: float
    :returns: str
    """
    minutes, seconds = divmod(t, 60)
    return f"{minutes:.0f} min:{seconds:.0f} sec"
def ovn_metadata_name(id_):
    """Return the OVN metadata name derived from an id."""
    return f'metadata-{id_}'
def GetFirstEnergyGuess(PotentialArray):
    """Defines the first energy level as a value slightly above the potential minimum.

    More explicitly: V_min + (1/500000)*(V_average + V_min).

    NOTE(review): the original docstring described (1/50000)*((V_average + V_min)/2),
    but the code computes V_min + (1/500000)*(V_average + V_min); the code's
    actual behaviour is documented here -- confirm which formula was intended.

    Parameters:
    -----------
    PotentialArray (numpy.ndarray) : a numpy array that contains the potential value between 'x_V_min' and 'x_V_max' at every interval of length 'Division'

    Returns:
    --------
    First_E_guess (float) : the first energy guess that will be used in the Numerov algorithm.
    """
    First_E_guess = PotentialArray.min() + (1/500000) * (PotentialArray.mean() + PotentialArray.min())
    return First_E_guess
def needs_quote(arg):
    """Return True if the given string needs to be shell-quoted.

    Quoting is needed if any of the following are found anywhere in *arg*:
      * a double quotation mark (")
      * a single quotation mark (')
      * whitespace

    :param arg: string to inspect
    :return: bool; an empty string needs no quoting
    """
    # The original loop's `else` was bound to the inner `if`, so the function
    # returned after examining only arg[0] (and returned None for ""); scan
    # every character instead.
    return any(c in ('"', "'") or c.isspace() for c in arg)
def standard_slices(problem_size, num_agents, overlap=0):
    """Create standard slices for a problem.

    We assume that the problem size is exactly divisible by the number
    of agents; hence all agents have exactly the same subproblem size.

    Parameters
    ----------
    problem_size : int
        problem size
    num_agents : int
        number of agents
    overlap : int (optional)
        how many rows overlap should the agents have. The overlap is cyclic,
        i.e. the last agent shares some rows with the first agent.

    Returns
    -------
    list of lists of ints
        a list of row indices corresponding to each agent
    """
    # Validate the inputs before doing any arithmetic.
    if num_agents < 1:
        raise ValueError("Number of agents must be greater or equal one")
    if overlap < 0:
        raise ValueError("Overlap must be greater or equal zero")
    if problem_size % num_agents != 0:
        raise ValueError(
            "Problem size must be an exact multiple of number of agents"
        )
    rows_per_agent = problem_size // num_agents
    if rows_per_agent + overlap > problem_size:
        raise ValueError("Overlap is too large, repeating rows")
    # Each agent starts where the previous one's non-overlapping part ends;
    # the modulo wraps the overlap of the last agent back to the first rows.
    slices = []
    for agent in range(num_agents):
        base = agent * rows_per_agent
        slices.append(
            [(base + offset) % problem_size
             for offset in range(rows_per_agent + overlap)]
        )
    return slices
import mmap
import re
def str_in_file(str_, file_):
    """
    Return True if ``str_`` occurs in ``file_``, otherwise False.

    The string is first converted to bytes, and the file is searched through
    an mmap so the entire file need not be read into memory.

    :param str_: text to look for (matched literally, NOT as a regex)
    :param file_: path of the file to search
    :return: bool
    """
    needle = str_.encode()
    with open(file_, 'rb', 0) as file:
        try:
            # mmap.find does a literal byte search; the previous re.search
            # treated regex metacharacters in ``str_`` as operators.
            with mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as s:
                return s.find(needle) != -1
        except ValueError:
            # A zero-length file cannot be mmapped; only the empty string
            # occurs in an empty file.
            return needle == b''
import platform
def get_processor() -> str:
    """Return the platform's processor if it can be found, otherwise 'Unknown'

    Returns:
        str: the platform's processor or 'Unknown'
    """
    # Call platform.processor() once rather than twice; the lookup can shell
    # out on some platforms.
    processor = platform.processor()
    return processor if processor else 'Unknown'
from typing import Tuple
from typing import Optional
def parse_display_name(name : str) -> Tuple[Optional[str], str]:
    """
    Split a block display name into a (category, name) pair.

    Args:
        name:
            Any string that represents the block display name.

    Returns:
        A two item tuple: the first item is the category and the second is
        the block name. The category is None when ``name`` contains no "."
        (dot) character.
    """
    # partition splits on the first dot only; everything after it stays in
    # the block name.
    category, dot, block_name = name.partition(".")
    if not dot:
        return (None, name)
    return (category, block_name)
import re
def create_mad_lib_story(mad_lib_template, responses):
    """
    Populate a Madlib template with the user input responses, each response
    replacing the prompt at the matching position.

    Parameters:
        mad_lib_template (string): Madlib template from input file
        responses (array): the responses entered by user

    Returns:
        madlib_result (string): the Madlib template with the user input
        responses placed in the proper position of the template.
    """
    madlib_result = mad_lib_template
    prompts = re.findall(r'{[^}]+}', mad_lib_template)
    # str.replace with count=1 swaps in each response for the first remaining
    # occurrence of its prompt.  The previous re.sub treated the prompt as a
    # regex pattern (braces can form quantifiers) and the response as a
    # replacement template (backslashes raised "bad escape" errors), so a
    # literal replacement is both safer and correct.
    for prompt, response in zip(prompts, responses):
        madlib_result = madlib_result.replace(prompt, response, 1)
    return madlib_result
def getMostFrequent(counts, exclWordList, topNumber):
    """
    Return the most frequent words from a dictionary of word counts.

    INPUT:
    counts: Dictionary with word counts.
    exclWordList: A list of words to be excluded from consideration.
    topNumber: Number of top frequent words to be extracted.
    OUTPUT:
    topCount: Dictionary of top words where keys represent words and values
    represent the number of times each word has been counted.
    """
    # The exclusion list was documented but never applied; filter it first.
    excluded = set(exclWordList)
    candidates = {word: count for word, count in counts.items()
                  if word not in excluded}
    # Sort the items directly by count (descending) so every word stays
    # paired with its own count, instead of re-aligning two independently
    # sorted lists by index.
    ranked = sorted(candidates.items(), key=lambda item: item[1],
                    reverse=True)
    return dict(ranked[:topNumber])
def calculate_ap_from_pr(precisions: list, recalls: list) -> float:
    """Calculate Average Precision from P-R pairs via trapezoidal summation.

    Assumes the recall values are in decreasing order, so each
    (recalls[i] - recalls[i+1]) width is non-negative.
    """
    pairs = list(zip(precisions, recalls))
    total = 0.0
    for (p0, r0), (p1, r1) in zip(pairs, pairs[1:]):
        total += (r0 - r1) * (p0 + p1) / 2
    return total
from typing import Union
from typing import Optional
def _decode(data: Union[None, str, bytes], encoding: str) -> Optional[str]:
""" Encode given `data` with `encoding` format or return string|None.
Args:
data: data to encode.
encoding: selected encoding of the given `data`.
Return:
String representation of the given `data` or None.
"""
if data is None:
return None
else:
if isinstance(data, str):
return data
else:
return str(data, encoding) | 8da88d9d796c0c34a0621b39795c5485a73e6ea9 | 108,109 |
def CreateDefaultBootAttachedDiskMessage(
    messages, disk_type, disk_device_name, disk_auto_delete, disk_size_gb,
    image_uri):
    """Build an AttachedDisk message describing a new boot disk.

    The disk is initialized from ``image_uri``, attached read-write, and
    flagged as the boot disk.
    """
    initialize_params = messages.AttachedDiskInitializeParams(
        sourceImage=image_uri,
        diskSizeGb=disk_size_gb,
        diskType=disk_type)
    return messages.AttachedDisk(
        autoDelete=disk_auto_delete,
        boot=True,
        deviceName=disk_device_name,
        initializeParams=initialize_params,
        mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
        type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
def check_circular_call_dependencies(manifests):
    """
    Check if there is a circular dependency between the partitions
    described by the manifests.
    A circular dependency might happen if there is a scenario in which a
    partition calls a Root of Trust Service in another partition which then
    calls another Root of Trust Service which resides in the
    originating partition.
    For example: Partition A has a Root of Trust Service A1 and extern sid B1,
    partition B has a Root of Trust Service B1 and extern sid A1.
    :param manifests: List of the partition manifests.
    :return: True if a circular dependency exists, False otherwise.
    """
    # Construct a call graph.
    # 'calls' holds the partitions this manifest depends on; 'called_by'
    # collects the reverse edges in the loop below.
    # NOTE(review): assumes every name returned by find_dependencies() is the
    # name of some manifest in `manifests`; otherwise the call_graph lookup
    # below raises KeyError -- confirm against the manifest class.
    call_graph = {}
    for manifest in manifests:
        call_graph[manifest.name] = {
            'calls': manifest.find_dependencies(manifests),
            'called_by': set()
        }
    for manifest_name in call_graph:
        for called in call_graph[manifest_name]['calls']:
            call_graph[called]['called_by'].add(manifest_name)
    # Run topological sort on the call graph (Kahn's algorithm: repeatedly
    # strip nodes with no incoming edges; a cycle exists iff we get stuck).
    while len(call_graph) > 0:
        # Find all the nodes that aren't called by anyone and
        # therefore can be removed.
        nodes_to_remove = [x for x in list(call_graph.keys()) if
                           len(call_graph[x]['called_by']) == 0]
        # If no node can be removed we have a circle.
        if not nodes_to_remove:
            return True
        # Remove the nodes.
        for node in nodes_to_remove:
            for called in call_graph[node]['calls']:
                call_graph[called]['called_by'].remove(node)
            call_graph.pop(node)
    return False
def masked_average(tensor, mask, eps=1e-8):
    """
    Average the entries of ``tensor``, weighting each element by ``mask``.

    Args:
        tensor (Tensor): tensor to be averaged.
        mask (Tensor): element-wise weights (typically 0/1) selecting which
            elements contribute.
        eps (float): lower bound on the mask total, for numerical stability.

    Returns:
        FloatTensor: the masked average of the input tensor.
    """
    weights = mask.float()
    weighted_sum = (tensor.float() * weights).sum()
    # Clamp the denominator so an all-zero mask cannot divide by zero.
    return weighted_sum / weights.sum().clamp(min=eps)
def _return_false(self, *args, **kwargs):
"""Returns False."""
return False | 489b7872d65cc287dd6c8f5d78c5a9998aa6cedf | 108,114 |
def cull_by_retired(data):
    """Return a copy of a DataFrame with only retired subjects."""
    # Positions of rows whose subject_data dict has a non-None 'retired'
    # entry.
    # NOTE(review): data['subject_data'][i] is label-based while .iloc below
    # is positional -- this only lines up when the DataFrame has a default
    # RangeIndex; confirm against callers.
    rets = [i for i in range(len(data))
            if data['subject_data'][i]['retired'] is not None]
    return data.iloc[rets].copy()
import torch
def accuracy(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    """
    Compute classification accuracy from predicted scores and true labels.

    Args:
        outputs: Model output given as tensor of shape `[batch_size, num_classes]`.
        labels: True class given as tensor of shape `[batch_size,]`.

    Returns:
        The fraction of correctly classified samples in this batch.
    """
    assert outputs.size(0) == labels.size(0)
    # Predicted class = index of the highest score per row.
    _, predictions = torch.max(outputs.detach(), dim=1)
    num_correct = (predictions == labels).sum()
    fraction = 1.0 * num_correct / labels.size(0)
    return float(fraction.item())
import re
def extract_url_from_git_ssh_url(url):
    """Extract "host/project" from a git SSH URL; None when it doesn't match."""
    found = re.match(r'git@(\S+):(.*)\.git', url)
    if found is None:
        return None
    host, project = found.group(1, 2)
    return host + "/" + project
def isoformat(dt):
    """
    Encoder for ``datetime.datetime`` instances.

    :param dt: instance of ``datetime.datetime``
    :return: ISO 8601 formatted string
    :rtype: str
    """
    return dt.isoformat()
def do_wordcount(s):
    """
    Count the words in that string.
    """
    # str.split() with no arguments already drops empty strings, so the
    # previous truthiness filter was redundant.
    return len(s.split())
def filter_coordinate(coordinate):
    """Round the given coordinate to three decimal places.

    1 mm accuracy is enough for coordinates.
    """
    return round(coordinate, 3)
def format_coords(coord):
    """Render an (x, y, z) triple as PDB-style fixed-width coordinate fields."""
    # ":7.3f" right-aligns each value to width 7 with three decimals,
    # matching the PDB coordinate columns.
    return tuple(f"{value:7.3f}" for value in coord[:3])
def unwrap_text(text):
    """Turn wrapped text into flowing paragraphs, ready for rewrapping by
    the console, browser, or textwrap.
    """
    paragraphs = []
    current = []
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        if stripped:
            current.append(stripped)
            continue
        # Blank line: close off the paragraph collected so far.
        paragraphs.append(' '.join(current))
        current = []
    if current:
        paragraphs.append(' '.join(current))
    return '\n\n'.join(paragraphs)
def charVarNumber(charVar):
    """
    Maps binary number (eg [1, 0, 1]) to its decimal value (5).
    """
    # Horner-style accumulation left to right: each step shifts the running
    # value up one bit and adds the next digit.
    value = 0
    for bit in charVar:
        value = value * 2 + int(bit)
    return value
import re
def remove_chars(s):
    """
    Remove characters ($, / and \\) from a string that have unintended
    effects on file paths.

    :param s: str
    :return: str
    """
    return s.translate(str.maketrans('', '', '$/\\'))
def makeParList(objlist, prefix=''):
    """Join str(objects), each with the given prefix, into a comma separated string."""
    return ', '.join(prefix + str(obj) for obj in objlist)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.