content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import copy
def match_files(input_files, results_files):
    """Mark input files as present in the DocuScope results.

    Takes in results from parse_input_files and parse_docuscope_results and
    sets the 'present' values in the results dictionary to True for all the
    input files present in it. Does nothing if an input file isn't in the
    docuscope results.

    Args:
        input_files: iterable of file names to look up.
        results_files: dict mapping file name -> per-file result dict.

    Returns:
        A deep copy of ``results_files`` with 'present' set to True for every
        matched input file; the caller's dictionaries are left unmodified.
    """
    # copy.copy() only duplicated the outer dict, so the nested per-file
    # dicts stayed shared with the caller and were mutated in place;
    # deepcopy keeps the no-mutation promise implied by returning a copy.
    job = copy.deepcopy(results_files)
    for f in input_files:
        if f in job:
            job[f]['present'] = True
    return job
|
35fb1797ee5dc73cf01f878f893507d4cbcbde9b
| 75,058
|
def date_message(n_clicks, freq, start_date, end_date):
    """
    Build the user-facing message for the selected historical date range.

    Args:
        n_clicks: Number of clicks registered by the submit button.
        freq: Radio option for selecting the requests per day, hour, minute.
        start_date: Start date for the historical analysis ('YYYY-MM-DD').
        end_date: End date for the historical analysis ('YYYY-MM-DD').

    Returns:
        A message to the user as a response to the inputs for the
        historical analysis.
    """
    if start_date is None or end_date is None:
        return 'Please enter the date range'
    accepted = 'Data from {} to {}'.format(start_date, end_date)
    if freq == 'Hour' or freq == 'Minute':
        # Compare day-of-month: hour/minute views require consecutive days.
        day_diff = int(end_date.split('-')[2]) - int(start_date.split('-')[2])
        if day_diff == 1:
            return accepted
        return 'For hours or minutes please enter two consecutive days'
    if freq == 'Day':
        # Compare month numbers: day view requires a single-month range.
        month_diff = int(end_date.split('-')[1]) - int(start_date.split('-')[1])
        if month_diff == 0:
            return accepted
        return 'For days please enter a range within the same month'
    return accepted
|
93d1c413fb2c34cf35e3c6fb9a94d2cb19ef857a
| 75,059
|
def get_vacant_binding_index(num_agents, bindings, lowest_binding_index,
                             force_scheduling=False):
    """Return a vacant binding_index to use and whether or not it exists.

    This method can be used with DHCP and L3 agent schedulers. It will return
    the lowest vacant index for one of those agents.

    :param num_agents: (int) number of agents (DHCP, L3) already scheduled
    :param bindings: (NetworkDhcpAgentBinding, RouterL3AgentBinding) agent
                     binding object, must have "binding_index" field.
    :param lowest_binding_index: (int) lowest index number to be scheduled.
    :param force_scheduling: (optional)(boolean) if enabled, the method will
                             always return an index, even if this number
                             exceeds the maximum configured number of agents.
                             Returns -1 when no slot is free otherwise.
    """
    used = [b.binding_index for b in bindings]
    taken = set(used)
    # Lowest free slot inside the configured [lowest, num_agents] window.
    free = sorted(set(range(lowest_binding_index, num_agents + 1)) - taken)
    if free:
        return free[0]
    if not force_scheduling:
        return -1
    # Last chance: if this is a manual scheduling, we're gonna allow
    # creation of a binding_index even if it will exceed
    # dhcp_agents_per_network.
    if max(used) == len(used):
        # Indices are contiguous from 1, so append a new one on top.
        return max(used) + 1
    # Otherwise fill the first gap in the existing index set.
    gaps = sorted(set(range(lowest_binding_index, max(used) + 1)) - taken)
    return gaps[0]
|
a3842da804ffb2f53d3089e5c4696fcec911aaa6
| 75,063
|
import re
def remove_comments(text):
    """Remove C-style /*comments*/ from a string."""
    # The alternation's group(2) captures string literals and plain code,
    # while the comment branch captures nothing in group(2) -- so keeping
    # only group(2) drops comments and preserves everything else (including
    # comment-like text inside string literals).
    pattern = r'/\*[^*]*\*+([^/*][^*]*\*+)*/|("(\\.|[^"\\])*"|\'(\\.|[^\'\\])*\'|.[^/"\'\\]*)'
    kept = [match.group(2) for match in re.finditer(pattern, text, re.M | re.S) if match.group(2)]
    return ''.join(kept)
|
638b646545ec3b85c9c2b7b799e56a951e68bd4b
| 75,064
|
from typing import Optional
def is_limit_reached(num_messages: int, limit: Optional[int]) -> bool:
    """Determine whether the number of messages has reached a limit.

    Args:
        num_messages: The number of messages to check.
        limit: Limit on the number of messages, or ``None`` for no limit.

    Returns:
        `True` if the limit has been reached, otherwise `False`.
    """
    if limit is None:
        # No limit configured, so it can never be reached.
        return False
    return num_messages >= limit
|
03c31cfe9bca1ac829b9d12243c454ef778d78bd
| 75,065
|
def compress_feature_metadata(tip_metadata, int_metadata):
    """Converts tip/internal node metadata DataFrames to dicts to save space.

    This is a pretty early optimization -- ideally we would use 2-D lists as
    our final metadata structure, similar to the table / sample metadata
    compression. This should be revisited when the tree data node-name
    revamping has been merged in.

    Parameters
    ----------
    tip_metadata: pd.DataFrame or None
        Metadata for tip nodes. If not None, the index should describe node
        names, and the columns should describe feature metadata fields.
    int_metadata: pd.DataFrame or None
        Metadata for internal nodes. If not None, the index should describe
        node names, and the columns should describe feature metadata fields.

    Note that the columns of tip_metadata and int_metadata should be
    identical, even if the feature metadata only describes tip or internal
    nodes. (In that case, then the other feature metadata parameter should
    still be a DataFrame -- albeit an empty one, with no feature names in its
    index.) The only case in which the parameters should be None is if there
    was no feature metadata at all.

    Returns
    -------
    (metadata_columns, compressed_tip_metadata, compressed_int_metadata)
        metadata_columns: list
            List of the feature metadata column names, all converted to
            strings. If both input DFs are None, this will be [].
        compressed_tip_metadata: dict
            Maps node names in tip_metadata to a list of feature metadata
            values, in the same order as in metadata_columns and converted
            to strings. If tip_metadata was empty, or if both input DFs were
            None, this will be {}.
        compressed_int_metadata: dict
            Maps node names in int_metadata to a list of feature metadata
            values, in the same order as in metadata_columns and converted
            to strings. If int_metadata was empty, or if both input DFs were
            None, this will be {}.

    Raises
    ------
    ValueError
        - If only one of tip_metadata and int_metadata is None.
        - If the columns of tip_metadata are not identical to the columns of
          int_metadata.
        - If both the tip and internal node metadata DataFrames are empty.

    References
    ----------
    - Inspired by redbiom and Qurro's JSON data models.
    """
    # No feature metadata passed in at all: nothing to compress.
    if tip_metadata is None and int_metadata is None:
        return [], {}, {}
    # *This* should never happen. If it did, it's a sign that this function
    # is being misused. (The ^ is a logical XOR; see
    # https://stackoverflow.com/a/432844/10730311.)
    if (tip_metadata is None) ^ (int_metadata is None):
        raise ValueError(
            "Only one of tip & int. node feature metadata is None."
        )
    # Verify that columns match up btwn. tip and internal node metadata
    if not tip_metadata.columns.equals(int_metadata.columns):
        raise ValueError("Tip & int. node feature metadata columns differ.")
    # Verify that at least one feature metadata entry exists (since at this
    # point we know that there should be at least *some* feature metadata)
    if tip_metadata.empty and int_metadata.empty:
        raise ValueError("Both tip & int. node feature metadata are empty.")
    column_names = [str(c) for c in tip_metadata.columns]
    # We want dicts mapping each feature ID to a list of the f.m. values for
    # this feature ID. orient="list" on to_dict() is column-major, so each
    # DF is transposed first to get row-major dicts keyed by feature ID;
    # astype(str) stringifies every value along the way.
    tip_compressed = tip_metadata.astype(str).T.to_dict(orient="list")
    int_compressed = int_metadata.astype(str).T.to_dict(orient="list")
    return column_names, tip_compressed, int_compressed
|
dd78a7791bc61fd260325e778dc302d96484c16d
| 75,066
|
import math
def gg2gc(gg):
    """transforms coordinates from geographic (lat, lon, h) to geocentric"""
    deg2rad = math.pi/180  # conversion factor from degrees to radians
    r_earth = 6378.135  # earth radius (km)
    g = 1.00673944  # earth flatness factor
    lat = gg[0]*deg2rad
    lon = gg[1]*deg2rad
    h = gg[2]
    # Distance from the rotation axis (horizontal component of the radius).
    # NOTE(review): formula divides by tan(lat) below, so lat == 0 raises
    # ZeroDivisionError -- same as the original implementation.
    hor = (r_earth/math.sqrt(1+math.tan(lat)**2/g)+h*math.cos(lat))
    z = r_earth/math.sqrt(g+g**2/math.tan(lat)**2)+h*math.sin(lat)
    return [hor*math.cos(lon), hor*math.sin(lon), z]
|
3fc4267aa2198e636e7d952945f77689102c6c45
| 75,070
|
def bound(low, value, high):
    """Clamp *value* to the inclusive range [low, high].

    If value is between low and high, return value.
    If value is below low, return low.
    If value is above high, return high.
    If low is above high (an invalid range), raise AssertionError.
    """
    # Original docstring said "If low is below high, raise an exception",
    # which inverted the actual precondition checked here.
    assert low <= high
    return max(low, min(value, high))
|
eecdd663a7fc2712183f341677d7c918ede00fea
| 75,071
|
def char_to_string(ll):
    """Convert 2-D list of chars to 1-D list of strings."""
    # https://stackoverflow.com/questions/23618218/numpy-bytes-to-plain-string
    # Each inner sequence may mix str and bytes entries; bytes are decoded,
    # everything is joined, and surrounding whitespace is stripped.
    result = []
    for row in ll:
        pieces = [c if type(c) == str else c.decode() for c in row]
        result.append(''.join(pieces).strip())
    return result
|
09d31eee4217ae04fc85485163c5b2528b340874
| 75,072
|
def sum_valid_rooms(valid_rooms):
    """
    Get the sum of all the sector IDs in valid rooms.

    Args:
        valid_rooms (list): List containing tuples of valid rooms, each
            shaped (room_name, sector_id, checksum).

    Returns:
        int
    """
    # Index 1 is the sector ID within each room tuple.
    return sum(room[1] for room in valid_rooms)
|
6e4fd9220a1fcc5f5fcf96b0c5ce71b49ccb833c
| 75,073
|
def _reward_scaler(reward: int) -> float:
"""
Scale a reward for a model
:param reward: The reward to be scaled
:return: The scaled reward
"""
if reward == 1:
return 1.0
if reward == 0:
return 0.0
return -1.0
|
ca341cb182ceba34a87c76378328003b4bff71d4
| 75,087
|
import json
def readjson(sFile):
    """
    Purpose:
      Read the JSON from a file, one JSON document per line (JSON Lines)

    Inputs:
      sFile     string, filename to read from

    Return value:
      aJS       list of JSON/dictionary elements; lines that are not valid
                JSON are reported and skipped
    """
    aJS = []
    with open(sFile) as f:
        for line in f:
            try:
                aJS.append(json.loads(line))
            except json.JSONDecodeError:
                # The previous bare `except:` also swallowed SystemExit and
                # KeyboardInterrupt; only malformed JSON should be skipped.
                print("Line in a non-valid format.")
    return aJS
|
9882c9797783bbe387b8c95753e08d68ab009472
| 75,088
|
import string
import random
def random_letter(only_capital=True):
    """Randomly select a letter from the alphabet.

    :param only_capital: when True (default), restrict the pool to
        uppercase letters; otherwise allow both cases.
    """
    pool = string.ascii_uppercase if only_capital else string.ascii_letters
    return random.choice(pool)
|
703f54239e0937d046c676eec798725a778027f2
| 75,090
|
def sgn(val):
    """Sign function. Returns -1 if val negative, 0 if zero, and 1 if
    positive. Objects may override the result via a ``_sgn_`` method.
    """
    try:
        # Prefer the object's own sign implementation when it exists.
        return val._sgn_()
    except AttributeError:
        pass
    if val == 0:
        return 0
    return 1 if val > 0 else -1
|
aaaa88e4620ce87b931bf156a5d30087c59b956f
| 75,096
|
def bottom_up_longest_common_subsequence(text_1, text_2):
    """
    Length of the longest common subsequence of two strings, computed with
    bottom-up dynamic programming.

    Parameters
    ----------
    text_1 : str
        first string
    text_2 : str
        second string

    Returns
    -------
    int
        length of the longest common subsequence

    >>> bottom_up_longest_common_subsequence("abcde", "ace")
    3
    """
    rows, cols = len(text_1), len(text_2)
    # dp[i][j] holds the LCS length of text_1[:i] and text_2[:j].
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i, ch_1 in enumerate(text_1, start=1):
        for j, ch_2 in enumerate(text_2, start=1):
            if ch_1 == ch_2:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[rows][cols]
|
ddbc090a0231ef51810ccf749dc7a030bf6017a0
| 75,100
|
import re
def get_display_name(class_name):
    """
    Converts class names to a title case style.

    For example, 'CelebrityApprovalReport' would become 'Celebrity Approval
    Report'.
    """
    # Thanks to Django's code for a portion of this regex: it inserts a
    # space before each CamelCase word boundary.
    spaced = re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name)
    return spaced.replace('_', ' ').title().strip()
|
a2963e14a4a57a6a679220a0117242a68544942c
| 75,101
|
def _cryptography_encrypt(cipher_factory, plaintext, key, iv):
"""Use a cryptography cipher factory to encrypt data.
:param cipher_factory: Factory callable that builds a cryptography Cipher
instance based on the key and IV
:type cipher_factory: callable
:param bytes plaintext: Plaintext data to encrypt
:param bytes key: Encryption key
:param bytes IV: Initialization vector
:returns: Encrypted ciphertext
:rtype: bytes
"""
encryptor = cipher_factory(key, iv).encryptor()
return encryptor.update(plaintext) + encryptor.finalize()
|
d4ded5a2ab52a8b0116d90d11d846546a65fe23f
| 75,102
|
def to_dict(meta):
    """
    Convert a coords, meta, attrs or masks object to a python dict.
    """
    # Any mapping-like object exposing items() works here.
    return dict(meta.items())
|
23d2a77a28930a8460d461a0115a534e89a115c0
| 75,104
|
def split_chunks(lst: list, size: int = 100) -> "list[list]":
    """Splits ids into list of list according to api limit

    `lst`: The list of ids
    `size`: Max length of inner list"""
    chunks = []
    for start in range(0, len(lst), size):
        chunks.append(lst[start:start + size])
    return chunks
|
081e53ba6d764ea88a4bc3020a6d3b0c55adc2c8
| 75,108
|
def cite(fmt='bibtex'):
    """
    Return the citation string of the slab module.

    Arguments:
        fmt (str): if bibtex, return the bibtex citation string, otherwise return the text reference
    """
    bibtex = """@article{Schönwiesner2021, doi = {10.21105/joss.03284}, url = {https://doi.org/10.21105/joss.03284}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {62}, pages = {3284}, author = {Marc Schönwiesner and Ole Bialas}, title = {s(ound)lab: An easy to learn Python package for designing and running psychoacoustic experiments.}, journal = {Journal of Open Source Software}}"""
    text = """Schönwiesner et al., (2021). s(ound)lab: An easy to learn Python package for designing and running psychoacoustic experiments. Journal of Open Source Software, 6(62): 3284, https://doi.org/10.21105/joss.03284"""
    return bibtex if fmt == 'bibtex' else text
|
334e3a708401bec3cf681f24aed5ef79cc03bc92
| 75,109
|
def group_lines(lines):
    """Split a list of lines using empty lines as separators."""
    groups = []
    current = []
    for line in lines:
        if not line.strip():
            # A blank line closes the current group (which may be empty).
            groups.append(current)
            current = []
        else:
            current.append(line)
    # A trailing group without a closing blank line is kept too.
    if current:
        groups.append(current)
    return groups
|
2237e063de3608486b4e1ef11ec8af250465977b
| 75,113
|
def _spark_calc_values_chunk(points):
"""
Compute some basic information about the chunk points values
The returned information are :
* count : the number of points in chunk
* max : the maximum value in chunk
* min : the minimum value in chunk
* sum : the sum of the values in chunk
* sqr_sum : the sum of the square values in chunk (used for variance calculation)
:param points: list of data values for each point in the chunk
:type points: numpy.array
:return: a dict composed of the basic information computed
:rtype: dict
"""
try:
nb_points = len(points)
except TypeError:
return None
if nb_points > 0:
sum_chunk_value = sum(points)
square_sum_chunk_value = sum([x * x for x in points])
max_chunk_value = max(points)
min_chunk_value = min(points)
else:
# Empty chunk, skip it
return None
return {
"count": nb_points,
"max": max_chunk_value,
"min": min_chunk_value,
"sum": sum_chunk_value,
"sqr_sum": square_sum_chunk_value,
}
|
04dd208038bf1df0172b821943337533f9759291
| 75,114
|
import yaml
def _load_yaml(config_path, logger=None):
    """Load config yaml file

    Args:
        config_path (str): the path of config yaml.
        logger (logging.Logger): the logger object to store logs.

    Returns:
        dict: config, a dictionary of configs.

    Raises:
        OSError: if the file cannot be opened (re-raised after logging).
    """
    try:
        with open(config_path, 'r') as yaml_file:
            config = yaml.safe_load(yaml_file)
            return config
    except OSError:
        if logger is None:
            print("Cannot open", config_path)
        else:
            # The old call logger.error("Cannot open", config_path) passed
            # the path as a %-format argument with no placeholder, so it
            # never appeared in the log record; use a %s placeholder.
            logger.error("Cannot open %s", config_path)
        raise
|
10be02369d28ad3854ab91584376baedf1b2ee94
| 75,115
|
def get_country_key(my_lang):
    """
    Country keys

    :param my_lang: my_lang language name as String
    :return: country_key
    """
    lang_to_key = {
        "Finnish": "fi-FI,en-US",
        "English": "en-US,fi-FI",
        "French": "fr-FR,en-US",
    }
    # Unknown languages fall back to the English key.
    return lang_to_key.get(my_lang, "en-US,fi-FI")
|
6548e442764b60291a804808761017fa02b3c569
| 75,121
|
def get_paths_for_object(paths, object):
    """
    returns a list of paths starting with the given object
    """
    # NOTE(review): the parameter name shadows the builtin `object`; it is
    # kept unchanged for backward compatibility with keyword callers.
    prefix = '/' + object + '/'
    return [path for path in paths if path.startswith(prefix)]
|
04a0c7381cdafe11c4d48d1097409b926e7d5306
| 75,122
|
def print_msg_closure(msg: str):
    """
    Closure function that returns a function that prints a message.

    :param str msg: Message to attach to print function.
    """
    # The returned callable closes over `msg` from this invocation.
    def printer():
        print(msg)

    return printer
|
4f142de4f7d2102269328641e443ae408171fe4e
| 75,123
|
def make_response(request, code, data=None):
    """Make response based on passed request, status code and data.

    The 'action' and 'time' fields are echoed back from the request
    (None when missing).
    """
    response = {'action': request.get('action'), 'time': request.get('time')}
    response['data'] = data
    response['code'] = code
    return response
|
f321341766011a19dc219021b577715d8312c67a
| 75,125
|
def get_sample(name, gender):
    """ Get the features of an input sample.

    The method takes a training sample as input and computes the following 4
    features from every name:
      - Ends with A
      - Ends with E
      - Frequency of A
      - Second from last character is N

    Args
        name: name of a person
        gender: The label corresponding to the gender. Possible values are
            Male 'M' or Female 'F'.

    Returns
        tuple:
            features : list of numeric feature values. (4 x 1)
            classification : 0 for Male and 1 for Female.
    """
    name = name.strip()
    features = [
        1 if name[-1] == 'A' else 0,  # ends with 'A'
        1 if name[-1] == 'E' else 0,  # ends with 'E'
        name.count('A'),              # frequency of 'A'
        1 if name[-2] == 'N' else 0,  # second-from-last char is 'N'
    ]
    classification = 0 if gender == 'M' else 1
    return (features, classification)
|
f75be1f4b186ef7be21b0d3061eccb1f2d65a8a8
| 75,129
|
import logging
def getLowerTabs(bookYaml):
    """Gets the lower tabs from a parsed book.yaml dictionary.

    Args:
        bookYaml: the parsed book.yaml file

    Returns:
        An array of objects with the lower tabs (best-effort: on any parse
        error the tabs collected so far are returned).
    """
    result = []
    try:
        for tab in bookYaml['upper_tabs']:
            if 'lower_tabs' not in tab or 'other' not in tab['lower_tabs']:
                continue
            for lowerTab in tab['lower_tabs']['other']:
                entry = {'name': lowerTab['name']}
                # Only record a path when the first contents entry has one.
                if 'contents' in lowerTab and 'path' in lowerTab['contents'][0]:
                    entry['path'] = lowerTab['contents'][0]['path']
                result.append(entry)
    except Exception:
        # Malformed book.yaml: log and return the partial result.
        logging.exception('Unable to read/parse the lower tabs')
    return result
|
139d9830a488f2822612c4eb09f0b0c5f20bc566
| 75,142
|
def tabular_tex(
    body_df,
    footer_df,
    notes_tex,
    render_options,
    custom_index_names,
    custom_model_names,
    left_decimals,
    sig_digits,
    show_footer,
):
    """Return estimation table in LaTeX format as string.

    Args:
        body_df (pandas.DataFrame): the processed dataframe with parameter values and
            precision (if applied) as strings.
        footer_df (pandas.DataFrame): the processed dataframe with summary statistics as
            strings.
        notes_tex (str): a string with LaTex code for the notes section
        render_options (dict): the pd.to_latex() kwargs to apply if default options
            need to be updated.
        custom_index_names: truthy to keep the index names in the rendered table.
        custom_model_names (dict): maps a model-group label to the (0-based)
            column positions it spans; used to draw \\cmidrule separators
            under the header.
        left_decimals (int): see main docstring
        sig_digits (int): see main docstring
        show_footer (bool): see main docstring

    Returns:
        latex_str (str): the string for LaTex table script.
    """
    n_levels = body_df.index.nlevels
    n_columns = len(body_df.columns)
    # here you add all arguments of df.to_latex for which you want to change the default
    default_options = {
        "index_names": False,
        "escape": False,
        "na_rep": "",
        # One "l" column per index level, then a siunitx "S" column (aligned
        # on the decimal point) for every model column.
        "column_format": "l" * n_levels
        + "S[table-format ={}.{}]".format(left_decimals, sig_digits) * n_columns,
        "multicolumn_format": "c",
    }
    if custom_index_names:
        default_options.update({"index_names": True})
    if render_options:
        # Caller-supplied to_latex kwargs win over the defaults above.
        default_options.update(render_options)
    if not default_options["index_names"]:
        body_df.index.names = [None] * body_df.index.nlevels
    latex_str = body_df.to_latex(**default_options)
    if custom_model_names:
        # Build \cmidrule rules spanning each model group; + n_levels skips
        # the index columns and + 1 converts to LaTeX's 1-based columns.
        temp_str = "\n"
        for k in custom_model_names:
            max_col = max(custom_model_names[k]) + n_levels + 1
            min_col = min(custom_model_names[k]) + n_levels + 1
            temp_str += f"\\cmidrule(lr){{{min_col}-{max_col}}}"
        temp_str += "\n"
        # Splice the rules in right after the first header row (first "\\").
        latex_str = (
            latex_str.split("\\\\", 1)[0]
            + "\\\\"
            + temp_str
            + latex_str.split("\\\\", 1)[1]
        )
    # Drop everything from \bottomrule onwards; footer and notes are
    # appended manually below, followed by a fresh \bottomrule.
    latex_str = latex_str.split("\\bottomrule")[0]
    if show_footer:
        stats_str = footer_df.to_latex(**default_options)
        # Keep only the footer's data rows (between \midrule and
        # \bottomrule), re-attaching a leading \midrule as the separator.
        stats_str = (
            "\\midrule" + stats_str.split("\\midrule")[1].split("\\bottomrule")[0]
        )
        latex_str += stats_str
    latex_str += notes_tex
    latex_str += "\\bottomrule\n\\end{tabular}\n"
    if latex_str.startswith("\\begin{table}"):
        latex_str += "\n\\end{table}\n"
    return latex_str
|
5858b6334f33cf68bbd737a15117fec6e16c1c76
| 75,143
|
def get_id2label(labels):
    """
    Get id2label mapping based on labels

    Args:
        labels: list of labels.

    Return:
        id2label map with stringified positional indices as keys
    """
    mapping = {}
    for idx, label in enumerate(labels):
        mapping[str(idx)] = label
    return mapping
|
664ed43c17bed4af72d4a2b7c3f61e3e33e0e66b
| 75,145
|
from typing import List
def reduce(grouped_data: List[str]) -> List[str]:
    """
    Reduces the grouped data to a single list with no whitespace
    """
    reduced = []
    for entry in grouped_data:
        # Drop spaces, then deduplicate characters (set iteration order is
        # arbitrary, matching the original behavior).
        reduced.append("".join(set(entry.replace(" ", ""))))
    return reduced
|
b15f3cad105b15aded0c48897b41c6de9c5d1017
| 75,146
|
def is_protected(file_name, regex_list):
    """
    Given a list of compiled regexes, verify if a filename matches and thus,
    is "protected" from being deleted
    """
    return any(regex.search(file_name) for regex in regex_list)
|
351f09fa5ee4ee7e38012d33a2fc9b275c8fc29c
| 75,147
|
import socket
def ip6_to_bytes(ip):
    """Convert IPv6 string to :py:class:`bytes`

    :param ip: IPv6 address in colon-separated hexadecimal notation
        (e.g. ``"2001:db8::1"``) -- the original docstring said
        "dot-decimal", which is IPv4 and is rejected by ``AF_INET6``
    :type ip: str
    :rtype: bytes
    :raises OSError: if *ip* is not a valid IPv6 address
    """
    return socket.inet_pton(socket.AF_INET6, ip)
|
24ccd6f60707813e515d6f2d6565b75f74950d46
| 75,149
|
def get_chain(struct, chain_id='A'):
    """Returns a specific chain from a Bio.PDB.Structure.Structure."""
    # First model of the structure, then look up the chain by its ID.
    first_model = struct.child_list[0]
    return first_model.child_dict[chain_id]
|
9c20309bd5e5d768e2465e13ca4cfa6207141578
| 75,152
|
import json
import requests
def find_most_recent_tag_dockerhub(name, url):
    """Find most recent tag of an image from Docker Hub

    Arguments:
        name {str} -- Name of the image
        url {str} -- API URL of the Docker Hub image repository

    Returns:
        new_tag {str} -- Most recent image tag from Docker Hub
    """
    payload = json.loads(requests.get(url).text)
    by_date = sorted(payload["results"], key=lambda entry: entry["last_updated"])
    # Skip the floating "latest" alias so a concrete version tag is returned.
    newest = by_date[-1]
    if newest["name"] == "latest":
        newest = by_date[-2]
    return newest["name"]
|
4b03dd1d20b252a36ad5b01f44bb9cacce041155
| 75,156
|
def scorefunction(tree, dataset):
    """
    This function checks every row in dataset, calculating the output from
    the function and comparing it to the real result. It adds up all the
    differences, giving lower values for better programs.

    Return value 0 indicates that the program got every result correct
    """
    # Each row is (input1, input2, expected_output).
    return sum(abs(tree.evaluate([row[0], row[1]]) - row[2]) for row in dataset)
|
e5a6e392760952a5e00a26c2913f7802ab6d1336
| 75,160
|
def is_conf_setter(f):
    """
    Decorator for functions that set :py:data:`~lrms.common.Config` attributes.

    Tags the function with ``is_conf_setter = True`` and returns it
    unchanged (no wrapper is created).
    """
    setattr(f, 'is_conf_setter', True)
    return f
|
019cfb3a4a392fa7906e62a05c8fc40b99dd78e6
| 75,164
|
def flatten(nl):
    """Flattens a nested list/tuple/set, returns list"""
    if isinstance(nl, (tuple, set)):
        nl = list(nl)
    if nl == []:  # empty input flattens to itself (don't change this check)
        return nl
    head, rest = nl[0], nl[1:]
    if isinstance(head, (list, tuple, set)):
        # Flatten the head container and the remainder separately.
        return flatten(head) + flatten(rest)
    return nl[:1] + flatten(rest)
|
1f383815cf55b0e3ff59465e6e8e2248e074dca7
| 75,165
|
def _get_azure_file_url(cli_ctx, account_name, azure_file_share):
"""Returns Azure File URL for the given account
:param str account_name: account name
:param str azure_file_share: name of the share
:return str: Azure File URL to be used in mount volumes
"""
return 'https://{0}.file.{1}/{2}'.format(account_name, cli_ctx.cloud.suffixes.storage_endpoint, azure_file_share)
|
40098eb8d4eb9d584c0b3ef0390c98b6537ec12b
| 75,166
|
def deleteTagValue(fluiddb, objectId, path):
    """
    Deletes a tag from an object.
    """
    tag = fluiddb.objects[objectId][path]
    return tag.delete()
|
f966d038314b6966a451a3277e298ce2908a5962
| 75,167
|
def map_freqs(s):
    """
    s is a non-empty string of characters.
    Returns a dictionary mapping each character in s that is a digit (0-9)
    to the number of times it occurs in s.
    """
    counts = {}
    for ch in s:
        if ch.isdigit():
            counts[ch] = counts.get(ch, 0) + 1
    return counts
|
08150866dcb45e758cab9c0761061e14eba3fc76
| 75,171
|
def on_site(domain: str) -> str:
    """
    Restrict search to a specific domain.

    Can be repeated to restrict search on multiple sites.

    :param domain: Domain name
    :return: String in the format google understands
    """
    return f"site:{domain}"
|
43f81b2acf1426fcd303f3b7723bc95a59c9474e
| 75,175
|
def rotate(string, n):
    """Rotate characters in a string.

    Expects string and n (int) for number of characters to move.
    """
    # Moving n characters from the front to the back is a slice rotation;
    # Python slicing already covers negative n and |n| >= len(string),
    # matching the original character-by-character implementation.
    return string[n:] + string[:n]
|
f1f1d0d14106996648dcc88239dde5e2520506e8
| 75,176
|
from typing import Optional
import requests
import json
def authenticate_username(
        token: str,
        github_api: str = 'https://api.github.com/graphql') -> Optional[str]:
    """Check that the token correspond to a valid GitHub username.

    Using `GitHub GraphQL API v4 <https://developer.github.com/v4/>`_

    Parameters
    ----------
    token
        GitHub token that gives read only authorization
    github_api
        URL of GitHub's API

    Return
    ------
    GitHub's username or None
    """
    reply = requests.post(
        github_api,
        json={'query': "query { viewer { login }}"},
        headers={'Authorization': f'bearer {token}'})
    if reply.status_code != 200:
        # Invalid/expired token or API failure.
        return None
    data = json.loads(reply.text)['data']
    return data['viewer']['login']
|
b8ba4b3fb2e42c27b474d364f37c6c7086450401
| 75,177
|
import json
def get_json(s):
    """
    Parse JSON from string, returning None when the string is not valid JSON.
    """
    try:
        parsed = json.loads(s)
    except ValueError:
        # json.JSONDecodeError is a ValueError subclass.
        return None
    return parsed
|
f09b4913f5373831eb157a07f615ba2068d5a19a
| 75,178
|
from functools import reduce
def pipe(*funcs):
    """Performs left-to-right function composition. The leftmost function may
    have any arity; the remaining functions must be unary.

    In some libraries this function is named sequence.
    Note: The result of pipe is not automatically curried"""
    def piped(value):
        # Thread the value through each function, left to right.
        return reduce(lambda accum, f: f(accum), funcs, value)
    return piped
|
0f9884dac25a1d60ac54925d314c2c38f8a0c18a
| 75,179
|
def xy(v0x=2.0, v0y=5.0, t=0.6):
    """Computes horizontal and vertical positions at time t"""
    g = 9.81  # acceleration of gravity
    # Uniform horizontal motion; vertical motion with constant gravity.
    horizontal = v0x*t
    vertical = v0y*t - 0.5*g*t**2
    return horizontal, vertical
|
87abba7e5414bb0966c1e423a04f5216126013b8
| 75,181
|
def has_regular_python_ext(file_name):
    """Return True when the name ends with the regular ``.py`` extension."""
    # Note that the standard library on MacOS X 10.6 is shipped only as .pyc
    # files, so we need to have them processed by the generator in order to
    # have any code insight for the standard library.
    return file_name.endswith(".py")
|
01c3d02e3f9e2c1158f2fc3d5df7749a43f42f3a
| 75,183
|
import csv
def find_unique(annotations: str) -> dict:
    """
    Loop through the annotations csv and build a dictionary containing the
    unique images and all associated rows. This is necessary as the csv
    sometimes contains multiple entries for an image, specifically when an
    image has more than 1 lesion present.

    Parameters
    ----------
    annotations : str
        path to csv file containing annotations.

    Returns
    -------
    dict
        Dictionary containing unique images. Of the form
        {Unique_image: [[row1],[row2],...,[rowN]]}
    """
    grouped = {}
    with open(annotations, "r") as csv_file:
        reader = csv.reader(csv_file)
        next(reader)  # skip the header row
        for row in reader:
            # Column 0 is the image identifier; rows sharing it are grouped.
            grouped.setdefault(row[0], []).append(row)
    return grouped
|
387871929bc7244af5cc1f5c1bba60eb92cf0dbe
| 75,184
|
def build_path(pattern, keys):
    """Replace placeholders in `pattern` to build path to be scraped.

    `pattern` will be a string like "measure/%(measure)s/ccg/%(entity_code)s/".
    """
    substitutions = {
        "practice_code": "L83100",
        "ccg_code": "15N",
        "stp_code": "E54000037",
        "regional_team_code": "Y58",
        "bnf_code": "0205051R0BBAIAN",
        "measure": "ace",
    }
    path = pattern
    for key in keys:
        if key in ["code", "entity_code"]:
            # Generic keys: infer the entity type from the pattern itself,
            # falling back to a BNF code when no entity token is present.
            subst_key = "bnf_code"
            for token in ["practice", "ccg", "stp", "regional-team"]:
                if token in pattern:
                    subst_key = "{}_code".format(token).replace("-", "_")
        else:
            subst_key = key
        path = path.replace("%({})s".format(key), substitutions[subst_key])
    assert "%" not in path, "Could not interpolate " + pattern
    return path
|
64e72a758ba4cb9c7f850cbc44e031e2bf2d9125
| 75,189
|
def is_prime(n):
    """Returns if the given number is a prime number or not
    >>> is_prime(10)
    False
    >>> is_prime(7)
    True
    """
    """BEGIN PROBLEM 1.4"""
    if n in [2, 3]:  # shortcut for if n equals 2 or 3
        return True
    if n % 2 == 0 or n < 2:  # skip even numbers, since they are not prime anyways
        return False
    # Trial-divide by odd numbers up to and including floor(sqrt(n)).
    # The old bound `range(3, round(n ** 0.5), 2)` excluded sqrt(n) itself,
    # so odd perfect squares (9, 25, 49, ...) were wrongly reported prime.
    for i in range(3, int(n ** 0.5) + 1, 2):
        if n % i == 0:  # a divisor was found, so n isn't prime
            return False
    return True
    """END PROBLEM 1.4"""
|
b0f54c17621dd02e1494cb90685cd2cde5651961
| 75,192
|
def iou(box1, box2):
    """Intersection over Union value for two (x1, y1, x2, y2) boxes."""
    # Corners of the overlap rectangle.
    ix1 = max(box1[0], box2[0])
    iy1 = max(box1[1], box2[1])
    ix2 = min(box1[2], box2[2])
    iy2 = min(box1[3], box2[3])
    if ix1 >= ix2 or iy1 >= iy2:
        # Boxes do not overlap (or only touch along an edge).
        return 0.0
    intersection = (ix2 - ix1) * (iy2 - iy1)
    union = float((box1[2] - box1[0]) * (box1[3] - box1[1])
                  + (box2[2] - box2[0]) * (box2[3] - box2[1])
                  - intersection)
    # Degenerate zero-area union falls back to 1 (same as the original).
    return intersection / union if union != 0 else 1
|
0783079dfa3c4134d7e6e2083f35b327a2b891f6
| 75,197
|
def passthru_loader(data, metadata):
    """A loader that returns the data unchanged; metadata is ignored."""
    return data
|
cfaf9c38fb59d39c187ed3578a33ecd3600b4d98
| 75,203
|
def formatContexts(contexts, declaration=True):
    """Given a list of context type names, generate a list of declarations
    for them as formal parameters (if 'declaration' is true), or
    as arguments to a function (if 'declaration' is false).
    """
    # Each context contributes one ", ..."-prefixed fragment.
    template = ", const {0}_t *{0}_ctx" if declaration else ", {0}_ctx"
    return "".join(template.format(c) for c in contexts)
|
089a4b44f45bd3e218eaeaa40279d8a83a954af1
| 75,205
|
def get_url(usr):
    """ Returns Anime List URL """
    return f'https://myanimelist.net/animelist/{usr}'
|
6225893a6f4006c12bbcbf48e650c42a999ad687
| 75,212
|
import io
import csv
def radio_id_csv() -> io.StringIO:
    """
    Generates a placeholder RadioIDList.csv
    """
    sio = io.StringIO()
    # The "d878uvii" dialect is registered elsewhere in the package.
    writer = csv.writer(sio, dialect="d878uvii")
    writer.writerow(["No.", "Radio ID", "Name"])
    # Single placeholder radio ID entry.
    writer.writerow(["1", "268000", "CT0ZZZ"])
    return sio
|
05aceba61149238e0734618eaf3ae8f59adbf8ab
| 75,229
|
import pathlib
def load_fixture(name):
    """Load a fixture file's text from the sibling ``fixtures`` directory."""
    fixtures_dir = pathlib.Path(__file__).parent / "fixtures"
    return (fixtures_dir / name).read_text()
|
41065de1fa3c3f0b5803ecd804db01fec6c354fb
| 75,232
|
import pkg_resources
def find_and_insert_user_refstate(entry_point_plugin_name='espei.reference_states', namespace=globals()):
    """Discover user reference states entered as a setuptools entry_point

    Parameters
    ----------
    entry_point_plugin_name : str
        Name of the key in the setuptools.setup entry_points dictionary.
    namespace : dict
        A dictionary that the stable reference state and lattice stabilities
        will be added to. Defaults to ``globals()``, which is this module's
        namespace.

    Notes
    -----
    By default, it will enter the data into the ``globals()`` namespace, meaning
    this module's namespace. Since ESPEI looks up reference states by something
    like ``getattr(espei.refdata, 'SGTE91Stable')``, this is usually the desired
    behavior.

    Some helpful links on how this works:
    * using package metadata entry_points: https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata
    * entry_points specification https://packaging.python.org/specifications/entry-points/
    * how to find plugins with setuptools: https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins

    Packages wanting to hook into this should add the following keyword argument
    to their setuptools.setup function call in their setup.py file:
    ``entry_points={'espei.reference_states': 'BOCK2015 = refstate'}``,
    where ``BOCK2015`` is the name of the reference state and ``refstate`` is the
    name of the module containing the dictionaries for ``BOCK2015Stable``,
    ``BOCK2015`` and ``BOCK2015SER``, which define the reference states.

    Returns
    -------
    list
        Names of the reference states that were discovered and inserted.
    """
    # NOTE: `namespace=globals()` is a mutable default evaluated once at
    # import time, so it always refers to *this* module's namespace -- per
    # the docstring, that is intentional here.
    found_refstates = []
    for entry_point in pkg_resources.iter_entry_points(entry_point_plugin_name):
        # Import the plugin module that defines the reference-state dicts.
        user_module = entry_point.load()
        refstate_name = entry_point.name
        # The NAME and NAMEStable dicts are required (AttributeError if
        # missing); NAMESER is optional and defaults to an empty dict.
        namespace[refstate_name] = getattr(user_module, refstate_name)
        namespace[refstate_name+'Stable'] = getattr(user_module, refstate_name+'Stable')
        namespace[refstate_name+'SER'] = getattr(user_module, refstate_name+'SER', {})
        found_refstates.append(refstate_name)
    return found_refstates
|
408d5ea4574e5e55ad4157d9ff833cf4fec6394b
| 75,243
|
def addr(idx: int) -> str:
    """
    Return the 10.0.0.x IP address for a given node index.
    """
    return f'10.0.0.{idx:d}'
|
2ff0d2f0a81dc1eb1a6d5690cdfecef1e5cb78d0
| 75,244
|
def pythagorus(num1, num2=0):
    """
    ### Description:
        Root-sum-of-square of two numbers.

    ### Args:
        `num1`: The first number
        `num2`: The second number, default `0`

    ### Returns:
        $$dist = \\sqrt { a^2+b^2 }$$
    """
    return (num1 * num1 + num2 * num2) ** 0.5
|
9258d876cbce37aa4c7f5e489871b2238c12d497
| 75,246
|
def hz2khz(frequency_in_hz):
    """Convert a frequency from Hz to kHz.

    Args:
        frequency_in_hz: Frequency value in Hz.

    Return:
        The frequency in kHz (true division, so the result is a float).
    """
    kilohertz = frequency_in_hz / 1000
    return kilohertz
|
178de0b7562e5155f65757dadbbf954ff64ac278
| 75,249
|
def filter_threshold(tf1_scores, tf2_scores, pref_scores, tf1_threshold, tf2_threshold):
    """
    Filter a preferences track against two NegCtrl thresholds.

    Per position i:
    - both TF scores below their thresholds        -> None (drop later)
    - pref > 0 (favors TF1) but TF1 below cutoff   -> '0'
    - pref < 0 (favors TF2) but TF2 below cutoff   -> '0'
    - otherwise the preference value is kept as-is

    :param tf1_scores: list of score strings for TF1
    :param tf2_scores: list of score strings for TF2
    :param pref_scores: list of preference-value strings for TF1 vs TF2
    :param tf1_threshold: NegCtrl threshold for TF1 scores
    :param tf2_threshold: NegCtrl threshold for TF2 scores
    :return: list of preference scores filtered by the rules above
    """
    filtered = list(pref_scores)
    for idx in range(len(tf1_scores)):
        tf1_score = float(tf1_scores[idx])
        tf2_score = float(tf2_scores[idx])
        pref_score = float(pref_scores[idx])
        if tf1_score < tf1_threshold and tf2_score < tf2_threshold:
            filtered[idx] = None  # signal: remove downstream
        elif pref_score > 0.0 and tf1_score < tf1_threshold:
            filtered[idx] = '0'  # favors TF1 but TF1 below cutoff
        elif pref_score < 0.0 and tf2_score < tf2_threshold:
            filtered[idx] = '0'  # favors TF2 but TF2 below cutoff
        # pref of exactly 0.0 (or a TF above cutoff) is kept unchanged
    return filtered
|
900a8b5c1c41e638303f0f96b89dc4e32de84512
| 75,253
|
def get_short_url(short_id):
    """Return the Lattes CV visualization URL for a given short_id."""
    prefix = ('http://buscatextual.cnpq.br/buscatextual/'
              'visualizacv.do?id=')
    return '{}{}'.format(prefix, short_id)
|
38c49e43de52775ee411f5f9c8526ee9326d045a
| 75,256
|
def dim_transform_inv(dims, mean=None, std=None):
    """ Unnormalize regression outputs to recover predicted dims.

    Inverts the generator's normalization (``(x - mean) / std``) by
    computing ``dims * std + mean`` elementwise.

    Args
        dims : np.array of shape (B, N, 3*num_classes) with normalized
               (height, width, length) values per anchor.
        mean : The mean used when normalizing dims.
        std  : The standard deviation used when normalizing dims.

    Returns
        An array of the same shape as dims, unnormalized.
    """
    return dims * std + mean
|
76a9590f619bbe767fa8324d0f945c37f081147d
| 75,262
|
def diff(x):
    """Return every pairwise difference a - b for a, b in x.

    Includes self-pairs (which contribute zeros); the result has len(x)**2
    entries, ordered by the first operand then the second.
    """
    return [a - b for a in x for b in x]
|
da0055a1a3b272a3c1355820ca4d9fbd5d814f0e
| 75,264
|
import re
def replace_table_name_with_query(table_name: str, table_query: str, original_query: str):
    """
    Replace backtick-quoted `rj-smtr...table_name` references in
    original_query with the parenthesized table_query.

    Returns a (rewritten_query, match_count) tuple.
    """
    pattern = re.compile(r" ?`?rj-smtr.*{}`? ?".format(table_name))
    match_count = len(pattern.findall(original_query))
    rewritten = pattern.sub(f"({table_query})", original_query)
    return rewritten, match_count
|
31ee8b69499a3626fe5062451ae4d3466090d7ce
| 75,267
|
def filter_output_fields(configs):
    """Project each config dict down to the keys the CloudWatch agent
    config file requires, dropping everything else."""
    desired_keys = ("log_stream_name", "file_path", "timestamp_format", "log_group_name")
    projected = []
    for config in configs:
        projected.append({key: config[key] for key in desired_keys})
    return projected
|
0ac5f68799f4170becf88049f4608da02bda6a56
| 75,269
|
async def remove_level_role(bot, guild_id, level):
    """Delete the leveling role for (guild_id, level), if one exists.

    Returns a human-readable status message either way.
    """
    params = {"guild_id": guild_id, "level": level}
    existing = await bot.level_db.fetch_one(
        "SELECT * FROM leveling_roles WHERE guild_id=:guild_id AND level=:level",
        params,
    )
    if not existing:
        return f"No level role found on level `{level}`."
    await bot.level_db.execute(
        "DELETE FROM leveling_roles WHERE guild_id=:guild_id AND level=:level",
        params,
    )
    return "Level role removed."
|
129495ffd5a0be426747395f74f6a19293592aa1
| 75,273
|
def append(data, value):
    """Append value to a list, in place.

    :param data: List to append to (mutated).
    :param value: Value to append.
    :returns: The same list, with value appended.
    """
    data += [value]  # in-place extend; identical effect to list.append
    return data
|
db9a9030dca80aecd10fb7e6fb2e6673a73298ae
| 75,274
|
def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    If maybe_callable is callable, evaluate it as maybe_callable(obj,
    **kwargs); otherwise return maybe_callable unchanged.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if not callable(maybe_callable):
        return maybe_callable
    return maybe_callable(obj, **kwargs)
|
08bf973e22c34235fccfbb1d8728b23a0d8efbc9
| 75,276
|
def clarke_error_zone_detailed(act, pred):
    """
    This function outputs the Clarke Error Grid region (encoded as integer)
    for a combination of actual and predicted value

    Zones are encoded 0-8: 0=A, 1=B-lower, 2=B-upper, 3=C-lower, 4=C-upper,
    5=D-left, 6=D-right, 7=E-right-lower, 8=E-left-upper. The checks are
    order-dependent: each test assumes the earlier zones were already
    excluded, and zone B is the fall-through default.

    Based on 'Evaluating clinical accuracy of systems for self-monitoring of blood glucose':
    https://care.diabetesjournals.org/content/10/5/622
    """
    # Zone A: both hypoglycemic (<70) or within 20% relative error
    if (act < 70 and pred < 70) or abs(act - pred) < 0.2 * act:
        return 0
    # Zone E - left upper: actual low but predicted high
    if act <= 70 and pred >= 180:
        return 8
    # Zone E - right lower: actual high but predicted low
    if act >= 180 and pred <= 70:
        return 7
    # Zone D - right: high actual, prediction misses the danger
    if act >= 240 and 70 <= pred <= 180:
        return 6
    # Zone D - left (chained comparison: act <= 70 and 70 <= pred <= 180)
    if act <= 70 <= pred <= 180:
        return 5
    # Zone C - upper: overcorrection boundary (pred at least act + 110)
    if 70 <= act <= 290 and pred >= act + 110:
        return 4
    # Zone C - lower: line pred = (7/5)*act - 182 (float division)
    if 130 <= act <= 180 and pred <= (7/5) * act - 182:
        return 3
    # Zone B - upper: remaining over-predictions
    if act < pred:
        return 2
    # Zone B - lower: everything else (under-predictions outside A/C/D/E)
    return 1
|
83ddd6d5df4515c274cc4fa919a8716d6d08d4e6
| 75,278
|
def summary(task):
    """Given an ImportTask, produce a short string identifying the
    object.
    """
    if task.is_album:
        template = u'Album {1} by artist: {0}'
        artist = task.cur_artist or 'Unknown'
        title = task.cur_album or 'Unknown'
    else:
        template = u'Track {1} by artist: {0}'
        artist = task.item.artist or 'Unknown'
        title = task.item.title or 'Unknown'
    return template.format(artist, title)
|
601e988cc45686284e26ab849519e4cf608edfa4
| 75,282
|
import requests
def auth(
    username=None, password=None, apikey=None, refresh_token=None, iam_endpoint=None
):
    """
    Request an IAM token (and IAM refresh token) from the IAM API.

    Exactly one credential style must be supplied: username/password,
    apikey, or refresh_token (checked in the order apikey, refresh_token,
    username/password).

    :param username: Username
    :param password: Password
    :param apikey: IBMCloud/Bluemix API Key
    :param refresh_token: IBM IAM Refresh Token; when given, it is used to
        authenticate instead of the API key
    :param iam_endpoint: optional base URL overriding the default IAM
        endpoint (e.g. for testing against an internal server)
    :return: parsed JSON dict on HTTP 200, otherwise the raw response text
    :raises ValueError: when no usable credentials were supplied
    """
    # Normalize the endpoint: apply the default, then strip one trailing "/".
    endpoint = iam_endpoint or "https://iam.cloud.ibm.com/"
    if endpoint.endswith("/"):
        endpoint = endpoint[:-1]
    token_url = endpoint + "/oidc/token"
    # "Yng6Yng=" is base64("bx:bx"), the public IAM client credentials.
    headers = {"Authorization": "Basic Yng6Yng=", "Accept": "application/json"}
    payload = {
        "response_type": "cloud_iam",
        "uaa_client_id": "cf",
        "uaa_client_secret": "",
    }
    if apikey:
        payload["grant_type"] = "urn:ibm:params:oauth:grant-type:apikey"
        payload["apikey"] = apikey
    elif refresh_token:
        payload["grant_type"] = "refresh_token"
        payload["refresh_token"] = refresh_token
    elif username and password:
        payload["grant_type"] = "password"
        payload["username"] = username
        payload["password"] = password
    else:
        raise ValueError(
            "Must specify one of username/password, apikey, or refresh_token!"
        )
    response = requests.post(token_url, data=payload, headers=headers)
    if response.status_code == 200:
        return response.json()
    return response.text
|
35147496500f80860a0066fed4a4b3a811fa2763
| 75,285
|
def vec3_to_list(vec):
    """
    Convert any indexable with len >= 3 into a list of its first three
    elements.

    :type vec: iterable
    :rtype: list
    """
    return [vec[i] for i in range(3)]
|
a0834c6e25b6a7517ab8fc2af30b78c4bb155ae3
| 75,286
|
def makeRecordClass(db, record_class, field_names):
    """Generate a subclass of record_class bound to a Base instance,
    carrying the field names and their types looked up in db.fields."""
    names = list(field_names)

    class _Record(record_class):
        pass

    _Record.db = db
    _Record.fields = names
    _Record.types = [db.fields[name] for name in names]
    return _Record
|
b03d1d44632633025b4a64dec37c36d88114b647
| 75,288
|
def dis_to_nearest_gene(region, genes, distance_cutoff=10000):
    """Compute the distance of a specified region to the nearest gene TSS.

    Parameters
    ----------
    region : GenomicRegion
        GenomicRegion object (its ``start`` attribute is used).
    genes : list
        List of genes on that chromosome (each with ``tss`` and ``strand``).
    distance_cutoff : int
        Maximal distance cutoff.

    Returns
    -------
    int or None
        A signed distance, positive if the region is located at the
        downstream of the nearest gene and vice versa (the sign is flipped
        for '-'-strand genes). None if no gene lies within the cutoff.
    """
    # BUG FIX: the original stored the *signed* distance in the running
    # minimum but compared it against abs(tmp_dis); once a negative
    # (upstream) distance was recorded, every later comparison failed and
    # closer genes were silently skipped. Track the absolute minimum
    # separately from the signed value.
    best_abs = distance_cutoff
    signed_dis = None
    target_gene = None
    for gene in genes:
        tmp_dis = region.start - gene.tss
        if abs(tmp_dis) < best_abs:
            best_abs = abs(tmp_dis)
            signed_dis = tmp_dis
            target_gene = gene
    if target_gene is None:
        return None
    if target_gene.strand == '-':
        # Signed value: positive for downstream, negative for upstream,
        # so flip the sign on the minus strand.
        signed_dis = -signed_dis
    return signed_dis
|
41f6d9ab2a0c730698d6abf8455a9bc6791d84cd
| 75,295
|
from pathlib import Path
def get_version() -> str:
    """
    Get the version of a2j by reading the VERSION file in the current
    working directory.

    :return: Version of a2j, stripped of surrounding whitespace.
    :rtype: str
    """
    # read_text() opens, reads, and closes the file in one call; the
    # original's explicit file.close() inside the `with` block was
    # redundant (the context manager already closes it).
    return (Path.cwd() / "VERSION").read_text().strip()
|
c8e508e6230a205221844cb8c1e891e6f7c51bf5
| 75,296
|
from typing import List
def find_substr_in_list(
    substr: str, strs: List[str], start_pos: int = 0, exact: bool = False
) -> int:
    """Return the index (>= start_pos) of the first string in strs that
    equals substr (exact=True) or contains it (exact=False); -1 if none."""
    for index, candidate in enumerate(strs):
        if index < start_pos:
            continue
        matched = (substr == candidate) if exact else (substr in candidate)
        if matched:
            return index
    return -1
|
7f6348b3c39523171839bf2c62f6be2d66db0b40
| 75,301
|
def split_factory2(delimiter: str = ',', cast_to: type = list):
    """
    Build a splitter that separates text on *delimiter* (which may be
    multi-character), honoring backslash escapes. Whitespace is preserved.

    *-------------------------------------------------------------*
    ESCAPING ESCAPES *ONLY* A SINGLE CHARACTER AFTER ESCAPE SYMBOL.
    IF YOU HAVE TO ESCAPE MORE THAN ONE CHARACTER THEN ESCAPE EVERY
    SINGLE ONE INDIVIDUALLY.
    *-------------------------------------------------------------*

    :param str delimiter: separates individual objects
    :param type cast_to: container type for the returned items
    :return function: a function that does the separation
    """
    def wrapper(text, remove_empty: bool = False) -> list:
        """
        Split *text* on the factory's delimiter.

        :param str text: text to process
        :param bool remove_empty: drop empty items when True
        :return: split items, cast to *cast_to*
        """
        result = []

        def emit(item: str):
            # Respect remove_empty: drop only empty-string items.
            if item or not remove_empty:
                result.append(item)

        chars = list(text)
        current = ''
        dlen = len(delimiter)
        while chars:
            char = chars.pop(0)
            if char == '\\':
                # Escape consumes exactly one following character verbatim.
                # A trailing lone backslash is kept as-is (the original
                # raised IndexError on it).
                current += char + (chars.pop(0) if chars else '')
            elif char == delimiter[0]:
                # BUG FIX: the original list_to_str() helper *overwrote*
                # (res = item) instead of concatenating, so delimiters of
                # length >= 3 could never match; join the lookahead properly.
                if char + ''.join(chars[:dlen - 1]) == delimiter:
                    emit(current)
                    current = ''
                    del chars[:dlen - 1]
                else:
                    # BUG FIX: the original silently dropped a delimiter
                    # prefix char that did not complete a full delimiter.
                    current += char
            else:
                current += char
        emit(current)
        return cast_to(result)
    return wrapper
|
3e5683a751ecde8768d6b8b0ecb3ad37c0effe60
| 75,304
|
def get_rc(re):
    """
    Return the reverse complement of a DNA/RNA RE.

    Handles IUPAC ambiguity codes; U complements to A.
    """
    complement_table = str.maketrans('ACGTURYKMBVDHSWN', 'TGCAAYRMKVBHDSWN')
    complemented = re.translate(complement_table)
    return complemented[::-1]
|
2889d242ad8d3218a66ec52e7f22f00bc2aeff77
| 75,316
|
def is_weekend(d_or_dt):
    """Return True when a date/datetime falls on Saturday or Sunday
    (ISO weekday 6 or 7)."""
    return d_or_dt.isoweekday() >= 6
|
e495543b5c4f25fedcbfb785445a27699e94d6a9
| 75,317
|
def _flat(commands):
"""Flatten commands while preserving order"""
commands = [cmd for cmd in commands if cmd is not None]
flattened = []
for command in commands:
if isinstance(command, str):
flattened.append(command)
elif isinstance(command, list):
flattened.extend(command)
else:
raise TypeError('Invalid command: %s' % str(command))
return flattened
|
bb53b112a89ae60ad5afee075df3a2baf39c7ad7
| 75,318
|
def get_dict_val(dictionary: dict, key_list: list = None):
    """
    Traverse `dictionary` along the key path in `key_list` and return the
    value at the end of it, or None when the path does not exist.

    Each entry of `key_list`, left to right, descends one nesting level.

    Parameters:
    ----------
    - dictionary (dict) : the dictionary object to traverse
    - key_list (list) : keys identifying the nested value; None (the
      default) is treated as an empty path and returns `dictionary` itself

    Returns:
    ----------
    - key value (if present) or None (if not present)

    Raises:
    ----------
    - TypeError : if `dictionary` is not a dict or `key_list` is not a list

    Examples:
    ---------
    >>> d = {"a": 1, "b": {"c": 2, "d": 5}}
    >>> get_dict_val(d, ["b", "c"])
    2
    >>> get_dict_val(d, ["b", "k"]) is None
    True
    """
    if not isinstance(dictionary, dict):
        raise TypeError("`dictionary` must be of type `dict`")
    # None default instead of the original `key_list=[]`, avoiding the
    # shared-mutable-default-argument pitfall.
    if key_list is None:
        key_list = []
    if not isinstance(key_list, list):
        raise TypeError("`key_list` must be of type `list`")
    current = dictionary
    for key in key_list:
        # If we are no longer looking at a dict, the path goes deeper than
        # the nesting allows.
        if not isinstance(current, dict):
            return None
        if key not in current:
            return None
        current = current[key]
    return current
|
4bcff4d35e81a34ff23f47e5311de5b4985743af
| 75,319
|
def det_to_sci(image, detid):
    """Detector to science orientation

    Reorient image from detector coordinates to the 'sci' coordinate
    system. This places +V3 up and +V2 to the LEFT. Detector pixel (0,0)
    is assumed to be in the bottom left. Simply performs axes flips.

    Parameters
    ----------
    image : ndarray
        Input image to transform.
    detid : int or str
        NIRCam detector/SCA ID, either 481-490 or A1-B5.
    """
    # Accept either the numeric SCA ID (481-490) or the short name (A1-B5).
    try:
        detid = int(detid)
    except ValueError:
        detname = detid
    else:
        scaids = {481: 'A1', 482: 'A2', 483: 'A3', 484: 'A4', 485: 'A5',
                  486: 'B1', 487: 'B2', 488: 'B3', 489: 'B4', 490: 'B5'}
        detname = scaids[detid]
    # Direct membership tests replace the original per-element substring
    # scans (`if detname in s` over each list entry), which were only
    # equivalent by accident of every name being exactly two characters.
    if detname in ('A1', 'A3', 'A5', 'B2', 'B4'):
        image = image[:, ::-1]  # flip along the x axis
    if detname in ('A2', 'A4', 'B1', 'B3', 'B5'):
        image = image[::-1, :]  # flip along the y axis
    return image
|
32f36c76cffc01e1edcd086722eff949489b756e
| 75,322
|
def num_labeled_samples(y, y_pred):
    """
    Number of labeled samples for a given set of model predictions.

    Arguments:
        - y: label array (unused; kept for interface compatibility)
        - y_pred: array of label predictions for which ground truth is known

    Returns:
        - sum of y_pred, i.e. the count of positive predictions
    """
    return y_pred.sum()
|
4944042bdb45f698a1ae30b28ea6707c34685af7
| 75,324
|
def clean_list(data_list: list) -> list:
    """Returns a list with any none values removed

    NOTE: ``filter(None, ...)`` drops every *falsy* element — None, 0,
    0.0, "", False, and empty containers — not only None. Callers that
    need to keep falsy values such as 0 or "" should not use this helper.

    Args:
        data_list (list): The list to be cleaned
    Returns (list): The list cleaned of None values.
    """
    return list(filter(None, data_list))
|
ec329fcaf25ef8f84479519ac7b79c0f51e8e625
| 75,326
|
def _bool_argument(s: str):
"""
Parse an argument that should be a bool
"""
if isinstance(s, bool):
return s
# Copying FastAPI booleans:
# https://fastapi.tiangolo.com/tutorial/query-params
return s.strip().lower() in ("1", "true", "on", "yes")
|
5012ca6cde89c6c662e96fca2d4eefb23f5fd5e3
| 75,327
|
import importlib
def import_string(import_name: str):
    """
    Import and return an object described by a "module:object" string.

    The module name is separated from the object name with ":", e.g.
    "linuguee_api.downloaders:HTTPXDownloader".
    """
    if ":" not in import_name:
        raise RuntimeError(
            f'{import_name} must separate module from object with ":". '
            f'For example, "linguee_api.downloaders:HTTPXDownloader"'
        )
    module_name, object_name = import_name.rsplit(":", 1)
    module = importlib.import_module(module_name)
    return getattr(module, object_name)
|
af3d9014acb99c359bf5c313075a163096f1bd2e
| 75,328
|
import json
def SendRequestToServer(url, http_client, post_data=None):
  """Sends GET/POST request to arbitrary url and returns response content.

  Args:
    url (str): The url to send the request to.
    http_client (HttpClient): The httpclient object with which to make the
      server calls.
    post_data (dict): Data/params to send with the request, if any.

  Returns:
    content (dict), error (dict): the content from the server and the last
    error encountered trying to retrieve it (None on success).
  """
  headers = {}
  # The original had two separate `if post_data` blocks (one to serialize,
  # one to dispatch); merged so the serialization and the POST stay together.
  if post_data:
    post_data = json.dumps(post_data, sort_keys=True, separators=(',', ':'))
    headers['Content-Type'] = 'application/json; charset=UTF-8'
    headers['Content-Length'] = len(post_data)
    status_code, content, _response_headers = http_client.Post(
        url, post_data, headers=headers)
  else:
    status_code, content, _response_headers = http_client.Get(
        url, headers=headers)
  if status_code == 200:
    # Also return the last error encountered (None on success) to be
    # handled in the calling code.
    return content, None
  # The retry upon 50x (501 excluded) is automatically handled in the
  # underlying http_client, which by default retries 5 times with
  # exponential backoff.
  return None, {
      'code': status_code,
      'message': 'Unexpected status code from http request'
  }
|
0b8aa1dc734a5d4a27f26e145749f1dcb4c38b8d
| 75,329
|
def span(data, **kwargs):
    """
    Compute the difference of largest and smallest data point.

    Extra keyword arguments are accepted (for interface compatibility)
    but ignored.
    """
    smallest, largest = min(data), max(data)
    return largest - smallest
|
885cb57cc337c3ed19137da601d43a0ce0f0b750
| 75,330
|
def avgFriendDegree(v, G):
    """ Calculate the average degree of the neighbors of a node"""
    total = sum(G.degree(u) for u in G.neighbors(v))
    return total / G.degree(v)
|
436466406f6ee61c452f410112881af51c93ceed
| 75,336
|
def cwt_synthesis(wavelet_matrix, mean=0):
    """Synthesize a signal from a wavelet dataset.

    Parameters
    ----------
    wavelet_matrix: ndarray
        The wavelet data matrix (summed across its first axis).
    mean: float
        Offset added to the reconstructed signal.

    Returns
    -------
    arraylike
        The generated signal.
    """
    # Summing the rows collapses all wavelet scales; add the mean back in.
    return sum(wavelet_matrix) + mean
|
62ecc30a190dd5be0b03406ef839ceb3f09ba647
| 75,344
|
def stripiter(target: list):
    """Strip surrounding whitespace from the string form of every element.

    Each element is passed through str() first, so non-string items are
    accepted.

    :param target: iterable of items to normalize
    :return: list of stripped strings
    """
    # .strip() replaces the original .rstrip().lstrip() chain — identical
    # result in a single call.
    return [str(item).strip() for item in target]
|
440ce39a3027e3bebff7fec12b10761742dbc075
| 75,345
|
import re
def get_pattern(delimiter, s):
    """
    Get the pattern info between delimiters from the string
    """
    pattern = re.compile("%s(.*?)%s" % (delimiter, delimiter))
    return pattern.findall(s)
|
9233549de43a9c7d8d19240d0eaf7076fc9ce5a8
| 75,349
|
import torch
def dbbox2roi(dbbox_list):
    """
    Convert a per-image list of dbboxes to droi format.

    :param dbbox_list: (list[Tensor]) dbboxes for each image in a batch
    :return: Tensor of shape (n, 6): [batch_ind, x_ctr, y_ctr, w, h, angle]
    """
    per_image = []
    for img_id, dbboxes in enumerate(dbbox_list):
        n = dbboxes.size(0)
        if n == 0:
            # No boxes for this image: contribute an empty (0, 6) chunk.
            per_image.append(dbboxes.new_zeros((0, 6)))
        else:
            # Prepend the image index as the batch column.
            img_col = dbboxes.new_full((n, 1), img_id)
            per_image.append(torch.cat([img_col, dbboxes[:, :5]], dim=-1))
    return torch.cat(per_image, 0)
|
d4dbe984d814ec5b68ec4269d03bd8c907c83ba3
| 75,350
|
def find_n_kickers(cards, used, n):
    """
    Finds the top n kickers that are not already used.

    Args:
        cards(list(str)): A list of card strings sorted by ordinal value.
        used(set(str)): A set of strings that have been used.
        n(int): The number of cards to find.

    Returns:
        list(str): A list with the top n cards that are not used, or None
        when fewer than n unused cards are available (the original
        returned None implicitly by falling off the end of the loop;
        that behavior is preserved but made explicit here).
    """
    kickers = []
    # Cards are pre-sorted highest to lowest, so take them in order.
    for card in cards:
        if card in used:
            continue
        kickers.append(card)
        if len(kickers) == n:
            return kickers
    # Not enough unused cards to satisfy the request.
    return None
|
ec58bba8f791ff3f87ecbda336edefc999007892
| 75,351
|
def get_git_repo_name_from_url(url: str) -> str:
    """
    Get the git repository name from the url

    :param url: The git repository url (either https or ssh)
    :return: The repository name
    """
    name = url.split("/")[-1]
    # Strip only a trailing ".git" suffix. The original used
    # split(".git")[0], which also truncated repository names merely
    # *containing* ".git" (e.g. "my.gitops" -> "my").
    if name.endswith(".git"):
        name = name[:-len(".git")]
    return name
|
b85b6473c814f88ce62d5bd0beb7fa6ff92a4a2c
| 75,353
|
def _consume_single_get(response_iterator):
"""Consume a gRPC stream that should contain a single response.
The stream will correspond to a ``BatchGetDocuments`` request made
for a single document.
Args:
response_iterator (~google.cloud.exceptions.GrpcRendezvous): A
streaming iterator returned from a ``BatchGetDocuments``
request.
Returns:
~google.cloud.proto.firestore.v1beta1.\
firestore_pb2.BatchGetDocumentsResponse: The single "get"
response in the batch.
Raises:
ValueError: If anything other than exactly one response is returned.
"""
# Calling ``list()`` consumes the entire iterator.
all_responses = list(response_iterator)
if len(all_responses) != 1:
raise ValueError(
"Unexpected response from `BatchGetDocumentsResponse`",
all_responses,
"Expected only one result",
)
return all_responses[0]
|
34e7d1226ac09423c29c9e22972c2ec4e8b34635
| 75,361
|
def index_date_formatter(base, date):
    """
    Build the time-based index name for the given base and date.

    :param str base: the index's base name
    :param datetime.datetime date: the date to format the index for
    :rtype: str
    :return: "<base>-YYYY.MM.DD" with zero-padded month and day
    """
    return f'{base}-{date.year}.{date.month:02d}.{date.day:02d}'
|
44d966e4c2170665e666dfe8934e681bc0cbc05a
| 75,362
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.