content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def unit_at(x, y, units):
    """
    Return the unit occupying position (x, y), or None when the square is empty.
    """
    occupants = (u for u in units if u.x == x and u.y == y)
    return next(occupants, None)
def map_module(mod):
    """Map module names as needed (the reserved word 'lambda' gets an alias)."""
    return "awslambda" if mod == "lambda" else mod
def read_file(filename, default=None):
    """
    Read the contents of a file, returning default if the file cannot be read.
    :param filename: name of file to read
    :type filename: str
    :param default: value to return if file cannot be read
    :return: contents of the file or default
    """
    try:
        # with-statement guarantees the handle is closed, replacing the
        # original's manual try/finally/close dance.
        with open(filename, "r", encoding="latin-1") as fh:
            return fh.read()
    except EnvironmentError:
        # EnvironmentError is an alias of OSError: missing file, bad
        # permissions, etc.  Fall back to the caller-supplied default.
        return default
def is_bookmarks_folder(node):
    """True if current node is the folder ('ytmp3') where YouTube links are stored."""
    try:
        return node["name"] == "ytmp3"
    except KeyError:
        # node has no "name" entry at all
        return False
def clean(tagged):
    """Extract the .text of each tagged element, collapse newline characters
    to spaces, and strip surrounding whitespace."""
    cleaned = []
    for element in tagged:
        text = element.text
        # NOTE(review): '\n\n+' is a *literal* replace, not a regex —
        # preserved as-is; plain '\n' below catches all real newlines.
        text = text.replace('\n\n+', ' ')
        text = text.replace('\n', ' ')
        text = text.replace('\r', ' ')
        cleaned.append(text.strip())
    return cleaned
import json
def load(
    action_verb_json: str = 'cb_scripts/data/json/action_verbs.json',
    key: str = 'ACTION_VERBS'
):
    """Load the entries stored under ``key`` in a JSON file.

    Bug fix: the ``key`` parameter was previously ignored and the lookup was
    hard-coded to 'ACTION_VERBS'; it is now honoured. The default value keeps
    existing callers working unchanged.

    :param action_verb_json: path to the JSON file
    :param key: top-level key to return from the parsed JSON
    :raises KeyError: if ``key`` is not present in the file
    """
    with open(action_verb_json, 'r') as raw_data:
        data = json.load(raw_data)
    return data[key]
from typing import List
from typing import Tuple
import math
def split_to_ranges(vector_lenght: int, range_count: int) -> List[Tuple[int, int]]:
    """
    Split [0, vector_lenght) into range_count half-open chunks [a, b).
    When there are more chunks than elements, the surplus chunks are (0, 0).
    Example: split_to_ranges(10, 3) -> [(0, 4), (4, 8), (8, 10)]
    """
    chunk = math.ceil(vector_lenght / range_count)
    ranges = []
    start = 0
    for _ in range(range_count):
        if start >= vector_lenght:
            # more ranges requested than elements available
            ranges.append((0, 0))
            continue
        stop = min(start + chunk, vector_lenght)
        ranges.append((start, stop))
        start = stop
    return ranges
def _listify(x):
"""
If x is not a list, make it a list.
"""
return list(x) if isinstance(x, (list, tuple)) else [x] | 79b114f952901a48fa19ed9a4c7430d7dc835659 | 115,262 |
def boxline_(D, X, L, U):
    """This routine finds the smallest t>=0 for which X+t*D hits the box [L, U].

    Parameters
    ----------
    D = [dbl] [n-by-1] Direction
    L = [dbl] [n-by-1] Lower bounds
    X = [dbl] [n-by-1] Current Point (assumed to live in [L, U])
    U = [dbl] [n-by-1] Upper bounds

    Returns
    ----------
    t = [dbl] Smallest t>=0 for which X+t*D hits a constraint; 1 if no
        constraint is hit for t<1.
    """
    t = 1
    # zip replaces index arithmetic; nested if/else flattened to elif.
    for d, x, lo, hi in zip(D, X, L, U):
        if d > 0:
            t = min(t, (hi - x) / d)
        elif d < 0:
            t = min(t, (lo - x) / d)
        # d == 0: this coordinate never hits a bound
    return t
def get_num_skips(parent, child):
    """
    Number of intermediate nodes between a parent and a child
    (0 when they are the same node).
    """
    if parent == child:
        return 0
    # path includes the endpoints' hops; skips = hops - 1
    return len(parent.get_path(child)) - 1
def will_reciprocate(data_dict, index):
    """True when post *index* contains a pay-it-forward style promise.

    data_dict is a dictionary of parallel lists; entry *index* of each list
    belongs to the same post.
    """
    haystacks = (data_dict["title"][index], data_dict["body"][index])
    phrases = ("pay it forward", "return the favor", "reciprocate")
    return any(p in h for p in phrases for h in haystacks)
from typing import OrderedDict
def dict_2_OrderedDict(d):
    """Convert a standard dict to an OrderedDict with keys in sorted order.

    An empty OrderedDict is returned when d is None.
    """
    od = OrderedDict()
    if d is not None:
        for key in sorted(d):
            od[key] = d[key]
    return od
from typing import List
def element_before(element: int, list_: List[int]) -> int:
    """Return the element immediately before ``element`` in ``list_``.

    :raises ValueError: if ``element`` is not in the list or is its first
        item. (The old docstring promised ``None`` here, but callers have
        always received a ValueError; the documentation now matches the
        actual behaviour.)
    """
    if element not in list_ or element == list_[0]:
        raise ValueError("No element before {} in {}".format(element, list_))
    # index() returns the first occurrence, consistent with the membership test
    index = list_.index(element)
    return list_[index - 1]
def _tree_query_parallel_helper(tree, *args, **kwargs):
"""Helper for the Parallel calls in KNeighborsMixin.kneighbors.
The Cython method tree.query is not directly picklable by cloudpickle
under PyPy.
"""
return tree.query(*args, **kwargs) | 3df121013c9cf1bc5a361d8eef04c7d9f981ac50 | 115,284 |
from functools import reduce
def lazy_reduce(f, xs):
    """Fold ``xs`` with ``f`` via functools.reduce.

    NOTE(review): despite the name, this evaluates eagerly exactly like
    functools.reduce — "lazy" appears to be historical; confirm with callers.
    """
    folded = reduce(f, xs)
    return folded
def read_file(file_name):
    """Return the entire contents of a file as a single string.

    Arguments
    :param file_name: A path to a file
    """
    with open(file_name, 'r') as handle:
        contents = handle.read()
    return contents
import re
def format_slack_text(text: str):
    """Escape Slack mrkdwn control characters (&, <, >) in *text*.

    Per https://api.slack.com/reference/surfaces/formatting#escaping only
    ``&``, ``<`` and ``>`` must be escaped; the negative lookahead leaves
    already-escaped entities (&amp; &lt; &gt;) untouched.

    Bug fix: the replacement strings had been corrupted into the literal
    characters themselves (making the function a no-op); the HTML entities
    are restored.
    """
    text = re.sub(r"&(?!(amp|lt|gt);)", "&amp;", text)
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text
def iso_time_format(datetime_):
    """Format a datetime as 'YYYY-MM-DDTHH:MM:SS.mmmZ' (millisecond precision)."""
    millis = datetime_.microsecond // 1000
    return '%04d-%02d-%02dT%02d:%02d:%02d.%03dZ' % (
        datetime_.year, datetime_.month, datetime_.day,
        datetime_.hour, datetime_.minute, datetime_.second, millis)
def grid_traveller(m: int, n: int) -> int:
    """
    :param m: number of rows in grid
    :param n: number of columns in grid
    :return: number of ways to reach the bottom-right of the grid from the
        top-left corner

    A path is a sequence of (m-1) down-moves and (n-1) right-moves, so the
    count is C(m+n-2, m-1). This replaces the original exponential
    recursion (and also fixes infinite recursion on negative inputs).
    """
    if m <= 0 or n <= 0:
        # either dimension empty (or invalid): no way to travel
        return 0
    import math  # local import keeps the module interface unchanged
    return math.comb(m + n - 2, m - 1)
def rectangles_from_histogram(H):
    """Largest Rectangular Area in a Histogram
    :param H: histogram table
    :returns: area, left, height, right, rect. is [0, height] * [left, right)
    :complexity: linear
    """
    best = (float('-inf'), 0, 0, 0)
    # Stack of (left, height) pairs: currently-open rectangles whose heights
    # strictly increase from bottom to top of the stack.
    S = []
    H2 = H + [float('-inf')]  # extra element to empty the queue
    for right in range(len(H2)):
        x = H2[right]
        left = right
        # A bar of height x closes every open rectangle at least as tall as x.
        while len(S) > 0 and S[-1][1] >= x:
            left, height = S.pop()
            # first element is area of candidate
            rect = (height * (right - left), left, height, right)
            # tuples compare lexicographically, so area decides first
            if rect > best:
                best = rect
        # Reopen a rectangle of height x, extending back to the leftmost
        # position just closed.
        S.append((left, x))
    return best
import re
def is_comment(s):
    """Checks if a string looks like a comment line (first non-whitespace
    char is #).

    :param s: string to check
    :return: True/False
    """
    # str methods replace the old regex; behaviour is identical, including
    # the empty-string and whitespace-only cases (both False).
    return s.lstrip().startswith('#')
def parse_comments(commentThread):
    """
    Extract data from a YouTube API v3 commentThread resource.

    Parameters
    ----------
    commentThread : JSON object of strings
        The comment resource requested from YouTube API v3

    Returns
    ----------
    comment_dict : dictionary of strings
        Dictionary containing comment text, author, date and no. of likes
    """
    snippet = commentThread['snippet']['topLevelComment']['snippet']
    # collapse newlines/tabs/carriage returns into spaces
    text = snippet['textOriginal']
    for whitespace in ("\n", "\t", "\r"):
        text = text.replace(whitespace, " ")
    return {
        'text': text,
        'author': snippet['authorDisplayName'],
        'date': snippet['publishedAt'],
        'likes': snippet['likeCount'],
    }
def indent(text, count=1, prefix=" "):
    """Indent the given text, line by line, with count copies of prefix.

    Args:
        text (str): string to indent (line by line).
        count (number): number of indents.
        prefix (str): the prefix of the indent.
    Returns:
        str. the indented text.
    """
    pad = prefix * count
    return "\n".join(pad + line for line in text.split("\n"))
def from_datastore(entity):
    """Translates Datastore results into the format expected by the
    application.

    Datastore typically returns:
        [Entity{key: (kind, id), prop: val, ...}]
    This returns:
        [ name, street, city, state, zip, open_hr, close_hr, phone, drink, rating, website ]
    where name, street, city, state, open_hr, close_hr, phone, drink, and
    website are Python strings and zip and rating are Python integers.

    Returns None for a falsy entity (None, empty list, ...).
    """
    if not entity:
        return None
    if isinstance(entity, list):
        # Bug fix: read the last element instead of pop()-ing it, so the
        # caller's list is no longer mutated as a side effect.
        entity = entity[-1]
    return [entity[k] for k in (
        'name', 'street', 'city', 'state', 'zip', 'open_hr', 'close_hr',
        'phone', 'drink', 'rating', 'website')]
def calculate_strong_beats(beats_in_bar, grouping):
    """
    Compute which beats count as strong given a bar length and grouping.
    E.g. in 7/4 with grouping 2,2,3 the strong beats are 1, 3 and 5.

    :param beats_in_bar: integer. The number of beats in a bar.
    :param grouping: list of integers. The beat grouping chosen by the user.
    """
    strong_beats = [1]
    for group_size in grouping:
        # advance from the last *accepted* strong beat
        candidate = strong_beats[-1] + group_size
        if candidate < beats_in_bar:
            strong_beats.append(candidate)
    return strong_beats
def compute_stats(parameter_list):
    """Return (average, max, min) for a data parameter list.

    :raises ValueError: if parameter_list is empty (previously this
        surfaced as a less helpful ZeroDivisionError).
    """
    if not parameter_list:
        raise ValueError("parameter_list must not be empty")
    avg_param = sum(parameter_list) / len(parameter_list)
    max_param = max(parameter_list)
    min_param = min(parameter_list)
    return avg_param, max_param, min_param
def _is_mode_line(line):
"""
`ls` output starts a category with a separate line like `Vertex Types:`, we need to recognize this line
to know what follows it.
"""
return line.endswith(":") | 80934c6b7d745d4427a7d1b19c22cd463a7e89de | 115,322 |
def _warning_on_one_line(message, category, filename, lineno, file=None,
line=None):
"""Formats warning messages to appear on one line."""
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message) | ff680712ba371da248dbba19c7891645268c9aa2 | 115,323 |
def f_to_c(temperature):
    """
    Convert a temperature in Fahrenheit to Celsius.
    """
    offset = temperature - 32
    return offset * 5 / 9
import itertools
def whitelist_barcodes(whitelist):
    """
    Calculate allowable barcodes from whitelist.
    Finds all 1-N substitutions that uniquely map to whitelist barcodes.
    Parameters
    ----------
    whitelist : str
        Path to file containing whitelist barcodes
    Returns
    -------
    dict
        Dictionary where keys are possible barcodes and values are whitelisted
        barcodes to map to.
    """
    barcodes = {}     # final result: candidate barcode -> whitelist barcode
    mapping = {}      # single-substitution variants that map to exactly one barcode
    conflicting = {}  # variants reachable from >1 whitelist barcode (ambiguous)
    # iterate through lines of whitelist barcodes
    with open(whitelist, 'r') as f:
        n_line = 0
        for line in f:
            n_line += 1
            # NOTE(review): 147456 appears to assume a fixed whitelist length
            # for the progress readout — confirm against the actual file.
            print("{:0.2f}".format((n_line) / 147456), flush=True, end='\r')
            bc = line.strip()
            # map barcode to barcode
            barcodes[bc] = bc
            # find sequences with a single substitution
            for i, base in itertools.product(range(len(bc)),
                                             ['A', 'T', 'C', 'G', 'N']):
                # check if substitution possible
                if bc[i] != base:
                    sub_bc = bc[:i] + base + bc[i + 1:]
                    if sub_bc not in conflicting and sub_bc not in mapping:
                        mapping[sub_bc] = bc
                    elif sub_bc in mapping:
                        # second barcode reaches the same variant: ambiguous
                        conflicting[sub_bc] = ''
                        mapping.pop(sub_bc)
            # iterate through mapped barcodes, don't keep if substitution matches
            # whitelist barcode
    print('\n')
    print(f"mapping size: {len(mapping)}")
    print(f"conflicting substitutions: {len(conflicting)}")
    for k, v in mapping.items():
        if k not in barcodes:
            barcodes[k] = v
    return barcodes
def get_wiki_arxiv_url(wiki_dump_url, href):
    """Return the full URL of a .bz2 archive given the dump base URL and its href."""
    return '/'.join((wiki_dump_url, href))
def get_role_arn(user_arn, role, account_id=None):
    """Build a role ARN string from a role name and, optionally, an
    account ID.

    If role is None or empty, '' is returned, which signals to the mfa
    method that no role should be assumed.

    Arguments:
        user_arn: Arn returned from a call to iam_client.get_user()
        role: the role name
        account_id: AWS account ID
    Returns:
        The ARN as a string.
    """
    if not role:
        return ''
    if account_id:
        # strip the account-id and resource parts, then substitute account_id
        prefix = user_arn.rsplit(':', 2)[0]
        return '{0}:{1}:role/{2}'.format(prefix, account_id, role)
    # keep the caller's account id, swap only the resource part
    prefix = user_arn.rsplit(':', 1)[0]
    return '{0}:role/{1}'.format(prefix, role)
def is_html(record):
    """Return True if the (detected) MIME type of a record is HTML."""
    html_types = ('text/html', 'application/xhtml+xml')
    # prefer the WARC-identified payload type when present
    if ('WARC-Identified-Payload-Type' in record.rec_headers and
            record.rec_headers['WARC-Identified-Payload-Type'] in html_types):
        return True
    # fall back to the HTTP content-type header
    content_type = record.http_headers.get_header('content-type', None)
    if content_type:
        return any(html_type in content_type for html_type in html_types)
    return False
def set_file_name(fig_name, fmt):
    """
    Join a base name and an extension into "fig_name.fmt".

    Parameters
    ----------
    fig_name : str
        base name string
    fmt : str
        extension of the figure (e.g. png, pdf, eps)

    Returns
    --------
    filename : str
        a string of the form "fig_name.fmt"

    Example
    -------
    set_file_name("figure", "jpg") returns "figure.jpg"
    """
    # validation order preserved: both type checks first, then both lengths
    if not isinstance(fig_name, str):
        raise TypeError("fig_name must be a string.")
    if not isinstance(fmt, str):
        raise TypeError("fmt must be a string.")
    if not fig_name:
        raise ValueError("fig_name must contain at least one character.")
    if not fmt:
        raise ValueError("fmt must contain at least one character.")
    return "{}.{}".format(fig_name, fmt)
#################### | 6fa7f6cf61ce2686e3b2b3ddc9f123730d899f60 | 115,361 |
def inverse(mat):  # pylint: disable=R1710
    """Calculate the inverse of a matrix (GLSL-style ``inverse``).

    Args:
        mat (:class:`taichi.Matrix`): The matrix of which to take the inverse.
    Returns:
        Inverse of the input matrix.
    """
    inverted = mat.inverse()
    return inverted
def usgs_lead_url_helper(*, year, **_):
    """
    Pick the USGS lead-data workbook URL for the given data year.

    :param year: data year (int or numeric string)
    :return: list with a single URL to call, concat, parse, format into
        Flow-By-Activity format
    """
    base = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
            'palladium/production/')
    data_year = int(year)
    if data_year < 2013:
        suffix = 'atoms/files/myb1-2016-lead.xls'
    elif data_year < 2014:
        suffix = 'atoms/files/myb1-2017-lead.xls'
    else:
        suffix = 's3fs-public/media/files/myb1-2018-lead-advrel.xlsx'
    return [base + suffix]
def get_player_url(playerid):
    """
    URL of the NHL API page containing information for the specified player.

    :param playerid: int, the player ID
    :return: str, https://statsapi.web.nhl.com/api/v1/people/[playerid]
    """
    return 'https://statsapi.web.nhl.com/api/v1/people/' + str(playerid)
from typing import List
from typing import Set
def send_probe(wfg: List[List[int]], visited: Set[int], initiator: int, source: int, destination: int) -> bool:
    """Run through the complete graph to check for cycles
    Args:
        wfg (List[List[int]]): The wait for graph in form of a 2D mtrix of ints
        visited (set): Set to denote which sites have been probed
        initiator (int): The original initiator for the probe
        source (int): The current probe's source site
        destination (int): The current probe's destination site
    Returns:
        bool: Is there a cycle between initiator and destination
    """
    tmp = False
    # Probes are reported 1-based for display purposes.
    print(f"Probe: ({initiator+1}, {source+1}, {destination+1})")
    ## Check all sites wrt to the dest
    for site, is_wait in enumerate(wfg[destination]):
        ## If the dest is waiting for a site
        if site not in visited and site != destination and is_wait == 1:
            visited.add(site)
            ## If we find a probe with initiator same as destination,
            ## Return True
            # NOTE(review): this check does not depend on `site`, yet it is
            # evaluated inside the loop and only once a waited-on site is
            # found — confirm this placement is intended.
            if initiator == destination:
                print("Deadlock detected")
                return True
            ## Otherwise, send a new probe to the new site
            ## from destination to the site its waiting for
            tmp = send_probe(wfg, visited, initiator, destination, site)
            if tmp:
                return True
    ## If all sites are exhausted,
    ## Return False
    return tmp
def output_html(lst, header=(), style=""):
    """
    Output the I{lst} as an html table with I{header} items
    and the applied I{style}
    @param lst: The list of items to be shown
    @type lst: list
    @param header: The table header (default changed from a shared mutable
        [] to an immutable (); behaviour is identical)
    @type header: iterable
    @param style: The style to apply to the table
    @type style: string
    """
    s = " <table border=1 style='%s'> <tr> " % style
    for item in header:
        s += "<td><b>%s</b></td>" % item
    s += "</tr>"
    # NOTE(review): no opening <tr> is emitted before the data cells; the
    # markup is preserved exactly for output compatibility.
    for item in lst:
        s += "<td><pre>%s</pre></td>" % item
    s += "</tr></table>"
    return s
def if_then(arg, clause):
    """Factored-out ternary: the clause applies only when arg is not None."""
    if arg is None:
        return True
    return clause
def groupby_type(estimators):
    """
    Count how many estimators there are of each estimator class.

    :param estimators: list of estimators (not necessarily trained)
    :return: a dictionary of estimator class name as key and frequency as value
    """
    unique_classes = {}
    for estimator in estimators:
        clf_name = estimator.__class__.__name__
        # dict.get avoids rebuilding the key list on every iteration
        unique_classes[clf_name] = unique_classes.get(clf_name, 0) + 1
    return unique_classes
def list_check(listA, listB):
    """
    Return 1 if the two lists share any element (by ==), else 0.

    a = [1, 2, 3]
    b = [4, 5, 6]
    list_check(a, b) == 0
    b = [3, 4, 5]
    list_check(a, b) == 1
    """
    # any() short-circuits on the first match; the original kept scanning
    # the full cross-product even after finding one.
    return 1 if any(a in listB for a in listA) else 0
def final_temp(t_i, p_f, p_i, gamma):
    """
    Final temperature of an adiabatic expansion.

    :param t_i: initial temperature (K)
    :param p_f: total final pressure
    :param p_i: total initial pressure
    :param gamma: heat capacity ratio ("adiabaattivakio")
    :return: final temperature (K)
    """
    exponent = (gamma - 1) / gamma
    pressure_ratio = p_f / p_i
    return t_i * pressure_ratio ** exponent
import re
def is_integer(number):
    """Return True if str(number) is an optionally-signed integer literal.

    Bug fix: the old pattern ``^\\-*?\\d$`` only matched single digits
    (so is_integer(12) was False) and accepted repeated minus signs.
    """
    pattern = r"^-?\d+$"
    return bool(re.match(pattern, str(number)))
def filter_values(pred, d):
    """ Returns a new dict with only those k, v pairs where pred(v) holds.
    >>> D = {'a': 1, 'b': 2, 'c': 3}
    >>> odd = lambda x: x % 2 != 0
    >>> filter_values(odd, D)
    {'a': 1, 'c': 3}
    """
    # Bug fix: dict.iteritems() was removed in Python 3; items() + a dict
    # comprehension is the modern equivalent.
    return {k: v for k, v in d.items() if pred(v)}
from typing import List
def filter_result_by_tag(image_data: list, tag: str) -> List:
    """
    Keep only the images whose "tags" list contains the given tag.

    :param image_data: list of dict. the raw data from TID repo
    :param tag: the tag to apply filter
    :return: list of dict. filtered dict
    """
    matching = []
    for image in image_data:
        if tag in image["tags"]:
            matching.append(image)
    return matching
def spherical(lag, range1, sill):  # , nugget=0):
    """
    Spherical variogram model assuming nugget = 0.

    Args:
        lag: float or array of lags as # of pixels/cells
        range1 (float): range of spherical model
        sill (float): sill of spherical model
    Returns:
        semivariance as float or array, depending on type(lag)
    """
    nugget = 0.
    eff_range = range1 / 1.
    ratio = lag / eff_range
    out = nugget + sill * ((1.5 * ratio) - (0.5 * (ratio ** 3.0)))
    # beyond the range the model plateaus at nugget + sill
    if isinstance(out, float):
        if lag > eff_range:
            out = nugget + sill
    else:
        out[lag > eff_range] = nugget + sill
    return out
def combine(batch):
    """Chain each command as a subshell with && under `set -x`.

    >>> combine(['ls -l', 'echo foo'])
    'set -x; ( ls -l; ) && ( echo foo; )'
    """
    wrapped = ['( %s; )' % (cmd,) for cmd in batch]
    return 'set -x; ' + ' && '.join(wrapped)
def resolve_diff(diff):
    """Find the shortest BF code to change a cell by `diff`, using wrap-around
    (mod 256): increments for small positive deltas, decrements otherwise."""
    diff %= 256
    return "+" * diff if diff < 128 else "-" * (256 - diff)
def ScalarMultiply(val, name='unnamed'):
    # language=rst
    """
    Multiply an input by a constant scalar value
    :param val - The constant value
    """
    def init_fun(key, input_shape):
        # a pure scaling layer has no trainable parameters or state
        return name, input_shape, (), ()

    def apply_fun(params, state, inputs, **kwargs):
        scaled = inputs * val
        return scaled, state

    return init_fun, apply_fun
def agg_concat(s):
    """
    Join the given strings with semicolons.
    :param s: iterable of strings to join
    :return: joined string
    """
    separator = ';'
    return separator.join(s)
import inspect
def call_with_args(element, method_name, program, segment):
    """Invoke ``element.<method_name>``, passing ``program`` and/or
    ``segment`` only when the method declares parameters of those names.
    """
    method = getattr(element, method_name)
    accepted = inspect.signature(method).parameters
    kwargs = {key: value
              for key, value in (("program", program), ("segment", segment))
              if key in accepted}
    return method(**kwargs)
def collect_consecutive_values(seq):
    """Given a sequence of values, output a list of pairs (v, n) where v is a
    value and n is the number of consecutive repetitions of that value.

    Example:
    >>> collect_consecutive_values([53, 92, 92, 92, 96, 96, 92])
    [(53, 1), (92, 3), (96, 2), (92, 1)]
    """
    # itertools.groupby is the stdlib run-length encoder; the original
    # rebuilt equality lists and re-sliced the sequence per run (quadratic).
    from itertools import groupby
    return [(val, sum(1 for _ in run)) for val, run in groupby(seq)]
def _fill_histogram(idf, hist, features):
    """Fill input histogram with column(s) of input dataframe.
    Separate function call for parallellization.
    :param idf: input data frame used for filling histogram
    :param hist: empty histogrammar histogram about to be filled
    :param list features: histogram column(s)
    """
    # histogram name is the colon-joined feature list, e.g. "x:y"
    name = ":".join(features)
    # single feature -> scalar column selection; multiple -> list selection
    clm = features[0] if len(features) == 1 else features
    # do the actual filling
    hist.fill.numpy(idf[clm])
    return name, hist
def compare_list_of_committees(list1, list2):
    """Check whether two lists of committees are equal when the order (and
    multiplicities) in these lists are ignored.

    To be precise, two lists are equal if every committee in list1 is
    contained in list2 and vice versa.
    Committees are, as usual, sets of positive integers.

    Parameters
    ----------
    list1, list2 : iterable of sets

    Raises
    ------
    TypeError
        if any committee is not a set (previously an ``assert``, which is
        silently stripped when Python runs with -O).
    """
    for committee in list1 + list2:
        if not isinstance(committee, set):
            raise TypeError("committees must be sets of positive integers")
    return all(committee in list1 for committee in list2) and all(
        committee in list2 for committee in list1
    )
import math
def normalised_payoff(payoff_matrix, turns):
    """
    The per-turn averaged payoff matrix and its standard deviations.

    Parameters
    ----------
    payoff_matrix : list
        A matrix with one row per player; each row holds one element per
        opponent (in player-index order) listing payoffs per repetition.
    turns : integer
        The number of turns in each round robin.

    Returns
    -------
    tuple of (list, list)
        Per-turn averaged payoff matrix and matching standard deviations.
    """
    repetitions = len(payoff_matrix[0][0])
    averages = []
    stddevs = []
    for row in payoff_matrix:
        row_means = []
        row_devs = []
        for reps in row:
            per_turn = [rep / turns for rep in reps]
            mean = sum(per_turn) / repetitions
            variance = sum((mean - p) ** 2 for p in per_turn) / repetitions
            row_means.append(mean)
            row_devs.append(math.sqrt(variance))
        averages.append(row_means)
        stddevs.append(row_devs)
    return averages, stddevs
import json
def update_permissions_for_annotation(
        gc, annotation_id=None, annotation=None,
        groups_to_add=None, replace_original_groups=True,
        users_to_add=None, replace_original_users=True):
    """Update permissions for a single annotation.
    Parameters
    ----------
    gc : gider_client.GirderClient
        authenticated girder client instance
    annotation_id : str
        girder id of annotation
    annotation : dict
        overrides annotation_id if given
    groups_to_add : list
        each entry is a dict containing the information about user groups
        to add and their permission levels. A sample entry must have the
        following keys
        - level, int -> 0 (view), 1 (edit) or 2 (owner)
        - name, str -> name of user group
        - id, st -> girder id of user group
    replace_original_groups : bool
        whether to replace original groups or append to them
    users_to_add : list
        each entry is a dict containing the information about user
        to add and their permission levels. A sample entry must have the
        following keys
        - level, int -> 0 (view), 1 (edit) or 2 (owner)
        - login, str -> username of user
        - id, st -> girder id of user
    replace_original_users
        whether to replace original users or append to them
    Returns
    -------
    dict
        server response
    """
    # normalise mutable-default-style arguments
    groups_to_add = [] if groups_to_add is None else groups_to_add
    users_to_add = [] if users_to_add is None else users_to_add
    if annotation is not None:
        annotation_id = annotation['_id']
    elif annotation_id is None:
        raise Exception(
            "You must provide either the annotation or its girder id.")
    # get current permissions
    current = gc.get('/annotation/%s/access' % annotation_id)
    # add or replace as needed
    if replace_original_groups:
        current['groups'] = []
        current_group_ids = []
    else:
        current_group_ids = [j['id'] for j in current['groups']]
    if replace_original_users:
        current['users'] = []
        current_user_ids = []
    else:
        current_user_ids = [j['id'] for j in current['users']]
    # append only entries not already present (dedupe by girder id)
    for group in groups_to_add:
        if group['id'] not in current_group_ids:
            current['groups'].append(group)
    for user in users_to_add:
        if user['id'] not in current_user_ids:
            current['users'].append(user)
    # now update accordingly
    # BAD WAY!! -- do NOT do this!
    # return gc.put('/annotation/%s/access?access=%s' % (
    #     annotation_id, json.dumps(current)))
    # PROPER WAY: send the access document in the request body, not the URL
    return gc.put('/annotation/%s/access' % annotation_id, data={
        'access': json.dumps(current)})
import hashlib
def _file_key(file_path):
"""Converts a file path into a key for storing the file's committed contents in the _fragments/ directory."""
return hashlib.sha256(('%s:%s' % (__package__, file_path)).encode('utf8')).hexdigest() | 5403888556125ea245220c5f79c15bb364e159c2 | 115,429 |
def tile_h_2(current_state, goal_state):
    """
    Sum of Manhattan distances from every tile to its goal position.

    Performance fix for the "silly slow" original: the goal grid is scanned
    once into a position map instead of being re-scanned for every tile.
    Assumes tiles are unique (as in a sliding puzzle); the first occurrence
    wins, matching the original scan order.

    :param current_state: object with a 2D ``state`` grid
    :param goal_state: object with a 2D ``state`` grid
    :return: total Manhattan distance (int)
    """
    goal_pos = {}
    for gy, grow in enumerate(goal_state.state):
        for gx, item in enumerate(grow):
            goal_pos.setdefault(item, (gx, gy))
    count = 0
    for y, row in enumerate(current_state.state):
        for x, item in enumerate(row):
            if item in goal_pos:
                gx, gy = goal_pos[item]
                count += abs(x - gx) + abs(y - gy)
    return count
def linearRGBtoSRGB(color):
    """Convert a grayscale (single channel) value from linear to sRGB.

    Input is expected in [0, 1]; values above 1 clamp to 1.
    """
    if color > 1:
        return 1
    if color < 0.00313:
        # linear segment near black
        return color * 12.92
    # gamma segment
    return (((color ** (1 / 2.4)) * 1.055) - 0.055)
def cartesian(netx):
    """
    Node coordinates of a :py:class:`networkx.Graph`-style object as a list
    of dicts:
    [{'node': <node id>,
      'x': <x position>,
      'y': <y position>}]
    :param netx: graph object exposing a ``pos`` mapping
    :return: coordinates
    :rtype: list
    """
    coords = []
    for node in netx.pos:
        x_pos, y_pos = netx.pos[node][0], netx.pos[node][1]
        coords.append({'node': node, 'x': float(x_pos), 'y': float(y_pos)})
    return coords
def build_cgi_environ(wsgi_environ, git_project_root, user=None):
    """Build a CGI environ from a WSGI environment:
    CONTENT_TYPE
    GIT_PROJECT_ROOT = directory containing bare repos
    PATH_INFO (if GIT_PROJECT_ROOT is set, otherwise PATH_TRANSLATED)
    QUERY_STRING
    REMOTE_USER
    REMOTE_ADDR
    REQUEST_METHOD

    The git_project_root parameter must point to a directory that contains
    the git bare repo designated by PATH_INFO. The git repo (my-repo.git)
    is located at GIT_PROJECT_ROOT + PATH_INFO (if GIT_PROJECT_ROOT is
    defined) or at PATH_TRANSLATED.

    If REMOTE_USER is set in wsgi_environ, you should normally leave user
    alone.
    """
    # keep only string-valued entries from the WSGI environ (file-like
    # objects, sockets etc. are meaningless in a CGI environment)
    cgi_environ = {key: value for key, value in wsgi_environ.items()
                   if isinstance(value, str)}
    cgi_environ['GIT_HTTP_EXPORT_ALL'] = '1'
    cgi_environ['GIT_PROJECT_ROOT'] = git_project_root
    if user:
        cgi_environ['REMOTE_USER'] = user
    cgi_environ.setdefault('REMOTE_USER', 'unknown')
    return cgi_environ
import re
def extract_adif_column(adif_file, column_name):
    """
    Extract data column from ADIF file (e.g. 'OPERATOR' column).

    Parameters
    ----------
    adif_file: file object
        ADIF file opened using open().
    column_name: str
        Name of column (e.g. OPERATOR).

    Returns
    -------
    matches: list of str or None
        List of values extracted from the ADIF file, or None if the column
        was not found on any line. The file is rewound to the start.
    """
    # Bug fix: the pattern pieces are now raw strings, so '\d' is a real
    # regex digit class instead of an invalid string escape (a
    # DeprecationWarning, and an error in future Python versions).
    pattern = re.compile(
        r'^.*<' + column_name + r':\d+>([^<]*)<.*$', re.IGNORECASE)
    matches = [re.match(pattern, line) for line in adif_file]
    matches = [line[1].strip() for line in matches if line is not None]
    adif_file.seek(0)  # leave the file reusable for further extractions
    if len(matches) > 0:
        return matches
    else:
        return None
from typing import Optional
import random
def get_invert(invert: Optional[bool]) -> bool:
    """
    Automatically generate an invert value if missing (None).
    If invert is not missing, then simply return the input value.
    """
    if invert is None:
        # random.choice is the idiomatic (and cheaper) way to pick one item;
        # the old random.sample([...], 1)[0] built a list to do the same.
        invert = random.choice([True, False])
    return invert
import struct
def get_int(encoded: bytearray, idx: int) -> tuple:
    """
    Decode a big-endian signed 32-bit integer at ``idx``.

    :param encoded: bytearray
    :param idx: index to start reading from
    :return: tuple of (value, index of the next unread byte)
    """
    end = idx + 4
    (value,) = struct.unpack(">i", encoded[idx:end])
    return value, end
from typing import OrderedDict
def filter_threshold_dict_from_args(args):
    """Map external CLI option names onto the consistent internal
    {min|max}_{Isovar property} naming scheme.

    :param args: parsed argument namespace exposing the CLI attributes
    :return: OrderedDict of threshold name -> threshold value
    """
    return OrderedDict([
        ("min_ratio_alt_to_other_fragments", args.min_ratio_alt_to_other_fragments),
        ("min_fraction_alt_fragments", args.min_alt_rna_fraction),
        ("min_num_alt_fragments", args.min_alt_rna_fragments),
        ("min_num_alt_reads", args.min_alt_rna_reads),
    ])
import functools
def BitOr(x):
    """bitwise OR: same as BIT_OR in SQL."""
    # Fold the iterable left-to-right with |, mirroring SQL's BIT_OR aggregate.
    return functools.reduce(lambda left, right: left | right, x)
def separate_name_from_title(title_and_name: str) -> str:
    """
    Return just the name portion of a title-plus-name string.

    Parameters
    ----------
    title_and_name: str
        e.g. "Mayor Ted Wheeler" — the first word is presumed to be
        the title (Mayor, Commissioner, ...).

    Returns
    -------
    name: str
        Input with the first word removed, e.g. "Ted Wheeler".
        If there is no space, the whole string is returned unchanged.
    """
    parts = title_and_name.split(" ", 1)
    # With a title present, parts[-1] is everything after the first space;
    # with no space at all, it is the original string.
    return parts[-1]
def generate_url(year, month, day):
    """Build the Medium archive URL for a given date (month/day zero-padded)."""
    return f"https://towardsdatascience.com/archive/{year}/{month:0>2}/{day:0>2}"
import math
def calculate_player_wins_fight(player, boss):
    """Decide whether the player would win a fight against the boss.

    Returns True if the player wins, False if the boss wins.
    The player always attacks first, so a tie in required rounds
    goes to the player. Damage per hit is floored at 1.
    """
    dmg_to_boss = max(1, player.damage - boss.armor)
    dmg_to_player = max(1, boss.damage - player.armor)
    rounds_player_needs = math.ceil(boss.hit_points / dmg_to_boss)
    rounds_boss_needs = math.ceil(player.hit_points / dmg_to_player)
    # Player attacks first, so needing the same number of rounds suffices.
    return rounds_player_needs <= rounds_boss_needs
def create_starter_script(user, is_master):
    """
    Create starter bash script.

    :param user: user name whose home directory holds the ROS env file
    :param is_master: True for the master starter, False for the slave
    :return: script contents as a single newline-joined string
    """
    role = 'master' if is_master else 'slave'
    lines = [
        '#!/bin/bash',
        '',
        f'source /home/{user}/.ros/env/distributed_ros.bash',
        f'rosrun distributed_system_upstart distributed_ros_{role}',
    ]
    return '\n'.join(lines)
def get_valid_keys(target, prefix):
    """Lists the valid keys to be used on specific proto"""
    # Strip the "<prefix>_" marker from each key, then drop UNSPECIFIED.
    stripped = [key.replace(f'{prefix}_', '') for key in target.keys()]
    return [item for item in stripped if item != 'UNSPECIFIED']
def get_word(word):
    """Return the word text (first element) of a word/syllable tuple."""
    text = word[0]
    return text
import re
def tile_filter(sequence_id, filter_list):
    """ Takes a sequence identifier - first line of the four lines for each
    sequence in a FASTQ file (begins with a @) - and a list of tiles to be
    removed, and returns True if the tile is in the list and False if the
    tile is not in the list. Assumes Casava 1.8 and later format, where the
    tile is the fifth field (index 4) of the colon/space-delimited
    identifier.
    """
    parsed = re.split('[ :]+', sequence_id)
    # `in` already yields the boolean we want; no if/else needed.
    return parsed[4] in filter_list
def integer_kth_root(n: int, k: int) -> int:
    """
    Returns the integer kth root of n >= 0 - the integer m satisfying
    m**k <= n < (m + 1)**k.

    Parameters:
        n: int (n >= 0)
        k: int (k >= 1)

    Raises:
        ValueError: if n < 0 or k < 1.

    Examples:
        >>> integer_kth_root(10, 2)
        3
        >>> integer_kth_root(121, 2)
        11
        >>> integer_kth_root(1000, 3)
        10
    """
    if n < 0:
        raise ValueError("integer_kth_root: must have n >= 0.")
    if k < 1:
        # NOTE: the original message claimed k >= 2, but the guard (and the
        # algorithm, which simply returns n for k == 1) accepts k >= 1.
        raise ValueError("integer_kth_root: must have k >= 1.")
    if n <= 1:
        return n
    # Initial guess 2**(bit_length//k + 1) is guaranteed >= the true root.
    x = 2**((n.bit_length())//k + 1)
    # Integer Newton iteration: strictly decreasing until the fixed point.
    while True:
        y = ((k - 1)*x + n//x**(k - 1))//k
        if y < x:
            x = y
        else:
            break
    return x
def replace_dict_values(d, replace_keys):
    """Return a copy of ``d`` with selected values replaced.

    ``replace_keys`` is a dictionary mapping key -> value_to_display,
    e.g. replace_keys = {'key': 'value_to_display'}.
    Keys absent from ``d`` are ignored; ``d`` itself is not modified.
    """
    d_hidden = d.copy()
    for key, replace_value in replace_keys.items():
        # Membership test on the dict directly; `.keys()` is redundant.
        if key in d_hidden:
            d_hidden[key] = replace_value
    return d_hidden
def groupID_callback(x):
    """
    image_name = 1_1-1256_1264_2461_2455-12003110450161(_1).jpg
    big_image_name = 12003110450161
    """
    # Drop the extension, keep the segment after the last '-', then
    # strip any trailing '_<n>' suffix.
    stem = x['info']['image_path'].split('.')[-2]
    last_dash_part = stem.split('-')[-1]
    return last_dash_part.split('_')[0]
def is_tiein_survey(sheet):
    """Is this worksheet a "tie-in survey"?"""
    # Normalize the sheet name ("Tie-In", "tie-in", "TieIn", ...) and compare.
    normalized = sheet.name.replace('-', '').lower()
    return normalized == 'tiein'
def memoize(f):
    """ Cache function results, similar to py3.7 functools.lru_cache """
    cache = {}
    def helper(*args):
        # EAFP: hit the cache first, compute and store on miss.
        try:
            return cache[args]
        except KeyError:
            cache[args] = f(*args)
            return cache[args]
    return helper
import math
def DF(mat, r):
    """
    DF(mat, r) - Discount factor, for a given maturity (years) and zero rate r.

    :param mat: float (or list of floats)
    :param r: float (or equally-sized list of floats)
    :return: float (or list of floats)

    For example, the discount factor for a 5% zero rate after one year
    (rounded to 4 decimal places):

    >>> round(DF(1., .05), 4)
    0.9524

    List inputs: maturities 0 - 5 years, flat zero rate of 5%

    >>> mats = [float(i) for i in range(0, 6)]
    >>> zerorates = [.05] * 6
    >>> [round(x, 4) for x in DF(mats, zerorates)]
    [1.0, 0.9524, 0.907, 0.8638, 0.8227, 0.7835]
    """
    # `type(...) is list` (not isinstance) preserved: only a plain list
    # triggers the element-wise branch.
    if type(mat) is list:
        return [math.pow(1. + zero_rate, -maturity)
                for maturity, zero_rate in zip(mat, r)]
    return math.pow(1. + r, -mat)
def pk(y_true, y_pred, k):
    """
    Function to calculate precision at k.

    :param y_true: list of values, actual classes
    :param y_pred: list of values, predicted classes
    :param k: the value for k
    :return: precision at given value k; 0.0 when there are no
        predictions in the top-k (avoids ZeroDivisionError)
    """
    # We are only interested in the top-k predictions.
    top_k = y_pred[:k]
    if not top_k:
        # Nothing to score against.
        return 0.0
    # Count predicted values that are actually relevant.
    common_values = set(top_k).intersection(set(y_true))
    # Divide once by the (already truncated) prediction count; the
    # original re-sliced y_pred[:k] a second time redundantly.
    return len(common_values) / len(top_k)
def _set_wavefield_save_strategy(requires_grad, dt, inner_dt, scalar_wrapper):
"""Decides which of the source wavefield saving strategies to use.
The source wavefield must be saved for backpropagation if model gradients
required. The C code provides multiple ways of doing this, which are
applicable in different situations.
Args:
requires_grad: Boolean specifying whether model gradients are required
dt: The time interval between source samples
inner_dt: The time interval between time steps of the wave propagator
scalar_wrapper: The object that contains enum values for the strategies
Returns:
An enum value specifying which strategy to use
"""
if requires_grad:
wavefield_save_strategy = scalar_wrapper.STRATEGY_COPY
else:
wavefield_save_strategy = scalar_wrapper.STRATEGY_NONE
return wavefield_save_strategy | 0d884c768827f409545fa855470f618e3a1ce711 | 115,503 |
def apply_subs_map(line, map):
    """
    Do string replacements on line indicated by map
    """
    # NOTE: parameter name `map` shadows the builtin, but it is part of
    # the public keyword interface, so it is kept.
    result = line
    for old, new in map.items():
        result = result.replace(old, new)
    return result
def substitute_params(sql, sql_params):
    """
    Substitute SQL dict of parameter values.

    Args:
        sql : sql statement containing ``{name}`` placeholders
        sql_params : dict of parameters to substitute

    Returns:
        string containing SQL with parameters substituted in
    """
    for name in sql_params:
        # format(name) mirrors the original '{0}'.format(name) coercion.
        placeholder = '{' + format(name) + '}'
        sql = sql.replace(placeholder, sql_params[name])
    return sql
import click
def output_option(func):
    """Click decorator used for output option, shared by several commands."""
    # Build the reusable --output/-o option once, then apply it to func.
    decorate = click.option(
        "--output",
        "-o",
        default=None,
        type=click.Choice(["yaml", "json"]),
        help="Output data using yaml or json format",
    )
    return decorate(func)
def _get_wanted_channels(wanted_sig_names, record_sig_names, pad=False):
"""
Given some wanted signal names, and the signal names contained in a
record, return the indices of the record channels that intersect.
Parameters
----------
wanted_sig_names : list
List of desired signal name strings
record_sig_names : list
List of signal names for a single record
pad : bool, optional
Whether the output channels is to always have the same number
of elements and the wanted channels. If True, pads missing
signals with None.
Returns
-------
wanted_channel_inds
"""
if pad:
return [record_sig_names.index(s) if s in record_sig_names else None for s in wanted_sig_names]
else:
return [record_sig_names.index(s) for s in wanted_sig_names if s in record_sig_names] | b933d953c2258cc40c19a7312548d6d1237df5e4 | 115,515 |
def is_service(interface):
    """Return `True` if `interface` is a service (has Request and Response)."""
    return all(hasattr(interface, attr) for attr in ('Request', 'Response'))
def is_almost_white(im_array, threshold=200):
    """
    Tell if pixels of an image are white or not.

    im_array : the image represented as a numpy array in RGB (width*height*3)
    threshold: the threshold above which a pixel is considered as white
    Return : a numpy boolean mask of size width*height
    """
    # A pixel is "almost white" when all three channels exceed the threshold.
    red, green, blue = (im_array[:, :, channel] > threshold for channel in range(3))
    return red & green & blue
def card_rank(card):
    """Return the card's rank (its first character/element)."""
    rank = card[0]
    return rank
def workspace_folder(tmpdir_factory):
    """Create a temporary folder to perform tests from."""
    workspace = tmpdir_factory.mktemp("workspace")
    return str(workspace)
def get_bounding_boxes(img, kernel_size, stride):
    """Gives all bounding boxes covering an image with a sliding window.

    Args:
        img (np.array or torch.tensor): 2-D image
        kernel_size (int): size of the (square) kernel
        stride (int): stride of the sliding window motion

    Returns:
        list of (top, bottom, left, right) bounding boxes. If the sliding
        window does not cover the whole image, an extra window ending at
        the edge of each dimension is appended.
    """
    height, width = img.shape

    def _window_starts(full_extent):
        # int(.../stride) truncation kept to match the original semantics.
        count = int((full_extent - kernel_size) / stride) + 1
        starts = [step * stride for step in range(0, count)]
        # Ensure the final window reaches the edge of the dimension.
        if starts[-1] + kernel_size != full_extent:
            starts.append(full_extent - kernel_size)
        return starts

    row_starts = _window_starts(height)
    col_starts = _window_starts(width)
    # (top, bottom, left, right) for every row/column start combination.
    return [(top, top + kernel_size, left, left + kernel_size)
            for top in row_starts for left in col_starts]
from typing import Callable
import inspect
def _get_func_str(f: Callable) -> str:
"""Returns the name of a function including the argument names.
:param f: The function of which the name should be returned
:return: The name of the function f
"""
f_name = str(f).split(' ')[1]
f_args = str(inspect.getfullargspec(f).args) \
.replace('[', '(') \
.replace(']', ')') \
.replace('\'', '')
return f_name + f_args | b14a27132740f24d0756956b19bbfea90caf6b31 | 115,532 |
def non_empty(sequence):
    """Test if a given sequence is non-empty."""
    return len(sequence) != 0
from typing import List
from typing import Dict
import importlib
def get_package_versions(
    packages: List[str], *, na_str="not available"
) -> Dict[str, str]:
    """Try to load the given python packages and report their versions.

    Args:
        na_str (str): Text to return if a package is not available

    Returns:
        dict: Dictionary with version for each package name, in sorted
        package-name order
    """
    def _version_of(package_name: str) -> str:
        try:
            module = importlib.import_module(package_name)
        except ImportError:
            return na_str
        return module.__version__  # type: ignore

    return {name: _version_of(name) for name in sorted(packages)}
from typing import OrderedDict
def extract_metadata_from_frame(frame):
    """Extract event metadata from an I3 frame.

    Parameters
    ----------
    frame : icetray.I3Frame
        Frame exposing an 'I3EventHeader' entry with run_id/event_id.

    Returns
    -------
    event_meta : OrderedDict
        Keys are 'run' and 'event_id'.
    """
    header = frame['I3EventHeader']
    event_meta = OrderedDict()
    event_meta['run'] = header.run_id
    event_meta['event_id'] = header.event_id
    return event_meta
import ast
def gen_subscript(var_name, index):
    """ Generates AST for code like variable[1] """
    target = ast.Name(id=var_name, ctx=ast.Load())
    position = ast.Index(value=ast.Num(n=index, kind=""))
    return ast.Expr(value=ast.Subscript(value=target, slice=position,
                                        ctx=ast.Load()))
def three_measurement_window_sum(list_int):
    """
    Calculate the sums of all three-measurement sliding windows in the list.
    This is part of the answer to the second puzzle.

    Parameters:
        list_int (list): list of measurements (integers)

    Returns:
        new_list (list): list of sums of all three-measurement sliding
        windows (integers). Empty when fewer than three measurements exist.
    """
    # zip over three staggered views yields each consecutive triple.
    return [sum(triple)
            for triple in zip(list_int, list_int[1:], list_int[2:])]
def gen_reg (class_names):
    """ Generates registration calls for functions with the given class names.

    Args:
        class_names (list of str): The class names.

    Returns:
        str: The registration calls, comma-separated and one per line.
    """
    calls = [f'new {class_name}()' for class_name in class_names]
    # Join with the same ",\n " separator the accumulating loop produced
    # (no trailing comma on the final entry).
    return ',\n '.join(calls)
from pathlib import Path
def cookiecutter_path(tmp_path: Path) -> Path:
    """Create and return a 'cookiecutter' directory under tmp_path."""
    cookiecutter_dir = tmp_path / "cookiecutter"
    cookiecutter_dir.mkdir()
    return cookiecutter_dir
def offline_ap_processing(offline_aps: list):
    """
    Args:
        offline_aps: list of offline APs from response.json()['list']

    Returns:
        list of formatted "Hostname / MAC / IP" strings, one per offline AP
    """
    return [
        f"Hostname: <{ap.get('deviceName')}> / "
        f"MAC: <{ap.get('apMac')}> / IP: <{ap.get('ip')}>"
        for ap in offline_aps
    ]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.