content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import copy
def pad_grid(grid, moves):
    """Pad the grid with a white tile in every blank spot adjacent to a black tile.

    :param grid: nested dict ``{y: {x: tile}}`` where tile ``"b"`` is black
    :param moves: dict mapping direction name -> (dx, dy) offset
    :return: a deep copy of ``grid`` with ``"w"`` tiles added around each ``"b"``
    """
    padded = copy.deepcopy(grid)
    for row_y, row in grid.items():
        for col_x, tile in row.items():
            if tile != "b":
                continue
            for dx, dy in moves.values():
                # setdefault only writes when the spot is blank, so existing
                # tiles (including black ones) are never overwritten
                padded.setdefault(row_y + dy, {}).setdefault(col_x + dx, "w")
    return padded
|
8781abf1e1d8a27a13090cb9e1c61c4b481bb9ee
| 55,537
|
import torch
def setup_optimizer_and_scheduler(param_lr_maps, base_lr, epochs, steps_per_epoch):
    """Build an AdamW optimizer plus a OneCycleLR schedule for it.

    :param param_lr_maps: parameter groups (or iterable of params) for AdamW
    :param base_lr: base/maximum learning rate for the one-cycle schedule
    :param epochs: total number of training epochs
    :param steps_per_epoch: optimizer steps per epoch
    :return: (optimizer, scheduler) tuple
    """
    optimizer = torch.optim.AdamW(param_lr_maps, lr=base_lr)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=base_lr,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
    )
    return optimizer, scheduler
|
381d88e9cf1755362ace131add0a198828e91e2e
| 55,540
|
def seat_ids(tickets):
    """Convert an iterable of (row, seat) tuples to seat IDs (row * 8 + seat)."""
    return [8 * row + seat for row, seat in tickets]
|
1f1ad3d79c6d08e7d71684a245eeeb4d63d193e3
| 55,542
|
def get_snippet_line_col(source: str, index: int) -> tuple[int, int, str]:
    """Return (1-based line number, 0-based column, full line of code) at ``index``."""
    line_no, col_no = 1, 0
    line_start = 0
    # Walk characters up to (but not including) the target index,
    # tracking the current line/column and where the line began.
    for pos, ch in enumerate(source):
        if pos == index:
            break
        if ch == "\n":
            line_start = pos + 1
            line_no += 1
            col_no = 0
        else:
            col_no += 1
    else:
        # index was at or beyond the end of the source
        pos = len(source)
    # Extend to the end of the current line (or end of source).
    line_end = source.find("\n", pos)
    if line_end == -1:
        line_end = len(source)
    return line_no, col_no, source[line_start:line_end]
|
d16477a269a2b44e3ad81537ee23f4a2cf9a7360
| 55,543
|
def geometric_mean(precisions):
    """Compute the geometric mean of a list of values."""
    exponent = 1.0 / len(precisions)
    product = 1.0
    for value in precisions:
        product *= value
    return product ** exponent
|
1e21c1aa80b5cfcbed938671c06e71e120d8fc1f
| 55,544
|
def count_rows(path: str):
    """Count the number of rows (lines) in the CSV at ``path``.

    :param path: filesystem path; must end in ``.csv``
    :return: number of lines in the file
    :raises ValueError: if the path does not end in ``.csv``
    """
    # Raise explicitly instead of using `assert`, which is stripped under -O.
    if not path.endswith('.csv'):
        raise ValueError(f"expected a .csv path, got: {path!r}")
    with open(path) as f:
        return sum(1 for _ in f)
|
a7d77d81ff3798d67fdfa7d3bb0fa667aba39105
| 55,546
|
def format_as_pct(value, decimal=2):
    """
    Formats any value as a percentage.
    Parameters
    ----------
    value : int or float
        The value you want to format as a percentage.
    decimal : int, optional
        The number of decimal places the output should have. The default
        value is 2.
    Returns
    -------
    pct : string
        The value formatted as a string and percentage.
    Examples
    --------
    >>> format_as_pct(0.15678)
    '15.68%'
    >>> format_as_pct(0.15678, decimal=1)
    '15.7%'
    """
    # Nested f-string precision replaces the old %-built format template.
    return f"{float(value) * 100:.{decimal}f}%"
|
df723d964e41c66d6f8431dd046bf1b64243e3fa
| 55,547
|
import json
def saveJSONFile(filepath, data):
    """Save JSON-serializable data to a file.

    :param filepath: destination path (opened for writing, truncating)
    :param data: JSON-serializable object
    :return: True on success, False if the data could not be serialized
    """
    with open(filepath, 'w') as outfile:
        try:
            json.dump(data, outfile, indent=2)
        # json.dump raises TypeError for unserializable objects (and
        # ValueError for e.g. NaN with allow_nan=False); the original only
        # caught ValueError, so the failure path never fired.
        except (TypeError, ValueError):
            return False
    return True
|
7f4885f8166d45120f0bbaa28161aca332e85dd7
| 55,552
|
def checkIfMessagerIsBooster(self, user):
    """
    Function would be called by Robot class
    :param self: instance from Robot
    :param user: instance from Discord.User
    :return: True if user is a booster
    """
    # Membership test performs the same role-by-role equality check
    # the original explicit loop did.
    return self.boostedRole in user.roles
|
8f6b34fba8b8d723c510adc529286642c4959931
| 55,560
|
def _BuildOutputFilename(filename_suffix):
"""Builds the filename for the exported file.
Args:
filename_suffix: suffix for the output file name.
Returns:
A string.
"""
if filename_suffix is None:
return 'results.html'
return 'results-{}.html'.format(filename_suffix)
|
ae4087a3bcf50b0715685f5359962813e2fdc70d
| 55,568
|
def get_tensors(model, layer_names):
    """Collect the output tensors of the named layers.
    # Arguments
        model: Keras/tensorflow model.
        layer_names: List of layer-name strings.
    # Returns
        Tuple of (model, list of output tensors). Note: the model is
        returned alongside the tensors, matching existing callers.
    """
    outputs = [model.get_layer(name).output for name in layer_names]
    return model, outputs
|
2bff2dc9a1be8ea31dd03760c4ccab4311a1596a
| 55,575
|
import logging
import json
def extract_initial_data(html):
    """Extract and parse the ytInitialData JSON blob from playlist page HTML.
    Parameters
    ----------
    html : str
        HTML to extract the string from.
    Returns
    -------
    Dict or None
        Parsed JSON data, or None when no matching line is found.
    """
    logging.info("Extracting JSON string from playlist HTML data")
    for raw_line in html.split("\n"):
        stripped = raw_line.strip()
        if stripped.startswith('window["ytInitialData"]'):
            # Slice off the 'window["ytInitialData"] = ' prefix (26 chars)
            # and the trailing semicolon.
            return json.loads(stripped[26:-1])
    return None
|
52f66db3e341cd5f400b6e649d4ed37fed2674ff
| 55,577
|
import textwrap
def format_description(data):
    """Wrap the event description at 77 columns and prefix each line with '// '."""
    wrapped = textwrap.fill(text=data["description"], width=77)
    return textwrap.indent(text=wrapped, prefix="// ")
|
c5848e0db1f43a6606c44f3ffeb295dde0953161
| 55,578
|
import warnings
def ParseSMFString(stringList):
    """v,f,n,c,r <-- ParseSMFString(stringList)
    Parse an ascii SMF file and returnes a list of 3D vertices, triangular faces
    (0-based indices, faces with more edges generate warnings), normals, colors
    and 2D texture coordinates.

    Blank lines are skipped (previously they raised IndexError on w[0]).
    """
    vertices = []
    faces = []
    normals = []
    colors = []
    textures = []
    fi = 0
    for l in stringList:
        w = l.split()
        if not w:  # skip blank/whitespace-only lines
            continue
        tag = w[0]
        if tag == 'v':  # vertices
            vertices.append([float(w[1]), float(w[2]), float(w[3])])
        elif tag == 'f':  # faces (SMF indices are 1-based; stored 0-based)
            if len(w) > 4:
                warnings.warn("face %d has more than 3 edges" % fi)
            faces.append([int(w[1]) - 1, int(w[2]) - 1, int(w[3]) - 1])
            fi += 1
        elif tag == 'n':  # normal vectors
            normals.append([float(w[1]), float(w[2]), float(w[3])])
        elif tag == 'c':  # colors
            colors.append([float(w[1]), float(w[2]), float(w[3])])
        elif tag == 'r':  # 2D texture indices
            textures.append([float(w[1]), float(w[2])])
    return vertices, faces, normals, colors, textures
|
542131afc6731acd27ab8ece00d734f6f5549f8f
| 55,580
|
def istag(line):
    """
    :param str line: Input line to check
    :return: True if input line is a tag, i.e. starts with `@@` and carries
        at least one character after the marker; otherwise False.
    :rtype: bool
    """
    return len(line) > 2 and line.startswith('@@')
|
ab331318dadf3f05ce4dc947b9775d0ec9f40794
| 55,582
|
def prepare_results(p, r, f):
    """
    Formats and reports scores in the following format:
    {metric} {Precision} {Recall} {F1 score}
    (values are reported as percentages, width 5, two decimals)
    """
    return (f'\tmetric:\tP: {100.0 * p:5.2f}'
            f'\tR: {100.0 * r:5.2f}\tF1: {100.0 * f:5.2f}')
|
342c358e1a88ff9023cfcd822ef49ca8ae37b4ea
| 55,594
|
from typing import Tuple
from typing import List
from typing import Mapping
def parse_transport(transport: str) -> Tuple[List[str], Mapping[str, str]]:
    """Parse Transport header in SETUP response.

    Semicolon-separated fields with an '=' become key/value options;
    fields without one are collected as plain parameters.
    """
    params: List[str] = []
    options = {}
    for field in transport.split(";"):
        key, sep, value = field.partition("=")
        if sep:
            options[key] = value
        else:
            params.append(field)
    return params, options
|
1d8ef1dd34230825a6cc11efb999ad70bae9ae7b
| 55,597
|
def parse_academic_year(year):
    """
    Parses an academic year, e.g. '2014/15', into an int holding the first year.
    """
    first_year, _, _ = year.partition("/")
    return int(first_year)
|
146c4da185ffa826302975059bd7f38d34da5920
| 55,601
|
def stations_highest_rel_level(stations, N):
    """Given a list of stations, return the N stations at highest risk of flooding.

    Note: sorts ``stations`` in place (descending by relative water level),
    matching the original behavior. Stations whose relative level is None
    sort last.
    """
    def rel_level(station):
        level = station.relative_water_level()
        # -inf replaces the old magic negative constant; `is None` replaces
        # the `== None` comparison.
        return float("-inf") if level is None else level
    stations.sort(key=rel_level, reverse=True)
    return stations[:N]
|
4a57154888a3050c2e196f2919bbf00f71827f0b
| 55,603
|
import torch
import math
def periodic(inputs: torch.Tensor, period: int) -> torch.Tensor:
    """Return a periodic (cosine) representation assuming 0 is the start of a period."""
    phase = inputs * 2 * math.pi / period
    return torch.cos(phase)
|
7032d63b807a7cd5e8be640af34e738bdb0534c8
| 55,605
|
def libname_from_dir(dirname):
    """Reconstruct the library name from a directory name, dropping the version.

    Everything from the first dash-separated token that begins with a digit
    onward is treated as version information and discarded.
    """
    tokens = dirname.split("-")
    cut = len(tokens)
    for idx, token in enumerate(tokens):
        if token[0].isdigit():
            cut = idx
            break
    return "-".join(tokens[:cut])
|
c5ad8c4ca40d61fdc6db8013fe6a93410b3571b0
| 55,606
|
def init_weights(rule, voters):
    """Generates the initial weight object for the given rule and voters.
    Parameters
    ----------
    rule : str
        The name of the rule that is used.
    voters : list
        A list with all voters.
    Returns
    -------
    weights
        The initial weights for the rule: a single dict for most rules,
        a pair of dicts for the per_quota family.
    """
    zero_weight_rules = {
        "per_multiplication_offset",
        "per_nash",
        "per_equality",
        "per_phragmen",
    }
    quota_rules = {"per_quota", "per_quota_min", "per_quota_mod"}
    if rule in zero_weight_rules:
        return dict.fromkeys(voters, 0)
    if rule in quota_rules:
        return (dict.fromkeys(voters, 0), dict.fromkeys(voters, 0))
    return dict.fromkeys(voters, 1)
|
b427095d398489754a38c3222e83736982674b91
| 55,608
|
def keep_line(line):
    """Return True for lines that should be compared in the disassembly output
    (everything except the 'file format' banner line)."""
    return line.find("file format") == -1
|
c8885ee67a8f884f60c913251c99c3eae42406c3
| 55,609
|
def get_linear_bezier(cps, t):
    """
    Linear Bezier curve interpolation:
        B(t) = (1-t)*P0 + t*P1
    ``cps`` is a 2-row array of control points; row 0 is P0, row 1 is P1.
    """
    start = cps[0, :]
    end = cps[1, :]
    return (1 - t) * start + t * end
|
f4f1ed5c9ec883249b2b270366a95e73bdbdab77
| 55,614
|
import torch
def mul_complex(t1, t2):
    """Element-wise product of two complex-valued tensors whose dim 4 holds
    the real and imaginary parts (size 2).

    Complex multiplication: (a+bi)(c+di) = (ac-bd) + (bc+ad)i
    """
    # Real/imaginary parts of each operand along dimension 4.
    re1, im1 = t1.split(1, 4)
    re2, im2 = t2.split(1, 4)
    real_part = re1 * re2 - im1 * im2
    imag_part = im1 * re2 + re1 * im2
    return torch.cat((real_part, imag_part), 4)
|
15b676321f9e5846a8e3da12eba17ffd052cb6ff
| 55,617
|
import csv
def write_to_csv(file, rows: list):
    """
    Append rows to the given CSV file.
    :param file: string. CSV file to write in.
    :param rows: list. List of rows. Row is a list of values to write in the csv file as a row.
    :return: boolean. True for successful write.
    """
    print('Writing to csv...')
    with open(file, 'a', newline='') as f:
        writer = csv.writer(f)
        # writerows replaces the manual per-row loop with one C-level call.
        writer.writerows(rows)
    return True
|
451a2c3e0b95da604e412ef62e531411476ac3fb
| 55,618
|
def maxima(records):
    """
    Calculate the indices of the records with the min buy and max sell prices.
    :param records: List of dictionaries containing price information.
    :return: Tuple (index of min 'buy' record, index of max 'sell' record).
        Ties resolve to the earliest index, as before.
    """
    indices = range(len(records))
    buy_index = min(indices, key=lambda i: records[i]['buy'])
    sell_index = max(indices, key=lambda i: records[i]['sell'])
    return buy_index, sell_index
|
0479ff872d53acd42a9b60812a5597e1cf4b8039
| 55,624
|
def ngpu(gpus):
    """
    Count how many GPUs are in use: 1 for a bare int id, otherwise the
    number of ids in the given iterable.
    """
    return 1 if isinstance(gpus, int) else len(list(gpus))
|
0db9f13f2dea836cb313e2cc0216f3bc1efbc227
| 55,627
|
def brackets(string: str) -> str:
    """Wrap a non-empty string in square brackets; the empty string stays empty."""
    return f"[{string}]" if string else ""
|
122acb7bc781dce4256c4db2a077fb4e19bb2e46
| 55,629
|
def load_releases(driver, url):
    """
    Load future sneaker releases from https://stockx.com/new-releases/sneakers
    :param driver: selenium chrome driver
    :param url: stockx release page url
    :return: list of future releases (one element per release tile)
    """
    driver.get(url)
    tiles = driver.find_elements_by_class_name("release-tile")
    return tiles
|
594f9a29498dbcf0e6bb3fa3f75134f06d11063e
| 55,633
|
import functools
import logging
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
@functools.wraps(behavior)
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
except Exception as e:
logging.exception('Unexpected exception from task run in logging pool!')
raise
return _wrapping
|
f070f409f8c2cc7fb07697b68544e603b377bdd0
| 55,639
|
import inspect
def func_name() -> str:
    """Return the name of the calling function (one frame up the stack)."""
    caller_frame = inspect.stack()[1]
    return caller_frame.function
|
439da40abebfcd5d7d6c976fa833090223863952
| 55,642
|
def str2fmap(line):
    """Convert a string of the form 'f1=v1 f2=v2' into a feature map {f1: v1, f2: v2}."""
    pairs = (token.split('=') for token in line.split())
    return {feature: float(value) for feature, value in pairs}
|
ffd6430f8062e97bb93404e91a0f7807c019304d
| 55,647
|
def _parse_variable_name(line):
"""
:param line: Line in the file
:returns: The variable name being assigned.
>>> line = 'X = [1];'
>>> _parse_variable_name(line)
X
"""
rhs = line.split('=')[0].strip()
if rhs.find('(') >= 0:
rhs = rhs[:rhs.find('(')]
return rhs
|
d8f79b187fdc1e808f94b342889b74732bfce4ce
| 55,648
|
def updateMenuItem(theDictionary, item, price):
    """Update the price of item.
    :param dict[str, float] theDictionary:
        Dict containing menu items as keys and respective prices as
        prices. Keys are upper-case item names.
    :param str item:
        The item whose price is to be updated (case-insensitive).
    :param float or int price:
        The updated price of item (rounded to 2 decimal places).
    :return:
        True if item exists on the menu, else False.
    :rtype: bool
    """
    # Removed the commented-out alternative implementation that trailed
    # the function body.
    item = item.upper()
    price = round(float(price), 2)
    if item in theDictionary:
        theDictionary[item] = price
        return True
    return False
|
46d05c2f4929dd5b93d328bcf6ad282db5377a63
| 55,649
|
def mod_inverse(x, n):
    """
    Compute the inverse of x in the multiplicative
    group of Z/nZ, i.e. the integer y such that
    x * y = 1 mod n, using the extended Euclidean algorithm.

    Assumes gcd(x, n) == 1; otherwise the result is not an inverse.
    """
    a, b = n, x
    ta, tb = 0, 1
    while b != 0:
        # Bug fix: `a / b` is float division in Python 3, which corrupted
        # the coefficients (and loses precision for large integers);
        # the algorithm needs the integer quotient.
        q = a // b
        a, b = b, a % b
        ta, tb = tb, ta - q * tb
    if ta < 0:
        ta += n
    return ta
|
6a4fc1998827391893457d2b318796c317e4d693
| 55,666
|
def seconds_to_hhmmss(time_s):
    """Converts a time interval given in seconds to []h[]m[]s.
    Input can either be a string or floating point.
    The interval is rounded to the nearest whole second before splitting,
    so seconds never show as 60 (previously 59.6 s formatted as '0h0m60s').
    Returns a string: []h[]m[]s
    """
    total_seconds = round(float(time_s))
    hh, remainder = divmod(total_seconds, 3600)
    mm, ss = divmod(remainder, 60)
    return "{}h{}m{}s".format(hh, mm, ss)
|
b6092a8d7dfb7219665270f763e5c3c05f77b6e9
| 55,667
|
def genCSS(font, base):
    """Generates a CSS snippet for webfont usage based on:
    http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax"""
    # Any non-zero italic angle is reported as oblique.
    font_style = "oblique" if font["post"].italicAngle != 0 else "normal"
    font_weight = font["OS/2"].usWeightClass
    # nameID=1 is the font family name record.
    font_family = font["name"].getName(nameID=1, platformID=1, platEncID=0)
    css = """
    @font-face {
    font-family: %(family)s;
    font-style: %(style)s;
    font-weight: %(weight)s;
    src: url('%(base)s.eot') format('eot');
    src: url('%(base)s.eot?#iefix') format('embedded-opentype'),
    url('%(base)s.woff2') format('woff2'),
    url('%(base)s.woff') format('woff'),
    url('%(base)s.ttf') format('truetype');
    }
    """ % {"style": font_style, "weight": font_weight,
           "family": font_family, "base": base}
    return css
|
e11b400fdd3b12cb66c689d3adf0ab2f5211fae9
| 55,668
|
def calibrate_magnitude(instr_mag, airmass, ref_color,
                        zero, extinct, color, instr=None):
    """
    Calibrate instrumental magnitude to a standard photometric system.
    This assumes that ref_color has already been adjusted for any
    non-linearities between the instrumental magnitudes and the
    photometric system. Otherwise use `~calibrate_magnitude_full`
    """
    # Extinction-corrected instrumental magnitude, optionally scaled by
    # the instrumental slope term.
    corrected = instr_mag - extinct * airmass
    if instr is not None:
        corrected = instr * corrected
    return corrected + zero + color * ref_color
|
964f137860288c2349affc65d0bfe5fe2abf5998
| 55,669
|
def _linear_velocity_calculation(velocity, z_start, velocity_bottom):
"""
Calculation of the velocity assuming a liner increase with depth
Assume a linear increase with depth where where the velocity is 0 for
coordines.z > z_start and for coordinates.z[0] < z < z_start.
"""
lin_velocity = velocity_bottom * (velocity.z - z_start) / velocity.z[0]
return lin_velocity
|
0996cc57937b17c5735c150455505c1061bec3c1
| 55,670
|
def split_and_add(number):
    """Sum the decimal digits of a non-negative integer."""
    total = 0
    while number > 0:
        number, digit = divmod(number, 10)
        total += digit
    return total
|
82a39c90faa15abadcfa82e53c0d2c5dbb0b2338
| 55,671
|
def _register_access_method(destination, controller, key):
"""
Helper function for register access methods.
This helper creates distinct set and get methods for each key
and adds them to the destination object.
"""
def set(value): controller[key] = value
setattr(destination, 'set_'+key, set)
def get(): return controller[key]
setattr(destination, 'get_'+key, get)
|
ab021bde2052d8814abe3ca4399bbd767a662ae9
| 55,672
|
def input_to_int(value):
    """
    Checks that user input is a (non-negative) integer.
    Parameters
    ----------
    value : user input to check
    Returns
    -------
    integer : value cast to an integer
    Raises
    ------
    ValueError : if value is not composed solely of digits
    """
    if not str(value).isdigit():
        raise ValueError('Expecting integer. Got: "{0}" ({1})'
                         .format(value, type(value)))
    return int(value)
|
702f6102a0abe37b139afcf298308ed1fc51ecf4
| 55,676
|
def shc_vec_len(nmax, nmin=1, include_n_zero=False, include_zeros=False):
    """
    Return the number of spherical harmonic coefficients up to degree nmax,
    with possible leading truncation before degree nmin.
    - nmax: Maximum shc degree
    - nmin: Minimum shc degree
    - include_n_zero: Add degree zero (required by e.g. SHTOOLS)
    - include_zeros: Include the zero value present for each order = 0
    """
    # Coefficient count for degrees nmin..nmax (each degree n contributes 2n+1).
    vec_len = (nmax - nmin + 1) * (nmax + nmin + 1)
    # Truthiness tests replace the `== True` comparisons; the flags are
    # documented booleans.
    if include_n_zero:
        vec_len += 1
    if include_zeros:
        vec_len += nmax - nmin + 1
        # Degree zero, when present, also carries its own zero entry.
        if include_n_zero:
            vec_len += 1
    return vec_len
|
d940e54945111532a4d624e8f94d064d13928f5b
| 55,678
|
def font_color(label, f_color):
    """
    Change the font color of a Tkinter Message/label widget.
    :param label: widget whose ``config`` accepts an ``fg`` option
    :param f_color: font color the user selects
    :return: the same label, with the new foreground color applied
    """
    label.config(fg=f_color)
    return label
|
40bd7af9ba5b558f11c3691901dca804916fb07c
| 55,679
|
def all_combos(list_of_combos: list):
    """Return all cross-list combinations, one element drawn from each list.

    e.g. [[1, 2], [3, 4]] -> [[1, 3], [1, 4], [2, 3], [2, 4]]

    The previous hand-rolled index arithmetic only produced a subset of the
    combinations (e.g. [[1, 3], [2, 4]] for the example above) and did not
    match its own docstring; itertools.product enumerates the full set.
    """
    from itertools import product  # local import keeps top-level imports untouched
    return [list(combo) for combo in product(*list_of_combos)]
|
adff1682a2447be00429e6c7bc909ae1d24769d3
| 55,681
|
def getGDXoutputOptions(project_vars: dict) -> tuple:
    """Extract from project_variables.csv the formats on which the resulting GDX file will be converted. Options are CSV, PICKLE, and VAEX.
    Args:
        project_vars (dict): project variables collected from project_variables.csv.
    Raises:
        Exception: features values must be "yes" or "no"
    Returns:
        tuple: 4-element tuple containing
        - **csv_bool** (*bool*): boolean
        - **pickle_bool** (*bool*): boolean
        - **vaex_bool** (*bool*): boolean
        - **convert_cores** (*int*): number of cores used to convert the symbols from GDX file to output formats.
    """
    convert_cores = int(project_vars["gdx_convert_parallel_threads"])
    flag_keys = (
        "gdx_convert_to_csv",
        "gdx_convert_to_pickle",
        "gdx_convert_to_vaex",
    )
    flags = {}
    for feat in flag_keys:
        raw = project_vars[feat]
        if raw == "yes":
            flags[feat] = True
        elif raw == "no":
            flags[feat] = False
        else:
            raise Exception(f'{feat} must be "yes" or "no"')
    return (
        flags["gdx_convert_to_csv"],
        flags["gdx_convert_to_pickle"],
        flags["gdx_convert_to_vaex"],
        convert_cores,
    )
|
0263f017dcd23d751aff46101d74d3ec9101faf6
| 55,682
|
import json
def get_landuse_price(input_path):
    """
    Load the land-use unit-price table from a JSON file.
    :param input_path: path to the UTF-8 encoded JSON file
    :return: parsed price dictionary
    """
    with open(input_path, 'r', encoding='utf8') as price_file:
        return json.load(price_file)
|
692a6c293ed5f949c61ea08bc8da49e52ad20bf0
| 55,683
|
def find_support(pattern, supports):
    """
    Support of a pattern, defined as the minimum support among its items.
    pattern: list of items in the pattern.
    supports: dict mapping item -> support value.
    Returns None for an empty pattern.
    """
    return min((supports[item] for item in pattern), default=None)
|
a7cb42a6b816bc6cb2f2e527f08e319c0adc5317
| 55,684
|
def grids_ggd_points_triangles(grid):
    """
    Return points and triangles from a grids_ggd structure.
    :param grid: a ggd grid such as 'equilibrium.grids_ggd[0].grid[0]'
    :return: (points, triangles) tuple
    """
    # objects_per_dimension: 0 = nodes, 1 = edges, 2 = faces, 3 = cells / volumes
    node_key = 'space[0].objects_per_dimension[0].object[:].geometry'
    face_key = 'space[0].objects_per_dimension[2].object[:].nodes'
    return grid[node_key], grid[face_key]
|
7d3700cd74a98e7ee688ace55440f864c881b773
| 55,685
|
def get_prefixes(bucket, prefix=None):
    """Retrieve the "directories" of a bucket under the given prefix.
    Args:
        bucket (obj): Bucket object to retrieve prefixes against
        prefix (str): Prefix to look for nested prefixes underneath
    Returns:
        list: all prefixes collected across result pages
    """
    iterator = bucket.list_blobs(prefix=prefix, delimiter="/")
    return [p for page in iterator.pages for p in page.prefixes]
|
1fc5cf523aa0564f0406b172fd0b8b623e7c2975
| 55,690
|
def fahrenheit2kelvin(theta):
    """Convert a temperature in degrees Fahrenheit to Kelvin."""
    f_to_c_scale = 5. / 9
    return f_to_c_scale * (theta - 32) + 273.15
|
bb8c1203d1adfc01950ac78efab53ed6240809f7
| 55,691
|
def check_uniques(example, uniques):
    """If the example's hash is still in the set of unique hashes, remove it
    and return True; otherwise return False."""
    digest = example["hash"]
    if digest not in uniques:
        return False
    uniques.remove(digest)
    return True
|
81cd89729cac3bfefcff2b2ac3d91cc175306cbe
| 55,694
|
def first(a, fn):
    """
    Example: first([3,4,5,6], lambda x: x > 4)
    :param a: iterable
    :param fn: predicate used to evaluate items
    :return: the first item matching the predicate, or None
    """
    matches = (item for item in a if fn(item))
    return next(matches, None)
|
72fd47eff62a406f42a0fcd6901f010c98954464
| 55,697
|
def to_pass(line):
    # The MIT License (MIT)
    #
    # Copyright (c) 2019 Michael Dawson-Haggerty
    #
    # Permission is hereby granted, free of charge, to any person
    # obtaining a copy of this software and associated documentation
    # files (the "Software"), to deal in the Software without restriction,
    # including without limitation the rights to use, copy, modify, merge,
    # publish, distribute, sublicense, and/or sell copies of the Software,
    # and to permit persons to whom the Software is furnished to do so,
    # subject to the following conditions:
    #
    # The above copyright notice and this permission notice shall be
    # included in all copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    """
    Replace a line of code with a pass statement that keeps the
    original leading indentation.
    Arguments
    ----------
    line : str, line of code
    Returns
    ----------
    passed : str, 'pass' indented to match the input line
    """
    indent_width = len(line) - len(line.lstrip(' '))
    return ' ' * indent_width + 'pass'
|
f7210ea18fa44d6e1e801ab7909f48e2a0e9a2cd
| 55,698
|
from typing import List
def chunkify_files(files: List[str], size: int) -> List[str]:
    """Split a list of files into 'equally' sized chunks.
    Args:
        files (List[str]): file names to split
        size (int): maximum chunk length
    Returns:
        List[str]: list of chunks of (at most) size 'size'
    """
    return [files[start:start + size] for start in range(0, len(files), size)]
|
d1d421c7181229b71c6900ef38152031f6d49913
| 55,699
|
import json
import base64
def parse_creative_serving_decision(data):
    """Parses the Creative Serving Decision from the Cloud Pub/Sub response.
    Args:
        data: a base64-encoded JSON string representing a creative's
            CreativeServingDecision.
    Returns:
        A JSON representation of the creative's CreativeServingDecision.
    """
    decoded = base64.b64decode(data)
    return json.loads(decoded)
|
5ece1de41b9b0e505e948a23d0416759859802dd
| 55,701
|
def nicenumber(number, binsize, lower=False):
    """Returns the next higher or lower "nice number", given by binsize.
    Examples:
    ---------
    >>> nicenumber(12, 10)
    20
    >>> nicenumber(19, 50)
    50
    >>> nicenumber(51, 50)
    100
    >>> nicenumber(51, 50, lower=True)
    50
    """
    quotient, _ = divmod(number, binsize)
    if not lower:
        quotient += 1
    return quotient * binsize
|
4f3e9c957d51d1763534bc4e07416fc15c1053a6
| 55,721
|
import win32file
import warnings
def is64bitbinary(filename):
    """Check whether the file is a 64-bit binary.

    Returns True/False, or None when the binary type cannot be determined
    (a RuntimeWarning is emitted in that case).
    """
    try:
        return win32file.GetBinaryType(filename) != win32file.SCS_32BIT_BINARY
    except Exception as exc:
        warnings.warn(
            'Cannot get binary type for file "{}". Error: {}'.format(filename, exc),
            RuntimeWarning,
            stacklevel=2,
        )
        return None
|
fab8086458b61d2594745c7ab1dc2bb46f8aa3f7
| 55,723
|
def binary_search(arr, key):
    """
    Search for ``key`` in a sorted list and return its index.
    :param arr: sorted list of comparable values
    :param key: value to look for
    :return: index of ``key`` in ``arr``
    :raises ValueError: if ``key`` is not present
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = arr[mid]
        if candidate == key:
            return mid
        # Narrow to whichever half can still contain the key.
        if candidate > key:
            hi = mid - 1
        else:
            lo = mid + 1
    raise ValueError("Value not found")
|
8b7fbd9ded4741c32c766c57d3698d87ea739e18
| 55,725
|
def is_s3_path(file_path):
    """Return True if file_path uses the s3:// scheme, else False."""
    scheme = file_path.partition('://')[0]
    return scheme == 's3'
|
6b637eb8e032780ce682a7d7656bef7f5579d350
| 55,726
|
def rect_corners_mids_and_center(rect):
    """Return a tuple with each corner, mid, and the center of a rect.
    Clockwise from the top left corner, ending with the center point.
    """
    attr_order = (
        "topleft",
        "midtop",
        "topright",
        "midright",
        "bottomright",
        "midbottom",
        "bottomleft",
        "midleft",
        "center",
    )
    return tuple(getattr(rect, name) for name in attr_order)
|
c39f52bf5afadef5f4b781a866ace8a5c0521dbb
| 55,730
|
def get_discard_mtfcc_by_desc() -> list:
    """Road-type descriptions to discard from the MTFCC categories.

    Returns a fresh list on every call so callers may mutate it safely.
    """
    discard_descriptions = (
        "Bike Path or Trail",
        "Parking Lot Road",
        "Alley",
        "Vehicular Trail (4WD)",
        "Walkway/Pedestrian Trail",
        "Private Road for service vehicles (logging, oil fields, ranches, etc.)",
    )
    return list(discard_descriptions)
|
2caf305ee2cf55852c3d370df31fe658da76f52f
| 55,731
|
def merge_dicts(dicts):
    """
    Merge a list of dictionaries into one; later entries override earlier ones.

    A single-element list returns that dict itself (not a copy), matching
    the original behavior.
    """
    if not dicts:
        return {}
    if len(dicts) == 1:
        return dicts[0]
    merged = dict(dicts[0])
    for extra in dicts[1:]:
        merged.update(extra)
    return merged
|
8e8236e1f18214dad59645618c52a17bed11bd2f
| 55,733
|
def filter_deltas(deltas, *, retain):
    """
    Keep only delta entries whose values pass their retain predicate; drop
    records whose 'deltas' dict becomes empty. Keys without a predicate in
    ``retain`` are always kept.

    Example:
    >>> filter_deltas([{'deltas': {'foo': 2, 'bar': 1}}, {'deltas': {'foo': 5}}],
    ...               retain={'foo': lambda x: abs(x) > 3})
    [{'deltas': {'bar': 1}}, {'deltas': {'foo': 5}}]
    >>> filter_deltas([{'deltas': {'foo': 2}}, {'deltas': {'foo': 5}}],
    ...               retain={'bar': lambda x: False})
    [{'deltas': {'foo': 2}}, {'deltas': {'foo': 5}}]
    """
    kept = []
    for entry in deltas:
        remaining = {key: val for key, val in entry['deltas'].items()
                     if retain.get(key, lambda _: True)(val)}
        if remaining:
            kept.append({**entry, 'deltas': remaining})
    return kept
|
39e94a3cdeb69c987e181da457f02d559f98afe1
| 55,735
|
def simplify_list(parent, key):
    """
    Remove a trivial wrapper key and regroup its children as the value of
    the wrapper's parent; empty children are mapped to None.
    Mutates and returns ``parent``.
    :param parent: Dictionary containing key to simplify
    :param key: Dictionary key containing the term "*List"
    :return: simplified dictionary (same object as ``parent``)
    """
    child_value = parent[key]
    # NOTE(review): both None and the string "0" are treated as "no
    # children" -- presumably "0" is how the upstream data marks an empty
    # list; confirm against the producer.
    if child_value is not None and child_value != "0":
        # Takes the first truthy-keyed child's value -- assumes child_value
        # is a mapping wrapping the real payload under a single key.
        # TODO(review): confirm the single-key assumption; extra keys are
        # silently dropped here.
        child_value = [child_value[child] for child in child_value if child][0]
        # A lone dict or string is normalized into a one-element list.
        if isinstance(child_value, dict) or isinstance(child_value, str):
            child_value = [child_value]
        parent[key] = child_value
    else:
        parent[key] = None
    return parent
|
31e0899c2562629d4daeb09b0a58fdad47ae6e51
| 55,740
|
from typing import List
def insertion_sort(arr: List) -> List:
    """
    Sort a sequence of numbers in ascending order using insertion sort.
    Property:
        - Stable
        - Runtime: O(n^2) amortize
        - Space: O(1)
    Description:
        - For each index j >= 1, shift the already-sorted prefix arr[0..j-1]
          right until the slot for arr[j] is found, then insert it.
    Note: lists of length >= 2 are sorted in place and a copy is returned;
    shorter lists are returned unchanged (same object).
    Doctest:
    >>> insertion_sort([2,5,4,1])
    [1, 2, 4, 5]
    >>> insertion_sort([1])
    [1]
    >>> insertion_sort([2,3,3,2,1,1,1])
    [1, 1, 1, 2, 2, 3, 3]
    >>> insertion_sort([2, -1, 3, -2, 1, 5])
    [-2, -1, 1, 2, 3, 5]
    """
    if len(arr) < 2:
        return arr
    for j in range(1, len(arr)):
        pending = arr[j]
        i = j - 1
        while i >= 0 and arr[i] > pending:
            arr[i + 1] = arr[i]
            i -= 1
        arr[i + 1] = pending
    return list(arr)
|
420c19e7a3f5ffff2158f33d9002ea72a2687d8f
| 55,741
|
import string
def _preprocess_text(text):
"""
Removes punctuation and converts a string to lowercase
:param text: input text to preprocess
:return: string which has no punctuation and is lower case
"""
exclude = set(string.punctuation)
no_punctuation = ''.join(ch for ch in text if ch not in exclude)
return no_punctuation.lower()
|
b44935a1c63813230a4ce707efe63f6bb7696046
| 55,744
|
def get_path(leaf):
    """Return the action path from root to leaf, wrapped in a list.

    At the root itself there is no path, so a plain empty list is returned
    (not [[]]), matching existing callers.
    """
    if leaf.parent_action is None:
        return []
    actions = []
    node = leaf
    while node.parent_action is not None:
        actions.append(node.parent_action)
        node = node.parent_action.source
    # Collected leaf-to-root; flip into root-to-leaf order.
    return [list(reversed(actions))]
|
d77b07c1f826f70428be5b8c765c93221afbd3c3
| 55,749
|
def sifting_metadata_for_volume_info(list_of_row_lists):
    """
    Sift full-title values in metadata records for keywords relevant to
    potential volume information.
    Parameters
    ----------
    list_of_row_lists: list
        A list of the metadata records returned from the original CSV;
        the full title is at index 1 of each row.
    Returns
    ----------
    check_full_titles_for_volume_info: list
        One flag per record: "YES" when the normalized title contains
        "volume" or "vol.", else "N".
    """
    flags = []
    for row in list_of_row_lists:
        # Normalize: lowercase, then strip newlines and all spaces.
        normalized = str(row[1]).lower().replace("\n", "").replace(" ", "")
        if "volume" in normalized or "vol." in normalized:
            flags.append("YES")
        else:
            flags.append("N")
    return flags
|
36bdf8ef625a8f3213e68f62e73a2086ff402733
| 55,752
|
def php_strripos(_haystack, _needle, _offset=0):
    """Case-insensitive last occurrence of `_needle` in `_haystack`.

    Mirrors PHP's strripos: returns the index, or False when not found.

    >>> php_strripos('ababcd', 'aB')
    2
    >>> php_strripos('ababcd', 'axx')
    False
    """
    position = _haystack.lower().rfind(_needle.lower(), _offset)
    return False if position == -1 else position
|
e92bd94b83eac6c6c5f54d242fbe6b91ea2d4e23
| 55,754
|
def check_clipping(alignment, log=None):
    """
    Check whether the segment contains soft/hard clipped bases.

    Clipped reads are flagged as QC-fail and False is returned;
    unmapped or unclipped reads pass with True.

    :param alignment: pysam AlignedSegment
    :param log: optional logger with a `clipping(alignment)` method
    :return: Boolean
    """
    if alignment.is_unmapped:
        return True
    if 'S' not in alignment.cigarstring and 'H' not in alignment.cigarstring:
        return True
    # Clipping present: mark the read and optionally record it.
    alignment.is_qcfail = True
    if log:
        log.clipping(alignment)
    return False
|
f8f196e9e13e74a35f13083bbe10df31de090f14
| 55,758
|
def build_protected_range_request_body(
    start_row_index,
    num_rows,
    start_col_index,
    num_cols,
    worksheet_id=0,
    warning_only=False,
    description=None,
):  # pylint: disable=too-many-arguments
    """
    Build a Google Sheets API request body that creates a protected range.

    Args:
        start_row_index (int): The zero-based index of the row of the range that will be protected
        num_rows (int): The number of rows that this range will span
        start_col_index (int): The zero-based index of the column of the range that will be protected
        num_cols (int): The number of columns that this range will span
        worksheet_id (int): The worksheet id in the given spreadsheet (the first worksheet id is always 0)
        warning_only (bool): If True, the range will be editable, but will display a warning/confirmation dialog
            before edits are accepted
        description (str or None): An optional description for the protected range

    Returns:
        dict: A request body that will be sent to the Google Sheets API to create a protected range
    """
    protected_range = {
        "range": {
            "sheetId": worksheet_id,
            "startRowIndex": start_row_index,
            # End indices are exclusive in the Sheets API.
            "endRowIndex": start_row_index + num_rows,
            "startColumnIndex": start_col_index,
            "endColumnIndex": start_col_index + num_cols,
        },
        "warningOnly": warning_only,
    }
    if description is not None:
        protected_range["description"] = description
    return {"addProtectedRange": {"protectedRange": protected_range}}
|
3d42c311afafe132a9d111cb6f2cd11885a64467
| 55,765
|
def get_string_from_user(msg):
    """
    Summary:
        Asks the user to enter a string and
        - if any error occurs => print:
          "***Oops, something went wrong! Try again!" and ask again
        Returns the user input, as string, when no errors occurred.
    Usage:
        user_input = get_string_from_user("enter a user name: ")
    Arguments:
        msg {[string]} -- [the string to be displayed to the user,]
    Returns:
        [string] -- [the string entered from user]
    """
    while True:
        try:
            user_input = input(msg)
        # Catch Exception rather than a bare `except:` so that
        # KeyboardInterrupt/SystemExit still abort the prompt loop
        # (the bare except made Ctrl-C retry forever).
        except Exception:
            print("\n***Oops, something went wrong! Try again!\n")
        else:
            return user_input
|
d911d1ed89f1426c8bd2602f2433e127a4047818
| 55,766
|
import requests
from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema
def is_opendap_url(url):
    """
    Check if a provided url is an OpenDAP url.

    The DAP Standard specifies that a specific tag must be included in the
    Content-Description header of every request. This tag is one of:

        "dods-dds" | "dods-das" | "dods-data" | "dods-error"

    so a header starting with `dods` identifies a DAP server.
    Note that this might not work with every DAP server implementation.
    """
    try:
        response = requests.head(url, timeout=5)
    except (ConnectionError, MissingSchema, InvalidSchema):
        return False
    content_description = response.headers.get("Content-Description")
    if not content_description:
        return False
    return content_description.lower().startswith("dods")
|
f1009d8bee00bc882bd87726389abc734b2cd527
| 55,768
|
from pathlib import Path
import plistlib
def get_min_os_ver(f: Path) -> str:
    """Get the minimum OS version required by a bundle.

    :param f (Path): Info.plist file to pull OS requirements from
    :return: the LSMinimumSystemVersion value, or None when absent
    """
    with open(f, "rb") as plist_file:
        return plistlib.load(plist_file).get("LSMinimumSystemVersion")
|
ba3c35b267aa4c2d9a693dfee80d7339306c7784
| 55,779
|
def get_D(u, v):
    """Compute the rank-1 dictionary associated with u and v

    Parameters
    ----------
    u: array (n_atoms, n_channels)
    v: array (n_atoms, *atom_support)

    Return
    ------
    D: array (n_atoms, n_channels, *atom_support)
    """
    n_support_dims = v.ndim - 1
    # Broadcast: append singleton axes to u, insert a channel axis in v.
    u_exp = u[(...,) + (None,) * n_support_dims]
    v_exp = v[:, None]
    return u_exp * v_exp
|
58559b58ed9ece77e0ebed6e8c143e510dae4153
| 55,783
|
def parse_single_line(line):
    """Generic parser: splits off the label, and return the rest.

    :param line: a "<label> <data>" string (any whitespace separator)
    :return: the data portion with trailing whitespace stripped
    """
    # The label itself is discarded; only the remainder is returned.
    _, data = line.split(None, 1)
    return data.rstrip()
|
744fbf023d2892f5866110c719f7ff613d948b63
| 55,784
|
def curie_to_str(prefix: str, identifier: str) -> str:
    """Join a prefix and an identifier into a colon-separated CURIE string."""
    return "{}:{}".format(prefix, identifier)
|
a95bc70afaaf2fc488e781c6493cf8719235b6ad
| 55,786
|
def stringify(obj_list: list[object]) -> list[str]:
    """Convert list of objects into list of strings.

    :param obj_list: List of objects.
    :return: List of strings.
    """
    # str(obj) is the idiomatic spelling; calling obj.__str__() directly
    # bypasses the type-level special-method lookup.
    return [str(obj) for obj in obj_list]
|
956f019c621506d32f37eee8a1186f162e0ba330
| 55,788
|
def normalize_title(title, body):
    """Normalize the title if it spills over into the PR's body."""
    if title.endswith("…") and body.startswith("…"):
        # Being paranoid in case \r\n is used.
        spill = body[1:].partition("\r\n")[0]
        return title[:-1] + spill
    return title
|
1b88beb631620c01679ef459ae4fb7dcc230d925
| 55,790
|
import requests
from bs4 import BeautifulSoup
def player_href_list_grab(year):
    """Grabs a list of hrefs for a given year based on descending fantasy performance"""
    page = requests.get(f'https://www.pro-football-reference.com/years/{year}/fantasy.htm')
    soup = BeautifulSoup(page.content, 'html.parser')
    anchors = soup.find_all('table')[0].find_all('a')
    # Keep only player links (anchor markup mentioning 'players').
    return [tag.get('href') for tag in anchors if 'players' in str(tag)]
|
326db1e0253cc099ddb3478adbc141bd9d6db6f1
| 55,791
|
def remove_dup_on_chrY(snp_dfm, verbose=True):
    """
    Drop chrY rows for SNP names that appear twice (once on chrX, once on chrY).

    It is known that each of "rs700442", "rs5989681" and "rs4446909" has 2 entries in
    snp146, hg19 of UCSC Genome Browser, one on chrX and the other chrY.
    Remove the one on chrY as suggested by Steve:

    > I would just use the ChrX entry, for this SNP.
    > My reasoning: we very likely don't have haplotype data for chrY.
    """
    dup_mask = snp_dfm.duplicated(subset="name", keep=False)
    chrY_mask = (snp_dfm.loc[:, "chrom"] == "chrY")
    if verbose and dup_mask.any():
        print("remove_dup_on_chrY: duplication detected: \r\n%s" % snp_dfm.loc[dup_mask, :])
    snp_dfm = snp_dfm.drop(snp_dfm[dup_mask & chrY_mask].index)
    remaining_dup = snp_dfm.duplicated(subset="name", keep=False)
    assert not remaining_dup.any(), "remove_dup_on_chrY: still duplicated after removal: \r\n%s" % snp_dfm.loc[remaining_dup, :]
    return snp_dfm
|
19e04cf51695673231e72bb4681ba9f9b562fb42
| 55,793
|
import binascii
def _read_signify_ed25519_pubkey(pubkey_file):
"""
Read a public key from a Ed25519 key pair created with OpenBSD signify.
http://man.openbsd.org/OpenBSD-current/man1/signify.1
"""
with open(pubkey_file) as f:
# signature file format: 2nd line is base64 of 'Ed' || 8 random octets || 32 octets Ed25519 public key
pubkey = binascii.a2b_base64(f.read().splitlines()[1])[10:]
if len(pubkey) != 32:
raise Exception('bogus Ed25519 public key: raw key length was {}, but expected 32'.format(len(pubkey)))
return pubkey
|
5dd162890eedc8319ab6f037cd62b629aa7a21e3
| 55,795
|
def valid_links(links):
    """Determines validity of input link JSON.

    JSON must have a single entry 'links' which contains a non-empty
    list of strings.

    :param links: Dictionary representing input JSON
    :returns: True if valid, False if not
    :rtype: boolean
    """
    if not isinstance(links, dict):
        return False
    # `len(links) is not 1` compared identity, not value (a SyntaxWarning
    # since Python 3.8); use != for integers.
    if len(links) != 1:
        return False
    if 'links' not in links:
        return False
    link_list = links['links']
    if not isinstance(link_list, list):
        return False
    # The original wrote `if not list`, testing the *builtin* `list`
    # (always truthy) instead of the payload -- the intended non-empty
    # check never ran. Check the actual list.
    if not link_list or not all(isinstance(s, str) for s in link_list):
        return False
    return True
|
2dfd116bf095cdccfc0153b85ef1c78ca674bc95
| 55,797
|
def findtype(s, makeintfloats=False):
    """
    Return a tuple with the data type of the string along with
    the string converted to the data type.

    args:
        s : mandatory, string
        makeintfloats : optional, bool; when True, integer-like strings
            are reported as floats ('f4') instead of ints ('i8')
    returns:
        tuple: (type, sprime)
            type is 'i8', 'f4' or 'a20'
            sprime is the quantity s converted into its type.
    example usage:
        t, s = findtype(s)
    """
    # Try integer first; an integer-like string also parses as float,
    # so order matters.
    try:
        int_val = int(s)
    except ValueError:
        pass
    else:
        if makeintfloats:
            return 'f4', float(s)
        return 'i8', int_val
    try:
        return 'f4', float(s)
    except ValueError:
        return "a20", s
|
2a20a83340c82126ab0791bd85c49cec2af5dc99
| 55,799
|
import typing
def asdict(obj) -> typing.Dict:
    """
    Provides a "look" into any Python class instance by returning a dict
    into the attribute or slot values.

    :param obj: any Python class instance
    :returns: the attribute or slot values from :paramref:`.asdict.obj`
    """
    if hasattr(obj, '__dict__'):
        pairs = obj.__dict__.items()
    else:
        # Slotted classes have no __dict__; read each slot by name.
        pairs = ((name, getattr(obj, name)) for name in obj.__slots__)
    return {name: value for name, value in pairs if not name.startswith('_')}
|
b537d1fc6acd1fc2fcc54f7df623f65ae1e662db
| 55,807
|
def _transpose_dicts(items, template=None):
""" Build dictionary of arrays from array of dictionaries
Example:
> in = [{'a':1, 'b':3}, {'a':2}]
> _transpose_dicts(in, template=in[0])
{'a':[1,2], 'b':[3, None]}
"""
result = {}
if not items:
return result
if not template:
template = items[0]
for key, template_val in template.items():
if isinstance(template_val, dict):
result[key] = _transpose_dicts(
[item[key] for item in items if key in item], template_val)
else:
result[key] = [item.get(key, None) for item in items]
return result
|
d7132d4737c821390d5a0361bd7d29effc8be893
| 55,811
|
def _get_manager_log_hashes(manager, logger):
    """Get a dict mapping file paths to hashes for logs on the specified
    manager.

    File paths will be relative to the root of the logs (/var/log/cloudify).

    journalctl.log and supervisord.log will be excluded as they can't be kept
    from changing.
    """
    # Fixed: stray backtick at the end of the original log message.
    logger.info('Checking log hashes for %s', manager.private_ip_address)
    # Each md5sum output line is "<hash>  <path>"; strip the common log
    # root from the path so hashes can be compared across managers.
    prefix_len = len('/var/log/cloudify')
    log_hashes = {
        line.split()[1][prefix_len:]: line.split()[0]
        for line in manager.run_command(
            # The docstring promises journalctl.log is excluded too, but
            # the original command only filtered supervisord.log.
            'find /var/log/cloudify -type f -not -name \'supervisord.log\''
            ' -not -name \'journalctl.log\''
            ' -exec md5sum {} + | sort',
            use_sudo=True
        ).stdout.splitlines()
    }
    logger.info('Calculated log hashes for %s are %s',
                manager.private_ip_address,
                log_hashes)
    return log_hashes
|
bb3d3bfb85ee923eacebdcf8051b97507165d36a
| 55,815
|
def construct_auth_bearer(token):
    """
    Helper function to construct authorization bearer header data

    :param token: Token string
    :type token: str
    :return: Authorization header dictionary object with Bearer data
    :rtype: dict
    """
    return {"Authorization": "Bearer {}".format(token)}
|
6ed2753093c72ac8c9bcf1484570282b915d507d
| 55,816
|
def compare_lists(list_a, list_b):
    """Compare the content of two lists regardless of item order.

    Treats the lists as multisets: every element must appear the same
    number of times in both. Works with unhashable elements.

    :param list_a: first list
    :param list_b: second list
    :returns: True when both lists hold the same elements with the same
        multiplicities, else False
    """
    if len(list_a) != len(list_b):
        return False
    # The original filtered out ALL occurrences of a matched value from
    # list_b, so lists with duplicates (e.g. [1, 1] vs [1, 1]) compared
    # unequal. Remove exactly one occurrence per match instead.
    remaining = list(list_b)
    for value in list_a:
        try:
            remaining.remove(value)
        except ValueError:
            return False
    return True
|
0d78b20a2eaf570f98b9487b8fef7f0ab49bb40e
| 55,820
|
def integer(maybe_string, base):
    """Make an integer of a number or a string"""
    # Already an int: pass through; otherwise parse with the given base.
    return maybe_string if isinstance(maybe_string, int) else int(maybe_string, base)
|
d159f065565ef03efbbcdd3d168e03c0a71734b6
| 55,822
|
import torch
def to_onehot(indices, num_classes):
    """Convert a tensor of indices to a tensor of one-hot indicators."""
    result = torch.zeros(indices.size(0), num_classes, device=indices.device)
    result.scatter_(1, indices.unsqueeze(1), 1)
    return result
|
483f45f87d29503860eec51cbdf455e76545da41
| 55,826
|
import json
def read_json(f):
    """
    Read json file.

    Args:
        f: (str) file path
    """
    with open(f, 'r') as handle:
        data = json.load(handle)
    return data
|
cf81e8cf90d93f8745900f142f4e63b1f72ff88a
| 55,830
|
def dict_param_to_nsmlib_bin_str(params):
    """Convert a parameter dictionary to the string form the NMSLIB binary accepts.

    :param params: a dictionary of parameters, e.g., {'M': 30, 'indexThreadQty': 4}
    :return: a string, e.g., M=30,indexThreadQty=4
    """
    parts = []
    for key, value in params.items():
        parts.append('{}={}'.format(key, value))
    return ','.join(parts)
|
da45615c7d9011f6a9ff903a913375167d7047b7
| 55,832
|
def calculate_velocity(c, t):
    """
    Calculates a velocity given a set of quintic coefficients and a time.

    Evaluates the derivative of the quintic position polynomial:
    5*c0*t^4 + 4*c1*t^3 + 3*c2*t^2 + 2*c3*t + c4.

    Args
        c: List of coefficients generated by a quintic polynomial
            trajectory generator.
        t: Time at which to calculate the velocity

    Returns
        Velocity
    """
    terms = [5 * c[0] * t**4, 4 * c[1] * t**3, 3 * c[2] * t**2, 2 * c[3] * t, c[4]]
    return sum(terms)
|
715eaed7e77e01910816f25f474936920493de09
| 55,836
|
import copy
def prepare_return_datasets(datasets):
    """Prepares the datasets to return them"""
    result = copy.deepcopy(datasets)
    # Undo training-set modifications (e.g. a test set merged in for
    # anti-generalization): keep the modified copy under a new key and
    # restore the unmodified set as "train".
    if "train_unmodified" in result:
        result["train_modified"] = result["train"]
        result["train"] = result["train_unmodified"]
    return result
|
14437af60f43198713b21a13219ad9a5fcb19375
| 55,840
|
def get_ip_and_port(pao, wrappers):
    """ Obtain the ip and port of the given pao's wrapper from the wrapper list.

    :param pao: Given unit of active defense.
    :param wrappers: List of wrappers (dict with a 'paos' entry).
    :return: ip and port to access the wrapper of given 'pao'; empty
        strings when no wrapper matches.
    """
    for wrapper in wrappers['paos']:
        if wrapper['pao'] == pao:
            return wrapper['ip'], wrapper['port']
    return '', ''
|
37962d505135d4a32e50615b58ed816e23783bad
| 55,843
|
def get_output_spec(cls, name):
    """ Get named output spec from self or superclass

    Walks the MRO (most-derived first) and returns the first
    `_output_spec_table[name]` entry found; the walk stops at the first
    class without a `_output_spec_table`, matching the original logic.
    Returns None when the name is not found.
    """
    # The original advanced with `klasses.next()`, which is Python 2
    # iterator syntax and raises AttributeError on Python 3 whenever the
    # lookup has to move past the first class in the MRO.
    for base in cls.__mro__:
        if not hasattr(base, '_output_spec_table'):
            break
        if name in base._output_spec_table:
            return base._output_spec_table[name]
    return None
|
517d1f4712c2ae4068655230b172de69a4ddce20
| 55,849
|
def merge(left, right):
    """
    Helper function that merges two sorted sublists into one sorted list

    Time complexity: O(n)
    Space complexity: O(n)
    """
    merged = []
    i = j = 0
    n_left, n_right = len(left), len(right)
    while i < n_left and j < n_right:
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these extends: the other side is exhausted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
|
8865e9bb1731023d68b1077595833e8a884cc6fb
| 55,855
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.