content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def range_ngram_distrib(text, n, top_most=-1):
    """
    List n-grams with their counts from the most popular to the least popular.

    :param text: text
    :param n: n of n-gram
    :param top_most: count of most popular n-grams to be returned, or -1 to return all
    :return: array of ngrams, array of counts (parallel numpy arrays)
    """
    ngram_counts = count_ngrams(text, n)
    # Counter.most_common(None) returns all items, so both branches collapse
    # into a single call (the original computed most_common twice per branch).
    common = np.asarray(ngram_counts.most_common(top_most if top_most >= 0 else None))
    ngrams = common[:, 0]
    counts = np.asarray(common[:, 1], dtype=int)
    return ngrams, counts
def extract_group_ids(caps_directory):
    """Extract list of group IDs (e.g. ['group-AD', 'group-HC']) based on `caps_directory`/groups folder."""
    import os

    groups_path = os.path.join(caps_directory, "groups")
    try:
        return os.listdir(groups_path)
    except FileNotFoundError:
        # No groups folder present: fall back to a single anonymous group.
        return [""]
def process_dataset(rcp, var, model, year, target_bucket):
    """
    Download a NetCDF file from s3, extract and convert its contents to
    json, and upload the json to a target bucket.

    :param rcp: RCP scenario identifier used to build the source S3 path
    :param var: variable name, forwarded to nex2json and the upload
    :param model: model name; also keys into the module-level BASE_TIMES map
    :param year: data year used to build the source S3 path
    :param target_bucket: S3 bucket receiving the tiled json output
    """
    s3path = generate_s3_path(rcp, var, model, year)
    # read_from_s3 downloads the object; `path` is the local temp file.
    (s3key, path) = read_from_s3(s3path)
    s3basename = os.path.splitext(os.path.basename(s3key))[0]
    try:
        tempdir = tempfile.mkdtemp()
        logger.info('Tiling to %s', tempdir)
        nex2json(path, tempdir, var, s3basename, model, BASE_TIMES[model])
        try:
            upload_to_s3(tempdir, var, rcp, model, target_bucket)
        finally:
            # Always remove the tile directory, even if the upload failed.
            logger.info('Deleting directory %s', tempdir)
            shutil.rmtree(tempdir)
    finally:
        # Always remove the downloaded NetCDF file.
        logger.info('Deleting %s', path)
        os.remove(path)
def require_methods(*methods):
    """Returns a decorator which produces an error unless request.method is one
    of |methods|.

    :param methods: allowed HTTP method names (e.g. 'GET', 'POST')
    :return: decorator that rejects other methods with a 405 response
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(request, *args, **kwds):
            if request.method not in methods:
                allowed = ', '.join(methods)
                rsp = HttpTextResponse('This requires a specific method: %s' % allowed,
                                       status=405)
                rsp['Allow'] = allowed
                # Bug fix: the 405 response was built but never returned, so
                # disallowed methods silently fell through to the wrapped view.
                return rsp
            return func(request, *args, **kwds)
        return wrapped
    return decorator
def getUnit3d(prompt, default=None):
    """
    Read a Unit3d from the terminal with checking. This will accept a
    direction in any format accepted by Unit3d().parseAngle().
    Allowed formats
    * x,y,z or [x,y,z] three floats
    * theta,psi or [theta,psi], in radians (quoted in "" for degrees)
    * theta in radians (or quoted in "" for degrees)
    :param prompt: the prompt to be displayed
    :type prompt: str
    :param default: the default Unit3d
    :type: Unit3d
    """
    # Loop until the input can be parsed into a Unit3d; parse failures are
    # logged and the user is re-prompted.
    while True:
        val = __getInput(prompt, default)
        try:
            if isinstance(val, str):  # Its a string
                # SECURITY NOTE(review): eval() on raw user input executes
                # arbitrary code; acceptable only in a trusted interactive
                # terminal session.
                val = eval(val)  # Eval list
            u = Unit3d().parseAngle(val)
            return u
        except (ValueError, NameError, ZeroDivisionError, SyntaxError):
            logger.error("Conversion of '{0:s}' to Unit3d failed.".format(str(val)))
def checkOverlap(ra, rb):
    """
    Check the overlap of two anchors, ra=[chr,left_start,left_end,chr,right_start,right_end].

    :param ra: first anchor record
    :param rb: second anchor record
    :return: True when both the left ends and the right ends overlap
    """
    # Idiom: return the boolean expression directly instead of
    # if ...: return True / return False.
    return (checkOneEndOverlap(ra[1], ra[2], rb[1], rb[2])
            and checkOneEndOverlap(ra[4], ra[5], rb[4], rb[5]))
def test_memoryview_supports_deepcopy(valid_bytes_128):
    """
    Assert that instances of :class:`~ulid.ulid.MemoryView` can be copied using
    :func:`~copy.deepcopy`.
    """
    view = ulid.MemoryView(valid_bytes_128)
    nested = {'a': {'b': {'c': view}}}
    assert copy.deepcopy(nested) == nested
def saveable(item: praw.models.reddit.base.RedditBase) -> dict[str, typing.Any]:
    """Generate a saveable dict from an instance.

    Copies every public entry of the instance ``__dict__`` (names not starting
    with ``_``), passing each value through ``legalize`` before handing the
    result to ``_parent_ids_interpreted`` for post-processing.
    """
    result = {k: legalize(v) for k, v in item.__dict__.items() if not k.startswith("_")}
    return _parent_ids_interpreted(result)
def decode_Tex_accents(in_str):
    """Converts a string containing LaTex accents (i.e. "{\\`{O}}") to ASCII
    (i.e. "O"). Useful for correcting author names when bib entries were
    queried from web via doi

    :param in_str: input str to decode
    :type in_str: str
    :return: corrected string
    :rtype: str
    """
    # Replace LaTeX accent commands with the bare ASCII letter. The original
    # pattern only matched the acute accent ("\'"), so the "{\`{O}}" example
    # in the docstring was never handled; this raw-string pattern covers the
    # common single-character accents (grave, acute, umlaut, circumflex,
    # tilde, macron, dot).
    pat = r"\{\\[`'\"^~=.]\{(\w)\}\}"
    out = in_str
    for x in re.finditer(pat, in_str):
        out = out.replace(x.group(), x.group(1))
    # replace latex {\textquotesingle} with underscore
    out = out.replace('{\\textquotesingle}', "_")
    # replace actual single quotes with underscore for bibtex compatibility
    out = out.replace("'", '_')
    return out
def tensor_dict_eq(dict1: Mapping, dict2: Mapping) -> bool:
    """Checks the equivalence between 2 dictionaries, that can contain torch Tensors as value. The dictionary can be
    nested with other dictionaries or lists, they will be checked recursively.

    :param dict1: Dictionary to compare.
    :param dict2: Dictionary to compare.
    :return: True, if dict1 and dict2 are equal, false otherwise.
    """
    # Compare by key lookup rather than zipping the two item streams: the
    # original zip-based check depended on insertion order, so two mappings
    # with identical contents inserted in a different order compared unequal.
    if dict1.keys() != dict2.keys():
        return False
    return all(
        tensor_container_element_eq(value, dict2[key])
        for key, value in dict1.items()
    )
def calculate_stability(derivatives):
    """
    Calculate the stability-axis derivatives with the body-axis derivatives.

    :param derivatives: dict containing at least 'body' (body-axis
        derivatives) and 'alpha0' (trim angle of attack in degrees; it is
        converted with np.deg2rad before use).
    :return: the same dict, mutated in place: a 'stability' entry (created
        if missing) is updated with the lateral and longitudinal conversions.
    """
    d = derivatives
    if 'stability' not in d:
        d['stability'] = {}
    slat = calculate_stability_lateral(d['body'], np.deg2rad(d['alpha0']))
    slong = calculate_stability_longitudinal(d['body'], np.deg2rad(d['alpha0']))
    d['stability'].update(slat)
    d['stability'].update(slong)
    return d
def main(infile):
    """Scan a fasta file, counting start codons and Shine-Dalgarno sequences.

    Prints the percentage of sequences starting with ATG and the number (and
    percentage) of sequences containing a Shine-Dalgarno site.

    :param infile: path of the fasta file to read
    :raises Exception: when the first line is not a defline, or the file
        contains no fasta entries at all
    """
    n = 0   # number of fasta entries seen
    i = 0   # number of sequences starting with ATG
    sd = 0  # number of Shine-Dalgarno sequences found
    seq = ''
    # `with` guarantees the handle is closed even if an entry raises
    # (the original left the file open on error).
    with open(infile, 'r') as f:
        # iterate through file line by line
        for line in f:
            if re.match('>', line):
                # count the number of fasta entries
                n = n + 1
                seq = ''
            else:
                if n == 0:
                    raise Exception("ERROR - no defline at line 1. Is '{}' a fasta file?".format(infile))
                if len(seq) > 1:
                    # skip multiline fasta entries
                    continue
                if re.match('ATG', line):
                    # count start codons
                    i = i + 1
                else:
                    try:
                        # search for S-D sequence
                        if shineDelgarno(line):
                            sd = sd + 1
                    except Exception as e:
                        print("Error at line {}: {}".format(n, e))
                # store sequence to detect multiline entries on the next pass
                seq = line
    if n == 0:
        # Guard: an empty file would otherwise crash with ZeroDivisionError.
        raise Exception("ERROR - no fasta entries found in '{}'".format(infile))
    print("Percent start codons: {:.2%}".format(i/n))
    print("Number of Shine-Delgarno sequences: {}".format(sd))
    print("\tas a % of all seqs: {:.2%}".format(sd/n))
def apply_config(keys, options, path=None):
    # type: (Any, optparse.Values, Optional[str]) -> Dict[str, str]
    """
    Read setup.cfg from path or current working directory and apply it to the
    parsed options

    Parameters
    ----------
    keys
    options : optparse.Values
        parsed options
    path : Optional[str]

    Returns
    -------
    Dict[str, str]
        default types by argument name
    """
    cfg_dir = path or os.getcwd()
    parser = ConfigParser()
    parser.read(os.path.join(cfg_dir, 'setup.cfg'))

    def _apply_option(name, option_type, fallback):
        # Values already present on `options` (e.g. from the command line)
        # take precedence over the config file.
        if hasattr(options, name):
            return
        getter_name = 'get'
        if option_type not in ('string', 'choice'):
            getter_name += option_type
        getter = getattr(parser, getter_name)
        try:
            value = getter('doc484', name)
        except (NoSectionError, NoOptionError):
            value = fallback
        setattr(options, name, value)

    for name, option_type, fallback in keys:
        _apply_option(name, option_type, fallback)
    try:
        return dict(parser.items('doc484:default_arg_types'))
    except NoSectionError:
        return {}
def func_call_compile():  # function: Compile the generated LaTeX file.
    """Compile the generated LaTeX file: XeLaTeX, XeLaTeX, Makeindex, XeLaTeX.

    Relies on the module-level globals ``call_compile``, ``call_index``,
    ``verbose``, ``direc`` and ``output_name``. Exits the program on any
    compilation or Makeindex failure.
    """
    def _run_xelatex(last_pass=False):
        # Run one XeLaTeX pass; abort the whole program on any error.
        print("* Info: XeLaTeX")
        if verbose:
            print("* Info: Call:", call_compile)
        try:
            proc = subprocess.run(call_compile, capture_output=True, universal_newlines=True)
        except Exception:
            # NOTE: the original bare `except:` also swallowed the SystemExit
            # raised by sys.exit() in the success branch; catching Exception
            # and checking stderr outside the try fixes that.
            if verbose:
                print("* Info: more information in '" + direc + output_name + ".log'")
            sys.exit("* Error: Error in compilation")
        if len(proc.stderr) > 0:
            print("* Error: Error in compilation:")
            print(proc.stderr)
            sys.exit()
        if verbose:
            if last_pass:
                print("* Info: more information in '" + direc + output_name + ".log'")
                print("* Info: result in '" + direc + output_name + ".pdf'\n")
            else:
                print("* Info: more information in '" + direc + output_name + ".log'\n")
        print("* Info: Compilation OK")

    def _run_makeindex():
        # Run Makeindex; abort the whole program on failure.
        print("* Info: Makeindex")
        if verbose:
            print("* Info: Call:", call_index)
        try:
            subprocess.run(call_index, capture_output=True, universal_newlines=True)
        except Exception:
            if verbose:
                print("* Info: more information in '" + direc + output_name + ".ilg'\n")
            sys.exit("* Error: Error in Makeindex")
        if verbose:
            print("* Info: more information in '" + direc + output_name + ".ilg'\n")
        print("* Info: Makeindex OK")

    print("-" * 80)
    print("* Info: Compilation")
    # Remove stale build artifacts before the first pass.
    for e in [".aux", ".idx", ".ind", ".log", ".ilg", ".pdf", ".out"]:
        remove_LaTeX_file(e)
    print()
    _run_xelatex()
    # ...................................................................
    print("." * 80)
    _run_xelatex()
    # ...................................................................
    print("." * 80)
    _run_makeindex()
    # ...................................................................
    print("." * 80)
    _run_xelatex(last_pass=True)
    # ...................................................................
    # Clean up intermediate files, keeping the .log, .ilg and .pdf.
    for e in [".aux", ".idx", ".ind", ".out"]:
        remove_LaTeX_file(e)
def convert_v1_to_v2(v1, max, asm, v2=None, first=0):
    """Converts a given v1 timecodes file to v2 timecodes.
    Original idea from tritical's tcConv.

    :param v1: iterable of v1 timecode lines; override lines have the form
        "start_frame,end_frame,fps" (other lines are ignored)
    :param max: total number of frames to emit (parameter name shadows the
        builtin; kept for interface compatibility)
    :param asm: assumed (default) fps, NTSC-corrected below
    :param v2: optional output path; when given, timestamps are written as a
        v2 timecodes file
    :param first: index of the first timestamp to write/return
    :return: list of per-frame timestamps from index `first` onward
    """
    ts = fn1 = fn2 = last = 0
    asm = correct_to_ntsc(asm, True)
    o = []
    # Local aliases avoid repeated attribute lookups in the hot loops below.
    ap = o.append
    en = str.encode
    for line in v1:
        ovr = line.split(',')
        if len(ovr) == 3:
            fn1, fn2, fps = ovr
            fn1 = int(fn1)
            fn2 = int(fn2)
            ovf = correct_to_ntsc(fps, True)
            # Frames before the override range advance at the assumed fps...
            while (last < fn1 and last < max):
                ap(ts)
                last, ts = last + 1, ts + asm
            # ...frames inside the override range advance at the override fps.
            while (last <= fn2 and last < max):
                ap(ts)
                last, ts = last + 1, ts + ovf
    # Remaining frames after the last override use the assumed fps.
    while last < max:
        ap(ts)
        last, ts = last + 1, ts + asm
    if v2:
        with open(v2, 'wb') as v2f:
            from os import linesep as ls
            # The format header is only written for a fresh file (first == 0).
            header = [en('# timecode format v2' + ls)] if first == 0 else [b'']
            v2f.writelines(header + [en(('{0:3.6f}'.format(s)) + ls) for s in
                                     o[first:]])
    return o[first:]
def read_malmipsdetect(file_detect):
    """
    Read a MALMI detection file which contains, for each detected event, how
    many stations and how many phases were triggered. Those information can be
    used for quality control.

    Parameters
    ----------
    file_detect : str
        The filename including path of the input file.

    Raises
    ------
    ValueError
        datetime format is not consistent with the defined ones.

    Returns
    -------
    detect_info : dict
        detect_info['starttime'] : list of datetime
            starttime and folder name of the detected event;
        detect_info['endtime'] : list of datetime
            endtime of the detected event;
        detect_info['station'] : list of float
            number of stations triggered of the detected event;
        detect_info['phase'] : list of float
            number of phases triggered of the detected event;
    """
    format_f = ['starttime', 'endtime', 'station', 'phase']
    # Accepted datetime layouts: with and without fractional seconds.
    datetime_formats = ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S')

    def _parse_time(timestr):
        # Try each accepted layout in turn instead of branching on the string
        # length (the original duplicated this logic for start and end times).
        for fmt in datetime_formats:
            try:
                return datetime.datetime.strptime(timestr, fmt)
            except ValueError:
                continue
        raise ValueError('Error! Input datetime format not recognized!')

    # read file
    df = pd.read_csv(file_detect, delimiter=' ', header=None, names=format_f,
                     skipinitialspace=True, encoding='utf-8', comment='#')
    # format output data
    detect_info = {}
    detect_info['starttime'] = [_parse_time(s) for s in df['starttime']]
    detect_info['endtime'] = [_parse_time(s) for s in df['endtime']]
    detect_info['station'] = list(df['station'])
    detect_info['phase'] = list(df['phase'])
    return detect_info
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
    """Generates the URL for authorizing access.

    This uses the "OAuth2 for Installed Applications" flow described at
    https://developers.google.com/accounts/docs/OAuth2InstalledApp

    Args:
      client_id: Client ID obtained by registering your app.
      scope: scope for access token, e.g. 'https://mail.google.com'
    Returns:
      A URL that the user should visit in their browser.
    """
    params = {
        'client_id': client_id,
        'redirect_uri': REDIRECT_URI,
        'scope': scope,
        'response_type': 'code',
    }
    return '%s?%s' % (AccountsUrl('o/oauth2/auth'), FormatUrlParams(params))
def download(request):
    """Download images as .zip file.

    Compresses `user_root`/`search_id` into Color_images.zip on disk and
    streams it back as an HTTP attachment named "dataset.zip". Reads
    user_id, user_root and search_id from the Django-style session.
    """
    def make_archive(source, destination):
        # Builds an archive of `source`; `destination` is "<name>.<format>"
        # and the finished archive is moved to that path.
        print(source, destination)
        base = os.path.basename(destination)
        name = base.split('.')[0]
        format = base.split('.')[1]
        archive_from = os.path.dirname(source)
        archive_to = os.path.basename(source.strip(os.sep))
        print(source, destination, archive_from, archive_to)
        shutil.make_archive(name, format, archive_from, archive_to)
        shutil.move('%s.%s' % (name, format), destination)
    user_id = request.session['user_id']
    user_root = request.session['user_root']
    search_id = request.session['search_id']
    logger = Logger(user_root,user_id)
    logger.write("start compressing images..")
    t_start_zip=time.time()
    zip_target = os.path.join(user_root, search_id)
    zip_path = os.path.join(user_root, search_id, "Color_images.zip")
    make_archive(zip_target, zip_path)
    print("finish zip.")
    zip_file = open(zip_path, '+rb')
    response = HttpResponse(zip_file, content_type='application/zip')
    # NOTE(review): the attachment is advertised as "dataset.zip" although the
    # archive on disk is "Color_images.zip" -- confirm this is intentional.
    response[
        'Content-Disposition'] = 'attachment; filename=%s' % "dataset.zip"
    response['Content-Length'] = os.path.getsize(zip_path)
    # NOTE(review): the handle is closed before the response is returned;
    # this presumably relies on HttpResponse reading the content eagerly.
    zip_file.close()
    logger.write("compressing images finished ("+convert_duration_time(time.time(),t_start_zip)+"s)")
    return response
def get_scorekeeper_details():
    """Retrieve a list of scorekeepers and their corresponding appearances.

    Thin wrapper around ``scorekeepers.get_scorekeeper_details`` using the
    module-level ``database_connection``.
    """
    return scorekeepers.get_scorekeeper_details(database_connection)
def audio_from_video(src_video: str, dst_audio: str):
    """Pull audio from source video to destination wav.

    Extracts a mono ("-ac 1") audio track with ffmpeg and blocks until the
    conversion has finished.

    Args:
        src_video (str): Path to src video
        dst_audio (str): Path to dst audio
    Raises:
        subprocess.CalledProcessError: if ffmpeg exits with a non-zero status.
    """
    import subprocess

    ensure_destination_exists(dst_audio)
    # Use an argument list (no shell) so paths containing spaces or shell
    # metacharacters cannot break or inject into the command, and wait for
    # ffmpeg to finish -- the original os.popen() call returned immediately
    # without waiting and interpolated the paths into a shell string.
    subprocess.run(["ffmpeg", "-i", src_video, "-ac", "1", dst_audio],
                   check=True)
    return
def _process_seq(seq, strict):
"""Adds info to seq, and to Aligned object if seq is hidden."""
if hasattr(seq, 'data'):
real_seq = seq.data
else:
real_seq = seq
if seq.Info and 'Name' in seq.Info:
seq.Name = seq.Info.Name
if seq is not real_seq:
real_seq.Name = seq.Name
real_seq.Info = seq.Info | 26,120 |
def layer_norm(input_tensor, axis):
    """Run layer normalization on the axis dimension of the tensor."""
    normalizer = tf.keras.layers.LayerNormalization(axis=axis)
    return normalizer(input_tensor)
def compute_depth(disparity, focal_length, distance_between_cameras):
    """
    Computes depth in meters
    Input:
        -Disparity in pixels
        -Focal Length in pixels
        -Distance between cameras in meters
    Output:
        -Depth in meters
    """
    # Suppress numpy's divide-by-zero warning: zero disparity maps to inf.
    with np.errstate(divide='ignore'):
        # standard depth and disparity formula
        return (focal_length * distance_between_cameras) / disparity
def lecture(source=None,target=None,fseed=100,fpercent=100):
    """
    Create conversion of the source file and the target file.
    Shuffle method is used, based on the seed (default 100).

    :param source: source data, preprocessed by create_inter_without
    :param target: target data, preprocessed by create_inter_without
    :param fseed: random seed applied before the shuffles
    :param fpercent: percentage (0-100) of each list to keep
    :return: (copysource, copytarget), or None when source/target is None or
        an exception occurred (the except below only prints, so the function
        falls through and implicitly returns None)
    """
    seed(fseed)
    try:
        copysource = []
        copytarget = []
        if(source!=None and target!=None):
            source = create_inter_without(source)
            target = create_inter_without(target)
            # NOTE(review): source and target are shuffled independently, so
            # any pairwise alignment between them is destroyed -- confirm
            # that is intended.
            shuffle(source)
            shuffle(target)
            # Keep the first fpercent% of each shuffled list.
            for i in range(0,(int(len(source)*fpercent/100))):
                copysource.append(source[i])
            # Guarantee at least one element even for tiny fpercent values.
            if(len(copysource)==0):
                copysource.append(source[0])
            for i in range(0,(int(len(target)*fpercent/100))):
                copytarget.append(target[i])
            if(len(copytarget)==0):
                copytarget.append(target[0])
            return copysource,copytarget
    except Exception as e:
        # NOTE(review): broad catch that only prints -- callers receive None.
        print(e)
def getScale(im, scale, max_scale=None):
    """Compute the resize factor for an image.

    :param im: image with a ``shape`` attribute of (height, width, ...)
    :param scale: target size in pixels for the image's shorter side
    :param max_scale: optional upper bound in pixels for the longer side
    :return: the scaling factor to apply to the image
    """
    f = float(scale) / min(im.shape[0], im.shape[1])
    # Clamp the factor so the longer side does not exceed max_scale.
    if max_scale is not None and f * max(im.shape[0], im.shape[1]) > max_scale:
        f = float(max_scale) / max(im.shape[0], im.shape[1])
    return f
def load_swc(path):
    """Load swc morphology from file

    Used for sKCSD

    Parameters
    ----------
    path : str

    Returns
    -------
    morphology : np.array
    """
    return np.loadtxt(path)
def getFileName(in_path, with_extension=False):
    """Return the file name with the file extension appended.

    Args:
        in_path (str): the file path to extract the filename from.
        with_extension=False (Bool): flag denoting whether to return
            the filename with or without the extension.
    Returns:
        Str - file name, with or without the extension appended or a
        blank string if there is no file name in the path.
    """
    filename = os.path.basename(in_path)
    # A trailing separator means there is no file component; likewise an
    # extension-less name when the extension was requested yields ''.
    if os.path.sep in in_path[-2:] or (with_extension and '.' not in filename):
        return ''
    if with_extension:
        return filename
    return os.path.splitext(filename)[0]
def post_space_message(space_name, message):
    """
    This function will post the {message} to the Webex Teams space with the {space_name}
    Call to function get_space_id(space_name) to find the space_id
    Followed by API call /messages
    :param space_name: the Webex Teams space name
    :param message: the text of the message to be posted in the space
    :return: none
    """
    space_id = get_space_id(space_name)
    payload = {'roomId': space_id, 'text': message}
    url = WEBEX_TEAMS_URL + '/messages'
    header = {'content-type': 'application/json', 'authorization': WEBEX_TEAMS_AUTH}
    # SECURITY NOTE(review): verify=False disables TLS certificate checking;
    # acceptable only for lab/sandbox environments -- confirm intentional.
    requests.post(url, data=json.dumps(payload), headers=header, verify=False)
def generate_simple_csv(user_dict, outfile=None, limit=0.0,
                        month=None, year=None):
    """
    output account-based spends to a CSV. can create a new file, or append to an
    existing one.
    the CSV header is defined in CSV_HEADER and can be used to customize the
    field names you want to output.
    if you want to change the fields that are printed out, please update
    the list definitions of 'line' w/the variables you would like to display.
    the default settings for this reflect the way in which our lab categorizes
    projects, and may require tweaking for other types of orgs.
    args:
        limit: only print the OU spend that's greater than this
        outfile: name of the CSV to write to.
        month: month of the report (gleaned from the billing CSV)
        year: year of the report (gleaned from the billing CSV)
    """
    CSV_HEADER = ['year', 'month', 'id', 'name', 'spend']
    limit = float(limit) or 0.0
    locale.setlocale(locale.LC_ALL, '')
    # add the header to the CSV only if we're creating the file
    if not os.path.isfile(outfile):
        with open(outfile, 'w', newline='') as csv_file:
            csv.writer(csv_file, delimiter=',').writerow(CSV_HEADER)
    # collect (name, id, total, currency) for each account
    # (avoid shadowing the builtin `id` as the original did)
    account_details = [(u['name'], acct_id, u['total'], u['currency'])
                       for acct_id, u in user_dict.items()]
    # open the output once instead of reopening it for every row
    with open(outfile, 'a', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        for acct in sorted(account_details, key=lambda acct: acct[2], reverse=True):
            (acct_name, acct_num, acct_total, acct_total_currency) = acct
            if acct_total < limit:
                continue
            # locale.format() was removed in Python 3.12; format_string is
            # the documented equivalent for a single format specifier.
            acct_total_str = '$' + locale.format_string("%.2f", acct_total,
                                                        grouping=True)
            writer.writerow([year, month, acct_num, acct_name, acct_total_str])
    awslib.uploadcsv_to_s3()
def quintic_extrap(ys, xs):
    """
    Quintic extrapolate from six x,y pairs to x = 0.

    ys: six y values from the x,y pairs. Note that these can be arrays.
    xs: six distinct x values from the x,y pairs. These should be scalars.
    Returns extrapolated y at x=0.

    The original machine-generated (Mathematica CForm) closed form is
    replaced by the equivalent Lagrange interpolation evaluated at x = 0;
    the degree-5 polynomial through six points is unique, so the value is
    the same. This also fixes the Python-2-only tuple-parameter signature
    ((y1,...),(x1,...)), which is a SyntaxError under Python 3 -- callers
    still pass the same two sequences.
    """
    # Lagrange basis at x = 0:
    #   L_i(0) = prod_{j != i} (0 - x_j) / (x_i - x_j)
    #          = prod_{j != i} x_j / (x_j - x_i)
    result = 0.0
    for i, (xi, yi) in enumerate(zip(xs, ys)):
        weight = 1.0
        for j, xj in enumerate(xs):
            if j != i:
                weight *= xj / (xj - xi)
        result = result + yi * weight
    return result
def test_handle_merge_csv():
    """Tests merging CSV files algorithm"""
    # pylint: disable=import-outside-toplevel
    import workflow_docker as wd
    # Expected merge result to compare against
    with open(WORKFLOW_MERGECSV_RESULT, 'r', encoding='utf8') as result_file:
        expected = json.load(result_file)
    # Build the parameter list, pointing 'top_path' at the test folder
    input_folder = os.path.dirname(WORKFLOW_MERGECSV_FOLDER)
    parameters = _params_from_queue('merge_csv')
    for param in parameters:
        if param['field_name'] == 'top_path':
            param['value'] = WORKFLOW_MERGECSV_FOLDER
    # Working folder that receives the merged output
    working_folder = os.path.realpath(os.path.join(os.getcwd(), 'tmpmergecsv'))
    os.makedirs(working_folder, exist_ok=True)
    # Clear any captured messages, then run the function under test
    _helper_msg_func((), False)
    res = wd.handle_merge_csv(parameters, input_folder, working_folder, _helper_msg_func, _helper_msg_func)
    assert res is not None
    assert res == expected
    for one_file in WORKFLOW_MERGECSV_FILES:
        assert os.path.exists(os.path.join(working_folder, one_file))
    # Best-effort cleanup: a failed delete should not fail the test
    try:
        shutil.rmtree(working_folder)
    except Exception as ex:
        print('test_handle_merge_csv: exception caught deleting working folder', working_folder, str(ex))
def answer_question_interactively(question):
    """Returns True or False for a yes/no question asked of the user.

    Re-prompts until the user enters 'y' or 'n' (case-insensitive).

    :param question: the question text, printed without a trailing prompt
    :returns: True for 'y', False for 'n'
    """
    while True:
        # Lower-case once so both branches compare against the same value.
        answer = input(question + '? [Y or N]: ').lower()
        if answer == 'y':
            return True
        if answer == 'n':
            return False
def connect(
    instance_id,
    database_id,
    project=None,
    credentials=None,
    pool=None,
    user_agent=None,
):
    """Open a DB-API connection to a Google Cloud Spanner database.

    :type instance_id: str
    :param instance_id: ID of the Cloud Spanner instance.

    :type database_id: str
    :param database_id: ID of the database within the instance.

    :type project: str
    :param project: (Optional) project which owns the instances, tables and
                    data; determined from the environment when omitted.

    :type credentials: Union[:class:`~google.auth.credentials.Credentials`, str]
    :param credentials: (Optional) credentials identifying this application,
                        or a file path to service-account JSON; ascertained
                        from the environment when omitted.

    :type pool: Concrete subclass of
                :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
    :param pool: (Optional) session pool to be used by the database.

    :type user_agent: str
    :param user_agent: (Optional) user agent used with this connection's
                       requests.

    :rtype: :class:`google.cloud.spanner_dbapi.connection.Connection`
    :returns: Connection bound to the given Cloud Spanner database.

    :raises: :class:`ValueError` if the instance or database does not exist.
    """
    client_info = ClientInfo(
        user_agent=user_agent or DEFAULT_USER_AGENT, python_version=PY_VERSION
    )

    if isinstance(credentials, str):
        # A string credential is treated as a path to service-account JSON.
        client = spanner.Client.from_service_account_json(
            credentials, project=project, client_info=client_info
        )
    else:
        client = spanner.Client(
            project=project, credentials=credentials, client_info=client_info
        )

    instance = client.instance(instance_id)
    if not instance.exists():
        raise ValueError("instance '%s' does not exist." % instance_id)

    database = instance.database(database_id, pool=pool)
    if not database.exists():
        raise ValueError("database '%s' does not exist." % database_id)

    conn = Connection(instance, database)
    if pool is not None:
        # The caller supplied the pool, so the connection must not tear it down.
        conn._own_pool = False
    return conn
def intersect(linked_list_1: List, linked_list_2: List):
    """Return the (index, index) pair where the two lists first share a value.

    The longer list is trimmed from the front via ``_helper`` so both are
    walked in lockstep; returns None when no common value is found.
    """
    length_diff = len(linked_list_1) - len(linked_list_2)
    enum1 = list(enumerate(linked_list_1))
    enum2 = list(enumerate(linked_list_2))
    # Align the two enumerations so their tails line up.
    if length_diff < 0:
        enum2 = _helper(length_diff=length_diff, linked_list=enum2)
    else:
        enum1 = _helper(length_diff=length_diff, linked_list=enum1)
    for left, right in zip(enum1, enum2):
        if left[1] == right[1]:
            return (left[0], right[0],)
    return None
def _enable_check(proxy, check, group, suite):
    """Enable a check for a given group/suite.

    CLI form: debile-remote enable-check <check> <group> <suite>
    """
    result = proxy.enable_check(check, group, suite)
    print(result)
def recognize_package_manifests(location):
    """
    Return a list of Package objects if any package_manifests were recognized for this
    `location`, or None if there were no Packages found. Raises Exceptions on errors.
    """
    # Only plain files can carry package manifests.
    if not filetype.is_file(location):
        return

    # Collect the file's detected type/mimetype plus its name and extension;
    # these drive the matching below.
    T = contenttype.get_type(location)
    ftype = T.filetype_file.lower()
    mtype = T.mimetype_file

    _base_name, extension = splitext_name(location, is_file=True)
    filename = file_name(location)
    extension = extension.lower()

    if TRACE:
        logger_debug(
            'recognize_packages: ftype:', ftype, 'mtype:', mtype,
            'pygtype:', T.filetype_pygment,
            'fname:', filename, 'ext:', extension,
        )

    recognized_package_manifests = []
    for package_type in PACKAGE_TYPES:
        # Note: default to True if there is nothing to match against

        # First pass: match the file name against the package type's known
        # metafile patterns (e.g. "package.json", "setup.py").
        metafiles = package_type.metafiles
        if any(fnmatch.fnmatchcase(filename, metaf) for metaf in metafiles):
            for recognized in package_type.recognize(location):
                if TRACE:
                    logger_debug(
                        'recognize_packages: metafile matching: recognized:',
                        recognized,
                    )
                if recognized and not recognized.license_expression:
                    # compute and set a normalized license expression
                    recognized.license_expression = recognized.compute_normalized_license()
                    if TRACE:
                        logger_debug(
                            'recognize_packages: recognized.license_expression:',
                            recognized.license_expression,
                        )
                recognized_package_manifests.append(recognized)
            # NOTE(review): returns after the FIRST package type whose
            # metafile patterns match, skipping the remaining types —
            # presumably metafile matches are considered authoritative;
            # confirm this short-circuit is intended.
            return recognized_package_manifests

        # Second pass: require filetype, mimetype AND extension to all match.
        type_matched = False
        if package_type.filetypes:
            type_matched = any(t in ftype for t in package_type.filetypes)

        mime_matched = False
        if package_type.mimetypes:
            mime_matched = any(m in mtype for m in package_type.mimetypes)

        extension_matched = False
        extensions = package_type.extensions
        if extensions:
            extensions = (e.lower() for e in extensions)
            extension_matched = any(
                fnmatch.fnmatchcase(extension, ext_pat)
                for ext_pat in extensions
            )

        if type_matched and mime_matched and extension_matched:
            if TRACE:
                logger_debug(f'recognize_packages: all matching for {package_type}')
            try:
                for recognized in package_type.recognize(location):
                    # compute and set a normalized license expression
                    if recognized and not recognized.license_expression:
                        try:
                            recognized.license_expression = recognized.compute_normalized_license()
                        except Exception:
                            # License normalization is best-effort unless the
                            # debug flag requests a hard failure.
                            if SCANCODE_DEBUG_PACKAGE_API:
                                raise
                            recognized.license_expression = 'unknown'
                    if TRACE:
                        logger_debug('recognize_packages: recognized', recognized)
                    recognized_package_manifests.append(recognized)
            except NotImplementedError:
                # build a plain package if recognize is not yet implemented
                recognized = package_type()
                if TRACE:
                    logger_debug('recognize_packages: recognized', recognized)
                recognized_package_manifests.append(recognized)

                if SCANCODE_DEBUG_PACKAGE_API:
                    raise
            # Stop at the first type that matched on filetype/mime/extension.
            return recognized_package_manifests

        if TRACE: logger_debug('recognize_packages: no match for type:', package_type)
def test_cleared_rat_request(test_client,
    test_rat_request_nonadmin,test_login_ll3,test_delete_request_db_contents):
    """ Clears the single rat request by Manuel (lightserv-test, a nonadmin) (with clearer='ll3')
    """
    # NOTE(review): despite the test_ name this is a yield-style fixture:
    # it performs the clearing POST as setup, yields the client to the
    # dependent test, then falls through for teardown.
    print('----------Setup test_cleared_request_ahoag fixture ----------')
    now = datetime.now()
    # Form payload for the clearing-entry submission (times use the
    # datetime-local input format).
    data = dict(time_pbs_wash1=now.strftime('%Y-%m-%dT%H:%M'),
        pbs_wash1_notes='some rat notes',submit=True)
    # Submit the clearing entry; the response itself is not inspected —
    # the POST is issued purely for its database side effects.
    response = test_client.post(url_for('clearing.clearing_entry',username="lightserv-test",
            request_name="Nonadmin_rat_request",
            clearing_protocol="iDISCO abbreviated clearing (rat)",
            antibody1="",antibody2="",
            clearing_batch_number=1),
        data = data,
        follow_redirects=True,
        )
    yield test_client # this is where the testing happens
    print('-------Teardown test_cleared_request_ahoag fixture --------')
def run_pinch_and_spread(use_case):
    """Run script to test pinch (zoom-out) and spread (zoom-in) gestures.

    Repeatedly pinches and zooms the use case's paint element, timing the
    whole routine against the PINCH_AND_SPREAD boundary. Errors are reported
    to the console rather than raised.
    """
    def zoom(element_id):
        # scale > 1 moves the fingers apart (spread / zoom in).
        use_case.driver.execute_script(
            'mobile: pinch', {'id': element_id, 'scale': '2', 'velocity': 1}
        )

    def pinch(element_id):
        # scale < 1 brings the fingers together (pinch / zoom out).
        use_case.driver.execute_script(
            'mobile: pinch', {'id': element_id, 'scale': '0.5', 'velocity': 1}
        )

    @minimum_execution_time(seconds=time_boundaries.PINCH_AND_SPREAD)
    def simple_routine():
        # (removed leftover debug statement: print(dir(use_case.paint)))
        for _ in range(loop_count.PINCH_AND_SPREAD):
            pinch(use_case.paint.id)
            zoom(use_case.paint.id)

    try:
        simple_routine()
    except Exception as e:
        click.secho("Error: {}.".format(e), fg='red')
def content_loss(sharp_images, deblur_images, cont_net):
    """
    Computes the Content Loss to compare the
    reconstructed (deblurred) and the original (sharp) images.

    Takes the output feature maps of the relu4_3 layer of pretrained VGG19
    to compare the content between images as proposed in:
    Johnson et al. "Perceptual losses for real-time style transfer and
    super-resolution." (ECCV 2016)

    :param sharp_images: ground-truth images in [-1, 1]
    :param deblur_images: reconstructed images in [-1, 1]
    :param cont_net: feature extractor (e.g. truncated VGG19)
    :returns: scalar MSE between the two feature maps
    """
    # Torchvision models documentation:
    # All pre-trained models expect input images normalized in the same way. The images have
    # to be loaded into a range of [0, 1] and then normalized using
    # mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
    deblur_images = (deblur_images + 1) * 0.5
    sharp_images = (sharp_images + 1) * 0.5

    # Per-channel ImageNet normalization done with plain broadcasting instead
    # of constructing a transforms.Normalize object on every call.
    mean = deblur_images.new_tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = deblur_images.new_tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
    deblur_images = (deblur_images - mean) / std
    sharp_images = (sharp_images - mean) / std

    content_deblur = cont_net(deblur_images)
    # Detach the target so gradients flow only through the deblurred branch.
    content_sharp = cont_net(sharp_images).detach()
    # mse_loss already reduces to a scalar mean, so the former extra
    # torch.mean(...) wrapper was redundant.
    return nn.functional.mse_loss(content_deblur, content_sharp)
def generate_cashflow_diagram(
        cashflows, d=None, net=False, scale=None, color=None, title=None, **kwargs):
    """ Generates a barplot showing cashflows over time

    Given a set of cashflows, produces a stacked barplot with bars at each
    period. The height of each bar is set by the amount of cash produced
    by a cashflow at the specified period.

    Note that this function does not display the produced plot; call
    matplotlib.pyplot.show() to view the plot.

    Args:
        cashflows: A sequence of cashflows to plot
        d: Optional; A two-integer list whose elements represent the first
            and final periods to be plotted
        net: Optional; When true, only the net cashflows are shown, and the
            individual cashflow information is omitted.
        scale: Optional; The y-axis scale; must be a member or key of Scales
        kwargs: A list of keyword arguments to be passed to Dataframe.plot()

    Returns:
        A Figure and Axis for the plot
    """
    # Parse Args
    cashflows = (cashflows,) if isinstance(cashflows, Cashflow) else cashflows
    d = parse_d(d or get_final_period(cashflows, finite=True) or 5)
    net = bool(net)
    if color:
        color = color.colors if isinstance(color, ListedColormap) else color
    else:
        color = default_colormap.colors
    if scale:
        scale = (
            scale if isinstance(scale, Scales)
            else Scales[scale.upper()])

    # Extract information
    periods = list(range(d[0], d[1] + 1))
    titles = [cashflow.get_title() for cashflow in cashflows]
    # One row per period; each row lists every cashflow's amount.
    cashflows = [
        [cashflow[n].amount for cashflow in cashflows]
        for n in periods
    ]

    # Format information
    # BUGFIX: `cashflows` is indexed positionally (0..len(periods)-1), so the
    # previous `cashflows[n] for n in periods` indexing broke whenever
    # d[0] != 0. Iterate the rows directly instead.
    if net:
        cashflows = [[sum(row)] for row in cashflows]
        if len(titles) > 1:
            # A single aggregated column remains; per-cashflow titles would
            # no longer match the DataFrame's shape.
            titles = ["Net"]
    if scale:
        cashflows = [
            [amount * scale.value for amount in row]
            for row in cashflows
        ]

    # Plot the Cashflow Diagram with matplotlib
    plotdata = pd.DataFrame(cashflows, index=periods, columns=titles)
    fig, ax = plt.subplots()
    plotdata.plot(kind="bar", stacked="true", ax=ax, color=color, **kwargs)
    ax.set_title(title)
    ax.set_ylabel("Cashflows" + (f" [{scale.name.title()}]" if scale else ""))
    ax.set_xlabel("Period")
    ax.axhline()
    return fig, ax
def test_is_unique_file_valid_in_set(pack):
    """
    Given
    - pack with pack_metadata file.

    When
    - is_unique_file_valid_in_set is called

    Then
    - Ensure it is valid and no error is returned.
    """
    # Minimal metadata payload describing a single certified pack.
    pack.pack_metadata.write_json({
        "VMware": {
            "name": "VMware",
            "current_version": "1.1.0",
            "author": "Cortex XSOAR",
            "certification": "certified",
            "tags": [],
            "use_cases": [],
            "categories": [
                "IT Services"
            ],
            "id": "VMware"
        }
    })

    id_set_validator = IDSetValidations(is_circle=False, is_test_run=True, configuration=CONFIG)
    is_valid, error = id_set_validator.is_unique_file_valid_in_set(pack_path=pack.path)

    assert is_valid
    assert not error
def is_iterable(obj):
    # type: (Any) -> bool
    """
    Returns True if obj is a non-string iterable
    """
    # collections.Iterable was deprecated in Python 3.3 and removed in 3.10;
    # the ABC lives in collections.abc on all supported versions.
    from collections.abc import Iterable
    return not is_str(obj) and isinstance(obj, Iterable)
def other_players(me, r):
    """Return a list of all players but me, in turn order starting after me"""
    # Walk the seats after mine, wrapping around the table modulo nPlayers.
    return [(me + offset) % r.nPlayers for offset in range(1, r.nPlayers)]
def mock_dd_slo_history(*args, **kwargs):
    """Stub for datadog.api.ServiceLevelObjective.history returning a canned fixture."""
    # Arguments are accepted (and ignored) to match the real API's signature.
    fixture = load_fixture('dd_slo_history.json')
    return fixture
def check_joints2d_visibility_torch(joints2d, img_wh):
    """
    Checks if 2D joints are within the image dimensions.

    Returns a (batch, num_joints) boolean tensor: True where both coordinates
    lie inside [0, img_wh].
    """
    xs = joints2d[:, :, 0]
    ys = joints2d[:, :, 1]
    # A joint is hidden when either coordinate falls outside the image.
    outside = (xs > img_wh) | (ys > img_wh) | (xs < 0) | (ys < 0)
    return ~outside
def getIntervalIntersectionLength(aa, bb, wrapAt=360):
    """Return the length of the overlap between intervals aa and bb (0.0 if none)."""
    overlap = getIntervalIntersection(aa, bb, wrapAt=wrapAt)
    if overlap is False:
        return 0.0
    length = overlap[1] - overlap[0]
    # Wrap-around intervals (e.g. angles) are measured modulo wrapAt.
    return length if wrapAt is None else length % wrapAt
def test_condition_is_can_have_target_type_object(if_statement_validator):
    """When condition is `is` target can be object."""
    statement = {
        'condition': 'is',
        'target': {'test': 'bob'},
        'then': ['test'],
    }
    # The validator should accept a dict-valued target for `is` conditions.
    assert is_successful(if_statement_validator(statement))
def test_jvp_construct_single_input_single_output_default_v_graph():
    """
    Features: Function jvp
    Description: Test jvp with Cell construct, single input, single output and default v in graph mode.
    Expectation: No exception.
    """
    class JvpWrapper(nn.Cell):
        """Cell that forwards through jvp inside construct()."""
        def __init__(self, network):
            super(JvpWrapper, self).__init__()
            self.net = network

        def construct(self, inputs, vectors):
            net_out, jvp_out = jvp(self.net, inputs, vectors)
            return net_out, jvp_out

    x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
    v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
    primal, grad = JvpWrapper(SingleInputSingleOutputNet())(x, v)
    # f(x) = x**3, so primal = x**3 and jvp = 3*x**2 * v.
    expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
    expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
    assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
    assert np.allclose(grad.asnumpy(), expect_grad.asnumpy())
def get_environment() -> Environment:
    """
    Parses environment variables and sets their defaults if they do not exist.
    """
    # Map each Environment field to the service name used by get_endpoint.
    services = {
        "permission_url": "PERMISSION",
        "media_url": "MEDIA",
        "datastore_reader_url": "DATASTORE_READER",
        "datastore_writer_url": "DATASTORE_WRITER",
    }
    return Environment(
        **{field: get_endpoint(service) for field, service in services.items()}
    )
def test_cwd_with_absolute_paths():
    """
    cd() should append arg if non-absolute or overwrite otherwise
    """
    base = '/some/existing/path'
    relative = 'another'
    rooted = '/absolute/path'
    with settings(cwd=base):
        # An absolute path replaces the current cwd outright.
        with cd(rooted):
            eq_(env.cwd, rooted)
        # A relative path is appended to the existing cwd.
        with cd(relative):
            eq_(env.cwd, base + '/' + relative)
def get_kubernetes_cluster(name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKubernetesClusterResult:
    """
    Use this data source to access information about an existing Managed Kubernetes Cluster (AKS).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_azure as azure

    example = azure.containerservice.get_kubernetes_cluster(name="myakscluster",
        resource_group_name="my-example-resource-group")
    ```

    :param str name: The name of the managed Kubernetes Cluster.
    :param str resource_group_name: The name of the Resource Group in which the managed Kubernetes Cluster exists.
    """
    # Build the invoke arguments expected by the Azure provider (camelCase keys).
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    # Default the invoke options and pin them to this SDK's version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:containerservice/getKubernetesCluster:getKubernetesCluster', __args__, opts=opts, typ=GetKubernetesClusterResult).value

    # Re-wrap the raw invoke result in the awaitable result type, copying
    # every output property through explicitly.
    return AwaitableGetKubernetesClusterResult(
        addon_profiles=__ret__.addon_profiles,
        agent_pool_profiles=__ret__.agent_pool_profiles,
        api_server_authorized_ip_ranges=__ret__.api_server_authorized_ip_ranges,
        disk_encryption_set_id=__ret__.disk_encryption_set_id,
        dns_prefix=__ret__.dns_prefix,
        fqdn=__ret__.fqdn,
        id=__ret__.id,
        identities=__ret__.identities,
        kube_admin_config_raw=__ret__.kube_admin_config_raw,
        kube_admin_configs=__ret__.kube_admin_configs,
        kube_config_raw=__ret__.kube_config_raw,
        kube_configs=__ret__.kube_configs,
        kubelet_identities=__ret__.kubelet_identities,
        kubernetes_version=__ret__.kubernetes_version,
        linux_profiles=__ret__.linux_profiles,
        location=__ret__.location,
        name=__ret__.name,
        network_profiles=__ret__.network_profiles,
        node_resource_group=__ret__.node_resource_group,
        private_cluster_enabled=__ret__.private_cluster_enabled,
        private_fqdn=__ret__.private_fqdn,
        private_link_enabled=__ret__.private_link_enabled,
        resource_group_name=__ret__.resource_group_name,
        role_based_access_controls=__ret__.role_based_access_controls,
        service_principals=__ret__.service_principals,
        tags=__ret__.tags,
        windows_profiles=__ret__.windows_profiles)
def gene_expression_conv_base():
    """Hparams for GeneExpressionConv model."""
    hparams = common_hparams.basic_params1()

    # Derive the effective batch size from the data geometry: each output
    # position is predicted from inputs_per_output inputs, grouped into
    # chunks of chunk_size.
    batch_size = 10
    output_length = 2048
    inputs_per_output = 128
    chunk_size = 4
    input_length = output_length * inputs_per_output // chunk_size
    hparams.batch_size = input_length * batch_size

    hparams.dropout = 0.1
    hparams.hidden_size = 256
    hparams.kernel_width = 20

    hparams.add_hparam("num_conv_layers", 4)
    hparams.add_hparam("num_dconv_layers", 7)
    # The product of these pooling windows should match
    # input_length/target_length.
    hparams.add_hparam("pooling_windows", [2, 2, 2, 4])
    hparams.add_hparam("stride", 1)
    return hparams
def is_callable(type_def, allow_callable_class: bool = False) -> bool:
    """
    Checks whether the ``type_def`` is a callable according to the following rules:

    1. Functions are callable.
    2. ``typing.Callable`` types are callable.
    3. Generic aliases of types which are ``is_callable`` are callable.
    4. If ``allow_callable_class`` is set to ``True``, then classes which have a ``__call__`` method are callable.

    :param type_def: the type to check.
    :param allow_callable_class: set to ``True`` to consider classes which have a ``__call__`` method callable.
    :return: ``True`` if ``type_def`` is a callable type, ``False`` otherwise.
    """
    # Plain functions: compare against the type of a known function object.
    if isinstance(type_def, type(_check_callable_signature)):
        return True
    # Special forms (e.g. typing.Any, typing.Union) are never callable types.
    if isinstance(type_def, typing._SpecialForm):
        return False
    # NOTE(review): this branch relies on private typing internals.
    # `_GenericAlias` is undocumented, and the `_special` attribute only
    # exists on Python 3.7/3.8 — on 3.9+ this raises AttributeError for
    # unsubscripted aliases; confirm the supported Python versions.
    if isinstance(type_def, _GenericAlias):
        # Callable[...] aliases (typing or collections.abc origin) qualify.
        if type_def.__origin__ == typing.Callable or type_def.__origin__ == collections.abc.Callable:
            return True
        # Bare special aliases (e.g. typing.List) are not callables.
        if type_def._special:
            return False
        # Recurse into the alias's origin class (rule 3).
        return is_callable(type_def.__origin__, allow_callable_class=allow_callable_class)
    # Rule 4: any object exposing __call__ counts when explicitly allowed.
    if allow_callable_class and hasattr(type_def, "__call__"):
        return True
    return False
def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['E']), **kwargs)
    if not pretrained:
        return model
    # Fetch and load the ImageNet weights before returning.
    model.load_pretrained_model(model_zoo.load_url(model_urls['vgg19']))
    return model
def get_formats(input_f, input_case="cased", is_default=True):
    """
    Adds various abbreviation format options to the list of acceptable input forms
    """
    pairs = load_labels(input_f)
    extra = []
    for abbr, expansion in pairs:
        if input_case == "lower_cased":
            abbr = abbr.lower()
        # default "dr" -> doctor also accepts the period form "dr." -> doctor
        extra.append((abbr + ".", expansion))
        # Capitalized variants: "Dr" -> Doctor and "Dr." -> Doctor
        extra.append((abbr[0].upper() + abbr[1:], expansion[0].upper() + expansion[1:]))
        extra.append((abbr[0].upper() + abbr[1:] + ".", expansion[0].upper() + expansion[1:]))
    pairs.extend(extra)

    if not is_default:
        # Wrap each pair with raw/norm markers for non-default usage.
        pairs = [(x, f"|raw_start|{x}|raw_end||norm_start|{y}|norm_end|") for (x, y) in pairs]

    return pynini.string_map(pairs)
def db_select_entry(c, bibkey):
    """Select a note entry and its tags from the database.

    :param c: sqlite3 cursor
    :param bibkey: bibliography key identifying the entry
    :returns: tuple of (entry dict mapping field name to value,
        tuple of the entry's tags in ascending order)
    :raises IndexError: if no entry exists for *bibkey*
    """
    fields = ['bibkey', 'author', 'genre', 'thesis', 'hypothesis',
              'method', 'finding', 'comment', 'img_linkstr']
    sql = "SELECT {:s} FROM note WHERE bibkey = (?)".format(','.join(fields))
    c.execute(sql, (bibkey,))
    # fetchall()[0] (rather than fetchone()) deliberately raises IndexError
    # when the bibkey is missing, preserving the original failure mode.
    result = c.fetchall()[0]
    a_dict = dict(zip(fields, result))
    c.execute("SELECT tag FROM tags WHERE bibkey = (?) ORDER BY tag ASC",
              (a_dict['bibkey'],))
    tags = tuple(r[0] for r in c.fetchall())
    return a_dict, tags
def to_bytes(obj, encoding='utf-8', errors='strict'):
    """Coerce a text string to a byte string, passing bytes through untouched.

    Args:
        obj: The object to turn into a byte string.
        encoding: Codec used when *obj* is text. Defaults to 'utf-8'.
        errors: Error handler applied when *obj* is not encodable with the
            given encoding; any valid codecs error handler may be specified.

    Returns: Typically this returns a byte string.
    """
    return obj if isinstance(obj, bytes) else bytes(obj, encoding=encoding, errors=errors)
def generate_data(model, count):
    """
    Generate random data

    :param model: Django model
    :param count: Number of objects
    :return:
    """
    for i in range(count):
        obj = model(
            boolean_field=random.choice([True, False]),
            null_boolean_field=random.choice([True, False, None]),
            # Bounds match Django's SmallIntegerField / IntegerField ranges.
            small_integer_field=random.randint(-32768, 32767),
            integer_field=random.randint(-2147483648, 2147483647),
            # NOTE(review): sys.float_info.min is the smallest POSITIVE float,
            # so this never produces negative values — confirm whether a
            # symmetric range (e.g. -max..max) was intended.
            float_field=random.uniform(sys.float_info.min, sys.float_info.max),
            decimal_field=random.uniform(-1000000000, 1000000000),
            char_field=generate_string(),
            choice_field=random.choice(['a', 'b']),
            text_field=generate_string(),
            date_field=generate_datetime(),
            time_field=generate_datetime(),
            date_time_field=generate_datetime(),
            geo_field=generate_geo(model._meta.get_field('geo_field')),
            # fk_field =
            # m2m_field =
        )
        obj.save()
def login() -> Response:
    """
    Login to Afterglow

    GET|POST /auth/login
        - login to Afterglow; authentication required using any of the methods
          defined in USER_AUTH

    :return: empty response with "afterglow_core_access_token" cookie
        if successfully logged in
    """
    # TODO Ensure CORS is disabled for POSTS to this endpoint
    # TODO Allow additional domains for cookies to be specified in server config

    # Destination after a successful login; falls back to the default page.
    next_url = request.args.get('next')
    if not next_url:
        next_url = url_for('default')

    # Already-authenticated GETs are redirected straight through.
    if request.method == 'GET':
        try:
            authenticate()
            return redirect(next_url)
        except NotAuthenticatedError:
            pass

    # Do not allow login if Afterglow Core has not yet been configured
    if DbUser.query.count() == 0:
        return redirect(url_for('initialize'))
        # raise NotInitializedError()

    # Unauthenticated GET: render the login form with available OAuth plugins.
    if request.method == 'GET':
        return render_template(
            'login.html.j2', oauth_plugins=oauth_plugins.values(),
            next_url=next_url)

    # NOTE(review): credentials are read from request.args (the query string)
    # even for POST requests, which can leak passwords into URLs and server
    # logs — confirm whether request.form was intended here.
    username = request.args.get('username')
    if not username:
        raise ValidationError('username', 'Username cannot be empty')

    password = request.args.get('password')
    if not password:
        raise ValidationError('password', 'Password cannot be empty')

    user = DbUser.query.filter_by(username=username).one_or_none()
    if user is None:
        raise HttpAuthFailedError()

    if not verify_password(password, user.password):
        raise HttpAuthFailedError()

    # set token cookies
    request.user = user

    return set_access_cookies(json_response())
def compact(number, strip_check_digit=True):
    """Convert the MEID number to the minimal (hexadecimal) representation.

    This strips grouping information, removes surrounding whitespace and
    converts to hexadecimal if needed. If the check digit is to be preserved
    and conversion is done a new check digit is recalculated."""
    # Split the raw input into the number proper and its check digit.
    number, cd = _parse(number)
    if strip_check_digit:
        cd = ''
    if len(number) == 18:
        # 18 digits is the decimal form: convert both halves to hex.
        number = '%08X%06X' % (int(number[0:10]), int(number[10:18]))
        if cd:
            # The hex form needs a freshly computed check digit.
            cd = calc_check_digit(number)
    return number + cd
def hashtag_getter(doc: "Doc") -> List[str]:
    """
    Extract hashtags from text

    Args:
        doc (Doc): A SpaCy document (only its .text attribute is used)

    Returns:
        List[str]: A list of hashtags

    Example:
        >>> from spacy.tokens import Doc
        >>> Doc.set_extension("hashtag", getter=dacy.utilities.twitter.hashtags)
        >>> doc = nlp("Fuck hvor fedt! #yolo #life")
        >>> doc._.hashtag # extract the hashtags from your document
        ["#yolo", "#life"]
    """
    # NOTE: the Doc annotation is a string so importing this module does not
    # require spacy to be resolvable at definition time.

    def find_hashtags(
        text,
        valid_tags={"#", "#"},
        valid_chars={"_", "-"},
        invalid_tag_suffix={b"\xe2\x83\xa3", b"\xef\xb8\x8f"},
    ):
        def is_letter(t):
            # Alphanumerics, allowed punctuation, and combining marks count
            # as hashtag body characters.
            if (
                t.isalnum()
                or t in valid_chars
                or str.encode(t).startswith(b"\xcc")
                or str.encode(t).startswith(b"\xe0")
            ):
                return True
            return False

        start = None
        for i, char in enumerate(text):
            # A tag starts at '#' not preceded by a letter/'&' and not
            # followed by a keycap/variation-selector suffix.
            if (
                char in valid_tags
                and not (
                    i + 1 != len(text) and str.encode(text[i + 1]) in invalid_tag_suffix
                )
                and (i == 0 or not (is_letter(text[i - 1]) or text[i - 1] == "&"))
            ):
                start = i
                continue
            if start is not None and not is_letter(char):
                if char in valid_tags:
                    start = None
                    continue
                # Purely numeric "tags" (e.g. "#42") are not hashtags.
                if not text[start + 1 : i].isnumeric():
                    yield "#" + text[start + 1 : i]
                start = None
        # Flush a tag that runs to the end of the text.
        if start is not None and not text[start + 1 : i + 1].isnumeric():
            yield "#" + text[start + 1 : i + 1]

    return list(find_hashtags(doc.text))
def options():
    """Stub version of the parsed command line options."""
    # Build the stub class dynamically; it only needs a `profile` attribute.
    return type('StubOptions', (object,), {'profile': None})()
def get_surface(shifts, orig_text, item, text, unit_shortening=0):
    """
    Extract surface from regex hit.
    """
    # Span in the normalized text, with the unit suffix cut off the end.
    span = (item.start(), item.end() - unit_shortening)
    logging.debug('\tInitial span: %s ("%s")', span, text[span[0]:span[1]])

    # Translate the span back into the original text via the shift table.
    real_span = (span[0] - shifts[span[0]], span[1] - shifts[span[1] - 1])
    surface = orig_text[real_span[0]:real_span[1]]
    logging.debug('\tShifted span: %s ("%s")', real_span, surface)

    # Trim trailing spaces/hyphens and leading spaces, keeping span in sync.
    while surface.endswith((' ', '-')):
        surface = surface[:-1]
        real_span = (real_span[0], real_span[1] - 1)
    while surface.startswith(' '):
        surface = surface[1:]
        real_span = (real_span[0] + 1, real_span[1])

    logging.debug('\tFinal span: %s ("%s")', real_span, surface)
    return surface, real_span
def serve_game_states(playerIdentifier: PlayerIdentifier) -> Generator[GameStateBuffer, None, None]:
    """
    A generator for game state

    :return: an iterator over game states
    """
    with get_transactional_server_stub() as game_master_stub:
        # Re-yield each state pushed by the game master's stream while the
        # stub's transaction stays open.
        yield from game_master_stub.stream_game_state(playerIdentifier)
def plot_bootstrap_delta_grp(dfboot, df, grp, force_xlim=None, title_add=''):
    """Plot delta between boostrap results, grouped"""
    count_txt_h_kws, mean_txt_kws, pest_mean_point_kws, mean_point_kws = _get_kws_styling()

    # Ensure the grouping column is categorical-like; numeric groups get an
    # 's' prefix so seaborn treats them as discrete labels.
    if dfboot[grp].dtypes != 'object':
        dfboot = dfboot.copy()
        dfboot[grp] = dfboot[grp].map(lambda x: f's{x}')
    mn = dfboot.groupby(grp).size()

    # Figure height scales with the number of groups; the narrow right-hand
    # panel shows per-group counts.
    f = plt.figure(figsize=(14, 2+(len(mn)*.2)))  #, constrained_layout=True)
    gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1], figure=f)
    ax0 = f.add_subplot(gs[0])
    ax1 = f.add_subplot(gs[1], sharey=ax0)

    # Boxplot of bootstrap deltas per group (whiskers at the 3rd/97th pct).
    _ = sns.boxplot(x='lr_delta', y=grp, data=dfboot, palette='cubehelix_r',
                    sym='', whis=[3, 97], showmeans=True, notch=True, ax=ax0)
    # Zero line: groups crossing it show no significant delta.
    _ = ax0.axvline(0, ls='--', lw=2, c='#555555', zorder=-1)
    if force_xlim is not None:
        _ = ax0.set(xlim=force_xlim)

    # Count panel with per-bar count annotations.
    _ = sns.countplot(y=grp, data=df, ax=ax1, palette='cubehelix_r')
    ct = df.groupby(grp).size().tolist()
    _ = [ax1.annotate(f'{v}', xy=(v, i%len(ct)), **count_txt_h_kws) for i, v in enumerate(ct)]

    # Leave extra headroom for the title when a subtitle line is added.
    ypos = 1.02
    if title_add != '':
        ypos = 1.05
        title_add = f'\n{title_add}'

    title = (f'2-sample bootstrap test - grouped by {grp}')
    _ = f.suptitle(f'{title}{title_add}', y=ypos)

    f.tight_layout()  # prefer over constrained_layout
    return gs
def inception_inspired_reservoir_model(
    input_shape: Tuple[int, int, int],
    reservoir_weight: np.ndarray,
    num_output_channels: int,
    seed: Optional[int] = None,
    num_filters: int = 32,
    reservoir_base: str = 'DenseReservoir',
    reservoir_params: Optional[Dict[str, Any]] = None,
    final_activation: Optional[str] = 'sigmoid',
    task: str = 'segmentation',
) -> tf.keras.Model:
  """Builds a simple recurrent reservoir model with inception-style head.

  The model is an SRN in the sense that a copy of the output of a first
  reservoir is passed through a set of trainable weights and then through
  a second identical reservoir.

  Args:
    input_shape: (image_height, image_width, num_channels) of the input image.
    reservoir_weight: Weight matrix to be assigned to the fixed layers.
    num_output_channels: how many output channels to use.
    seed: int seed to use to get a deterministic set of "random" weights.
    num_filters: how many filters to include in each layer of the inception
      block.
    reservoir_base: the reservoir base to use. Default is 'DenseReservoir'.
    reservoir_params: the parameters to initialize the reservoir_base; any
      field provided MUST be a correct argument for the reservoir base
      (e.g. 'recurrence_degree', 'keep_memory', 'trainable_reservoir',
      'use_bias', 'activation_within_recurrence', 'kernel_local_learning',
      'kernel_local_learning_params', 'recurrent_kernel_local_learning',
      'recurrent_kernel_local_learning_params', 'state_discount'). Defaults
      are used for any field not supplied.
    final_activation: 'sigmoid', 'softmax', 'tanh', or None.
    task: which task this model is used for (options includes: 'segmentation',
      'classification')

  Returns:
    A simple recurrent reservoir model with convolutional head

  Raises:
    ValueError: if task not in accepted tasks (segmentation, classification).
  """
  if task not in ['segmentation', 'classification']:
    raise ValueError(
        f'Task not defined in accepted tasks (segmentation, classification). Got {task}'
    )

  # The fixed reservoir layers all share the provided weight matrix.
  if reservoir_params is None:
    reservoir_params = {}
  reservoir_params['weight'] = reservoir_weight

  inputs = tf.keras.layers.Input(input_shape)
  if seed:
    kernel_initializer = initializers.FixedRandomInitializer(seed=seed)
  else:
    kernel_initializer = tf.keras.initializers.RandomNormal()

  # Inception 'stem': alternating convolutions and stride-1 max-pools.
  net = tf.keras.layers.Conv2D(
      num_filters, 8, padding='same', input_shape=input_shape,
      activation='elu')(inputs)
  net = tf.keras.layers.MaxPooling2D(
      pool_size=(3, 3), strides=(1, 1), padding='same')(net)
  net = tf.keras.layers.Conv2D(
      num_filters, 1, activation='elu', padding='same')(net)
  net = tf.keras.layers.Conv2D(
      num_filters, 3, activation='elu', padding='same')(net)
  net = tf.keras.layers.MaxPooling2D(
      pool_size=(3, 3), strides=(1, 1), padding='same')(net)
  net = tf.keras.layers.Conv2D(
      num_filters, 1, activation='elu', padding='same')(net)
  net = tf.keras.layers.Conv2D(
      num_filters, 3, activation='elu', padding='same')(net)

  # Inception block: four parallel branches concatenated channel-wise.
  branch_5x5 = tf.keras.layers.Conv2D(
      num_filters, [1, 1], strides=(1, 1), activation='elu',
      padding='same')(net)
  branch_5x5 = tf.keras.layers.Conv2D(
      num_filters, [5, 5], strides=(1, 1), activation='elu',
      padding='same')(branch_5x5)
  branch_3x3 = tf.keras.layers.Conv2D(
      num_filters, [1, 1], strides=(1, 1), activation='elu',
      padding='same')(net)
  branch_3x3 = tf.keras.layers.Conv2D(
      num_filters, [3, 3], strides=(1, 1), activation='elu',
      padding='same')(branch_3x3)
  branch_pool = tf.keras.layers.MaxPooling2D(
      pool_size=(3, 3), strides=(1, 1), padding='same')(net)
  branch_pool = tf.keras.layers.Conv2D(
      num_filters, [1, 1], strides=(1, 1), activation='elu',
      padding='same')(branch_pool)
  branch_1x1 = tf.keras.layers.Conv2D(
      num_filters, [1, 1], strides=(1, 1), activation='elu',
      padding='same')(net)
  features = tf.concat([branch_5x5, branch_3x3, branch_pool, branch_1x1], -1)

  # SRN sandwich: trainable dense -> fixed reservoir, repeated twice with
  # identical reservoirs.
  features = tf.keras.layers.Dense(
      reservoir_weight.shape[0], activation='elu')(features)
  features = reservoir_registry.get_reservoir(reservoir_base)(
      **reservoir_params)(features)
  features = tf.keras.layers.Dense(
      reservoir_weight.shape[0], activation='elu')(features)
  features = reservoir_registry.get_reservoir(reservoir_base)(
      **reservoir_params)(features)

  # Output head: classification flattens to a vector first.
  if task == 'classification':
    features = tf.keras.layers.Flatten()(features)
  outputs = tf.keras.layers.Dense(
      units=num_output_channels,
      activation=final_activation,
      kernel_initializer=kernel_initializer)(features)

  return tf.keras.models.Model(inputs, outputs)
def midnight(date):
    """Return a copy of *date* truncated to the start of its day.

    Args:
        date (Date): The starting date.

    Returns:
        Date: A new date with the hour, minute, second, and microsecond
        fields all set to zero.
    """
    zeroed = {'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0}
    return date.replace(**zeroed)
async def api_call(method, data=None):
    """Perform a Slack Web API call.

    Args:
        method (str): Slack API method name, e.g. ``'chat.postMessage'``.
        data (dict, optional): Form fields to send with the request.

    Returns:
        dict or str: The parsed JSON response, or an error string when the
        bot token is missing or malformed.
    """
    token = os.environ.get('TOKEN')
    # Bot tokens always start with 'xoxb-'. Guard against the env var being
    # unset too: the original called .startswith on None and crashed.
    if not token or not token.startswith('xoxb-'):
        return 'Define the token please'
    form = aiohttp.FormData(data or {})
    form.add_field('token', token)
    # ClientSession is an *async* context manager; the original plain `with`
    # is not supported by aiohttp and fails at runtime.
    async with aiohttp.ClientSession() as session:
        async with session.post(
                'https://slack.com/api/{0}'.format(method), data=form) as response:
            assert 200 == response.status, ('{0} with {1} failed.'.format(method, data))
            return await response.json()
def main():
    """Run the Breakout game loop.

    Repeatedly advances the ball while lives remain, ending early once the
    brick counter reaches the win threshold, then shows the final score.
    """
    global NUM_LIVES  # remaining lives; updated by bounce_ball each frame
    graphics = BreakoutGraphics()
    score = 0  # points accumulated so far
    score2 = 0  # secondary counter compared against `win` -- presumably bricks broken; TODO confirm
    delay = 0  # extra pause added to the frame rate (ball speed control)
    win = 1000  # win threshold for score2 (also returned by bounce_ball)
    # Add animation loop here!
    while NUM_LIVES > 0:  # keep playing while the player has lives left
        if graphics.get_game_state():  # True once a round is in progress
            dx = graphics.get_dx()  # current horizontal velocity
            dy = graphics.get_dy()  # current vertical velocity
            NUM_LIVES, score, delay, score2, win = graphics.bounce_ball(dx, dy, NUM_LIVES, score, delay, score2)  # advance the ball one step
        pause(FRAME_RATE + delay + 20)  # frame pacing; larger delay -> slower ball
        if score2 == win:  # every brick cleared -> stop the loop
            break
    graphics.remove_all(score) | 26,168 |
def foldr(fun: Callable[[Any, Any], Any], acc: Any, seq: Sequence[Any]) -> Any:
    """Implementation of foldr in Python3.

    This is the right-handed fold from functional programming: if the
    sequence is empty the accumulator is returned; otherwise ``fun`` is
    applied to each element with the fold of the remaining suffix, i.e.
    ``fun(seq[0], fun(seq[1], ... fun(seq[-1], acc)))``.

    The original recursive version copied the list on every step (O(n^2))
    and hit the interpreter recursion limit on long sequences; this
    iterative version walks the sequence once, right to left.

    Below are len and sum implemented via foldr to demonstrate it:

    >>> foldr((lambda _, y: y + 1), 0, [0, 1, 2, 3, 4])
    5
    >>> foldr((lambda x, y: x + y), 0, [0, 1, 2, 3, 4])
    10

    foldr takes the second argument and the last item of the list and
    applies the function, then it takes the penultimate item from the end
    and the result, and so on.
    """
    result = acc
    for item in reversed(seq):
        result = fun(item, result)
    return result
def packages_list(ctx, sort_by):
    """Display a list of all WHDLoad Packages in the iGameLister WHDLoad Package data file (packages.dat).
    The list of WHDLoad Packages can be sorted by the following criteria:
    \b
    id: The ID number of the WHDLoad Packages. (default)
    date: The release date of the WHDLoad Packages.
    name: The internal name of the WHDLoad Slaves.
    """
    # Load the package data file, then print the listing. A Ctrl-C during
    # the (potentially long) printout is reported rather than raised.
    package_data = Packages(ctx)
    package_data.load()
    try:
        package_data.cli_packages_list(sort_by)
    except KeyboardInterrupt:
        click.echo("\nInterrupted.")
def p_meta_description(p):
    """meta_description_stmt : DESCRIPTION_ID COLON MULTILINES_STRING
    """
    # NOTE: the docstring above is the PLY/yacc grammar production for this
    # rule -- the parser generator reads it, so it must not be reworded.
    # p[3] holds the MULTILINES_STRING token value matched by the rule.
    p[0] = Node("description", value=p[3]) | 26,171 |
def test_simple():
    """A single rectangular section must match the closed-form EI."""
    width = 100
    height = 20
    modulus = 210000
    sections = ((width, height, 0, modulus),)
    EI, top, bot = bm.EI(sections, modulus)
    # Closed-form bending stiffness of a rectangle: E * b * h^3 / 12.
    expected = modulus * width * (height ** 3) / 12
    assert 0.99 < EI / expected < 1.01
    assert top == height / 2
    assert bot == -height / 2
def nose(window):
    """Draw a filled nose oval, horizontally centered, onto *window*.

    :param window: the window to draw on
    """
    nose_shape = GOval(50, 20)
    nose_shape.filled = True
    x_pos = window.width / 2 - nose_shape.width // 2
    y_pos = 245 + 40
    window.add(nose_shape, x=x_pos, y=y_pos)
def tj_dom_dem(x):
    """
    Real Name: b'Tj Dom Dem'
    Original Eqn: lookup table mapping day-of-year 1..365 to the constant
        0.08333 (~1/12) for every entry; see the Vensim model file for the
        fully spelled-out original equation.
    Units: b'Dmnl'
    Limits: (None, None)
    Type: lookup

    b''
    """
    # The auto-generated original enumerated 365 identical (day, 0.08333)
    # pairs by hand; generating the two lists is equivalent and readable.
    return functions.lookup(x, list(range(1, 366)), [0.08333] * 365)
def filter_chants_without_volpiano(chants, logger=None):
    """Exclude all chants with an empty volpiano field.

    Parameters
    ----------
    chants : pandas.DataFrame
        Chant table with a ``volpiano`` column.
    logger : optional
        Unused; kept for interface compatibility with sibling filters.

    Returns
    -------
    pandas.DataFrame
        Only the rows whose ``volpiano`` value is not null.
    """
    # `notnull()` is the idiomatic form of the original `isnull() == False`.
    has_volpiano = chants.volpiano.notnull()
    return chants[has_volpiano]
def split_trainer_ops_pass(program, config, default_device="cpu"):
    """Split the cpu-trainer program out of the origin program.

    Steps:
      1. locate heter ops (ops placed on a non-default device);
      2. work out the inputs/outputs of every heter block;
      3. build the cpu-trainer program, inserting send & recv ops.
    """
    # Todo: support user define default_device (MrChengmo)
    program, heter_ops, default_ops, program_block_ops = find_heter_ops(
        program, default_device)
    program_block_ops = union_forward_gradient_op(program_block_ops)
    block_vars_detail = find_block_joints(program, program_block_ops,
                                          heter_ops)
    trainer_program = program.clone()
    create_trainer_program(trainer_program, program, config,
                           program_block_ops, block_vars_detail)
    return trainer_program
def get_builtin_templates_path() -> pathlib.Path:
    """Return the path of this package's builtin ``templates`` directory.

    :return: template pathlib.Path
    """
    package_dir = os.path.dirname(os.path.realpath(__file__))
    return pathlib.Path(package_dir, 'templates')
def sample_input():
    """Return the puzzle input and expected result for the part 1
    example problem.

    The input is the example point list (one position/velocity record per
    non-blank line); the expected value is the trimmed '#'/'.' sky grid
    the points eventually align into (the letters "HI").
    """
    # Raw records: "position=<x, y> velocity=<dx, dy>", one per line.
    lines = split_nonblank_lines("""
    position=< 9, 1> velocity=< 0, 2>
    position=< 7, 0> velocity=<-1, 0>
    position=< 3, -2> velocity=<-1, 1>
    position=< 6, 10> velocity=<-2, -1>
    position=< 2, -4> velocity=< 2, 2>
    position=<-6, 10> velocity=< 2, -2>
    position=< 1, 8> velocity=< 1, -1>
    position=< 1, 7> velocity=< 1, 0>
    position=<-3, 11> velocity=< 1, -2>
    position=< 7, 6> velocity=<-1, -1>
    position=<-2, 3> velocity=< 1, 0>
    position=<-4, 3> velocity=< 2, 0>
    position=<10, -3> velocity=<-1, 1>
    position=< 5, 11> velocity=< 1, -2>
    position=< 4, 7> velocity=< 0, -1>
    position=< 8, -2> velocity=< 0, 1>
    position=<15, 0> velocity=<-2, 0>
    position=< 1, 6> velocity=< 1, 0>
    position=< 8, 9> velocity=< 0, -1>
    position=< 3, 3> velocity=<-1, 1>
    position=< 0, 5> velocity=< 0, -1>
    position=<-2, 2> velocity=< 2, 0>
    position=< 5, -2> velocity=< 1, 2>
    position=< 1, 4> velocity=< 2, 1>
    position=<-2, 7> velocity=< 2, -2>
    position=< 3, 6> velocity=<-1, -1>
    position=< 5, 0> velocity=< 1, 0>
    position=<-6, 0> velocity=< 2, 0>
    position=< 5, 9> velocity=< 1, -2>
    position=<14, 7> velocity=<-2, 0>
    position=<-3, 6> velocity=< 2, -1>""")
    # The sky the example converges to, before trimming the blank margins.
    sky_lines = split_nonblank_lines("""
    ......................
    ......................
    ......................
    ......................
    ......#...#..###......
    ......#...#...#.......
    ......#...#...#.......
    ......#####...#.......
    ......#...#...#.......
    ......#...#...#.......
    ......#...#...#.......
    ......#...#..###......
    ......................
    ......................
    ......................
    ......................""")
    expected = trim_sky(sky_lines)
    return lines, expected | 26,178 |
def vector_angle(v):
    """Angle between v and the positive x axis.

    Only works with 2-D vectors.

    returns: angle in radians (arctan2 range, [-pi, pi])
    """
    assert len(v) == 2
    x_comp, y_comp = v
    return np.arctan2(y_comp, x_comp)
def get_user_record_tuple(param):
    """
    Internal method for retrieving the user registration record from the DB.

    :param param: either a Discord ID (int) or a registration token (str).
    :return: ``(last_updated, token, discord_id, discord_name, is_verified,
        callsign)`` for the matching row, ``None`` when no row matches or
        *param* has an unsupported type.
    """
    # Pick the query *before* opening a connection, so an unsupported key
    # type never allocates (and previously leaked) a connection.
    # discord_id provided
    if isinstance(param, int):
        cmd = '''SELECT last_updated, token, discord_id, discord_name, is_verified, callsign
            FROM registration WHERE discord_id=%s'''
    # token provided
    elif isinstance(param, str):
        cmd = '''SELECT last_updated, token, discord_id, discord_name, is_verified, callsign
            FROM registration WHERE token=%s'''
    else:
        return None
    conn = mariadb.connect(host=DB_URI, user=DB_USERNAME, password=DB_PASSWORD, database=DB_NAME)
    try:
        db = conn.cursor()
        db.execute(cmd, (param,))
        result = db.fetchone()
    finally:
        # The original never closed the connection, leaking one per call.
        conn.close()
    return result
def A_weighting(x, Fs):
    """Apply the A-weighting filter to signal *x* sampled at *Fs*.

    The analog transfer function follows equation E.6 of the standard and
    is discretized with the bilinear transform before filtering.

    :param x: input signal
    :param Fs: sample rate
    :returns: the A-weighted signal
    """
    # Pole frequencies converted to angular frequencies.
    w1 = 2.0 * np.pi * _POLE_FREQUENCIES[1]
    w2 = 2.0 * np.pi * _POLE_FREQUENCIES[2]
    w3 = 2.0 * np.pi * _POLE_FREQUENCIES[3]
    w4 = 2.0 * np.pi * _POLE_FREQUENCIES[4]
    offset = _NORMALIZATION_CONSTANTS['A']
    gain = 10 ** (-offset / 20.0)
    numerator = np.array([w4 ** 2.0 * gain, 0.0, 0.0, 0.0, 0.0])
    # Denominator is the product of the four pole factors.
    denominator = [1.0, 2.0 * w4, w4 ** 2.0]
    for factor in ([1.0, 2.0 * w1, w1 ** 2.0], [1.0, w3], [1.0, w2]):
        denominator = np.convolve(denominator, factor)
    B, A = bilinear(numerator, denominator, Fs)
    return lfilter(B, A, x)
def register(class_=None, **kwargs):
    """Registers a dataset with segment specific hyperparameters.
    When passing keyword arguments to `register`, they are checked to be valid
    keyword arguments for the registered Dataset class constructor and are
    saved in the registry. Registered keyword arguments can be retrieved with
    the `list_datasets` function.
    All arguments that result in creation of separate datasets should be
    registered. Examples are datasets divided in different segments or
    categories, or datasets containing multiple languages.
    Once registered, an instance can be created by calling
    :func:`~gluonnlp.data.create` with the class name.
    Parameters
    ----------
    **kwargs : list or tuple of allowed argument values
        For each keyword argument, it's value must be a list or tuple of the
        allowed argument values.
    Examples
    --------
    >>> @gluonnlp.data.register(segment=['train', 'test', 'dev'])
    ... class MyDataset(gluon.data.Dataset):
    ...     def __init__(self, segment='train'):
    ...         pass
    >>> my_dataset = gluonnlp.data.create('MyDataset')
    >>> print(type(my_dataset))
    <class 'MyDataset'>
    """
    def _real_register(class_):
        # Assert that the passed kwargs are meaningful
        for kwarg_name, values in kwargs.items():
            try:
                # Python 3 introspection of the class's call signature.
                real_args = inspect.getfullargspec(class_).args
            except AttributeError:
                # Python 2 fallback: getfullargspec does not exist there,
                # so fall back to (deprecated) getargspec on __init__.
                # pylint: disable=deprecated-method
                real_args = inspect.getargspec(class_.__init__).args
            if not kwarg_name in real_args:
                raise RuntimeError(
                    ('{} is not a valid argument for {}. '
                     'Only valid arguments can be registered.').format(
                         kwarg_name, class_.__name__))
            if not isinstance(values, (list, tuple)):
                raise RuntimeError(('{} should be a list of '
                                    'valid arguments for {}. ').format(
                                        values, kwarg_name))
        # Save the kwargs associated with this class_
        # NOTE(review): the module-level registry name is misspelled
        # ("REGSITRY"); kept as-is since it is defined elsewhere in the file.
        _REGSITRY_NAME_KWARGS[class_] = kwargs
        register_ = registry.get_register_func(Dataset, 'dataset')
        return register_(class_)
    if class_ is not None:
        # Decorator was called without arguments
        return _real_register(class_)
    return _real_register | 26,182 |
def random_population(pop_size, tune_params, tuning_options, max_threads):
    """create a random population of pop_size unique members"""
    # Total number of possible configurations; must exceed the requested
    # population size or the uniqueness loop below could never terminate.
    option_space = np.prod([len(v) for v in tune_params.values()])
    assert pop_size < option_space
    population = []
    while len(population) < pop_size:
        candidate = [random.choice(values) for values in tune_params.values()]
        is_new = candidate not in population
        if is_new and util.config_valid(candidate, tuning_options, max_threads):
            population.append(candidate)
    return population
def check_genome(genome):
    """Check if genome is a valid FASTA file or genomepy genome genome.

    Parameters
    ----------
    genome : str
        Genome name or file to check.

    Returns
    -------
    is_genome : bool
    """
    # Genome() raises on anything it cannot resolve; treat that as "not a
    # genome" rather than propagating the error.
    try:
        Genome(genome)
    except Exception:
        return False
    return True
def test_hbase_get_empty_key_to_error(sdc_builder, sdc_executor, cluster):
    """Check record is sent to error when there is no key in the record and ignore row missing field is set to false

    dev_raw_data_source >> hbase_lookup >> wiretap

    The input record deliberately lacks the '/row_key' field referenced by
    rowExpr, so with ignore_row_missing_field=False the lookup must route
    the record to the error stream and write nothing to HBase.
    """
    data = {'columnField': 'cf1:column'}
    json_data = json.dumps(data)
    # Generate HBase Lookup's attributes.
    lookup_parameters = [dict(rowExpr="${record:value('/row_key')}",
                              columnExpr="${record:value('/columnField')}",
                              outputFieldPath='/output',
                              timestampExpr='')]
    # Get random table name to avoid collisions.
    table_name = get_random_string(string.ascii_letters, 10)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    # Create Dev Raw Data Source stage.
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source').set_attributes(data_format='JSON',
                                                                                           raw_data=json_data,
                                                                                           stop_after_first_batch=True)
    # Create HBase Lookup processor.
    hbase_lookup = pipeline_builder.add_stage('HBase Lookup').set_attributes(lookup_parameters=lookup_parameters,
                                                                             table_name=table_name,
                                                                             on_record_error='TO_ERROR',
                                                                             ignore_row_missing_field=False)
    # Create wiretap destination.
    wiretap = pipeline_builder.add_wiretap()
    # Build pipeline.
    dev_raw_data_source >> hbase_lookup >> wiretap.destination
    pipeline = pipeline_builder.build().configure_for_environment(cluster)
    pipeline.configuration['shouldRetry'] = False
    sdc_executor.add_pipeline(pipeline)
    try:
        logger.info('Creating HBase table %s ...', table_name)
        cluster.hbase.client.create_table(name=table_name, families={'cf1': {}})
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # Nothing may have been written to the table ...
        scan = cluster.hbase.client.table(table_name).scan()
        assert 0 == len(list(scan))
        # ... and exactly one record must land in the error stream.
        assert len(wiretap.error_records) == 1
    finally:
        # Delete HBase table.
        logger.info('Deleting HBase table %s ...', table_name)
        cluster.hbase.client.delete_table(name=table_name, disable=True) | 26,185 |
def prepare(hass):
    """ Prepares the loading of components. """
    # Load the built-in components
    import homeassistant.components as components

    AVAILABLE_COMPONENTS.clear()
    AVAILABLE_COMPONENTS.extend(
        module_info[1] for module_info in
        pkgutil.iter_modules(components.__path__, 'homeassistant.components.'))

    # Look for available custom components
    custom_path = hass.get_config_path("custom_components")
    if not os.path.isdir(custom_path):
        return

    # Ensure we can load custom components using Pythons import
    sys.path.insert(0, hass.config_dir)

    # pkgutil cannot be reused here because a custom component may only
    # contain a platform for a component (custom_components/switch/foo.py)
    # rather than the component itself. List the directory manually.
    # Assumption: the custom_components dir holds only directories or python
    # modules; anything else just yields harmless extra error output.
    for entry in os.listdir(custom_path):
        if os.path.isdir(os.path.join(custom_path, entry)):
            AVAILABLE_COMPONENTS.append('custom_components.{}'.format(entry))
        else:
            # Strip the trailing ".py" from the filename.
            AVAILABLE_COMPONENTS.append(
                'custom_components.{}'.format(entry[0:-3]))
def extract_row_loaded():
    """extract_row as it should appear in memory

    Hand-built fixture mirroring one Galaxy Zoo (DECaLS DR5) classification
    row after parsing: the metadata, annotations, and subject_data fields
    are real dicts/lists rather than their raw JSON-string form.
    """
    result = {}
    result['classification_id'] = '91178981'
    result['user_name'] = 'MikeWalmsley'
    result['user_id'] = '290475'
    result['user_ip'] = '2c61707e96c97a759840'
    result['workflow_id'] = '6122'
    result['workflow_name'] = 'DECaLS DR5'
    result['workflow_version'] = '28.30'
    result['created_at'] = '2018-02-20 10:44:42 UTC'
    result['gold_standard'] = ''
    result['expert'] = ''
    # Session/browser metadata recorded by the Zooniverse front end.
    result['metadata'] = {
        'session': 'e69d40c94873e2e4e2868226d5567e0e997bf58e8800eef4def679ff3e69f97f',
        'viewport': {
            'width': 1081,
            'height': 1049
        },
        'started_at':'2018-02-20T10:41:13.381Z',
        'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:58.0) Gecko/20100101 Firefox/58.0',
        'utc_offset': '0',
        'finished_at': '2018-02-20T10:44:42.480Z',
        'live_project': True,
        'user_language': 'en',
        'user_group_ids':[],
        'subject_dimensions': [{
            'clientWidth': 424,
            'clientHeight': 424,
            'naturalWidth': 424,
            'naturalHeight': 424
        }]}
    # One entry per decision-tree task the volunteer answered.
    # NOTE: the stray spacing inside some 'value' strings is intentional --
    # it matches the raw export and must not be "cleaned up".
    result['annotations'] = [
        {
            'task': 'T0',
            'task_label': 'Is the galaxy simply smooth and rounded, with no sign of a disk?',
            'value':' Features or Disk'
        },
        {
            'task': 'T2',
            'task_label': 'Could this be a disk viewed edge-on?',
            'value':' No'
        },
        {
            'task': 'T4',
            'task_label': 'Is there a bar feature through the centre of the galaxy?',
            'value': 'No Bar'
        },
        {
            'task': 'T5',
            'task_label': 'Is there any sign of a spiral arm pattern?',
            'value': 'Yes'
        },
        {
            'task': 'T6',
            'task_label': 'How tightly wound do the spiral arms appear?',
            'value': ' Tight'
        },
        {
            'task': 'T7',
            'task_label':'How many spiral arms are there?',
            'value':' Cant tell'
        },
        {
            'task':'T8',
            'task_label':'How prominent is the central bulge, compared with the rest of the galaxy?',
            'value':' No bulge'
        },
        {
            'task':'T11',
            'task_label':'Is the galaxy currently merging, or is there any sign of tidal debris?',
            'value':'Neither'
        },
        {
            'task':'T10',
            'task_label':'Do you see any of these rare features in the image?',
            'value':[]
        }
    ]
    # Catalog measurements for the single subject shown, keyed by subject id.
    result['subject_data'] = {
        '15715879': {
            'retired': None,
            'ra': 319.11521779916546,
            'dec': -0.826509379829966,
            'mag.g': 13.674222230911255,
            'mag.i': 12.560198307037354,
            'mag.r': 12.938228249549866,
            'mag.u': 15.10558009147644,
            'mag.z':12.32387661933899,
            'nsa_id':189862.0,
            'redshift':0.019291512668132782,
            'mag.abs_r':-20.916738510131836,
            'mag.faruv':16.92647397518158,
            'petroflux':5388.59814453125,
            'petroth50':13.936717987060547,
            'mag.nearuv':16.298240423202515,
            'petrotheta':28.682878494262695,
            'absolute_size':11.334824080956198
        }
    }
    result['subject_ids'] = '15715879'
    return result | 26,187 |
def getb_reginsn(*args):
    """
    getb_reginsn(ins) -> minsn_t
    Skip assertions backward.
    @param ins (C++: const minsn_t *)
    """
    # SWIG-generated wrapper: forwards straight to the native IDA Hex-Rays
    # binding. Do not edit by hand -- regenerate instead.
    return _ida_hexrays.getb_reginsn(*args) | 26,188 |
def convolve_nk(myk, nkm, gfunc, klim, nk, kmin=0.02, kmax=1.5):
    """Convolve n(k) with a resolution function via momentum space.

    Pipeline: integrate n(k) -> J(p); broaden J(p) with *gfunc* on a
    symmetric fine grid; differentiate back to n(k).

    Args:
        myk (np.array): k grid
        nkm (np.array): n(k) values
        gfunc (callable): resolution function
        klim (float): maximum |k| included in the convolution grid
        nk (int): number of points on the linear grid
        kmin (float, optional): minimum k kept after convolution, default 0.02
        kmax (float, optional): maximum k kept after convolution, default 1.50
    Return:
        (np.array, np.array): (myk1, mynk1), convolved n(k)
    Example:
        >>> gfunc = lambda x:lorentz(x, 0.026)
        >>> klim = 40.  # need large number for lorentzian tail
        >>> nk = 1024**2
        >>> myk1, nkm1 = convolve_nk(myk, nkm, gfunc, klim, nk)
    """
    from solith.li_nofk.int_nofk import calc_jp1d, calc_nk1d
    # Step 1: integrate n(k) up to J(p).
    jpm = calc_jp1d(myk, nkm)
    # Step 2: broaden J(p) on a symmetric fine grid.
    fjp = flip_and_clamp(myk, jpm)
    finep = np.linspace(-klim, klim, nk)
    jp1 = fft_convolve(fjp, gfunc, finep)
    # Step 3: differentiate J(p) back to n(k), keeping (kmin, kmax) only.
    keep = (finep > kmin) & (finep < kmax)
    return calc_nk1d(finep[keep], jp1[keep])
def _builder_inited(app: sphinx.application.Sphinx) -> None:
    """Generates the rST files for API members."""
    # Build a module-level autodoc documenter rooted at 'tensorstore' and
    # write one documentation page per member it discovers.
    _write_member_documentation_pages(
        _create_documenter(env=app.env,
                           documenter_cls=sphinx.ext.autodoc.ModuleDocumenter,
                           name='tensorstore')) | 26,190 |
def label_connected_components(label_images, start_label=1, is_3d=False):
    """Relabel images so every connected component gets a unique label.

    Create new label images where labels are changed so that each
    connected component has a different label, unique across the whole
    batch. Connectivity is full (8-neighborhood in 2-D, 26 in 3-D).

    Parameters
    ----------
    label_images : ndarray
        Label images with size :math:`(N, H, W)`; a single ``(H, W)``
        image (or ``(D, H, W)`` volume when ``is_3d``) is also accepted
        and gets a leading batch axis added.
    start_label : int
        First label to assign, by default 1.
    is_3d : bool
        Treat each image as a 3-D volume (3-D structuring element).

    Returns
    -------
    ndarray
        Label images with new, globally unique component labels.
    """
    # Normalize input to a batch: (H, W) -> (1, H, W), (D, H, W) -> (1, D, H, W).
    if label_images.ndim == 2:
        label_images = np.expand_dims(label_images, 0)
    if is_3d and label_images.ndim == 3:
        label_images = np.expand_dims(label_images, 0)
    # `np.int` was removed in NumPy 1.24 -- use an explicit integer dtype.
    new_label_images = np.zeros_like(label_images).astype(np.int64)
    # Full-connectivity structuring element; hoisted since it is loop-invariant.
    if is_3d:
        structure = np.ones((3, 3, 3), dtype=np.uint8)
    else:
        structure = np.ones((3, 3), dtype=np.uint8)
    next_label = start_label
    for label_image, new_label_image in zip(label_images, new_label_images):
        num_labels = label_image.astype(np.int32).max()
        for old_label in range(1, num_labels + 1):
            # Connected components of the current original label only.
            components = label(label_image == old_label, structure=structure)[0]
            for component_id in range(1, components.max() + 1):
                # Assigning through the view mutates new_label_images in place.
                new_label_image[components == component_id] = next_label
                next_label += 1
    return new_label_images
def passed_hardfailure_detector(elb_data):
    """
    Checks for hard failures

    Runs a sequence of migration blockers against the DescribeLoadBalancers
    output in *elb_data*; returns False (after logging) on the first failed
    check, True when the Classic Load Balancer can be migrated to an NLB.
    """
    if debug:
        logger.debug("Checking hard failure detector")
    # 1. Verify the Classic Load Balancer does not have HTTP, HTTPS or SSL
    # listeners
    for listener in elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']:
        if listener['Listener']['Protocol'] in ['HTTP', 'HTTPS', 'SSL']:
            logger.error(
                "Error: HTTP, HTTPS and SSL listeners are not supported on Network Load Balancer.")
            return False
    # 2. Verify the Classic Load Balancer is not in EC2-Classic
    if 'VPCId' not in elb_data['LoadBalancerDescriptions'][0]:
        logger.error("Error: The Classic Load Balancer is in EC2-Classic instead of a VPC.\
             A VPC is required for an Network Load Balancer.")
        return False
    # 3. Verify the Classic Load Balancer has more than 350 seconds idle
    # timeout
    if elb_data['LoadBalancerAttributes']['ConnectionSettings']['IdleTimeout'] > 350:
        logger.error(
            "Error: The idle timeout on Classic Load Balancer is larger than 350 seconds.")
        return False
    # 4. Verify unique backend ports is less than 200
    # (only worth computing when the listener count could exceed the limit)
    if len(elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']) > 200:
        backend_ports = []
        for listener in elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']:
            if listener['Listener']['InstancePort'] not in backend_ports:
                backend_ports.append(listener['Listener']['InstancePort'])
        if len(backend_ports) >= 200:
            logger.error("Error: The number of unique backend ports exceeds 200. \
                The default limit for target groups is 200.")
            return False
    # 5. Verify that the number of listeners is less than the default
    if len(elb_data['LoadBalancerDescriptions'][0]['ListenerDescriptions']) > 10:
        logger.error("Error: The number of listeners exceeds the default \
            limit for an Network Load Balancer.")
        return False
    return True | 26,192 |
def setup_pgbackup():
    """Install the PostgreSQL backup script on the host and schedule it.

    Copies ``etc/pgbkup.sh`` into ``/backups/postgres``, marks it
    executable, and registers a daily 03:00 cron entry for the
    ``postgres`` user.
    """
    preparation = (
        'mkdir -p /backups/postgres',
        'chown -R postgres:postgres /backups',
    )
    for command in preparation:
        sudo(command)
    put('etc/pgbkup.sh', '/backups/postgres', use_sudo=True)
    sudo('chmod +x /backups/postgres/pgbkup.sh')
    # Overwrite the postgres user's crontab with the nightly backup entry.
    sudo('echo "0 3 * * * /backups/postgres/pgbkup.sh" | crontab -', user='postgres')
def _watchdog():
    """
    Thread worker to maintain nornir proxy process and it's children liveability.

    Loops until the proxy is deinitialized; each cycle it:

    1. Restarts the proxy when file-descriptor usage nears the OS limit
    2. Enforces the configured memory threshold (log a warning or restart)
    3. Restarts the worker thread if it has died
    4. Kills child processes older than ``child_process_max_age``
    5. Runs ``HostsKeepalive`` to prune dead connections

    Each check runs in its own try/except so a failure in one check never
    kills the watchdog thread itself.
    """
    # pid -> {"first_seen": epoch, "process": Process, "age": seconds}
    child_processes = {}
    while nornir_data["initialized"]:
        nornir_data["stats"]["watchdog_runs"] += 1
        # 1. Run FD limit checks (relies on Linux /proc, guarded by
        # HAS_RESOURCE_LIB so non-POSIX platforms skip it)
        try:
            if HAS_RESOURCE_LIB:
                fd_in_use = len(os.listdir("/proc/{}/fd/".format(os.getpid())))
                fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
                # restart if reached 95% of available file descriptors limit
                if fd_in_use > fd_limit * 0.95:
                    log.critical(
                        "Nornir-proxy MAIN PID {} watchdog, file descriptors in use: {}, limit: {}, reached 95% threshold, restarting".format(
                            os.getpid(), fd_in_use, fd_limit
                        )
                    )
                    shutdown()
        except Exception:
            log.error(
                "Nornir-proxy MAIN PID {} watchdog, file descriptors usage check error: {}".format(
                    os.getpid(), traceback.format_exc()
                )
            )
        # 2. Run memory checks; action on breach is configurable: "log" or "restart"
        try:
            mem_usage = minion_process.memory_info().rss / 1024000
            if mem_usage > nornir_data["memory_threshold_mbyte"]:
                if nornir_data["memory_threshold_action"] == "log":
                    log.warning(
                        "Nornir-proxy {} MAIN PID {} watchdog, memory_threshold_mbyte exceeded, memory usage {}MByte".format(
                            nornir_data["stats"]["proxy_minion_id"],
                            os.getpid(),
                            mem_usage,
                        )
                    )
                elif nornir_data["memory_threshold_action"] == "restart":
                    shutdown()
        except Exception:
            log.error(
                "Nornir-proxy MAIN PID {} watchdog, memory usage check error: {}".format(
                    os.getpid(), traceback.format_exc()
                )
            )
        # 3. Check if worker thread is alive and restart it if not.
        try:
            if not nornir_data["worker_thread"].is_alive():
                # BUGFIX: Thread.start() returns None, so the thread object
                # must be created and stored first - assigning the result of
                # .start() would leave None in nornir_data["worker_thread"]
                # and crash this is_alive() check on every following cycle.
                worker_thread = threading.Thread(
                    target=_worker, name="{}_worker".format(opts["id"])
                )
                worker_thread.start()
                nornir_data["worker_thread"] = worker_thread
        except Exception:
            log.error(
                "Nornir-proxy MAIN PID {} watchdog, worker thread is_alive check error: {}".format(
                    os.getpid(), traceback.format_exc()
                )
            )
        # 4. Handle child processes lifespan - kill children that exceed
        # child_process_max_age (age measured from when watchdog first saw them)
        try:
            for p in multiprocessing.active_children():
                cpid = p.pid
                if not p.is_alive():
                    _ = child_processes.pop(cpid, None)
                elif cpid not in child_processes:
                    child_processes[cpid] = {
                        "first_seen": time.time(),
                        "process": p,
                        "age": 0,
                    }
                elif (
                    child_processes[cpid]["age"] > nornir_data["child_process_max_age"]
                ):
                    # kill process
                    os.kill(cpid, signal.SIGKILL)
                    nornir_data["stats"]["watchdog_child_processes_killed"] += 1
                    log.info(
                        "Nornir-proxy MAIN PID {} watchdog, terminating child PID {}: {}".format(
                            os.getpid(), cpid, child_processes[cpid]
                        )
                    )
                    _ = child_processes.pop(cpid, None)
                else:
                    child_processes[cpid]["age"] = (
                        time.time() - child_processes[cpid]["first_seen"]
                    )
        except Exception:
            log.error(
                "Nornir-proxy MAIN PID {} watchdog, child processes error: {}".format(
                    os.getpid(), traceback.format_exc()
                )
            )
        # 5. Keepalive connections and clean up dead connections if any.
        # Non-blocking acquire so the watchdog never stalls behind a task
        # that already holds the connections lock.
        try:
            if nornir_data["proxy_always_alive"] and nornir_data[
                "connections_lock"
            ].acquire(block=False):
                try:
                    stats = HostsKeepalive(nornir_data["nr"])
                    nornir_data["stats"]["watchdog_dead_connections_cleaned"] += stats[
                        "dead_connections_cleaned"
                    ]
                finally:
                    # always release the lock, even when HostsKeepalive raises
                    nornir_data["connections_lock"].release()
        except Exception:
            log.error(
                "Nornir-proxy MAIN PID {} watchdog, HostsKeepalive check error: {}".format(
                    os.getpid(), traceback.format_exc()
                )
            )
        time.sleep(nornir_data["watchdog_interval"])
def load(name, data_dir='data'):
    """
    Open a saved ``.npz`` file containing 'dm' and 'z' arrays.

    Parameters
    ----------
    name : str
        The name of the file to load.
    data_dir : str, optional
        The directory containing the data. Pass the full path, or the
        special value 'data' (the default) to look in the ``data``
        subdirectory shipped alongside the source code.

    Returns
    -------
    table: :obj:`numpy.lib.npyio.NpzFile`
        The lookup table containing the 'dm' and 'z' arrays.

    Example
    -------
    >>> table = fruitbat.table.load('Zhang2018_Planck18.npz')
    >>> table["dm"]
    array([0.00000000e+00, 1.62251609e+00, 3.24675204e+00, ...,
           1.00004587e+04, 1.00010926e+04, 1.00017266e+04])
    >>> table["z"]
    array([0.00000000e+00, 2.00020002e-03, 4.00040004e-03, ...,
           1.99959996e+01, 1.99979998e+01, 2.00000000e+01])
    """
    # Resolve the sentinel value 'data' to the package's own data directory.
    lookup_dir = (
        os.path.join(os.path.dirname(__file__), 'data')
        if data_dir == 'data'
        else data_dir
    )
    return np.load(os.path.join(lookup_dir, name))
def save_changes(_id, data):
    """Commit the candidate details to the database.

    Args:
        _id (integer): office id taken from the endpoint url
        data ([object]): candidate instance
    """
    # Build the INSERT statement for this office, then run it in one shot.
    statement, params = Candidate.add_candidate(data, office_id=_id)
    db().commit_changes(statement, params)
def set_user_agent(user_agent: str, ini_path: str = None) -> None:
    """Set the browser user-agent string.

    Delegates to ``set_argument`` to write the ``user-agent`` launch
    argument into the configuration ini file.

    :param user_agent: user agent text
    :param ini_path: path of the ini file to modify; None uses the default
    :return: None
    """
    set_argument('user-agent', user_agent, ini_path)
def metadataAbstractElementEmptyValuesTest1():
    """
    No empty values.
    >>> doctestMetadataAbstractElementFunction(
    ...     testMetadataAbstractElementEmptyValue,
    ...     metadataAbstractElementEmptyValuesTest1(),
    ...     requiredAttributes=["required1"],
    ...     optionalAttributes=["optional1"])
    []
    """
    # Both the required and the optional attribute carry non-empty values.
    xml_source = """<?xml version="1.0" encoding="UTF-8"?>
    <test required1="foo" optional1="foo" />
    """
    root = ElementTree.fromstring(xml_source)
    return root
def test_get_missing_resource(app):
    """If we GET a resource with an ID that doesn't exist, do we get a 404?"""
    with app.test_client() as http:
        # Machine 31337 is never created by any fixture, so this must 404.
        assert http.get('/machine/31337').status_code == 404
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.