| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def analyzer_options(*args):
"""
analyzer_options()
    Allow the user to set analyzer options (shows a dialog box);
    corresponds to the 'ui_analyzer_options' action.
"""
return _ida_kernwin.analyzer_options(*args) | 32,100 |
def vector_vector_feature(v_a, v_b, weight, p_idx, frames, symmetric):
"""
    Take the outer product to create a matrix feature per pair, average per vertex, and express as SO(2) features.
:param v_a: [E, 3]
:param v_b: [E, 3]
:param weight: [E]
:param p_idx: [E] index [0, V)
:param frames: [V, 3, 3] per vertex, rows are (X, Y, normal) vectors.
:param symmetric: bool
:return: [V, 2/3, 5] (2 channels if symmetric)
"""
m_pair = torch.einsum("ni,nj,n->nij", v_a, v_b, weight)
m_p = scatter_sum(m_pair, p_idx, dim=0) / scatter_sum(weight, p_idx)[:, None, None]
m_p_gauge = frames @ m_p @ frames.transpose(1, 2)
return (three_sym_matrix_to_so2_features if symmetric else three_matrix_to_so2_features)(
m_p_gauge
) | 32,101 |
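A minimal self-contained check of the weighted outer-product step above (hypothetical sizes; the scatter and SO(2) conversion steps are not exercised here):

import torch

v_a = torch.randn(4, 3)  # E = 4 pairs
v_b = torch.randn(4, 3)
weight = torch.rand(4)
# "ni,nj,n->nij" builds one weighted outer product per pair: shape [4, 3, 3]
m_pair = torch.einsum("ni,nj,n->nij", v_a, v_b, weight)
assert torch.allclose(m_pair[0], weight[0] * torch.outer(v_a[0], v_b[0]))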
def shopping_promos(bot, update):
""" Get Latest Shopping Promos """
promo_list = {'Lazada': 'https://www.couponese.com/store/lazada.sg/',
'Amazon': 'https://www.couponese.com/store/amazon.com.sg/',
'Redmart': 'https://www.couponese.com/store/redmart.com/',
'Zalora': 'https://www.couponese.com/store/zalora.sg/',
}
bot.sendChatAction(update.message.chat_id, action=ChatAction.TYPING)
text_ = promo_general.promo_loop(promo_list)
text_ = ":handbag::handbag::handbag:" + text_
bot.sendMessage(update.message.chat_id, text=emojize(text_, use_aliases=True),
parse_mode='HTML')
botan_track(update.message.from_user.id,
update.message, update.message.text) | 32,102 |
def parse_steps(filename):
"""
Read each line of FILENAME and return a dict where the key is the
step and the value is a list of prerequisite steps.
"""
    steps = defaultdict(list)
all_steps = set()
with open(filename) as f:
for line in f:
words = line.split(' ')
steps[words[7]].append(words[1])
all_steps.add(words[1])
# Add steps with no prerequisites.
for step in all_steps:
if step not in steps:
steps[step] = []
return steps | 32,103 |
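A quick sketch of the expected input, inferred from the word positions used above (the Advent of Code 2018 day 7 phrasing is assumed):

# instructions.txt (hypothetical):
#   Step C must be finished before step A can begin.
#   Step C must be finished before step F can begin.
# parse_steps("instructions.txt") -> {'A': ['C'], 'F': ['C'], 'C': []}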
def swait_multiple(cos):
"""Sync-wait for the given coroutines."""
asyncio.get_event_loop().run_until_complete(asyncio.wait(cos)) | 32,104 |
def load(file): # real signature unknown; restored from __doc__
""" load(file) -- Load a pickle from the given file """
pass | 32,105 |
def sitestructure(config, path, extra):
"""Read all markdown files and make a site structure file"""
# no error handling here, because compile_page has it
entire_site = list()
for page in glob.iglob(path + '**/*.md', recursive=True):
merged = compile_page(None, config, page, extra)
if 'tags' in merged:
merged['tags'] = [x.strip() for x in merged['tags'].split(',')]
if 'content_raw' in merged:
merged['snippet'] = merged['content_raw'][:200] + "..."
        # remove certain elements
if 'content' in merged:
del merged['content']
if 'content_raw' in merged:
del merged['content_raw']
if 'templates' in merged:
del merged['templates']
entire_site.append(merged)
return json.dumps(entire_site) | 32,106 |
def _create_component(tag_name, allow_children=True, callbacks=()):
"""
Create a component for an HTML Tag
Examples:
>>> marquee = _create_component('marquee')
>>> marquee('woohoo')
<marquee>woohoo</marquee>
"""
def _component(*children, **kwargs):
if 'children' in kwargs:
children = kwargs.pop('children')
else:
# Flatten children under specific circumstances
# This supports the use case of div([a, b, c])
# And allows users to skip the * operator
if len(children) == 1 and isinstance(children[0], list):
# We want children to be tuples and not lists, so
# they can be immutable
children = tuple(children[0])
if 'style' in kwargs:
style = kwargs.pop('style')
else:
style = None
if 'attributes' in kwargs:
attributes = kwargs['attributes']
else:
attributes = dict(**kwargs)
if (tag_name == 'a') and ('href' not in attributes):
attributes['href'] = '#'
if not allow_children and children:
# We don't allow children, but some were passed in
raise ValueError(
'<{tag_name} /> cannot have children'.format(tag_name=tag_name))
for cb in callbacks:
cbname = cb['name']
if cbname in attributes:
if attributes[cbname] is not None:
# from google.colab import output as colab_output
callback_id = cbname + 'callback-' + str(uuid.uuid4())
register_callback(callback_id, attributes[cbname])
# js="google.colab.kernel.invokeFunction('{callback_id}', [], {kwargs})"
js = "window.vdomr_invokeFunction('{callback_id}', [], {kwargs})"
js = js.replace('{callback_id}', callback_id)
js = js.replace('{kwargs}', cb['kwargs'])
attributes[cbname] = js
else:
attributes[cbname] = ''
v = VDOM(tag_name, attributes, style, children)
return v
return _component | 32,107 |
def typeof(val, purpose=Purpose.argument):
"""
Get the Numba type of a Python value for the given purpose.
"""
# Note the behaviour for Purpose.argument must match _typeof.c.
c = _TypeofContext(purpose)
ty = typeof_impl(val, c)
if ty is None:
msg = _termcolor.errmsg(
"cannot determine Numba type of %r") % (type(val),)
raise ValueError(msg)
return ty | 32,108 |
def scale_site_by_jobslots(df, target_score, jobslot_col=Metric.JOBSLOT_COUNT.value, count_col=Metric.NODE_COUNT.value):
"""
Scale a resource environment (data frame with node type information) to the supplied share. This method uses
the number of jobslots in each node as a target metric.
"""
if df[jobslot_col].isnull().sum() > 0 or df[count_col].isnull().sum() > 0:
logging.warning("Node description has null values for jobslots or node target scores!")
slots_per_type = df[jobslot_col] * df[count_col]
total_slots = slots_per_type.sum()
share = target_score / total_slots
return scale_dataframe(df, share, count_col, jobslot_col) | 32,109 |
def primesfrom2to(n):
    """Input n>=6, Returns an array of primes, 2 <= p < n"""
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[(k * k) // 3::2 * k] = False
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0] + 1) | 1)] | 32,110 |
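A quick sanity check of the sieve:

import numpy as np

assert list(primesfrom2to(30)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]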
def file_types_diff(cwd, old_ver, new_ver):
"""
NB: Uses Git and the magic/ directory
Select only files that are Copied (C), Modified (M), Renamed (R),
have their type (i.e. regular file, symlink, submodule, ...) changed (T)
Returns a list of changed file types!
"""
# diff only Modified and Type changed
cmdline = "git diff -M --diff-filter=MT %s..%s" % (old_ver, new_ver)
proc = subprocess.Popen(cmdline.split(' '), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
text = proc.communicate()[0]
text = text.decode('UTF8', 'replace').strip('\n') # keep leading white space
if text:
return (FAIL, text)
else:
return (PASS, "None") | 32,111 |
def TimeFromTicks(ticks):
"""construct an object holding a time value from the given ticks value."""
return Time(*time.localtime(ticks)[3:6]) | 32,112 |
def check_key_exists(file_location, section, key):
"""
    Searches an INI configuration file for the existence of a section & key
:param file_location: The file to get a key value from
:param section: The section to find the key value
:param key: The key that can contain a value to retrieve
    :return: True if the key exists in the given section, False otherwise
"""
config = ConfigParser()
config.read(file_location)
return config.has_option(section, key) | 32,113 |
def replace_na(str_value: str, ch: str = "0") -> str:
"""replaces \"0\" with na, specifically designed for category list, may not work for others need
Args:
str_value (str): category list
ch (str, optional): Replacemet char. Defaults to "0".
Returns:
str: clean cotegory name
"""
if str_value is not None:
len_str = len(str_value)
if len_str > 0:
if str_value == "0":
return "na"
all_indices = [i for i, ltr in enumerate(str_value) if ltr == ch]
if all_indices:
for i in all_indices:
if i == 0 and str_value[1].isalpha():
str_value = "na"+str_value[1:]
                    elif i == (len_str - 1) and (str_value[len_str - 2].isalpha() or str_value[len_str - 2] != "."):
                        str_value = str_value[:i] + "na"  # replace the trailing "0" instead of appending after it
elif str_value[len_str-2] != ".":
str_value = str_value[:i] + "na" + str_value[(i+1):]
return str_value | 32,114 |
def intent_requires():
"""
This view encapsulates the method get_intent_requirement
It requires an Intent.
:return: A dict containing the different entities required for an Intent
"""
data = request.get_json()
if "intent" in data:
return kg.get_intent_requirements(data["intent"])
else:
return {"message": "Must provide an intent name", "status": 404} | 32,115 |
def standardize_df_off_tr(df_tr:pd.DataFrame,
df_te:pd.DataFrame):
"""Standardize dataframes from a training and testing frame, where the means
and standard deviations that are calculated from the training dataset.
"""
for key in df_tr.keys():
if key != 'target':
# scale the testing data w/ the training means/stds
ssd = df_tr[key].values.std()
if np.abs(ssd) < .0001:
ssd = .001
df_te[key] = (df_te[key].values - df_tr[key].values.mean())/ssd
# scale the training data
df_tr[key] = (df_tr[key].values - df_tr[key].values.mean())/ssd
return df_tr, df_te | 32,116 |
def filter_clusters(aoi_clusters, min_ratio,
max_deviation, message, run=None):
"""
min_ratio: Has to have more than x % of
all dots in the corner within the cluster
max_deviation: Should not deviate more than x %
of the screen size from the respective AOI
"""
aoi_clusters = aoi_clusters \
.sort_values(by='quadrant') \
.assign(n_ratio=aoi_clusters['n_cluster'] / \
aoi_clusters['n_total']) \
.assign(x_deviation=aoi_clusters['x'] - \
pd.Series([0.25, 0.75, 0.25, 0.75])) \
.assign(y_deviation=aoi_clusters['y'] - \
pd.Series([0.75, 0.75, 0.25, 0.25]))
aoi_clusters['euclid_deviation'] = np.sqrt(
aoi_clusters['x_deviation'] ** 2 +
aoi_clusters['y_deviation'] ** 2)
realistic_clusters = aoi_clusters[
(aoi_clusters['n_ratio'] > min_ratio) &
(aoi_clusters['euclid_deviation'] < max_deviation)]
not_enough_gaze_points = len(aoi_clusters[
(aoi_clusters['n_ratio'] > min_ratio)]) < 4
too_far_away = len(aoi_clusters[
aoi_clusters[
'euclid_deviation'] < max_deviation]) < 4
if message:
if not_enough_gaze_points | too_far_away:
print(f"""\nRun {run} could not be clustered: """)
if not_enough_gaze_points:
print(f""" <{min_ratio * 100}% gaze point within """
f"""the AOIs for each corner""")
if too_far_away:
print(f""" >{max_deviation * 100}% from where the AOI """
f"""is supposed to be \n""")
else:
print(f"""\nRun {run} can be clustered: """)
print(f"""{aoi_clusters[[
'quadrant', 'n_cluster', 'cluster', 'n_ratio',
'x_deviation', 'y_deviation']]} \n"""
f"""Notes: """)
return realistic_clusters | 32,117 |
def set_to_true():
"""matches v1, which assign True to v1"""
key = yield symbol
res = Assign(key, True)
return res | 32,118 |
def fit_spectrum(spectrum, lineshapes, params, amps, bounds, ampbounds,
centers, rIDs, box_width, error_flag, verb=True, **kw):
"""
Fit a NMR spectrum by regions which contain one or more peaks.
Parameters
----------
spectrum : array_like
        NMR data. ndarray or emulated type, must be sliceable.
    lineshapes : list
List of lineshapes by label (str) or a lineshape class. See
:py:func:`fit_NDregion` for details.
params : list
P-length list (P is the number of peaks in region) of N-length lists
        of tuples where each tuple is the optimization starting parameters
for a given peak and dimension lineshape.
amps : list
P-length list of amplitudes.
bounds : list
List of bounds for parameter of same shape as params. If none of the
parameters in a given dimension have limits None can be used,
otherwise each dimension should have a list or tuple of (min,max) or
        None for each parameter. min or max may be None when there is no
        bound in a given direction.
ampbounds : list
P-length list of bounds for the amplitude with format similar to
bounds.
centers : list
List of N-tuples indicating peak centers.
rIDs : list
        P-length list of region numbers. Peaks with the same region number
        are fit together.
box_width : tuple
Tuple of length N indicating box width to add and subtract from peak
centers to form regions around peak to fit.
error_flag : bool
True to estimate errors for each lineshape parameter and amplitude.
verb : bool, optional
        True (the default) prints a summary of each region fit; False
        suppresses all printing.
**kw : optional
Additional keywords passed to the scipy.optimize.leastsq function.
Returns
-------
params_best : list
Optimal values for lineshape parameters with same format as params
input parameter.
amp_best : list
List of optimal peak amplitudes.
param_err : list, only returned when error_flag is True
Estimated lineshape parameter errors with same format as params.
amp_err : list, only returned when error_flag is True
Estimated peak amplitude errors.
iers : list
        List of integer flags from scipy.optimize.leastsq indicating if the
        solution was found for a given peak. 1, 2, 3 and 4 indicate that a
        solution was found. Other values indicate an error.
"""
pbest = [[]] * len(params)
pbest_err = [[]] * len(params)
abest = [[]] * len(params)
abest_err = [[]] * len(params)
iers = [[]] * len(params)
shape = spectrum.shape
ls_classes = []
for l in lineshapes:
if isinstance(l, str):
ls_classes.append(ls_str2class(l))
else:
ls_classes.append(l)
cIDs = set(rIDs) # region values to loop over
for cID in cIDs:
cpeaks = [i for i, v in enumerate(rIDs) if v == cID]
# select the parameter
cparams = [params[i] for i in cpeaks]
camps = [amps[i] for i in cpeaks]
cbounds = [bounds[i] for i in cpeaks]
campbounds = [ampbounds[i] for i in cpeaks]
ccenters = [centers[i] for i in cpeaks]
# find the box edges
        bcenters = np.round(np.array(ccenters)).astype('int')
bmin = bcenters - box_width
bmax = bcenters + box_width + 1
# correct for spectrum edges
for i in range(len(shape)):
bmin[:, i][np.where(bmin[:, i] < 0)] = 0
for i, v in enumerate(shape):
bmax[:, i][np.where(bmax[:, i] > v)] = v
# find the region limits
rmin = edge = np.array(bmin).min(0)
rmax = np.array(bmax).max(0)
# cut the spectrum
s = tuple([slice(mn, mx) for mn, mx in zip(rmin, rmax)])
region = spectrum[s]
# add edge to the box limits
ebmin = bmin - edge
ebmax = bmax - edge
# create the weight mask array
wmask = np.zeros(region.shape, dtype='bool')
for bmn, bmx in zip(ebmin, ebmax):
s = tuple([slice(mn, mx) for mn, mx in zip(bmn, bmx)])
wmask[s] = True
# add edges to the initial parameters
ecparams = [[ls.add_edge(p, (mn, mx)) for ls, mn, mx, p in
zip(ls_classes, rmin, rmax, g)] for g in cparams]
# TODO make this better...
ecbounds = [[zip(*[ls.add_edge(b, (mn, mx)) for b in zip(*db)])
for ls, mn, mx, db in zip(ls_classes, rmin, rmax, pb)]
for pb in cbounds]
# fit the region
t = fit_NDregion(region, ls_classes, ecparams, camps, ecbounds,
campbounds, wmask, error_flag, **kw)
if error_flag:
ecpbest, acbest, ecpbest_err, acbest_err, ier = t
cpbest_err = [[ls.remove_edge(p, (mn, mx)) for ls, mn, mx, p in
zip(ls_classes, rmin, rmax, g)] for g in ecpbest_err]
else:
ecpbest, acbest, ier = t
# remove edges from best fit parameters
cpbest = [[ls.remove_edge(p, (mn, mx)) for ls, mn, mx, p in
zip(ls_classes, rmin, rmax, g)] for g in ecpbest]
if verb:
print("-----------------------")
print("cID:", cID, "ier:", ier, "Peaks fit", cpeaks)
print("fit parameters:", cpbest)
print("fit amplitudes", acbest)
for i, pb, ab in zip(cpeaks, cpbest, acbest):
pbest[i] = pb
abest[i] = ab
iers[i] = ier
if error_flag:
for i, pb, ab in zip(cpeaks, cpbest_err, acbest_err):
pbest_err[i] = pb
abest_err[i] = ab
if error_flag is False:
return pbest, abest, iers
return pbest, abest, pbest_err, abest_err, iers | 32,119 |
def d1_to_q1(A, b, mapper, cnt, M):
"""
Constraints for d1 to q1
"""
for key in mapper['ck'].keys():
for i in range(M):
for j in range(i, M):
# hermetian constraints
if i != j:
A[cnt, mapper['ck'][key](i, j)] += 0.5
A[cnt, mapper['ck'][key](j, i)] += 0.5
A[cnt, mapper['kc'][key](j, i)] += 0.5
A[cnt, mapper['kc'][key](i, j)] += 0.5
b[cnt, 0] = 0.0
else:
A[cnt, mapper['ck'][key](i, j)] += 1.0
A[cnt, mapper['kc'][key](j, i)] += 1.0
b[cnt, 0] = 1.0
cnt += 1
return A, b, cnt | 32,120 |
def handle_args():
""" Gathers commmand line options and sets up logging according to the verbose param. Returns the parsed args """
parser = argparse.ArgumentParser(description='Checks the queue for new messages and caclulates the calendar as needed')
parser.add_argument('--verbose', '-v', action='count')
args = parser.parse_args()
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbose >= 3:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
return args | 32,121 |
def relative_of(base_path: str, relative_path: str) -> str:
"""Given a base file and path relative to it, get full path of it"""
return os.path.normpath(os.path.join(os.path.dirname(base_path), relative_path)) | 32,122 |
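For example:

relative_of("/a/b/c.txt", "../d.txt")  # -> "/a/d.txt" (normpath collapses the "..")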
def _get_dict_roi(directory=None):
"""Get all available images with ROI bounding box.
Returns
-------
dict : {<image_id>: <ROI file path>}
"""
d = OrderedDict()
for f in listdir(directory or IJ_ROI_DIR):
d[splitext(f)[0]] = join(directory or IJ_ROI_DIR, f)
return d | 32,123 |
def geol_units(img, lon_w, lat, legend=None):
"""Get geological units based on (lon, lat) coordinates.
Parameters
----------
img: 2d-array
2D geol map image centered at 180°.
lon_w: float or array
Point west longitude(s).
lat: float or array
Point latitude(s).
legend: dict, optional
        Mapping from map values to geological unit names.
Returns
-------
float, str or array
Geological unit(s).
"""
units = img[index(img, lon_w, lat)]
if not isinstance(legend, dict):
return units
if np.ndim(units) == 0:
return legend[units]
geol = np.vectorize(legend.get)(units)
if np.ma.is_masked(lon_w) or np.ma.is_masked(lat):
mask = np.ma.getmask(lon_w) | np.ma.getmask(lat)
return np.ma.array(geol, mask=mask)
return geol | 32,124 |
def Water_Mask(shape_lsc,Reflect):
"""
Calculates the water and cloud mask
"""
mask = np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and(Reflect[:, :, 3] < Reflect[:, :, 2],
Reflect[:, :, 4] < Reflect[:, :, 1])] = 1.0
water_mask_temp = np.copy(mask)
    return water_mask_temp | 32,125 |
def unzip(sequence: Iterable) -> Tuple[Any]:
"""Opposite of zip. Unzip is shallow.
>>> unzip([[1,'a'], [2,'b'], [3,'c']])
((1, 2, 3), ('a', 'b', 'c'))
>>> unzip([ [1,'a','A'], [2, 'b','B'], [3,'c','C'] ])
((1, 2, 3), ('a', 'b', 'c'), ('A', 'B', 'C'))
    The following shows the shallow nature of unzip.
>>> unzip([ [[1,'num'],['a','str']], [[2,'num'],['b','str']] ])
(([1, 'num'], [2, 'num']), (['a', 'str'], ['b', 'str']))
Added in version: 0.1.0
"""
# TODO find better name for split?
def split(constructed, inner_lis):
# constructed is a nested list like [[1,2,3], ['a','b','c']]
return tuple(map(conj, constructed, inner_lis))
def create_nested_list(sequence):
# to be passed as an initial value to reduce
# the number of 2nd level lists corresponds
# to the number of elements in the inner list
# of sequence. for e.g
# [ [1,'a'], [2,'b], [3,'c'] ] -> ( (), () )
return (() for i in range(len(sequence[0])))
return reduce(split, sequence, create_nested_list(sequence)) | 32,126 |
def test_list_base64_binary_enumeration_1_nistxml_sv_iv_list_base64_binary_enumeration_2_3(mode, save_output, output_format):
"""
Type list/base64Binary is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/list/base64Binary/Schema+Instance/NISTSchema-SV-IV-list-base64Binary-enumeration-2.xsd",
instance="nistData/list/base64Binary/Schema+Instance/NISTXML-SV-IV-list-base64Binary-enumeration-2-3.xml",
class_name="NistschemaSvIvListBase64BinaryEnumeration2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 32,127 |
def plot_vm(
vm_params_dict: dict,
srf_corners: np.ndarray,
land_outline_path: Path,
centre_line_path: Path,
mag: float,
outdir: Path,
ptemp: Path,
logger: Logger = qclogging.get_basic_logger(),
):
"""
Plots VM domain as well as SRF domain if possible
Parameters
----------
vm_params_dict :
srf_corners :
land_outline_path :
centre_line_path :
mag :
outdir :
ptemp :
logger :
"""
logger.debug("Plotting vm")
p = gmt.GMTPlot(ptemp / "optimisation.ps")
p.spacial("M", vm_params_dict["plot_region"], sizing=7)
p.coastlines()
srf_path = ptemp / "srf.path"
if srf_path.exists():
# filled slip area
p.path(srf_path, is_file=True, fill="yellow", split="-")
# top edge
for plane in srf_corners:
p.path(
"\n".join([" ".join(map(str, ll)) for ll in plane[:2]]), is_file=False
)
# vm domain (simple and adjusted)
p.path(vm_params_dict["path"], is_file=False, close=True, fill="black@95")
if vm_params_dict["adjusted"]:
p.path(
vm_params_dict["path_mod"],
is_file=False,
close=True,
fill="black@95",
split="-",
width="1.0p",
)
# info text for simple and adjusted domains
p.text(
sum(vm_params_dict["plot_region"][0:2]) / 2.0,
vm_params_dict["plot_region"][3],
"Mw: %.2f X: %.0fkm, Y: %.0fkm, land: %.0f%%"
% (mag, vm_params_dict["xlen"], vm_params_dict["ylen"], vm_params_dict["land"]),
align="CT",
dy=-0.1,
box_fill="white@50",
)
if vm_params_dict["adjusted"]:
p.text(
sum(vm_params_dict["plot_region"][0:2]) / 2.0,
vm_params_dict["plot_region"][3],
"MODIFIED land: %.0f%%" % (vm_params_dict["land_mod"]),
align="CT",
dy=-0.25,
box_fill="white@50",
)
# land outlines blue, nz centre line (for bearing calculation) red
p.path(land_outline_path, is_file=True, close=False, colour="blue", width="0.2p")
p.path(centre_line_path, is_file=True, close=False, colour="red", width="0.2p")
# actual corners retrieved from NZVM output or generated if args.novm
# not available if VM was skipped
    corner_file = outdir / "VeloModCorners.txt"
if corner_file.exists():
logger.debug("Getting corners from VeloModCorners.txt")
p.points(corner_file, fill="red", line=None, shape="c", size=0.05)
else:
logger.debug("VeloModCorners.txt doesn't exist, deriving corners from path mod")
p.points(
vm_params_dict["path_mod"],
is_file=False,
fill="red",
line=None,
shape="c",
size="0.05",
)
# store PNG
p.finalise()
logger.debug("Saving image")
p.png(
dpi=200,
clip=True,
background="white",
out_name=(outdir / vm_params_dict["name"]).resolve(),
) | 32,128 |
def get_gs_distortion(dict_energies: dict):
"""Calculates energy difference between Unperturbed structure and most favourable distortion.
Returns energy drop of the ground-state relative to Unperturbed (in eV) and the BDM distortion that lead to ground-state.
Args:
dict_energies (dict):
Dictionary matching distortion to final energy, as produced by organize_data()
Returns:
(energy_difference, BDM_ground_state_distortion)
"""
if len(dict_energies['distortions']) == 1:
energy_diff = dict_energies['distortions']['rattled'] - dict_energies['Unperturbed']
if energy_diff < 0 :
gs_distortion = 'rattled' #just rattle (no BDM)
else:
gs_distortion = "Unperturbed"
else:
lowest_E_RBDM = min(dict_energies['distortions'].values()) #lowest E obtained with RBDM
energy_diff = lowest_E_RBDM - dict_energies['Unperturbed']
        if lowest_E_RBDM < dict_energies['Unperturbed']:  # if the energy is lower than with Unperturbed
            gs_distortion = list(dict_energies['distortions'].keys())[
                list(dict_energies['distortions'].values()).index(lowest_E_RBDM)
            ]  # BDM distortion that leads to the ground state
else:
gs_distortion = "Unperturbed"
return energy_diff, gs_distortion | 32,129 |
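A quick sketch with a single rattled distortion:

dict_energies = {'distortions': {'rattled': -1.5}, 'Unperturbed': -1.0}
get_gs_distortion(dict_energies)  # -> (-0.5, 'rattled')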
def test__resolve_configuration__import_error(warnings_mock):
""" Test resolution for an none-existant module. """
parsed = _parse_configuration({'abcdefghijklmnopqrstuvwxyz': HParams()})
_resolve_configuration(parsed)
assert warnings_mock.warn.call_count == 1 | 32,130 |
def parse_accept_language(data: str = None):
"""Parse HTTP header `Accept-Language`
Returns a tuple like below:
```
((1.0, Locale('zh_Hant_TW')), (0.9, Locale('en')), (0.0, _fallback_ns))
```
"""
langs = {(0.0, _fallback_ns)}
if data is None:
return tuple(langs)
for s in data.split(","):
tags = s.strip().split(";")
loc_ins = Locale.parse(tags[0], sep="-")
q = 1.0
if len(tags) > 1:
q = float(tags[1][2:])
langs.add((q, loc_ins))
return tuple(sorted(langs, reverse=True)) | 32,131 |
def stop_service():
""" Stopping the service """
global __service_thread
dbg("Trying to stop service thread")
shutdown_service()
__service_thread.join()
__service_thread = None
info("Server stopped")
return True | 32,132 |
def get_constants_name_from_value(constant_dict, value) :
"""
@param constant_dict : constant dictionary to consider
@param value : value's constant name to retrieve
@rtype : a string
"""
try:
return constant_dict[value]
except KeyError:
log.error("The constant name corresponding to the value '%s' can not be found in the dictionary '%s'" % (value, constant_dict))
return ERROR_CONSTANT_NAME_NOT_FOUND | 32,133 |
def rotxyz(x_ang,y_ang,z_ang):
"""Creates a 3x3 numpy rotation matrix from three rotations done in the order
of x, y, and z in the local coordinate frame as it rotates.
The three columns represent the new basis vectors in the global coordinate
system of a coordinate system rotated by this matrix.
Args:
x_ang: angle for rotation about the x axis in radians
y_ang: angle for rotation about the y axis in radians
z_ang: angle for rotation about the z axis in radians
Returns:
The 3D rotation matrix for a x, y, z rotation
"""
# return rotx(x_ang) @ roty(y_ang) @ rotz(z_ang)
return np.matmul(np.matmul(rotx(x_ang), roty(y_ang)), rotz(z_ang)) | 32,134 |
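A short sketch, assuming the usual right-handed rotx/roty/rotz helpers referenced above:

import numpy as np

R = rotxyz(np.pi / 2, 0.0, 0.0)  # pure 90-degree rotation about x
assert np.allclose(R @ np.array([0.0, 1.0, 0.0]), [0.0, 0.0, 1.0])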
def hpdi(proba, array):
"""
Give the highest posterior density interval. For example, the 95% HPDI
is a lower bound and upper bound such that:
1. they contain 95% probability, and
2. in total, have higher peaks than any other bound.
Parameters:
proba: float
A value between 0 and 1, inclusive. For example, if proba is 0.95,
then we'll get a 95% HPDI.
array: np.array
An array of samples.
    Returns: tuple(number, number)
First item is the lower bound.
Second item is the upper bound.
"""
if proba < 0 or proba > 1:
raise ValueError(
f"Proba {proba} should be between 0 and 1, inclusive."
)
sorted_array = np.array(sorted(array))
    # scan every window holding the required mass and keep the narrowest
length = sorted_array.shape[0]
    # the full range of the samples is the widest possible interval
    minimum_width = sorted_array[-1] - sorted_array[0]
start_index_to_return = None
end_index_to_return = None
limit = int((1 - proba) * length)
for start_index in range(limit):
end_index = length - limit + start_index
diff = sorted_array[end_index] - sorted_array[start_index]
if diff <= minimum_width:
minimum_width = diff
start_index_to_return = start_index
end_index_to_return = end_index
return (
sorted_array[start_index_to_return],
sorted_array[end_index_to_return]
) | 32,135 |
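Usage sketch: for a standard normal the 95% HPDI should land near (-1.96, 1.96):

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=100_000)
lower, upper = hpdi(0.95, samples)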
def upgrade():
"""Migrations for the upgrade."""
from aiida.storage.psql_dos.migrations.utils.migrate_repository import migrate_repository
migrate_repository(op.get_bind(), op.get_context().opts['aiida_profile']) | 32,136 |
def chunks(list_: list[Any],
chunk_size: int) -> Generator[list[Any], None, None]:
"""Equally-sized list chunks."""
for i in range(0, len(list_), chunk_size):
yield list_[i:i + chunk_size] | 32,137 |
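For example:

list(chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]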
def PAMI_for_delay(ts, n = 5, plotting = False):
"""This function calculates the mutual information between permutations with tau = 1 and tau = delay
Args:
ts (array): Time series (1d).
Kwargs:
plotting (bool): Plotting for user interpretation. defaut is False.
        n (int): dimension for calculating delay. Default is 5, as explained in "On the Automatic Parameter Selection for Permutation Entropy".
Returns:
(int): tau, The embedding delay for permutation formation.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
cutoff = 0.01
max_delay = 100
m = 2
MP = []
tau_a = []
window_a = []
flag = False
delay = 0
while flag == False:
delay = delay+1
tau_a.append(delay)
window_a.append(delay*(n-1))
MI_Perm = mutualPerm(ts, delay, m)
MP.append(MI_Perm) #calculates mutual information
peaks,_ = find_peaks(-np.array(MP), height=-cutoff)
if MI_Perm < cutoff and len(peaks) > 0:
flag = True
if delay > max_delay:
delay = 0
cutoff = cutoff*10
MP = []
tau_a = []
window_a = []
delay_2 = delay
delay_n = int(delay_2/(n-1))
if plotting == True:
TextSize = 12
plt.figure(1)
plt.plot(tau_a, MP, label = 'n = ' + str(m), linewidth = 2)
plt.xlabel(r'$\tau(n-1)$', size = TextSize)
plt.ylabel(r'$I_p(\tau,n)$', size = TextSize)
plt.xticks(size = TextSize)
plt.yticks(size = TextSize)
plt.legend(loc = 'upper right', fontsize = TextSize)
plt.ylim(0)
plt.show()
return delay_n | 32,138 |
def _capabilities(repo, proto):
"""return a list of capabilities for a repo
This function exists to allow extensions to easily wrap capabilities
computation
- returns a lists: easy to alter
- change done here will be propagated to both `capabilities` and `hello`
command without any other action needed.
"""
# copy to prevent modification of the global list
caps = list(wireprotocaps)
if _allowstream(repo.ui):
if repo.ui.configbool('server', 'preferuncompressed', False):
caps.append('stream-preferred')
requiredformats = repo.requirements & repo.supportedformats
# if our local revlogs are just revlogv1, add 'stream' cap
if not requiredformats - set(('revlogv1',)):
caps.append('stream')
# otherwise, add 'streamreqs' detailing our local revlog format
else:
caps.append('streamreqs=%s' % ','.join(requiredformats))
if repo.ui.configbool('experimental', 'bundle2-exp', False):
capsblob = bundle2.encodecaps(repo.bundle2caps)
caps.append('bundle2-exp=' + urllib.quote(capsblob))
caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
caps.append('httpheader=1024')
return caps | 32,139 |
def findExonIntron(HLATypings, Sequence, Blocks):
"""
Find Exon and Intron sequence part from query based on the reference
One typing;
one or two blocks
"""
IMGTglstrings = re.sub("HLA-", "", HLATypings)
tplist = IMGTglstrings.split("/")
#query_seqs = [Seq(q, generic_dna) for q in query]
locus = tplist[0].split("*")[0]
    if locus in ['A', 'C']:  # 8 exons, 7 introns, 5'-UTR, 3'-UTR
Exonfields = ['Exon'+str(index+1) for index in range(8)]
fieldsNum = 17 # 8+ 7 + 1 + 1
# 0: five_prime_UTR
# odd: Exon
# even: Intron
# 16: three_prime_UTR
ExonSeq = IMGTdbIO.readIMGTsql(tplist[0], field = ", ".join(Exonfields))
# query_exons = []
# intron_seqs = []
annotated_seq = {}
if len(Blocks) == 1:
query_seq = Sequence[0]
intron_start = 0
for Exon_index in range(8):
if ExonSeq[Exon_index] in query_seq:
if Exon_index == 0: # Exon 1
annotated_seq["five_prime_UTR"] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonSeq[Exon_index]
# intron_start = query_seq.index(ExonSeq[Exon_index]) + len(ExonSeq[Exon_index])
query_seq = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
elif Exon_index == 7: # Exon 8
# annotated_seq["three_prime_UTR"] = query_seq[intron_start:query_seq.index(ExonSeq[Exon_index])]
annotated_seq["Intron"+ str(Exon_index)] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonSeq[Exon_index]
annotated_seq["three_prime_UTR"] = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
else: # middle exons
annotated_seq["Intron"+ str(Exon_index)] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonSeq[Exon_index]
#intron_start = query_seq.index(ExonSeq[Exon_index]) + len(ExonSeq[Exon_index])
query_seq = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
else: ## ToDO: Finish ExonExtraction() function
print("ARS doens't match. ")
if Exon_index == 0: # Exon 1
# annotated_seq["five_prime_UTR"] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonExtraction(ExonSeq[Exon_index], query_seq)
# intron_start = query_seq.index(ExonSeq[Exon_index]) + len(ExonSeq[Exon_index])
query_seq = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
elif Exon_index == 7: # Exon 8
# annotated_seq["three_prime_UTR"] = query_seq[intron_start:query_seq.index(ExonSeq[Exon_index])]
# annotated_seq["Intron"+ str(Exon_index)] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonExtraction(ExonSeq[Exon_index], query_seq)
# annotated_seq["three_prime_UTR"] = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
else: # middle exons
# annotated_seq["Intron"+ str(Exon_index)] = query_seq[0:query_seq.index(ExonSeq[Exon_index])]
annotated_seq['Exon' + str(Exon_index + 1)] = ExonExtraction(ExonSeq[Exon_index], query_seq)
# intron_start = query_seq.index(ExonSeq[Exon_index]) + len(ExonSeq[Exon_index])
# query_seq = re.sub(query_seq[0:query_seq.index(ExonSeq[Exon_index])+len(ExonSeq[Exon_index])], "", query_seq)
else: # 2 blocks
print("")
elif locus == 'B': # 7 exons, 6 introns, 5'-UTR, 3'-UTR
# fields = ['Exon'+str(index+1) for index in range(7)]
fieldsNum = 15 # 7 + 6 + 1 + 1
# 0: five_prime_UTR
# odd: Exon
# even: Intron
# 14: three_prime_UTR
elif locus == 'DQB1': # Intron1-Exon2-Intron2-Exon3-Intron3-Exon4-Intron4
# fields = ['Exon'+str(index+2) for index in range(3)]
fieldsNum = 7 # 3 + 4
# odd: Intron
# even: Exon
elif locus in ['DRB1', 'DPB1']: # Intron1-Exon2-Intron2 and intron2-Exon3-Intron3
# fields = ['Exon'+str(index+2) for index in range(2)]
fieldsNum = 3 # 2 + 3
# odd: Intron
# even: Exon
else:
fields = "*"
ARSseq = {}
# Quick and dirty way: only check Exons 2 and 3:
    # TODO: unfinished; if the two typing fields are different,
    # only the ARS region needs to be checked.
for Seq_index in range(len(Sequence)):
ARSseq = IMGTdbIO.readIMGTsql(tplist[Seq_index], field = ", ".join(fields))
query_ARS = []
intron_seqs = []
query_seq = Sequence[Seq_index]
for ARS_index in range(len(fields)):
if ARSseq[ARS_index] in query_seq:
intron_seqs.append(query_seq[0:query_seq.index(ARSseq[ARS_index])])
query_ARS.append(ARSseq[ARS_index])
query_seq = query_seq[query_seq.index(ARSseq[ARS_index]):len(query_seq)]
else:
print("ARS doens't match. ")
# the last intron
if Sequence[Seq_index].index(ARSseq[ARS_index])+len(ARSseq[ARS_index]) <= len(Sequence[Seq_index]):
intron_seqs.append(Sequence[Seq_index][Sequence[Seq_index].index(ARSseq[ARS_index])+len(ARSseq[ARS_index]):len(Sequence[Seq_index])])
for ARS_index in range(len(fields)):
if fields[ARS_index] == "Exon1":
intronName = 'fivePrimeUTR'
elif locus in ['A', 'C'] and fields[ARS_index] == "Exon8" :
            intronName = 'threePrimeUTR'  # the UTR after the last exon is the 3' UTR
            # TODO: store the sequence under Annotated_seqs['PS1'][intronName]
if ARSseq[0] in query[0] and ARSseq[1] in query[0]: # it's correct phase typing
# do nothing
correct_HLAtypings = typing_list
elif ARSseq[0] in query[1] and ARSseq[1] in query[1]: # swapped cases
correct_HLAtypings = [typing_list[1], typing_list[0]]
else: # if neither the case, then it's the wrong typing
correct_HLAtypings = []
print("The typings do not match the sequences. Please check the Typing!\n")
else: # check if the two are the same typings, otherwise check other exons
if tplist[0] == tplist[1]: # if the same typings, then do nothing.
ARSseq = IMGTdbIO.readIMGTsql(tplist[0], field='Exon2, Exon3')
if ARSseq[0] not in query[0] or ARSseq[1] not in query[0]:
# if the ARS doesn't match, then it's the wrong typing
correct_HLAtypings = []
print("The typings do not match the sequences. Please check the Typing!\n")
            else:  # correct typing
correct_HLAtypings = typing_list | 32,140 |
def dep(doclike: types.DocLike) -> Dict[str, int]:
"""
Count the number of times each syntactic dependency relation appears
as a token annotation in ``doclike``.
Args:
doclike
Returns:
Mapping of dependency relation to count of occurrence.
"""
return dict(collections.Counter(tok.dep_ for tok in doclike)) | 32,141 |
def fix_phonology_table(engine, phonology_table, phonologybackup_table, user_table):
"""Give each phonology UUID and modifier_id values; also give the phonology backups of
existing phonologies UUID values.
"""
print_('Fixing the phonology table ... ')
msgs = []
#engine.execute('set names latin1')
engine.execute('set names utf8;')
users = engine.execute(user_table.select()).fetchall()
phonologybackups = engine.execute(phonologybackup_table.select()).fetchall()
buffer1 = []
buffer2 = []
for row in engine.execute(phonology_table.select()):
values = row2dict(row)
values['UUID'] = str(uuid4())
backups = sorted([pb for pb in phonologybackups if pb['phonology_id'] == values['id']],
key=lambda pb: pb['datetimeModified'])
if backups:
try:
most_recent_backuper = json.loads(backups[-1]['backuper'])['id']
if [u for u in users if u['id'] == most_recent_backuper]:
values['modifier_id'] = most_recent_backuper
else:
values['modifier_id'] = values['enterer_id']
msgs.append('There is no user %d to serve as the most recent backuper for phonology %d' % (most_recent_backuper, values['id']))
except Exception:
msgs.append('''WARNING: there are %d backups for phonology %d; however,
it was not possible to extract a backuper from the most recent one (backuper value: %s)'''.replace('\n', ' ') % (
len(backups), values['id'], backups[-1]['backuper']))
values['modifier_id'] = values['enterer_id']
else:
values['modifier_id'] = values['enterer_id']
buffer1.append(values)
for pb in backups:
buffer2.append({'pb_id': pb['id'], 'UUID': values['UUID']})
update = phonologybackup_table.update().where(phonologybackup_table.c.id==bindparam('pb_id')).\
values(UUID=bindparam('UUID'))
engine.execute(update, buffer2)
if buffer1:
engine.execute('set names utf8;')
update = phonology_table.update().where(phonology_table.c.id==bindparam('id_')).\
values(modifier_id=bindparam('modifier_id'), UUID=bindparam('UUID'))
engine.execute(update, buffer1)
    print('done.')
return msgs | 32,142 |
def D(field, dynkin):
"""A derivative.
Returns a new field with additional dotted and undotted indices.
Example:
>>> D(L, "01")
DL(01001)(-1/2)
>>> D(L, "21")
DL(21001)(-1/2)
"""
undotted_delta = int(dynkin[0]) - field.dynkin_ints[0]
dotted_delta = int(dynkin[1]) - field.dynkin_ints[1]
# derivative can only change one dotted and one undotted index
assert abs(undotted_delta) == 1
assert abs(dotted_delta) == 1
# other info to construct field instance
deriv_symbol = "D"
symbol = deriv_symbol + field.label
new_field_dynkin = dynkin + field.dynkin[2:]
rest = {
"charges": field.charges,
"comm": field.comm,
"is_conj": field.is_conj,
"nf": field.nf,
"stripped": field.stripped,
}
new_field = Field(symbol, dynkin=new_field_dynkin, **rest)
new_field.latex = f"(D{strip_parens(field.get_latex())})"
new_field.derivs = field.derivs + 1
# only add this information for the first derivative
if new_field.stripped is None:
new_field.stripped = {
"label": field.label,
"dynkin": field.dynkin,
"symmetry": field.symmetry,
"charges": field.charges,
"latex": field.latex,
}
return new_field | 32,143 |
def _det(m, n):
    """Recursive calculation of the matrix determinant utilizing cofactors."""
    sgn = 1
    Det = 0
    if n == 1:
        return m[0][0]
    cofact = [n * [0] for i in range(n)]
    for i in range(n):
        _get_cofact(m, cofact, 0, i, n)
        Det += sgn * m[0][i] * _det(cofact, n - 1)
        sgn = -sgn
    return Det | 32,144 |
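A sketch, assuming the companion _get_cofact(m, cofact, p, q, n) helper fills cofact with the minor of m obtained by deleting row p and column q:

# _det([[1, 2], [3, 4]], 2) -> 1*4 - 2*3 = -2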
def get_vcps() -> List[LinuxVCP]:
"""
Interrogates I2C buses to determine if they are DDC-CI capable.
Returns:
List of all VCPs detected.
"""
vcps = []
# iterate I2C devices
for device in pyudev.Context().list_devices(subsystem="i2c"):
vcp = LinuxVCP(device.sys_number)
try:
with vcp:
pass
except (OSError, VCPIOError):
pass
else:
vcps.append(vcp)
return vcps | 32,145 |
def toGoatLatin(S):
"""
:type S: str
:rtype: str
"""
l_words = []
for i, word in enumerate(S.split()):
if not is_vowel(word[0]):
word = word[1:] + word[0]
aa = "a" * (i + 1)
l_words.append(word + "ma" + aa)
return " ".join(l_words) | 32,146 |
def test_autoinstrumentation():
"""Config auto instrumentation PL_IMPL_AUTO_INSTRUMENT=1"""
if sys.platform == "win32":
LOG("Skipped: Auto instrumentation is applicable only under Linux and with GCC")
return
build_target(
"testprogram",
"USE_PL=1 PL_IMPL_AUTO_INSTRUMENT=1",
)
# Create the lookup
res = run_cmd(
[
sys.executable,
os.path.join("..", "..", "..", "tools") + "/extStringCppParser.py",
"--exe",
"./bin/testprogram",
]
)
    with open("./bin/testprogram.txt", "w") as fh:
        fh.write(res.stdout)
# Set the external string
set_external_strings("./bin/testprogram.txt")
data_configure_events(
EvtSpec("RandomLCM::getNext()")
) # This symbol is present only in the auto-instrumented version
try:
launch_testprogram()
CHECK(True, "Connection established")
except ConnectionError:
CHECK(False, "No connection")
events = data_collect_events(timeout_sec=2.0)
CHECK(
events,
"Auto instrumented function related events are received",
)
process_stop()
set_external_strings() | 32,147 |
def cli_file_convert(file_path, replace=False):
"""Simple logic for converting files from the CLI"""
file_path = os.path.realpath(os.path.expanduser(file_path))
logger.info("Trying to convert: " + file_path)
with open(file_path, 'r') as f:
json_data = json.load(f, object_hook=clean_json._byteify)
old_file_data = clean_json.load(json_data)
file_data = FileConverter.get_converted_data(old_file_data)
out_file_name = os.path.basename(file_path)
out_file_name, _, ext = out_file_name.rpartition('.')
if replace:
ending = "."
else:
ending = "_CONVERTED."
out_file_name = out_file_name + ending + ext
out_file_dir = os.path.dirname(file_path)
out_file_path = os.path.join(out_file_dir, out_file_name)
with open(out_file_path, 'w') as out_file:
json.dump(file_data, out_file, indent=4, sort_keys=False,
separators=(',', ': '))
logger.info('Successfully saved "' + out_file_path + '"') | 32,148 |
def train(config: DictConfig, do_cross_validation: bool) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
do_cross_validation (bool): Whether to perform cross validation.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
run_name = utils.get_run_name(config)
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# Load csv
df = pd.read_csv(config.datamodule.csv_path)
kf = KFold(n_splits=config["folds"], shuffle=True, random_state=config.seed)
datamodule_params = dict(config.datamodule)
datamodule_cls = utils._locate(datamodule_params.pop("_target_"))
datamodule_params.pop("csv_path") # remove csv_path from params
res_dict = defaultdict(list)
best_paths = []
log.info(f"Start {utils.get_experiment_name(config)}")
assert (do_cross_validation and config.folds is not None) or config.ratio is not None
if do_cross_validation:
for i, (train, test) in enumerate(kf.split(df), start=1):
log.info(f"Start {i}th fold out of {kf.n_splits} folds")
train_df = df.iloc[train]
test_df = df.iloc[test]
valid_df, test_df = np.array_split(test_df, 2)
# Init lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
with PrepareTmpFile(train_df, valid_df, test_df) as (ft, fv, fe):
datamodule: LightningDataModule = datamodule_cls(ft.name, fv.name, fe.name, **datamodule_params)
_train(datamodule, config, res_dict, best_paths)
else:
ratios = list(map(float, config.ratio.split(",")))
assert sum(ratios) == 1.0, f"Ratios must sum to 1.0, but got {ratios} -> {sum(ratios)}"
train_ratio = ratios[0]
val_ratio = ratios[0] + ratios[1]
train_df, valid_df, test_df = np.split(
df.sample(frac=1, random_state=config.seed), [int(train_ratio * len(df)), int(val_ratio * len(df))]
)
# Init lightning datamodule
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
with PrepareTmpFile(train_df, valid_df, test_df) as (ft, fv, fe):
datamodule: LightningDataModule = datamodule_cls(ft.name, fv.name, fe.name, **datamodule_params)
_train(datamodule, config, res_dict, best_paths)
# Log/Print results
utils.log_result(run_name, config, res_dict, best_paths) | 32,149 |
def list_startswith(_list, lstart):
"""
Check if a list (_list) starts with all the items from another list (lstart)
:param _list: list
:param lstart: list
:return: bool, True if _list starts with all the items of lstart.
"""
if _list is None:
return False
lenlist = len(_list)
lenstart = len(lstart)
if lenlist >= lenstart:
# if _list longer or as long as lstart, check 1st items:
return (_list[:lenstart] == lstart)
else:
# _list smaller than lstart: always false
return False | 32,150 |
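For example:

list_startswith([1, 2, 3], [1, 2])  # -> True
list_startswith([1, 2], [1, 2, 3])  # -> False
list_startswith(None, [])           # -> False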
def _get_default_scheduler():
"""Determine which scheduler system is being used.
It tries to determine it by running both PBS and SLURM commands.
If both are available then one needs to set an environment variable
called 'SCHEDULER_SYSTEM' which is either 'PBS' or 'SLURM'.
For example add the following to your `.bashrc`
```bash
export SCHEDULER_SYSTEM="PBS"
```
By default it is "SLURM".
"""
has_pbs = bool(find_executable("qsub")) and bool(find_executable("qstat"))
has_slurm = bool(find_executable("sbatch")) and bool(find_executable("squeue"))
DEFAULT = SLURM
default_msg = f"We set DefaultScheduler to '{DEFAULT}'."
scheduler_system = os.environ.get("SCHEDULER_SYSTEM", "").upper()
if scheduler_system:
if scheduler_system not in ("PBS", "SLURM"):
warnings.warn(
f"SCHEDULER_SYSTEM={scheduler_system} is not implemented."
f"Use SLURM or PBS. {default_msg}"
)
return DEFAULT
else:
return {"SLURM": SLURM, "PBS": PBS}[scheduler_system]
elif has_slurm and has_pbs:
msg = f"Both SLURM and PBS are detected. {default_msg}"
warnings.warn(msg)
return DEFAULT
elif has_pbs:
return PBS
elif has_slurm:
return SLURM
else:
msg = f"No scheduler system could be detected. {default_msg}"
warnings.warn(msg)
return DEFAULT | 32,151 |
def get_subscribers(subreddit_, *args):
"""Gets current sub count for one or more subreddits.
Inputs
-------
str: Desired subreddit name(s)
Returns
-------
int: sub count or dict:{subreddit: int(sub count)}
"""
if len(args) > 0:
subreddit = reddit.subreddit(subreddit_)
subcount = {subreddit_: subreddit.subscribers}
for page in args:
subreddit = reddit.subreddit(page)
subcount[page] = subreddit.subscribers
return subcount
else:
subreddit = reddit.subreddit(subreddit_)
return subreddit.subscribers | 32,152 |
def test_md001_front_matter_with_title():
"""
    Test to make sure this rule triggers when a front-matter title is followed by a heading that skips a level.
"""
# Arrange
scanner = MarkdownScanner()
supplied_arguments = [
"--disable-rules",
"MD003,MD022",
"--set",
"extensions.front-matter.enabled=$!True",
"scan",
"test/resources/rules/md001/front_matter_with_title.md",
]
expected_return_code = 1
expected_output = "test/resources/rules/md001/front_matter_with_title.md:5:1: MD001: Heading levels should only increment by one level at a time. [Expected: h2; Actual: h3] (heading-increment,header-increment)\n"
expected_error = ""
# Act
execute_results = scanner.invoke_main(arguments=supplied_arguments)
# Assert
execute_results.assert_results(
expected_output, expected_error, expected_return_code
) | 32,153 |
def validate_mongo_role_definition_id(ns):
""" Extracts Guid role definition Id """
if ns.mongo_role_definition_id is not None:
ns.mongo_role_definition_id = _parse_resource_path(ns.mongo_role_definition_id, False, "mongodbRoleDefinitions") | 32,154 |
def pattern_match(value, pattern, env=None):
"""
Pattern match a value and a pattern.
Args:
value: the value to pattern-match on
pattern: a pattern, consisting of literals and/or locally bound
variables
env: a dictionary of local variables bound while matching
Returns: (True, env) if the match is successful, and (False, env) otherwise
Raises:
SyntaxError, if a variable name is used multiple times in the same
pattern
"""
env = {} if env is None else env
if isinstance(pattern, PatternMatchBind):
if pattern.name in env:
raise SyntaxError("Conflicting definitions for %s" % pattern.name)
env[pattern.name] = value
return True, env
elif isinstance(pattern, PatternMatchListBind):
head, tail = list(value[:len(pattern.head)]), value[len(pattern.head):]
matches, env = pattern_match(head, pattern.head, env)
if matches:
return pattern_match(tail, pattern.tail, env)
return False, env
elif type(value) == type(pattern):
if isinstance(value, ADT):
return pattern_match(nt_to_tuple(value), nt_to_tuple(pattern), env)
elif hasattr(value, "__iter__"):
matches = []
if len(value) != len(pattern):
return False, env
for v, p in zip(value, pattern):
match_status, env = pattern_match(v, p, env)
matches.append(match_status)
return all(matches), env
elif value == pattern:
return True, env
return False, env | 32,155 |
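A small sketch, assuming the PatternMatchBind class from the same module:

matched, env = pattern_match((1, 2), (1, PatternMatchBind("x")))
# matched -> True, env -> {'x': 2}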
def dijkstra(adjacency_list, source_vertex, cull_distance = sys.maxsize):
"""
Implementation of Dijkstra's Algorithm for finding shortest
path to all vertices in a graph.
Parameters
----------
adjacency_list (dict of int : (dict of int : int))
        Maps each vertex to a dictionary whose keys are neighboring
        vertices and whose values are the distances between them.
*** Distances must be non-negative. ***
source_vertex (int)
The vertex to start the algorithm (distance zero vertex)
cull_distance (int) *optional, defaults to sys.maxsize
The maximum distance desired to traverse plus 1
(Represents infinite distance)
Returns
-------
dict(int : int)
A dictionary whose keys are the reachable vertices of the
adjacency list and data is the distance required to reach
that vertex from the source vertex.
"""
pq = [] # Priority Queue (Min-Heap) holding vertices to traverse
distance = {} # Distance Map (Return Value)
count = 0 # Counter for Creating Unique IDs
valid_ids = {} # Maps Vertices to Their Valid ID
# Named tuple to be used in the priority queue
DistVtxId = namedtuple('DistVtxId', 'distance vertex id')
### SETUP
# Add each vertex in the adjacency list to the priority queue
for vertex in adjacency_list.keys():
id = count # Unique ID for each vertex in the priority queue
count += 1
temp = None # <- for name scope
if (vertex == source_vertex):
# Source vertex gets distance zero from itself
temp = DistVtxId(0, vertex, id)
# Add the source vertex to the final result
distance[source_vertex] = 0
else:
# Non-Source vertices start at infinite distance
temp = DistVtxId(cull_distance, vertex, id)
# Push the vertex onto the priority queue
heapq.heappush(pq, temp)
valid_ids[vertex] = temp.id
# Add this vertex's initial distance to the return value
distance[vertex] = temp.distance
### TRAVERSAL
# Iterates (at most) the number of vertices times
for i in range(0, len(adjacency_list)):
# Get the lowest edge distance from the priority queue
u_star = heapq.heappop(pq)
# Ignore this element if it does not have a valid ID
# Occurs when the priority of a vertex has been "updated"
if (valid_ids[u_star.vertex] != u_star.id):
continue
# For every neighboring vertex
for vertex, edge_weight in adjacency_list[u_star.vertex].items():
new_distance = u_star.distance + edge_weight
old_distance = distance[vertex]
# If we can reach the neighbor covering less distance from
# the source
if (new_distance < old_distance):
distance[vertex] = new_distance
# (Effectively) Update the priority (distance) of the
# vertex in the priority queue
temp = DistVtxId(new_distance, vertex, count)
heapq.heappush(pq, temp)
valid_ids[temp.vertex] = temp.id
count += 1
# Cull the vertices that were unreachable (or farther away from
# the source than the cull_distance)
distance = {vtx : dist for vtx, dist in distance.items() if dist != cull_distance}
return distance | 32,156 |
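A small worked example using the adjacency-dict format described above:

graph = {
    0: {1: 4, 2: 1},
    1: {3: 1},
    2: {1: 2, 3: 5},
    3: {},
}
dijkstra(graph, 0)  # -> {0: 0, 1: 3, 2: 1, 3: 4}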
def process_existing_fiber(country):
"""
Load and process existing fiber data.
Parameters
----------
country : dict
Contains all country specfic information.
"""
iso3 = country['iso3']
iso2 = country['iso2'].lower()
folder = os.path.join(DATA_INTERMEDIATE, iso3, 'network_existing')
if not os.path.exists(folder):
os.makedirs(folder)
filename = 'core_edges_existing.shp'
path_output = os.path.join(folder, filename)
if os.path.exists(path_output):
return print('Existing fiber already processed')
path = os.path.join(DATA_RAW, 'afterfiber', 'afterfiber.shp')
shape = fiona.open(path)
data = []
for item in shape:
if item['properties']['iso2'].lower() == iso2.lower():
if item['geometry']['type'] == 'LineString':
if int(item['properties']['live']) == 1:
data.append({
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': item['geometry']['coordinates'],
},
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
if item['geometry']['type'] == 'MultiLineString':
if int(item['properties']['live']) == 1:
try:
geom = MultiLineString(item['geometry']['coordinates'])
                    for line in geom.geoms:
data.append({
'type': 'Feature',
'geometry': mapping(line),
'properties': {
'operators': item['properties']['operator'],
'source': 'existing'
}
})
except:
# some geometries are incorrect from data source
# exclude to avoid issues
pass
if len(data) == 0:
return print('No existing infrastructure')
data = gpd.GeoDataFrame.from_features(data)
data.to_file(path_output, crs='epsg:4326')
return print('Existing fiber processed') | 32,157 |
def wraps(wrapped):
"""A functools.wraps helper that handles partial objects on Python 2."""
# https://github.com/google/pytype/issues/322
if isinstance(wrapped, functools.partial): # pytype: disable=wrong-arg-types
return six.wraps(wrapped, assigned=_PARTIAL_VALID_ASSIGNMENTS)
else:
return six.wraps(wrapped) | 32,158 |
def byte_to_bits(byte):
"""Convert a byte to an tuple of 8 bits for use in Merkle-Hellman.
The first element of the returned tuple is the most significant bit.
Usage::
        byte_to_bits(65)  # => (0, 1, 0, 0, 0, 0, 0, 1)
        byte_to_bits(b'ABC'[0])  # => (0, 1, 0, 0, 0, 0, 0, 1)
byte_to_bits('A') # => raises TypeError
:param byte: The byte to convert.
:type byte: int between 0 and 255, inclusive.
:raises: BinaryConversionError if byte is not in [0, 255].
:returns: An 8-tuple of bits representing this byte's value.
"""
if not 0 <= byte <= 255:
raise BinaryConversionError(byte)
out = []
for i in range(8):
out.append(byte & 1)
byte >>= 1
return tuple(out[::-1]) | 32,159 |
def load_translation(in_f, file):
""" Extract lines from a template and save it to a file. """
    with open(in_f, 'r') as f:
        l = f.read().split('\n')
    n = 0
    r = {}
    for rs in l:
        n += 1
        # find underscore calls with parentheses: _("...")
        aa = re.findall(r'_\([^)]+\)', rs)
        for res in aa:
            # cut out the string itself, without the underscore, parentheses and quotes
            res = res[3:-2]
            # check whether we have already seen this string
            if res not in r:
                r[res] = []
            # Add line number
            r[res].append(n)
    for res, nums in r.items():
file.write('#: '+in_f+':'+','.join([str(x) for x in nums])+'\n')
file.write('msgid "'+res+'"\n')
file.write('msgstr ""\n\n') | 32,160 |
def _init():
"""
Internal function which checks if Maxima has loaded the
"orthopoly" package. All functions using this in this
file should call this function first.
TEST:
The global starts ``False``::
sage: sage.functions.orthogonal_polys._done
False
Then after using one of these functions, it changes::
sage: from sage.functions.orthogonal_polys import laguerre
sage: laguerre(2,x)
1/2*x^2 - 2*x + 1
sage: sage.functions.orthogonal_polys._done
False
Note that because here we use a Pynac variable ``x``,
the representation of the function is different from
its actual doctest, where a polynomial indeterminate
``x`` is used.
"""
global _done
if _done:
return
maxima.eval('load("orthopoly");')
# TODO -- make it possible to use the intervals returned
# instead of just discarding this info!
maxima.eval('orthopoly_returns_intervals:false;')
_done = True | 32,161 |
def KK_RC48_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Series resistance in series with 48 parallel RC elements whose fixed
    # time constants are t_values[0..47] and whose resistances are R1..R48.
    return params["Rs"] + sum(
        params["R%d" % (i + 1)] / (1 + w * 1j * t_values[i]) for i in range(48)
    ) | 32,162 |
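A minimal call sketch; the function only indexes params like a mapping, so a plain dict with keys "Rs" and "R1".."R48" works here (in fitting code this would typically be an lmfit Parameters object — an assumption, not shown in this row):

import numpy as np

w = np.logspace(-2, 5, 50)            # angular frequencies
t_values = np.logspace(-5, 2, 48)     # fixed RC time constants
params = {"Rs": 10.0, **{"R%d" % i: 1.0 for i in range(1, 49)}}
Z = KK_RC48_fit(params, w, t_values)  # complex impedance, shape (50,)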
def test_complex_serialization(tmpdir):
"""
Test serializing a complex nested structure and checking it for validity (without deserializing) by inspecting Path
objects in the value hierarchy
"""
AlkymiConfig.get().cache = True
tmpdir = Path(str(tmpdir))
AlkymiConfig.get().cache_path = tmpdir # Use temporary directory for caching
subdir = tmpdir / "subdir"
subdir.mkdir()
file_a = tmpdir / "file_a.txt"
with file_a.open("w") as f:
f.write(f.name)
    file_b = tmpdir / "file_b.txt"
with file_b.open("w") as f:
f.write(f.name)
# Cache object - everything should be valid at this point
value = (1, 2, 3, ["a", "b", "c"], [file_a, file_b])
obj = OutputWithValue(value, checksums.checksum(value))
obj_cached = serialization.cache(obj, subdir)
assert obj_cached.valid
# Touching an external file shouldn't cause invalidation
file_a.touch()
assert obj_cached.valid
# Changing one of the "external" files _should_ cause invalidation
with file_a.open("a") as f:
f.write("Changed!")
assert not obj_cached.valid
# Changing it back to the original value should cause things to work again
with file_a.open("w") as f:
f.write(f.name)
assert obj_cached.valid | 32,163 |
def race(deer, seconds):
""" Use the reindeer's speed and rest times to find the timed distance """
distance = 0
stats = reindeer[deer]
resting = False
while True:
if resting:
if seconds <= stats[2]:
break
seconds -= stats[2]
else:
if seconds <= stats[1]:
distance += seconds * stats[0]
break
seconds -= stats[1]
distance += stats[1] * stats[0]
resting = not resting
return distance | 32,164 |
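race() reads a module-level reindeer dict that is not shown in this row; a hypothetical sketch of its shape, using the classic Advent of Code 2015 day 14 values:

# name -> (speed in km/s, fly duration in s, rest duration in s)
reindeer = {"Comet": (14, 10, 127), "Dancer": (16, 11, 162)}
race("Comet", 1000)   # -> 1120
race("Dancer", 1000)  # -> 1056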
def AsdlEqual(left, right):
"""Check if generated ASDL instances are equal.
We don't use equality in the actual code, so this is relegated to test_lib.
"""
if left is None and right is None:
return True
if isinstance(left, (int, str, bool, pybase.SimpleObj)):
return left == right
if isinstance(left, list):
if len(left) != len(right):
return False
for a, b in zip(left, right):
if not AsdlEqual(a, b):
return False
return True
if isinstance(left, pybase.CompoundObj):
if left.tag != right.tag:
return False
field_names = left.__slots__ # hack for now
for name in field_names:
# Special case: we are not testing locations right now.
if name == 'span_id':
continue
a = getattr(left, name)
b = getattr(right, name)
if not AsdlEqual(a, b):
return False
return True
raise AssertionError(left) | 32,165 |
def FindPriority(bug_entry):
"""Finds and returns the priority of a provided bug entry.
Args:
bug_entry: The provided bug, a IssueEntry instance.
Returns:
        A string containing the priority of the bug ("1", "2", etc...)
"""
priority = ''
for label in bug_entry.label:
if label.text.lower().startswith('pri-'):
priority = label.text[4:]
return priority | 32,166 |
def attention_padding_mask(q, k, padding_index=0):
"""Generate mask tensor for padding value
Args:
        q (Tensor): (B, T_q, D); the trailing dim is averaged away internally
k (Tensor): (B, T_k)
padding_index (int): padding index. Default: 0
Returns:
        (torch.BoolTensor): Mask with shape (B, T_q, T_k). True elements mark positions that require masking.
Notes:
Assume padding_index is 0:
k.eq(0) -> BoolTensor (B, T_k)
k.eq(0).unsqueeze(1) -> (B, 1, T_k)
k.eq(0).unsqueeze(1).expand(-1, q.size(-1), -1) -> (B, T_q, T_k)
"""
    # We take the mean because we want to get rid of the last dim;
    # how we remove it doesn't matter, since we only end up with
    # True/False values for the mask.
q = torch.mean(q,2)
mask = k.eq(padding_index).unsqueeze(1).expand(-1, q.size(-1), -1)
return mask | 32,167 |
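A shape-level sketch: queries arrive as (B, T_q, D) — hence the mean over dim 2 — and keys as (B, T_k) token ids with 0 as padding:

import torch

q = torch.randn(2, 4, 8)                        # (B, T_q, D)
k = torch.tensor([[5, 3, 0, 0], [7, 0, 0, 0]])  # (B, T_k)
mask = attention_padding_mask(q, k)             # BoolTensor of shape (2, 4, 4)
mask[1, 0]  # -> tensor([False,  True,  True,  True])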
def get_pcap_bytes(pcap_file):
"""Get the raw bytes of a pcap file or stdin."""
if pcap_file == "-":
pcap_bytes = sys.stdin.buffer.read()
else:
with open(pcap_file, "rb") as f:
pcap_bytes = f.read()
return pcap_bytes | 32,168 |
def run(s, output_cmd=True, stdout=False):
"""Runs a subprocess."""
if output_cmd:
print(f"Running: {s}")
p_out = subprocess.run(
s, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, check=False
)
if stdout:
return p_out.stdout.decode("utf-8").strip()
else:
return p_out | 32,169 |
def process_params(request, standard_params=STANDARD_QUERY_PARAMS,
filter_fields=None, defaults=None):
"""Parse query params.
Parses, validates, and converts query into a consistent format.
:keyword request: the bottle request
:keyword standard_params: query params that are present in most of our
(opinionated) APIs (ex. limit, offset, sort, q, and facets)
:keyword filter_fields: list of field names to allow filtering on
:keyword defaults: dict of params and their default values
    :returns: dict of query params with supplied values (string or list)
"""
if not filter_fields:
filter_fields = []
unfilterable = (set(request.query.keys()) - set(filter_fields) -
set(standard_params))
if unfilterable:
bottle.abort(400,
"The following query params were invalid: %s. "
"Try one (or more) of %s." %
(", ".join(unfilterable),
", ".join(filter_fields)))
query_fields = defaults or {}
for key in request.query:
if key in filter_fields:
# turns ?netloc=this.com&netloc=that.com,what.net into
# {'netloc': ['this.com', 'that.com', 'what.net']}
matches = request.query.getall(key)
matches = list(itertools.chain(*(k.split(',') for k in matches)))
if len(matches) > 1:
query_fields[key] = matches
else:
query_fields[key] = matches[0]
if 'sort' in request.query:
sort = request.query.getall('sort')
sort = list(itertools.chain(*(
comma_separated_strings(str(k)) for k in sort)))
query_fields['sort'] = sort
if 'q' in request.query:
search = request.query.getall('q')
search = list(itertools.chain(*(
comma_separated_strings(k) for k in search
if k)))
query_fields['q'] = search
return query_fields | 32,170 |
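The comma-splitting idiom used above for filters, sort, and q, shown in isolation:

import itertools

matches = ["this.com", "that.com,what.net"]
list(itertools.chain(*(k.split(',') for k in matches)))
# -> ['this.com', 'that.com', 'what.net']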
def all_attributes(cls):
"""
Each object will have the attributes declared directly on the object in the attrs dictionary. In addition there
may be attributes declared by a particular object's parent classes. This function walks the class hierarchy to
collect the attrs in the object's parent classes
For example if Location.City is a subclass of Location and Location has the attribute GPS_COORDS then
this function would combine GPS_COORDS and the existing attributes on the Location.City object and return the
combination
"""
attrs = cls.attrs.copy()
# walk the class hierarchy
for sub in inspect.getmro(cls):
        for name, prop in getattr(sub, 'attrs', {}).items():
if name in attrs:
continue
attrs[name] = prop
return attrs | 32,171 |
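A minimal sketch of the attrs-merging behavior (these class names are illustrative, not from the original codebase):

class Location(object):
    attrs = {'GPS_COORDS': 'latlon'}

class City(Location):
    attrs = {'NAME': 'str'}

all_attributes(City)
# -> {'NAME': 'str', 'GPS_COORDS': 'latlon'}; a subclass keeps its own
# value when the same name appears at multiple levels of the hierarchy.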
def _reshape_model_inputs(model_inputs: np.ndarray, num_trajectories: int,
trajectory_size: int) -> np.ndarray:
"""Reshapes the model inputs' matrix.
Parameters
----------
model_inputs: np.ndarray
Matrix of model inputs
num_trajectories: int
Number of trajectories
trajectory_size: int
Number of points in a trajectory
Returns
-------
input_matrix: np.ndarray
Reshaped input matrix.
"""
num_vars = model_inputs.shape[1]
input_matrix = model_inputs.reshape(num_trajectories, trajectory_size,
num_vars)
return input_matrix | 32,172 |
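A quick shape check:

import numpy as np

model_inputs = np.arange(18).reshape(6, 3)  # 6 samples of 3 variables
_reshape_model_inputs(model_inputs, num_trajectories=2, trajectory_size=3).shape
# -> (2, 3, 3)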
def _create_module(module_name):
"""ex. mod = _create_module('tenjin.util')"""
    import sys
    from types import ModuleType
mod = ModuleType(module_name.split('.')[-1])
sys.modules[module_name] = mod
return mod | 32,173 |
def test_file_lock_timeout_error(project, runner):
"""Test file lock timeout."""
with FileLock(".renku.lock"):
result = runner.invoke(cli, ["dataset", "import", "10.5281/zenodo.3715335"])
assert "Unable to acquire lock." in result.output | 32,174 |
def VIS(img, **normalization):
"""Unmixes according to the Vegetation-Impervious-Soil (VIS) approach.
Args:
img: the ee.Image to unmix.
**normalization: keyword arguments to pass to fractionalCover(),
like shade_normalize=True.
Returns:
unmixed: a 3-band image file in order of (soil-veg-impervious).
"""
endmembers = [soil, pv, urban]
endmember_names = ["soil", "pv", "impervious"]
unmixed = fractionalCover(img, endmembers, endmember_names, **normalization)
return unmixed | 32,175 |
def create_labels(mapfile, Nodes=None):
"""
Mapping from the protein identifier to the group
Format :
##protein start_position end_position orthologous_group protein_annotation
:param Nodes: set -- create mapping only for these set of nodes
:param mapfile: file that contains the mapping for the organism
:return:
"""
    labels = defaultdict(str)
    with open(mapfile) as f:
        while True:
            line = f.readline().strip()
            if not line:
                break
            sp = line.split("\t")
            if not Nodes:
                labels[sp[0]] = sp[3]
            elif sp[0] in Nodes:
                labels[sp[0]] = sp[3]
return labels | 32,176 |
def threshold_calc(CurrentValue):
"""Adaptive threshold calculation to adapt to different mel values or environments"""
global filteredY, avgFilter, stdFilter, count, signal_time, used
if abs(CurrentValue - avgFilter) > config.THRESHOLD * stdFilter:
if CurrentValue > avgFilter:
if count > 31:
count = 0
count += 1
print("MIDI")
print("\n")
print("\n")
print("\n")
if used == 0:
conn.write(Message(NoteOn(count, 69), 1))
used = 1
filteredTmp = config.INFLUENCE * CurrentValue + (1 - config.INFLUENCE) * filteredY[-1]
else:
used = 0
filteredTmp = CurrentValue
print("\n")
filteredY[:-1] = filteredY[1:]
filteredY[-1] = filteredTmp
avgFilter = np.mean(filteredY[:(config.N_ROLLING_FFT_HISTORY)])
stdFilter = np.std(filteredY[:(config.N_ROLLING_FFT_HISTORY)]) | 32,177 |
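The function above is a MIDI-triggering variant of the "smoothed z-score" peak detector: a sample counts as a peak when it deviates from the rolling mean by more than THRESHOLD rolling standard deviations. A self-contained sketch of the core idea, with illustrative names and constants (not taken from the original config):

import numpy as np

THRESHOLD, INFLUENCE, WINDOW = 3.0, 0.1, 32
history = np.zeros(WINDOW)

def is_peak(sample):
    global history
    peak = abs(sample - history.mean()) > THRESHOLD * history.std()
    # damp peaks before they enter the history so they don't drag the mean up
    filtered = INFLUENCE * sample + (1 - INFLUENCE) * history[-1] if peak else sample
    history = np.append(history[1:], filtered)
    return peak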
def provide(annotation_path=None, images_dir=None):
"""Return image_paths and class labels.
Args:
annotation_path: Path to an anotation's .json file.
images_dir: Path to images directory.
Returns:
image_files: A list containing the paths of images.
annotation_dict: A dictionary containing the class labels of each
image.
Raises:
ValueError: If annotation_path does not exist.
"""
if not os.path.exists(annotation_path):
raise ValueError('`annotation_path` does not exist.')
    with open(annotation_path, 'r') as annotation_json:
        annotation_list = json.load(annotation_json)
image_files = []
annotation_dict = {}
for d in annotation_list:
image_name = d.get('image_id')
disease_class = d.get('disease_class')
if images_dir is not None:
image_name = os.path.join(images_dir, image_name)
image_files.append(image_name)
annotation_dict[image_name] = disease_class
return image_files, annotation_dict | 32,178 |
def test_plot_ci():
"""
Tests the ci_plot function to make sure the outputs are correct.
Returns
--------
None
The test should pass and no asserts should be displayed.
9 tests in total
"""
# test integration with calculate_boot_stats function
test_stat = calculate_boot_stats([1, 2, 3, 4],
1000,
level=0.95,
random_seed=123)
    assert isinstance(test_stat, dict)
# tests with invalid input type of title
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
title=123)
assert str(e.value) == (
"The value of the argument 'title' must be type of str."
)
# tests with invalid input type of title
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
title=True)
assert str(e.value) == (
"The value of the argument 'title' must be type of str."
)
# tests with invalid input type of x_axis
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
x_axis=123)
assert str(e.value) == (
"The value of the argument 'x_axis' must be type of str."
)
# tests with invalid input type of x_axis
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
x_axis=True)
assert str(e.value) == (
"The value of the argument 'x_axis' must be type of str."
)
# tests with invalid input type of y_axis
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
y_axis=123)
assert str(e.value) == (
"The value of the argument 'y_axis' must be type of str."
)
# tests with invalid input type of y_axis
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
y_axis=True)
assert str(e.value) == (
"The value of the argument 'y_axis' must be type of str."
)
# tests with invalid input type of path
with raises(TypeError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7],
1000,
n=100,
ci_level=0.95,
y_axis="",
path=0.5)
assert str(e.value) == (
"The value of the argument 'path' must be type of str or None."
)
    # tests if a plot was drawn by the function
histogram = plot_ci([1, 2, 3, 4, 5, 6, 7], 1000,
n=100,
ci_level=0.95,
ci_random_seed=123,
title="Bootstrap",
path="./tests/")
assert histogram.gcf().number > 0, "Chart was not created correctly"
# tests with invalid input value of path
with raises(NameError) as e:
plot_ci([1, 2, 3, 4, 5, 6, 7], 1000, path="Users/")
assert str(e.value) == (
"The folder path you specified is invalid."
) | 32,179 |
def sortKSUID(ksuidList):
"""
sorts a list of ksuids by their date (recent in the front)
"""
    return sorted(ksuidList, key=lambda x: x.getTimestamp(), reverse=True) | 32,180 |
def tearDownModule():
"""Delete the test instance, if it was created."""
if CREATE_INSTANCE:
Config.INSTANCE.delete() | 32,181 |
def plot_worm_data(w):
"""
Plot the tempo, loudness, and structural annotations of a worm
file against time.
:param w: a WormFile object
"""
ax1 = plt.subplot(313)
plt.plot(w.data[:,0],w.data[:,3],'|')
plt.ylabel('Structure')
plt.xlabel('Time (seconds)')
ax2 = plt.subplot(312, sharex=ax1)
plt.plot(w.data[:,0],w.data[:,2])
plt.ylabel('Loudness (Sone)')
plt.setp( ax2.get_xticklabels(), visible=False)
ax3 = plt.subplot(311, sharex=ax1)
plt.plot(w.data[:,0],w.data[:,1])
plt.ylabel('Tempo (BPM)')
plt.setp( ax3.get_xticklabels(), visible=False)
plt.title(u'{0} by {1} ({2})'.format(w.header['Piece'],
w.header['Performer'],
w.header['YearOfRecording']))
plt.show() | 32,182 |
def is_installable_dir(path): # type: (str) -> bool
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, "setup.py")
if os.path.isfile(setup_py):
return True
return False | 32,183 |
def get_vocabulary(list_):
"""
    Computes the vocabulary for the provided list of tokenized sentences
    :param list_: a list of tokenized sentences (each a list of word strings)
    :return: a list of all the words sorted by decreasing count, and a Counter with key, val = word, count
"""
all_the_words = []
for text in list_:
for word in text:
all_the_words.append(word)
vocabulary_counter = Counter(all_the_words)
vocabulary_sorted = list(map(lambda x: x[0], sorted(vocabulary_counter.items(), key=lambda x: -x[1])))
return vocabulary_sorted, vocabulary_counter | 32,184 |
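Example with pre-tokenized sentences (ties between equal counts keep first-seen order):

sentences = [["the", "cat"], ["the", "dog"], ["the", "cat", "sat"]]
vocab, counts = get_vocabulary(sentences)
# vocab  -> ['the', 'cat', 'dog', 'sat']
# counts -> Counter({'the': 3, 'cat': 2, 'dog': 1, 'sat': 1})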
def Import_Method():
"""The function Import_Method() allows the user to import an existing method and update the parameters.
The function calls the function Display_Method() to set the initial values of the widgets, and
PlotMethod() to display the graphical representation of the segments
"""
# Open a method file
name = askopenfilename(initialdir=Method_Dir_Std, filetypes =(("YAML files", "*.yaml"),("All Files","*.*")), title = "Choose a file.")
# Read the file and create dictionary List
with open(name,'r') as UseFile:
        List = yaml.load(UseFile, Loader=yaml.SafeLoader)
# Display content of the method in GUI
Display_Method(List) | 32,185 |
def localize_list(original, welsh):
"""Call localize on each element in a list."""
for index, value in enumerate(original):
localize(original, welsh, index, value) | 32,186 |
def lod_build_config(slurm_nodes, mds_list, oss_list, fsname, mdtdevs, ostdevs,
inet, mountpoint, index):
"""
Build lod configuration for LOD instance
"""
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
# take slurm nodes directly if found
node_list = slurm_nodes
client_list = slurm_nodes
if slurm_nodes:
logging.debug("Slurm node: %s.", slurm_nodes)
if inet is not None:
net = inet
else:
net = LOD_DEFAULT_NET
device = None
if mdtdevs:
mdt_device = mdtdevs
else:
logging.error("no mdtdevs found")
return None
if ostdevs:
ost_device = ostdevs
else:
logging.error("no ostdevs found")
return None
if fsname is not None:
fs_name = fsname
else:
fs_name = LOD_DEFAULT_FSNAME
if mountpoint:
mount_point = mountpoint
else:
mount_point = LOD_DEFAULT_MOUNTPOINT
return LodConfig(node_list, device, mdt_device, ost_device,
mds_list, oss_list, client_list,
net, fs_name, mount_point, index) | 32,187 |
def check_field(rule: tuple, field: int) -> bool:
"""check if a field is valid given a rule"""
for min_range, max_range in rule:
if min_range <= field <= max_range:
return True
return False | 32,188 |
def test_extern_vitis_ai_resnet18(dpu_target):
"""Test first part of Vitis AI on-the-fly quantization runtime with ResNet 18 model"""
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
ref_mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
i_data, **params
)
verify_result(
mod,
{"data": i_data},
(1, 1000),
ref_res.numpy(),
tol=1e-5,
params=params,
dpu_target=dpu_target,
tvm_ops=7,
) | 32,189 |
def merge_adjacent(gen):
"""Merge adjacent messages that compare equal"""
gen = iter(gen)
    last = next(gen)
for this in gen:
if this.merge_key == last.merge_key:
last.merge(this)
elif last < this:
yield last
last = this
else:
raise AssertionError('Bad order, %s > %s' % (last, this))
yield last | 32,190 |
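merge_adjacent() assumes message objects exposing a merge_key attribute, a merge() method, and an ordering; a hypothetical minimal type satisfying that interface:

class Msg:
    def __init__(self, key, count=1):
        self.merge_key, self.count = key, count
    def merge(self, other):
        self.count += other.count
    def __lt__(self, other):
        return self.merge_key < other.merge_key

out = list(merge_adjacent(iter([Msg('a'), Msg('a'), Msg('b')])))
# -> two messages: Msg('a') with count == 2, then Msg('b')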
def split(value, precision=1):
"""
Split `value` into value and "exponent-of-10", where "exponent-of-10" is a
multiple of 3. This corresponds to SI prefixes.
Returns tuple, where the second value is the "exponent-of-10" and the first
value is `value` divided by the "exponent-of-10".
Args
----
value : int, float
Input value.
precision : int
Number of digits after decimal place to include.
Returns
-------
tuple
The second value is the "exponent-of-10" and the first value is `value`
divided by the "exponent-of-10".
Examples
--------
.. code-block:: python
si_prefix.split(0.04781) -> (47.8, -3)
si_prefix.split(4781.123) -> (4.8, 3)
See :func:`si_format` for more examples.
"""
negative = False
digits = precision + 1
if value < 0.:
value = -value
negative = True
elif value == 0.:
return 0., 0
expof10 = int(math.log10(value))
if expof10 > 0:
expof10 = (expof10 // 3) * 3
else:
expof10 = (-expof10 + 3) // 3 * (-3)
value *= 10 ** (-expof10)
if value >= 1000.:
value /= 1000.0
expof10 += 3
elif value >= 100.0:
digits -= 2
elif value >= 10.0:
digits -= 1
if negative:
value *= -1
return value, int(expof10) | 32,191 |
def test_pipeline_slice():
"""Testing something."""
drop_num1 = SilentDropStage('num1')
drop_num2 = SilentDropStage('num2')
drop_char = SilentDropStage('char')
pipeline = PdPipeline([drop_num1, drop_num2, drop_char])
assert len(pipeline) == 3
pipeline = pipeline[0:2]
assert len(pipeline) == 2
df = _test_df()
res_df = pipeline.apply(df, verbose=True)
assert 'num1' not in res_df.columns
assert 'num2' not in res_df.columns
assert 'char' in res_df.columns | 32,192 |
def load_settings():
"""Load JSON data from settings file
:return: dictionary with settings details
:rtype: dict
"""
if os.path.exists(config.SETTINGS_FILE):
with open(config.SETTINGS_FILE, 'r') as sfile:
settings = json.loads(sfile.read())
else:
settings = {
'Chapters': config.CHAPTERS,
'Status': [],
}
return settings | 32,193 |
def init_logging(logfile=None, debug=False):
"""Customize log and send it to console and logfile"""
logging.addLevelName(25, 'PRINT')
if logging.getLoggerClass().__name__ == 'FatalLogger': # disable log deduplication by Pelican
logging.getLoggerClass().limit_filter.LOGS_DEDUP_MIN_LEVEL = logging.INFO # still ON for logger.info or less
colorama.init()
root_logger = logging.getLogger()
root_logger.setLevel(25) # don't log INFO or lower
# console handler
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setFormatter(ColorFormatter())
root_logger.addHandler(console_handler)
# file handler
if logfile:
file_handler = logging.FileHandler(logfile, mode='a')
file_handler.setFormatter(TextFormatter())
file_handler.setLevel(logging.INFO)
root_logger.addHandler(file_handler) | 32,194 |
import contextlib


@contextlib.contextmanager
def lock_access(lock: threading.Lock):
    """
    Context manager syntactic sugar for lock
    acquisition and release
    """
    lock.acquire()
    try:
        yield
    finally:
        lock.release() | 32,195 |
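Typical use:

lock = threading.Lock()
with lock_access(lock):
    ...  # critical section; the lock is released even if this block raises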
def test_cert(host, port=443, timeout=5, **kwargs):
"""Test that a cert is valid on a site.
Args:
host (:obj:`str`):
hostname to connect to.
can be any of: "scheme://host:port", "scheme://host", or "host".
port (:obj:`str`, optional):
port to connect to on host.
If no :PORT in host, this will be added to host.
Defaults to: 443
timeout (:obj:`str`, optional):
Timeout for connect/response.
Defaults to: 5.
kwargs: passed thru to requests.get()
Returns:
(:obj:`tuple` of (:obj:`bool`, :obj:`Exception`)):
True / False if cert was valid.
        Exception that was thrown if cert not valid, or None if successful.
"""
kwargs.setdefault("timeout", timeout)
kwargs.setdefault("url", build_url(host=host, port=port))
try:
requests.get(**kwargs)
return (True, None)
except requests.exceptions.SSLError as exc:
return (False, exc) | 32,196 |
def get_predicates(): # noqa: E501
"""get_predicates
Get a list of predicates used in statements issued by the knowledge source # noqa: E501
:rtype: List[BeaconPredicate]
"""
return controller_impl.get_predicates() | 32,197 |
def CheckUpdates():
"""Check for updates and inform the user.
"""
manager = update_manager.UpdateManager()
try:
manager.PerformUpdateCheck()
  # pylint:disable=broad-except
  # We never want this to escape, ever. Only messages printed should reach
  # the user.
except Exception:
pass | 32,198 |
def noisy_job_stage3(aht, ht, zz, exact=False):
"""Adds noise to decoding circuit.
Args:
=====
aht, ht, zz : numeric
Circuit parameters for decoding circuit
exact : bool
If True, works with wavefunction
Returns:
========
noisy_circuit : cirq.Circuit
Noisy version of input circuit
    param_resolvers : list
        ParamResolvers for the sweep over the noisy circuit's parameters
"""
job = Job(decoder_circuit(aht, ht, zz, exact))
noisy = DepolarizerChannel(probability=noise_level)
noisy_job = noisy.transform_job(job)
param_resolvers = [ParamResolver({k:v for k, v in e})
for e in noisy_job.sweep.param_tuples()]
return noisy_job.circuit, param_resolvers | 32,199 |