content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_spectra_onepixel(data, indx, MakeMock, seed, log, ntarget,
                         maxiter=1, no_spectra=False, calib_only=False):
    """Wrapper function to generate spectra for all targets on a single healpixel.

    Parameters
    ----------
    data : :class:`dict`
        Dictionary with all the mock data (candidate mock targets).
    indx : :class:`int` or :class:`numpy.ndarray`
        Indices of candidate mock targets to consider.
    MakeMock : :class:`desitarget.mock.mockmaker` object
        Object to assign spectra to each target class.
    seed: :class:`int`
        Seed for the random number generator.
    log : :class:`desiutil.logger`
        Logger object.
    ntarget : :class:`int`
        Desired number of targets to generate.
    maxiter : :class:`int`
        Maximum number of iterations to generate targets.
    no_spectra : :class:`bool`, optional
        Do not generate spectra, e.g., for use with quicksurvey. Defaults to False.
    calib_only : :class:`bool`, optional
        Use targets as calibration (standard star) targets, only. Defaults to False.
        NOTE(review): this flag is never read inside this function body --
        confirm whether it is consumed elsewhere or is dead.

    Returns
    -------
    targets : :class:`astropy.table.Table`
        Target catalog.
    truth : :class:`astropy.table.Table`
        Corresponding truth table.
    objtruth : :class:`astropy.table.Table`
        Corresponding objtype-specific truth table (if applicable).
    trueflux : :class:`numpy.ndarray`
        Array [npixel, ntarget] of observed-frame spectra. Only computed
        and returned for non-sky targets and if no_spectra=False.
    """
    targname = data['TARGET_NAME']
    rand = np.random.RandomState(seed)
    # Accumulators for per-iteration chunks; stacked into tables/arrays at
    # the end (returned as empty lists when nothing is generated).
    targets = list()
    truth = list()
    objtruth = list()
    trueflux = list()
    if ntarget == 0:
        return [targets, truth, objtruth, trueflux]
    # Generate the spectra iteratively until we achieve the required target
    # density. Randomly divide the possible targets into each iteration.
    iterseeds = rand.randint(2**31, size=maxiter)
    # NOTE: shuffles the caller's index array in place.
    rand.shuffle(indx)
    iterindx = np.array_split(indx, maxiter)
    makemore, itercount, ntot = True, 0, 0
    while makemore:
        chunkflux, _, chunktargets, chunktruth, chunkobjtruth = MakeMock.make_spectra(
            data, indx=iterindx[itercount], seed=iterseeds[itercount], no_spectra=no_spectra)
        MakeMock.select_targets(chunktargets, chunktruth, targetname=data['TARGET_NAME'])
        # Keep only the candidates that passed target selection (nonzero bitmask).
        keep = np.where(chunktargets['DESI_TARGET'] != 0)[0]
        #if 'CONTAM_NAME' in data.keys():
        #    import pdb ; pdb.set_trace()
        nkeep = len(keep)
        if nkeep > 0:
            ntot += nkeep
            log.debug('Generated {} / {} ({} / {} total) {} targets on iteration {} / {}.'.format(
                nkeep, len(chunktargets), ntot, ntarget, targname, itercount+1, maxiter))
            targets.append(chunktargets[keep])
            truth.append(chunktruth[keep])
            if len(chunkobjtruth) > 0: # skies have no objtruth
                objtruth.append(chunkobjtruth[keep])
            if not no_spectra:
                trueflux.append(chunkflux[keep, :])
        itercount += 1
        # Stop when out of iterations or enough targets were produced.
        if itercount == maxiter or ntot >= ntarget:
            if maxiter > 1:
                log.debug('Generated {} / {} {} targets after {} iterations.'.format(
                    ntot, ntarget, targname, itercount))
            makemore = False
        else:
            # Candidates that failed the cuts get re-queued over the
            # remaining iterations for another try.
            need = np.where(chunktargets['DESI_TARGET'] == 0)[0]
            #import matplotlib.pyplot as plt
            #noneed = np.where(chunktargets['DESI_TARGET'] != 0)[0]
            #gr = -2.5 * np.log10( chunktargets['FLUX_G'] / chunktargets['FLUX_R'] )
            #rz = -2.5 * np.log10( chunktargets['FLUX_R'] / chunktargets['FLUX_Z'] )
            #plt.scatter(rz[noneed], gr[noneed], color='red', alpha=0.5, edgecolor='none', label='Made Cuts')
            #plt.scatter(rz[need], gr[need], alpha=0.5, color='green', edgecolor='none', label='Failed Cuts')
            #plt.legend(loc='upper left')
            #plt.show()
            if len(need) > 0:
                # Distribute the objects that didn't pass target selection
                # to the remaining iterations.
                iterneed = np.array_split(iterindx[itercount - 1][need], maxiter - itercount)
                for ii in range(maxiter - itercount):
                    iterindx[ii + itercount] = np.hstack( (iterindx[itercount:][ii], iterneed[ii]) )
    # Stack the per-iteration chunks and, if we overshot, randomly trim down
    # to exactly `ntarget` (the same `keep` rows are applied to every output).
    if len(targets) > 0:
        targets = vstack(targets)
        truth = vstack(truth)
        if ntot > ntarget: # Only keep up to the number of desired targets.
            log.debug('Removing {} extraneous targets.'.format(ntot - ntarget))
            keep = rand.choice(ntot, size=ntarget, replace=False)
            targets = targets[keep]
            truth = truth[keep]
        if len(objtruth) > 0: # skies have no objtruth
            objtruth = vstack(objtruth)
            if ntot > ntarget:
                objtruth = objtruth[keep]
        if not no_spectra:
            trueflux = np.concatenate(trueflux)
            if ntot > ntarget:
                trueflux = trueflux[keep, :]
    return [targets, truth, objtruth, trueflux]
def one_way_mi(df, feature_list, group_column, y_var, bins):
    """Compute one-way mutual information between each feature and both a
    group variable and a target variable.

    Parameters
    ----------
    df : pandas DataFrame
        Contains the model features plus the target column and group column.
    feature_list : list of str
        Names of the feature columns to evaluate.
    group_column : string
        Column used for bias testing; should contain numeric categories.
    y_var : string
        Name of the target variable column.
    bins : tuple
        Number of histogram bins for each dimension.

    Returns
    -------
    mi_table : pandas DataFrame
        One row per feature, with raw MI columns for the group variable and
        the target, plus "_scaled" columns where the largest MI is 1.
    """
    group_vals = df[group_column].values
    target_vals = df[y_var].values

    def _mi_against(other_vals):
        # 2-D histogram serves as a contingency table; MI is computed
        # directly from its counts.
        return [
            mutual_info_score(
                None,
                None,
                contingency=np.histogramdd(
                    [np.array(df[feature]), other_vals], bins=bins
                )[0],
            )
            for feature in feature_list
        ]

    mi_table = pd.DataFrame({
        'feature': feature_list,
        group_column: _mi_against(group_vals),
        y_var: _mi_against(target_vals),
    })
    # Scale each MI column so its maximum is 1; this makes relative
    # importance to bias vs. performance easier to interpret.
    for column in (group_column, y_var):
        mi_table["{}_scaled".format(column)] = (
            mi_table[column] / mi_table[column].max()
        )
    return mi_table
import tqdm
def draw_parametric_bs_reps_mle(
    mle_fun, gen_fun, data, args=(), size=1, progress_bar=False
):
    """Draw parametric bootstrap replicates of maximum likelihood estimator.

    Parameters
    ----------
    mle_fun : function
        Function with call signature mle_fun(data, *args) that computes
        a MLE for the parameters
    gen_fun : function
        Function to randomly draw a new data set out of the model
        distribution parametrized by the MLE. Must have call
        signature `gen_fun(*params, size)`.
    data : one-dimensional Numpy array
        Array of measurements
    args : tuple, default ()
        Arguments to be passed to `mle_fun()`.
    size : int, default 1
        Number of bootstrap replicates to draw.
    progress_bar : bool, default False
        Whether or not to display progress bar.

    Returns
    -------
    output : numpy array
        Bootstrap replicates of MLEs.
    """
    params = mle_fun(data, *args)

    # BUG FIX: the module is imported as `import tqdm`, so the progress-bar
    # class is tqdm.tqdm; calling the module object raised a TypeError.
    if progress_bar:
        iterator = tqdm.tqdm(range(size))
    else:
        iterator = range(size)

    # BUG FIX: per the documented signatures, *args belongs to mle_fun, not
    # gen_fun -- gen_fun is parametrized solely by the MLE params (plus size).
    return np.array(
        [mle_fun(gen_fun(*params, size=len(data)), *args) for _ in iterator]
    )
def parse_subpalette(words):
    """Turn palette entry into a list of color-to-index mappings.

    For example, #AAA=2 or #AAAAAA=2 means that (170, 170, 170) will be
    recognized as color 2 in that subpalette.  If no =number is specified,
    indices are recognized sequentially from 1.

    Return a list of ((r, g, b), index) tuples.
    """
    mappings = []
    for position, word in enumerate(words, start=1):
        colorpart, sep, indexpart = word.partition("=")
        # An explicit "=n" suffix wins; otherwise use the 1-based position.
        index = int(indexpart) if sep else position
        mappings.append((parse_color(colorpart), index))
    return mappings
import json
def data_word2vec(input_file, num_labels, word2vec_model):
    """
    Create the research data tokenindex based on the word2vec model file.
    Return the class _Data() (includes the data tokenindex and data labels).

    Args:
        input_file: The research data (one JSON record per line)
        num_labels: The number of classes
        word2vec_model: The word2vec model file
    Returns:
        The Class _Data() (includes the data tokenindex and data labels)
    Raises:
        IOError: If the input file is not the .json file
    """
    # Map each vocabulary word to its index in the word2vec model.
    vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])
    def _token_to_index(content):
        # Convert tokens to vocab indices; out-of-vocabulary tokens map to 0.
        result = []
        for item in content:
            word2id = vocab.get(item)
            if word2id is None:
                word2id = 0
            result.append(word2id)
        return result
    def _create_onehot_labels(labels_index):
        # Multi-hot encode the label indices into a length-num_labels vector.
        label = [0] * num_labels
        for item in labels_index:
            label[int(item)] = 1
        return label
    if not input_file.endswith('.json'):
        raise IOError("[Error] The research data is not a json file. "
                      "Please preprocess the research data into the json file.")
    with open(input_file) as fin:
        testid_list = []
        content_index_list = []
        labels_list = []
        onehot_labels_list = []
        labels_num_list = []
        total_line = 0
        # Each line is an independent JSON record.
        for eachline in fin:
            data = json.loads(eachline)
            testid = data['testid']
            features_content = data['features_content']
            labels_index = data['labels_index']
            labels_num = data['labels_num']
            testid_list.append(testid)
            content_index_list.append(_token_to_index(features_content))
            labels_list.append(labels_index)
            onehot_labels_list.append(_create_onehot_labels(labels_index))
            labels_num_list.append(labels_num)
            total_line += 1
    # Lightweight read-only container; the properties expose the parsed
    # lists captured from the enclosing scope (closure variables).
    class _Data:
        def __init__(self):
            pass
        @property
        def number(self):
            # Total number of records parsed.
            return total_line
        @property
        def testid(self):
            return testid_list
        @property
        def tokenindex(self):
            return content_index_list
        @property
        def labels(self):
            return labels_list
        @property
        def onehot_labels(self):
            return onehot_labels_list
        @property
        def labels_num(self):
            return labels_num_list
    return _Data()
from os import scandir
from pathlib import Path
def find_version_files(
    root_dir: str, dont_search_dir_names: set = {"tests", "test"}
) -> list:
    """Recursively collect ``__init__.py`` files under a directory tree.

    Hidden directories (leading ``.``) and directories whose lower-cased,
    alphabet-only name appears in ``dont_search_dir_names`` are skipped.

    :param str root_dir: Directory to search from; ``~`` is expanded.
    :param set dont_search_dir_names: Directory names to skip. Read-only
        here, so the (mutable) default is never modified.
    :return: Paths of every ``__init__.py`` found.
    :rtype: List[Path]
    :raises ValueError: If ``root_dir`` does not exist or is not a directory.
    """
    root_dir = Path(str(root_dir)).expanduser()
    dont_search_dir_names = set(map(str, dont_search_dir_names))
    # Validate explicitly instead of via ``assert`` so the check survives
    # ``python -O`` (asserts are stripped under optimization).
    if not (root_dir.exists() and root_dir.is_dir()):
        raise ValueError(
            "Root directory is invalid: it either does not exist or is not a directory"
        )

    def recursive_find(rd, dsdn):
        version_files = []
        rd = Path(str(rd)).expanduser()
        with scandir(str(rd)) as scan_rd:
            for entry in scan_rd:
                if entry.is_dir() and not (
                    entry.name.startswith(".")
                    # NONE_ALPHABET is a module-level regex (defined outside
                    # this snippet) that strips non-alphabet characters.
                    or NONE_ALPHABET.sub("", entry.name.lower()) in dsdn
                ):
                    version_files.extend(recursive_find(entry.path, dsdn))
                elif entry.name == "__init__.py" and entry.is_file():
                    version_files.append(Path(entry.path))
        return version_files

    return recursive_find(root_dir, dont_search_dir_names)
def object_difference():
    """Compute the difference parts between selected shapes.
    - Select two objects.

    For every ordered pair (a, b) among the selected objects, a red part
    (a cut by b) and a green part (b cut by a) are created inside
    WorkFeatures/WorkObjects/, and the originals are made transparent.

    Original code from HighlightDifference.FCMacro
    https://github.com/FreeCAD/FreeCAD-macros/blob/master/Utility/HighlightDifference.FCMacro
    Authors = 2015 Gaël Ecorchard (Galou)
    """
    global verbose
    msg = verbose
    m_actDoc = get_ActiveDocument(info=msg)
    if m_actDoc is None:
        return None
    createFolders('WorkObjects')
    error_msg =\
        "INCORRECT Object(s) Selection:\n" +\
        "You Must Select Two(2) Objects !"
    # NOTE(review): `result_msg` and `name` are assigned but never used below.
    result_msg = ": Difference object created into WorkFeatures/WorkObjects/"
    name = "Part"
    part = "Part::Feature"
    grp = "WorkObjects"
    try:
        selectionObjects = Gui.Selection.getSelection()
        if len(selectionObjects) < 2:
            printError_msg(error_msg)
            return
        object_list = []
        for obj in selectionObjects:
            object_list.append(obj)
        # Pairwise over every selected object, not just the first two.
        for i, object_a in enumerate(object_list):
            shape_a = object_a.Shape
            label_a = object_a.Label
            for object_b in object_list[(i + 1):]:
                shape_b = object_b.Shape
                label_b = object_b.Label
                # a - b : shown in red when the cut is non-degenerate.
                # NOTE(review): str(label.encode('utf-8')) is a Python 2
                # idiom; on Python 3 it renders as "b'...'" -- confirm.
                shape_addition = shape_a.cut(shape_b)
                if shape_addition.Volume < 1e-6:
                    print_gui_msg("No Cut of " +\
                        str(label_a.encode('utf-8')) +\
                        " by " +\
                        str(label_b.encode('utf-8')))
                else:
                    print_msg("Volume of the red " +\
                        str(label_a.encode('utf-8')) +\
                        " Cut by " +\
                        str(label_b.encode('utf-8')) +\
                        ": " +\
                        str(shape_addition.Volume) + "\n")
                    if not(App.ActiveDocument.getObject(grp)):
                        App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
                    added = FreeCAD.ActiveDocument.addObject(part)
                    added.Label = "Cut red (" +\
                        str(label_a.encode('utf-8')) +\
                        "-" +\
                        str(label_b.encode('utf-8')) +\
                        ")"
                    added.Shape = shape_addition
                    App.ActiveDocument.getObject(grp).addObject(added)
                    added.ViewObject.ShapeColor = (1.0, 0.0, 0.0, 1.0)
                # b - a : shown in green when the cut is non-degenerate.
                shape_removal = shape_b.cut(shape_a)
                if shape_removal.Volume < 1e-6:
                    print_gui_msg("No Cut of " +\
                        str(label_b.encode('utf-8')) +\
                        " by " +\
                        str(label_a.encode('utf-8')))
                else:
                    print_msg("Volume of the green " +\
                        str(label_b.encode('utf-8')) +\
                        " Cut by " +\
                        str(label_a.encode('utf-8')) +\
                        ": " +\
                        str(shape_removal.Volume) + "\n")
                    if not(App.ActiveDocument.getObject(grp)):
                        App.ActiveDocument.addObject("App::DocumentObjectGroup", grp)
                    removed = FreeCAD.ActiveDocument.addObject(part)
                    removed.Label = "Cut green (" +\
                        str(label_b.encode('utf-8')) +\
                        "-" +\
                        str(label_a.encode('utf-8')) +\
                        ")"
                    removed.Shape = shape_removal
                    App.ActiveDocument.getObject(grp).addObject(removed)
                    removed.ViewObject.ShapeColor = (0.0, 0.5, 0.0, 1.0)
                # Fade the originals so the difference parts stand out.
                object_a.ViewObject.Transparency = 80
                object_b.ViewObject.Transparency = 80
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; consider `except Exception:`.
        printError_msg(error_msg)
def diffusionkernel(sigma, N=4, returnt=False):
    """diffusionkernel(sigma, N=4, returnt=False)

    A discrete analog to the continuous Gaussian kernel, as proposed by
    Toni Lindeberg.  N is the tail length factor (relative to sigma);
    a non-positive N requests a fixed tail of abs(N) samples.
    When returnt is True, the sample positions t are returned as well.
    """
    sigma = float(sigma)
    scale = sigma * sigma  # the scale parameter, often called t

    # Tail length: N sigmas (rounded up) for positive N, |N| samples otherwise.
    if N > 0:
        nstart = int(np.ceil(N * sigma)) + 1
    else:
        nstart = abs(N) + 1

    t = np.arange(-nstart, nstart + 1, dtype='float64')
    k = np.zeros_like(t)

    # Seed the backward recurrence at the right-hand tail (t[nstart*2] is
    # the outermost sample, t[nstart] is the center) ...
    k[nstart + nstart] = 0
    k[(nstart - 1) + nstart] = 0.01
    # ... and run it down to the center:
    #     I(n-1) = (2n / scale) * I(n) + I(n+1)
    for n in range(nstart - 1, 0, -1):
        k[(n - 1) + nstart] = 2 * n / scale * k[n + nstart] + k[(n + 1) + nstart]

    # The left half accumulates error, so mirror the (reliable) right half.
    k[:nstart] = np.flipud(k[-nstart:])

    # Drop the zero endpoints, then normalize.  The function we seek is
    # T = e^(-scale) * I(n, scale); normalization absorbs the e^(-scale)
    # factor, so it need not be applied explicitly.
    k = k[1:-1]
    t = t[1:-1]
    k = k / k.sum()

    return (k, t) if returnt else k
def CalculateTopologicalTorsionFingerprint(mol):
    """
    #################################################################
    Calculate Topological Torsion Fingerprints.

    Usage:
        result = CalculateTopologicalTorsionFingerprint(mol)

    Input: mol is a molecule object.

    Output: a 3-tuple of (number of fingerprint bits, dict mapping the
    positions where the molecule has some substructure to their counts,
    and the raw DataStructs object used for similarity calculations).
    #################################################################
    """
    fingerprint = Torsions.GetTopologicalTorsionFingerprint(mol)
    return (
        fingerprint.GetLength(),
        fingerprint.GetNonzeroElements(),
        fingerprint,
    )
from typing import List
from typing import Dict
def prepare_answer_extraction_samples(context: str, answer_list: List[Dict] = None):
    """Build answer-extraction samples, one per sentence of *context*.

    Args:
        context: str (assumed to be normalized via normalize_text)
        answer_list: [
            {'text': str, 'answer_start': int},
            {'text': str, 'answer_start': int},
            ...
        ]
            When given, a target string is prepared for every sample;
            otherwise target_text is None.

    Returns:
        list of dicts with keys "source_text" (the context with one sentence
        highlighted between <hl> markers), "target_text" (the "<sep>"-joined
        answers in that sentence, or None) and "answer_list".
    """
    prepare_target = True if answer_list else False

    # Split into sentences; one sample is emitted per sentence.
    sentence_list = sentence_tokenize(context)
    num_sentences = len(sentence_list)

    if prepare_target:
        answer_list_per_sentence = get_answer_list_per_sentence(sentence_list, answer_list)

    # prepare sources (and targets)
    samples = []
    for sentence_ind in range(num_sentences):
        source_text = "extract answers:"
        if prepare_target:
            answer_list = answer_list_per_sentence[sentence_ind]
            answer_list = [answer["text"] for answer in answer_list]
            if not answer_list:
                # No answers fall inside this sentence; skip the sample.
                continue
            answer_list = list(dict.fromkeys(answer_list))  # remove duplicate answers without changing the order
            target_text = " <sep> ".join(answer_list) + " <sep>"
        else:
            target_text = None

        # Rebuild the context with the current sentence highlighted.
        for sentence_ind2, sentence in enumerate(sentence_list):
            if sentence_ind == sentence_ind2:
                sentence = f"<hl> {sentence} <hl>"
            source_text = f"{source_text} {sentence}"
        source_text = source_text.strip()

        # BUG FIX: removed a dead no-op statement here
        # (`if sample["target_text"] is None: sample`) that referenced
        # `sample` before assignment and had no effect.
        sample = {"source_text": source_text, "target_text": target_text, "answer_list": answer_list}
        samples.append(sample)
    return samples
from typing import Optional
from typing import Callable
from typing import List
import abc
def mix_in(
    source: type,
    target: type,
    should_copy: Optional[Callable[[str, bool], bool]] = None,
) -> List[str]:
    """
    Copy all defined functions from mixin into target. It could be
    useful when you cannot inherit from mixin because incompatible
    metaclass. It does not copy abstract functions. If `source` is
    `ABCMeta`, will register `target` with it.

    ``should_copy``, when given, is called as ``should_copy(name,
    already_exists)`` -- where ``already_exists`` is True when ``target``
    already has a member of that name -- and must return True for the
    method to be copied.

    Returns list of copied methods.
    """
    mixed_in_methods = []
    try:
        abstract_methods = source.__abstractmethods__  # type:ignore
    except AttributeError:
        abstract_methods = set()
    target_members = dir(target)
    for n in dir(source):
        fn = getattr(source, n)
        if isfunction(fn) and n not in abstract_methods:
            # BUG FIX: this flag previously held `n not in target_members`,
            # the opposite of its name -- should_copy callbacks received an
            # inverted "already exists" value.
            already_exists = n in target_members
            if should_copy is None or should_copy(n, already_exists):
                setattr(target, n, fn)
                mixed_in_methods.append(n)
    if isinstance(source, abc.ABCMeta):
        source.register(target)
    return mixed_in_methods
def get_img_num_per_cls(cifar_version, imb_factor=None):
    """Return the number of images to keep per class for imbalanced CIFAR.

    Image counts follow an exponential decay across classes:
        n(c) = img_max * imb_factor ** (c / (num_classes - 1))
    so class 0 keeps img_max images and the last class keeps roughly
    img_max * imb_factor images.

    args:
        cifar_version: str, '10', '100', '20'
        imb_factor: float, imbalance factor: img_min/img_max,
            None if getting default cifar data number
    output:
        img_num_per_cls: a list of number of images per class
    """
    num_classes = int(cifar_version)
    img_max = img_num(cifar_version)
    if imb_factor is None:
        # Balanced dataset: every class keeps the maximum count.
        return [img_max] * num_classes
    return [
        int(img_max * (imb_factor ** (cls_idx / (num_classes - 1.0))))
        for cls_idx in range(num_classes)
    ]
def xml():
    """
    Returns the lti.xml file for the app.
    """
    try:
        return Response(render_template(
            'lti.xml'), mimetype='application/xml'
        )
    except Exception:
        # BUG FIX: was a bare ``except:``, which also traps SystemExit and
        # KeyboardInterrupt; catch Exception so only genuine errors reach
        # the fallback error page.
        app.logger.error("Error with XML.")
        return return_error('''Error with XML. Please refresh and try again. If this error persists,
        please contact support.''')
def log_get_level(client):
    """Query the current log level via an RPC call on *client*.

    Returns:
        Current log level
    """
    rpc_method = 'log_get_level'
    return client.call(rpc_method)
def cal_pivot(n_losses,network_block_num):
"""
Calculate the inserted layer for additional loss
"""
num_segments = n_losses + 1
num_block_per_segment = (network_block_num // num_segments) + 1
pivot_set = []
for i in range(num_segments - 1):
pivot_set.append(min(num_block_per_segment * (i + 1), network_block_num - 1))
return pivot_set | d23324fc39f2f1aeec807a4d65a51234a2b76cde | 23,314 |
import asyncio
async def scan_host(
    host: IPv4Address,
    semaphore: asyncio.Semaphore,
    timeout: int,
    verbose: bool,
):
    """Ping *host* once while holding *semaphore*, with timeout *timeout* s.

    The outcome is reported on standard output (failures only when
    *verbose* is set) and returned as a bool.
    """
    async with semaphore:
        try:
            delay = await aioping.ping(
                str(host), timeout, family=AddressFamily.AF_INET
            )
            print(f"{host} responded after {delay:.4f} ms")
            return True
        except (TimeoutError, OSError) as error:
            # TimeoutError is checked first so it is not reported as a
            # generic OS failure (it subclasses OSError on modern Pythons).
            if verbose:
                if isinstance(error, TimeoutError):
                    print(f"{host} has not responded")
                else:
                    print(
                        f"Ping to host {host} failed for the following reason: {error}"
                    )
            return False
def _emit_post_update_statements(
    base_mapper, uowtransaction, cached_connections, mapper, table, update
):
    """Emit UPDATE statements corresponding to value lists collected
    by _collect_post_update_commands().

    ``update`` is a sequence of (state, state_dict, mapper, connection,
    params) records; records are grouped by (connection, parameter keys)
    so homogeneous groups can be executed with executemany().
    """
    # Versioning applies only if the version column lives in this table.
    needs_version_id = (
        mapper.version_id_col is not None
        and mapper.version_id_col in mapper._cols_by_table[table]
    )
    def update_stmt():
        # WHERE clause matches on the primary key columns (and, when
        # versioning is in play, the expected version id), using bind
        # params named after each column's label.
        clauses = BooleanClauseList._construct_raw(operators.and_)
        for col in mapper._pks_by_table[table]:
            clauses.clauses.append(
                col == sql.bindparam(col._label, type_=col.type)
            )
        if needs_version_id:
            clauses.clauses.append(
                mapper.version_id_col
                == sql.bindparam(
                    mapper.version_id_col._label,
                    type_=mapper.version_id_col.type,
                )
            )
        stmt = table.update().where(clauses)
        if mapper.version_id_col is not None:
            stmt = stmt.return_defaults(mapper.version_id_col)
        return stmt
    # The statement is cached per (post_update, table) on the base mapper.
    statement = base_mapper._memo(("post_update", table), update_stmt)
    # execute each UPDATE in the order according to the original
    # list of states to guarantee row access order, but
    # also group them into common (connection, cols) sets
    # to support executemany().
    for key, records in groupby(
        update,
        lambda rec: (rec[3], set(rec[4])),  # connection # parameter keys
    ):
        rows = 0
        records = list(records)
        connection = key[0]
        # Whether DBAPI rowcounts can be trusted; RETURNING (used for
        # versioning) changes which dialect capability flag applies.
        assert_singlerow = (
            connection.dialect.supports_sane_rowcount
            if mapper.version_id_col is None
            else connection.dialect.supports_sane_rowcount_returning
        )
        assert_multirow = (
            assert_singlerow
            and connection.dialect.supports_sane_multi_rowcount
        )
        allow_multirow = not needs_version_id or assert_multirow
        if not allow_multirow:
            # One execute() per record so each rowcount can be summed.
            check_rowcount = assert_singlerow
            for state, state_dict, mapper_rec, connection, params in records:
                c = cached_connections[connection].execute(statement, params)
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
                rows += c.rowcount
        else:
            # Single executemany() over the whole group.
            multiparams = [
                params
                for state, state_dict, mapper_rec, conn, params in records
            ]
            check_rowcount = assert_multirow or (
                assert_singlerow and len(multiparams) == 1
            )
            c = cached_connections[connection].execute(statement, multiparams)
            rows += c.rowcount
            for state, state_dict, mapper_rec, connection, params in records:
                _postfetch_post_update(
                    mapper_rec,
                    uowtransaction,
                    table,
                    state,
                    state_dict,
                    c,
                    c.context.compiled_parameters[0],
                )
        if check_rowcount:
            # A mismatch means another transaction modified/deleted rows
            # out from under us (stale data).
            if rows != len(records):
                raise orm_exc.StaleDataError(
                    "UPDATE statement on table '%s' expected to "
                    "update %d row(s); %d were matched."
                    % (table.description, len(records), rows)
                )
        elif needs_version_id:
            util.warn(
                "Dialect %s does not support updated rowcount "
                "- versioning cannot be verified."
                % c.dialect.dialect_description
            )
import copy
def build_grid_search_config(params_dict):
    """
    Given one params json, build the N jsons produced by grid search.
    Currently the grid search only applies to the "optimization" section
    of the model and the train reader "config" section.
    :param params_dict:
    :return: param_config_list
    """
    model_params_dict = params_dict.get("model")
    opt_params = model_params_dict.get("optimization", None)
    if not opt_params:
        raise ValueError("optimization's params can't be none")
    # Fetch the second dict that takes part in the grid search.
    train_data_params = params_dict.get("dataset_reader").get("train_reader").get("config", None)
    if not train_data_params:
        raise ValueError("train_reader config's params can't be none")
    # Every dict that should be grid-searched goes into need_operate_params.
    need_operate_params = [opt_params, train_data_params]
    all_combination_list = []
    all_single_param_dict = []
    dict_list_key_num = []
    for one_operate_param in need_operate_params:
        combination_list, single_param_dict = get_list_params(one_operate_param)
        all_combination_list.extend(combination_list)
        all_single_param_dict.append(single_param_dict)
        # Remember how many grid axes each dict contributed, so its slice of
        # the combination tuple can be recovered below.
        dict_list_key_num.append(len(combination_list))
    task_param_list = []
    # Cartesian product over all grid axes: one task config per combination.
    for params in product(*all_combination_list):
        one_task_param = copy.deepcopy(params_dict)
        # The dicts to update inside the copied config; the order must stay
        # consistent with need_operate_params.
        need_update_param = [one_task_param["model"]["optimization"],
                             one_task_param["dataset_reader"]["train_reader"]["config"]]
        i = 0
        for index, one_single_param in enumerate(all_single_param_dict):
            single_param = copy.deepcopy(one_single_param)
            # Overlay this dict's slice of the combination onto its defaults.
            for one_grid in params[i:i + dict_list_key_num[index]]:
                single_param.update(one_grid)
            need_update_param[index].update(single_param)
            i += dict_list_key_num[index]
        task_param_list.append(one_task_param)
    return task_param_list
def cutoff_countmin_wscore(y, scores, score_cutoff, n_cm_buckets, n_hashes):
    """ Learned Count-Min (use predicted scores to identify heavy hitters)
    Args:
        y: true counts of each item (sorted, largest first), float - [num_items]
        scores: predicted scores of each item - [num_items]
        score_cutoff: threshold for heavy hitters
        n_cm_buckets: number of buckets of Count-Min
        n_hashes: number of hash functions
    Returns:
        loss_avg: estimation error
        space: space usage in bytes
    """
    if len(y) == 0:
        # BUG FIX: previously returned a bare 0, while every other path (and
        # the docstring) promises a (loss, space) pair; callers that unpack
        # would crash. Report zero loss and the fixed sketch space.
        return 0.0, n_cm_buckets * n_hashes * 4
    y_ccm = y[scores > score_cutoff]   # predicted heavy hitters -> unique buckets
    y_cm = y[scores <= score_cutoff]   # everything else goes through Count-Min
    loss_cf = 0  # put y_ccm into cutoff buckets, no loss
    loss_cm = count_min(y_cm, n_cm_buckets, n_hashes)
    assert len(y_ccm) + len(y_cm) == len(y)
    # Average of the per-group losses, weighted by total true counts.
    loss_avg = (loss_cf * np.sum(y_ccm) + loss_cm * np.sum(y_cm)) / np.sum(y)
    print('\tloss_cf %.2f\tloss_rd %.2f\tloss_avg %.2f' % (loss_cf, loss_cm, loss_avg))
    # 2 words (key + count) per cutoff item, 1 word per CM counter, 4 bytes each.
    space = len(y_ccm) * 4 * 2 + n_cm_buckets * n_hashes * 4
    return loss_avg, space
def test_lcc_like_epi():
    """
    Takes about 5 mins with epicyclic

    If burnin is too short (say 200 steps) won't actually find true solution
    """
    TORB_FUNC = trace_epicyclic_orbit
    # Present-day mean (XYZUVW); trace it back `age` Myr to get the birth mean.
    mean_now = np.array([50., -100., 25., 1.1, -7.76, 2.25])
    age = 10.
    mean = TORB_FUNC(mean_now, times=-age)
    # Spherical covariance: dx (pc) scatter in position, dv (km/s) in velocity.
    dx = 5.
    dv = 2.
    covmatrix = np.identity(6)
    covmatrix[:3,:3] *= dx**2
    covmatrix[3:,3:] *= dv**2
    true_comp = SphereComponent(
        attributes={'mean':mean, 'covmatrix':covmatrix, 'age':age,},
        trace_orbit_func=TORB_FUNC,
    )
    nstars = 1000
    # Near-zero measurement error so the fit is limited by the model, not noise.
    tiny_measurement_error = 1e-10
    # import ipdb; ipdb.set_trace()
    best_comp, chain, lnprob, data_dict = run_fit_helper(
        true_comp=true_comp, starcounts=nstars,
        measurement_error=tiny_measurement_error,
        trace_orbit_func=TORB_FUNC,
        run_name='lcc_like',
    )
    # The recovered component must match the truth within loose tolerances.
    assert np.allclose(true_comp.get_mean(), best_comp.get_mean(),
                       atol=3.0)
    assert np.allclose(true_comp.get_age(), best_comp.get_age(),
                       atol=1.0)
    assert np.allclose(true_comp.get_covmatrix(),
                       best_comp.get_covmatrix(),
                       atol=5.0)
    # Persist both components for later inspection.
    comp_filename = 'temp_data/{}_compfitter_lcc_like_true_and_best_comp.npy'.format(
        PY_VERS
    )
    SphereComponent.store_raw_components(comp_filename, [true_comp, best_comp])
    return true_comp, best_comp, lnprob
import numpy as np
def uniquePandasIndexMapping(inputColumn):
    """quickly maps the unique name entries back to input entries

    Keyword arguments:
    inputDataToAssess -- a SINGLE column from a pandas dataframe, presumably with
    duplications. Will create a frequency table and a mapping back to the source entries.

    NOTE(review): despite the docstring, the code requires a DataFrame with a
    hard-coded 'company' column (sort_values/rename below reference it), and
    the first sort_values(inplace=True) mutates the caller's frame -- confirm
    intended.
    """
    # Sort by company and remember each row's original position as 'userIndex'.
    inputColumn.sort_values(by=['company'], inplace=True)
    sortedInputColumn=inputColumn.reset_index()
    sortedInputColumn.rename(columns={"index":"userIndex"},inplace=True)
    # Frequency table of company names.
    # NOTE(review): in pandas >= 2.0 value_counts().reset_index() produces
    # columns ['company', 'count'] directly, so the rename below (which
    # assumes the older ['index', 'company'] layout) would misname columns --
    # verify the pinned pandas version.
    tableUniqueFullNameCounts=inputColumn.iloc[:,0].value_counts()
    tableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
    tableUniqueFullNameCounts.rename(columns={"company":"count","index":"company"},inplace=True)
    tableUniqueFullNameCounts.sort_values(by=['company'], inplace=True)
    sortedTableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
    sortedTableUniqueFullNameCounts['inputIndexMapping']=''
    # Walk the counts in sorted order; each unique name's rows occupy a
    # contiguous range of the sorted input, recorded as 'inputIndexMapping'.
    currentSum=0
    for index, row in sortedTableUniqueFullNameCounts.iterrows():
        currentRange=np.arange(currentSum,currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index])
        # NOTE(review): chained assignment (['col'].iloc[idx] = ...) relies on
        # writing through a view and raises SettingWithCopyWarning; consider
        # .at[index, 'inputIndexMapping'] -- confirm before changing.
        sortedTableUniqueFullNameCounts['inputIndexMapping'].iloc[index]=sortedInputColumn['userIndex'].iloc[currentRange].array
        currentSum=currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index]
    return sortedInputColumn, sortedTableUniqueFullNameCounts;
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
    """Greedy non maximum suppression over a scored BoxList.

    Boxes whose IOU (intersection over union) with an already-selected box
    exceeds ``thresh`` are pruned. This only works for a single class; to
    apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.

    Args:
      boxlist: BoxList holding N boxes. Must contain a 'scores' field
        representing detection scores.
      thresh: scalar threshold
      max_output_size: maximum number of retained boxes
      scope: name scope.

    Returns:
      a BoxList holding M boxes where M <= max_output_size

    Raises:
      ValueError: if thresh is not in [0, 1]
    """
    with tf.name_scope(scope, 'NonMaxSuppression'):
        # Validate inputs up front, in the same order callers expect errors.
        if not 0 <= thresh <= 1.0:
            raise ValueError('thresh must be between 0 and 1')
        if not isinstance(boxlist, box_list.BoxList):
            raise ValueError('boxlist must be a BoxList')
        if not boxlist.has_field('scores'):
            raise ValueError('input boxlist must have \'scores\' field')
        keep_indices = tf.image.non_max_suppression(
            boxlist.get(),
            boxlist.get_field('scores'),
            max_output_size,
            iou_threshold=thresh,
        )
        return gather(boxlist, keep_indices)
import inspect
import os
import json
def client_role_setting(realm, client_id):
    """Client role setting: create the client roles from the request payload,
    then attach any composite (child) roles.

    Args:
        realm (str): realm
        client_id (str): client id

    Returns:
        [type]: [description]
    """
    try:
        globals.logger.debug('#' * 50)
        globals.logger.debug('CALL {}: realm[{}] client_id[{}]'.format(inspect.currentframe().f_code.co_name, realm, client_id))
        globals.logger.debug('#' * 50)
        token_user = os.environ["EXASTRO_KEYCLOAK_USER"]
        token_password = os.environ["EXASTRO_KEYCLOAK_PASSWORD"]
        token_realm_name = os.environ["EXASTRO_KEYCLOAK_MASTER_REALM"]
        # Expand arguments from the request body
        payload = request.json.copy()
        globals.logger.debug(payload)
        # Get a token
        token = api_keycloak_call.get_user_token(token_user, token_password, token_realm_name)
        # Register only the roles themselves first
        for role in payload["roles"]:
            role_name = role["name"]
            # Set the role to add
            add_role = {
                "name": role_name,
                "attributes": role["attributes"],
            }
            # Create one client role
            api_keycloak_call.keycloak_client_role_create(realm, client_id, add_role, token)
        # Get a fresh token
        token = api_keycloak_call.get_user_token(token_user, token_password, token_realm_name)
        # Then, for roles that have children, attach those elements
        # (repeat for every role)
        for role in payload["roles"]:
            role_name = role["name"]
            # If there are child roles, fetch their definitions and attach them
            if len(role["composite_roles"]) > 0:
                composite_roles = []
                for composite_role in role["composite_roles"]:
                    role_info = api_keycloak_call.keycloak_client_role_get(realm, client_id, composite_role, token)
                    composite_roles.append(json.loads(role_info))
                # Attach the composite roles to the client role
                api_keycloak_call.keycloak_client_role_composite_create(realm, client_id, role_name, composite_roles, token)
        ret = {
            "result": "200",
        }
        return jsonify(ret), 200
    except Exception as e:
        # Top-level boundary: convert any failure into a 500 response.
        return common.serverError(e)
def gen_weekly_ccy_df(start, end):
    """Generate the weekly currency data table.

    Fetches historical quotes, inverts the USDxxx pairs to xxxUSD, rebases
    every series to 1.0 at the first observation, and re-attaches the raw
    (un-rebased) CNY and JPY quotes. Returns the selected columns and the
    ordered column-name list.
    """
    ccy_columns = ["USD_Index",
                   "EURUSD", "GBPUSD", "AUDUSD", "CADUSD",
                   "JPYUSD",
                   "CNYUSD", "HKDUSD", "TWDUSD",
                   "KRWUSD", "THBUSD", "SGDUSD", "MYRUSD",
                   "BRLUSD", "INRUSD",
                   "CNY_raw", "JPY_raw"
                   ]
    ccy_df = get_histroical_ccy(start, end)
    # Snapshot the raw JPY/CNY quotes before any transformation.
    raw_quotes = ccy_df[["JPYUSD", "CNYUSD"]]
    # Quotes arrive as USDxxx; invert to get xxxUSD.
    for inverted, direct in (("EURUSD", "USDEUR"),
                             ("GBPUSD", "USDGBP"),
                             ("AUDUSD", "USDAUD")):
        ccy_df[inverted] = 1 / ccy_df[direct]
    # Rebase every series to 1.0 at the first row ...
    ccy_df = ccy_df / ccy_df.iloc[0]
    # ... but keep the un-rebased CNY/JPY levels as separate columns.
    ccy_df["CNY_raw"] = raw_quotes["CNYUSD"]
    ccy_df["JPY_raw"] = raw_quotes["JPYUSD"]
    return ccy_df[ccy_columns], ccy_columns
def demean_and_normalise(points_a: np.ndarray,
                         points_b: np.ndarray):
    """
    Centre each point cloud on the origin independently, then rescale
    both clouds by one shared factor so they fit inside [-1, 1].

    :param points_a: 1st point cloud
    :type points_a: np.ndarray
    :param points_b: 2nd point cloud
    :type points_b: np.ndarray
    :return: normalised point clouds, scaling matrix, and the two
        translation matrices describing the removed centroids
    """
    centroid_a = np.mean(points_a, axis=0)
    centroid_b = np.mean(points_b, axis=0)
    centred_a = points_a - centroid_a
    centred_b = points_b - centroid_b
    # A single shared scale preserves the relative size of the two clouds.
    norm_factor = np.max([np.max(np.abs(centred_a)),
                          np.max(np.abs(centred_b))])
    return (centred_a / norm_factor,
            centred_b / norm_factor,
            create_scaling_matrix(norm_factor),
            create_translation_matrix(centroid_a),
            create_translation_matrix(centroid_b))
import yaml
import six
def ParseCustomLevel(api_version):
  """Return a custom-level parser bound to the given *api_version*."""

  def VersionedParseCustomLevel(path):
    """Parse a YAML representation of custom level conditions.

    Args:
      path: str, path to file containing custom level expression

    Returns:
      The parsed Expr message (CEL expression).

    Raises:
      ParseError: if the file could not be read into the proper object
    """
    data = yaml.load_path(path)
    if not data:
      raise ParseError(path, 'File is empty')
    messages = util.GetMessages(version=api_version)
    message_class = messages.Expr
    try:
      expr = encoding.DictToMessage(data, message_class)
    except Exception as err:  # deliberately broad: re-raised with context
      raise InvalidFormatError(path, six.text_type(err), message_class)
    _ValidateAllCustomFieldsRecognized(path, expr)
    return expr

  return VersionedParseCustomLevel
def specMergeMSA(*msa, **kwargs):
    """Returns an :class:`.MSA` obtained from merging parts of the sequences
    of proteins present in multiple *msa* instances.  Sequences are matched
    based on the species section of protein identifiers found in the sequence
    labels.  Protein identifiers that map to multiple sequences are excluded.

    :raises ValueError: if fewer than two MSA instances are given
    :raises TypeError: if any argument is not an MSA instance
    """
    if len(msa) <= 1:
        raise ValueError('more than one msa instances are needed')
    try:
        # _getArray() doubles as a duck-type check for MSA instances.
        arrs = [m._getArray() for m in msa]
        sets = []
        labells = []
        for m in msa:
            aset = set([])
            labell = {}
            count = m.countLabel
            for label in m.iterLabels():
                # Species tag is the second underscore-separated field.
                species = label.rsplit('_')[1]
                # Skip labels that are duplicated or whose species tag is
                # already taken; they cannot be matched unambiguously.
                if count(label) == 1 and species not in aset:
                    aset.add(species)
                    labell[species] = label
            sets.append(aset)
            labells.append(labell)
    except AttributeError:
        raise TypeError('all msa arguments must be MSA instances')
    # Species tags common to every input MSA.
    sets = iter(sets)
    common = next(sets)
    for aset in sets:
        common = common.intersection(aset)
    if not common:
        return None
    lens = [m.numResidues() for m in msa]
    bounds = [0]
    bounds.extend(cumsum(lens))
    # Column range of each input MSA inside the merged array.
    rngs = [(start, end) for start, end in zip(bounds[:-1], bounds[1:])]
    merger = zeros((len(common), sum(lens)), '|S1')
    index = 0
    labels = []
    mapping = {}
    for species in common:
        # Concatenate the matched sequence from every MSA.  (The previous
        # implementation hard-coded two MSAs and relied on ``start``/``end``
        # leaking out of a list comprehension, which raises NameError on
        # Python 3.)
        for m, labell, (start, end) in zip(msa, labells, rngs):
            merger[index, start:end] = list(str(m[m.getIndex(labell[species])]))
        label = labells[0][species]
        labels.append(label)
        mapping[label] = index
        index += 1
    merger = MSA(merger, labels=labels, mapping=mapping,
                 title=' + '.join([m.getTitle() for m in msa]))
    return merger
def parse(filePath):
    """
    Return a fully parsed Maya ASCII file.

    :type filePath: str
    :rtype: mason.asciiscene.AsciiScene
    """
    parser = asciifileparser.AsciiFileParser(filePath)
    return parser.scene
import os
def getSpectrumFromMlinptFolder(inpFolder, fwhm, hv, angle, polarised=None, multEnergiesByMinusOne=True, database=None):
    """ Build a spectrum from all *MLinpt.txt files found in a folder.

    Args:
        inpFolder: (str) Path to folder containing *MLinpt.txt files
        fwhm: (float) Full-width at half maximum for the broadening function
        hv: (float) Photon energy to calculate the spectrum at (None means density-of-states)
        angle: (float) Emission angle to calculate the spectrum at (None means ignore angular effects)
        polarised: (str, Optional) If None (default) then unpolarised light is assumed; "linear" simulates linearly polarised light along the beam direction
        multEnergiesByMinusOne: (Bool) Whether to multiply parsed energies by -1, converting eigenvalues (more -ve = more stable) to binding energies (more +ve = more stable). Default True
        database: (Optional, CrossSectionDatabaseBase object), default = YehLindauXSectionDatabase

    Returns:
        outSpectrum: (GenSpectraOutput object) - total spectrum (totalSpectraContributions) and all contributions (spectraContributions)
        specCreator: (SpectrumCreatorStandard object) - all options used to create the spectrum
    """
    # NOTE(review): os.listdir yields bare file names, not full paths;
    # presumably the downstream parser resolves them relative to the working
    # directory - confirm, or this may fail when inpFolder != cwd.
    inputFiles = [name for name in os.listdir(inpFolder) if name.endswith('MLinpt.txt')]
    assert len(inputFiles) > 0, "Need at least 1 input file, but none found in folder {}".format(inpFolder)
    if database is None:
        database = yhDb.YehLindauXSectionDatabase()
    return getSpectrumFromMlinptFileList(inputFiles, fwhm, hv, angle, polarised, database, multEnergiesByMinusOne=multEnergiesByMinusOne)
import json
def extract_events_from_stream(stream_df, event_type):
    """ Extract rows of one event type from an event-stream DataFrame.

    Returns a DataFrame with columns TIMESTAMP, ORDER_ID, PRICE, SIZE and
    BUY_SELL_FLAG; the frame is empty when no matching events exist.
    """
    matching = stream_df.loc[stream_df.EventType == event_type][['EventTime', 'Event']]
    # Round-trip through JSON flattens the per-row Event payload dicts.
    payloads = json.loads(matching['Event'].to_json(orient="records"))
    # TODO : get rid of structs containing all `int` types
    flattened = json_normalize(payloads)
    combined = pd.merge(matching['EventTime'].reset_index(), flattened,
                        left_index=True, right_index=True)
    if combined.empty:
        return pd.DataFrame({
            'TIMESTAMP': [],
            'ORDER_ID': [],
            'PRICE': [],
            'SIZE': [],
            'BUY_SELL_FLAG': []
        })
    result = combined[['EventTime', 'order_id', 'limit_price', 'quantity', 'is_buy_order']]
    return result.rename(columns={'EventTime': 'TIMESTAMP',
                                  'order_id': 'ORDER_ID',
                                  'limit_price': 'PRICE',
                                  'quantity': 'SIZE',
                                  'is_buy_order': 'BUY_SELL_FLAG'})
def d_psi(t):
    """Compute the derivative of the variable transform from Ogata 2005.

    For arguments >= 6 the derivative is numerically indistinguishable
    from 1, so the expression is only evaluated below that threshold
    (which also avoids overflow in the hyperbolic functions).
    """
    t = np.array(t, dtype=float)
    out = np.ones_like(t)
    small = t < 6
    ts = t[small]
    inner = np.pi * np.sinh(ts)
    out[small] = (np.pi * ts * np.cosh(ts) + np.sinh(inner)) / (1.0 + np.cosh(inner))
    return out
from typing import Dict
from typing import Any
import io
def build_environ(request: HTTPRequest, errors: Errors) -> Dict[str, Any]:
    """
    Build a WSGI ``environ`` dict from an HTTP-trigger request.

    See https://www.python.org/dev/peps/pep-3333/ for the keys a WSGI
    server must provide.

    :param request: parsed HTTP request (context, path, method, header, body, query)
    :param errors: writable object exposed as ``wsgi.errors``
    :return: a PEP 3333 compliant environ dict
    """
    # Header names become HTTP_* variables per the CGI convention.
    headers = {
        f"HTTP_{k.upper().replace('-','_')}": v for k, v in request.header.items()
    }
    environ = {
        # Kept consistent with Alibaba Cloud Function Compute HTTP triggers.
        "fc.context": request.context,
        "fc.request_uri": request.path,
        # WSGI-required values
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": "http",
        "wsgi.input": io.BytesIO(request.body),
        "wsgi.errors": errors,
        "wsgi.multithread": False,
        "wsgi.multiprocess": False,
        "wsgi.run_once": True,
        "SERVER_NAME": "127.0.0.1",
        "SERVER_PORT": "80",
        "SERVER_PROTOCOL": "HTTP/1.0",
        "REQUEST_METHOD": request.method,
        "SCRIPT_NAME": "",
        "PATH_INFO": request.path,
        # PEP 3333 / CGI: QUERY_STRING must NOT include the leading "?"
        # (the previous code prepended one, corrupting the first parameter
        # name and yielding "?" for an empty query).
        "QUERY_STRING": "&".join(f"{k}={v}" for k, v in request.query.items()),
        # Content headers are promoted to their dedicated CGI variables.
        "CONTENT_TYPE": headers.pop("HTTP_CONTENT_TYPE", ""),
        "CONTENT_LENGTH": headers.pop("HTTP_CONTENT_LENGTH", ""),
    }
    environ.update(headers)
    return environ
def loadPage(url, filename):
    """
    Send a request to *url* and return the server's response body.

    url: the URL to fetch
    filename: name of the file being processed (used only for the
        progress message printed below)
    """
    # NOTE: Python 2 code (print statement, urllib2); urllib2 must be
    # imported elsewhere in this module.
    print "正在下载 " + filename
    # Spoof a desktop-browser User-Agent so the server does not reject the
    # request as coming from a script.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
    request = urllib2.Request(url, headers=headers)
    return urllib2.urlopen(request).read()
def get_exporter_class():
    """Return the exporter class based on preferences and support.

    Prefers the XLSX exporter when ``_use_xlsx()`` reports it should be
    used, falling back to the CSV exporter otherwise.
    """
    # PEP 8: never compare with ``is True`` - rely on truthiness instead
    # (the old ``_use_xlsx() is True`` would silently pick CSV if the helper
    # ever returned a truthy non-bool value).
    if _use_xlsx():
        return XLSXExporter
    return CSVExporter
def get_twitter_token():
    """This is used by the API to look for the auth token and secret
    it should use for API calls. During the authorization handshake
    a temporary set of token and secret is used, but afterwards this
    function has to return the token and secret. If you don't want
    to store this in the database, consider putting it into the
    session instead.

    Returns:
        Whatever is stored under ``'twitter_token'`` in the current
        session (presumably a ``(token, secret)`` pair set by the OAuth
        handler - confirm against the handshake code), or ``None`` when
        the user has not authorized yet.
    """
    # ``.get`` avoids a KeyError for users who never completed the handshake.
    return session.get('twitter_token')
def is_text_serializer(serializer):
    """Return True when *serializer* produces text (str), False for binary."""
    # Serialize a trivial value and inspect the type of the result.
    probe = serializer.dumps({})
    return isinstance(probe, str)
def plot_power(ngroups, mesh_shape, directory, mode="show"):
    """Plot the integrated fission rates from OpenMC and OpenMOC, as well as
    the relative and absolute error of OpenMOC relative to OpenMC.

    Parameters:
    -----------
    ngroups: int; number of energy groups
    mesh_shape: str; name of the mesh shape
    directory: str; path to the data
    mode: str; "show" to display the figure, "save" to write it to a PDF in
        `directory`, or "return" to skip plotting (validated by _check_mode)

    Returns:
    --------
    (montecarlo_power, moc_power, errors, pcmerr): the two normalized power
    distributions plus the relative (%) and absolute (x100) error arrays.
    """
    mode = _check_mode(mode)
    directory, shape = _check_params(directory, mesh_shape)
    montecarlo_power = np.zeros(shape)
    moc_power = np.zeros(shape)
    # Integrate over all energy groups
    for g in range(ngroups):
        # Rate files follow the pattern "<code>_fission_<gg>-of-<G>_<shape>".
        rates_name = "fission_{:02d}-of-{}_{}".format(g+1, ngroups, mesh_shape)
        fname = directory + "montecarlo_" + rates_name
        montecarlo_group_rates = np.loadtxt(fname)
        montecarlo_power += montecarlo_group_rates
        fname = directory + "moc_" + rates_name
        moc_group_rates = np.loadtxt(fname)
        moc_power += moc_group_rates
    # Filter out results that are essentially zero
    # (below 10% of the mean Monte Carlo rate in either solution -> NaN).
    mc_mean = np.nanmean(montecarlo_power)*0.1
    indices = (montecarlo_power < mc_mean) + (moc_power < mc_mean)
    montecarlo_power[indices] = np.nan
    moc_power[indices] = np.nan
    # Normalize
    montecarlo_power /= np.nanmean(montecarlo_power)
    moc_power /= np.nanmean(moc_power)
    # Find the errors in the normalized distributions
    errors = np.divide(moc_power - montecarlo_power, montecarlo_power/100)
    pcmerr = (moc_power - montecarlo_power)*100
    if mode == "return":
        return montecarlo_power, moc_power, errors, pcmerr
    # Plot OpenMC's fission rates in the upper left subplot
    plt.subplot(231)
    plt.imshow(montecarlo_power.squeeze(), interpolation='none', cmap='jet')
    plt.title('OpenMC Power Distribution\n{} groups'.format(ngroups))
    # Shared color scale so the two power plots are directly comparable.
    cmin = min(np.nanmin(montecarlo_power), np.nanmin(moc_power))
    cmax = max(np.nanmax(montecarlo_power), np.nanmax(moc_power))
    plt.clim(cmin, cmax)
    plt.colorbar()
    # Plot OpenMOC's fission rates in the upper right subplot
    plt.subplot(232)
    plt.imshow(moc_power.squeeze(), interpolation='none', cmap='jet')
    plt.title('OpenMOC Power Distribution\n{} groups'.format(ngroups))
    plt.clim(cmin, cmax)
    plt.colorbar()
    # Plot the relative error in the lower left subplot
    plt.subplot(233)
    pct = plt.imshow(errors.squeeze(), interpolation='none', cmap='jet')
    # Symmetric color limits centered on zero error.
    posmax = np.nanmax(errors)
    negmax = np.nanmin(errors)
    cmax = np.ceil(max(abs(posmax), abs(negmax)))
    plt.clim(-cmax, +cmax)
    plt.title('Relative error (%)')
    plt.colorbar(pct)
    # Plot the absolute error in the lower right subplot
    plt.subplot(234)
    pct = plt.imshow(pcmerr.squeeze(), interpolation='none', cmap='jet')
    posmax = np.nanmax(pcmerr)
    negmax = np.nanmin(pcmerr)
    cmax = np.ceil(max(abs(posmax), abs(negmax)))
    plt.clim(-cmax, +cmax)
    plt.title('Absolute error (%)')
    plt.colorbar(pct)
    if mode == "show":
        plt.show()
    elif mode == "save":
        # Save and/or show the plot
        plt.tight_layout()
        fname = directory + "power_{}-groups.pdf".format(ngroups)
        plt.savefig(fname)
        print("Figure saved to:", fname)
    return montecarlo_power, moc_power, errors, pcmerr
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ):
    """glDeleteFramebuffersEXT( framebuffers ) -> None

    PyOpenGL convenience wrapper: when *framebuffers* is omitted, the ``n``
    argument is reinterpreted as the framebuffer name(s) to delete, and the
    real element count is derived from the coerced array.
    """
    if framebuffers is None:
        # ``n`` actually held the framebuffer names; coerce it to a GLuint
        # array and recompute the count expected by the C entry point.
        framebuffers = arrays.GLuintArray.asArray( n )
        n = arrays.GLuintArray.arraySize( framebuffers )
    return baseOperation( n, framebuffers )
import os
import time
def projection_matching(input_model, projs, nside, dir_suffix=None, **kwargs):
    """
    Match experimental projections against reference projections of a model.

    Parameters:
    -------
    input_model: path of input mrc file
    projs: numpy array; 3-D, with individual projections along axis 2
        (iterated as ``projs[:, :, i]``)
    nside: HEALPix nside controlling the density of the reference
        Euler-angle grid
    dir_suffix: optional subdirectory (under WD) for the model projections
    kwargs: must contain 'WD', the working directory

    Returns:
    -------
    orientations: array of Euler angles from the reference grid, one row per
        input projection; the third column is replaced by the second value
        returned by find_similar (presumably the matched psi - confirm).
    """
    try:
        WD = kwargs['WD']
    except KeyError as error:
        raise KeyError('lack working directory')
    # psi_step=360 means a single in-plane angle per grid point.
    EAs_grid = gen_ref_EAs_grid(nside=nside, psi_step=360)
    if dir_suffix:
        dstpath = os.path.join(WD, dir_suffix, 'model_projections')
    else:
        dstpath = os.path.join(WD, 'model_projections')
    N = projs.shape[0]
    num_projs = projs.shape[2]
    num_model_imgs = EAs_grid.shape[0]
    num_threads = mp.cpu_count()
    tic = time.time()
    # Module-level global: presumably read by the find_similar workers
    # spawned below (shared via fork) - confirm before refactoring.
    global model_proj_imgs
    model_proj_imgs = gen_mrcs_from_EAs(EAs_grid, input_model, dstpath)
    print('Time to recover projections from mrcs file: {0:.4f} s'.format(
        time.time() - tic))
    print('Projection matching: multiprocess start')
    with mp.Pool(processes=num_threads) as pool:
        idx = pool.map(
            find_similar, (projs[:, :, i] for i in range(num_projs)))
    print('\nFinish orientation!')
    idx_arr = np.asarray(idx)
    # Column 0 indexes the matched grid orientation; column 1 overwrites psi.
    orientations = EAs_grid[idx_arr[:, 0]]
    orientations[:, 2] = idx_arr[:, 1]
    return orientations
def backtostr(dayback=1, format="%Y/%m/%d", thedate=None):
    """Return the date *dayback* days before *thedate* as a formatted string.

    :param dayback: number of days to go back (default 1)
    :param format: strftime format for the result
    :param thedate: reference date; defaults to today's date, evaluated at
        call time.  (The previous default ``date.today()`` was evaluated
        only once at import, so long-running processes used a stale date.)
    """
    if thedate is None:
        thedate = date.today()
    return backto(dayback=dayback, thedate=thedate).strftime(format)
def no_conflict_require_POST(f):
    """
    Decorator combining ``require_POST`` with ResourceConflict handling:
    a conflicting save is reported to the client as HTTP 409.
    """
    @wraps(f)
    def _no_conflict(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except ResourceConflict:
            return HttpResponse(status=409)

    # Apply require_POST outermost, matching the stacked-decorator order.
    return require_POST(_no_conflict)
from typing import Tuple
from re import S
def _split_vector(expr, ranges, fill_ranges=True):
    """Extract the components of the given vector or matrix.

    Parameters
    ==========
    expr : Vector, DenseMatrix or list/tuple
    ranges : list/tuple
    fill_ranges : bool
        When True, append a default (-10, 10) range for every free symbol
        of *expr* that has no range yet.

    Returns
    =======
    split_expr : tuple
        Tuple of the form (x_expr, y_expr, z_expr). If a 2D vector is
        provided, z_expr = S.Zero.
    ranges : list/tuple

    NOTE: this function is located in utils.py module (and not in vectors.py)
    in order to avoid circular import.
    """
    if isinstance(expr, Vector):
        # Express the vector in the (single) coordinate system it uses.
        N = list(_get_coord_systems(expr))[0]
        expr = expr.to_matrix(N)
        # TODO: experimental_lambdify is not able to deal with base scalars.
        # Need to replace them both in the vector as well in the ranges.
        # Sympy's lambdify is able to deal with them. Once experimental_lambdify
        # is removed, the following code shouldn't be necessary anymore.
        bs = list(expr.atoms(BaseScalar))
        # Sorting by str gives a deterministic x/y/z assignment.
        bs = sorted(bs, key=str)
        bs_dict = {b: Symbol(t) for b, t in zip(bs, ["x", "y", "z"])}
        expr = expr.subs(bs_dict)
        ranges = [r.subs(bs_dict) for r in ranges]
    elif not isinstance(expr, (DenseMatrix, list, tuple, Tuple)):
        raise TypeError(
            "The provided expression must be a symbolic vector, or a "
            "symbolic matrix, or a tuple/list with 2 or 3 symbolic "
            + "elements.\nReceived type = {}".format(type(expr))
        )
    elif (len(expr) < 2) or (len(expr) > 3):
        raise ValueError(
            "This function only plots 2D or 3D vectors.\n"
            + "Received: {}. Number of elements: {}".format(expr, len(expr))
        )
    if fill_ranges:
        ranges = list(ranges)
        fs = set().union(*[e.free_symbols for e in expr])
        if len(ranges) < len(fs):
            # Symbols that already have a range (first element of each Tuple).
            fs_ranges = set().union([r[0] for r in ranges])
            for s in fs:
                if s not in fs_ranges:
                    ranges.append(Tuple(s, -10, 10))
    if len(expr) == 2:
        xexpr, yexpr = expr
        zexpr = S.Zero
    else:
        xexpr, yexpr, zexpr = expr
    split_expr = xexpr, yexpr, zexpr
    return split_expr, ranges
def Transition_rep(source_State_name, target_State_name):
    """Representation of a transition.

    :param source_State_name: The sequence of "name" values of State objects referred to by attribute "source" in this Transition
    :type source_State_name: Array
    :param target_State_name: The sequence of "name" values of State objects referred to by attribute "target" in this Transition
    :type target_State_name: Array
    :return: one "source--target" line per transition pair
    """
    pairs = zip(source_State_name, target_State_name)
    return [f'  {src}--{tgt}' for src, tgt in pairs]
def tifread(ifile, metaData):
    """Read a GeoTIFF raster and return coordinate grids and values.

    Parameters
    ----------
    ifile : str
        Path to the raster file.
    metaData : str
        Pixel convention: "A" (area - coordinates at pixel centres, i.e.
        with a half-pixel offset) or "P" (point - coordinates at pixel
        corners).

    Returns
    -------
    X, Y : 2-D arrays of projected coordinates for every pixel.
    Z : 2-D array of raster values (band 1).

    Raises
    ------
    ValueError
        If *metaData* is neither "A" nor "P".  (The previous version fell
        through silently and later crashed with an unbound-variable error.)
    """
    file = gdal.Open(ifile, GA_ReadOnly)
    Nx = file.RasterXSize
    Ny = file.RasterYSize
    trans = file.GetGeoTransform()
    # "A" (area) pixels are referenced by their centre: add half a pixel.
    if metaData == "A":
        offset = 0.5
    elif metaData == "P":
        offset = 0.0
    else:
        raise ValueError("metaData must be 'A' or 'P', got {!r}".format(metaData))
    xp = np.arange(Nx)
    yp = np.arange(Ny)
    (Xp, Yp) = np.meshgrid(xp, yp)
    # Single affine transform shared by both conventions (previously the
    # meshgrid block was duplicated per branch).  FIXME: bottleneck!
    X = trans[0] + (Xp + offset)*trans[1] + (Yp + offset)*trans[2]
    Y = trans[3] + (Xp + offset)*trans[4] + (Yp + offset)*trans[5]
    band = file.GetRasterBand(1)
    Z = band.ReadAsArray()
    # NOTE: the unused projection/dx/dy computations from the original
    # (dead code behind a commented-out return) were removed.
    return X, Y, Z
from typing import List
from typing import Any
def search_model(trial: optuna.trial.Trial) -> List[Any]:
    """Search model structure from user-specified search space.

    Samples a backbone as a list of [repeat, block_type, block_args] entries
    plus a fixed head, and records per-block metadata.

    NOTE: despite the annotation, this returns a 2-tuple
    ``(model, module_info)``.
    """
    model = []
    n_stride = 0
    MAX_NUM_STRIDE = 5
    UPPER_STRIDE = 2  # 5(224 example): 224, 112, 56, 28, 14, 7
    n_layers = trial.suggest_int("n_layers", 8, 12)
    stride = 1
    input_max = 64
    imput_min = 32
    module_info = {}
    ### The number of layers to stack is also searched.
    for i in range(n_layers):
        out_channel = trial.suggest_int(f"{i+1}units", imput_min, input_max)
        block = trial.suggest_categorical(
            f"m{i+1}", ["Conv", "DWConv", "InvertedResidualv2", "InvertedResidualv3"]
        )
        repeat = trial.suggest_int(f"m{i+1}/repeat", 1, 5)
        m_stride = trial.suggest_int(f"m{i+1}/stride", low=1, high=UPPER_STRIDE)
        if m_stride == 2:
            stride += 1
        # NOTE(review): n_stride is never incremented, so this branch forces
        # every block's stride to 2 - looks unintended; confirm before use
        # (MAX_NUM_STRIDE is also unused).
        if n_stride == 0:
            m_stride = 2
        if block == "Conv":
            activation = trial.suggest_categorical(
                f"m{i+1}/activation", ["ReLU", "Hardswish"]
            )
            # Conv args: [out_channel, kernel_size, stride, padding, groups, activation]
            model_args = [out_channel, 3, m_stride, None, 1, activation]
        elif block == "DWConv":
            activation = trial.suggest_categorical(
                f"m{i+1}/activation", ["ReLU", "Hardswish"]
            )
            # DWConv args: [out_channel, kernel_size, stride, padding_size, activation]
            model_args = [out_channel, 3, 1, None, activation]
        elif block == "InvertedResidualv2":
            c = trial.suggest_int(
                f"m{i+1}/v2_c", low=imput_min, high=input_max, step=16
            )
            t = trial.suggest_int(f"m{i+1}/v2_t", low=1, high=4)
            model_args = [c, t, m_stride]
        elif block == "InvertedResidualv3":
            kernel = trial.suggest_int(f"m{i+1}/kernel_size", low=3, high=5, step=2)
            t = round(
                trial.suggest_float(f"m{i+1}/v3_t", low=1.0, high=6.0, step=0.1), 1
            )
            c = trial.suggest_int(f"m{i+1}/v3_c", low=imput_min, high=input_max, step=8)
            se = trial.suggest_categorical(f"m{i+1}/v3_se", [0, 1])
            hs = trial.suggest_categorical(f"m{i+1}/v3_hs", [0, 1])
            # k t c SE HS s
            model_args = [kernel, t, c, se, hs, m_stride]
        in_features = out_channel
        model.append([repeat, block, model_args])
        # Widen the channel search range every other layer, capped at 160.
        if i % 2:
            input_max *= 2
            input_max = min(input_max, 160)
        module_info[f"block{i+1}"] = {"type": block, "repeat": repeat, "stride": stride}
    # last layer
    last_dim = trial.suggest_int("last_dim", low=128, high=1024, step=128)
    # We can setup fixed structure as well
    model.append([1, "Conv", [last_dim, 1, 1]])
    model.append([1, "GlobalAvgPool", []])
    model.append([1, "FixedConv", [6, 1, 1, None, 1, None]])
    return model, module_info
def create_search_forms(name, language_code, script_code):
    """Return a list of name variants suitable for searching.

    Arguments:
    name -- string name
    language_code -- string code of language
    script_code -- string code of script
    """
    # QAZ: It would be useful if something could be done here (or
    # wherever is most appropriate) to handle the case where names are
    # assembled without spaces between the parts (eg, Chinese), since
    # this means that whatever part(s) come after the first will not
    # be found in a search.
    name = str(name)
    search_forms = [name]
    candidates = []
    if script_code == 'Latn':
        # ASCII folding may yield an empty result, so it gets its own check.
        ascii_form = asciify_name(name)
        if ascii_form and ascii_form != name:
            search_forms.append(ascii_form)
        candidates.append(demacronise_name(name))
    candidates.append(abbreviate_name(name, language_code, script_code))
    candidates.append(unpunctuate_name(name))
    for candidate in candidates:
        if candidate != name:
            search_forms.append(candidate)
    return search_forms
def iv_params(*, N_s, T_degC, I_ph_A, I_rs_1_A, n_1, I_rs_2_A, n_2, R_s_Ohm, G_p_S,
              minimize_scalar_bounded_options=minimize_scalar_bounded_options_default,
              newton_options=newton_options_default):
    """
    Compute I-V curve parameters.

    Inputs (any broadcast-compatible combination of python/numpy scalars and numpy arrays):
        Same as P_mp().

    Outputs (device-level, at each combination of broadcast inputs, return type is numpy.float64 for all scalar inputs):
        dict containing the outputs of FF() with the addition of:
            R_oc_Ohm resistance at open circuit
            R_sc_Ohm resistance at short circuit
    """
    # Device parameters shared by all three underlying model calls.
    device = dict(N_s=N_s, T_degC=T_degC, I_ph_A=I_ph_A, I_rs_1_A=I_rs_1_A, n_1=n_1,
                  I_rs_2_A=I_rs_2_A, n_2=n_2, R_s_Ohm=R_s_Ohm, G_p_S=G_p_S)
    result = FF(minimize_scalar_bounded_options=minimize_scalar_bounded_options,
                newton_options=newton_options, **device)
    result['R_oc_Ohm'] = R_oc(newton_options=newton_options, **device)['R_oc_Ohm']
    result['R_sc_Ohm'] = R_sc(newton_options=newton_options, **device)['R_sc_Ohm']
    return result
import glob
import os
import ast
def list_class_names(dir_path):
    """
    Map every class name defined in the folder's python files to its file.

    Args:
        dir_path (str): absolute path of the folder.

    Returns:
        dict: mapping from each class name found in the folder's ``.py``
        files to the defining file's path (``__init__.py`` is skipped).
    """
    class_map = dict()
    for path in glob.glob(os.path.join(dir_path, "*.py")):
        if not os.path.isfile(path) or path.endswith('__init__.py'):
            continue
        with open(path) as source:
            module = ast.parse(source.read())
        # Only top-level class definitions are considered.
        for node in module.body:
            if isinstance(node, ast.ClassDef):
                class_map[node.name] = path
    return class_map
def update_domain(
        uuid, name=None, disabled=None, project_id=None, user_id=None):
    """Update an existing domain, changing only the fields that are given."""
    domain = get_domain(uuid=uuid)
    updates = {
        'disabled': disabled,
        'name': name,
        'project_id': project_id,
        'user_id': user_id,
    }
    # None means "leave this field untouched".
    for field, value in updates.items():
        if value is not None:
            domain[field] = value
    domain.save()
    return domain
def ja_of(tree: Tree) -> str:
    """tree string in the Japanese CCGBank's format

    Args:
        tree (Tree): tree object

    Returns:
        str: tree string in Japanese CCGBank's format
    """
    def stringify(node):
        if not node.is_leaf:
            # Internal node: "{<rule> <category> <children...>}".
            parts = ' '.join(stringify(child) for child in node.children)
            return f'{{{node.op_symbol} {node.cat} {parts}}}'
        token = node.token
        word = normalize(node.word)
        # POS fields collapse to "_" when every sub-field is "*".
        pos_fields = [token.get(key, '*') for key in ('pos', 'pos1', 'pos2', 'pos3')]
        pos_fields = [field for field in pos_fields if field != '*']
        pos = '-'.join(pos_fields) if pos_fields else '_'
        infl_fields = [token.get(key, '*') for key in ('inflectionForm', 'inflectionType')]
        infl_fields = [field for field in infl_fields if field != '*']
        inflection = '-'.join(infl_fields) if infl_fields else '_'
        return f'{{{node.cat} {word}/{word}/{pos}/{inflection}}}'

    return stringify(tree)
def cut_flowlines_at_points(flowlines, joins, points, next_lineID):
    """General method for cutting flowlines at points and updating joins.

    Only new flowlines are returned; any that are not cut by points are omitted.

    Parameters
    ----------
    flowlines : GeoDataFrame
    joins : DataFrame
        flowline joins
    points : ndarray of MultiPoint or Point geometries
        expected to match to flowlines
    next_lineID : int
        id of next flowline to be created

    Returns
    -------
    (GeoDataFrame, DataFrame, ndarray)
        new flowlines, updated joins, remove_ids (original flowline IDs that
        need to be removed before merging in returned flowlines)
    """
    flowlines = flowlines.copy()
    joins = joins.copy()
    flowlines["geometry"] = cut_lines_at_multipoints(
        flowlines.geometry.values.data, points
    )
    # discard any that have only one segment; they weren't split and we don't want
    # to update them. Split the rest into parts.
    ix = pg.get_num_geometries(flowlines.geometry.values.data) > 1
    flowlines = explode(
        flowlines.loc[ix].reset_index().rename(columns={"lineID": "origLineID"})
    ).reset_index(drop=True)
    # recalculate length and sinuosity
    flowlines["length"] = pg.length(flowlines.geometry.values.data).astype("float32")
    flowlines["sinuosity"] = calculate_sinuosity(flowlines.geometry.values.data).astype(
        "float32"
    )
    # calculate new ID
    flowlines["lineID"] = (flowlines.index + next_lineID).astype("uint32")
    ### Update flowline joins
    # transform new lines to create new joins at the upstream / downstream most
    # points of the original line
    l = flowlines.groupby("origLineID").lineID
    # the first new line per original line is the furthest upstream, so use its
    # ID as the new downstream ID for anything that had this origLineID as its downstream
    first = l.first().rename("new_downstream_id")
    # the last new line per original line is the furthest downstream...
    last = l.last().rename("new_upstream_id")
    # Update existing joins with the new lineIDs we created at the upstream or downstream
    # ends of segments we just created
    joins = update_joins(
        joins, first, last, downstream_col="downstream_id", upstream_col="upstream_id",
    )
    ### Create new line joins for any that weren't inserted above
    # Transform all groups of new line IDs per original lineID
    # into joins structure
    atts = (
        flowlines.groupby("origLineID")[["NHDPlusID", "loop", "HUC4"]]
        .first()
        .rename(columns={"NHDPlusID": "upstream"})
    )
    # function to make upstream / downstream side of join:
    # consecutive pairs (a[0], a[1]), (a[1], a[2]), ... of new segment IDs
    pairs = lambda a: pd.Series(zip(a[:-1], a[1:]))
    new_joins = (
        l.apply(pairs)
        .apply(pd.Series)
        .reset_index()
        .rename(columns={0: "upstream_id", 1: "downstream_id"})
        .join(atts, on="origLineID")
    )
    # NHDPlusID is same for both sides
    new_joins["downstream"] = new_joins.upstream
    new_joins["type"] = "internal"
    new_joins["marine"] = False
    new_joins = new_joins[
        [
            "upstream",
            "downstream",
            "upstream_id",
            "downstream_id",
            "type",
            "loop",
            "marine",
            "HUC4",
        ]
    ]
    joins = (
        joins.append(new_joins, ignore_index=True, sort=False)
        .sort_values(["downstream_id", "upstream_id"])
        .reset_index(drop=True)
    )
    # Original (uncut) lines must be dropped by the caller before merging.
    remove_ids = flowlines.origLineID.unique()
    flowlines = flowlines.drop(columns=["origLineID"]).set_index("lineID")
    return flowlines, joins, remove_ids
def export(gen, directory, file_prefix='{uid}-', **kwargs):
    """
    Export a stream of documents to nxstxm_baseline.

    .. note::
        This can alternatively be used to write data to generic buffers
        rather than creating files on disk. See the ``directory`` parameter.

    Parameters
    ----------
    gen : generator
        expected to yield ``(name, document)`` pairs
    directory : string, Path or Manager.
        Path to the output directory (``''`` for the current working
        directory), or an advanced manager object implementing the
        ``suitcase.utils.MultiFileManager`` / ``MemoryBuffersManager``
        interface to direct output to memory buffers, sockets, etc.
        See https://nsls-ii.github.io/suitcase for details.
    file_prefix : str, optional
        First part of each generated file name. May contain templates such
        as ``{proposal_id}-{sample_name}-`` populated from the RunStart
        document. The default ``{uid}-`` is guaranteed present and unique.
    **kwargs : kwargs
        Keyword arguments passed through to the underlying I/O library.

    Returns
    -------
    artifacts : dict
        dict mapping 'labels' to lists of file names (or, in general,
        whatever resources are produced by the Manager)

    Examples
    --------
    Generate files with unique-identifier names in the current directory.

    >>> export(gen, '')

    Generate files with more readable metadata in the file names.

    >>> export(gen, '', '{plan_name}-{motors}-')

    Include the experiment's start time formatted as YY-MM-DD_HH-MM.

    >>> export(gen, '', '{time:%%Y-%%m-%%d_%%H:%%M}-')

    Place the files in a different directory, such as on a mounted USB stick.

    >>> export(gen, '/path/to/my_usb_stick')
    """
    with Serializer(directory, file_prefix, **kwargs) as serializer:
        for name_doc_pair in gen:
            serializer(*name_doc_pair)
    return serializer.artifacts
import os
def get_test_dataset(path):
    """
    Load a test dataset (features only, no labels) as lists of ints.

    :param string path: The path to the dataset file after /datasets/
    :return: list of rows, each a list of int features
    """
    dataset_path = os.path.abspath(os.path.join(os.getcwd(), "../datasets/", path))
    with open(dataset_path, "r") as file:
        # splitlines() handles files with or without a trailing newline;
        # the previous split('\n')[:-1] silently dropped the last row when
        # the file did not end with a newline.
        rows = [line.split(',') for line in file.read().splitlines()]
    return [[int(value) for value in row] for row in rows]
def services():
    """
    Returns the grader-notebook list used as services in jhub

    Response: json
    example:
    ```
    {
      services: [{"name":"<course-id", "url": "http://grader-<course-id>:8888"...}],
      groups: {"formgrade-<course-id>": ["grader-<course-id>"] }
    }
    ```
    """
    payload_services = []
    payload_groups = {}
    for service in GraderService.query.all():
        payload_services.append({
            'name': service.name,
            'url': service.url,
            'oauth_no_confirm': service.oauth_no_confirm,
            'admin': service.admin,
            'api_token': service.api_token
        })
        # Each course gets a JupyterHub user group holding its grader account.
        payload_groups[f'formgrade-{service.course_id}'] = [f'grader-{service.course_id}']
    return jsonify(services=payload_services, groups=payload_groups)
def chunks(l, k):
    """
    Split list *l* into *k* contiguous sublists whose sizes differ by at
    most one; the first ``len(l) % k`` sublists get the extra element.
    """
    n = len(l)
    base, extra = divmod(n, k)
    result = []
    start = 0
    for i in range(k):
        size = base + (1 if i < extra else 0)
        result.append(l[start:start + size])
        start += size
    return result
def bfs(adj, src, dst, cache=None):
    """BFS search from source to destination. Check whether a path exists, does
    not return the actual path.
    Work on directed acyclic graphs where we assume that there is no path to the
    node itself.
    Args:
        adj: Adjacency matrix (square ndarray; adj[i, j] == 1 means edge i -> j).
        src: Source node index, 0-based.
        dst: Destination node index, 0-based.
        cache: 2D matrix, cache[i, j] = 1 indicates path exists between two node
            i and j. cache[i, j] = -1 indicates path does not exists between two node
            i and j. cache[i, j] = 0 indicates unknown. Updated in place.
    Returns:
        found: A path is found between source and destination.
    """
    # By the DAG / no-self-path assumption, a node never reaches itself.
    if src == dst: return False
    num_nodes = adj.shape[0]
    if num_nodes == 0:
        return False
    if src >= num_nodes or dst >= num_nodes:
        raise Exception("Index must be smaller than the number of nodes.")
    if num_nodes == 1:
        return False
    # parent[j] == -1 means j is unvisited; otherwise it is j's BFS parent,
    # used both as a visited marker and for backtracking on success.
    parent = np.zeros([num_nodes], dtype=np.int64) - 1
    nodes_to_visit = [src]
    found = False
    # BFS loop. list.pop(0) is O(n) per dequeue; fine for small graphs.
    while len(nodes_to_visit) > 0:
        cur = nodes_to_visit.pop(0)
        if cur == dst:
            found = True
            break
        if cache is not None:
            if cache[cur, dst] == 1:
                # Cached positive: cur is known to reach dst; stop early.
                found = True
                break
            elif cache[cur, dst] == -1:
                # Cached negative: do not expand this node.
                continue
        for jj in range(num_nodes):
            if adj[cur, jj] == 1:
                if parent[jj] == -1:
                    nodes_to_visit.append(jj)
                    parent[jj] = cur
    if not found:
        # No path: every node reached from src also fails to reach dst,
        # so record a negative entry for each visited node (and for src).
        if cache is not None:
            #log.info(("Setting -1", src, dst), verbose=2)
            for ii in range(num_nodes):
                if parent[ii] >= 0:
                    cache[ii, dst] = -1
            cache[src, dst] = -1
        return False
    else:
        # Path found: every node on the discovered chain reaches dst.
        if cache is not None:
            # Backtrack from the node where the search stopped (dst itself,
            # or the node whose cache entry produced the hit) up to src.
            while cur != src:
                cur = parent[cur]
                cache[cur, dst] = 1
                #log.info(("Setting 1", cur, dst), verbose=2)
            cache[src, dst] = 1
            #log.info(("Setting 1", src, dst), verbose=2)
        return True
import _ast
import ast
def get_source_ast(name: str) -> ast.Module:
    """
    Parse the Python source file *name* and return its AST.

    :param name: path to a Python source file
    :return: the parsed module AST (``ast.Module``)
    """
    # Annotate with the public ast.Module rather than the private _ast module;
    # they are the same class, so callers are unaffected.
    with open(name, "r") as f:
        source = f.read()
    return ast.parse(source)
import os
def fit_grain_FF_reduced(grain_id):
    """
    Perform non-linear least-square fit for the specified grain.
    Parameters
    ----------
    grain_id : int
        The grain id.
    Returns
    -------
    grain_id : int
        The grain id.
    completeness : float
        The ratio of predicted to measured (observed) Bragg reflections.
    chisq: float
        Figure of merit describing the sum of squared residuals for each Bragg
        reflection in the form (x, y, omega) normalized by the total number of
        degrees of freedom.
    grain_params : array_like
        The optimized grain parameters
        [<orientation [3]>, <centroid [3]> <inverse stretch [6]>].
    Notes
    -----
    input parameters are
        [plane_data, instrument, imgser_dict,
         tth_tol, eta_tol, ome_tol, npdiv, threshold]
    """
    # paramMP is presumably a module-level dict populated by a multiprocessing
    # initializer — TODO confirm against the enclosing module.
    grains_table = paramMP['grains_table']
    plane_data = paramMP['plane_data']
    instrument = paramMP['instrument']
    imgser_dict = paramMP['imgser_dict']
    tth_tol = paramMP['tth_tol']
    eta_tol = paramMP['eta_tol']
    ome_tol = paramMP['ome_tol']
    npdiv = paramMP['npdiv']
    refit = paramMP['refit']
    threshold = paramMP['threshold']
    eta_ranges = paramMP['eta_ranges']
    ome_period = paramMP['ome_period']
    analysis_dirname = paramMP['analysis_dirname']
    prefix = paramMP['spots_filename']
    spots_filename = None if prefix is None else prefix % grain_id
    grain = grains_table[grain_id]
    grain_params = grain[3:15]
    # Iterate over progressively applied (tth, eta, ome) tolerance triplets,
    # refitting the grain at each stage.
    for tols in zip(tth_tol, eta_tol, ome_tol):
        complvec, results = instrument.pull_spots(
            plane_data, grain_params,
            imgser_dict,
            tth_tol=tols[0],
            eta_tol=tols[1],
            ome_tol=tols[2],
            npdiv=npdiv, threshold=threshold,
            eta_ranges=eta_ranges,
            ome_period=ome_period,
            dirname=analysis_dirname, filename=spots_filename,
            return_spot_list=False,
            quiet=True, check_only=False, interp='nearest')
        # ======= DETERMINE VALID REFLECTIONS =======
        culled_results = dict.fromkeys(results)
        num_refl_tot = 0
        num_refl_valid = 0
        for det_key in culled_results:
            panel = instrument.detectors[det_key]
            '''
            grab panel results:
                peak_id
                hkl_id
                hkl
                sum_int
                max_int,
                pred_angs,
                meas_angs,
                meas_xy
            '''
            presults = results[det_key]
            nrefl = len(presults)
            # make data arrays
            refl_ids = np.empty(nrefl)
            max_int = np.empty(nrefl)
            for i, spot_data in enumerate(presults):
                refl_ids[i] = spot_data[0]
                max_int[i] = spot_data[4]
            # negative peak ids denote unindexed/invalid reflections
            valid_refl_ids = refl_ids >= 0
            # find unsaturated spots on this panel
            unsat_spots = np.ones(len(valid_refl_ids), dtype=bool)
            if panel.saturation_level is not None:
                unsat_spots[valid_refl_ids] = \
                    max_int[valid_refl_ids] < panel.saturation_level
            idx = np.logical_and(valid_refl_ids, unsat_spots)
            # if an overlap table has been written, load it and use it
            overlaps = np.zeros_like(idx, dtype=bool)
            try:
                ot = np.load(
                    os.path.join(
                        analysis_dirname, os.path.join(
                            det_key, 'overlap_table.npz'
                        )
                    )
                )
                for key in ot.keys():
                    for this_table in ot[key]:
                        # rows whose first column names this grain overlap it
                        these_overlaps = np.where(
                            this_table[:, 0] == grain_id)[0]
                        if len(these_overlaps) > 0:
                            mark_these = np.array(
                                this_table[these_overlaps, 1], dtype=int
                            )
                            otidx = [
                                np.where(refl_ids == mt)[0]
                                for mt in mark_these
                            ]
                            overlaps[otidx] = True
                idx = np.logical_and(idx, ~overlaps)
                # logger.info("found overlap table for '%s'", det_key)
            except(IOError, IndexError):
                # logger.info("no overlap table found for '%s'", det_key)
                pass
            # attach to proper dict entry
            # FIXME: want to avoid looping again here
            culled_results[det_key] = [presults[i] for i in np.where(idx)[0]]
            num_refl_tot += len(valid_refl_ids)
            num_refl_valid += sum(valid_refl_ids)
            pass  # now we have culled data
        # CAVEAT: completeness from pullspots only; incl saturated and overlaps
        # <JVB 2015-12-15>
        try:
            completeness = num_refl_valid / float(num_refl_tot)
        except(ZeroDivisionError):
            raise RuntimeError(
                "simulated number of relfections is 0; "
                + "check instrument config or grain parameters"
            )
        # ======= DO LEASTSQ FIT =======
        if num_refl_valid <= 12:    # not enough reflections to fit... exit
            return grain_id, completeness, np.inf, grain_params
        else:
            grain_params = fitGrain(
                grain_params, instrument, culled_results,
                plane_data.latVecOps['B'], plane_data.wavelength
            )
            # get chisq
            # TODO: do this while evaluating fit???
            chisq = objFuncFitGrain(
                grain_params[gFlag_ref], grain_params, gFlag_ref,
                instrument,
                culled_results,
                plane_data.latVecOps['B'], plane_data.wavelength,
                ome_period,
                simOnly=False, return_value_flag=2)
            pass  # end conditional on fit
        pass  # end tolerance looping
    # Optional refit pass: re-cull using pixel/omega distances from the
    # fitted solution, then fit once more on the tighter spot set.
    if refit is not None:
        # first get calculated x, y, ome from previous solution
        # NOTE: this result is a dict
        xyo_det_fit_dict = objFuncFitGrain(
            grain_params[gFlag_ref], grain_params, gFlag_ref,
            instrument,
            culled_results,
            plane_data.latVecOps['B'], plane_data.wavelength,
            ome_period,
            simOnly=True, return_value_flag=2)
        # make dict to contain new culled results
        culled_results_r = dict.fromkeys(culled_results)
        num_refl_valid = 0
        for det_key in culled_results_r:
            presults = culled_results[det_key]
            if not presults:
                culled_results_r[det_key] = []
                continue
            ims = imgser_dict[det_key]
            ome_step = sum(np.r_[-1, 1]*ims.metadata['omega'][0, :])
            xyo_det = np.atleast_2d(
                np.vstack([np.r_[x[7], x[6][-1]] for x in presults])
            )
            xyo_det_fit = xyo_det_fit_dict[det_key]
            # NOTE(review): `panel` here is left over from the culling loop
            # above (it still refers to the LAST det_key iterated there), and
            # is not re-looked-up for this det_key — likely a bug; confirm
            # against upstream.
            xpix_tol = refit[0]*panel.pixel_size_col
            ypix_tol = refit[0]*panel.pixel_size_row
            fome_tol = refit[1]*ome_step
            # define difference vectors for spot fits
            x_diff = abs(xyo_det[:, 0] - xyo_det_fit['calc_xy'][:, 0])
            y_diff = abs(xyo_det[:, 1] - xyo_det_fit['calc_xy'][:, 1])
            ome_diff = np.degrees(
                xfcapi.angularDifference(xyo_det[:, 2],
                                         xyo_det_fit['calc_omes'])
            )
            # filter out reflections with centroids more than
            # a pixel and delta omega away from predicted value
            idx_new = np.logical_and(
                x_diff <= xpix_tol,
                np.logical_and(y_diff <= ypix_tol,
                               ome_diff <= fome_tol)
            )
            # attach to proper dict entry
            culled_results_r[det_key] = [
                presults[i] for i in np.where(idx_new)[0]
            ]
            num_refl_valid += sum(idx_new)
            pass
        # only execute fit if left with enough reflections
        if num_refl_valid > 12:
            grain_params = fitGrain(
                grain_params, instrument, culled_results_r,
                plane_data.latVecOps['B'], plane_data.wavelength
            )
            # get chisq
            # TODO: do this while evaluating fit???
            chisq = objFuncFitGrain(
                grain_params[gFlag_ref],
                grain_params, gFlag_ref,
                instrument,
                culled_results_r,
                plane_data.latVecOps['B'], plane_data.wavelength,
                ome_period,
                simOnly=False, return_value_flag=2)
            pass
        pass  # close refit conditional
    return grain_id, completeness, chisq, grain_params
import math
def floor(base):
    """Return the floor of *base* (anything float() accepts) as an int."""
    value = float(base)
    return math.floor(value)
def TranslateSecureTagsForFirewallPolicy(client, secure_tags):
  """Builds firewall policy rule secure tags, resolving namespaced names.

  Args:
    client: compute client
    secure_tags: array of secure tag values

  Returns:
    List of firewall policy rule secure tags
  """
  make_tag = client.messages.FirewallPolicyRuleSecureTag
  result = []
  for value in secure_tags:
    if value.startswith('tagValues/'):
      name = value
    else:
      # Resolve a namespaced tag name to its canonical tagValues/ name.
      name = tag_utils.GetTagValueFromNamespacedName(value).name
    result.append(make_tag(name=name))
  return result
import os
import fnmatch
def _find_files(directory, pattern):
"""Searches a directory finding all files and dirs matching unix pattern.
Args:
directory : (str)
The directory to search in.
patterns : (str)
A unix style pattern to search for. This should be the same style
of pattern that fnmatch or glob would take, and not regex.
Returns:
[str]
A list of file and directory names matching one of the patterns is
returned. The file names are relative to the directory we were
given.
Raises:
N/A
"""
files = [item for item in os.listdir(directory) if fnmatch(item, pattern)]
files.sort(key=lambda v: v.lower())
return files | 5f10d14d6c4c68b68c54ef681c8ad5669e7b03d4 | 23,360 |
from typing import Callable
from typing import Dict
from typing import Any
import functools
def memoize(func: Callable):
    """
    Decorator that caches a function's results keyed on its arguments.

    Repeated calls with the same arguments return the stored result instead
    of re-invoking *func*. More permissive than :func:`functools.lru_cache`
    about which argument kinds can be cached, at the cost of a larger
    per-call overhead.
    """
    results: Dict[Any, Any] = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = _hash_args_kwargs(*args, **kwargs)
        if key not in results:
            results[key] = func(*args, **kwargs)
        return results[key]

    return wrapper
def mini_batch(positive_rdd, negative_rdd, num_iterations):
    """Select the positive and negative slices of the indexed RDDs for one mini-batch."""
    # Per-batch counts: 1 positive per 46 slots, the other 45 are negatives.
    pos_per_batch = int(batch_size / 46)
    neg_per_batch = pos_per_batch * 45
    # Cycle through the 74 available positive examples batch by batch.
    batch_idx = num_iterations % int(74 / pos_per_batch)
    pos_slice = positive_rdd \
        .filter(lambda x: batch_idx * pos_per_batch <= x[1] < (batch_idx + 1) * pos_per_batch) \
        .map(lambda x: (x[0][0], x[0][1]))
    neg_slice = negative_rdd \
        .filter(lambda x: batch_idx * neg_per_batch <= x[1] < (batch_idx + 1) * neg_per_batch) \
        .map(lambda x: (x[0][0], x[0][1]))
    return pos_slice.union(neg_slice)
import math
def draw_polygon(img, max_sides=8, min_len=32, min_label_len=64):
    """ Draw a polygon with a random number of corners and return the position
    of the junctions + line map. Mutates *img* in place (cv.fillPoly).
    Parameters:
      max_sides: maximal number of sides + 1
      min_len: minimum segment length; if a float <= 1, interpreted as a
        fraction of the smaller image dimension
      min_label_len: minimum length for a segment to be labeled; same units
        as min_len
    """
    # NOTE(review): relies on a module-level `random_state` RNG and on helpers
    # (angle_between_vectors, check_segment_len, get_line_map,
    # get_random_color) defined elsewhere in this module.
    num_corners = random_state.randint(3, max_sides)
    min_dim = min(img.shape[0], img.shape[1])
    rad = max(random_state.rand() * min_dim / 2, min_dim / 10)
    # Center of a circle
    x = random_state.randint(rad, img.shape[1] - rad)
    y = random_state.randint(rad, img.shape[0] - rad)
    # Convert length constrain to pixel if given float number
    if isinstance(min_len, float) and min_len <= 1.:
        min_len = int(min_dim * min_len)
    if isinstance(min_label_len, float) and min_label_len <= 1.:
        min_label_len = int(min_dim * min_label_len)
    # Sample num_corners points inside the circle
    slices = np.linspace(0, 2 * math.pi, num_corners + 1)
    angles = [slices[i] + random_state.rand() * (slices[i+1] - slices[i])
              for i in range(num_corners)]
    points = np.array(
        [[int(x + max(random_state.rand(), 0.4) * rad * math.cos(a)),
          int(y + max(random_state.rand(), 0.4) * rad * math.sin(a))]
         for a in angles])
    # Filter the points that are too close or that have an angle too flat
    norms = [np.linalg.norm(points[(i-1) % num_corners, :]
                            - points[i, :]) for i in range(num_corners)]
    mask = np.array(norms) > 0.01
    points = points[mask, :]
    num_corners = points.shape[0]
    # Drop corners whose interior angle is too flat (>= 120 degrees).
    corner_angles = [angle_between_vectors(points[(i-1) % num_corners, :] -
                                           points[i, :],
                                           points[(i+1) % num_corners, :] -
                                           points[i, :])
                     for i in range(num_corners)]
    mask = np.array(corner_angles) < (2 * math.pi / 3)
    points = points[mask, :]
    num_corners = points.shape[0]
    # Get junction pairs from points
    segments = np.zeros([0, 4])
    # Used to record all the segments no matter we are going to label it or not.
    segments_raw = np.zeros([0, 4])
    for idx in range(num_corners):
        if idx == (num_corners - 1):
            p1 = points[idx]
            p2 = points[0]
        else:
            p1 = points[idx]
            p2 = points[idx + 1]
        segment = np.concatenate((p1, p2), axis=0)
        # Only record the segments longer than min_label_len
        seg_len = np.sqrt(np.sum((p1 - p2) ** 2))
        if seg_len >= min_label_len:
            segments = np.concatenate((segments, segment[None, ...]), axis=0)
        segments_raw = np.concatenate((segments_raw, segment[None, ...]),
                                      axis=0)
    # If not enough corner, just regenerate one (recursive retry until a
    # valid polygon is sampled).
    if (num_corners < 3) or check_segment_len(segments_raw, min_len):
        return draw_polygon(img, max_sides, min_len, min_label_len)
    # Get junctions from segments
    junctions_all = np.concatenate((segments[:, :2], segments[:, 2:]), axis=0)
    if junctions_all.shape[0] == 0:
        junc_points = None
        line_map = None
    else:
        junc_points = np.unique(junctions_all, axis=0)
        # Get the line map
        line_map = get_line_map(junc_points, segments)
    corners = points.reshape((-1, 1, 2))
    col = get_random_color(int(np.mean(img)))
    cv.fillPoly(img, [corners], col)
    return {
        "points": junc_points,
        "line_map": line_map
    }
def order_budget_update(request, order_id):
    """
    Update the budget of an order from the request body and return the
    serialized order with HTTP 202.
    """
    incoming = OrderBudgetSerializer(data=request.data)
    # raise_exception=True turns validation failures into a 400 response.
    if incoming.is_valid(raise_exception=True):
        order = get_object_or_404(Order, pk=order_id)
        order.budget = incoming.validated_data['budget']
        order.save()
        return Response(OrderSerializer(order).data,
                        status=status.HTTP_202_ACCEPTED)
def analyse_readability_metrics(article_text):
    """
    Use the textstat library to report multiple readability measures.

    The readability metrics analysed are:
    * The Flesch Reading Ease Score. A score from 100 (very easy to read) to 0 (very confusing).
    * The grade score using the Flesch-Kincaid Grade Formula. For example a score of 9.3 means that a ninth grader would be able to read the document.
    * The FOG index of the given text
    * The SMOG index of the given text
    * The ARI (Automated Readability Index) which approximates the grade level needed to comprehend the text.
    * The grade level of the text using the Coleman-Liau Formula
    * The grade level using the Linsear Write Formula
    * The grade level using the New Dale-Chall Formula.

    :param article_text: The article text to operate on.
    :return: A dict containing all measures. The count fields are always
        populated; for an empty input string every metric is None. (The
        previous guard left the metric names undefined for empty input,
        which raised NameError when building the result dict.)
    """
    sylls = textstat.syllable_count(article_text)
    words = textstat.lexicon_count(article_text)
    sents = textstat.sentence_count(article_text)
    # Default all metrics so an empty article_text no longer crashes with
    # NameError at the return statement below.
    flesch = flesch_k = fog = smog = ari = None
    coleman_l = linsear_write = dale_chall = overall_consensus = None
    if article_text != "":
        # Flesch Reading Ease: 90-100 very easy, 60-69 standard,
        # 0-29 very confusing.
        flesch = textstat.flesch_reading_ease(article_text)
        # Flesch-Kincaid grade: e.g. 9.3 ~= readable by a ninth grader.
        flesch_k = textstat.flesch_kincaid_grade(article_text)
        # Gunning FOG index.
        fog = textstat.gunning_fog(article_text)
        # SMOG index.
        smog = textstat.smog_index(article_text)
        # ARI: approximate grade level needed to comprehend the text.
        ari = textstat.automated_readability_index(article_text)
        # Coleman-Liau grade level.
        coleman_l = textstat.coleman_liau_index(article_text)
        # Linsear Write grade level.
        linsear_write = textstat.linsear_write_formula(article_text)
        # New Dale-Chall score; uses a lookup table of the 3000 most
        # common English words, unlike the other tests.
        dale_chall = textstat.dale_chall_readability_score(article_text)
        # Consensus grade level across all of the above tests.
        overall_consensus = textstat.text_standard(article_text)
    return {
        "syllable_count": sylls,
        "word_count": words,
        "sentence_count": sents,
        "flesch_reading_ease": flesch,
        "flesch_kincaid_grade": flesch_k,
        "gunning_fog": fog,
        "smog_index": smog,
        "automated_readability_index": ari,
        "coleman_liau_index": coleman_l,
        "linsear_write_formula": linsear_write,
        "dale_chall_readability_score": dale_chall,
        "overall_consensus_grade": overall_consensus
    }
def dx(data):
    """
    Differentiate along the last axis using central differences.

    Interior points use the central difference ``data[i+1] - data[i-1]``;
    the two edge points fall back to one-sided differences between the
    nearest neighbours.

    Parameters
    ----------
    data : ndarray
        Array of NMR data.

    Returns
    -------
    ndata : ndarray
        Derivative of NMR data.
    """
    out = np.empty_like(data)
    out[..., 0] = data[..., 1] - data[..., 0]        # leading edge
    out[..., -1] = data[..., -1] - data[..., -2]     # trailing edge
    out[..., 1:-1] = data[..., 2:] - data[..., :-2]  # interior (central)
    return out
def grad(w):
    """Gradient of the mean squared error at *w* (uses module globals Xbar, y)."""
    n_samples = Xbar.shape[0]
    residual = Xbar.dot(w) - y
    return 1/n_samples * Xbar.T.dot(residual)
import graphsurgeon as gs
import importlib, sys
def from_tensorflow(graphdef, output_nodes=[], preprocessor=None, **kwargs):
    """
    Converts a TensorFlow GraphDef to a UFF model.
    Args:
        graphdef (tensorflow.GraphDef): The TensorFlow graph to convert.
        output_nodes (list(str)): The names of the outputs of the graph. If not provided, graphsurgeon is used to automatically deduce output nodes.
        output_filename (str): The UFF file to write.
        preprocessor (str): The path to a preprocessing script that will be executed before the converter. This script should define a ``preprocess`` function which accepts a graphsurgeon DynamicGraph and modifies it in place.
        write_preprocessed (bool): If set to True, the converter will write out the preprocessed graph as well as a TensorBoard visualization. Must be used in conjunction with output_filename.
        text (bool): If set to True, the converter will also write out a human readable UFF file. Must be used in conjunction with output_filename.
        quiet (bool): If set to True, suppresses informational messages. Errors may still be printed.
        list_nodes (bool): If set to True, the converter displays a list of all nodes present in the graph.
        debug_mode (bool): If set to True, the converter prints verbose debug messages.
        return_graph_info (bool): If set to True, this function returns the graph input and output nodes in addition to the serialized UFF graph.
    Returns:
        serialized UFF MetaGraph (str)
        OR, if return_graph_info is set to True,
        serialized UFF MetaGraph (str), graph inputs (list(tensorflow.NodeDef)), graph outputs (list(tensorflow.NodeDef))
    """
    # Unpack the optional keyword arguments with their defaults.
    quiet = False
    input_node = []
    text = False
    list_nodes = False
    output_filename = None
    write_preprocessed = False
    debug_mode = False
    return_graph_info = False
    for k, v in kwargs.items():
        if k == "quiet":
            quiet = v
        elif k == "input_node":
            input_node = v
        elif k == "text":
            text = v
        elif k == "list_nodes":
            list_nodes = v
        elif k == "output_filename":
            output_filename = v
        elif k == "write_preprocessed":
            write_preprocessed = v
        elif k == "debug_mode":
            debug_mode = v
        elif k == "return_graph_info":
            return_graph_info = v
    tf_supported_ver = "1.12.0"
    if not quiet:
        print("NOTE: UFF has been tested with TensorFlow " + str(tf_supported_ver) + ". Other versions are not guaranteed to work")
    if tf.__version__ != tf_supported_ver:
        print("WARNING: The version of TensorFlow installed on this system is not guaranteed to work with UFF.")
    # BUG FIX: the try block below was empty (a SyntaxError); the guarded
    # graphsurgeon import belongs inside it so that a missing dependency
    # still yields the helpful installation message.
    try:
        import graphsurgeon as gs
    except ImportError as err:
        raise ImportError("""ERROR: Failed to import module ({})
Please make sure you have graphsurgeon installed.
For installation instructions, see:
https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/#python and click on the 'TensorRT Python API' link""".format(err))
    # Create a dynamic graph so we can adjust it as needed.
    dynamic_graph = gs.DynamicGraph(graphdef)
    # Always remove assert ops.
    assert_nodes = dynamic_graph.find_nodes_by_op("Assert")
    dynamic_graph.remove(assert_nodes, remove_exclusive_dependencies=True)
    # Now, run the preprocessor, if provided.
    if preprocessor:
        # Temporarily insert this working dir into the sys.path
        sys.path.insert(0, os.path.dirname(preprocessor))
        # Import and execute!
        pre = importlib.import_module(os.path.splitext(os.path.basename(preprocessor))[0])
        pre.preprocess(dynamic_graph)
        # Now clean up, by removing the directory from the system path.
        del sys.path[0]
    # Run process_dilated_conv() and process_softmax() so the user doesn't have to.
    gs.extras.process_dilated_conv(dynamic_graph)
    gs.extras.process_softmax(dynamic_graph)
    # Get the modified graphdef back.
    graphdef = dynamic_graph.as_graph_def()
    if write_preprocessed and output_filename:
        preprocessed_output_name = os.path.splitext(output_filename)[0] + "_preprocessed"
        dynamic_graph.write(preprocessed_output_name + ".pb")
        dynamic_graph.write_tensorboard(preprocessed_output_name)
        if not quiet:
            print("Preprocessed graph written to " + preprocessed_output_name + ".pb")
            print("TensorBoard visualization written to " + preprocessed_output_name)
    if not quiet:
        print("UFF Version " + uff.__version__)
    if debug_mode:
        _debug_print("Debug Mode is ENABLED")
    if not input_node:
        if not quiet:
            print("=== Automatically deduced input nodes ===")
            print(str(dynamic_graph.graph_inputs))
            print("=========================================\n")
    # Deduce the likely graph outputs if none are provided
    if not output_nodes:
        output_nodes = [node.name for node in dynamic_graph.graph_outputs]
        if not quiet:
            print("=== Automatically deduced output nodes ===")
            print(str(dynamic_graph.graph_outputs))
            print("==========================================\n")
    if list_nodes:
        for i, node in enumerate(graphdef.node):
            print('%i %s: "%s"' % (i + 1, node.op, node.name))
        return
    # Normalize output node names (indices are resolved to names).
    for i, name in enumerate(output_nodes):
        if debug_mode:
            _debug_print("Enumerating outputs")
        output_nodes[i] = tf2uff.convert_node_name_or_index_to_name(
            name, graphdef.node, debug_mode=debug_mode)
        if not quiet:
            print("Using output node", output_nodes[i])
    # Parse input replacements of the form "name,new_name,dtype,shape".
    input_replacements = {}
    for i, name_data in enumerate(input_node):
        name, new_name, dtype, shape = name_data.split(',', 3)
        name = tf2uff.convert_node_name_or_index_to_name(name, graphdef.node, debug_mode=debug_mode)
        if new_name == '':
            new_name = name
        dtype = np.dtype(dtype)
        shape = [int(x) for x in shape.split(',')]
        input_replacements[name] = (new_name, dtype, shape)
        if not quiet:
            print("Using input node", name)
    if not quiet:
        print("Converting to UFF graph")
    uff_metagraph = uff.model.MetaGraph()
    tf2uff.add_custom_descriptors(uff_metagraph)
    uff_graph = tf2uff.convert_tf2uff_graph(
        graphdef,
        uff_metagraph,
        output_nodes=output_nodes,
        input_replacements=input_replacements,
        name="main",
        debug_mode=debug_mode)
    uff_metagraph_proto = uff_metagraph.to_uff()
    if not quiet:
        print('No. nodes:', len(uff_graph.nodes))
    if output_filename:
        with open(output_filename, 'wb') as f:
            f.write(uff_metagraph_proto.SerializeToString())
        if not quiet:
            print("UFF Output written to", output_filename)
    if text:  # ASK: Would you want to return the prototxt?
        if not output_filename:
            raise ValueError(
                "Requested prototxt but did not provide file path")
        output_filename_txt = _replace_ext(output_filename, '.pbtxt')
        with open(output_filename_txt, 'w') as f:
            f.write(str(uff_metagraph.to_uff(debug=True)))
        if not quiet:
            print("UFF Text Output written to", output_filename_txt)
    # Always return the UFF graph!
    if return_graph_info:
        return uff_metagraph_proto.SerializeToString(), dynamic_graph.graph_inputs, dynamic_graph.graph_outputs
    else:
        return uff_metagraph_proto.SerializeToString()
import random
def random_binary():
    """
    Return 500 random bits; used to exercise the `cached` view decorator key.
    """
    bits = []
    for _ in range(500):
        bits.append(random.randrange(0, 2))
    return bits
def remove_from_end(string, text_to_remove):
    """
    Remove a String from the end of a string if it exists

    Args:
        string (str): string to edit (None is returned unchanged)
        text_to_remove (str): the text to remove

    Returns: the string with the suffix removed, or the original string if
    the suffix is absent, empty, or the input is None
    """
    # Guard against an empty suffix: str.endswith("") is always True, and
    # string[:-len("")] == string[:0] would wrongly return "" instead of
    # the unchanged string.
    if string is not None and text_to_remove and string.endswith(text_to_remove):
        return string[:-len(text_to_remove)]
    return string
from .compose import ComposeHole
from .directconv import DirectConv
from .matmuls import MatmulHole
from .reducesum import ReduceSum
from typing import Optional
from typing import Iterable
from typing import Any
def spec_to_hole(
    spec: specs.Spec, inputs: Optional[Iterable] = None, output: Optional[Any] = None
) -> "Impl":
    """Return a default, incomplete schedule (hole) for a Spec over the given operands.

    When `inputs` or `output` is omitted, default Tensors are constructed
    from the corresponding TensorSpecs using the current target.
    """
    # The Impl classes are imported at module scope to avoid an import cycle.
    # TODO: Can we move this to its own file instead?
    if inputs is None:
        target = current_target()
        inputs = tuple(target.tensor(s) for s in spec.inputs)
    if output is None:
        output = current_target().tensor(spec.output)
    operands = tuple(inputs)
    if isinstance(spec, specs.Convolution):
        assert len(operands) == 2, f"Expected 2 Tensor/Tile operands; got {len(operands)}"
        return DirectConv(
            lhs=operands[0], rhs=operands[1], output=output,
            serial_only=spec.serial_only,
        )
    if isinstance(spec, specs.Matmul):
        assert len(operands) == 2, f"Expected 2 Tensor/Tile operands; got {len(operands)}"
        return MatmulHole(
            lhs=operands[0], rhs=operands[1], output=output,
            serial_only=spec.serial_only,
        )
    if isinstance(spec, specs.ReduceSum):
        assert len(operands) == 1, f"Expected 1 Tensor/Tile operands; got {len(operands)}"
        return ReduceSum(source=operands[0], output=output,
                         serial_only=spec.serial_only)
    if isinstance(spec, specs.Compose):
        return ComposeHole(spec, inputs=operands, output=output)
    raise NotImplementedError()
def get_node_number(self, node, typ) -> str:
    """Return the dotted figure number assigned to *node* for numbering type *typ*."""
    ids = node.attributes.get("ids", [])[0]
    if isinstance(self, LaTeXTranslator):
        docname = find_parent(self.builder.env, node, "section")
    else:
        docname = node.attributes.get("docname", "")
    # LaTeX builders do not expose builder.fignumbers, so read the numbering
    # from the environment's toc_fignumbers map instead.
    doc_fignumbers = self.builder.env.toc_fignumbers.get(docname, {})
    parts = doc_fignumbers.get(typ, {}).get(ids, ())
    return ".".join(str(part) for part in parts)
import os
import fnmatch
def count_mp3_files_below(adir_path):
    """Count all mp3 files below the given dir, including subdirs.

    Args:
        adir_path: root directory of the recursive search.

    Returns:
        int: number of ``*.mp3`` files found.
    """
    # Sum the per-directory match counts directly rather than building a
    # list of full paths that was only used for its length.
    return sum(
        len(fnmatch.filter(filenames, '*.mp3'))
        for _root, _dirnames, filenames in os.walk(adir_path)
    )
def get_client(client, aws_access_key_id, aws_secret_access_key, region=None):
    """Shortcut for getting an initialized instance of the boto3 client."""
    credentials = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key,
        'region_name': region,
    }
    return boto3.client(client, **credentials)
import logging
import traceback
import json
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP entry point for status/http: 200 for an empty body, 202 otherwise."""
    logging.info('Status processed a request.')
    try:
        # Default to 200; upgrade to 202 when a request body is present.
        response = get_http_response_by_status(200)
        if req.get_body() and len(req.get_body()):
            response = get_http_response_by_status(202)
        return func_json_response(
            response, {"Access-Control-Allow-Origin": "*"}, "message")
    #pylint: disable=broad-except
    except Exception as err:
        logging.error("Status HTTP error occurred: %s", traceback.format_exc())
        payload = json.dumps(jsend.error(f"This endpoint encountered an error. {err}"))
        return func.HttpResponse(payload, status_code=500)
def _variable_map_by_name(variables):
"""
Returns Dict,representing referenced variable fields mapped by name.
Keyword Parameters:
variables -- list of 'variable_python_type' Warehouse support DTOs
>>> from pprint import pprint
>>> var1 = { 'column':'frob_hz', 'title':'Frobniz Resonance (Hz)'
... ,'python_type': 'float'
... ,'table': 'foo_fact'}
>>> list1 = [var1]
>>> pprint(_variable_map_by_name(list1))
{'frob_hz': {'column': 'frob_hz',
'python_type': 'float',
'table': 'foo_fact',
'title': 'Frobniz Resonance (Hz)'}}
"""
variable_by_field = {}
for var in variables:
field_name = var['column']
variable_by_field[field_name] = var
return variable_by_field | 91c27ceb84614313d036ec216ef4c4d567a68255 | 23,376 |
def true_divide(x, y):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  Integer arguments are cast to floating types before dividing, matching
  `x / y` in Python 3 (and Python 2.7 with `from __future__ import
  division`). For integer division that rounds down, use `x // y` or
  `tf.math.floordiv` instead. Floating inputs keep their type; integral
  inputs are cast to `float32` (`int8`/`int16`) or `float64`
  (`int32`/`int64`), matching NumPy.

  Args:
    x (np.ndarray): input tensor.
    y (np.ndarray): another tensor.

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  quotient = np.true_divide(x, y)
  return quotient
class ObjectNotFoundError(NDARError):
    """Raised when an S3 object cannot be found."""

    def __init__(self, object):
        # BUG FIX: this was declared with `def` instead of `class`, so
        # calling ObjectNotFoundError(...) returned None rather than an
        # exception instance. The attribute name is kept for callers that
        # read `.object`.
        self.object = object

    def __str__(self):
        return 'Object not found: %s' % self.object
def hello():
    """
    Say hello using a template file.

    Flask view: renders and returns the ``index.html`` template.
    """
    return render_template('index.html')
def pause_sale(ctx):
    """
    Pause the token sale
    :param ctx:GetContext() used to access contract storage
    :return:bool Whether pausing the sale was successful
    """
    # Only the token owner is allowed to pause the sale.
    if not CheckWitness(TOKEN_OWNER):
        return False
    Put(ctx, SALE_STATUS_KEY, SALE_PAUSED)
    return True
def parse_item_hash(value):
    """
    Parse the item-hash datatype, e.g.
    sha-256:5b8e5ee02caedd0a6f3539b19d6b462dd2d08918764e7f476506996024f7b84a

    :param value: a string (or an already-parsed ItemHash) to parse
    :return: parsed ItemHash value
    """
    # Already parsed: pass through unchanged.
    if isinstance(value, ItemHash):
        return value
    if not isinstance(value, str):
        raise ValueError('value must be a str')
    return ItemHash(value)
def __convert_swizzle_scale(scale, export_settings):
    """Convert a scale from Blender coordinate system to glTF coordinate system."""
    sx, sy, sz = scale[0], scale[1], scale[2]
    if export_settings[gltf2_blender_export_keys.YUP]:
        # Y-up export: swap the Y and Z components.
        return Vector((sx, sz, sy))
    return Vector((sx, sy, sz))
import time
import html
from sys import audit
def launch_plugin_flow(current, client_id, rekall_session, plugin, plugin_arg):
    """Launch the flow on the client.

    Builds a Flow record instructing the client to run a single Rekall
    plugin, persists it to the ``flows`` table, and notifies the client
    via Firebase so the work gets picked up.

    Args:
        current: Request context (provides ``db`` and configuration).
        client_id: Identifier of the client that should run the plugin.
        rekall_session: Session argument dict, validated against SessionAPI.
        plugin: Name of the Rekall plugin to run.
        plugin_arg: Plugin argument dict, validated against the plugin spec.

    Returns:
        An empty dict (API result payload).

    Raises:
        ValueError: If ``plugin`` is not a known Rekall plugin.
    """
    db = current.db
    flow_id = utils.new_flow_id()
    spec = plugins.RekallAPI(current).get(plugin)
    if not spec:
        raise ValueError("Unknown plugin")
    # Validate both plugin args and session args.
    validate_plugin_args(plugin_arg, spec)
    validate_plugin_args(rekall_session, plugins.SessionAPI(current))
    # The ticket tells the client where to post status updates for this
    # flow; plugin output is uploaded in parts under collection/<flow_id>/.
    # NOTE(review): `html.URL` is used as a web2py-style URL helper here —
    # the `import html` above resolves to the stdlib html module, which has
    # no URL; verify the intended import.
    flow = agent.Flow.from_keywords(
        flow_id=flow_id,
        created_time=time.time(),
        ticket=dict(
            location=dict(
                __type__="HTTPLocation",
                base=utils.route_api('/control/ticket'),
                path_prefix=flow_id,
            )),
        actions=[
            dict(__type__="PluginAction",
                 plugin=plugin,
                 args=plugin_arg,
                 rekall_session=rekall_session,
                 collection=dict(
                     __type__="JSONCollection",
                     location=dict(
                         __type__="BlobUploader",
                         base=html.URL(
                             c="api", f="control", args=['upload'], host=True),
                         path_template=(
                             "collection/%s/{part}" % flow_id),
                     ))
                 )])
    if rekall_session.get("also_upload_files"):
        # Optionally allow the client to upload raw files for this flow.
        flow.file_upload = dict(
            __type__="FileUploadLocation",
            flow_id=flow_id,
            base=html.URL(c="api", f='control/file_upload',
                          host=True))
    db.flows.insert(
        flow_id=flow_id,
        client_id=client_id,
        status=agent.FlowStatus.from_keywords(
            timestamp=time.time(),
            client_id=client_id,
            flow_id=flow_id,
            status="Pending"),
        creator=utils.get_current_username(current),
        flow=flow,
        timestamp=flow.created_time.timestamp,
    )
    firebase.notify_client(client_id)
    # NOTE(review): `audit` is used as a module with a .log() method; the
    # `from sys import audit` import above looks wrong — confirm the
    # intended audit module.
    audit.log(current, "FlowLaunchPlugin", flow_id=flow_id, plugin=plugin,
              client_id=client_id)
    return {}
from pathlib import Path
from typing import Type
from typing import cast
def load_config_from_expt_dir(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> OptimizerConfig:
    """
    Locate a config file in experiment_dir or one of its subdirectories (for a per-seed config).
    Config files are now normally in seed subdirectories, as they contain seed values.
    """
    per_seed_configs = sorted(experiment_dir.glob(f"*/seed*/{CONFIG_FILENAME}"))
    if per_seed_configs:
        # Use the first per-seed config found (sorted for determinism).
        config_file = per_seed_configs[0]
    else:
        # Fall back to a config at the top of the experiment directory.
        config_file = experiment_dir / CONFIG_FILENAME
    if not config_file.exists():
        raise FileNotFoundError(f"Cannot find {CONFIG_FILENAME} at or under {experiment_dir}")  # pragma: no cover
    return cast(loop_config, simple_load_config(config_file, config_class=loop_config))
def glsadf_delay(order, stage):
    """Delay for glsadf

    Allocates a zero-filled delay buffer sized for a glsadf filter.

    Parameters
    ----------
    order : int
        Order of glsadf filter coefficients
    stage : int
        -1 / gamma
    Returns
    -------
    delay : array
        Delay
    """
    length = _sptk.glsadf_delay_length(order, stage)
    return np.zeros(length)
def find_next(s: str) -> list:
    """
    Compute the KMP "next" (failure) array of ``s``.

    ``result[i]`` is the length of the longest proper prefix of ``s[:i]``
    that is also a suffix of ``s[:i]``; ``result[0]`` is -1 by convention.

    Bug fixes in this revision:
    * empty input previously raised IndexError; now returns [].
    * the zero case wrote to ``result[i+1]`` (off-by-one; could raise
      IndexError on the last position).
    * ``i`` advanced even when falling back via ``cn = result[cn]``,
      which skipped positions and produced wrong values.
    """
    if not s:
        return []
    if len(s) == 1:
        return [-1]
    result = [0] * len(s)
    result[0] = -1
    result[1] = 0
    i = 2   # position currently being filled
    cn = 0  # length of the candidate matched prefix
    while i < len(result):
        if s[i - 1] == s[cn]:
            cn += 1
            result[i] = cn
            i += 1
        elif cn > 0:
            # Fall back to a shorter border WITHOUT advancing i.
            cn = result[cn]
        else:
            result[i] = 0
            i += 1
    return result
from datetime import datetime
def _read_date():
    """Read a date from input; default to today.

    Loops until the entered string parses as ``YYYY-MM-DD`` and returns
    the raw string (not a date object).

    Bug fix: the module does ``from datetime import datetime``, so the
    original calls ``datetime.date.today()`` and
    ``datetime.datetime.strptime`` raised AttributeError; use the
    ``datetime`` class directly instead.
    """
    while True:
        # Default to today's date in ISO format.
        dts = prompt("Date", default=str(datetime.today().date()))
        try:
            datetime.strptime(dts, "%Y-%m-%d")
            return dts
        except ValueError:
            # Invalid format — ask again.
            continue
def handle_pending_submission(self, request, layout=None):
    """ Renders a pending submission, takes it's input and allows the
    user to turn the submission into a complete submission, once all data
    is valid.
    This view has two states, a completable state where the form values
    are displayed without a form and an edit state, where a form is rendered
    to change the values.
    Takes the following query parameters for customization::
        * ``edit`` render the view in the edit state
        * ``return-to`` the view redirects to this url once complete
        * ``title`` a custom title (required if external submission)
        * ``quiet`` no success messages are rendered if present
    """
    collection = FormCollection(request.session)
    # Rebuild the form from the data stored on this submission.
    form = request.get_form(self.form_class, data=self.data)
    form.action = request.link(self)
    form.model = self
    # Completable state: validate stored data so errors are visible.
    if 'edit' not in request.GET:
        form.validate()
    # On a plain GET there is no CSRF token to check; on a valid POST
    # persist the submitted values back onto the submission.
    if not request.POST:
        form.ignore_csrf_error()
    elif not form.errors:
        collection.submissions.update(self, form)
    completable = not form.errors and 'edit' not in request.GET
    if completable and 'return-to' in request.GET:
        if 'quiet' not in request.GET:
            request.success(_("Your changes were saved"))
        # the default url should actually never be called
        return request.redirect(request.url)
    if 'title' in request.GET:
        title = request.GET['title']
    else:
        title = self.form.title
    price = get_price(request, form, self)
    # retain some parameters in links (the rest throw away)
    form.action = copy_query(
        request, form.action, ('return-to', 'title', 'quiet'))
    edit_link = URL(copy_query(
        request, request.link(self), ('title', )))
    # the edit link always points to the editable state
    edit_link = edit_link.query_param('edit', '')
    edit_link = edit_link.as_string()
    return {
        'layout': layout or FormSubmissionLayout(self, request, title),
        'title': title,
        'form': form,
        'completable': completable,
        'edit_link': edit_link,
        'complete_link': request.link(self, 'complete'),
        'model': self,
        'price': price,
        # Only offer an online-payment button when the form has a price.
        'checkout_button': price and request.app.checkout_button(
            button_label=request.translate(_("Pay Online and Complete")),
            title=title,
            price=price,
            email=self.email or self.get_email_field_data(form),
            locale=request.locale
        )
    }
import functools
def i18n_view(tpl_base_name=None, **defaults):
    """
    Renders a template whose name is suffixed with the current locale.
    Unlike the normal view decorator, the template name should not carry an
    extension; the lower-cased locale identifier is appended to the base
    name with an underscore ('_') separator. When no locale is available,
    the base name is used as-is.

    Any additional keyword arguments are used as default template variables.
    For example::
        @i18n_view('foo')
        def render_foo():
            # Renders 'foo_en' for English locale, 'foo_fr' for French, etc.
            return
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                tpl_name = '%s_%s' % (tpl_base_name, request.locale.lower())
            except AttributeError:
                # No locale on the request — fall back to the base name.
                tpl_name = tpl_base_name
            result = func(*args, **kwargs)
            tplvars = defaults.copy()
            if result is None:
                return template(tpl_name, **tplvars)
            if isinstance(result, (dict, DictMixin)):
                tplvars.update(result)
                return template(tpl_name, **tplvars)
            # Non-dict results (e.g. raw responses) pass through untouched.
            return result
        return wrapper
    return decorator
def git_file_list(path_patterns=()):
    """Returns: List of files in current git revision matching `path_patterns`.
    This is basically git ls-files.

    Bug fix: `path_patterns` defaults to a tuple, and `list + tuple`
    raises TypeError; convert it to a list before concatenating (this
    also accepts any iterable of patterns).
    """
    return exec_output_lines(
        ['git', 'ls-files', '--exclude-standard'] + list(path_patterns), False)
def unreserve_id():
    """
    Removes the reservation of a SCSI ID as well as the memo for the reservation
    """
    scsi_id = request.form.get("scsi_id")
    remaining_ids = get_reserved_ids()["ids"]
    remaining_ids.remove(scsi_id)
    outcome = reserve_scsi_ids(remaining_ids)
    if not outcome["status"]:
        # Reservation update failed — report the error and bail out.
        flash(_(u"Failed to release the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
        flash(outcome["msg"], "error")
        return redirect(url_for("index"))
    # Clear the memo for the released ID.
    RESERVATIONS[int(scsi_id)] = ""
    flash(_(u"Released the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
    return redirect(url_for("index"))
def integral_sqrt_a2_minus_x2(x, a):
    r"""Integral of $\sqrt(a^2 - x^2)$ --- see (30) at
    http://integral-table.com.

    Fix: the docstring is now a raw string; ``\s`` is an invalid escape
    sequence in a normal string literal (DeprecationWarning, and a
    SyntaxError in future Python versions).
    """
    return 0.5*x*np.sqrt(a**2 - x**2) + 0.5*a**2*np.arctan2(x, np.sqrt(a**2 - x**2))
def read_length(file_obj):  # pragma: no cover
    """ Numpy trick to get a 32-bit length from four bytes
    Equivalent to struct.unpack('<i'), but suitable for numba-jit

    Reads four bytes and combines them little-endian via Horner's scheme.
    """
    raw = file_obj.read(4)
    # ((b3*256 + b2)*256 + b1)*256 + b0 == little-endian 32-bit value
    return ((raw[3] * 256 + raw[2]) * 256 + raw[1]) * 256 + raw[0]
import tempfile
import os
def check_and_reorder_reads(input_files, output_folder, temp_output_files):
    """ Check if reads are ordered and if not reorder

    Compares read identifiers between the two paired FASTQ files (4 lines
    per read). If the first ~100 pairs are not in matching order, both
    files are rewritten into temp files sorted by read id, and
    ``input_files`` is updated in place to point at the reordered files.

    Returns the (possibly updated) list of input file paths.
    """
    # read in the ids from the first pair (only check the first 100)
    ids = []
    for count, lines in zip(range(100),read_file_n_lines(input_files[0],4)):
        ids.append(get_read_id_minus_pair(lines[0]))
    mismatch=False
    # Compare the second file's ids against the first file's, in order.
    for lines, pair_id in zip(read_file_n_lines(input_files[1],4), ids):
        if not get_read_id_minus_pair(lines[0]) == pair_id:
            mismatch=True
            break
    # reorder the pairs to match
    new_file_list = []
    if mismatch:
        message="Reordering read identifiers ..."
        print(message+"\n")
        logger.info(message)
        for index, infile in enumerate(input_files):
            # Create a temp output file named after the original input.
            file_out, new_file=tempfile.mkstemp(prefix="reordered_",
                suffix="_"+file_without_extension(infile), dir=output_folder)
            os.close(file_out)
            # read in all of the sequences then sort and write out
            # NOTE(review): this holds the whole FASTQ file in memory;
            # assumes inputs fit in RAM.
            ids={}
            for lines in read_file_n_lines(infile,4):
                id=get_read_id_minus_pair(lines[0])
                ids[id]=lines
            with open(new_file,"w") as file_handle:
                for id in sorted(ids.keys()):
                    file_handle.write("".join(ids[id]))
            # set the input file to the reordered temp file
            input_files[index]=new_file
            new_file_list.append(new_file)
        # add the temp file to the list and remove extra that are not needed
        update_temp_output_files(temp_output_files, new_file_list, input_files)
    return input_files
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None, prefix=DEFAULT_PREFIX, create_id=None):
    """
    Decorator to ensure function only runs when it is unique holder of the resource.
    Any invocations of the functions before the first is done
    will raise RuntimeError.
    Locks are stored in redis with default prefix: `lock:acquires_lock`
    Arguments:
        expires(timedelta|int): Expiry time of lock, way more than expected time to run.
            Intended as a failsafe clean-up mechanism.
        should_fail(bool): Should error be raised if failed to acquire lock.
        should_wait(bool): Should this task wait for lock to be released.
        resource(str): Resource identifier, by default taken from function name.
        prefix(str): Change prefix added to redis key (the 'lock:' part will always be added)
        create_id(function): Change suffix added to redis key to lock only specific function call based on arguments.
    Example:
        You have a celery task and you want to ensure it is never
        executed concurrently:
        @shared_task
        @acquire_lock(60, resource='foo')
        def foo():
            ...
    """
    # This is just a tiny wrapper around redis_lock
    # 1) acquire lock or fail
    # 2) run function
    # 3) release lock
    def decorator(f):
        # The resource name is resolved once, at decoration time.
        # NOTE(review): because `resource` is mutated via nonlocal, reusing
        # the same decorator object on a second function would prepend the
        # prefix twice — confirm the decorator is only applied once.
        nonlocal resource
        if resource is None:
            resource = f.__name__
        resource = '%s:%s' % (prefix, resource)
        @wraps(f)
        def wrapper(*args, **kwargs):
            lock_suffix = None
            if create_id:
                # Per-call lock key derived from the call arguments.
                lock_suffix = create_id(*args, **kwargs)
            # The context manager is annoying and always blocking...
            lock = get_lock(
                resource='%s:%s' % (resource, lock_suffix) if lock_suffix else resource,
                expires=expires,
            )
            lock_acquired = False
            # Get default lock blocking mode
            # Copying to local variable so original variable would not be touched
            nonlocal should_wait
            is_blocking = should_wait
            # Callers may override behavior per-call via special kwargs,
            # which are popped so they never reach the wrapped function.
            should_execute_if_lock_fails = False
            if 'should_execute_if_lock_fails' in kwargs:
                should_execute_if_lock_fails = kwargs.pop("should_execute_if_lock_fails")
            # If decorated fn is called with should_wait kwarg
            # Override lock blocking mode
            if 'should_wait' in kwargs:
                is_blocking = kwargs.pop('should_wait')
            if is_blocking:
                logger.debug('Waiting for resource "%s"', resource)
            if not lock.acquire(blocking=is_blocking):
                if should_fail:
                    raise RuntimeError("Failed to acquire lock: %s" % resource)
                logger.warning('Failed to acquire lock: %s', resource)
                if not should_execute_if_lock_fails:
                    # Skip the call entirely when the lock is held elsewhere.
                    return False
            else:
                lock_acquired = True
            try:
                return f(*args, **kwargs)
            finally:
                # Only release a lock we actually acquired; a failed release
                # is logged but never masks the function's result/exception.
                try:
                    if lock_acquired:
                        lock.release()
                except Exception as e:
                    logger.exception('Failed to release lock: %s', str(e), exc_info=False)
        return wrapper
    return decorator
import json
def ifttt_comparator_alpha_options():
    """ Option values for alphanumeric comparators """
    errmsg = check_ifttt_service_key()
    if errmsg:
        return errmsg, 401
    # (value, label) pairs, serialized in this exact order.
    options = [
        ("ignore", "ignore"),
        ("equal", "is equal to"),
        ("not_equal", "is not equal to"),
        ("cont", "contains"),
        ("not_cont", "does not contain"),
        ("equal_nc", "is equal to (ignore case)"),
        ("not_equal_nc", "is not equal to (ignore case)"),
        ("cont_nc", "contains (ignore case)"),
        ("not_cont_nc", "does not contain (ignore case)"),
        ("in", "in [json array]"),
        ("not_in", "not in [json array]"),
        ("in_nc", "in [json array] (ignore case)"),
        ("not_in_nc", "not in [json array] (ignore case)"),
    ]
    data = {"data": [{"value": value, "label": label} for value, label in options]}
    return json.dumps(data)
def flat_abs_maximum(data, preserve_sign=True):
    """
    Return the array element with the largest magnitude.

    With sign preservation (the default), the original signed value is
    returned: for [-75, -25, 0, 25, 50] the result is -75, since it has
    the greatest absolute value. With preserve_sign=False this behaves
    like a composite of abs and max.

    :param data: data array source
    :param preserve_sign: whether or not to preserve the sign of the output, default is True
    :return: largest absolute value in the data array
    """
    arr = np.asarray(data)
    magnitudes = np.abs(arr)
    # Locate the peak-magnitude element in the (possibly N-d) array.
    position = np.unravel_index(np.argmax(magnitudes), arr.shape)
    if preserve_sign:
        return arr[position]
    return magnitudes[position]
def get_image_to_groundplane_homography(P):
    """Given the 3x4 camera projection matrix P, returns the homography
    mapping image plane points onto the ground plane (the inverse of the
    ground-plane-to-image homography)."""
    ground_to_image = get_groundplane_to_image_homography(P)
    return np.linalg.inv(ground_to_image)
import logging
def create_object_detection_edge_training(
    train_object_detection_edge_model_request: TrainImageEdgeModel,
):
    """[Train a Object Detection Model for Edge in AutoML GCP]
    Args:
        train_object_detection_edge_model_request (TrainImageEdgeModel): [Based on Input Schema]
    Raises:
        error: [Error]
    Returns:
        [type]: [description]
    """
    try:
        logging.info(
            f"Create Object Detection Model Router: {train_object_detection_edge_model_request}"
        )
        controller = TrainModelController()
        return controller.train_object_detection_edge_model_controller(
            request=train_object_detection_edge_model_request
        )
    except Exception as error:
        # Log the failure, then propagate it to the API layer unchanged.
        logging.error(f"{error=}")
        raise error
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.