| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def controllerWalkLeft(node, add=False, multi=False):
"""Pick walks the next sibling to the left using controller tag
Arguments:
node (PyNode): Node to start the pick walk from (currently unused; the walk operates on the active selection)
add (bool, optional): If true add to selection
multi (bool, optional): If true, selects all the siblings
"""
nodes = _getControllerWalkSiblings(pm.selected(), "left", multi)
pm.select(nodes, add=add)
| 22,400
|
def register_remap(remap_function, overwrite=False):
"""
Register a remap function for general usage.
Parameters
----------
remap_function : RemapFunction|Type
overwrite : bool
Should we overwrite any currently existing remap of the given name?
Returns
-------
None
"""
if isinstance(remap_function, type) and issubclass(remap_function, RemapFunction):
remap_function = remap_function()
if not isinstance(remap_function, RemapFunction):
raise TypeError('remap_function must be an instance of RemapFunction.')
remap_name = remap_function.name
if remap_name not in _REMAP_DICT:
_REMAP_DICT[remap_name] = remap_function
elif overwrite:
logger.info('Overwriting the remap {}'.format(remap_name))
_REMAP_DICT[remap_name] = remap_function
else:
logger.info('Remap {} already exists and is not being replaced'.format(remap_name))
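A minimal usage sketch (an assumption for illustration: the real RemapFunction interface may require different constructor arguments; only the `name` attribute and callable behaviour are relied on here):
# Hypothetical subclass for demonstration purposes only.
class IdentityRemap(RemapFunction):
    name = 'identity'
    def __call__(self, data):
        return data

register_remap(IdentityRemap)                    # the class is instantiated before registration
register_remap(IdentityRemap(), overwrite=True)  # replacing an existing entry requires overwrite=True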
| 22,401
|
def ape_insert_new_fex(cookie, in_device_primary_key, in_model, in_serial, in_vendor):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ApeInsertNewFex")
method.cookie = cookie
method.in_device_primary_key = in_device_primary_key
method.in_model = in_model
method.in_serial = in_serial
method.in_vendor = in_vendor
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request
| 22,402
|
def save_data(df, database_file_path):
"""Save the data frame into a SQL database file in the specified path.
Parameters:
df (pandas.core.frame.DataFrame): The data frame to be saved as a SQL database file.
database_file_path (str): The path to save the SQL database file.
Returns:
None.
Example:
save_data(df, 'disaster_response.db')
"""
database_file_name = os.path.basename(database_file_path)
database_file_name = os.path.splitext(database_file_name)[0]
engine = create_engine('sqlite:///' + database_file_path)
df.to_sql(database_file_name, engine, index=False, if_exists='replace')
| 22,403
|
async def get_pool_info(address, api_url="https://rest.stargaze-apis.com/cosmos"):
"""Pool value and current rewards via rest API.
Useful links:
https://api.akash.smartnodes.one/swagger/#/
https://github.com/Smart-Nodes/endpoints
"""
rewards_url = f"{api_url}/distribution/v1beta1/delegators/{address}/rewards"
delegated_url = f"{api_url}/staking/v1beta1/delegations/{address}"
async with aiohttp.ClientSession() as session:
rewards_data, pool_data = await asyncio.gather(
gather_json(session, rewards_url), gather_json(session, delegated_url)
)
rewards = float(rewards_data["rewards"][0]["reward"][0]["amount"]) / 1_000_000
pool_value = (
float(pool_data["delegation_responses"][0]["balance"]["amount"]) / 1_000_000
)
return pool_value, rewards
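A usage sketch (the address below is a made-up placeholder; `gather_json` is assumed to be the module helper that fetches and parses a JSON response):
import asyncio

async def main():
    # Placeholder delegator address; substitute a real bech32 address.
    pool_value, rewards = await get_pool_info("stars1exampledelegatoraddress")
    print(f"delegated: {pool_value:.2f}, pending rewards: {rewards:.2f}")

asyncio.run(main())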
| 22,404
|
def check_for_unused_inames(kernel):
"""
Check if there are any unused inames in the kernel.
"""
# Warn if kernel has unused inames
from loopy.transform.iname import get_used_inames
unused_inames = kernel.all_inames() - get_used_inames(kernel)
if unused_inames:
warn_with_kernel(
kernel, "unused_inames",
"Found unused inames in kernel: %s "
"Unused inames during linearization will be prohibited in "
"Loopy version 2021.X."
% unused_inames)
| 22,405
|
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
| 22,406
|
def mnist_reader(numbers):
"""
Read MNIST dataset with specific numbers you needed
:param numbers: A list of number from 0 - 9 as you needed
:return: A tuple of a numpy array with specific numbers MNIST training dataset,
labels of the training set and the length of the training dataset.
"""
# Training Data
f = open('./data/train-images.idx3-ubyte', 'rb')  # IDX files are binary
loaded = np.fromfile(file=f, dtype=np.uint8)
trainX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 127.5 - 1
f = open('./data/train-labels.idx1-ubyte', 'rb')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainY = loaded[8:].reshape((60000)).astype(np.int32)
_trainX = []
for idx in range(0, len(trainX)):
if trainY[idx] in numbers:
_trainX.append(trainX[idx])
return np.array(_trainX), trainY, len(_trainX)
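A short usage sketch, assuming the standard 60,000-image MNIST training split sits under ./data:
# Keep only the digits 0 and 1 from the training images.
images, labels, count = mnist_reader([0, 1])
print(images.shape)  # (12665, 28, 28, 1): 5,923 zeros plus 6,742 ones
print(count)         # 12665; note that `labels` is still the full 60,000-label array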
| 22,407
|
def _ensure_list(alist): # {{{
"""
Ensure that variables used as a list are actually lists.
"""
# Authors
# -------
# Phillip J. Wolfram, Xylar Asay-Davis
if isinstance(alist, six.string_types):
# print 'Warning, converting %s to a list'%(alist)
alist = [alist]
return alist
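For illustration, the intended behaviour in doctest form:
>>> _ensure_list('temperature')
['temperature']
>>> _ensure_list(['temperature', 'salinity'])
['temperature', 'salinity']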
| 22,408
|
def _parse_einsum_input(operands):
"""Parses einsum operands.
This function is based on `numpy.core.einsumfunc._parse_einsum_input`
function in NumPy 1.14.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('@a,@a', '@', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('@a,@a', '@', [a, b])
"""
if not operands:
raise ValueError('No input operands')
if isinstance(operands[0], str):
subscripts = operands[0].replace(' ', '')
operands = operands[1:]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError('Character %s is not a valid symbol.' % s)
# Check for proper "->"
if ('-' in subscripts) or ('>' in subscripts):
if any((
subscripts.count('-') > 1,
subscripts.count('>') > 1,
subscripts.count('->') != 1,
)):
raise ValueError('Subscripts can only contain one \'->\'.')
# Parse "..."
subscripts = subscripts.replace('...', '@')
if '.' in subscripts:
raise ValueError('Invalid Ellipses.')
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = operand_list
subscripts = ''
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
if num != last:
subscripts += ','
if output_list is not None:
subscripts += '->'
for s in output_list:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
# Build output string if does not exist
if '->' in subscripts:
input_subscripts, output_subscript = subscripts.split('->')
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError(
'Output character %s did not appear in the input'
% ('...' if char == '@' else char))
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(',', '')
output_subscript = ''
for s in sorted(set(tmp_subscripts)):
if s == '@' or tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError('Number of einsum subscripts must be equal to the '
'number of operands.')
return input_subscripts, output_subscript, operands
| 22,409
|
def convert_to_numpy(*args, **kwargs):
"""
Converts all tf tensors in args and kwargs to numpy array
Parameters
----------
*args :
positional arguments of arbitrary number and type
**kwargs :
keyword arguments of arbitrary number and type
Returns
-------
list
converted positional arguments
dict
converted keyword arguments
"""
args = recursively_convert_elements(args, tf.Tensor,
_single_element_tensor_conversion)
kwargs = recursively_convert_elements(kwargs, tf.Tensor,
_single_element_tensor_conversion)
return convert_to_numpy_identity(*args, **kwargs)
| 22,410
|
async def delete_contact(
contact_key: int, hash: str, resource: Resource = Depends(get_json_resource)
):
"""
Delete the contact with the given key.
If the record has changed since the hash was obtained, a 409 error is returned.
"""
try:
await resource.delete(contact_key, hash)
except SirixServerError:
return Response(status_code=status.HTTP_409_CONFLICT)
return Response(status_code=status.HTTP_204_NO_CONTENT)
| 22,411
|
def update_model_instance_meta_schema(request, file_type_id, **kwargs):
"""copies the metadata schema from the associated model program aggregation over to the model instance aggregation
"""
# Note: decorator 'authorise_for_aggregation_edit' sets the error_response key in kwargs
if 'error_response' in kwargs and kwargs['error_response']:
error_response = kwargs['error_response']
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
# Note: decorator 'authorise_for_aggregation_edit' sets the logical_file key in kwargs
logical_file = kwargs['logical_file']
metadata = logical_file.metadata
if not metadata.executed_by:
msg = "No associated model program was found"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
elif not metadata.executed_by.metadata_schema_json:
msg = "Associated model program has no metadata schema"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
logical_file.metadata_schema_json = metadata.executed_by.metadata_schema_json
if metadata.metadata_json:
# validate json data against metadata schema:
try:
metadata_json_schema = logical_file.metadata_schema_json
jsonschema.Draft4Validator(metadata_json_schema).validate(metadata.metadata_json)
except jsonschema.ValidationError as ex:
# delete existing invalid metadata
metadata.metadata_json = {}
logical_file.save()
metadata.is_dirty = True
metadata.save()
resource = logical_file.resource
resource_modified(resource, request.user, overwrite_bag=False)
ajax_response_data = {'status': 'success', 'logical_file_type': logical_file.type_name(),
'element_name': 'metadata_schema_json', 'message': "Update was successful"}
return JsonResponse(ajax_response_data, status=status.HTTP_200_OK)
| 22,412
|
def mypy_run(args):
"""Run mypy with given arguments and return the result."""
logger.log_cmd(["mypy"] + args)
try:
stdout, stderr, exit_code = run(args)
except BaseException:
logger.print_exc()
else:
for line in stdout.splitlines():
yield line, False
for line in stderr.splitlines():
yield line, True
| 22,413
|
def _calculate_permutation_scores_per_col(estimator, X, y, sample_weight, col_idx,
random_state, n_repeats, scorer):
"""Calculate score when `col_idx` is permuted."""
random_state = check_random_state(random_state)
# Work on a copy of X to ensure thread-safety in case of threading-based
# parallelism. Furthermore, making a copy is also useful when the joblib
# backend is 'loky' (default) or the old 'multiprocessing': in those cases,
# if X is large it will automatically be backed by a read-only memory map
# (memmap). X.copy(), on the other hand, is always guaranteed to return a
# writable data structure whose columns can be shuffled in place.
X_permuted = X.copy()
scores = np.zeros(n_repeats)
shuffling_idx = np.arange(X.shape[0])
for n_round in range(n_repeats):
random_state.shuffle(shuffling_idx)
if hasattr(X_permuted, "iloc"):
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
else:
X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
feature_score = _weights_scorer(
scorer, estimator, X_permuted, y, sample_weight
)
scores[n_round] = feature_score
return scores
| 22,414
|
def get_statement_at_line(source: str, lineno: int, checker):
"""Get statements at line *lineno* from a source string.
:param source: The source to get the statements from.
:param lineno: Line number which the statement must include. Counted from 1.
:param checker: A function that checks each statement. It must return *None* if the check
fails. If anything else is returned, that becomes the return value of this function.
:returns: A list of tuples of strings with the found statements and an offset between the
beginning of the match and *lineno*.
"""
module = ast_utils.cached_parse(source)
for stmt in module.body:
position = ast_utils.get_position(source, stmt)
if position.lineno <= lineno <= position.end_lineno:
res = checker(stmt, source)
if res is not None:
return res
raise RuntimeError('Statement not found.')
| 22,415
|
def ptrace(Q, sel):
"""
Partial trace of the Qobj with selected components remaining.
Parameters
----------
Q : :class:`qutip.Qobj`
Composite quantum object.
sel : int/list
An ``int`` or ``list`` of components to keep after partial trace.
Returns
-------
oper : :class:`qutip.Qobj`
Quantum object representing partial trace with selected components
remaining.
Notes
-----
This function is for legacy compatibility only. It is recommended to use
the ``ptrace()`` Qobj method.
"""
if not isinstance(Q, Qobj):
raise TypeError("Input is not a quantum object")
return Q.ptrace(sel)
| 22,416
|
def createConformations(outputfile, forcefield, smiles, sid):
"""Generate the conformations for a molecule and save them to disk."""
print(f'Generating {sid}: {smiles}')
try:
mol = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
fftop = Topology()
fftop.add_molecule(mol)
mmtop = fftop.to_openmm()
system = forcefield.create_openmm_system(fftop)
except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
print(' failed to parametrize')
return
# Generate 10 diverse starting points. Run MD from each one to generate a total
# of 100 high energy conformations.
mol.generate_conformers(n_conformers=10, rms_cutoff=0*unit.nanometers)
assert len(mol.conformers) == 10
def simulate(pos):
integrator = openmm.LangevinMiddleIntegrator(500*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
simulation.context.setPositions(pos)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(500*unit.kelvin)
states = []
for i in range(10):
simulation.step(10000)
state = simulation.context.getState(getPositions=True, getEnergy=True)
if state.getPotentialEnergy() < 1e4*unit.kilojoules_per_mole:
states.append(state)
return states
futures = []
with ThreadPoolExecutor() as executor:
for pos in mol.conformers:
futures.append(executor.submit(simulate, pos))
states = []
for future in futures:
states += future.result()
# Select 25 that are most different from each other.
if len(states) < 25:
print(' failed to generate states')
return
states = filterByRMSD(states, mmtop)
# Create a nearby, lower energy conformation from each one.
integrator = openmm.LangevinMiddleIntegrator(100*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
for state in states[:]:
simulation.context.setState(state)
simulation.minimizeEnergy(maxIterations=5)
simulation.context.setVelocitiesToTemperature(100*unit.kelvin)
simulation.step(1000)
states.append(simulation.context.getState(getPositions=True))
saveToFile(outputfile, mol, states, sid)
| 22,417
|
def two_angle_circular_correlation_coef(angles1, angles2, mean1, mean2):
"""
Circular correlation measure. SenGupta 2001
"""
centered_a = angles1-mean1
centered_b = angles2-mean2
sin_centered_a = numpy.sin(centered_a)
sin_centered_b = numpy.sin(centered_b)
sin2_a = sin_centered_a*sin_centered_a
sin2_b = sin_centered_b*sin_centered_b
return numpy.dot(sin_centered_a, sin_centered_b) / math.sqrt(numpy.sum(sin2_a) * numpy.sum(sin2_b))  # SenGupta 2001 denominator: sqrt of the product of the summed squared sines
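A small usage sketch; arithmetic means are passed here as a stand-in for the circular means the measure expects, which is reasonable for angles well inside one period:
import math
import numpy

angles1 = numpy.array([0.1, 0.8, 1.5, 2.2])
angles2 = angles1 + 0.05  # nearly identical directions
r = two_angle_circular_correlation_coef(angles1, angles2,
                                        angles1.mean(), angles2.mean())
print(r)  # ~1.0, since the centered angle sets coincide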
| 22,418
|
def format_data(data: Union[dict, list]) -> str:
"""
:param data: input data
:return: pretty formatted yaml representation of a dictionary
"""
return yaml.dump(data, sort_keys=False, default_flow_style=False)
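An illustration of the output (shown for PyYAML's default block-style indentation):
>>> print(format_data({'name': 'demo', 'steps': ['build', 'test']}))
name: demo
steps:
- build
- test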
| 22,419
|
def fasta2select(fastafilename, is_aligned=False,
ref_resids=None, target_resids=None,
ref_offset=0, target_offset=0, verbosity=3,
alnfilename=None, treefilename=None, clustalw="clustalw2"):
"""Return selection strings that will select equivalent residues.
The function aligns two sequences provided in a FASTA file and
constructs MDAnalysis selection strings of the common atoms. When
these two strings are applied to the two different proteins they
will generate AtomGroups of the aligned residues.
`fastafilename` contains the two un-aligned sequences in FASTA
format. The reference is assumed to be the first sequence, the
target the second. ClustalW_ produces a pairwise
alignment (which is written to a file with suffix ``.aln``). The
output contains atom selection strings that select the same atoms
in the two structures.
Unless `ref_offset` and/or `target_offset` are specified, the resids
in the structure are assumed to correspond to the positions in the
un-aligned sequence, namely the first residue has resid == 1.
In more complicated cases (e.g., when the resid numbering in the
input structure has gaps due to missing parts), simply provide the
sequence of resids as they appear in the topology in `ref_resids` or
`target_resids`, e.g. ::
target_resids = [a.resid for a in trj.select_atoms('name CA')]
(This translation table *is* combined with any value for
`ref_offset` or `target_offset`!)
Parameters
----------
fastafilename : str, path to filename
FASTA file with first sequence as reference and
second the one to be aligned (ORDER IS IMPORTANT!)
is_aligned : bool (optional)
``False`` (default)
run clustalw for sequence alignment;
``True``
use the alignment in the file (e.g. from STAMP) [``False``]
ref_offset : int (optional)
add this number to the column number in the FASTA file
to get the original residue number, default: 0
target_offset : int (optional)
add this number to the column number in the FASTA file
to get the original residue number, default: 0
ref_resids : str (optional)
sequence of resids as they appear in the reference structure
target_resids : str (optional)
sequence of resids as they appear in the target
alnfilename : str (optional)
filename of ClustalW alignment (clustal format) that is
produced by *clustalw* when *is_aligned* = ``False``.
default ``None`` uses the name and path of *fastafilename* and
substitutes the suffix with '.aln'.
treefilename: str (optional)
filename of ClustalW guide tree (Newick format);
if default ``None`` the filename is generated from *alnfilename*
with the suffix '.dnd' instead of '.aln'
clustalw : str (optional)
path to the ClustalW (or ClustalW2) binary; only
needed for `is_aligned` = ``False``, default: "clustalw2"
Returns
-------
select_dict : dict
dictionary with 'reference' and 'mobile' selection string
that can be used immediately in :class:`AlignTraj` as
``select=select_dict``.
See Also
--------
:func:`sequence_alignment`, which does not require external
programs.
.. _ClustalW: http://www.clustal.org/
.. _STAMP: http://www.compbio.dundee.ac.uk/manuals/stamp.4.2/
"""
protein_gapped = Bio.Alphabet.Gapped(Bio.Alphabet.IUPAC.protein)
if is_aligned:
logger.info("Using provided alignment {}".format(fastafilename))
with open(fastafilename) as fasta:
alignment = Bio.AlignIO.read(
fasta, "fasta", alphabet=protein_gapped)
else:
if alnfilename is None:
filepath, ext = os.path.splitext(fastafilename)
alnfilename = filepath + '.aln'
if treefilename is None:
filepath, ext = os.path.splitext(alnfilename)
treefilename = filepath + '.dnd'
run_clustalw = Bio.Align.Applications.ClustalwCommandline(
clustalw,
infile=fastafilename,
type="protein",
align=True,
outfile=alnfilename,
newtree=treefilename)
logger.debug(
"Aligning sequences in %(fastafilename)r with %(clustalw)r.",
vars())
logger.debug("ClustalW commandline: %r", str(run_clustalw))
try:
stdout, stderr = run_clustalw()
except:
logger.exception("ClustalW %(clustalw)r failed", vars())
logger.info(
"(You can get clustalw2 from http://www.clustal.org/clustal2/)")
raise
with open(alnfilename) as aln:
alignment = Bio.AlignIO.read(
aln, "clustal", alphabet=protein_gapped)
logger.info(
"Using clustalw sequence alignment {0!r}".format(alnfilename))
logger.info(
"ClustalW Newick guide tree was also produced: {0!r}".format(treefilename))
nseq = len(alignment)
if nseq != 2:
raise ValueError(
"Only two sequences in the alignment can be processed.")
# implicit assertion that we only have two sequences in the alignment
orig_resids = [ref_resids, target_resids]
offsets = [ref_offset, target_offset]
for iseq, a in enumerate(alignment):
# need iseq index to change orig_resids
if orig_resids[iseq] is None:
# build default: assume consecutive numbering of all
# residues in the alignment
GAP = a.seq.alphabet.gap_char
length = len(a.seq) - a.seq.count(GAP)
orig_resids[iseq] = np.arange(1, length + 1)
else:
orig_resids[iseq] = np.asarray(orig_resids[iseq])
# add offsets to the sequence <--> resid translation table
seq2resids = [resids + offset for resids, offset in zip(
orig_resids, offsets)]
del orig_resids
del offsets
def resid_factory(alignment, seq2resids):
"""Return a function that gives the resid for a position ipos in
the nseq'th alignment.
resid = resid_factory(alignment,seq2resids)
r = resid(nseq,ipos)
It is based on a look up table that translates position in the
alignment to the residue number in the original
sequence/structure.
The first index of resid() is the alignment number, the
second the position in the alignment.
seq2resids translates the residues in the sequence to resid
numbers in the psf. In the simplest case this is a linear map
but if whole parts such as loops are omitted from the protein
the seq2resids may have big gaps.
Format: a tuple of two numpy arrays; the first array is for
the reference, the second for the target. The index in each
array gives the consecutive number of the amino acid in the
sequence, the value the resid in the structure/psf.
Note: assumes that alignments have same length and are padded if
necessary.
"""
# could maybe use Bio.PDB.StructureAlignment instead?
nseq = len(alignment)
t = np.zeros((nseq, alignment.get_alignment_length()), dtype=int)
for iseq, a in enumerate(alignment):
GAP = a.seq.alphabet.gap_char
t[iseq, :] = seq2resids[iseq][np.cumsum(np.where(
np.array(list(a.seq)) == GAP, 0, 1)) - 1]
# -1 because seq2resid is index-1 based (resids start at 1)
def resid(nseq, ipos, t=t):
return t[nseq, ipos]
return resid
resid = resid_factory(alignment, seq2resids)
res_list = [] # collect individual selection string
# could collect just resid and type (with/without CB) and
# then post-process and use ranges for continuous stretches, eg
# ( resid 1:35 and ( backbone or name CB ) ) or ( resid 36 and backbone )
# should be the same for both seqs
GAP = alignment[0].seq.alphabet.gap_char
if GAP != alignment[1].seq.alphabet.gap_char:
raise ValueError(
"Different gap characters in sequence 'target' and 'mobile'.")
for ipos in range(alignment.get_alignment_length()):
aligned = list(alignment[:, ipos])
if GAP in aligned:
continue # skip residue
template = "resid %i"
if 'G' not in aligned:
# can use CB
template += " and ( backbone or name CB )"
else:
template += " and backbone"
template = "( " + template + " )"
res_list.append([template % resid(iseq, ipos) for iseq in range(nseq)])
sel = np.array(res_list).transpose()
ref_selection = " or ".join(sel[0])
target_selection = " or ".join(sel[1])
return {'reference': ref_selection, 'mobile': target_selection}
| 22,420
|
def eval_on_dataset(
model, state, dataset,
pmapped_eval_step):
"""Evaluates the model on the whole dataset.
Args:
model: The model to evaluate.
state: Current state associated with the model (contains the batch norm MA).
dataset: Dataset on which the model should be evaluated. Should already
being batched.
pmapped_eval_step: A pmapped version of the `eval_step` function (see its
documentation for more details).
Returns:
A dictionary containing the loss and error rate on the batch. These metrics
are averaged over the samples.
"""
eval_metrics = []
total_num_samples = 0
for eval_batch in dataset:
# Load and shard the TF batch.
eval_batch = load_and_shard_tf_batch(eval_batch)
# Compute metrics and sum over all observations in the batch.
metrics = pmapped_eval_step(model, state, eval_batch)
eval_metrics.append(metrics)
# Number of samples seen is num_replicas * per_replica_batch_size.
total_num_samples += (
eval_batch['label'].shape[0] * eval_batch['label'].shape[1])
# Metrics are all the same across all replicas (since we applied psum in the
# eval_step). The next line will fetch the metrics on one of them.
eval_metrics = common_utils.get_metrics(eval_metrics)
# Finally, we divide by the number of samples to get the mean error rate and
# cross entropy.
eval_summary = jax.tree_map(lambda x: x.sum() / total_num_samples,
eval_metrics)
return eval_summary
| 22,421
|
def create_directory(path):
"""Creates the given directory and returns the path."""
if not os.path.isdir(path):
os.makedirs(path)
return path
| 22,422
|
def factorize(n):
""" Prime factorises n """
# Loop up to sqrt(n) and check for factors
ret = []
sqRoot = int(n ** 0.5)
for f in range(2, sqRoot+1):  # range, not the Python 2 xrange
if n % f == 0:
e = 0
while n % f == 0:
n, e = n // f, e + 1  # integer division keeps n an int in Python 3
ret.append((f, e))
if n > 1:
ret.append((n, 1))
return ret
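Example results (doctest-style):
>>> factorize(360)   # 360 = 2**3 * 3**2 * 5
[(2, 3), (3, 2), (5, 1)]
>>> factorize(97)    # a prime factorises as itself
[(97, 1)]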
| 22,423
|
def createSimpleDataSet( numOfAttr, numOfObj ):
"""
This creates a simple dataset with 3 attributes.
The second one is 3 times the first one with some
Gauss noise. The third one is just random noise.
"""
database = []
for i in range(numOfObj):
data = dataObject(numOfAttr)
w=[random.gauss(2.0, 2.0)]
w.append(w[0]*3+random.gauss(0.0, 0.05))
w.append(random.random()*6)
data.setAttributes(w)
database.append(data)
return database
| 22,424
|
def clean_data(
data,
isz=None,
r1=None,
dr=None,
edge=0,
bad_map=None,
add_bad=None,
apod=True,
offx=0,
offy=0,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
*,
mask=None,
):
"""Clean data.
Parameters:
-----------
`data` {np.array} -- datacube containing the NRM data\n
`isz` {int} -- Size of the cropped image (default: {None})\n
`r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
`dr` {int} -- Outer radius to compute sky (default: {None})\n
`edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact; default: {0})\n
Returns:
--------
`cube` {np.array} -- Cleaned datacube.
"""
n_im = data.shape[0]
cube_cleaned = [] # np.zeros([n_im, isz, isz])
l_bad_frame = []
bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
for i in tqdm(range(n_im), ncols=100, desc="Cleaning", leave=False):
img0 = data[i]
img0 = _apply_edge_correction(img0, edge=edge)
if bad_map is not None:
img1 = fix_bad_pixels(img0, bad_map[i], add_bad=add_bad[i])
else:
img1 = img0.copy()
img1 = _remove_dark(img1, darkfile=darkfile, verbose=verbose)
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
center = find_max(img1, filtmed=filtmed, f=f_kernel)
else:
center = None
if sky and (r1 is not None or mask is not None):
img_biased = sky_correction(
img1, r1=r1, dr=dr, verbose=verbose, center=center, mask=mask
)[0]
elif sky:
warnings.warn(
"sky is set to True, but r1 and mask are set to None. Skipping sky correction",
RuntimeWarning,
)
img_biased = img1.copy()
else:
img_biased = img1.copy()
img_biased[img_biased < 0] = 0 # Remove negative pixels
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
im_rec_max = crop_max(
img_biased, isz, offx=offx, offy=offy, filtmed=filtmed, f=f_kernel
)[0]
else:
im_rec_max = img_biased.copy()
if (
(im_rec_max.shape[0] != im_rec_max.shape[1])
or (isz is not None and im_rec_max.shape[0] != isz)
or (isz is None and im_rec_max.shape[0] != img0.shape[0])
):
l_bad_frame.append(i)
else:
if apod and window is not None:
img = apply_windowing(im_rec_max, window=window)
elif apod:
warnings.warn(
"apod is set to True, but window is None. Skipping apodisation",
RuntimeWarning,
)
img = im_rec_max.copy()
else:
img = im_rec_max.copy()
cube_cleaned.append(img)
if verbose:
print("Bad centering frame number:", l_bad_frame)
cube_cleaned = np.array(cube_cleaned)
return cube_cleaned
| 22,425
|
def describe_dataset_group(datasetGroupArn=None):
"""
Describes the given dataset group. For more information on dataset groups, see CreateDatasetGroup .
See also: AWS API Documentation
Exceptions
:example: response = client.describe_dataset_group(
datasetGroupArn='string'
)
:type datasetGroupArn: string
:param datasetGroupArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the dataset group to describe.\n
:rtype: dict
ReturnsResponse Syntax{
'datasetGroup': {
'name': 'string',
'datasetGroupArn': 'string',
'status': 'string',
'roleArn': 'string',
'kmsKeyArn': 'string',
'creationDateTime': datetime(2015, 1, 1),
'lastUpdatedDateTime': datetime(2015, 1, 1),
'failureReason': 'string'
}
}
Response Structure
(dict) --
datasetGroup (dict) --A listing of the dataset group\'s properties.
name (string) --The name of the dataset group.
datasetGroupArn (string) --The Amazon Resource Name (ARN) of the dataset group.
status (string) --The current status of the dataset group.
A dataset group can be in one of the following states:
CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
DELETE PENDING
roleArn (string) --The ARN of the IAM role that has permissions to create the dataset group.
kmsKeyArn (string) --The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.
creationDateTime (datetime) --The creation date and time (in Unix time) of the dataset group.
lastUpdatedDateTime (datetime) --The last update date and time (in Unix time) of the dataset group.
failureReason (string) --If creating a dataset group fails, provides the reason why.
Exceptions
Personalize.Client.exceptions.InvalidInputException
Personalize.Client.exceptions.ResourceNotFoundException
:return: {
'datasetGroup': {
'name': 'string',
'datasetGroupArn': 'string',
'status': 'string',
'roleArn': 'string',
'kmsKeyArn': 'string',
'creationDateTime': datetime(2015, 1, 1),
'lastUpdatedDateTime': datetime(2015, 1, 1),
'failureReason': 'string'
}
}
:returns:
Personalize.Client.exceptions.InvalidInputException
Personalize.Client.exceptions.ResourceNotFoundException
"""
pass
| 22,426
|
def test_update_nonexisting_subscription(rest_client, auth_token):
""" SUBSCRIPTION (REST): Test the update of a non-existing subscription """
subscription_name = uuid()
data = {'options': {'filter': {'project': ['toto', ]}}}
response = rest_client.put('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 404
assert response.headers.get('ExceptionClass') == 'SubscriptionNotFound'
| 22,427
|
def select_points():
""" Select points (empty) objects.
Parameters:
None
Returns:
list: Empty objects or None.
"""
selected = bpy.context.selected_objects
if selected:
return [object for object in selected if object.type == 'EMPTY']
print('***** Point (empty) objects were not selected *****')
return None
| 22,428
|
def _get_best_encoding(stream):
"""Returns the default stream encoding if not found."""
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if _is_ascii_encoding(rv):
return 'utf-8'
return rv
| 22,429
|
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
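A typical driving loop (the sleep is a stand-in for real per-item work):
import time

items = list(range(50))
printProgressBar(0, len(items), prefix='Progress:', suffix='Complete', length=40)
for i, _ in enumerate(items):
    time.sleep(0.05)  # placeholder for the per-item work
    printProgressBar(i + 1, len(items), prefix='Progress:', suffix='Complete', length=40)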
| 22,430
|
def mask_conv1d1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
data_format="channels_last",
**kwargs):
"""
Masked 1-dim kernel version of the 1D convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return MaskConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs)
| 22,431
|
def isready_cluster(ctx, project_name, cluster_name):
"""Check if the Atlas Cluster is 'IDLE'."""
project = ctx.obj.groups.byName[project_name].get().data
state = ctx.obj.groups[project.id].clusters[cluster_name].get().data.stateName
if state == "IDLE":
click.echo("True")
exit(0)
click.echo("False", err=True)
exit(1)
| 22,432
|
def decimal_from_tuple(signed, digits, expo):
"""Build `Decimal` objects from components of decimal tuple.
Parameters
----------
signed : bool
True for negative values.
digits : iterable of ints
digits of value each in [0,10).
expo : int or {'F', 'n', 'N'}
exponent of decimal.
Returns
-------
y : Decimal
corresponding decimal object.
"""
# Get everything in correct type because the Py3 decimal package is anal
signed = int(signed)
digits = ensure_tuple_of_ints(digits)
expo = expo if expo in ("F", "n", "N") else int(expo)
y = decimal.Decimal(decimal.DecimalTuple(signed, digits, expo))
return y
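Example values (doctest-style, assuming `ensure_tuple_of_ints` accepts plain tuples):
>>> decimal_from_tuple(False, (3, 1, 4), -2)
Decimal('3.14')
>>> decimal_from_tuple(True, (1, 0), 3)
Decimal('-1.0E+4')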
| 22,433
|
def metadata_update(
repo_id: str,
metadata: Dict,
*,
repo_type: str = None,
overwrite: bool = False,
token: str = None,
) -> str:
"""
Updates the metadata in the README.md of a repository on the Hugging Face Hub.
Example:
>>> from huggingface_hub import metadata_update
>>> metadata = {'model-index': [{'name': 'RoBERTa fine-tuned on ReactionGIF',
... 'results': [{'dataset': {'name': 'ReactionGIF',
... 'type': 'julien-c/reactiongif'},
... 'metrics': [{'name': 'Recall',
... 'type': 'recall',
... 'value': 0.7762102282047272}],
... 'task': {'name': 'Text Classification',
... 'type': 'text-classification'}}]}]}
>>> metadata_update("julien-c/reactiongif-roberta", metadata)
Args:
repo_id (`str`):
The name of the repository.
metadata (`dict`):
A dictionary containing the metadata to be updated.
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if updating to a dataset or space,
`None` or `"model"` if updating to a model. Default is `None`.
overwrite (`bool`, *optional*, defaults to `False`):
If set to `True` an existing field can be overwritten, otherwise
attempting to overwrite an existing field will cause an error.
token (`str`, *optional*):
The Hugging Face authentication token.
Returns:
`str`: URL of the commit which updated the card metadata.
"""
filepath = hf_hub_download(
repo_id,
filename=REPOCARD_NAME,
repo_type=repo_type,
use_auth_token=token,
force_download=True,
)
existing_metadata = metadata_load(filepath)
for key in metadata:
# update model index containing the evaluation results
if key == "model-index":
if "model-index" not in existing_metadata:
existing_metadata["model-index"] = metadata["model-index"]
else:
# the model-index contains a list of results as used by PwC but only has one element thus we take the first one
existing_metadata["model-index"][0][
"results"
] = _update_metadata_model_index(
existing_metadata["model-index"][0]["results"],
metadata["model-index"][0]["results"],
overwrite=overwrite,
)
# update all fields except model index
else:
if key in existing_metadata and not overwrite:
if existing_metadata[key] != metadata[key]:
raise ValueError(
f"""You passed a new value for the existing meta data field '{key}'. Set `overwrite=True` to overwrite existing metadata."""
)
else:
existing_metadata[key] = metadata[key]
# save and push to hub
metadata_save(filepath, existing_metadata)
return HfApi().upload_file(
path_or_fileobj=filepath,
path_in_repo=REPOCARD_NAME,
repo_id=repo_id,
repo_type=repo_type,
identical_ok=False,
token=token,
)
| 22,434
|
async def test_command_with_optional_arg(hass, client):
"""Test generic command functionality."""
await setup_webostv(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test",
ATTR_PAYLOAD: {"target": "https://www.google.com"},
}
await hass.services.async_call(DOMAIN, SERVICE_COMMAND, data)
await hass.async_block_till_done()
client.request.assert_called_with(
"test", payload={"target": "https://www.google.com"}
)
| 22,435
|
def add_children_layer_before_parent_layer(cur_layer: tf.keras.layers.Layer, node_layer_map: dict, layer_out_node_map: dict, visited_layers: set, reversed_ordered_layers: list):
"""
Function to use topological sorting for finding all the layers which are accessible from the specific input_layer in the opposite order of occurrence
:param cur_layer: layer that we want to find a path from
:param node_layer_map: dictionary includes node_ref as a key, in_layers and out_layer as value
:param layer_out_node_map: dictionary includes layer_ref as a key, outbound nodes as value
:param visited_layers: Set of all layers that have been visited
:param reversed_ordered_layers: List of layers in the opposite order of occurrence for the layers that we have visited so far
:return:
"""
# Mark the current layer as visited.
visited_layers.add(cur_layer)
if cur_layer in layer_out_node_map:
# Recur for all the layers adjacent to this layer
for next_node in layer_out_node_map[cur_layer]:
next_layer = node_layer_map[next_node][1]
if next_layer not in visited_layers:
add_children_layer_before_parent_layer(next_layer, node_layer_map, layer_out_node_map, visited_layers, reversed_ordered_layers)
reversed_ordered_layers.append(cur_layer)
else:
reversed_ordered_layers.append(cur_layer)
| 22,436
|
def test_baked_django_with_git_initiated(cookies):
"""Test Django git init has generated correctly."""
default_django = cookies.bake()
assert ".git" in os.listdir(default_django.project_path)
git_remote = bake_checker(
"git",
"remote",
"-v",
cwd=default_django.project_path,
)
assert "git@github.com:imAsparky/django-boilerplate.git (fetch)" in git_remote
| 22,437
|
def convertStringToArabic(numStr, stripChars=0):
"""
Convert a string to an arabic number;
Always returns numeric!
12-09-2004: Changed default stripChars to 0, because otherwise a roman I was
stripped before processing! Need to watch for programs that need
to now explicitly set stripChars to 1
>>> convertStringToArabic("IV")
4
>>> convertStringToArabic("123")
123
>>> convertStringToArabic("MC")
1100
"""
num = 0
if type(numStr) == type(1):
num = numStr # already numeric, arabic
elif isinstance(numStr, str):
numStr = trimPunctAndSpaces(numStr)
if stripChars:
numStr = trimNonDigits(numStr)
if numStr != "":
if isRoman(numStr.upper()):
num = convRomanToArabic(numStr.upper())
else:
try:
num = int(numStr)
except Exception as e:
raise ValueError("Cannot convert: %s" % numStr)
else:
# try not causing an exception on this. If there's no numeric, then return None.
logger.debug("Empty String. convertStringToArabic Conversion error")
else:
try:
num = int(numStr)
except Exception as e:
raise ValueError(e)
if num is None:
num = 0
raise ValueError("Cannot convert: %s" % numStr)
return num
| 22,438
|
def _process_location(loc: location.NormalizedLocation) -> None:
"""Run through all of the methods to enrich the location"""
_add_provider_from_name(loc)
_add_source_link(loc)
_add_provider_tag(loc)
_normalize_phone_format(loc)
| 22,439
|
def set_logging():
"""Sets additional logging to file for debug."""
logger_migrator = logging.getLogger('migrator')
logger_migrator.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(message)s - \n '
'[in %(pathname)s:%(lineno)d]')
fh = logging.FileHandler('migrator.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger_migrator.addHandler(fh)
logger_matcher = logging.getLogger('cds_dojson.matcher.dojson_matcher')
logger_matcher.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(message)s - \n '
'[in %(pathname)s:%(lineno)d]')
fh = logging.FileHandler('matcher.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger_matcher.addHandler(fh)
return logger_migrator
| 22,440
|
def system_types():
"""
System types (workspace types).
:return:
"""
return Workspace.sys_types().values()
| 22,441
|
def get_rnd_simplex(dimension, random_state):
"""
Uniform random point on a simplex, i.e. x_i >= 0 and sum of the coordinates is 1.
Donald B. Rubin, The Bayesian bootstrap Ann. Statist. 9, 1981, 130-134.
https://cs.stackexchange.com/questions/3227/uniform-sampling-from-a-simplex
Parameters
----------
dimension: int
Dimensionality of the simplex
random_state: optional, RandomState object
Returns
-------
numpy array corresponding to random sample in dimension of space
"""
t = random_state.uniform(0, 1, dimension - 1)
t = np.append(t, [0, 1])
t.sort()
return np.array([(t[i + 1] - t[i]) for i in range(len(t) - 1)])
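A quick check of the two simplex properties (doctest-style):
>>> rng = np.random.RandomState(0)
>>> x = get_rnd_simplex(4, rng)
>>> bool(np.all(x >= 0)), bool(np.isclose(x.sum(), 1.0))
(True, True)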
| 22,442
|
def backup_generate_metadata(request, created_at='', secret=''):
"""
Generates metadata code for the backup.
Meant to be called by the local handler only with shared secret (not directly).
"""
if secret != settings.GAEBAR_SECRET_KEY:
return HttpResponseForbidden()
backup = models.GaebarBackup.all().filter('created_at = ', timestamp_to_datetime(created_at)).get()
if not backup:
raise Http404
context = dict(backup = backup)
response = HttpResponse(loader.render_to_string('gaebar/metadata.py', context), 'text/plain')
response['Content-Disposition'] = 'attachment; filename=metadata.py'
return response
| 22,443
|
def test_bs_godec_trained_noise(noise_path, gif_name, preview):
"""Test bs_godec Implementation with trained noise
Keyword Arguments:
noise_path {string}
gif_name {string}
preview {boolean}
---
This test shows you how to run the bs_godec method.
"""
N = get_frame(noise_path)
M, R = bs_godec_trained(files, N)
plot_bs_results(M, N, R, bs_pics_path, preview=preview)
| 22,444
|
def hard_prune_repo(repo_path):
"""
1. Delete all files, except git metadata and files with names that match *.py.
2. Delete all empty folders.
3. Delete all symbolic links.
4. Set permission for all files to 440.
"""
assert os.path.exists(repo_path)
prune_commands = (
"find {} -type f ! -name '*.py' -not -iwholename '*.git*' -delete",
"find {} -type d -empty -delete",
"find {} -type l -delete",
"find {} -type f -exec chmod 440 {{}} \;")
for command in prune_commands:
command = command.format(repo_path)
print(command)
subprocess.run(command, shell=True, check=True)
| 22,445
|
def contains_conv(module: torch.nn.Module) -> bool:
""" Returns `True` if given `torch.nn.Module` contains at least one convolution module/op (based on `deepcv.meta.nn.is_conv` for convolution definition) """
return any(map(is_conv, module.modules()))  # map(function, iterable); the original arguments were swapped
| 22,446
|
def _construct_cell(empty=False):
"""Constructs a test cell."""
cell = scheduler.Cell('top')
if empty:
return cell
rack1 = scheduler.Bucket('rack:rack1', traits=0, level='rack')
rack2 = scheduler.Bucket('rack:rack2', traits=0, level='rack')
cell.add_node(rack1)
cell.add_node(rack2)
srv1 = scheduler.Server('srv1', [10, 20, 30], traits=1,
valid_until=1000, label='part')
srv2 = scheduler.Server('srv2', [10, 20, 30], traits=3,
valid_until=2000, label='part')
srv3 = scheduler.Server('srv3', [10, 20, 30], traits=0,
valid_until=3000, label='_default')
srv4 = scheduler.Server('srv4', [10, 20, 30], traits=0,
valid_until=4000, label='_default')
rack1.add_node(srv1)
rack1.add_node(srv2)
rack2.add_node(srv3)
rack2.add_node(srv4)
tenant1 = scheduler.Allocation()
cell.partitions['_default'].allocation.add_sub_alloc('t1', tenant1)
tenant11 = scheduler.Allocation()
tenant1.add_sub_alloc('t11', tenant11)
alloc1 = scheduler.Allocation([10, 10, 10], rank=100, traits=0)
tenant11.add_sub_alloc('a1', alloc1)
tenant2 = scheduler.Allocation()
cell.partitions['part'].allocation.add_sub_alloc('t2', tenant2)
alloc2 = scheduler.Allocation([10, 10, 10], rank=100, traits=3)
tenant2.add_sub_alloc('a2', alloc2)
return cell
| 22,447
|
def gaussian_kernel(F: np.ndarray) -> np.ndarray:
"""Compute dissimilarity matrix based on a Gaussian kernel."""
D = squared_dists(F)
return np.exp(-D/np.mean(D))
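The `squared_dists` helper is not shown in this snippet; a plausible implementation it could rely on (an assumption, not the original code) is:
def squared_dists(F: np.ndarray) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of F."""
    sq_norms = np.sum(F ** 2, axis=1)
    return sq_norms[:, None] + sq_norms[None, :] - 2.0 * F @ F.T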
| 22,448
|
def _parse_yearweek(yearweek):
"""Utility function to convert internal string representations of calender weeks into datetime objects. Uses strings of format `<year>-KW<week>`. Weeks are 1-based."""
year, week = yearweek_regex.search(yearweek).groups()
# datetime.combine(isoweek.Week(int(year), int(week)).wednesday(),time(0))
return isoweek.Week(int(year), int(week))
| 22,449
|
def correction_file(namefile, namefile_corrected):
""" Correct the orthograph of a tokenized file"""
out = open(namefile_corrected, 'w')
with open(namefile, 'r') as f:
for line in f:
new_line = line.replace('\n', '')
new_line = new_line.split(';')
out.write(new_line[0] + ";") # keep the index
sent = new_line[1].split(' ')
for wrd in sent:
if wrd!='' and wrd != '\xc2\xa0' and wrd!=' ':
if dictionnary_french.check(wrd.lower()):
out.write(wrd.lower() + " ")
else:
out.write(correction(wrd.lower()) + " ")
out.write("\n")
out.close()
| 22,450
|
def exportSchemaAsJSONSchema(schema, versionNumber, filePath):
"""
Exports the given schema as a JSON Schemas document.
Parameters
----------
schema : Schema
The schema to export.
versionNumber : string
The version number of the schema.
filePath : string
The path to which to save the JSON Schemas file.
Returns
-------
None
"""
jsonSchemasExporter = JSONSchemasExporter()
jsonSchemasExporter.exportSchema(schema, versionNumber, filePath)
| 22,451
|
def classified_triplets(ctx: callable, config: callable, statsd: callable, logger: callable, run_id: int,
conn: callable, metadata_conn: callable, command: str, metrics_root: callable,
metrics_run_root: callable, output_dir: str, conditions: list) -> None:
"""Generate per-condition classified triplets list.
Arguments:
ctx: click context object
config: DIRBS config object
statsd: DIRBS statsd connection object
logger: DIRBS custom logger object
run_id: run id of the current job
conn: DIRBS PostgreSQL connection object
metadata_conn: DIRBS PostgreSQL metadata connection object
command: name of the command
metrics_root: root object for the statsd metrics
metrics_run_root: root object for the statsd run metrics
output_dir: output directory path
conditions: list of conditions for classified triplets
Returns:
None
"""
metadata.add_optional_job_metadata(metadata_conn, command, run_id,
report_schema_version=report_schema_version,
output_dir=os.path.abspath(str(output_dir)))
report_dir = make_report_directory(ctx, output_dir, run_id, conn, config)
with utils.CodeProfiler() as cp:
report_metadata = write_classified_triplets(logger, conditions, report_dir, conn)
statsd.gauge('{0}runtime.per_report.classified_triplets'.format(metrics_run_root), cp.duration)
metadata.add_optional_job_metadata(metadata_conn, command, run_id, report_outputs=report_metadata)
| 22,452
|
def test_scs_invalid_params(dev, apdev):
"""SCS command invalid parameters"""
tests = ["",
"scs_id=1",
"scs_id=1 foo",
"scs_id=1 add ",
"scs_id=1 add scs_up=8",
"scs_id=1 add scs_up=7",
"scs_id=1 add scs_up=7 classifier_type=1",
"scs_id=1 add scs_up=7 classifier_type=4",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4 src_ip=q",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4 dst_ip=q",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4 src_port=q",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4 dst_port=q",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv4 protocol=foo",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv6 protocol=foo",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv6 next_header=foo",
"scs_id=1 add scs_up=7 classifier_type=4 ip_version=ipv6 flow_label=ffffff",
"scs_id=1 add scs_up=7 classifier_type=10",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11223344",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=qq",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11223344 filter_mask=ffffff",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11223344 filter_mask=qqqqqqqq",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11223344 filter_mask=ffffffff",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=foo filter_value=11223344 filter_mask=ffffffff",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11 filter_mask=ee classifier_type=10 prot_instance=2 prot_number=udp filter_value=22 filter_mask=ff",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11 filter_mask=ee classifier_type=10 prot_instance=2 prot_number=udp filter_value=22 filter_mask=ff tclas_processing=2",
"scs_id=1 add scs_up=7 classifier_type=10 prot_instance=1 prot_number=udp filter_value=11 filter_mask=ee classifier_type=10 prot_instance=2 prot_number=udp filter_value=22 filter_mask=ff tclas_processing=0",
"scs_id=1 add scs_up=6 classifier_type=4 ip_version=ipv4 src_ip=1.2.3.4 dst_ip=5.6.7.8 src_port=12345 dst_port=23456 dscp=5 protocol=udp scs_id=1 add scs_up=6 classifier_type=4 ip_version=ipv4 src_ip=1.2.3.4 dst_ip=5.6.7.8 src_port=12345 dst_port=23456 dscp=5 protocol=tcp"
"scs_id=1 remove",
"scs_id=1 change "]
for t in tests:
if "FAIL" not in dev[0].request("SCS " + t):
raise Exception("Invalid SCS parameters accepted: " + t)
| 22,453
|
def save_as_pdf_pages(plots, filename=None, path=None, verbose=True, **kwargs):
"""
Save multiple :class:`ggplot` objects to a PDF file, one per page.
Parameters
----------
plots : collection or generator of :class:`ggplot`
Plot objects to write to file. `plots` may be either a
collection such as a :py:class:`list` or :py:class:`set`:
>>> base_plot = ggplot(…)
>>> plots = [base_plot + ggtitle('%d of 3' % i) for i in range(1, 3)]
>>> save_as_pdf_pages(plots)
or, a generator that yields :class:`ggplot` objects:
>>> def myplots():
>>> for i in range(1, 3):
>>> yield ggplot(…) + ggtitle('%d of 3' % i)
>>> save_as_pdf_pages(myplots())
filename : :py:class:`str`, optional
File name to write the plot to. If not specified, a name
like “plotnine-save-<hash>.pdf” is used.
path : :py:class:`str`, optional
Path to save plot to (if you just want to set path and
not filename).
verbose : :py:class:`bool`
If ``True``, print the saving information.
kwargs : :py:class:`dict`
Additional arguments to pass to
:py:meth:`matplotlib.figure.Figure.savefig`.
Notes
-----
Using pandas' :meth:`~pandas.DataFrame.groupby` methods, tidy data
can be “faceted” across pages:
>>> from plotnine.data import mtcars
>>> def facet_pages(column):
>>> base_plot = [
>>> aes(x='wt', y='mpg', label='name'),
>>> geom_text(),
>>> ]
>>> for label, group_data in mtcars.groupby(column):
>>> yield ggplot(group_data) + base_plot + ggtitle(label)
>>> save_as_pdf_pages(facet_pages('cyl'))
Unlike :meth:`ggplot.save`, :meth:`save_as_pdf_pages` does not
process arguments for `height` or `width`. To set the figure size,
add :class:`~plotnine.themes.themeable.figure_size` to the theme
for some or all of the objects in `plots`:
>>> plot = ggplot(…)
>>> # The following are equivalent
>>> plot.save('filename.pdf', height=6, width=8)
>>> save_as_pdf_pages([plot + theme(figure_size=(8, 6))])
"""
# as in ggplot.save()
fig_kwargs = {'bbox_inches': 'tight'}
fig_kwargs.update(kwargs)
# If plots is already an iterator, this is a no-op; otherwise
# convert a list, etc. to an iterator
plots = iter(plots)
# filename, depends on the object
if filename is None:
# Take the first element from the iterator, store it, and
# use it to generate a file name
peek = [next(plots)]
plots = chain(peek, plots)
filename = peek[0]._save_filename('pdf')
if path:
filename = os.path.join(path, filename)
if verbose:
warn('Filename: {}'.format(filename), PlotnineWarning)
with PdfPages(filename) as pdf:
# Iterate over all plots (any peeked first element was re-added via chain above)
for plot in plots:
fig = plot.draw()
# Save as a page in the PDF file
pdf.savefig(fig, **fig_kwargs)
# To conserve memory when plotting a large number of pages,
# close the figure whether or not there was an exception
plt.close(fig)
| 22,454
|
def get_subpixel_indices(galtable, hpix=[], border=0.0, nside=0):
"""
Routine to get subpixel indices from a galaxy table.
Parameters
----------
galtable: `redmapper.Catalog`
A redmapper galaxy table master catalog
hpix: `list`, optional
Healpix number (ring format) of sub-region. Default is [] (full catalog)
border: `float`, optional
Border around hpix (in degrees) to find pixels. Default is 0.0.
Only works if hpix is a single-length list
nside: `int`, optional
Nside of healpix subregion. Default is 0 (full catalog).
Returns
-------
indices: `np.array`
Integer array of indices of galaxy table pixels in the subregion.
"""
if len(hpix) == 0 or nside == 0:
return np.arange(galtable.filenames.size)
theta, phi = hp.pix2ang(galtable.nside, galtable.hpix)
ipring_big = hp.ang2pix(nside, theta, phi)
_, indices = esutil.numpy_util.match(hpix, ipring_big)
# Ignore border if using full catalog
if border > 0.0 and len(hpix) > 0:
if len(hpix) != 1:
raise NotImplementedError("Cannot do boundary around a pixel list.")
# now we need to find the extra boundary...
boundaries = hp.boundaries(nside, hpix[0], step=galtable.nside // nside)
inhpix = galtable.hpix[indices]
for i in range(boundaries.shape[1]):
pixint = hp.query_disc(galtable.nside, boundaries[:, i],
border*np.pi/180., inclusive=True, fact=8)
inhpix = np.append(inhpix, pixint)
inhpix = np.unique(inhpix)
_, indices = esutil.numpy_util.match(inhpix, galtable.hpix)
return indices
| 22,455
|
def main():
"""creating the function in order to call all other functions inside of it"""
make_bear(bear_size, bear_color, sun_or_cloud)
make_sun()
| 22,456
|
def is_common_secret_key(key_name: str) -> bool:
"""Return true if the key_name value matches a known secret name or pattern."""
if key_name in COMMON_SECRET_KEYS:
return True
return any(
key_name.lower().endswith(key_suffix)
for key_suffix in COMMON_SECRET_KEY_SUFFIXES
)
| 22,457
|
def check_md5(func):
""" A decorator that checks if a file has been changed. """
@wraps(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
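# NOTE: `path` and `original_path` are not parameters of this decorator; they
# must exist in the enclosing scope when the decorated function runs.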
assert _check_md5(path, original_path), 'The file has been changed after {}().'.format(func.__name__)
return ret
return wrapper
| 22,458
|
def showgraphwidth(context, mapping):
"""Integer. The width of the graph drawn by 'log --graph' or zero."""
# just hosts documentation; should be overridden by template mapping
return 0
| 22,459
|
def test_get_scores_from_invalid_ratings_dataframe(tenor):
"""Tests if function can correctly handle pd.DataFrame objects."""
act = rtg.get_scores_from_ratings(ratings=conftest.input_invalid_df, tenor=tenor)
expectations = conftest.exp_invalid_df
expectations.columns = ["rtg_score_Fitch", "rtg_score_DBRS"]
# noinspection PyTypeChecker
assert_frame_equal(act, expectations)
| 22,460
|
def set_hud_visibility(bool_value, displays = None):
"""
Set the viewport hud display visibility.
Args:
bool_value (bool): True turns visibility on, False turns it off.
displays (list): List of heads up displays by name.
"""
if not displays:
displays = cmds.headsUpDisplay(q = True, lh = True)
for display in displays:
cmds.headsUpDisplay(display, e = True, vis = bool_value)
| 22,461
|
def diff_last_filter(trail, key=lambda x: x['pid']):
""" Filter out trails with last two key different
"""
return trail if key(trail[-1]) != key(trail[-2]) else None
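# Example (illustration only):
trail = [{'pid': 1}, {'pid': 2}, {'pid': 2}]
assert diff_last_filter(trail) is None           # last two pids equal -> dropped
assert diff_last_filter(trail[:2]) == trail[:2]  # pids 1 and 2 differ -> kept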
| 22,462
|
def read_items(source, n, target):
"""This function reads and prints n items from the given iterable, also
putting them to the given target queue."""
    for i in range(n):
        item = next(source)
print((i, item))
target.put((i, item))
| 22,463
|
def cli():
"""Command Line Interface for the Reproducible Open Benchmark REANA
workflow controller."""
pass
| 22,464
|
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
procfs_path = get_procfs_path()
set_scputimes_ntuple(procfs_path)
with open_binary('%s/stat' % procfs_path) as f:
values = f.readline().split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields)
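# Example (illustration only): a standalone sketch of the same /proc/stat
# parsing, assuming Linux and the common CLOCK_TICKS value of 100
# (i.e. sysconf(SC_CLK_TCK)).
with open('/proc/stat') as f:
    raw = f.readline().split()[1:]
user, nice, system, idle = (float(v) / 100.0 for v in raw[:4])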
| 22,465
|
def calc_centeroid(x, network: DTClustering, n_clusters: int):
    """Compute the cluster centers.
    Notes:
        Input x: [batch, sequence, feature, 1]
        Output: [n_clusters, hidden sequence, hidden feature, 1]
    """
code = network.encode(x)
feature = code.view(code.shape[0], -1) # [batch, sequence * feature]
feature = feature.detach().cpu().numpy()
km = cluster.KMeans(n_clusters=n_clusters, n_init=10)
km.fit(feature)
centers = km.cluster_centers_.reshape(n_clusters, code.shape[1], code.shape[2], 1)
centers = centers.astype(np.float32)
return centers
| 22,466
|
def start_run():
"""
    Starts a test run.
TODO all of it.
"""
uuid = request.form.get('uuid', default='none', type=str)
print('Starting a run: %s' % uuid)
return "ok"
| 22,467
|
def softXrayMono1(eV, k, m, c, rb_mm, bounce, inOff_deg, outOff_deg, verbose):
"""
# calculate premirror and grating angles for NSLS-II soft xray monos
# eV: energy
# k: central line density in mm-1
# m: diffraction order
# c: cff 0 < cff < infinity
# bounce = 'up' or 'down'
# inOff_deg - input beam angle relative to horizontal, NSLSII sense
# outOff_deg - output beam angle relative to horizontal, NSLSII sense
"""
# correct for several energies for Centurion
# correctly reverses sign of angles if geometry is flipped upside-down
# consider bounce direction
if bounce == "up":
a = -1
elif bounce == "down":
a = +1
else:
a = float("nan")
# calculate angles, no offsets
alpha_deg = ruben2005eqn8m(eV, c, k, m)
beta_deg = getBetaDeg(eV, alpha_deg, k, m)
# include offsets
thetaPMinc_deg = abs(
+0.5 * (outOff_deg - inOff_deg + a * (180.0 - alpha_deg + beta_deg))
)
thetaPM_deg = +0.5 * (outOff_deg + inOff_deg + a * (180.0 - alpha_deg + beta_deg))
thetaGR_deg = a * (90.0 + beta_deg) + outOff_deg
disp = getLinDisp(alpha_deg, beta_deg, k, m, rb_mm)
if verbose:
# alpha, beta both relative to normal and surface
print("eV=", eV, "c=", c)
print("alpha=", alpha_deg, 90.0 - alpha_deg)
print("beta=", beta_deg, (90 + beta_deg))
print("incident angle on pm=", thetaPMinc_deg)
print("dispersion (eV/mm) =", disp)
# grating and premirror rotation angles
print("rotation angles relative to horizontal")
print(" premirror", thetaPM_deg)
print(" grating", thetaGR_deg)
return (thetaPM_deg, thetaGR_deg, alpha_deg, beta_deg, thetaPMinc_deg, disp)
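# Example (illustration only): ruben2005eqn8m/getBetaDeg are not shown here;
# as a hedged sketch, the underlying grating equation under one common sign
# convention (an assumption) relates alpha and beta roughly like this:
import numpy as np
def beta_deg_from_alpha(eV, alpha_deg, k_mm, m):
    lam_mm = 1239.842e-6 / eV  # wavelength in mm (hc ~ 1239.842 eV*nm)
    sin_beta = m * lam_mm * k_mm - np.sin(np.radians(alpha_deg))
    return np.degrees(np.arcsin(sin_beta))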
| 22,468
|
def createHelmholtz3dExteriorCalderonProjector(
context, hminusSpace, hplusSpace, waveNumber,
label=None, useInterpolation=False, interpPtsPerWavelength=5000):
"""
Create and return the exterior Calderon projector for the
Helmholtz equation in 3D.
*Parameters:*
- context (Context)
A Context object to control the assembly of the weak form of the
newly constructed operator.
- hminusSpace (Space)
Function space representing functions in H^{-1/2}.
- hplusSpace (Space)
Function space representing functions in H^{+1/2}.
- waveNumber (float or complex)
Wave number, i.e. the number k in the Helmholtz equation
nabla^2 u + k^2 u = 0.
- label (string)
Textual label of the operator. If set to None (default), a unique
label will be generated automatically.
- useInterpolation (bool)
If set to False (default), the standard exp() function will be used
to evaluate the exponential factor occurring in the kernel. If set
to True, the exponential factor will be evaluated by piecewise-cubic
interpolation of values calculated in advance on a regular
grid. This normally speeds up calculations, but might result in a
loss of accuracy. This is an experimental feature: use it at your
own risk.
- interpPtsPerWavelength (int)
If useInterpolation is set to True, this parameter determines the
number of points per "effective wavelength" (defined as 2 pi /
abs(waveNumber)) used to construct the interpolation grid. The
default value (5000) is normally enough to reduce the relative or
absolute error, *whichever is smaller*, below 100 * machine
precision. If useInterpolation is set to False, this parameter is
ignored.
*Returns* a newly constructed BoundaryOperator_BasisFunctionType_ResultType
object, with BasisFunctionType and ResultType determined automatically from
the context argument and equal to either float32, float64, complex64 or
complex128.
"""
basisFunctionType = context.basisFunctionType()
if (basisFunctionType != hminusSpace.basisFunctionType() or
basisFunctionType != hplusSpace.basisFunctionType()):
raise TypeError("BasisFunctionType of context and all spaces must be the same")
resultType = context.resultType()
# construct object
if not label:
label = ""
result = _constructObjectTemplatedOnBasis(
core, 'helmholtz3dExteriorCalderonProjector', basisFunctionType,
context, hminusSpace, hplusSpace, waveNumber, label,
useInterpolation, interpPtsPerWavelength)
result._context = context
result._hminusSpace = hminusSpace
result._hplusSpace = hplusSpace
return result
| 22,469
|
def setup(bot: commands.Bot) -> None:
"""
    This is a necessary function to make the cog loadable.
Returns
-------
None
"""
bot.add_cog(Timeline(bot))
| 22,470
|
def prepare_xrf_map(data, chunk_pixels=5000, n_chunks_min=4):
"""
    Convert XRF map from its initial representation to a properly chunked Dask array.
Parameters
----------
data: da.core.Array, np.ndarray or RawHDF5Dataset (this is a custom type)
Raw XRF map represented as Dask array, numpy array or reference to a dataset in
HDF5 file. The XRF map must have dimensions `(ny, nx, ne)`, where `ny` and `nx`
define image size and `ne` is the number of spectrum points
chunk_pixels: int
The number of pixels in a single chunk. The XRF map will be rechunked so that
each block contains approximately `chunk_pixels` pixels and contain all `ne`
spectrum points for each pixel.
n_chunks_min: int
Minimum number of chunks. The algorithm will try to split the map into the number
of chunks equal or greater than `n_chunks_min`. If HDF5 dataset is not chunked,
then the whole map is treated as one chunk. This should happen only to very small
files, so parallelism is not important.
Returns
-------
data: da.core.Array
XRF map represented as Dask array with proper chunk size. The XRF map may be loaded
block by block when processing using `dask.array.map_blocks` and `dask.array.blockwise`
functions with Dask multiprocessing scheduler.
file_obj: h5py.File object
File object that points to HDF5 file. `None` if input parameter `data` is Dask or
numpy array. Note, that `file_obj` must be kept alive until processing is completed.
Closing the file will invalidate references to the dataset in the respective
Dask array.
Raises
------
TypeError if input parameter `data` is not one of supported types.
"""
file_obj = None # It will remain None, unless 'data' is 'RawHDF5Dataset'
if isinstance(data, da.core.Array):
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=data.chunksize[0:2],
data_shape=data.shape[0:2],
n_chunks_min=n_chunks_min,
)
data = data.rechunk(chunks=(*chunk_size, data.shape[2]))
elif isinstance(data, np.ndarray):
data = _array_numpy_to_dask(data, chunk_pixels=chunk_pixels, n_chunks_min=n_chunks_min)
elif isinstance(data, RawHDF5Dataset):
fpath, dset_name = data.abs_path, data.dset_name
# Note, that the file needs to remain open until the processing is complete !!!
file_obj = h5py.File(fpath, "r")
dset = file_obj[dset_name]
if dset.ndim != 3:
raise TypeError(
f"Dataset '{dset_name}' in file '{fpath}' has {dset.ndim} dimensions: 3D dataset is expected"
)
ny, nx, ne = dset.shape
if dset.chunks:
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=dset.chunks[0:2],
data_shape=(ny, nx),
n_chunks_min=n_chunks_min,
)
else:
# The data is not chunked. Process data as one chunk.
chunk_size = (ny, nx)
data = da.from_array(dset, chunks=(*chunk_size, ne))
else:
raise TypeError(f"Type of parameter 'data' is not supported: type(data)={type(data)}")
return data, file_obj
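# Example (illustration only): rechunking a synthetic 100x120 map with 4096
# spectrum points so each block holds ~5000 pixels and the full spectrum axis.
import numpy as np
import dask.array as da
data = da.zeros((100, 120, 4096), dtype=np.float32, chunks=(100, 120, 4096))
data = data.rechunk((50, 100, 4096))  # 50*100 = 5000 pixels per block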
| 22,471
|
def colorbar_factory(cax, mappable, **kwargs):
"""
Create a colorbar on the given axes for the given mappable.
.. note::
This is a low-level function to turn an existing axes into a colorbar
axes. Typically, you'll want to use `~.Figure.colorbar` instead, which
automatically handles creation and placement of a suitable axes as
well.
Parameters
----------
cax : `~matplotlib.axes.Axes`
The `~.axes.Axes` to turn into a colorbar.
mappable : `~matplotlib.cm.ScalarMappable`
The mappable to be described by the colorbar.
**kwargs
Keyword arguments are passed to the respective colorbar class.
Returns
-------
`.Colorbar`
The created colorbar instance.
"""
return Colorbar(cax, mappable, **kwargs)
| 22,472
|
def fourth_measurer_I_R(uniquePairsDf):
"""
fourth_measurer_I_R: computes the measure I_R that is based on the minimal number of tuples that should
be removed from the database for the constraints to hold.
The measure is computed via an ILP and the Gurobi optimizer is used to solve the ILP.
- There is a binary variable x for every tuple in the database.
    - The constraints are of the form x + y >= 1 where x and y represent two tuples that jointly violate a constraint.
- The objective function is to minimize the sum of all x's.
Parameters
----------
uniquePairsDf : dataframe
the result of the query that finds all pairs of tuples that jointly violate a constraint.
Returns
-------
list of two int variables:
database_measurer.objVal is the minimal number of tuples that should be removed for the constraints to hold.
end1 - start is the running time of the function.
"""
start = time.time()
rows_violations = uniquePairsDf.values
varsDict2 = {}
database_measurer = gp.Model('Minimal deletions of tuples')
database_measurer.setParam('OutputFlag', 0) # do not show any comments on the screen
# variables
for i in rows_violations :
varsDict2[i[0]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
varsDict2[i[1]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
# constraints
for i in rows_violations :
database_measurer.addConstr(varsDict2[i[0]]+varsDict2[i[1]]>=1, name='con')
    xs = []
    for i in varsDict2:
        xs.append(varsDict2[i])
    # objective function
    database_measurer.setObjective(sum(xs), GRB.MINIMIZE)
    database_measurer.optimize()
end1 = time.time()
return database_measurer.objVal , end1 - start
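# Example (illustration only): the ILP above is a minimum vertex cover on the
# conflict graph; with two pairs sharing tuple 3, deleting tuple 3 alone
# resolves both violations, so objVal == 1.
import pandas as pd
uniquePairsDf = pd.DataFrame([(1, 3), (2, 3)])  # only positions 0 and 1 of each row are read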
| 22,473
|
def import_hopdb_fossil_elements(path):
"""
Function to import fossil elements from xml file downloaded from the HOP DB.
This function populates both the FossilElement table and the Fossils table.
Fossils are aggregated from elements based on values in the HomininElement field
:param path:
:return:
"""
    xml_file = open(path, 'r')  # the 'U' (universal newline) mode was removed in Python 3.11
tree = etree.parse(xml_file) # tree is an lxml ElementTree object
element_tree = tree.getroot()
xml_file.close()
africa_list = [u'Chad', u'Libya', u'South Africa', u'Tanzania', u'Morocco', u'Malawi', u'Ethiopia', u'Algeria',
u'Namibia', u'Kenya', u'Zambia']
asia_list = [u'Israel', u'Indonesia', u'China', u'Vietnam', u'Iraq']
item_count = 0
for specimen in element_tree:
new_fossil_element = FossilElement()
# specimen_name = specimen.find('HomininElement').text
specimen_dict = {e.tag: e.text for e in specimen}
for element in specimen:
new_fossil_element.__setattr__(element.tag, element.text)
new_fossil_element.save()
# try:
# print 'created new specimen {} {}'.format(new_fossil_element.id, specimen_name)
# except UnicodeEncodeError:
# print 'created new specimen {}'.format(new_fossil_element.id)
item_count += 1
obj, created = Fossil.objects.get_or_create(HomininElement=new_fossil_element.HomininElement,
defaults=specimen_dict)
if created:
if obj.Country:
if obj.Country in africa_list:
obj.continent = 'Africa'
elif obj.Country in asia_list:
obj.continent = 'Asia'
obj.save()
new_fossil_element.fossil = obj
print('Successfully imported {} fossil elements from the HOPDB'.format(item_count))
| 22,474
|
def is_sublist_equal(list_one, list_two):
"""
Compare the values of two lists of equal length.
:param list_one: list - A list
:param list_two: list - A different list
:return EQUAL or UNEQUAL - If all values match, or not.
>>> is_sublist_equal([0], [0])
EQUAL
>>> is_sublist_equal([1], [0])
UNEQUAL
Iterate over values in each list and compare them
Assumes lists are of equal sizes
"""
for index, value in enumerate(list_one):
if value != list_two[index]:
return UNEQUAL
# Otherwise, all values matched, so it's equal
return EQUAL
| 22,475
|
def black_box_function(x, y):
"""Function with unknown internals we wish to maximize.
This is just serving as an example, for all intents and
purposes think of the internals of this function, i.e.: the process
which generates its output values, as unknown.
"""
return -x ** 2 - (y - 1) ** 2 + 1
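# Example (illustration only), assuming the `bayesian-optimization` package:
from bayes_opt import BayesianOptimization
optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={"x": (2, 4), "y": (-3, 3)},  # search bounds per parameter
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)  # best target value and the parameters that produced it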
| 22,476
|
def convert(
*,
csv,
tfrecords_template,
volume_shape,
examples_per_shard,
to_ras,
gzip,
verify_volumes,
num_parallel_calls,
multi_resolution,
start_resolution,
verbose,
):
"""Convert medical imaging volumes to TFRecords.
Volumes must all be the same shape. This will overwrite existing TFRecord files.
Labels can be volumetric or scalar.
"""
# TODO: improve docs.
volume_filepaths = _read_csv(csv)
num_parallel_calls = (
get_num_parallel() if num_parallel_calls == -1 else num_parallel_calls
)
_dirname = os.path.dirname(tfrecords_template)
if not os.path.exists(_dirname):
raise ValueError("directory does not exist: {}".format(_dirname))
if verify_volumes:
invalid_pairs = _verify_features_labels(
volume_filepaths=volume_filepaths,
volume_shape=volume_shape,
check_shape=True,
check_labels_int=True,
check_labels_gte_zero=True,
num_parallel_calls=None,
verbose=1,
)
if not invalid_pairs:
click.echo(click.style("Passed verification.", fg="green"))
else:
click.echo(click.style("Failed verification.", fg="red"))
            click.echo(
                f"Found {len(invalid_pairs)} invalid pairs of volumes. These files"
                f" might not all have shape {volume_shape}, the labels might not be an"
                " integer type or coercible to integer type, or the labels might not"
                " be >= 0."
            )
for pair in invalid_pairs:
click.echo(pair[0])
click.echo(pair[1])
sys.exit(-1)
if multi_resolution:
start_resolution_log = np.log2(start_resolution).astype(np.int32)
target_resolution_log = np.log2(volume_shape[0]).astype(np.int32)
resolutions = [
2 ** res for res in range(start_resolution_log, target_resolution_log + 1)
]
else:
resolutions = None
_write_tfrecord(
features_labels=volume_filepaths,
filename_template=tfrecords_template,
examples_per_shard=examples_per_shard,
to_ras=to_ras,
compressed=gzip,
processes=num_parallel_calls,
multi_resolution=multi_resolution,
resolutions=resolutions,
verbose=verbose,
)
click.echo(click.style("Finished conversion to TFRecords.", fg="green"))
| 22,477
|
def reduce_wires_to_segments(wires, segments):
""" Reduce wire names to segment definitions.
For purposes of creating the routing heuristic, it is assumed that if two
source wires share a prefix, they can be considered segments for the
purposes of the routing heuristic.
This is definitely true for wires like SR1BEG1 or LV18.
This may apply to the local fanout wires like GFAN0 or FAN_BOUNCE0.
"""
WIRE_PARTS = re.compile('^(.*?)([0-9]+)$')
for wire in wires:
m = WIRE_PARTS.match(wire)
assert m is not None
segment = m.group(1)
if segment not in segments:
segments[segment] = set()
segments[segment].add(wire)
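# Example (illustration only):
segments = {}
reduce_wires_to_segments(['SR1BEG1', 'SR1BEG2', 'LV18'], segments)
# segments == {'SR1BEG': {'SR1BEG1', 'SR1BEG2'}, 'LV': {'LV18'}}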
| 22,478
|
def test_vmrun_begin_replay():
"""Test begin_replay method."""
vmrun = mech.vmrun.VMrun('/tmp/first/some.vmx', executable='/tmp/vmrun',
provider='ws', test_mode=True)
expected = ['/tmp/vmrun', '-T', 'ws', 'beginReplay', '/tmp/first/some.vmx', 'a_name']
got = vmrun.begin_replay('a_name')
assert got == expected
| 22,479
|
def file_extension(path):
"""Lower case file extension."""
return audeer.file_extension(path).lower()
| 22,480
|
def locate(verbose):
"""Print location of the current workspace.
:param verbose: Unused.
"""
if not os.path.islink(ws_file):
print('no current workspace found, see "ros-get ws-create --help" how to create one')
return 1
else:
print(os.path.realpath(ws_file))
| 22,481
|
def motion(x, u, dt):
"""
motion model
"""
x[2] += u[1] * dt
x[0] += u[0] * math.cos(x[2]) * dt
x[1] += u[0] * math.sin(x[2]) * dt
x[3] = u[0]
x[4] = u[1]
return x
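# Example (illustration only), assuming the state layout [x, y, yaw, v, omega]:
x = motion([0.0, 0.0, 0.0, 0.0, 0.0], u=[1.0, 0.1], dt=0.1)
# x is now roughly [0.1, 0.001, 0.01, 1.0, 0.1]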
| 22,482
|
def sweep(
sweep: Union[dict, Callable], entity: str = None, project: str = None,
) -> str:
"""Initialize a hyperparameter sweep.
To generate hyperparameter suggestions from the sweep and use them
to train a model, call `wandb.agent` with the sweep_id returned by
this command. For command line functionality, see the command line
tool `wandb sweep` (https://docs.wandb.ai/ref/cli/wandb-sweep).
Args:
sweep: dict, SweepConfig, or callable. The sweep configuration
(or configuration generator). If a dict or SweepConfig,
should conform to the W&B sweep config specification
(https://docs.wandb.ai/guides/sweeps/configuration). If a
callable, should take no arguments and return a dict that
conforms to the W&B sweep config spec.
entity: str (optional). An entity is a username or team name
where you're sending runs. This entity must exist before you
can send runs there, so make sure to create your account or
team in the UI before starting to log runs. If you don't
specify an entity, the run will be sent to your default
entity, which is usually your username. Change your default
entity in [Settings](wandb.ai/settings) under "default
location to create new projects".
project: str (optional). The name of the project where you're
sending the new run. If the project is not specified, the
run is put in an "Uncategorized" project.
Returns:
sweep_id: str. A unique identifier for the sweep.
Examples:
Basic usage
<!--yeadoc-test:one-parameter-sweep-->
```python
import wandb
sweep_configuration = {
"name": "my-awesome-sweep",
"metric": {"name": "accuracy", "goal": "maximize"},
"method": "grid",
"parameters": {
"a": {
"values": [1, 2, 3, 4]
}
}
}
def my_train_func():
# read the current value of parameter "a" from wandb.config
wandb.init()
a = wandb.config.a
wandb.log({"a": a, "accuracy": a + 1})
sweep_id = wandb.sweep(sweep_configuration)
# run the sweep
wandb.agent(sweep_id, function=my_train_func)
```
"""
    if callable(sweep):
        sweep = sweep()
    # Sweep create for controller api and jupyter (eventually for cli).
if entity:
env.set_entity(entity)
if project:
env.set_project(project)
# Make sure we are logged in
wandb_login._login(_silent=True)
api = InternalApi()
sweep_id, warnings = api.upsert_sweep(sweep)
handle_sweep_config_violations(warnings)
print("Create sweep with ID:", sweep_id)
sweep_url = _get_sweep_url(api, sweep_id)
if sweep_url:
print("Sweep URL:", sweep_url)
return sweep_id
| 22,483
|
def connection_end_point(topo_uuid, node_uuid, nep_uuid, cep_uuid):
    """Retrieve ConnectionEndPoint by ID
    :param topo_uuid: ID of Topology
    :type topo_uuid: str
    :param node_uuid: ID of Node
    :type node_uuid: str
    :param nep_uuid: ID of NodeEdgePoint
    :type nep_uuid: str
    :param cep_uuid: ID of ConnectionEndPoint
    :type cep_uuid: str
    :rtype: ConnectionEndPoint
    """
for topo in context.topology_context.topology:
if topo.uuid == topo_uuid:
for node in topo.node:
if node.uuid == node_uuid:
for nep in node.owned_node_edge_point:
if nep.uuid == nep_uuid:
for cep in nep.cep_list.connection_end_point:
if cep.uuid == cep_uuid:
return cep
| 22,484
|
def remove_melt_from_perplex(perplex,melt_percent=-1):
""" Extrapolate high temperature values to remove melt content using sub-solidus values.
The assumption is that alpha and beta are constant and temperature-independent at high temperature."""
Tref = 273
Pref = 0
rho = perplex.rho.reshape( int(perplex.np), int(perplex.nt))
rhoresidual = perplex.rhoresidual.reshape( int(perplex.np), int(perplex.nt))
rhomelt = perplex.rhomelt.reshape( int(perplex.np), int(perplex.nt))
T = perplex.T.reshape( int(perplex.np), int(perplex.nt))
P = perplex.P.reshape( int(perplex.np), int(perplex.nt))
alpha = perplex.alpha.reshape(int(perplex.np), int(perplex.nt))
alpharesidual = perplex.alpharesidual.reshape(int(perplex.np), int(perplex.nt))
alphamelt = perplex.alphamelt.reshape(int(perplex.np), int(perplex.nt))
beta = perplex.beta.reshape( int(perplex.np), int(perplex.nt))
betaresidual = perplex.betaresidual.reshape( int(perplex.np), int(perplex.nt))
betamelt = perplex.betamelt.reshape( int(perplex.np), int(perplex.nt))
cp = perplex.cp.reshape( int(perplex.np), int(perplex.nt))
cpmelt = perplex.cpmelt.reshape( int(perplex.np), int(perplex.nt))
cpresidual = perplex.cpresidual.reshape( int(perplex.np), int(perplex.nt))
melt = perplex.melt.reshape( int(perplex.np), int(perplex.nt))
    # smooth alpha and beta along the melt boundary to avoid unsuitable vertical discontinuities
n_smooth = 3
rho_smooth = []
rhomelt_smooth = []
rhoresidual_smooth = []
alpha_smooth = []
beta_smooth = []
cp_smooth = []
alphamelt_smooth = []
betamelt_smooth = []
cpmelt_smooth = []
alpharesidual_smooth = []
betaresidual_smooth = []
cpresidual_smooth = []
i_smooth = 0
i_int = 0
#alpha_beta_values = False
for j in range(0,int(perplex.np)):
if (melt_percent<0):
are_values = False
for i in range(int(perplex.nt)-1,-1,-1):
#print('None T {} P {} melt {}'.format(T[j,i],P[j,i],melt[j,i]))
if ( melt[j,i] > 0.0e0 ):
#print('None T {} P {}'.format(T[j,i],P[j,i]))
pass
else:
if (i_smooth<n_smooth):
alpha_smooth.append(alpha[j,i])
beta_smooth.append(beta[j,i])
cp_smooth.append(cp[j,i])
cpmelt_smooth.append(cpmelt[j,i])
cpresidual_smooth.append(cpresidual[j,i])
alphamelt_smooth.append(alphamelt[j,i])
betamelt_smooth.append(betamelt[j,i])
alpharesidual_smooth.append(alpharesidual[j,i])
betaresidual_smooth.append(betaresidual[j,i])
rho_smooth.append(rho[j,i])
rhomelt_smooth.append(rhomelt[j,i])
rhoresidual_smooth.append(rhoresidual[j,i])
i_smooth = i_smooth + 1
else:
alpha_smooth[i_int] = alpha[j,i]
beta_smooth[i_int] = beta[j,i]
cp_smooth[i_int] = cp[j,i]
cpmelt_smooth[i_int] = cpmelt[j,i]
cpresidual_smooth[i_int] = cpresidual[j,i]
alphamelt_smooth[i_int] = alphamelt[j,i]
betamelt_smooth[i_int] = betamelt[j,i]
alpharesidual_smooth[i_int] = alpharesidual[j,i]
betaresidual_smooth[i_int] = betaresidual[j,i]
rho_smooth[i_int] = rho[j,i]
rhomelt_smooth[i_int] = rhomelt[j,i]
rhoresidual_smooth[i_int] = rhoresidual[j,i]
i_int = i_int + 1
if (i_int>=n_smooth):
i_int = 0
alpha_used = sum(alpha_smooth)/len(alpha_smooth)
beta_used = sum(beta_smooth)/len(beta_smooth)
cp_used = sum(cp_smooth)/len(cp_smooth)
rho_ref = sum(rho_smooth)/len(rho_smooth) / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
alpha_used_melt = sum(alphamelt_smooth)/len(alphamelt_smooth)
beta_used_melt = sum(betamelt_smooth)/len(betamelt_smooth)
cp_used_melt = sum(cpmelt_smooth)/len(cpmelt_smooth)
rho_ref_melt = sum(rhomelt_smooth)/len(rhomelt_smooth) / ( (1+beta_used_melt*(P[j,i]-Pref)) * (1-alpha_used_melt*(T[j,i]-Tref)) )
alpha_used_residual = sum(alpharesidual_smooth)/len(alpharesidual_smooth)
beta_used_residual = sum(betaresidual_smooth)/len(betaresidual_smooth)
cp_used_residual = sum(cpresidual_smooth)/len(cpresidual_smooth)
rho_ref_residual = sum(rhoresidual_smooth)/len(rhoresidual_smooth) / ( (1+beta_used_residual*(P[j,i]-Pref)) * (1-alpha_used_residual*(T[j,i]-Tref)) )
#if ( not alpha_beta_values):
# # we use low pressure value for alpha and beta - upper-bound estimation of it then
# alpha_used = alpha[j,i]
# beta_used = beta[j,i]
# alpha_beta_values = True
#rho_ref = rho[j,i] / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
melt_ref = 0.0e0
are_values = True
break
if (are_values):
for i in range(int(perplex.nt)-1,-1,-1):
if ( melt[j,i] > 0.0e0 ):
# rho[j,i] = rho_ref*(1+beta_used*(P[j,i]-Pref))*(1-alpha_used*(T[j,i]-Tref))
rho[j,i] = rho_ref*(1+betaresidual[j,i]*(P[j,i]-Pref))*(1-alpharesidual[j,i]*(T[j,i]-Tref))
#alpha[j,i] = alpha_used
#beta[j,i] = beta_used
                        # we do not extrapolate alpha and beta but only rho_ref;
                        # we keep alpha and beta from the residual to keep them P,T dependent
alpha[j,i] = alpharesidual[j,i]
beta[j,i] = betaresidual[j,i]
cp[j,i] = cpresidual[j,i]
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
else:
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
break
else:
for i in range(int(perplex.nt)-1,-1,-1):
# print('melt[j,i] {}'.format(melt[j,i]))
if (melt[j,i]>melt_percent/100.0e0):
melt[j,i] = melt_percent/100.0e0
rho[j,i] = rhoresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + rhomelt[j,i]*melt_percent/100.0e0
alpha[j,i] = alpharesidual[j,i]*(100.0e0-melt_percent)/100.0e0 + alphamelt[j,i]*melt_percent/100.0e0
beta[j,i] = betaresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + betamelt[j,i]*melt_percent/100.0e0
cp[j,i] = cpresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + cpmelt[j,i]*melt_percent/100.0e0
if (np.isnan(rho[j,i])):
print('NaN melt {} rho {} rhoresidual {} rhomelt {} alpha {} beta {}'.format(
melt[j,i],rho[j,i],rhoresidual[j,i], rhomelt[j,i], alpha[j,i], beta[j,i]))
quit()
perplex.rho = rho.reshape(perplex.np*perplex.nt)
perplex.T = T.reshape(perplex.np*perplex.nt)
perplex.P = P.reshape(perplex.np*perplex.nt)
perplex.alpha = alpha.reshape(perplex.np*perplex.nt)
perplex.beta = beta.reshape(perplex.np*perplex.nt)
perplex.cp = cp.reshape(perplex.np*perplex.nt)
perplex.melt = melt.reshape(perplex.np*perplex.nt)
perplex.melt = np.zeros_like(perplex.melt)
perplex.rhomelt = rhomelt.reshape(perplex.np*perplex.nt)
perplex.alphamelt = alphamelt.reshape(perplex.np*perplex.nt)
perplex.betamelt = betamelt.reshape(perplex.np*perplex.nt)
perplex.cpmelt = cpmelt.reshape(perplex.np*perplex.nt)
perplex.rhoresidual = rhoresidual.reshape(perplex.np*perplex.nt)
perplex.alpharesidual = alpharesidual.reshape(perplex.np*perplex.nt)
perplex.betaresidual = betaresidual.reshape(perplex.np*perplex.nt)
perplex.cpresidual = cpresidual.reshape(perplex.np*perplex.nt)
return perplex
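# Example (illustration only): the linearized equation of state used by the
# extrapolation above, factored out as a sketch (Tref = 273 K, Pref = 0).
def rho_eos(rho_ref, alpha, beta, T, P, Tref=273.0, Pref=0.0):
    return rho_ref * (1.0 + beta * (P - Pref)) * (1.0 - alpha * (T - Tref))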
| 22,485
|
def _delete_empty_source_bucket(cloud_logger, source_bucket):
"""Delete the empty source bucket
Args:
cloud_logger: A GCP logging client instance
source_bucket: The bucket object for the original source bucket in the source project
"""
spinner_text = 'Deleting empty source bucket'
cloud_logger.log_text(spinner_text)
with yaspin(text=spinner_text) as spinner:
source_bucket.delete()
spinner.ok(_CHECKMARK)
| 22,486
|
def similarity_score(text_small, text_large, min_small = 10, min_large = 50):
"""
complexity: len(small) * len(large)
@param text_small: the smaller text
(in this case the text which's validity is being checked)
@param text_large: the larger text (in this case the scientific study)
returns: a number (-1 <= n <= 100) representing the similarity
-1 if the data isn't populated enough for reliability
"""
# cleaning text:
filtered_small = clean(text_small)
filtered_large = clean(text_large)
fSmallLen = len(filtered_small)
fLargeLen = len(filtered_large)
if (fSmallLen < min_small) or (fLargeLen < min_large): return -1
max_rating = fLargeLen * fSmallLen
hits = 0
for sm_word in filtered_small:
for big_word in filtered_large:
if sm_word == big_word: hits += 1
return 100. * hits / max_rating
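# Example (illustration only): an equivalent hit count in O(len(small) +
# len(large)) using Counter, assuming clean() returns token lists.
from collections import Counter
def similarity_score_fast(filtered_small, filtered_large):
    cs, cl = Counter(filtered_small), Counter(filtered_large)
    hits = sum(cs[w] * cl[w] for w in cs)
    return 100. * hits / (len(filtered_small) * len(filtered_large))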
| 22,487
|
def consume(command):
"""
Example of how to consume standard output and standard error of
a subprocess asynchronously without risk on deadlocking.
"""
# Launch the command as subprocess.
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the asynchronous readers of the process' stdout and stderr.
stdout = AsynchronousFileReader(process.stdout, autostart=True)
stderr = AsynchronousFileReader(process.stderr, autostart=True)
# Check the readers if we received some output (until there is nothing more to get).
while not stdout.eof() or not stderr.eof():
# Show what we received from standard output.
for line in stdout.readlines():
print('Received line on standard output: ' + repr(line))
# Show what we received from standard error.
for line in stderr.readlines():
print('Received line on standard error: ' + repr(line))
# Sleep a bit before polling the readers again.
time.sleep(.1)
# Let's be tidy and join the threads we've started.
stdout.join()
stderr.join()
# Close subprocess' file descriptors.
process.stdout.close()
process.stderr.close()
| 22,488
|
def print_status(exp, offset=0, all_trials=False, collapse=False):
"""Print the status of the current experiment.
Parameters
----------
offset: int, optional
The number of tabs to the right this experiment is.
all_trials: bool, optional
Print all trials individually
collapse: bool, optional
Fetch trials for entire EVCTree. Defaults to False.
"""
trials = exp.fetch_trials(with_evc_tree=collapse)
exp_title = exp.node.tree_name
print(" " * offset, exp_title, sep="")
print(" " * offset, "=" * len(exp_title), sep="")
if all_trials:
print_all_trials(trials, offset=offset)
else:
print_summary(trials, offset=offset)
| 22,489
|
def abline(a_coords, b_coords, ax=None, **kwargs):
"""Draw a line connecting a point `a_coords` with a point `b_coords`.
Parameters
----------
a_coords : array-like, shape (2,)
xy coordinates of the start of the line.
    b_coords : array-like, shape (2,)
        xy coordinates of the end of the line.
    ax : matplotlib axis
        Axis on which to plot the line.
    **kwargs : dict
        Arguments to pass along to the matplotlib `plot` function.
    """
    if ax is None:
        ax = plt.gca()
    # zip pairs up the x coordinates and the y coordinates of the two points
    xs, ys = zip(a_coords, b_coords)
    line, = ax.plot(xs, ys, **kwargs)
return line
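# Example (illustration only):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
abline((0, 0), (1, 1), ax=ax, color='red', linestyle='--')
plt.show()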
| 22,490
|
def get_extensions():
"""
Returns supported extensions of the DCC
:return: list(str)
"""
return ['.hip', '.hiplc', '.hipnc', '.hip*']
| 22,491
|
def precheck_data_format(idir, hlsp_name):
"""
Generates parameter file for check_metadata_format based on file endings.
:param idir: The directory containing HLSP files to check.
:type idir: str
:param hlsp_name: The name of the HLSP.
:type hlsp_name: str
"""
# Start logging to an output file.
logname = "precheck_data_format.log"
precheck_log = new_logger(logname)
precheck_log.info('Started at ' + datetime.datetime.now().isoformat())
# Initialize a new HLSPFile object.
new_file = HLSPFile(name=hlsp_name)
new_file.update_filepaths(input=os.path.realpath(idir))
# Get unique set of file endings.
all_file_endings = get_all_file_endings(idir)
# Sort these based on the extension type.
file_endings = set([x.split('.')[-1] for x in all_file_endings])
for fe in all_file_endings:
new_type = FileType(fe)
new_file.add_filetype(new_type)
precheck_log.info("Found the following file type: {0}".format(fe))
# Create the output file, based on the name of the HLSP.
new_file.toggle_ingest(1, state=True)
filename = new_file.save(caller=__file__)
#make_parameter_file(filename, file_endings, all_file_endings, idir)
precheck_log.info('Finished at ' + datetime.datetime.now().isoformat())
return filename
| 22,492
|
def calc_spatially_diffusion_factors(
regions,
fuel_disagg,
real_values,
low_congruence_crit,
speed_con_max,
p_outlier
):
"""
Calculate spatial diffusion values
Arguments
---------
regions : dict
Regions
fuel_disagg : dict
Disaggregated fuel per region
real_values : dict
Real values
p_outlier : float
Percentage of min and max outliers are flattened
    Returns
    -------
    f_reg : dict
        Diffusion factors per region (not weighted with demand)
    f_reg_norm : dict
        Diffusion factors weighted with demand
    f_reg_norm_abs : dict
        Diffusion values with normed population. If no value
        is larger than 1, the total sum of all shares calculated
        for every region is identical to the defined scenario variable.
Explanation
============
(I) Load diffusion values
(II) Calculate diffusion factors
(III) Calculate sigmoid diffusion values for technology
specific enduse service shares for every region
"""
    # -----
    # I. Load diffusion values
    # -----
spatial_diff_values = spatial_diffusion_values(
regions=regions,
real_values=real_values,
speed_con_max=speed_con_max,
low_congruence_crit=low_congruence_crit,
p_outlier=p_outlier)
# -----
# II. Calculation of diffusion factors (Not weighted with demand)
# -----
# Not weighted with demand
max_value_diffusion = max(list(spatial_diff_values.values()))
f_reg = {}
for region in regions:
f_reg[region] = spatial_diff_values[region] / max_value_diffusion
# Weighted with demand
f_reg_norm_abs, f_reg_norm = calc_diffusion_f(
regions,
f_reg,
spatial_diff_values,
[fuel_disagg['residential'], fuel_disagg['service'], fuel_disagg['industry']])
return f_reg, f_reg_norm, f_reg_norm_abs
| 22,493
|
def get_set(path):
"""Returns a matrix of data given the path to the CSV file. The heading row and NaN values are excluded."""
df = pd.read_csv(path, sep=';', encoding='latin')
return df.dropna(subset=['PMID1', 'PMID2', 'Authorship'], how='any').values
| 22,494
|
def add_landmark(soup: BeautifulSoup, textf: str, landmarks: list):
"""
Adds an item to landmark list with appropriate details.
INPUTS:
soup: BeautifulSoup representation of the file we are indexing in ToC
textf: path to the file
landmarks: the list of landmark items we are building
OUTPUTS:
None
"""
epub_type = get_epub_type(soup)
landmark = TocItem()
if epub_type != "":
landmark.epub_type = epub_type
landmark.file_link = textf
landmark.place = get_place(soup)
title_tag = soup.find("title")
if title_tag is not None:
landmark.title = title_tag.string
        if landmark.title is None:
            # This is a bit desperate, use this only if there's no proper <title> tag in file.
            landmark.title = landmark.epub_type.capitalize()
    else:
        landmark.title = landmark.epub_type.capitalize()
landmarks.append(landmark)
| 22,495
|
def _client_get(client_create_fn: Callable[..., Any], params: ClientGetParams) -> Any:
"""
:param client_create_fn: the `boto3.client` or `boto3.resource` function
"""
which_service = params.boto3_client_name
endpoint_url = os.getenv(params.endpoint_url_key)
access_key_id = os.getenv(params.access_key_id_key)
access_key_secret = os.getenv(params.access_key_secret_key)
access_session_token = os.getenv(params.access_session_token)
# AWS_REGION is Fargate-specific, most AWS stuff uses AWS_DEFAULT_REGION.
region = os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION")
if not region:
raise FromEnvException("Please set AWS_REGION= or AWS_DEFAULT_REGION=")
    # Not needed long term, more to help migrate to `env_helpers`.
    # Notably, when `is_local` is not set, it won't break anything.
    # os.getenv returns a string, so coerce IS_LOCAL to a bool for the
    # `is_local != False` / `is_local != True` assertions below.
    is_local = os.getenv("IS_LOCAL", None)
    if is_local is not None:
        is_local = is_local.lower() in ("1", "true")
# Unlike Rust FromEnv, we rely on boto3's built in region handling.
if _running_in_localstack():
return _localstack_client(client_create_fn, params)
elif all((endpoint_url, access_key_id, access_key_secret)):
# Local, all are passed in from docker-compose.yml
logging.info(f"Creating a local client for {which_service}")
assert (
is_local != False
), f"You must pass in credentials for a local {which_service} client"
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
aws_access_key_id=access_key_id,
aws_secret_access_key=access_key_secret,
aws_session_token=access_session_token,
region_name=region,
)
elif endpoint_url and not any((access_key_id, access_key_secret)):
# Local or AWS doing cross-region stuff
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
region_name=region,
)
elif not any((endpoint_url, access_key_id, access_key_secret)):
# AWS
logging.info("Creating a prod client")
assert (
is_local != True
), f"You can't pass in credentials for a prod {which_service} client"
return client_create_fn(
params.boto3_client_name,
region_name=region,
)
else:
raise FromEnvException(
f"You specified access key but not endpoint for {params.boto3_client_name}?"
)
| 22,496
|
def parse_metadata_from_sensorcommunity_csv_filename(filename):
"""Parse sensor id, sensor type and date from a raw luftdaten.info AQ .csv
filename.
Parameters:
filename (path): the file to parse. Format of the file is expected to be
the one used by the luftdaten.info project and saved by
sensor.community, for example as in the one below:
https://archive.sensor.community/2020-01-13/
2020-01-12_sds011_sensor_35233.csv
Return:
tuple: (sensor_id, sensor_type, date) if possible,
(None, None, None) otherwise
"""
tokens = os.path.basename(os.path.splitext(filename)[0]).split("_")
if len(tokens) == 4 and tokens[2] == "sensor":
try:
date = datetime.strptime(tokens[0], "%Y-%m-%d")
except ValueError:
date = None
sensor_type = tokens[1]
sensor_id = int(tokens[3])
return (sensor_id, sensor_type, date)
# failure
return (None, None, None)
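# Example (illustration only), using the filename format from the docstring:
parse_metadata_from_sensorcommunity_csv_filename(
    "2020-01-12_sds011_sensor_35233.csv")
# -> (35233, 'sds011', datetime.datetime(2020, 1, 12, 0, 0))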
| 22,497
|
def gaussian_target(img_shape, t, MAX_X=0.85, MIN_X=-0.85, MAX_Y=0.85, MIN_Y=-0.85, sigma2=10):
"""
Create a gaussian bivariate tensor for target or robot position.
:param t: (th.Tensor) Target position (or robot position)
"""
X_range = img_shape[1]
Y_range = img_shape[2]
XY_range = np.arange(X_range*Y_range)
for i in range(t.size(0)):
X_t = int((MAX_X+t[i][1])*(img_shape[1]/(MAX_X-MIN_X)))
Y_t = int((MAX_Y-t[i][0])*(img_shape[2]/(MAX_Y-MIN_Y)))
bi_var_gaussian = -0.5 * (((XY_range // X_range)- X_t)**2 + (XY_range - (XY_range//Y_range)*Y_range - Y_t)**2)/sigma2
img_target = th.from_numpy((np.exp(bi_var_gaussian)/(2*np.pi*sigma2)).reshape(X_range, Y_range))
img_target = img_target[None,...][None,...]
        if i == 0:
            output = img_target
        else:
            output = th.cat([output, img_target], 0)
return output
| 22,498
|
def _old_process_multipart(entity):
"""The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
process_multipart(entity)
params = entity.params
for part in entity.parts:
if part.name is None:
key = ntou('parts')
else:
key = part.name
if part.filename is None:
# It's a regular field
value = part.fullvalue()
else:
# It's a file upload. Retain the whole part so consumer code
# has access to its .file and .filename attributes.
value = part
if key in params:
if not isinstance(params[key], list):
params[key] = [params[key]]
params[key].append(value)
else:
params[key] = value
| 22,499
|