| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def compute_kld(confidences: torch.Tensor, reduction="mean") -> torch.Tensor:
"""
Args:
confidences (Tensor): a tensor of shape [N, M, K] of predicted confidences from ensembles.
reduction (str): specifies the reduction to apply to the output.
- none: no reduction will be applied,
- mean: the sum of the output will be divided by
the number of elements in the output.
Returns:
kld (Tensor): KL divergences for given confidences from ensembles.
- a tensor of shape [N,] when reduction is "none",
- a tensor of shape [,] when reduction is "mean".
"""
assert reduction in [
"none", "mean",
], f"Unknown reduction = \"{reduction}\""
kld = torch.zeros(confidences.size(0), device=confidences.device) # [N,]
ensemble_size = confidences.size(1)
if ensemble_size > 1:
pairs = []
for i in range(ensemble_size):
for j in range(ensemble_size):
pairs.append((i, j))
for (i, j) in pairs:
if i == j:
continue
kld += torch.nn.functional.kl_div(
confidences[:, i, :].log(),
confidences[:, j, :],
reduction="none", log_target=False,
).sum(1) # [N,]
kld = kld / (ensemble_size * (ensemble_size - 1))
if reduction == "mean":
kld = kld.mean() # [,]
return kld
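# A minimal usage sketch (assumes torch is available, as the function above does):
# confidences for N=4 examples from an M=3 member ensemble over K=5 classes.
import torch

confidences = torch.softmax(torch.randn(4, 3, 5), dim=-1)   # [N, M, K]
per_example = compute_kld(confidences, reduction="none")    # tensor of shape [4]
averaged = compute_kld(confidences, reduction="mean")       # scalar tensor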
| 11,500
|
def create_mappings(index_name, payload_file_path):
"""
Create the index mapping in Elasticsearch if the index does not already exist.
"""
try:
url = '{}/{}'.format(config['es_url'], index_name)
resp = requests.get(url)
if resp.status_code // 100 == 4: # the index does not exist yet
with codecs.open(payload_file_path, 'r') as f:
payload = f.read() # stringfied json
resp = requests.put(url, payload)
if resp.status_code // 100 != 2:
logger.error('can not create es index for {}'.format(index_name))
else:
logger.info('es index {} created'.format(index_name))
except requests.exceptions.ConnectionError:
# if ES is not online yet, wait briefly and retry
time.sleep(5)
create_mappings(index_name, payload_file_path)
| 11,501
|
def get_crypto_quote(symbol, info=None):
"""Gets information about a crypto including low price, high price, and open price
:param symbol: The crypto ticker.
:type symbol: str
:param info: Will filter the results to have a list of the values that correspond to key that matches info.
:type info: Optional[str]
:returns: [dict] If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \
Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info.
:Dictionary Keys: * asset_currency
* display_only
* id
* max_order_size
* min_order_size
* min_order_price_increment
* min_order_quantity_increment
* name
* quote_currency
* symbol
* tradability
"""
id = get_crypto_info(symbol, info='id')
url = urls.crypto_quote(id)
data = helper.request_get(url)
return helper.filter(data, info)
| 11,502
|
def test_register_task_decl_duplicate1(collector, task_decl):
"""Test handling of duplicate task declarations in the collector.
"""
collector.contributions['exopy.Task'] = None
tb = {}
task_decl.task = 'exopy.tasks:Task'
task_decl.register(collector, tb)
assert 'exopy.Task_duplicate1' in tb
| 11,503
|
def plot_contours_2D(clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# print(Z)
CS = plt.contourf(xx, yy, Z, levels=[-0.5, 0.5, 1.5, 2.5, 3.5], **params)
proxy = [plt.Rectangle((0, 0), 1, 1, fc=pc.get_facecolor()[0])
for pc in CS.collections]
#print(len(CS.collections))
#labels = []
#for i in range(len(CS.collections)):
# labels.append(ClassifierClient.app_names_for_classifier[i])
# plt.title('Simplest default with labels')
#plt.legend(proxy, ["range(2-3)", "range(3-4)", "range(4-6)"])
##plt.colorbar()
| 11,504
|
def fix(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True,
missing_terminals=True, missing_loops=False, missing_hydrogens=True,
pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer',
engine_loops='Modeller', verbose=False):
"""fix_pdb_structure(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True,
missing_terminals=True, missing_loops=False, missing_hydrogens=True,
pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer',
engine_loops='Modeller', verbose=False):
Fixing missing atoms, residues, terminals or loops in the molecular model coming from a pdb file.
This method fixes the possible missing atoms, residues, loops or terminals in a molecular
model. The result is a new molecular model, in the desired supported form, with those elements
fixed.
Parameters
----------
item: molecular model
Molecular model in any supported form by MolSysMT.
arg2: type, default='value'
Paragraph with explanation.
Returns
-------
object: type
Paragraph with explanation.
Examples
--------
See Also
--------
:func:`molsysmt.load`
Notes
-----
Todo
----
Warning
-------
The method has been tested with the following input forms: pdbid, pdbfile, pdbfixer.PDBFixer
and openmm.Modeller.
"""
from .tools.forms import digest as digest_forms
from ._private_tools.engines import digest_engines
from .multitool import convert
form_in, form_out = digest_forms(item, to_form)
engine_fix = digest_engines(engine_fix)
engine_hydrogens = digest_engines(engine_hydrogens)
engine_loops = digest_engines(engine_loops)
tmp_item = None
if engine_fix=='PDBFixer':
tmp_item = convert(item, to_form='pdbfixer.PDBFixer')
if missing_residues:
tmp_item.findMissingResidues()
if missing_atoms:
tmp_item.findMissingAtoms()
if nonstandard_residues:
tmp_item.findNonstandardResidues()
if verbose:
print('Missing residues:', tmp_item.missingResidues)
print('Non standard residues:', tmp_item.nonstandardResidues)
print('Missing atoms', tmp_item.missingAtoms)
print('Missing terminals:', tmp_item.missingTerminals)
tmp_item.addMissingAtoms()
if verbose:
print('Missing residues or atoms reported fixed.')
if missing_hydrogens:
from .protonation import add_missing_hydrogens
tmp_item = add_missing_hydrogens(tmp_item, pH=pH, engine=engine_hydrogens, verbose=verbose)
if missing_loops:
from .model_loops import add_loop
tmp_item = add_loop(tmp_item, engine=engine_loops)
tmp_item = convert(tmp_item, to_form=form_out)
return tmp_item
| 11,505
|
def data_split(*args, **kwargs):
"""A function to split a dataset into train, test, and optionally
validation datasets.
**Arguments**
- ***args** : arbitrary _numpy.ndarray_ datasets
- An arbitrary number of datasets, each required to have
the same number of elements, as numpy arrays.
- **train** : {_int_, _float_}
- If a float, the fraction of elements to include in the training
set. If an integer, the number of elements to include in the
training set. The value `-1` is special and means include the
remaining part of the dataset in the training dataset after
the test and (optionally) val parts have been removed
- **val** : {_int_, _float_}
- If a float, the fraction of elements to include in the validation
set. If an integer, the number of elements to include in the
validation set. The value `0` is special and means do not form
a validation set.
- **test** : {_int_, _float_}
- If a float, the fraction of elements to include in the test
set. If an integer, the number of elements to include in the
test set.
- **shuffle** : _bool_
- A flag to control whether the dataset is shuffled prior to
being split into parts.
**Returns**
- _list_
- A list of the split datasets in train, [val], test order. If
datasets `X`, `Y`, and `Z` were given as `args` (and assuming a
non-zero `val`), then [`X_train`, `X_val`, `X_test`, `Y_train`,
`Y_val`, `Y_test`, `Z_train`, `Z_val`, `Z_test`] will be returned.
"""
# handle valid kwargs
train, val, test = kwargs.pop('train', -1), kwargs.pop('val', 0.0), kwargs.pop('test', 0.1)
shuffle = kwargs.pop('shuffle', True)
if len(kwargs):
raise TypeError('following kwargs are invalid: {}'.format(kwargs))
# validity checks
if len(args) == 0:
raise RuntimeError('Need to pass at least one argument to data_split')
# check for consistent length
n_samples = len(args[0])
for arg in args[1:]:
assert len(arg) == n_samples, 'args to data_split have different length'
# determine numbers
num_val = int(n_samples*val) if val<=1 else val
num_test = int(n_samples*test) if test <=1 else test
num_train = n_samples - num_val - num_test if train==-1 else (int(n_samples*train) if train<=1 else train)
assert num_train >= 0, 'bad parameters: negative num_train'
assert num_train + num_val + num_test <= n_samples, 'too few samples for requested data split'
# calculate masks
perm = np.random.permutation(n_samples) if shuffle else np.arange(n_samples)
train_mask = perm[:num_train]
val_mask = perm[-num_val:]
test_mask = perm[num_train:num_train+num_test]
# apply masks
masks = [train_mask, val_mask, test_mask] if num_val > 0 else [train_mask, test_mask]
# return list of new datasets
return [arg[mask] for arg in args for mask in masks]
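# A small usage sketch with hypothetical arrays (assumes numpy is imported as np,
# as the function above does):
X = np.arange(100).reshape(50, 2)
Y = np.arange(50)
# default val=0: returns [X_train, X_test, Y_train, Y_test]
X_train, X_test, Y_train, Y_test = data_split(X, Y, train=-1, test=0.2)
# with a validation split: returns train/val/test slices for each input array
X_tr, X_val, X_te, Y_tr, Y_val, Y_te = data_split(X, Y, train=0.6, val=0.2, test=0.2)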
| 11,506
|
def compare(isamAppliance1, isamAppliance2):
"""
Compare Policy Sets between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['id']
del obj['userlastmodified']
del obj['lastmodified']
del obj['datecreated']
obj['policies'] = _convert_policy_id_to_name(isamAppliance1, obj['policies'])
for obj in ret_obj2['data']:
del obj['id']
del obj['userlastmodified']
del obj['lastmodified']
del obj['datecreated']
obj['policies'] = _convert_policy_id_to_name(isamAppliance2, obj['policies'])
return tools.json_compare(ret_obj1, ret_obj2,
deleted_keys=['id', 'userlastmodified', 'lastmodified', 'datecreated'])
| 11,507
|
def hardwareRenderPanel(*args, **kwargs):
"""
This command creates, edits, and queries hardware render panels, which contain only a hardware render editor.
Returns: `string` Panel name
"""
pass
| 11,508
|
def capacity(quantity, channel, gamma, dim, basis, eps, **kwargs):
"""
Runs the Blahut-Arimoto algorithm to compute the capacity given by
'quantity' (which can be 'h', 'tc', 'coh' or 'qmi' taking the channel,
gamma, dim, basis and tolerance (eps) as inputs).
With the optional keyword argument 'plot' (Boolean), it outputs a plot
showing how the calculated value changes with the number of iterations.
With the optional keyword argument 'latexplot' (Boolean), the plot uses
LaTeX in the labels.
"""
#to store the calculated values
itern = []
value = []
#initialization
rhoa = DensityMatrix(np.diag((1/dim)*np.ones((1,dim))[0]))
#Blahut-Arimoto algorithm iteration
for iterator in range(int(gamma*np.log2(dim)/eps)):
# for iterator in range(1):
itern.append(iterator)
sigmab = rhoa
rhoa = linalg.expm(np.log(2)*(linalg.logm(sigmab.mat)/np.log(2)
+ (1/gamma)*(F(quantity, sigmab, basis, channel).mat)))
rhoa = DensityMatrix(rhoa/np.trace(rhoa))
value.append(J(quantity, rhoa, rhoa, gamma, basis, channel))
#Plotting
if kwargs.get('plot', False):
# if kwargs['latexplot'] is True:
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
fig, ax = plt.subplots()
plt.plot(itern, value,
marker = '.',
markersize='7',
label = r'Capacity value vs iteration'
)
plt.xlabel(r'Number of iterations', fontsize = '14')
plt.ylabel(r'Value of capacity', fontsize = '14')
plt.xticks(fontsize = '8')
plt.yticks(fontsize = '8')
plt.grid(True)
plt.show()
return J(quantity, rhoa, rhoa, gamma, basis, channel)
| 11,509
|
def test_get_raw_features_and_labels_examples_in_same_order() -> None:
"""Tests that the raw features and raw labels have examples in the same
order. For example, say X_train[0] is the raw Bulbasaur image; then
y_train[0] must be the labels for Bulbasaur."""
# pylint: disable=invalid-name
processor = PokemonClassificationDataProcessor()
features, labels = processor.get_raw_features_and_labels(
DEFAULT_DATASET_TRAINVALTEST_PATH
)
X_all = np.concatenate(
(features["X_train"], features["X_val"], features["X_test"])
)
y_all = np.concatenate(
(labels["y_train"], labels["y_val"], labels["y_test"])
)
bulbasaur_idx = None
for idx, arr in enumerate(X_all):
if np.isclose(arr.mean(), BULBASAUR_IMG_MEAN):
bulbasaur_idx = idx
assert bulbasaur_idx is not None
assert set(y_all[bulbasaur_idx]) == BULBASAUR_LABEL
charizard_idx = None
for idx, arr in enumerate(X_all):
if np.isclose(arr.mean(), CHARIZARD_IMG_MEAN):
charizard_idx = idx
assert charizard_idx is not None
assert set(y_all[charizard_idx]) == CHARIZARD_LABEL
| 11,510
|
def get_Teq_from_L(L: ArrayLike, d: ArrayLike, A: ArrayLike) -> np.ndarray:
"""Calculates the equilibrium temperature of a planet
given the stellar luminosity L, planetary semi-major axis d
and surface albedo A:
Args:
L (ArrayLike): Stellar luminosity in erg/s.
d (ArrayLike): Planetary semi-major axis in cm.
A (ArrayLike): Planetary albedo.
Returns:
np.ndarray: The planetary equilibrium temperature in K.
"""
return ((L * (1 - A)) / (16 * sigma_b * np.pi * d ** 2)) ** 0.25
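# A quick sanity check with hypothetical values (assumes the module-level sigma_b is
# the Stefan-Boltzmann constant in cgs units, as the erg/s and cm inputs imply):
# a Sun-like star at 1 au with albedo 0.3 gives roughly Earth's equilibrium temperature.
L_sun = 3.828e33   # erg/s
d_au = 1.496e13    # cm
print(get_Teq_from_L(L_sun, d_au, 0.3))  # ~255 K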
| 11,511
|
def lookup_container_plugin_by_type(container: IContainer, plugin_type: Type[ContainerResolutionPlugin]):
"""
Given a container, finds the first plugin that is an instance of the specified type.
:param container: The container to perform the lookup on.
:param plugin_type: The type of the plugin to find.
:return: The first instance of ``plugin_type`` in ``container.plugins``. Raises ``StopIteration`` if no such plugin exists.
"""
return next(
plugin
for plugin
in container.plugins
if isinstance(plugin, plugin_type)
)
| 11,512
|
def restore_flag_values(saved_flag_values, flag_values=FLAGS):
"""Restores flag values based on the dictionary of flag values.
Args:
saved_flag_values: {'flag_name': value_dict, ...}
flag_values: FlagValues, the FlagValues instance from which the flag will
be restored. This should almost never need to be overridden.
"""
new_flag_names = list(flag_values)
for name in new_flag_names:
saved = saved_flag_values.get(name)
if saved is None:
# If __dict__ was not saved delete "new" flag.
delattr(flag_values, name)
else:
if flag_values[name].value != saved['_value']:
flag_values[name].value = saved['_value'] # Ensure C++ value is set.
flag_values[name].__dict__ = saved
| 11,513
|
async def test_repo_cloner_clone_local_repo(local_repo: Repo):
"""
checks that the cloner can handle a local repo url
"""
repo: Repo = local_repo
root: str = repo.working_tree_dir
target_path: Path = Path(root).parent / "target"
result = await RepoCloner(
repo_url=root,
clone_path=target_path
).clone()
assert result.cloned_from_remote == True
assert Path(result.repo.working_tree_dir) == target_path
| 11,514
|
def shift_map_longitude(mapdata, lonshift, spline_order=1):
""" Simple shift of the map by wrapping it around the edges
Internally uses scipy's ndimage.shift with spline interpolation order as
requested for interpolation
Parameters
----------
mapdata : 2D Numpy array
A map whose second dimension is longitude, stretched fully along the
map
lonshift : float
A simple float representing the longitude shift of the array
spline_order: int [1, 5]
Returns
-------
A shifted map
"""
from scipy.ndimage import shift
# Constant
degrees = 360.0
# Check the map and compute the relative shift
assert len(mapdata.shape) == 2, "Only for 2D maps"
assert mapdata.shape[1] > 1, "Map has only one longitudinal coordinate"
n = (mapdata.shape[1] - 1)
x = degrees * lonshift / n # The number of pixels to shift
# Use scipy for the rest
mapdata_shift = shift(mapdata, [0, x], mode='wrap', order=spline_order)
return mapdata_shift
| 11,515
|
def aalogoheights(aahistObj, N=20):
"""For a objhist of AA frequencies, compute the heights
of each AA for a logo plot"""
aahistObj = deepcopy(aahistObj)
keys = list(aahistObj.keys())
for aa in BADAA:
if aa in keys:
dummy = aahistObj.pop(aa)
keys = [aa for aa in aahistObj.sortedKeys(reverse=False)]
freq = aahistObj.freq()
p = np.array([freq[k] for k in keys])
#err = (1/np.log(2))*((N-1) / (2*aahistObj.sum()))
#totEntropy = np.log2(N)-((-p*np.log2(p)).sum() + err)
totEntropy = np.log2(N)-((-p*np.log2(p)).sum())
heights = p * totEntropy
return keys, heights
| 11,516
|
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
connection = op.get_bind()
# Get tuples (newer-id, older-id) of mirrored relationships created
# by external user. It's not safe to remove relationships created by
# regular GGRC users, because these relationship might be referenced
# by snapshots or other objects.
items = list(connection.execute(
sa.text(
"""
SELECT
r1.id AS dup, r2.id AS orig
FROM
relationships r1
JOIN
relationships r2 ON r1.source_type = r2.destination_type
AND r1.source_id = r2.destination_id
AND r2.source_type = r1.destination_type
AND r2.source_id = r1.destination_id
AND r1.id != r2.id
RIGHT JOIN
revisions rev ON rev.resource_type = 'Relationship'
AND rev.resource_id = r1.id
LEFT JOIN
events e ON e.id = rev.event_id
WHERE
r1.id > r2.id AND r1.is_external = 1
AND r2.is_external = 1
AND (r1.source_type = 'ExternalComment'
OR r1.destination_type = 'ExternalComment')
ORDER BY r1.id
"""
)
))
if not items:
logging.warning("[rev:%s] No mirrored external relationships found",
revision)
return
del_rels = set()
del_revs = set()
del_events = set()
print_items = list()
for new_id, old_id in items:
rev_ids, evt_ids = _get_revs_and_events_to_delete(connection, new_id)
print_revs = ', '.join(str(i) for i in rev_ids) if rev_ids else '<empty>'
print_evts = ', '.join(str(i) for i in evt_ids) if evt_ids else '<empty>'
print_items.append((new_id, old_id, print_revs, print_evts))
del_rels.add(new_id)
del_revs.update(rev_ids)
del_events.update(evt_ids)
logging.warning(
"[rev:%s] Mirrored external relationships (total %s) to delete:\n"
"%s",
revision,
len(items),
'\n'.join('{} (orig rel={}); del revs: {}; del evts: {}'.format(*item)
for item in print_items)
)
_delete_records(connection, 'revisions', del_revs)
_delete_records(connection, 'events', del_events)
_delete_records(connection, 'relationships', del_rels)
| 11,517
|
def cmdline_opts( request ):
"""PyMTL options parsed from pytest commandline options."""
opts = _parse_opts_from_request( request )
# If a fixture is used by a test class, this seems to be the only
# way to retrieve the fixture value.
# https://stackoverflow.com/a/37761165/13190001
if request.cls is not None:
request.cls.cmdline_opts = opts
return opts
| 11,518
|
def replace(index, ndim, axes, rindices):
"""Replace indexing for a specified dimension
Args:
index(index): object used in slicing
ndim(num): number of dimensions
axes(list): dimensions to be replaced
rindices(list): new indexing for these dimensions
Returns:
index
"""
index2 = list(expand(index, ndim))
for axis, rindex in zip(axes, rindices):
axis = axisindex(index2, axis, ndim)
index2[axis] = rindex
return tuple(index2)
| 11,519
|
def _closed(sock):
"""Return True if we know socket has been closed, False otherwise.
"""
try:
rd, _, _ = select([sock], [], [], 0)
# Any exception here is equally bad (select.error, ValueError, etc.).
except:
return True
return len(rd) > 0
| 11,520
|
def get_forest_connection(device_name: str, seed=None):
"""Get a connection to a forest backend
Args:
device_name: the device to connect to
seed: optional random seed passed to the wavefunction simulator
Returns:
A connection to either a pyquil simulator or a QPU
"""
if device_name == "wavefunction-simulator":
return WavefunctionSimulator(random_seed=seed)
else:
return get_qc(device_name)
| 11,521
|
def get_slack_colour(level):
"""Return Slack colour value based on log level."""
level = level.upper()
colours = {
"CRITICAL": "ff0000",
"ERROR": "ff9933",
"WARNING": "ffcc00",
"INFO": "33ccff",
"DEBUG": "good"
}
return colours.get(level, "good")
| 11,522
|
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
):
"""Initialise Hue Bridge connection."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = HueSensorData(hass)
await hass.data[DOMAIN].async_add_platform_entities(
HueBinarySensor,
BINARY_SENSOR_MODELS,
async_add_entities,
config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),
)
| 11,523
|
def add_suffix(path, suffix=""):
"""Adds a suffix to a filename *path*"""
return join(dirname(path), basename(path, ext=False) + suffix + extname(path))
| 11,524
|
def MdAE_np(preds, labels):
"""
Median Absolute Error
:param preds: predicted values (flattened internally)
:param labels: ground-truth values with the same number of elements
:return: the median absolute error as a float
"""
preds = np.reshape(preds, [-1])
labels = np.reshape(labels, [-1])
return np.median(np.abs(preds - labels))
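# Quick check with hypothetical arrays: absolute errors are [0, 1, 2], so the median is 1.0.
import numpy as np
print(MdAE_np(np.array([1.0, 2.0, 3.0]), np.array([1.0, 3.0, 5.0])))  # 1.0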
| 11,525
|
async def s3_fetch_object(url, s3, range=None, **kw):
""" returns object with
On success:
.url = url
.data = bytes
.last_modified -- last modified timestamp
.range = None | (in,out)
.error = None
On failure:
.url = url
.data = None
.last_modified = None
.range = None | (in, out)
.error = str| botocore.Exception class
"""
from botocore.exceptions import ClientError, BotoCoreError
def result(data=None, last_modified=None, error=None):
return SimpleNamespace(url=url, data=data, error=error, last_modified=last_modified, range=range)
bucket, key = s3_url_parse(url)
extra_args = dict(**kw)
if range is not None:
try:
extra_args['Range'] = s3_fmt_range(range)
except Exception:
return result(error='Bad range passed in: ' + str(range))
try:
obj = await s3.get_object(Bucket=bucket, Key=key, **extra_args)
stream = obj.get('Body', None)
if stream is None:
return result(error='Missing Body in response')
async with stream:
data = await stream.read()
except (ClientError, BotoCoreError) as e:
return result(error=e)
except Exception as e:
return result(error="Some Error: " + str(e))
last_modified = obj.get('LastModified', None)
return result(data=data, last_modified=last_modified)
| 11,526
|
def _create_topic(committer_id, topic, commit_message, commit_cmds):
"""Creates a new topic, and ensures that rights for a new topic
are saved first.
Args:
committer_id: str. ID of the committer.
topic: Topic. Topic domain object.
commit_message: str. A description of changes made to the topic.
commit_cmds: list(TopicChange). A list of TopicChange objects that
represent change commands made to the given topic.
"""
topic.validate()
if does_topic_with_name_exist(topic.name):
raise utils.ValidationError(
'Topic with name \'%s\' already exists' % topic.name)
if does_topic_with_url_fragment_exist(topic.url_fragment):
raise utils.ValidationError(
'Topic with URL Fragment \'%s\' already exists'
% topic.url_fragment)
create_new_topic_rights(topic.id, committer_id)
model = topic_models.TopicModel(
id=topic.id,
name=topic.name,
abbreviated_name=topic.abbreviated_name,
url_fragment=topic.url_fragment,
thumbnail_bg_color=topic.thumbnail_bg_color,
thumbnail_filename=topic.thumbnail_filename,
canonical_name=topic.canonical_name,
description=topic.description,
language_code=topic.language_code,
canonical_story_references=[
reference.to_dict()
for reference in topic.canonical_story_references],
additional_story_references=[
reference.to_dict()
for reference in topic.additional_story_references],
uncategorized_skill_ids=topic.uncategorized_skill_ids,
subtopic_schema_version=topic.subtopic_schema_version,
story_reference_schema_version=topic.story_reference_schema_version,
next_subtopic_id=topic.next_subtopic_id,
subtopics=[subtopic.to_dict() for subtopic in topic.subtopics],
meta_tag_content=topic.meta_tag_content,
practice_tab_is_displayed=topic.practice_tab_is_displayed,
page_title_fragment_for_web=topic.page_title_fragment_for_web
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
topic.version += 1
generate_topic_summary(topic.id)
| 11,527
|
def mss(**kwargs):
# type: (Any) -> MSSMixin
""" Factory returning a proper MSS class instance.
It detects the platform we are running on
and chooses the most suitable MSS class to take
screenshots.
It then proxies its arguments to the class for
instantiation.
"""
# pylint: disable=import-outside-toplevel
os_ = platform.system().lower()
if os_ == "darwin":
from . import darwin
return darwin.MSS(**kwargs)
if os_ == "linux":
from . import linux
return linux.MSS(**kwargs)
if os_ == "windows":
from . import windows
return windows.MSS(**kwargs)
raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_))
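# A minimal usage sketch, assuming the MSS classes expose the usual python-mss API
# (context-manager support, a .monitors list and a .grab() method):
with mss() as sct:
    shot = sct.grab(sct.monitors[1])  # capture the primary monitor
    print(shot.size)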
| 11,528
|
def reshape_fps(X):
"""Reshape 4D fingerprint data to 2D
If X is already 2D, do nothing.
Returns: reshaped X
"""
if len(X.shape) == 4:
num_factors = X.shape[3]
num_fps = np.prod(X.shape[:3])
X.shape = (num_fps,num_factors)
else:
num_factors = X.shape[1]
num_fps = X.shape[0]
return X
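# Shape illustration with a hypothetical 4D fingerprint array:
# a 2 x 3 x 4 grid with 5 factors becomes a (24, 5) matrix.
import numpy as np
X = np.zeros((2, 3, 4, 5))
print(reshape_fps(X).shape)  # (24, 5)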
| 11,529
|
def experiment(config, loss_select, opt_select):
"""Run optimizers with all configurations from config"""
losses = config["loss"]
optimizers = config["optimizer"]
for loss in losses:
for opt in optimizers:
loss_config = {**config["loss"][loss]}
opt_params = config["optimizer"][opt]
opt_config = {i: tune.grid_search(opt_params[i]) for i in opt_params}
exp_config = {
**loss_config,
**opt_config,
"hyperparams": list(opt_params.keys()),
}
loss_fn = loss_select[loss]
optimizer = opt_select[opt]
run = partial(single_run, loss=loss_fn, opt=optimizer)
analysis = tune.run(
run,
name=opt,
local_dir=Path(__file__).parent.absolute() / "tune_results" / loss,
metric="avg_subopt_gap",
mode="min",
num_samples=1,
config=exp_config,
)
yield analysis, opt, loss
| 11,530
|
def df_fc_overlap_2():
"""Scenario case with 2 fragments overlapping, bound to a common fragment."""
mol = Chem.MolFromSmiles('NC1CC(CCC1O)C1CCC1')
return DataFrame([
['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion', 'false_positive', 'overlap', (7, 6, 5, 4, 3, 2, 1), (0, 1, 2, 3, 4, 5, 6), 12, mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'],
['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O3', 0, 'O3:0', 'cm', 'connection', 'monopodal', '', (7, 6, 5, 4, 3, 2, 1), (8, 9, 10, 11), 12, mol, mol_o1, mol_o3, 'O1:0@4[cm]O3:0@0'],
['mol_fc_overlap_2', 'XXX', 'O2', 0, 'O2:0', 'O3', 0, 'O3:0', 'cm', 'connection', 'monopodal', '', (0, 1, 2, 3, 4, 5, 6), (8, 9, 10, 11), 12, mol, mol_o2, mol_o3, 'O2:0@3[cm]O3:0@0'],
], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2', 'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac', 'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
| 11,531
|
def show_toolbar(request):
"""Determines whether debug toolbar should be shown for the request.
Requires settings.DEBUG=True, 'debug_toolbar' GET param present,
and request ip in settings.INTERNAL_IPS.
Args:
request: HttpRequest object.
Returns:
Boolean.
"""
if ('debug_toolbar' not in request.GET and
'/__debug__/' not in request.path):
return False
return middleware.show_toolbar(request)
| 11,532
|
def strfnow(fmt=HUMAN_DATETIME):
"""
Returns a string representation of the current timestamp
"""
return datetime.now(tzlocal()).strftime(fmt)
| 11,533
|
def tag_to_dict(node):
"""Assume tag has one layer of children, each of which is text, e.g.
<medalline>
<rank>1</rank>
<organization>USA</organization>
<gold>13</gold>
<silver>10</silver>
<bronze>9</bronze>
<total>32</total>
</medalline>
"""
d = {}
for child in node:
d[child.tag] = child.text
return d
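# Example using the docstring's <medalline> snippet, parsed with the standard library:
import xml.etree.ElementTree as ET
node = ET.fromstring(
    "<medalline><rank>1</rank><organization>USA</organization>"
    "<gold>13</gold><silver>10</silver><bronze>9</bronze><total>32</total></medalline>"
)
print(tag_to_dict(node))
# {'rank': '1', 'organization': 'USA', 'gold': '13', 'silver': '10', 'bronze': '9', 'total': '32'}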
| 11,534
|
def test_id_rot():
"""Test equivalence of constants that represent no rotation."""
assert_array_almost_equal(R_id, matrix_from_axis_angle(a_id))
assert_array_almost_equal(R_id, matrix_from_quaternion(q_id))
assert_array_almost_equal(R_id, matrix_from_euler_xyz(e_xyz_id))
assert_array_almost_equal(R_id, matrix_from_euler_zyx(e_zyx_id))
| 11,535
|
def compute_contact_centroid(molecular_complex: Any,
cutoff: float = 4.5) -> np.ndarray:
"""Computes the (x,y,z) centroid of the contact regions of this molecular complex.
For a molecular complex, it's necessary for various featurizations
that compute voxel grids to find a reasonable center for the
voxelization. This function computes the centroid of all the contact
atoms, defined as an atom that's within `cutoff` Angstroms of an
atom from a different molecule.
Parameters
----------
molecular_complex: Object
A representation of a molecular complex, produced by
`rdkit_util.load_complex`.
cutoff: float, optional
The distance in Angstroms considered for computing contacts.
"""
fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff)
coords = [frag[0] for frag in fragments]
contact_coords = merge_molecules_xyz(coords)
centroid = np.mean(contact_coords, axis=0)
return (centroid)
| 11,536
|
def general_operator_gamma_norm(matrix, gamma, max_j, max_q):
""" Returns the gamma operator norm of matrix, summing up to max_j and
considering the sup up to max_q. It is assumed that matrix is a function
accepting two arguments (i, j) rather than an array, for efficiency.
"""
max_j_sum = -1
q = 1
while(q < max_q):
temp_j_sum = nsum(lambda j: fprod([power(q, gamma), power(j, -gamma),
fabs(matrix(q, j))]), [1, max_j])
max_j_sum = temp_j_sum if temp_j_sum > max_j_sum else max_j_sum
q += 1
return max_j_sum
| 11,537
|
def crop_image_single(img, device):
"""
Implementation of the MTCNN network to crop single image to only show the face as shown in the
facenet_pytorch doc:
https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
:param device: pytorch device
:param img: single image to be cropped
:return: cropped image
"""
model = MTCNN(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7],
factor=0.709, post_process=False, device=device)
x_aligned = model(img)
return x_aligned
| 11,538
|
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of ``b`` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
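# Worked example (assumes normalize and roots are imported as used in the body above):
# H(z) = (z - 1) / (z - 0.5), given as b = [1, -1], a = [1, -0.5],
# has a zero at 1, a pole at 0.5 and unit gain.
z, p, k = tf2zpk([1.0, -1.0], [1.0, -0.5])
print(z, p, k)  # [1.] [0.5] 1.0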
| 11,539
|
def gpiod_line_is_free(line: gpiod_line) -> bool:
"""
@brief Check if the calling user has neither requested ownership of this
line nor configured any event notifications.
@param line: GPIO line object.
@return True if given line is free, false otherwise.
"""
return line.state == _LINE_FREE
| 11,540
|
def aseta_nappain_kasittelija(kasittelija):
"""
Sets the function used to handle keyboard presses.
Only needed if you want your game to use the keyboard for something.
The handler function must take two parameters: the key symbol and the
modifier keys. The symbol is a constant defined in the pyglet.window.key
module (e.g. pyglet.window.key.A is the A key). Use the import below to
access these easily:
from pyglet.window import key
after which the key codes are available through the name key, e.g. key.A.
The modifier keys are explained in this module's documentation.
To use the keyboard you need to define a function:
def nappain_kasittelija(symboli, muokkausnapit):
# things happen
and then register it:
haravasto.aseta_nappain_kasittelija(nappain_kasittelija)
:param function kasittelija: handler function for the keyboard
"""
if grafiikka["ikkuna"]:
grafiikka["ikkuna"].on_key_press = kasittelija
else:
print("Ikkunaa ei ole luotu!")
| 11,541
|
def test_content(google_translator):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert google_translator.translate(payload='좋은') == "good"
| 11,542
|
def train_step(optimizer, inputs, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
weights = jnp.where(inputs > 0, 1, 0)
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, inputs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
| 11,543
|
def doublet_line_polar_u(rcp,zcp,dmz_dz, bSelfInd=False):
"""
Velocity field induced by a semi-infinite doublet line (on the z axis) of intensity `dmz_dz`
Control points defined by polar coordinates `rcp` and `zcp`.
\int 1/(r^2 + (z-x)^2 )^(3/2) dx
\int 1/(r^2 + (z-x)^2 )^(5/2) dx
"""
if np.any(rcp<0):
raise Exception('Script meant for positive r')
r=np.asarray(rcp)
z=np.asarray(zcp)
# Vectorial "if" statements to isolate singular regions of the domain
bZ0 = np.abs(z)<1e-8
bR0 = np.abs(r)<1e-8
bZ0R0 = np.logical_and(bZ0,bR0)
bZ0Rp = np.logical_and(bZ0, np.abs(r)>1e-8)
bR0Zp = np.logical_and(bR0, z>1e-8)
bR0Zm = np.logical_and(bR0, z<-1e-8)
bOK = np.logical_and(~bZ0,~bR0)
uz=np.zeros(r.shape)
ur=np.zeros(r.shape)
norm2 = r**2+z**2
uz[bOK] = dmz_dz/(4*np.pi) * 1/r[bOK]**2 * ( z[bOK]**3/(norm2[bOK])**(3/2) - z[bOK]/(norm2[bOK])**(1/2) )
uz[bZ0Rp] = 0
uz[bR0Zm] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zm]
#uz[bR0Zp] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zp] #<<< No singularity there, but we force it to 0
ur[bOK] =-dmz_dz/(4*np.pi) * r[bOK] * 1/(norm2[bOK] )**(3/2)
ur[bZ0Rp] =-dmz_dz/(4*np.pi) * r[bZ0Rp] * 1/(norm2[bZ0Rp])**(3/2)
ur[bR0Zm] = 0
ur[bR0Zp] = 0
ur[bZ0R0] = 0
uz[bZ0R0] = 0
return ur, uz
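# A quick shape check at a few hypothetical control points (assumes numpy as np):
rcp = np.array([0.5, 1.0, 2.0])
zcp = np.array([-1.0, 0.0, 1.0])
ur, uz = doublet_line_polar_u(rcp, zcp, dmz_dz=1.0)
print(ur.shape, uz.shape)  # (3,) (3,)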
| 11,544
|
def paginate(text: str):
"""Simple generator that paginates text."""
last = 0
pages = []
appd_index = 0  # tracks the last page boundary; 0 also covers empty text
for curr in range(0, len(text)):
if curr % 1980 == 0:
pages.append(text[last:curr])
last = curr
appd_index = curr
if appd_index != len(text) - 1:
pages.append(text[last:])  # include the final character of the remaining text
return list(filter(lambda a: a != '', pages))
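# Quick usage sketch: a 5000-character string splits into pages of at most 1980 characters.
chunks = paginate("x" * 5000)
print([len(c) for c in chunks])  # [1980, 1980, 1040]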
| 11,545
|
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests that relate to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
estimator = Estimator()
check_parameters_default_constructible(name, Estimator)
check_no_attributes_set_in_init(name, estimator)
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
if hasattr(estimator, 'max_iter'):
if (isinstance(estimator, ShapeletModel) or
isinstance(estimator, SerializableShapeletModel)):
estimator.set_params(max_iter=100)
else:
estimator.set_params(max_iter=10)
if hasattr(estimator, 'total_lengths'):
estimator.set_params(total_lengths=1)
if hasattr(estimator, 'probability'):
estimator.set_params(probability=True)
for check in checks._yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as exception:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(str(exception), SkipTestWarning)
| 11,546
|
def get():
"""
Get the current version number. Reads from the pyproject.toml file.
"""
print(get_toml_version())
| 11,547
|
def get_batch_copy(vocab_size, batch_size, seq_len):
"""Generates random data for copying."""
batch = np.random.choice(
vocab_size - 1, size=[batch_size, seq_len // 2 - 1]) + 1
batch = np.concatenate([np.zeros([batch_size, 1], dtype=int), batch], axis=1)
batch = np.concatenate([batch] * 2, axis=1)
batch_mask = np.concatenate([
np.zeros([batch_size, seq_len // 2], dtype=bool),
np.ones([batch_size, seq_len // 2], dtype=bool)
],
axis=1)
return batch, batch_mask
| 11,548
|
def buildCompareDFs(strTodayFileName):
"""read in and return today's CSV as DF, determine appropriate old CSV as DF, and the old file name for use later"""
# get today's file
dfTodaysCards = pandas.read_csv(
DATA_DIR_NAME + strTodayFileName, dtype={'Card Number': object})
dfTodaysCards = cleanCardDataFrame(dfTodaysCards)
# getting older file is a bit trickier, check the run log, find the most recent run, find the old file used, get the next recent old file to compare with
dictRunLog = readRunLog()
strOldFileName = determineCompareFile(dictRunLog)
print("ToCompareAgainst: " + strOldFileName)
dfOldCards = pandas.read_csv(
DATA_DIR_NAME + strOldFileName, dtype={'Card Number': object})
dfOldCards = cleanCardDataFrame(dfOldCards)
dfOldCards = dfOldCards.rename(
index=str, columns={"Count": "OldCount", "Price": "OldPrice"})
return dfTodaysCards, dfOldCards, strOldFileName
| 11,549
|
def _vj_stat(v = None, j = None, freq_type = 'vj_occur_freq', ts = None):
"""
Return an estimate of a single v-gene, j-gene, or vj-gene-pairing frequency
for the specified <v> and <j> arguments, given a TCRsampler instance <ts>
Parameters
----------
v : str
j : str
e.g.,
freq_type : str
'vj_occur_freq', 'vj_freq', 'v_occur_freq', 'v_freq', 'j_occur_freq', 'j_freq'
ts : tcrsampler.sampler.TCRsampler
sampler instance
Example
-------
>>> import pandas as pd
>>> import os
>>> from tcrsampler.sampler import TCRsampler
>>> from tcrregex.vj_diff import *
>>> t = TCRsampler()
>>> fn = os.path.join("tcrregex", "test_files", 'britanova_chord_blood_sample_5000.csv' )
>>> t.ref_df = pd.read_csv(fn)
>>> t.build_background()
>>> _vj_stat(v = 'TRBV20-1*01' , j ='TRBJ2-1*01', ts = t, freq_type = 'vj_occur_freq')
0.014802960592118424
>>> _vj_stat(v = 'TRBV20-1*01' , ts = t, freq_type = 'v_occur_freq')
0.060012002400480095
>>> _vj_stat(j = 'TRBJ2-1*01', ts = t, freq_type = 'j_occur_freq')
0.272254450890178
"""
if ts is None:
raise ValueError("._vj_stat requires < ts > be a TCRsampler instance")
if v is None and j is None:
raise ValueError("Niether a v- nor j-gene was supplied to ._vj_stat ; atleast one must be provided")
if v is None:
tp = j
assert freq_type in ['j_freq', 'j_occur_freq']
elif j is None:
tp = v
assert freq_type in ['v_freq', 'v_occur_freq']
else:
tp = (v,j)
assert freq_type in ['vj_freq', 'vj_occur_freq']
return ts.__dict__[freq_type][tp]
| 11,550
|
def __cvx_eda(y, delta, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None,
options={'reltol': 1e-9, 'show_progress': False}):
"""
CVXEDA Convex optimization approach to electrodermal activity processing
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing"
(http://dx.doi.org/10.1109/TBME.2015.2474131, also available from the
authors' homepages).
Arguments:
y: observed EDA signal (we recommend normalizing it: y = zscore(y))
delta: sampling interval (in seconds) of y
tau0: slow time constant of the Bateman function
tau1: fast time constant of the Bateman function
delta_knot: time between knots of the tonic spline function
alpha: penalization for the sparse SMNA driver
gamma: penalization for the tonic spline coefficients
solver: sparse QP solver to be used, see cvxopt.solvers.qp
options: solver options, see: http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns (see paper for details; this implementation returns only r and t):
r: phasic component
p: sparse SMNA driver of phasic component
t: tonic component
l: coefficients of tonic spline
d: offset and slope of the linear drift term
e: model residuals
obj: value of objective function being minimized (eq 15 of paper)
"""
n = len(y)
y = cvx.matrix(y)
# bateman ARMA model
a1 = 1. / min(tau1, tau0) # a1 > a0
a0 = 1. / max(tau1, tau0)
ar = np.array([(a1 * delta + 2.) * (a0 * delta + 2.), 2. * a1 * a0 * delta ** 2 - 8.,
(a1 * delta - 2.) * (a0 * delta - 2.)]) / ((a1 - a0) * delta ** 2)
ma = np.array([1., 2., 1.])
# matrices for ARMA model
i = np.arange(2, n)
A = cvx.spmatrix(np.tile(ar, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n))
M = cvx.spmatrix(np.tile(ma, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n))
# spline
delta_knot_s = int(round(delta_knot / delta))
spl = np.r_[np.arange(1., delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1
spl = np.convolve(spl, spl, 'full')
spl /= max(spl)
# matrix of spline regressors
i = np.c_[np.arange(-(len(spl) // 2), (len(spl) + 1) // 2)] + np.r_[np.arange(0, n, delta_knot_s)]
nB = i.shape[1]
j = np.tile(np.arange(nB), (len(spl), 1))
p = np.tile(spl, (nB, 1)).T
valid = (i >= 0) & (i < n)
B = cvx.spmatrix(p[valid], i[valid], j[valid])
# trend
C = cvx.matrix(np.c_[np.ones(n), np.arange(1., n + 1.) / n])
nC = C.size[1]
# Solve the problem:
# .5*(M*q + B*l + C*d - y)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l
# s.t. A*q >= 0
# old_options = cvx.solvers.options.copy()
cvx.solvers.options.clear()
cvx.solvers.options.update(options)
if solver == 'conelp':
# Use conelp
z = lambda m, n: cvx.spmatrix([], [], [], (m, n))
G = cvx.sparse([[-A, z(2, n), M, z(nB + 2, n)], [z(n + 2, nC), C, z(nB + 2, nC)],
[z(n, 1), -1, 1, z(n + nB + 2, 1)], [z(2 * n + 2, 1), -1, 1, z(nB, 1)],
[z(n + 2, nB), B, z(2, nB), cvx.spmatrix(1.0, range(nB), range(nB))]])
h = cvx.matrix([z(n, 1), .5, .5, y, .5, .5, z(nB, 1)])
c = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T, z(nC, 1), 1, gamma, z(nB, 1)])
res = cvx.solvers.conelp(c, G, h, dims={'l': n, 'q': [n + 2, nB + 2], 's': []})
obj = res['primal objective']
else:
# Use qp
Mt, Ct, Bt = M.T, C.T, B.T
H = cvx.sparse([[Mt * M, Ct * M, Bt * M], [Mt * C, Ct * C, Bt * C],
[Mt * B, Ct * B, Bt * B + gamma * cvx.spmatrix(1.0, range(nB), range(nB))]])
f = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T - Mt * y, -(Ct * y), -(Bt * y)])
res = cvx.solvers.qp(H, f, cvx.spmatrix(-A.V, A.I, A.J, (n, len(f))),
cvx.matrix(0., (n, 1)), solver=solver)
obj = res['primal objective'] + .5 * (y.T * y)
# cvx.solvers.options.clear()
# cvx.solvers.options.update(old_options)
l = res['x'][-nB:]
d = res['x'][n:n + nC]
t = B * l + C * d
q = res['x'][:n]
p = A * q
r = M * q
e = y - r - t
return r, t
# return r, p, t, l, d, e, obj
| 11,551
|
def pulsar_from_opencv_projection(
R: torch.Tensor,
tvec: torch.Tensor,
camera_matrix: torch.Tensor,
image_size: torch.Tensor,
znear: float = 0.1,
) -> torch.Tensor:
"""
Convert OpenCV style camera parameters to Pulsar style camera parameters.
Note:
* Pulsar does NOT support different focal lengths for x and y.
For conversion, we use the average of fx and fy.
* The Pulsar renderer MUST use a left-handed coordinate system for this
mapping to work.
* The resulting image will be vertically flipped - which has to be
addressed AFTER rendering by the user.
* The parameters `R, tvec, camera_matrix` correspond to the outputs
of `cv2.decomposeProjectionMatrix`.
Args:
R: A batch of rotation matrices of shape `(N, 3, 3)`.
tvec: A batch of translation vectors of shape `(N, 3)`.
camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`.
image_size: A tensor of shape `(N, 2)` containing the sizes of the images
(height, width) attached to each camera.
znear (float): The near clipping value to use for Pulsar.
Returns:
cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar
convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width,
c_x, c_y).
"""
return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size, znear)
| 11,552
|
def identity(gender:str = None) -> dict:
""" Generates a pseudo-random identity.
Optional args
gender: 'm' for traditionally male, 'f' for traditionally female.
Returns a dict with the following keys:
name -> full name
given -> given name / first name
family -> family name / last name
address -> well formed address (fake of course)
city -> city of residence
state -> state of residence
zip_code -> zip code of residence (matches the city and state)
phone - > a phone number with an area code from the state of residence.
email -> a valid email address (fake of course)
"""
if gender and gender.lower() not in ["m", "f"]:
raise ValueError("'gender' must be 'm' or 'f'")
if gender and gender.lower() == "m":
given = _pluck(MGIVEN)
elif gender and gender.lower() == "f":
given = _pluck(FGIVEN)
else:
given = _pluck(MGIVEN + FGIVEN)
family = _pluck(FAMILY)
email = _make_email(given, family)
zip_code, city, state_code = _pluck(AREA)
phone = _make_phone(state_code)
address = _make_address()
return dict(name=f"{given} {family}".title(),
given=given.title(),
family=family.title(),
address=address,
city=city.title(),
state=state_code.upper(),
zip_code=zip_code,
phone=phone,
email=email)
| 11,553
|
def matthews_corrcoef(y_true, y_pred):
"""Returns matthew's correlation coefficient for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are
of very different sizes. The MCC is in essence a correlation coefficient
value between -1 and +1. A coefficient of +1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse prediction.
The statistic is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
Returns
-------
mcc : float
matthew's correlation coefficient (+1 represents a perfect prediction,
0 an average random prediction and -1 and inverse prediction).
References
----------
http://en.wikipedia.org/wiki/Matthews_correlation_coefficient
http://dx.doi.org/10.1093/bioinformatics/16.5.412
"""
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
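# Worked example with +/-1 labels (assumes numpy as np): this prediction pattern
# gives an MCC of -1/3.
import numpy as np
print(round(matthews_corrcoef(np.array([1, 1, 1, -1]), np.array([1, -1, 1, 1])), 2))  # -0.33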
| 11,554
|
def convert_image_link(image):
"""Convert an image link specification into a Markdown image link
Args:
image (Match): A Match object corresponding to an image link
Returns:
str: Markdown formatted link to the image
"""
image_name = str(image.group(1))
file_ext = 'jpg'
if '|' in image_name:
image_name, file_ext = image_name.split('|')
image_link = f")}.{file_ext})"
return image_link
| 11,555
|
def yaml_dump(dict_to_dump: Dict[str, Any]) -> str:
"""Dump the dictionary as a YAML document."""
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
| 11,556
|
def test_delete_files_success_nofile(s3_mock, paths, bucket):
"""delete_files should run successfully even when files not found."""
# Arrange
s3_mock.create_bucket(
Bucket=bucket, CreateBucketConfiguration={"LocationConstraint": "eu-west1"}
)
# Act & Assert
assert delete_files(paths) is None
| 11,557
|
def completeness(importance_matrix):
""""Compute completeness of the representation."""
per_factor = completeness_per_code(importance_matrix)
if importance_matrix.sum() == 0.:
importance_matrix = np.ones_like(importance_matrix)
factor_importance = importance_matrix.sum(axis=0) / importance_matrix.sum()
return np.sum(per_factor*factor_importance)
| 11,558
|
def image_stat(image_id):
"""
Return the statistics of an image as a pandas DataFrame
:param image_id:
:return:
"""
counts, total_area, mean_area, std_area = {}, {}, {}, {}
img_area = get_image_area(image_id)
for cl in CLASSES:
polygon_list = get_polygon_list(image_id, cl)
counts[cl] = len(polygon_list)
if len(polygon_list) > 0:
total_area[cl] = np.sum([poly.area for poly in polygon_list])\
/ img_area * 100.
mean_area[cl] = np.mean([poly.area for poly in polygon_list])\
/ img_area * 100.
std_area[cl] = np.std([poly.area for poly in polygon_list])\
/ img_area * 100.
return pd.DataFrame({'Class': CLASSES, 'Counts': counts,
'TotalArea': total_area, 'MeanArea': mean_area,
'STDArea': std_area})
| 11,559
|
def delete_original():
"""
Decorator that deletes the original
Discord message upon command execution.
:return: a commands.check decorator wrapping the predicate
"""
async def predicate(ctx):
if ctx.invoked_with != "help": # Don't try to delete if help command
if isinstance(ctx.message.channel, discord.TextChannel):
try:
await ctx.message.delete()
except discord.errors.NotFound as e:
log.fatal(f"Unable to delete message.\n\t{e}")
return True
return commands.check(predicate)
| 11,560
|
def resize_stanford_dataset(inp_dir, out_dir):
"""
The function resizes all the images in stanford dataset to 224x224
"""
print_after_iter = 1000
files = [f for f in os.listdir(inp_dir) if os.path.isfile(os.path.join(inp_dir, f))]
for i in range(len(files)):
if i % print_after_iter == 0:
print(i, 'files resized!')
src = os.path.join(inp_dir, files[i])
dst = os.path.join(out_dir, files[i])
img = Image.open(src).resize((224, 224))
img.save(dst)
| 11,561
|
def test_stream_targets_info():
"""
Tests an API call to get stream targets
"""
response = stream_targets_instance.info()
assert isinstance(response, dict)
assert 'stream_targets' in response
| 11,562
|
def test_cli_stdio_hub(runner, echo, app):
"""
Ensures the stdio command starts a server.
"""
with runner.isolated_filesystem():
with open('my.hub', 'w') as f:
f.write('Hello World!')
e = runner.invoke(Cli.main, ['--hub=my.hub', 'stdio'])
assert e.exit_code == 0
App.__init__.assert_called_with(hub_path='my.hub')
App.start_stdio_server.assert_called()
| 11,563
|
def list_template_dirs():
"""List names of directories containnig parallel programming templates."""
dirs = []
for templates_dir in settings.TEMPLATE_DIRS:
for template_dir in os.listdir(templates_dir):
path = os.path.join(templates_dir,template_dir)
if os.path.isdir(path):
dirs.append(template_dir)
return dirs
| 11,564
|
def fcd2dri(inpFCD, outSTRM, ignored):
"""
Reformats the contents of the given fcd-output file into a .dri file, readable
by PHEM. The fcd-output "fcd" must be a valid file name of an fcd-output.
The following may be a matter of changes:
- the engine torque is not given
"""
# print >> outSTRM, "v1\n<t>,<v>,<grad>,<n>\n[s],[km/h],[%],[1/min]\n"
print("v1\n<t>,<v>,<grad>\n[s],[km/h],[%]", file=outSTRM)
for q in inpFCD:
if q.vehicle:
for v in q.vehicle:
percSlope = math.sin(float(v.slope)) * 100.
print("%s,%.3f,%s" % (
sumolib._intTime(q.time), float(v.speed) * 3.6, percSlope), file=outSTRM)
| 11,565
|
def get_H_OS():
"""屋根又は天井の温度差係数 (-)
Args:
Returns:
float: 屋根又は天井の温度差係数 (-)
"""
adjacent_type = '外気'  # '外気' = outdoor air
return get_H(adjacent_type)
| 11,566
|
def forward_softmax(x):
"""
Compute softmax function for a single example.
The shape of the input is of size # num classes.
Important Note: You must be careful to avoid overflow for this function. Functions
like softmax have a tendency to overflow when very large numbers like e^10000 are computed.
You will know that your function is overflow resistant when it can handle input like:
np.array([[10000, 10010, 10]]) without issues.
x: A 1d numpy float array of shape number_of_classes
Returns:
A 1d numpy float array containing the softmax results of shape number_of_classes
"""
x = x - np.max(x,axis=0)
exp = np.exp(x)
s = exp / np.sum(exp,axis=0)
return s
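# Overflow sanity check from the docstring: very large logits still give finite probabilities.
import numpy as np
print(forward_softmax(np.array([10000.0, 10010.0, 10.0])))
# approximately [4.54e-05, 9.9995e-01, 0.0]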
| 11,567
|
def applySpectralClusters(kmeansObj, img, imgNullVal):
"""
Use the given KMeans object to predict spectral clusters on
a whole image array.
The kmeansObj is an instance of sklearn.cluster.KMeans,
as returned by fitSpectralClusters().
The img array is a numpy array of the image to predict on,
of shape (nBands, nRows, nCols).
Any pixels in img which have value imgNullVal will be set to
SEGNULLVAL (i.e. zero) in the output cluster image.
Return value is a numpy array of shape (nRows, nCols),
with each element being the segment ID value for that pixel.
"""
# Predict on the whole image. In principle we could omit the nulls,
# but it makes little difference to run time, and just adds complexity.
(nBands, nRows, nCols) = img.shape
# Re-organise the image data so it matches what sklearn
# expects.
xFull = numpy.transpose(img, axes=(1, 2, 0))
xFull = xFull.reshape((nRows*nCols, nBands))
clustersFull = kmeansObj.predict(xFull)
del xFull
clustersImg = clustersFull.reshape((nRows, nCols))
# Make the cluster ID numbers start from 1, and use SEGNULLVAL
# (i.e. zero) in null pixels
clustersImg += 1
if imgNullVal is not None:
nullmask = (img == imgNullVal).any(axis=0)
clustersImg[nullmask] = SEGNULLVAL
return clustersImg
| 11,568
|
def _singleton_new(cls, *args, **kwargs):
"""
An invalid new for singleton objects.
"""
raise TypeError(
"'{0}' cannot be instantiated because it is a singleton".format(
cls.__name__,
),
)
| 11,569
|
def get_config_and_project_dir(config_file: str):
"""Guess config file name and project dir"""
if config_file is not None:
config_file = path.abspath(config_file)
project_dir = path.dirname(config_file)
else:
project_dir = find_project_dir()
config_file = '{}/stakkr.yml'.format(project_dir)
return config_file, project_dir
| 11,570
|
def get_config() -> ConfigParser:
"""
Parse the config file.
:return: config
"""
cfg = ConfigParser()
cfg.read(CONFIG_PATH)
return cfg
| 11,571
|
def run_experiments(map: Map):
"""Run a series of experiments. Generate Random, Linear, and Curiosity agents for each starting position.
Test a series of brain configurations for the Curiosity agent so we can see if there is an optimal configuration.
"""
# Some high-level parameters
num_starting_positions = 10
random.seed(12345)
base_results_dir = "results2"
path_length = 1000
fov = 64
grain_size = (fov, fov, 1)
move_rate = 8 # Larger than 1 increases possible coverage of the map by the agent
# Defines the different possible parameters used when creating the various brains
brain_config = {}
brain_config['memory_type'] = [PriorityBasedMemory, ListBasedMemory]
brain_config['memory_length'] = [32, 64]
brain_config['novelty_loss_type'] = ['MSE', 'MAE']
brain_config['train_epochs_per_iter'] = [1, 2, 3]
brain_config['learning_rate'] = [0.0002, 0.0004]
# Calculate number of different curious agents per position
num_curious_agents_per_pos = 1
for _,v in brain_config.items():
num_curious_agents_per_pos *= len(v)
# Get range of possible (x,y) pairs. Subtract 2 since I don't quite know the whole usable range given the agent's size.
x_range = (map.fov + 2, map.img.size[0] - fov - 2)
y_range = (map.fov + 2, map.img.size[1] - fov - 2)
x_vals = []
y_vals = []
for _ in range(num_starting_positions):
x = random.randint(x_range[0], x_range[1])
if x not in x_vals:
x_vals.append(x)
y = random.randint(y_range[0], y_range[1])
if y not in y_vals:
y_vals.append(y)
position_list = list(zip(x_vals, y_vals))
# Create results directories
print("Creating directories and novelty files...")
result_dirs = []
for pos in position_list:
dir = "pos_" + str(pos[0]) + "_" + str(pos[1])
dir = os.path.join(base_results_dir, dir)
result_dirs.append(dir)
if not os.path.isdir(dir):
os.makedirs(dir, exist_ok=True)
# Create agents
print("Creating Linear/Random agents...")
linear_agents = []
random_agents = []
for i in range(num_starting_positions):
pos = position_list[i]
# Linear Agents
linear_motiv = Linear(map, rate=move_rate)
lin_agent = Agent(linear_motiv, pos)
data_dir = os.path.join(result_dirs[i], str(lin_agent))
lin_agent.set_data_dir(data_dir)
linear_agents.append(lin_agent)
# Random Agents
rand_motiv = Random(map, rate=move_rate)
rand_agent = Agent(rand_motiv, pos)
data_dir = os.path.join(result_dirs[i], str(rand_agent))
rand_agent.set_data_dir(data_dir)
random_agents.append(rand_agent)
# Run Linear agents
print("Running Linear agents...")
for i in range(num_starting_positions):
print(F"\nLinear Agent {i+1}/{num_starting_positions}:")
run_agents([linear_agents[i]], path_length)
linear_agents[i].save_reconstruction_snapshot()
linear_agents[i].save_data()
# Run Random agents
print("Running Random agents...")
for i in range(num_starting_positions):
print(F"\nRandom Agent {i+1}/{num_starting_positions}:")
run_agents([random_agents[i]], path_length)
random_agents[i].save_reconstruction_snapshot()
random_agents[i].save_data()
# Curiosity Agents
print("Creating/running Curiosity agents...")
start_time = time.time()
for i in range(num_starting_positions):
p = i+1
pos = position_list[i]
pos_start_time = time.time()
cur_agent_num = 1
for mem in brain_config['memory_type']:
for mem_len in brain_config['memory_length']:
for nov_type in brain_config['novelty_loss_type']:
for train_epochs in brain_config['train_epochs_per_iter']:
for lr in brain_config['learning_rate']:
# Must call clear_session to reset the global state and avoid memory clutter for the GPU
# Allows us to create more models without worrying about memory
tf.keras.backend.clear_session()
print(F"\nCurious Agent {cur_agent_num}/{num_curious_agents_per_pos} at Pos {p}/{num_starting_positions} {pos}:")
brain = Brain(mem(mem_len), grain_size, novelty_loss_type=nov_type,
train_epochs_per_iter=train_epochs, learning_rate=lr)
curious_motiv = Curiosity(map, brain, rate=move_rate)
curious_agent = Agent(curious_motiv, pos)
data_dir = os.path.join(result_dirs[i], str(curious_agent))
curious_agent.set_data_dir(data_dir)
run_agents([curious_agent], path_length)
curious_agent.save_reconstruction_snapshot()
curious_agent.save_data()
# Print estimated time remaining
wall_time = time.time() - start_time
pos_wall_time = time.time() - pos_start_time
pos_eta = (pos_wall_time / cur_agent_num) * (num_curious_agents_per_pos - cur_agent_num)
print(F"Position Wall Time: {get_time_str(pos_wall_time)}, Position ETR: {get_time_str(pos_eta)}")
num_agents_tested = cur_agent_num + i*num_curious_agents_per_pos
num_agents_remaining = num_starting_positions*num_curious_agents_per_pos - num_agents_tested
wall_time_eta = (wall_time / num_agents_tested) * num_agents_remaining
print(F"Wall Time: {get_time_str(wall_time)}, ETR: {get_time_str(wall_time_eta)}")
cur_agent_num += 1
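# A minimal alternative sketch (illustrative only, assuming the same brain_config keys
# and value lists as above): the five nested loops enumerate the Cartesian product of
# brain_config, which itertools.product expresses more compactly:
#
#     from itertools import product
#     keys = list(brain_config)
#     for combo in product(*(brain_config[k] for k in keys)):
#         params = dict(zip(keys, combo))
#         # params['memory_type'], params['memory_length'], etc. replace mem, mem_len, ...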
| 11,572
|
def setver(_, ver=""):
"""Sets the Turtle Canon version"""
match = re.fullmatch(
(
r"v?(?P<version>[0-9]+(\.[0-9]+){2}" # Major.Minor.Patch
r"(-[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?" # pre-release
r"(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?)" # build metadata
),
ver,
)
if not match:
sys.exit(
"Error: Please specify version as 'Major.Minor.Patch(-Pre-Release+Build "
"Metadata)' or 'vMajor.Minor.Patch(-Pre-Release+Build Metadata)'"
)
ver = match.group("version")
update_file(
TOP_DIR / "turtle_canon/__init__.py",
(r'__version__ = (\'|").*(\'|")', f'__version__ = "{ver}"'),
)
update_file(
TOP_DIR / "README.md",
(
r"latest stable version is \*\*.*\*\*\.",
f"latest stable version is **{ver}**.",
),
strip="\n",
)
print(f"Bumped version to {ver}")
| 11,573
|
def greetings(queue, id_):
"""Send a dummy message"""
payload = {"type": "server:notice",
"notice": "subed to {}:{!s}".format(queue.value, id_)}
coro = MSG.send_message(queue, id_, payload)
asyncio.get_event_loop().call_later(0.2, asyncio.ensure_future, coro)
| 11,574
|
def drop_table(table_name, db_engine):
"""
Drops a table from the database
:param table_name: Name of the table that needs to be dropped
:param db_engine: Specifies the connection to the database
:return: None
"""
if has_table(table_name, db_engine):
logger.debug("Deleting old (pre-existing) table: " + table_name + "...")
statement = str("DROP TABLE IF EXISTS {};")
with db_engine.connect() as con:
try:
con.execute(statement.format(table_name))
except Exception as e:
logger.error("Error deleting table " + table_name + " from database!")
logger.error(e.args)
exit(1)
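# Minimal usage sketch (hedged: assumes a SQLAlchemy engine and that `has_table` and
# `logger` are defined elsewhere in this module; the database URL and table name are
# made up):
#
#     >>> from sqlalchemy import create_engine
#     >>> engine = create_engine("sqlite:///example.db")
#     >>> drop_table("staging_orders", engine)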
| 11,575
|
def test_array_to_image_valid_options():
"""Creates image buffer with driver options."""
arr = np.random.randint(0, 255, size=(3, 512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8) + 255
assert utils.array_to_image(arr, mask=mask, img_format="png", ZLEVEL=9)
| 11,576
|
async def test_missing_optional_config(hass):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"number": {
"state": "{{ 4 }}",
"set_value": {"service": "script.set_value"},
"step": "{{ 1 }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, 4, 1, 0.0, 100.0)
| 11,577
|
def iso_register(iso_code):
"""
Registers Calendar class as country or region in IsoRegistry.
Registered country must set class variables ``iso`` using this decorator.
>>> from calendra.core import Calendar
>>> from calendra.registry import registry
>>> from calendra.registry_tools import iso_register
>>> @iso_register('MC-MR')
... class MyRegion(Calendar):
... 'My Region'
Region calendar is then retrievable from registry:
>>> calendar = registry.get('MC-MR')
"""
def wrapper(cls):
from calendra.registry import registry
registry.register(iso_code, cls)
return cls
return wrapper
| 11,578
|
def dict_check_defaults(dd, **defaults):
"""Check that a dictionary has some default values
Parameters
----------
dd: dict
Dictionary to check
    **defaults: dict
Dictionary of default values
Example
-------
.. ipython:: python
@suppress
from xoa.misc import dict_check_defaults
dd = dict(color='blue')
dict_check_defaults(dd, color='red', size=10)
"""
for item in defaults.items():
dd.setdefault(*item)
return dd
| 11,579
|
def construct_search_params():
"""Iterates through user-defined Entrez Search settings to assemble the search parameters.
Envars hold the most recent user-defined Entrez settings, such as rettype, retmax, database,
etc. These settings are iterated through, and their values are returned and appended to the
query.
"""
params = {}
for setting in ev.settings_eSearch:
if os.environ.get(setting[1]) != 'None':
params.update({setting[0].lower(): os.environ.get(setting[1])})
return params
| 11,580
|
def get_invested_and_worth(account):
"""Gets the money invested and the actual worth of an account"""
data = query_indexa(f"accounts/{account}/performance")
invested = data["return"]["investment"]
worth = data["return"]["total_amount"]
return {"invested": round(invested, 2), "worth": round(worth, 2)}
| 11,581
|
def make_album(singer, name, number = ''):
"""Return singers' names and album"""
album = {'singer': singer, 'name': name}
if number:
album['number'] = number
return album
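# Usage examples (follow directly from the function body above):
#
#     >>> make_album('Adele', '25')
#     {'singer': 'Adele', 'name': '25'}
#     >>> make_album('Adele', '25', number=11)
#     {'singer': 'Adele', 'name': '25', 'number': 11}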
| 11,582
|
def test_true(*_):
"""Creating new profile true path"""
profile_name = "existing_profile.yaml"
destination = "/output/directory/my_new_profile.yaml"
expected_source_file = os.path.join(fake_profiles_path(), profile_name)
expected_target_file = destination
expected_target_dir = os.path.dirname(destination)
new_profile(profile_name, destination)
# noinspection PyUnresolvedReferences
yacfg.output.ensure_output_path.assert_called_with(expected_target_dir)
# noinspection PyUnresolvedReferences
shutil.copyfile.assert_called_with(expected_source_file, expected_target_file)
| 11,583
|
def executeCustomQueries(when, _keys=None, _timeit=True):
"""Run custom queries as specified on the command line."""
if _keys is None:
_keys = {}
for query in CUSTOM_QUERIES.get(when, []):
print('EXECUTING "%s:%s"...' % (when, query))
sys.stdout.flush()
if query.startswith('FOR_EVERY_TABLE:'):
query = query[16:]
CURS.execute('SHOW TABLES;')
tables = [x[0] for x in CURS.fetchall()]
for table in tables:
try:
keys = {'table': table}
keys.update(_keys)
_executeQuery(query % keys)
if _timeit:
t('%s command' % when)
except Exception as e:
print('FAILED (%s)!' % e)
continue
else:
try:
_executeQuery(query % _keys)
except Exception as e:
print('FAILED (%s)!' % e)
continue
if _timeit:
t('%s command' % when)
| 11,584
|
def test_index_page():
"""
Check index page and click to Login
"""
# driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
# driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("http://127.0.0.1:5000/")
assert "AAA Home" in driver.title
driver.find_element_by_xpath(
"//a[contains(text(),'Please login')]").click()
assert "AAA Log in" in driver.title
| 11,585
|
def fit_ctmp_meas_mitigator(cal_data: Dict[int, Dict[int, int]],
num_qubits: int,
generators: List[Generator] = None) -> CTMPExpvalMeasMitigator:
"""Return FullMeasureErrorMitigator from result data.
Args:
cal_data: calibration dataset.
        num_qubits: the number of qubits for the calibration dataset.
generators: Optional, input generator set.
Returns:
Measurement error mitigator object.
Raises:
QiskitError: if input arguments are invalid.
"""
if not isinstance(num_qubits, int):
raise QiskitError('Number of qubits must be an int')
if generators is None:
generators = standard_generator_set(num_qubits)
gen_mat_dict = {}
for gen in generators + _supplementary_generators(generators):
if len(gen[2]) > 1:
mat = _local_g_matrix(gen, cal_data, num_qubits)
gen_mat_dict[gen] = mat
# Compute rates for generators
rates = [_get_ctmp_error_rate(gen, gen_mat_dict, num_qubits) for gen in generators]
return CTMPExpvalMeasMitigator(generators, rates)
| 11,586
|
def discounted_item(data):
"""
    DOCSTRING: Classifies item purchases as 'Promoted' or 'Not Promoted' based on the 'Item Discount' column. The 'COD Collectibles' and 'Phone No.' columns are also rebuilt to replace the placeholder value 'Online'.
    INPUT:
    > data : Only accepts a Pandas DataFrame or TextParser that has been pre-processed earlier.
    OUTPUT: Pandas DataFrame or TextParser with 1 additional column, i.e. 'On Promotion'.
    """
    data["On Promotion"] = np.nan
    data["Phone num"] = np.nan
    data["COD Collectible"] = np.nan  # Renamed back to "COD Collectibles" at the end of this function.
    # Flag purchases with a non-zero discount as promoted
    for i, v in data["Item Discount"].items():
        if v != 0:
            data.loc[i, "On Promotion"] = "Promoted"
        else:
            data.loc[i, "On Promotion"] = "Not Promoted"
    # Also taking care of COD Collectible: replace the 'Online' placeholder with 0
    for i, v in data["COD Collectibles"].items():
        if v == "Online":
            data.loc[i, "COD Collectible"] = 0
        else:
            data.loc[i, "COD Collectible"] = v
    # Also taking care of the 'Phone No.' column: mark 'Online' entries as unavailable
    for i, v in data["Phone No."].items():
        if v == "Online":
            data.loc[i, "Phone num"] = "Unavailable"
        else:
            data.loc[i, "Phone num"] = v
data.drop(["COD Collectibles"], axis=1, inplace=True)
data.drop(["Phone No."], axis=1, inplace=True)
data.rename(columns={"COD Collectible": "COD Collectibles"}, inplace=True)
data.rename(columns={"Phone num": "Phone No."}, inplace=True)
return data
| 11,587
|
def NamespacedKubernetesSyncer(namespace, use_rsync=False):
"""Wrapper to return a ``KubernetesSyncer`` for a Kubernetes namespace.
Args:
namespace (str): Kubernetes namespace.
use_rsync (bool): Use ``rsync`` if True or ``kubectl cp``
if False. If True, ``rsync`` will need to be
installed in the Kubernetes pods for this to work.
If False, ``tar`` will need to be installed instead.
Returns: A ``KubernetesSyncer`` class to be passed to ``tune.run()``.
Example:
.. code-block:: python
from ray.tune.integration.kubernetes import NamespacedKubernetesSyncer
tune.run(train,
sync_to_driver=NamespacedKubernetesSyncer("ray"))
"""
class _NamespacedKubernetesSyncer(KubernetesSyncer):
_namespace = namespace
_use_rsync = use_rsync
return _NamespacedKubernetesSyncer
| 11,588
|
def _cons8_88(m8, L88, d_gap, k, Cp, h_gap):
"""dz constrant for edge gap sc touching 2 edge gap sc"""
term1 = 2 * h_gap * L88 / m8 / Cp # conv to inner/outer ducts
term2 = 2 * k * d_gap / m8 / Cp / L88 # cond to adj bypass edge
return 1 / (term1 + term2)
| 11,589
|
def cache_key(path):
"""Return cache key for `path`."""
return 'folder-{}'.format(hashlib.md5(path.encode('utf-8')).hexdigest())
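# Usage sketch (the path below is made up; the key is 'folder-' followed by the
# 32-character md5 hex digest of the UTF-8 encoded path):
#
#     >>> cache_key('/data/photos')   # -> 'folder-<32-char md5 hex digest>'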
| 11,590
|
def ref_731(n):
    """Reference number calculator. Returns reference number
    calculated using 7-3-1 algorithm used in Estonian banks.
    :param string n: base number (client id, etc)
    :rtype: string
    """
    weights = (7, 3, 1)
    total = sum(int(n[-pos]) * weights[(pos - 1) % 3] for pos in range(1, len(n) + 1))
    return "%s%d" % (n, (10 - total) % 10)
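# Worked example (follows from the 7-3-1 weighting above): for n = "1234" the digits
# taken from the right are weighted 4*7 + 3*3 + 2*1 + 1*7 = 46, so the check digit is
# (10 - 46) % 10 = 4 and ref_731("1234") returns "12344".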
| 11,591
|
async def exception_as_response(e: Exception):
"""
Wraps an exception into a JSON Response.
"""
data = {
"message": str(e),
"traceback": "".join(traceback.TracebackException.from_exception(e).format())
}
return web.json_response(data, status=500)
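# Minimal usage sketch (hedged: `web` here appears to be aiohttp's web module; the
# handler and `do_work` helper below are made up for illustration):
#
#     async def handler(request):
#         try:
#             return await do_work(request)
#         except Exception as exc:
#             return await exception_as_response(exc)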
| 11,592
|
def canvas_merge_union(layers, full=True, blend=canvas_compose_over):
"""Blend multiple `layers` into single large enough image"""
if not layers:
raise ValueError("can not blend zero layers")
elif len(layers) == 1:
return layers[0]
min_x, min_y, max_x, max_y = None, None, None, None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
if min_x is None:
min_x, min_y = x, y
max_x, max_y = x + w, y + h
else:
min_x, min_y = min(min_x, x), min(min_y, y)
max_x, max_y = max(max_x, x + w), max(max_y, y + h)
width, height = max_x - min_x, max_y - min_y
if full:
output = None
for image, offset in layers:
x, y = offset
w, h = image.shape[:2]
ox, oy = x - min_x, y - min_y
image_full = np.zeros((width, height, 4), dtype=FLOAT)
image_full[ox : ox + w, oy : oy + h] = image
if output is None:
output = image_full
else:
output = blend(output, image_full)
else:
# this is optimization for method `over` blending
output = np.zeros((max_x - min_x, max_y - min_y, 4), dtype=FLOAT)
for index, (image, offset) in enumerate(layers):
x, y = offset
w, h = image.shape[:2]
ox, oy = x - min_x, y - min_y
effected = output[ox : ox + w, oy : oy + h]
if index == 0:
effected[...] = image
else:
effected[...] = blend(effected, image)
return output, (min_x, min_y)
| 11,593
|
def exception_response(request, code=400, exception=None):
"""
Create a response for an exception
:param request: request instance
:param code: exception code
:param exception: exception instance
:return: exception formatted response
"""
code = code if code in [400, 403, 404, 500] else 400
exception_repr = get_error_msg(exception)
log.error(usr=request.user, msg=f'{code} - {exception_repr}')
context = dict(
message=f"Error {code}",
request_path=request.path,
exception=exception_repr
)
if is_browser(request):
template = loader.get_template(f'error/{code}.html')
rtn = dict(
content=template.render(context, request),
content_type='text/html'
)
else:
rtn = dict(
content=json.dumps(context),
content_type='application/json'
)
return rtn
| 11,594
|
def get_jira_issues(jira, exclude_stories, epics_only, all_status, filename,
user):
"""
    Query Jira and create a status update file (either temporary or named)
    containing all information found by the JQL query.
"""
issue_types = ["Epic"]
if not epics_only:
issue_types.append("Initiative")
if not exclude_stories:
issue_types.append("Story")
issue_type = "issuetype in (%s)" % ", ".join(issue_types)
status = "status in (\"In Progress\")"
if all_status:
status = "status not in (Resolved, Closed)"
if user is None:
user = "currentUser()"
else:
user = "\"%s\"" % add_domain(user)
jql = "%s AND assignee = %s AND %s" % (issue_type, user, status)
vprint(jql)
my_issues = jira.search_issues(jql)
msg = message_header + email_to_name(os.environ['JIRA_USERNAME']) + "\n\n"
f = open_file(filename)
filename = f.name
f.write(msg)
vprint("Found issue:")
for issue in my_issues:
vprint("%s : %s" % (issue, issue.fields.summary))
f.write("[%s]\n" % issue)
f.write("# Header: %s\n" % issue.fields.summary)
f.write("# Type: %s\n" % issue.fields.issuetype)
f.write("# Status: %s\n" % issue.fields.status)
f.write("No updates since last week.\n\n")
f.close()
return filename
| 11,595
|
def entropy(series):
"""Normalized Shannon Index"""
# a series in which all the entries are equal should result in normalized entropy of 1.0
# eliminate 0s
series1 = series[series!=0]
    # if there are fewer than 2 non-zero entries, return 0
if len(series1) > 1:
# calculate the maximum possible entropy for given length of input series
max_s = -np.log(1.0/len(series))
total = float(sum(series1))
p = series1.astype('float')/float(total)
return sum(-p*np.log(p))/max_s
else:
return 0.0
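# Worked example (assumes pandas is imported as pd elsewhere): for a uniform series
# such as pd.Series([1, 1, 1, 1]), each p is 0.25, so sum(-p*np.log(p)) equals
# np.log(4), which is exactly max_s, giving a normalized entropy of 1.0.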
| 11,596
|
def spatially_whiten(X:np.ndarray, *args, **kwargs):
"""spatially whiten the nd-array X
Args:
X (np.ndarray): the data to be whitened, with channels/space in the *last* axis
Returns:
X (np.ndarray): the whitened X
W (np.ndarray): the whitening matrix used to whiten X
"""
Cxx = updateCxx(None,X,None)
W,_ = robust_whitener(Cxx, *args, **kwargs)
X = X @ W #np.einsum("...d,dw->...w",X,W)
return (X,W)
| 11,597
|
def get_settings(basename: str="settings.yml", path: Path=PROJECT_ROOT / "conf") -> dict:
"""
Loads settings file
Args:
basename (str, optional): Basename of settings file. Defaults to "settings.yml".
        path (Path, optional): Path of settings file. Defaults to PROJECT_ROOT/"conf".
Raises:
exc: Yaml load exception
Returns:
dict: settings
"""
with open(str(path / basename), 'r') as stream:
try:
settings = yaml.safe_load(stream)
except yaml.YAMLError as exc:
raise exc
return settings
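# Minimal usage sketch (hedged: assumes conf/settings.yml exists under PROJECT_ROOT;
# "dev.yml" is a made-up file name):
#
#     >>> cfg = get_settings()            # loads conf/settings.yml
#     >>> cfg = get_settings("dev.yml")   # loads conf/dev.yml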
| 11,598
|
def quaternion2rotationPT( q ):
""" Convert unit quaternion to rotation matrix
Args:
q(torch.tensor): unit quaternion (N,4)
Returns:
torch.tensor: rotation matrix (N,3,3)
"""
r11 = (q[:,0]**2+q[:,1]**2-q[:,2]**2-q[:,3]**2).unsqueeze(0).T
r12 = (2.0*(q[:,1]*q[:,2]-q[:,0]*q[:,3])).unsqueeze(0).T
r13 = (2.0*(q[:,1]*q[:,3]+q[:,0]*q[:,2])).unsqueeze(0).T
r21 = (2.0*(q[:,1]*q[:,2]+q[:,0]*q[:,3])).unsqueeze(0).T
r22 = (q[:,0]**2+q[:,2]**2-q[:,1]**2-q[:,3]**2).unsqueeze(0).T
r23 = (2.0*(q[:,2]*q[:,3]-q[:,0]*q[:,1])).unsqueeze(0).T
r31 = (2.0*(q[:,1]*q[:,3]-q[:,0]*q[:,2])).unsqueeze(0).T
r32 = (2.0*(q[:,2]*q[:,3]+q[:,0]*q[:,1])).unsqueeze(0).T
r33 = (q[:,0]**2+q[:,3]**2-q[:,1]**2-q[:,2]**2).unsqueeze(0).T
r = torch.cat( (r11,r12,r13,
r21,r22,r23,
r31,r32,r33), 1 )
r = torch.reshape( r, (q.shape[0],3,3))
return r
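# Sanity check (follows from the formulas above): the identity quaternion maps to the
# identity rotation.
#
#     >>> q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
#     >>> quaternion2rotationPT(q)   # -> (1, 3, 3) tensor equal to the 3x3 identity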
| 11,599
|