| content | id |
|---|---|
def find_2020(inputs, num_combinations=2):
    """
    Accepts an iterable as input and yields the product of each group of
    `num_combinations` numbers in the iterable that sums to 2020.
    """
    # combinations (rather than permutations) avoids yielding the same product once per ordering
    for vals in itertools.combinations(inputs, num_combinations):
        if sum(vals) == 2020:
            yield np.prod(vals)
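A minimal usage sketch (the expense values below are illustrative, not from the source; `itertools` and `numpy` are assumed to be imported as the snippet requires):

import itertools
import numpy as np

expenses = [1721, 979, 366, 299, 675, 1456]
print(next(find_2020(expenses)))      # 514579 == 1721 * 299
print(next(find_2020(expenses, 3)))   # 241861950 == 979 * 366 * 675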
| 5,341,500
|
def test_compare_build():
"""Build version must not be use in precedence."""
# https://semver.org/#spec-item-10
assert Version(1, 0, 0, None, "build1") == Version(1, 0, 0)
assert Version(1, 0, 0, None, "build1") == Version(1, 0, 0, None, "build2")
| 5,341,501
|
def feedback(olsys,H=1):
"""Calculate the closed-loop transfer function
olsys
cltf = --------------
1+H*olsys
where olsys is the transfer function of the open loop
system (Gc*Gp) and H is the transfer function in the feedback
loop (H=1 for unity feedback)."""
clsys=olsys/(1.0+H*olsys)
return clsys
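A quick numeric sanity check (a sketch; `olsys` is normally a transfer-function object, but plain floats exercise the same algebra):

print(feedback(4.0))         # 0.8 == 4 / (1 + 4), unity feedback
print(feedback(4.0, H=0.5))  # 1.333... == 4 / (1 + 0.5 * 4)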
| 5,341,502
|
def get_sigma_grid(
init_sigma: float = 1.0, factor: int = 2, n_grid_points: int = 20
) -> List[float]:
"""Get a standard parameter grid for the cross validation strategy.
Parameters
----------
init_sigma : float, default=1.0
The initial sigma to use to populate the grid points.
factor : int, default=2
The log scale factor to use for both the beginning and end of the grid.
n_grid_points : int, default=20
The number of grid points to use.
Returns
-------
param_grid : List[float]
The parameter grid as per the specifications
Example
-------
    >>> param_grid = get_sigma_grid()
    >>> param_grid = get_sigma_grid(10.0, 3, 1_000)
"""
# create bounds for search space (logscale)
init_space = 10 ** (-factor)
end_space = 10 ** (factor)
# create param grid
param_grid = np.logspace(
np.log10(init_sigma * init_space),
np.log10(init_sigma * end_space),
n_grid_points,
)
    # convert to a plain list to match the declared List[float] return type
    return param_grid.tolist()
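A worked example of the grid this produces (a sketch; with init_sigma=1.0 and factor=2 the bounds are 10**-2 and 10**2):

import numpy as np  # required by get_sigma_grid

print(get_sigma_grid(init_sigma=1.0, factor=2, n_grid_points=5))
# ~ [0.01, 0.1, 1.0, 10.0, 100.0]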
| 5,341,503
|
def parse_csv(value_column):
"""Parses a CSV file based on the provided column types."""
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(ALL_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
classes = tf.cast(label, tf.int32) - 1
return features, classes
| 5,341,504
|
def on_over_limit():
""" This is called when the rate limit is reached """
return jsonify(status='error', error=[_('Whoa, calm down and wait a bit before posting again.')])
| 5,341,505
|
def comment(strng,indent=''):
"""return an input string, commented out"""
template = indent + '# %s'
lines = [template % s for s in strng.splitlines(True)]
return ''.join(lines)
| 5,341,506
|
def square(number):
"""
Calculates how many grains were on each square
    :param number: the square's position on the board (1-64)
    :return: the number of grains on that square, 2 ** (number - 1)
    """
    if number is None or number <= 0 or number > 64:
raise ValueError(ERROR)
return 2**(number - 1)
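Illustrative usage (ERROR here is a stand-in for the module-level constant assumed by the snippet):

ERROR = "square must be between 1 and 64"

print(square(1))   # 1
print(square(3))   # 4
print(square(64))  # 9223372036854775808 == 2**63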
| 5,341,507
|
def get_doc(name=None, filename=None, url=None, parsed=True, start=0, end=None,
localfile=None, params=None, cookies=None, **kwargs):
"""
Retrieve an IDE file from either a file or URL.
Note: `name`, `filename`, and `url` are mutually exclusive arguments.
One and only one must be specified. Attempting to supply more than one
will generate an error.
Example usage::
get_doc("my_recording.ide")
get_doc("https://example.com/remote_recording.ide")
get_doc(filename="my_recording.ide")
get_doc(url="https://example.com/remote_recording.ide")
get_doc(filename="my_recording.ide", start="1:23")
The `start` and `end` times, if used, may be specified in several
ways:
* `int`/`float` (Microseconds from the recording start)
* `str` (formatted as a time from the recording start, e.g., `MM:SS`,
`HH:MM:SS`, `DDd HH:MM:SS`). More examples:
* ``":01"`` or ``":1"`` or ``"1s"`` (1 second)
* ``"22:11"`` (22 minutes, 11 seconds)
* ``"3:22:11"`` (3 hours, 22 minutes, 11 seconds)
* ``"1d 3:22:11"`` (1 day, 3 hours, 22 minutes, 11 seconds)
* `datetime.timedelta` or `pandas.Timedelta` (time from the
recording start)
* `datetime.datetime` (an explicit UTC time)
:param name: The name or URL of the IDE. The method of fetching it will
be automatically chosen based on how it is formatted.
:param filename: The name of an IDE file. Supplying a name this way will
force it to be read from a file, avoiding the possibility of
accidentally trying to retrieve it via URL.
:param url: The URL of an IDE file. Supplying a name this way will force
it to be read from a URL, avoiding the possibility of accidentally
trying to retrieve it from a local file.
    :param parsed: If `True` (default), the IDE will be fully parsed after it
        is fetched. If `False`, only the file metadata will be initially
        loaded, and a later call to `idelib.importer.readData()` is required
        to load the recording data. This can save time.
:param start: The starting time. Defaults to the start of the
recording. Only applicable if `parsed` is `True`.
:param end: The ending time. Defaults to the end of the recording. Only
applicable if `parsed` is `True`.
    :param localfile: The name of the file to which to write data received
from a URL. If none is supplied, a temporary file will be used. Only
applicable when opening a URL.
:param params: Additional URL request parameters. Only applicable when
opening a URL.
:param cookies: Additional browser cookies for use in the URL request.
Only applicable when opening a URL.
:return: The fetched IDE data.
Additionally, `get_doc()` will accept the keyword arguments for
`idelib.importer.importFile()` or `idelib.importer.openFile()`
"""
if len([x for x in (name, filename, url) if x]) != 1:
raise TypeError("Only one source can be specified: name, filename, or url")
original = name or filename or url # For error reporting
stream = None
parsed_url = None
if name:
if os.path.isfile(name):
filename = name
else:
parsed_url = urlparse(name.replace('\\', '/'))
if not parsed_url.scheme or parsed_url.scheme == "file":
filename = parsed_url.path
else:
url = name
if filename:
filename = os.path.abspath(os.path.expanduser(filename))
stream = open(filename, 'rb')
elif url:
kwargs.setdefault('name', url)
parsed_url = parsed_url or urlparse(url)
if parsed_url.scheme.startswith('http'):
stream, _total = _get_url(url, localfile=localfile, params=params, cookies=cookies)
else:
# future: more fetching schemes before this `else` (ftp, etc.)?
raise ValueError(f"Unsupported transfer scheme: {parsed_url.scheme}")
if stream:
if not validate(stream):
stream.close()
raise ValueError(f"Could not read a Dataset from '{original}'"
f"(not an IDE file?)")
# Separate `openFile()` and `readData` kwargs, remove ones that aren't shared
open_kwargs = kwargs.copy()
read_kwargs = kwargs.copy()
for k in ('startTime', 'endTime', 'channels', 'source', 'total',
'bytesRead', 'samplesRead'):
open_kwargs.pop(k, None)
doc = openFile(stream, **open_kwargs)
if parsed:
for k in ('defaults', 'name', 'quiet'):
read_kwargs.pop(k, None)
session_start = doc.lastSession.utcStartTime
if session_start:
session_start = datetime.utcfromtimestamp(session_start)
if start:
read_kwargs['startTime'] = parse_time(start, session_start)
if end:
read_kwargs['endTime'] = parse_time(end, session_start)
readData(doc, **read_kwargs)
return doc
raise ValueError(f"Could not read data from '{original}'")
| 5,341,508
|
def naginator(parser, xml_parent, data):
"""yaml: naginator
Automatically reschedule a build after a build failure
Requires the Jenkins :jenkins-wiki:`Naginator Plugin <Naginator+Plugin>`.
:arg bool rerun-unstable-builds: Rerun build for unstable builds as well
as failures (default False)
    :arg int fixed-delay: Fixed delay before retrying build (cannot be used
        with progressive-delay-increment or progressive-delay-maximum).
        This is the default delay type. (Default 0)
:arg int progressive-delay-increment: Progressive delay before retrying
build increment (cannot be used when fixed-delay is being used)
(Default 0)
:arg int progressive-delay-maximum: Progressive delay before retrying
maximum delay (cannot be used when fixed-delay is being used)
(Default 0)
:arg int max-failed-builds: Maximum number of successive failed builds
(Default 0)
:arg str regular-expression: Only rerun build if regular expression is
found in output (Default '')
Example:
.. literalinclude:: /../../tests/publishers/fixtures/naginator001.yaml
:language: yaml
"""
naginator = XML.SubElement(
xml_parent,
'com.chikli.hudson.plugin.naginator.NaginatorPublisher')
XML.SubElement(naginator, 'regexpForRerun').text = str(
data.get('regular-expression', ''))
XML.SubElement(naginator, 'checkRegexp').text = str(
'regular-expression' in data).lower()
XML.SubElement(naginator, 'rerunIfUnstable').text = str(
data.get('rerun-unstable-builds', False)).lower()
progressive_delay = ('progressive-delay-increment' in data or
'progressive-delay-maximum' in data)
if 'fixed-delay' in data and progressive_delay:
raise JenkinsJobsException("You cannot specify both fixed "
"and progressive delays")
if not progressive_delay:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.FixedDelay'})
XML.SubElement(delay, 'delay').text = str(
data.get('fixed-delay', '0'))
else:
delay = XML.SubElement(
naginator,
'delay',
{'class': 'com.chikli.hudson.plugin.naginator.ProgressiveDelay'})
XML.SubElement(delay, 'increment').text = str(
data.get('progressive-delay-increment', '0'))
XML.SubElement(delay, 'max').text = str(
data.get('progressive-delay-maximum', '0'))
XML.SubElement(naginator, 'maxSchedule').text = str(
data.get('max-failed-builds', '0'))
| 5,341,509
|
def health_check() -> ControllerResponse:
"""
Retrieve the current health of service integrations.
Returns
-------
dict
Response content.
int
HTTP status code.
dict
Response headers.
"""
status = {}
for name, obj in _getServices():
logger.info('Getting status of %s' % name)
status[name] = _healthy_session(obj)
return status, 200, {}
| 5,341,510
|
def GeoSim(hss_0, pow_law_exp, lat1, lon1, lat2, lon2):
""" In order to make the Similarity adimensional I have to add a scale to the game.
This scale is hss, i.e. the scale after which the similairty is damped by a factor 2.
:param pow_law_exp: is the exponent of the power law
"""
# @TODO: measure power operator performance vs `math.pow`
return (float(hss_0)/(hss_0 + GeoDist(lat1, lon1, lat2, lon2)))**pow_law_exp
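A hypothetical check with a stubbed GeoDist (the real GeoDist is defined elsewhere in the original module); at a distance equal to hss_0 the similarity is 0.5 ** pow_law_exp:

def GeoDist(lat1, lon1, lat2, lon2):
    # stand-in: pretend every pair of points is 100 distance units apart
    return 100.0

print(GeoSim(100.0, 1.0, 0, 0, 0, 0))  # 0.5
print(GeoSim(100.0, 2.0, 0, 0, 0, 0))  # 0.25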
| 5,341,511
|
def _get_script():
"""Get path to the image sequence script"""
try:
from openpype.scripts import publish_filesequence
except Exception:
raise RuntimeError("Expected module 'publish_deadline'"
"to be available")
module_path = publish_filesequence.__file__
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
return module_path
| 5,341,512
|
def main():
"""Main"""
try:
config.load_kube_config()
except FileNotFoundError:
config.load_incluster_config()
core_v1_client = client.CoreV1Api()
k8s_client = client.ApiClient()
# ConfigMap
deploy_config_map(core_v1_client)
# CSI Pods
deploy_csi_pods(core_v1_client)
# Storage Class
deploy_storage_class()
# Watch CRD
crd_watch(core_v1_client, k8s_client)
| 5,341,513
|
def residual_unit(data, nchw_inshape, num_filter, stride, dim_match, name, bottle_neck=True,
workspace=256, memonger=False, conv_layout='NCHW', batchnorm_layout='NCHW',
verbose=False, cudnn_bn_off=False, bn_eps=2e-5, bn_mom=0.9, conv_algo=-1,
fuse_bn_relu=False, fuse_bn_add_relu=False, cudnn_tensor_core_only=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
    nchw_inshape : tuple of int
        Input minibatch shape in (n, c, h, w) format independent of actual layout
    num_filter : int
        Number of output channels
    bottle_neck : bool
        Whether to use the bottleneck (1x1 -> 3x3 -> 1x1) variant of the unit
stride : tuple
Stride used in convolution
dim_match : Boolean
True means channel number between input and output is the same, otherwise means differ
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
Returns
-------
(sym, nchw_outshape)
sym : the model symbol (up to this point)
nchw_outshape : tuple
(batch_size, features, height, width)
"""
nchw_shape = nchw_inshape
act = 'relu' if fuse_bn_relu else None
if bottle_neck:
conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1))
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn2', cudnn_off=cudnn_bn_off, act_type=act)
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2') if not fuse_bn_relu else bn2
conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv3, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn3 = batchnorm(data=conv3, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, eps=bn_eps, momentum=bn_mom, name=name + '_bn3', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn3 + shortcut, act_type='relu', name=name + '_relu3'),
nchw_shape)
else:
conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1))
bn1 = batchnorm(data=conv1, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn1', cudnn_off=cudnn_bn_off, act_type=act)
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1') if not fuse_bn_relu else bn1
conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
nchw_shape = conv_nchw_out_shape(nchw_shape, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1))
if dim_match:
shortcut = data
else:
conv1sc = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_conv1sc', layout=conv_layout,
cudnn_algo_verbose=verbose,
cudnn_algo_fwd=conv_algo, cudnn_algo_bwd_data=conv_algo, cudnn_algo_bwd_filter=conv_algo,
cudnn_tensor_core_only=cudnn_tensor_core_only)
sc_nchw_shape = conv_nchw_out_shape(nchw_inshape, num_filter=num_filter, kernel=(1,1), stride=stride)
shortcut = batchnorm(data=conv1sc, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_sc', cudnn_off=cudnn_bn_off)
if memonger:
shortcut._set_attr(mirror_stage='True')
if fuse_bn_add_relu:
return (batchnorm_add_relu(data=conv2, addend=shortcut, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off),
nchw_shape)
else:
bn2 = batchnorm(data=conv2, io_layout=conv_layout, batchnorm_layout=batchnorm_layout,
fix_gamma=False, momentum=bn_mom, eps=bn_eps, name=name + '_bn2', cudnn_off=cudnn_bn_off)
return (mx.sym.Activation(data=bn2 + shortcut, act_type='relu', name=name + '_relu2'),
nchw_shape)
| 5,341,514
|
def dwa_control(x, config, goal, ob):
"""
Dynamic Window Approach control
"""
dw = calc_dynamic_window(x, config)
u, trajectory = calc_final_input(x, dw, config, goal, ob)
return u, trajectory
| 5,341,515
|
def detect_meteors(rf_dir, id_dir, noise_dir, output_dir,
t0=None, t1=None, rxch='zenith-l', txch='tx-h'):
"""Function to detect and summarize meteor head echoes.
Arguments
---------
rf_dir : string or list
RF data directory or directories.
id_dir : string
ID code metadata directory.
noise_dir : string
RX noise metadata directory.
output_dir : string
Meteor data output directory.
t0 : float, optional
Start time, seconds since epoch. If None, start at beginning of data.
t1 : float, optional
End time, seconds since epoch. If None, end at end of data.
rxch : string, optional
Receiver channel to process.
txch : string, optional
Transmitter channel.
"""
rfo = drf.read_hdf5(rf_dir)
ido = dmd.read_digital_metadata(id_dir)
no = dmd.read_digital_metadata(noise_dir)
if t0 is None or t1 is None:
bounds = []
bounds.append(rfo.get_bounds(rxch))
bounds.append(rfo.get_bounds(txch))
bounds.append(ido.get_bounds())
bounds.append(no.get_bounds())
bounds = np.asarray(bounds)
ss = np.max(bounds[:, 0])
se = np.min(bounds[:, 1])
fs = rfo.get_metadata(rxch)['sample_rate'].value
if t0 is None:
s0 = ss
else:
s0 = int(np.round(t0*fs))
if t1 is None:
s1 = se
else:
s1 = int(np.round(t1*fs))
tmm = TimingModeManager.TimingModeManager()
if os.path.exists('/tmp/tmm.hdf5'):
tmm.loadFromHdf5('/tmp/tmm.hdf5', skip_lowlevel=True)
else:
tmm.loadFromHdf5(skip_lowlevel=True)
for k, (tx, rx) in enumerate(data_generator(rfo, ido, no, tmm, s0, s1, rxch, txch)):
#FIXME call processing functions here
pass
| 5,341,516
|
def GetCoverageDirectory(fuzzer):
"""Get a coverage report directory for a fuzzer
Args:
fuzzer: The fuzzer to get the coverage report directory for.
Returns:
The location of the coverage report directory for the |fuzzer|.
"""
relative_path = os.path.join(COVERAGE_REPORT_DIRECTORY_NAME, fuzzer)
return GetScriptStoragePath(relative_path)
| 5,341,517
|
def decompress_bytes(inp_bytes: bytes, verbose=False) -> bytearray:
"""
Main function to decompress input bytes by extracting the Huffman map
and using the map to replace the encoded sequences with the original
characters.
    :param inp_bytes: Input data to be decompressed
:param verbose: set to True for printing console outputs
:return: decompressed bytearray data
"""
huff_map: HuffCode
rem: int
huff_map, rem = extract_huff_map(inp_bytes, verbose=verbose)
inp_bytes = inp_bytes[:-rem]
rev_seq: str = reverse_final_sequence(inp_bytes, verbose=verbose)
res: bytearray = reverse_huff_sequence(huff_map, rev_seq, verbose=verbose)
return res
| 5,341,518
|
def test_init():
""" Config initialization works as expected """
inst = _config.Config.from_file(fixture('config1.schema'))
assert inst.tables == [
('Yo', 'Yo'),
('some', 'table'),
('somethingElse', 'somethingElse'),
('y', 'x.y'),
('a', 'b.c'),
]
assert inst.schemas == {'foo': 'bar'}
assert inst._lines == [
'# This is a comment. I love comments.\n', '#\n', '\n', 'Yo\n',
'some = table\n', 'somethingElse\n', 'x.y\n', 'a = b.c\n', '\n',
'[schemas]\n', 'foo = bar\n',
]
| 5,341,519
|
def filter_nans(data,
threshold = 3,
threshold_type = "data"):
"""
=================================================================================================
filter_nans(data, threshold, threshold_type)
This function is meant to filter out the nan values from a list, based on the input arguments.
=================================================================================================
Arguments:
data -> A list (or iterable) of data points. The points are assumed to be numbers.
threshold -> An integer describing the minimum value requirement.
    threshold_type -> A string describing how the threshold integer will be applied:
                      either relative to the data ("data"/"on_data"/"on data") or to the
                      NaNs ("nan"/"on_nan"/"on nan").
=================================================================================================
Returns: The filtered list, or an empty list if the threshold requirements were not met.
=================================================================================================
"""
# Make sure the user gave a valid thresholding option
assert threshold_type.lower() in ["data",
"on_data",
"on data",
"nan",
"on_nan",
"on nan"], "Threshold is either relative to NaN or data."
assert type(data) == list, "The data should be in a list"
# Filter NaNs, as they do not equal themselves
filtered = [val for val in data if val == val]
# Keep data if there are at least <threshold> data points
if threshold_type.lower() in ["data", "on_data", "on data"]:
if len(filtered) >= threshold:
return filtered
else:
return []
# Keep data if there are no more than <threshold> nans
elif threshold_type.lower() in ["nan", "on_nan", "on nan"]:
if len(data) - len(filtered) <= threshold:
return filtered
else:
return []
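Illustrative usage (a sketch with made-up values):

data = [1.0, float("nan"), 2.0, float("nan"), 3.0]
print(filter_nans(data, threshold=3, threshold_type="data"))  # [1.0, 2.0, 3.0]
print(filter_nans(data, threshold=1, threshold_type="nan"))   # [] -- more than 1 NaN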
| 5,341,520
|
def configure_template_filters(app):
"""Configures the template filters."""
filters = {}
filters['format_date'] = format_date
filters['time_since'] = time_since
filters['is_online'] = is_online
filters['crop_title'] = crop_title
filters['forum_is_unread'] = forum_is_unread
filters['topic_is_unread'] = topic_is_unread
permissions = [
('is_admin', IsAdmin),
('is_moderator', IsAtleastModerator),
('is_admin_or_moderator', IsAtleastModerator),
('can_edit_user', CanEditUser),
('can_ban_user', CanBanUser),
]
filters.update(
[(name, partial(perm, request=request)) for name, perm in permissions]
)
# these create closures
filters['can_moderate'] = TplCanModerate(request)
filters['post_reply'] = TplCanPostReply(request)
filters['edit_post'] = TplCanEditPost(request)
filters['delete_post'] = TplCanDeletePost(request)
filters['post_topic'] = TplCanPostTopic(request)
filters['delete_topic'] = TplCanDeleteTopic(request)
app.jinja_env.filters.update(filters)
app.jinja_env.globals["run_hook"] = template_hook
app.pluggy.hook.flaskbb_jinja_directives(app=app)
| 5,341,521
|
def edit_seq2seq_config(config, frameworks=FULL_FRAMEWORKS, no_attn=False):
"""Rotate frameworks and optionally remove attention."""
configs = []
for fw in frameworks:
c = deepcopy(config)
c['backend'] = fw
configs.append(c)
if not no_attn:
new_configs = []
# Run the non attention version
for config in configs:
c = deepcopy(config)
c['model']['model_type'] = 'default'
new_configs.append(c)
new_configs.append(config)
configs = new_configs
return configs
| 5,341,522
|
def get_bioportal_prefix(prefix: str) -> Optional[str]:
"""Get the Bioportal prefix if available."""
return _get_mapped_prefix(prefix, "bioportal")
| 5,341,523
|
def _obtain_rapt(request, access_token, requested_scopes):
"""Given an http request method and reauth access token, get rapt token.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
access_token (str): reauth access token
requested_scopes (Sequence[str]): scopes required by the client application
Returns:
str: The rapt token.
Raises:
google.auth.exceptions.ReauthError: if reauth failed
"""
msg = _get_challenges(
request,
list(challenges.AVAILABLE_CHALLENGES.keys()),
access_token,
requested_scopes,
)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
for _ in range(0, RUN_CHALLENGE_RETRY_LIMIT):
if not (
msg["status"] == _CHALLENGE_REQUIRED or msg["status"] == _CHALLENGE_PENDING
):
raise exceptions.ReauthFailError(
"Reauthentication challenge failed due to API error: {}".format(
msg["status"]
)
)
if not is_interactive():
raise exceptions.ReauthFailError(
"Reauthentication challenge could not be answered because you are not"
" in an interactive session."
)
msg = _run_next_challenge(msg, request, access_token)
if msg["status"] == _AUTHENTICATED:
return msg["encodedProofOfReauthToken"]
# If we got here it means we didn't get authenticated.
raise exceptions.ReauthFailError("Failed to obtain rapt token.")
| 5,341,524
|
def argunique(items, key=None):
"""
Returns indices corresponding to the first instance of each unique item.
Args:
items (Sequence[VT]): indexable collection of items
key (Callable[[VT], Any], default=None): custom normalization function.
If specified returns items where ``key(item)`` is unique.
Returns:
Iterator[int] : indices of the unique items
Example:
>>> import ubelt as ub
>>> items = [0, 2, 5, 1, 1, 0, 2, 4]
>>> indices = list(ub.argunique(items))
>>> assert indices == [0, 1, 2, 3, 7]
>>> indices = list(ub.argunique(items, key=lambda x: x % 2 == 0))
>>> assert indices == [0, 2]
"""
if key is None:
return unique(range(len(items)), key=lambda i: items[i])
else:
return unique(range(len(items)), key=lambda i: key(items[i]))
| 5,341,525
|
def promote_cvals(*vals):
"""
Promote Python values into the most general dshape containing
all of them. Only defined over simple CType instances.
    >>> promote_cvals(1, 2.)
    dshape("float64")
    >>> promote_cvals(1, 2, 3j)
    dshape("complex128")
"""
promoted = np.result_type(*vals)
datashape = CType.from_dtype(promoted)
return datashape
| 5,341,526
|
def setup_logging(
color: bool, verbose: bool = False, add_timestamp: bool = False
) -> None: # pragma: no cover
"""Setup logging.
Args:
color (bool): If true, the output will be colored using
colorlog. Otherwise, it will be plaintext.
"""
root_logger = logging.getLogger()
    # `verbose` selects the more detailed DEBUG level; otherwise use the custom OUTPUT level
    if verbose:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(OUTPUT)
handler = logging.StreamHandler()
handler.setFormatter(_get_formatter(color, add_timestamp))
root_logger.addHandler(handler)
# Silence noisy loggers
logging.getLogger("sh").setLevel(logging.WARNING)
| 5,341,527
|
def load_directory_metadata(directory_path, return_copy=True):
"""
Get stored metadata for files in path. This currently only stores bookmarks.
If no metadata is available, return an empty dictionary.
This is a hidden file in the directory which stores metadata for all files
in the directory, as well as the directory itself. This has a bunch of
advantages over putting the data in each file:
- Every file format has its own way of storing metadata, and there are no
robust libraries that handle all of them.
- We don't have to modify the user's files, so there's no chance of us screwing
up and causing data loss.
- Opening each file during a refresh is extremely slow. It's much faster to
have a single file that we only read once per directory scan.
- We can use Windows Search to search this data if we format it properly. Use
a file extension that it indexes by default (we use .txt), and we can insert
keywords in the file that we can search for. Windows Search will index metadata
for some file types, but it's hit-or-miss (it handles JPEGs much better than PNGs).
"""
with _metadata_lock:
return _load_directory_metadata_locked(directory_path, return_copy=return_copy)
| 5,341,528
|
def download_all():
"""Download all files in the DATA_HUB"""
for name in DATA_HUB:
download(name)
| 5,341,529
|
def _traverse_tree_and_group_all_objects_by_oclass(root_obj, result=None):
"""Traverses the tree once and groups all objects by oclass
    :param root_obj: The root object where to start the traversal
:type root_obj: CUDS
:param result: The current results of the recursion, defaults to None
:type result: dict, optional
:return: All CUDS objects in the tree, grouped by oclass.
:rtype: dict
"""
if result is None:
result = {str(root_obj.oclass): [root_obj]}
for neighbour in root_obj.iter():
        if str(neighbour.oclass) not in result:
result[str(neighbour.oclass)] = [neighbour]
else:
result[str(neighbour.oclass)].append(neighbour)
_traverse_tree_and_group_all_objects_by_oclass(neighbour, result)
return result
| 5,341,530
|
def check_measurement(m_info, filters):
"""
Determine whether a given measurement should be included based on the
filters.
Inputs:
m_info - A dictionary containing the configuration parameters for an
individual measurement.
filters - A dictionary containing a set of configuration parameter
values that should be included
Output:
include - Boolean indicating whether to include the given measurement
"""
include = True
    for filter_field, filter_values in filters.items():
        try:
            iter(filter_values)
        except TypeError:
            filter_values = [filter_values]
if not m_info[filter_field] in filter_values:
include = False
return include
| 5,341,531
|
def audiosegment2wav(data: AudioSegment):
"""
    Convert a pydub.AudioSegment into a wav audio signal.
    :param data: input AudioSegment
    :return: waveform as a float array normalized to [-1, 1]
"""
wav = np.array(data.get_array_of_samples()) / _int16_max
return wav
| 5,341,532
|
def embargo(cand_times, test_times, embargo_table):
"""
"Embargo" observations from the training set.
Args:
cand_times(Series): times of candidates to be the "embargoed set"
index: t0(start time)
value: t1(end time)
test_times(Series): times of the test set
index: t0(start time)
value: t1(end time)
embargo_table(Series): embargo times table returned by get_embargo_table()
Returns:
embargoed_times(Series): times of embargoed training set
index: t0(start time)
value: t1(end time)
"""
first_test_start = test_times.index[0]
final_test_start = test_times.index[-1]
final_embargo_start = embargo_table[final_test_start] # end time of the embargo
to_embargo_idx = cand_times.loc[first_test_start:final_embargo_start].index
embargoed_times = cand_times.drop(to_embargo_idx)
return embargoed_times
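A toy example with integer timestamps (purely illustrative; real usage passes pandas Series of actual times):

import pandas as pd

cand_times = pd.Series({1: 2, 3: 4, 5: 6, 7: 8})   # candidate train obs: t0 -> t1
test_times = pd.Series({3: 4, 5: 6})               # test obs: t0 -> t1
embargo_table = pd.Series({3: 5, 5: 6})            # t0 -> end of its embargo period
print(embargo(cand_times, test_times, embargo_table).to_dict())  # {1: 2, 7: 8}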
| 5,341,533
|
def mkdirp(directory):
"""
    Use the Python standard library to achieve the effect of shell ``mkdir -p``.
    The benefit is that it avoids ``os.system()`` and the overhead of forking a process.
    :param directory: path to create
"""
if not os.path.isdir(directory):
os.makedirs(directory)
| 5,341,534
|
def test_can_parse_and_serialize_frozen_attrs_class():
"""Parse and serialize a frozen attrs class."""
processor = xml.user_object('book', _FrozenBook, [
xml.string('title'),
xml.string('author'),
])
value = _FrozenBook(
title='The Three Body Problem',
author='Liu Cixin'
)
assert_can_roundtrip_xml_value(processor, value)
| 5,341,535
|
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the WiZ Light platform from config_flow."""
# Assign configuration variables.
wiz_data = hass.data[DOMAIN][entry.entry_id]
wizbulb = WizBulbEntity(wiz_data.bulb, entry.data.get(CONF_NAME), wiz_data.scenes)
# Add devices with defined name
async_add_entities([wizbulb], update_before_add=True)
return True
| 5,341,536
|
def handle_uploaded_file(file, filename):
"""
    Process a CSV file scraped from online.edu.ru
"""
if not os.path.exists('upload/'):
os.mkdir('upload/')
path = 'upload/' + filename
with open(path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
df = pandas.read_csv(path, sep=',', encoding='utf-8')
df.dropna(subset=['Направления подготовки'], inplace=True)
df = df.drop(['Unnamed: 0'], axis=1)
return df
| 5,341,537
|
async def check_account():
"""
A check that checks if the user has an account and if not creates one for them.
"""
async def check(ctx) -> bool:
conn = get_db()
cur = conn.cursor()
cur.execute("SELECT * FROM economy WHERE user_id = ?", (ctx.author.id,))
if cur.fetchone() is None:
cur.execute("INSERT INTO economy (user_id) VALUES (?)", (ctx.author.id,))
cur.execute("INSERT INTO cooldowns (user_id) VALUES (?)", (ctx.author.id,))
conn.commit()
cur.close()
conn.close()
return True
return check
| 5,341,538
|
def run_send_feedback(args):
"""
    Do a send-feedback call to the Seldon API
Parameters
----------
args
Command line args
"""
contract = json.load(open(args.contract, "r"))
contract = unfold_contract(contract)
sc = get_seldon_client(args)
if args.grpc:
transport = "grpc"
else:
transport = "rest"
for i in range(args.n_requests):
batch = generate_batch(contract, args.batch_size, "features")
response_predict = sc.predict(data=batch, deployment_name=args.deployment)
response_feedback = sc.feedback(
prediction_request=response_predict.request,
prediction_response=response_predict.response,
reward=1.0,
deployment_name=args.deployment,
transport=transport,
)
if args.prnt:
print(f"RECEIVED RESPONSE:\n{response_feedback}\n")
| 5,341,539
|
def mtf_image_transformer_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_image_transformer_base()
hparams.mesh_shape = "batch:8"
hparams.layout = "batch:batch"
hparams.learning_rate_decay_steps = 13600 # one epoch
hparams.batch_size = 32
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.d_ff = 2048
hparams.learning_rate = 0.5
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
| 5,341,540
|
def edc_t(path):
"""EPICA Dome C Ice Core 800KYr Temperature Estimates
Temperature record, using Deuterium as a proxy, from the EPICA (European
Project for Ice Coring in Antarctica) Dome C ice core covering 0 to 800
kyr BP.
A data frame with 5788 observations on the following 5 variables.
`Bag`
Bag number
`ztop`
Top depth (m)
`Age`
Years before 1950
`Deuterium`
Deuterium dD data
`dT`
Temperature difference from the average of the last 1000 years ~
-54.5degC
http://www.ncdc.noaa.gov/paleo/icecore/antarctica/domec/domec_epica_data.html
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `edc_t.csv`.
Returns:
Tuple of np.ndarray `x_train` with 5788 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'edc_t.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/edcT.csv'
maybe_download_and_extract(path, url,
save_file_name='edc_t.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
| 5,341,541
|
def init_group_prams(net):
"""Initialize group_prams."""
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': 0.0001},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
return group_params
| 5,341,542
|
def nni_differences_parameters(nni=None, rpeaks=None):
"""Computes basic statistical parameters from a series of successive NN interval differences (mean, min, max, standard deviation).
Parameters
----------
nni : array
NN intervals in [ms] or [s].
rpeaks : array
R-peak times in [ms] or [s].
Returns (biosppy.utils.ReturnTuple Object)
------------------------------------------
[key : format]
Description.
nni_diff_mean: float
Mean NN interval difference [ms].
nni_diff_min : float
Minimum NN interval difference [ms].
nni_diff_max : float
Maximum NN interval difference [ms].
Notes
-----
.. Only one type of input data is required.
.. If both 'nni' and 'rpeaks' are provided, 'nni' will be chosen over the 'rpeaks'
.. NN and R-peak series provided in [s] format will be converted to [ms] format.
"""
# Check input
nn = tools.check_input(nni, rpeaks)
# Get NN interval differences
nnd = tools.nni_diff(nn)
# output
args = (float(nnd.mean()), int(nnd.min()), int(nnd.max()), )
names = ('nni_diff_mean', 'nni_diff_min', 'nni_diff_max', )
return utils.ReturnTuple(args, names)
| 5,341,543
|
def contextualize_model(model, cell_line, genes):
"""Contextualize model at the level of a PySB model."""
# Here we just make a PysbAssembler to be able
# to apply set_context on the model being passed in
model.name = cell_line
cell_line_ccle = cell_line + '_SKIN'
pa = PysbAssembler()
pa.model = model
pa.set_context(cell_line_ccle)
# Set initial conditions for missense mutations
variants = read_ccle_variants(genes)
mutations = variants['missense'][cell_line_ccle]
for gene, mut_list in mutations.items():
for fres, loc, tres in mut_list:
site_name = fres + loc
for ic in model.initial_conditions:
if ic[0].monomer_patterns[0].monomer.name == gene:
sc = ic[0].monomer_patterns[0].site_conditions
if site_name in sc:
sc[site_name] = tres
return pa.model
| 5,341,544
|
def _quaternionInverse(quat):
""" Inverses a list of quaternions
"""
quat_ = np.empty((quat.shape[0],4))
# For every quaternion
for i in range(quat.shape[0]):
mag = quat[i,0]**2 + quat[i,1]**2 + quat[i,2]**2 + quat[i,3]**2
quat_[i,0] = -quat[i,0]/mag
quat_[i,1] = -quat[i,1]/mag
quat_[i,2] = -quat[i,2]/mag
quat_[i,3] = quat[i,3]/mag
return quat_
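Quick sanity check (a sketch; the (x, y, z, w) component ordering is inferred from the indexing above):

import numpy as np

q = np.array([[0.0, 0.0, 0.0, 1.0]])  # identity rotation, unit norm
print(_quaternionInverse(q))           # ~ [[-0. -0. -0.  1.]]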
| 5,341,545
|
def load_dataset(files: list[str]) -> tuple[list[int], list[list[list[int]]]]:
"""load the images and labels of the test dataset
Args:
files (list[str]): list of files path for images and label dataset
Returns:
        tuple[list[int], list[list[list[int]]]]: list of labels and list of int matrices
"""
print("loading the dataset...")
with open(files[0], "rb") as image_file:
megic_number = int.from_bytes(image_file.read(4), 'big', signed=True)
number_of_images = int.from_bytes(image_file.read(4), 'big', signed=True)
rows = int.from_bytes(image_file.read(4), 'big', signed=True)
cols = int.from_bytes(image_file.read(4), 'big', signed=True)
images = []
for _ in range(number_of_images):
matrix = []
for _ in range(rows):
row = []
for _ in range(cols):
row.append(int.from_bytes(image_file.read(1), 'big', signed=False))
matrix.append(row)
images.append(matrix)
with open(files[1], "rb") as label_file:
megic_number = int.from_bytes(label_file.read(4), 'big', signed=True)
number_of_labels = int.from_bytes(label_file.read(4), 'big', signed=True)
labels = []
for _ in range(number_of_labels):
labels.append(int.from_bytes(label_file.read(1), 'big', signed=False))
return labels, images
| 5,341,546
|
def get_minion_node_ips(boot_conf, hb_conf):
"""
    Returns a list of IPs for all minion nodes
:param boot_conf: the snaps-boot configuration dict
:param hb_conf: the adrenaline configuration dict
:return: a list of IP addresses
"""
return __get_node_ips(boot_conf, hb_conf, 'minions')
| 5,341,547
|
def _normalize_sql(sql, maxlen=150):
"""Collapse whitespace and middle-truncate if needed."""
out = ' '.join(sql.split())
if len(out) > maxlen:
i = int(maxlen / 2 - 4)
out = (out[0:i] +
' . . . ' +
out[-i:None])
return out
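Illustrative usage (a sketch with a made-up query):

sql = """
SELECT  *
  FROM  users
 WHERE  id = 1
"""
print(_normalize_sql(sql))                    # SELECT * FROM users WHERE id = 1
print(_normalize_sql("x " * 200, maxlen=20))  # middle-truncated around ' . . . '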
| 5,341,548
|
def utt_non_punct_dialog(dialog: Dict):
"""
Used by: book_skill
"""
dialog = utils.get_last_n_turns(dialog)
dialog = utils.remove_clarification_turns_from_dialog(dialog)
return [{"dialogs": [dialog]}]
| 5,341,549
|
def griddata_easy(xx, yy, data, xi=None, yi=None, dx=None, dy=None, nx=10, ny=10, method='nearest', fill_value=None):
"""
    Generate gridded data from scattered data z = f(x, y)
    ... Wrapper of scipy.interpolate.griddata
Parameters
----------
xx: nd array-like
x-coordinate of scattered data
yy: nd array-like
y-coordinate of scattered data
data: nd array-like
values of scattered data
xi: 1d array
x-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, xi = np.arange(xmin, xmax, dx)
yi: 1d array
y-coordinate of the interpolated grid
... The array must be monotonically increasing.
... If None, yi = np.arange(ymin, ymax, dy)
dx: float
spacing of 'xi' if 'xi' is not given
dy: float
        spacing of 'yi' if 'yi' is not given
nx: int
if 'dx' were not given, dx is set as (xmax-xmin)/nx
ny: int
        if 'dy' were not given, dy is set as (ymax-ymin)/ny
method: method of 2D interpolation
... Options: 'nearest', 'linear', 'cubic'
Returns
-------
xxi: 2d array
x-coordinate of the grid
yyi: 2d array
x-coordinate of the grid
data_i: 2d array
values on the grid
"""
xx, yy, data = np.asarray(xx), np.asarray(yy), np.asarray(data)
if not xx.shape == yy.shape == data.shape:
print('x.shape, y.shape, and data.shape must match. ', xx.shape, yy.shape, data.shape)
raise ValueError('shapes of x, y, and data do not match.')
x, y, data1d = xx.flatten(), yy.flatten(), data.flatten()
if xi is None:
xmin, xmax = np.nanmin(x), np.nanmax(x)
if dx is None:
dx = (xmax - xmin) / nx
xi = np.arange(xmin, xmax, dx)
if yi is None:
ymin, ymax = np.nanmin(y), np.nanmax(y)
if dy is None:
dy = (ymax - ymin) / ny
yi = np.arange(ymin, ymax, dy)
xxi, yyi = np.meshgrid(xi, yi)
# interpolate
data_i = griddata((x, y), data1d, (xxi, yyi), method=method, fill_value=fill_value)
return xxi, yyi, data_i
| 5,341,550
|
def emit_cover(ctx, go_toolchain,
source = None,
mode = None,
importpath = ""):
"""See go/toolchains.rst#cover for full documentation."""
if source == None: fail("source is a required parameter")
if mode == None: fail("mode is a required parameter")
if not importpath: fail("importpath is a required parameter")
stdlib = go_toolchain.stdlib.get(ctx, go_toolchain, mode)
covered = []
cover_vars = []
for s in source.entries:
if not s.want_coverage:
covered.append(s)
continue
outputs = []
for src in s.srcs:
if not src.basename.endswith(".go"):
outputs.append(src)
continue
cover_var = "Cover_" + src.basename[:-3].replace("-", "_").replace(".", "_")
cover_vars.append("{}={}={}".format(cover_var, src.short_path, importpath))
out = declare_file(ctx, path=cover_var, ext='.cover.go')
outputs.append(out)
args = ctx.actions.args()
add_go_env(args, stdlib, mode)
args.add(["--", "--mode=set", "-var=%s" % cover_var, "-o", out, src])
ctx.actions.run(
inputs = [src] + stdlib.files,
outputs = [out],
mnemonic = "GoCover",
executable = go_toolchain.tools.cover,
arguments = [args],
)
members = structs.to_dict(s)
members["srcs"] = outputs
covered.append(GoSource(**members))
return GoSourceList(entries=covered), cover_vars
| 5,341,551
|
async def test_binary_sensor_update_mount_type_garage(
hass: HomeAssistant, ufp: MockUFPFixture, sensor_all: Sensor
):
"""Test binary_sensor motion entity."""
await init_entry(hass, ufp, [sensor_all])
assert_entity_counts(hass, Platform.BINARY_SENSOR, 10, 10)
_, entity_id = ids_from_device_description(
Platform.BINARY_SENSOR, sensor_all, SENSE_SENSORS_WRITE[0]
)
state = hass.states.get(entity_id)
assert state
assert state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.DOOR.value
new_sensor = sensor_all.copy()
new_sensor.mount_type = MountType.GARAGE
mock_msg = Mock()
mock_msg.changed_data = {}
mock_msg.new_obj = new_sensor
ufp.api.bootstrap.sensors = {new_sensor.id: new_sensor}
ufp.ws_msg(mock_msg)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert (
state.attributes[ATTR_DEVICE_CLASS] == BinarySensorDeviceClass.GARAGE_DOOR.value
)
| 5,341,552
|
def shortdate(date=None):
"""turn (timestamp, tzoff) tuple into iso 8631 date."""
return datestr(date, format='%Y-%m-%d')
| 5,341,553
|
def default_fram( object_to_serialize):
"""
Python json api custom serializer function for FRAM Warehouse API
per:'Specializing JSON object encoding', https://simplejson.readthedocs.org
>>> import simplejson as json
>>> json.dumps({'Without':[1,'a',datetime(1999, 1, 1),'Serializer']})
Traceback (most recent call last):
...
TypeError: datetime.datetime(1999, 1, 1, 0, 0) is not JSON serializable
>>> dict2 = {'With':[1,'a',datetime(1999, 1, 1),'Serializer']}
>>> json.dumps( dict2, default=default_fram)
'{"With": [1, "a", "1999-01-01T00:00:00Z", "Serializer"]}'
>>> dict3 = {'With':[1,'a',date(1999, 1, 1),'Serializer']}
>>> json.dumps( dict3, default=default_fram)
'{"With": [1, "a", "1999-01-01", "Serializer"]}'
>>> dict4 = {'With':[1,'a',time(4, 5, 6),'Serializer']}
>>> json.dumps( dict4, default=default_fram)
'{"With": [1, "a", "1970-01-01T04:05:06Z", "Serializer"]}'
>>> numpy_64bit_int = {'With':[1,numpy.int64(5678),'Support']}
>>> json.dumps(numpy_64bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_32bit_int = {'With':[1,numpy.int32(5678),'Support']}
    >>> json.dumps(numpy_32bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
>>> numpy_16bit_int = {'With':[1,numpy.int16(5678),'Support']}
    >>> json.dumps(numpy_16bit_int, default=default_fram)
'{"With": [1, 5678, "Support"]}'
"""
#Bake datetime objects into Strings
if isinstance( object_to_serialize, datetime):
if object_to_serialize.utcoffset() is None:
#Append 'Z', to conform to ISO8601 date spec
return object_to_serialize.isoformat()+'Z'
#Else, TZ offset present. TZ info will be automatically included per
# docs.python.org/3/library/datetime.html#datetime.datetime.isoformat
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, date):
# No Timezone info available,
return object_to_serialize.isoformat()
if isinstance( object_to_serialize, time):
#No date available.Prefix:'1970-01-01T',to conform to ISO8601 date spec
isoformat = '1970-01-01T'+object_to_serialize.isoformat()
if object_to_serialize.utcoffset() is None:
# No Timezone info available,
# Append 'Z',to conform to ISO8601 date spec
return isoformat+'Z'
#else, TZ offset has already been added to string.
return isoformat
if isinstance(object_to_serialize, numpy.integer):
return int(object_to_serialize) #per Python issue24313, no support for numpy Ints
    #Else, wasn't a datetime Date & we don't handle anything else.. so:
raise TypeError(repr(object_to_serialize) + " is not JSON serializable")
| 5,341,554
|
def read_random_stack_for_multiple_pickles(all_spectra, all_sequence, num_spectra, stack_size):
"""TODO(nh2tran): docstring."""
print("read_random_stack_for_multiple_pickles()")
random_idx = random.sample(xrange(num_spectra[-1]), min(stack_size, num_spectra[-1]))
random_locations = []
f_idx = np.array(num_spectra)
for i in random_idx:
idx = np.where((f_idx > i))[0][0]
if idx == 0:
random_locations.append([idx, i])
else:
random_locations.append([idx, i-num_spectra[idx-1]])
return read_spectra_from_multiple_pickles(all_spectra, all_sequence, random_locations)
| 5,341,555
|
def reset_fields_to_default(self):
    """Reset datamodel to default: core fields and default custom fields."""
self.application.coll_model.remove({ "field_class" : "custom" })
create_generic_custom_fields()
| 5,341,556
|
def test_user_create_command(gsuite_client, mocker):
"""
Scenario: gsuite-user-create should works if valid arguments are provided.
Given:
- Command args.
When:
- Calling gsuite-user-create command with the arguments provided.
Then:
- Ensure CommandResult entry should be as expected.
"""
from GSuiteAdmin import user_create_command
with open('test_data/user_create_args.json', 'r') as file:
args = json.load(file)
with open('test_data/user_create_response.json') as file:
api_response = json.load(file)
with open('test_data/user_create_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch('GSuiteAdmin.GSuiteClient.http_request', return_value=api_response)
command_result = user_create_command(gsuite_client, args)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']['GSuite.User(val.id == obj.id)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == ['id']
assert command_result.outputs_prefix == 'GSuite.User'
| 5,341,557
|
def perfect_pattern(dict_class_counts, distinct_classes, pattern, supporting_items, results_dir):
"""
    Checks whether the pattern is perfect, i.e. whether a common class can be found directly without constructing
    the relative class hierarchy.
:param dict_class_counts: the count of each distinct class combinations to weight the superclass
:param distinct_classes: the distinct class combinations of relative hierarchy level 0
:param pattern: the pattern to evaluate
:param supporting_items: the classes to which the supporting items belong to
:param results_dir: the directory to store the results
:return:
"""
base_case = False
superclasses = list()
for key in dict_class_counts:
for subkey in key.split():
logging.debug('Subkey:\t%s', set([subkey]))
subkey_class = True
for cl in distinct_classes:
if set([subkey]).issubset(set(cl)):
pass # do nothing
else:
logging.debug('%s is no subset of %s', set([subkey]), set(cl))
subkey_class = False
break
if subkey_class and subkey not in superclasses:
logging.debug('Setting base_case to True!')
base_case = True
superclasses.append(subkey)
weights = [0 for superclass in superclasses]
weights_indexes = [[0] for superclass in superclasses]
if base_case:
logging.info('Found pattern on first hierarchy level!')
construct_result(pattern, {}, weights, weights_indexes, superclasses, dict_class_counts)
write_to_csv(pattern, distinct_classes, weights, superclasses, weights_indexes,
get_distribution(supporting_items), True, results_dir)
return base_case
| 5,341,558
|
def _benchmark_grep(filename, pattern):
"""Benchmarks grep.
Args:
- filename: The name of the file to be searched.
- pattern: The pattern we are searching for in the file.
"""
time_taken = timeit(setup=BENCHMARK_SETUP, number=SINGLE_STRING_TESTS,
stmt='subprocess.call(%s)' % GREP_CALL_ARGS.format(pattern, filename))
return time_taken / SINGLE_STRING_TESTS
| 5,341,559
|
def inotify_test(
test_paths: dict[str, pathlib.Path], tmp_path: pathlib.Path
) -> InotifyTest:
"""Generate a pre-configured test instance of `inotify_simple.INotify`.
Parameters
----------
test_paths: dict[str, pathlib.Path]
The test fixture that generates test files based on configuration
(:obj:`test_paths`).
tmp_path: pathlib.Path
The pytest `tmp_path` fixture providing a path object to a temporary
directory which is unique to each test function
(:obj:`_pytest.tmpdir.tmp_path`).
Returns
-------
inotify_simple: InotifyTest
A pre-configured `InotifyTest` object with the specified test paths.
"""
inotify = InotifyTest(tmp_path)
for key, path in test_paths.items():
inotify.add_watch(path)
return inotify
| 5,341,560
|
async def mock_pyatag_sleep():
"""Mock out pyatag sleeps."""
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
await asyncio_sleep(0)
with patch("pyatag.gateway.asyncio.sleep", new=sleep):
yield
| 5,341,561
|
def set_system_bios( context, settings, system_id = None ):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system( context, system_id )
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings object
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get( bios_uri )
etag = bios.getheader( "ETag" )
if "@Redfish.Settings" in bios.dict:
bios_uri = bios.dict["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
bios_settings = context.get( bios_uri )
etag = bios_settings.getheader( "ETag" )
# Update the settings
payload = { "Attributes": settings }
headers = None
if etag is not None:
headers = { "If-Match": etag }
response = context.patch( bios_uri, body = payload, headers = headers )
verify_response( response )
return response
| 5,341,562
|
def ucs():
"""show unicode categories with 10 example values"""
print(A.print(ex=CAT))
| 5,341,563
|
def create() -> UserSecurityModel:
"""
Creates a new instance of the USM
"""
return UserSecurityModel()
| 5,341,564
|
def clDice(v_p, v_l):
"""[this function computes the cldice metric]
Args:
v_p ([bool]): [predicted image]
v_l ([bool]): [ground truth image]
Returns:
[float]: [cldice metric]
"""
if len(v_p.shape)==2:
tprec = cl_score(v_p,skeletonize(v_l))
tsens = cl_score(v_l,skeletonize(v_p))
elif len(v_p.shape)==3:
tprec = cl_score(v_p,skeletonize_3d(v_l))
tsens = cl_score(v_l,skeletonize_3d(v_p))
return 2*tprec*tsens/(tprec+tsens)
| 5,341,565
|
def validate_regex(regex_str):
"""
Checks if a given string is valid regex
    :param str regex_str: a suspicious string that may or may not be valid regex
:rtype: bool
:return: True if valid regex was give, False in case of TypeError or re.error
"""
# another of those super basic function where i am not sure if there isn't an easier way
try:
re.compile(regex_str)
return True
except re.error:
return False
except TypeError: # for the string not being one
return False
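Illustrative usage (assumes `re` is imported at module level, as the function requires):

import re

print(validate_regex(r"\d+"))  # True
print(validate_regex("["))     # False (re.error)
print(validate_regex(None))    # False (TypeError)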
| 5,341,566
|
def main():
"""
Initialize as global variable the settings as found in the config file and the command-line arguments.
"""
global done_list
global CONFIG_FILE
done_list = []
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument("titles", type=str, nargs='*',
help="All the names separated by a space will be \
searched and downloaded.")
parser.add_argument("-f", "--file", type=str,
help="Specify the file name, to download using \
reference as.")
parser.add_argument("-u", "--url", type=str,
help="Specify the url where list is located.")
parser.add_argument("-k", "--keyword", type=str,
help="Add additional keywords for search.")
parser.add_argument("-n", "--count", type=int,
help="Number of files to download from playlist/url.")
parser.add_argument("-r", "--repair_only", action="store_true",
help="Skip downloading and only add metadata.")
parser.add_argument("--ignore_downloaded", action="store_true",
help="Skip checking the downloaded.txt and \
download all files.")
parser.add_argument("--no_downloaded", action="store_true",
help="Skip adding the downloaded files to \
downloaded.txt.")
parser.add_argument("-c", "--config", type=str,
help="Specify config file.")
parser.add_argument("--ignore-config", action="store_true",
help="Ignore the default config file.")
parser.add_argument("--arrange", '-a', action="store_true",
help="Rearrange directory into artsit->album folder.")
parser.add_argument("--directory", "-d", type=str,
help="Specify a directory.")
# Check if arguments have a config file and use that.
args_temp = parser.parse_args()
if args_temp.config:
CONFIG_FILE = args_temp.config
if os.path.exists(CONFIG_FILE):
logging.debug("Using config file.")
sys.argv = ['@'+CONFIG_FILE] + sys.argv
else:
logging.debug("No conf file found at %s", CONFIG_FILE)
args = parser.parse_args(sys.argv)
logger.debug("ARGV: %s", sys.argv)
logger.debug("ARGUMENTS SUPPLIED: %s", str(args))
if args.directory and os.path.exists(args.directory):
ydl_opts['outtmpl'] = os.path.join(args.directory, ydl_opts['outtmpl'])
logger.debug("Output path: %s", ydl_opts['outtmpl'])
if args.arrange:
if not args.directory:
print("Need a directory to rearrange. Supply one with '-d' or '--directory'.")
logger.debug("Directory not supplied, rearrangement failed.")
else:
logger.debug("Arranging in the direcotory: %s", args.directory)
Rearrange(args.directory)
logger.debug("Rearrangement finished.")
if args.url:
print("You want an url:", args.url)
if 'thetoptens' in args.url:
if args.count:
GetTopTensMusic(args.url, args.keyword, args.count)
else:
GetTopTensMusic(args.url, args.keyword)
if 'billboard' in args.url:
if args.count:
GetBillboardsMusic(args.url, args.count)
else:
GetBillboardsMusic(args.url)
if 'youtube.com' in args.url:
GetYoutubeMusic(args.url)
args.titles = args.titles[1:]
if args.titles:
logger.debug("Getting titles: %s", args.titles)
GetMusicFromList(args.titles, args.ignore_downloaded,
args.no_downloaded)
if args.file:
try:
with open(args.file) as f:
tbd = f.readlines()
GetMusicFromList(tbd, args.ignore_downloaded,
args.no_downloaded)
except FileNotFoundError:
print("The specified file was not found.")
print("Are you sure " + args.file + " exists here?")
| 5,341,567
|
def add_host_ages(
input_metadata_file,
host_id_list,
host_birthday_list,
float_years,
output_metadata_file,
) -> None:
"""Add host age in years on to a metadata file.
The column added will be named "host_age_years" if --float-years isn't
set, and "host_age" if --float-years *is* set.
"""
manipulate_md(
input_metadata_file,
[host_id_list, host_birthday_list, float_years],
output_metadata_file,
_add_host_ages,
)
| 5,341,568
|
def load_stats_from_file(date):
"""
Load stats data from a stat file.
Params:
date -- a `datetime` instance.
"""
file_path = _build_stats_file_path(date)
if not isfile(file_path):
raise IOError # This will be FileNotFoundError in Python3.
with open(file_path, 'r') as fin:
return json.loads(fin.read())
| 5,341,569
|
def one_permutation(index):
""""
save the interaction score on one permutation dataset to NN_idx
"""
name = 'NN_' + str(index)
## Generate permutation datasets with main effects NN model
phenotype_y_perm = permutation_data_generation(BRRR, x_train, phenotype_y, gene_size)
phenotype_y_test_perm = permutation_data_generation(BRRR, x_test, phenotype_y_test, gene_size)
## Gene interaction NN training
sigma_int = tau_estimate(pve_int, slope_int, intercept_int)
encoder = Encoder(gene_size, sigma_int); predictor = Predictor_wide(num_gene, sparsity, sigma_int, num_hidden_nodes)
SSBNN = SparseBNN(encoder, predictor)
train_errors, test_errors = training(SSBNN, x_train, phenotype_y_perm, x_test, phenotype_y_test_perm, learning_rate, batch_size, num_epoch)
print('BNN model :')
splited_x_test = splite_data(x_test, gene_size)
PTVE_test(SSBNN, splited_x_test, phenotype_y_test_perm)
# Detect interactions from the trained gene interaction NN
num_samples = 50 ## number of samples of explanation
torch.manual_seed(0); perm = torch.randperm(x_test.size(0)); idx = perm[:num_samples]
gene_test, _ = SSBNN.encoder(splite_data(x_test[idx], gene_size), training = False); gene_test.detach_()
baseline = torch.mean(gene_test, dim = 0).view(1,-1)
GlobalSIS_BNN, topGlobalSIS_BNN, Shapely_BNN = GlobalSIS(SSBNN.predictor, gene_test, baseline)
np.savetxt(cwd+'/PermutationDistribution/'+name+'.csv', Shapely_BNN, delimiter=",")
| 5,341,570
|
def null_gt_null(left, right):
""":yaql:operator >
Returns false. This function is called when left and right are null.
:signature: left > right
:arg left: left operand
:argType left: null
:arg right: right operand
:argType right: null
:returnType: boolean
.. code:
yaql> null > null
false
"""
return False
| 5,341,571
|
def read_header(file):
""" Read the information in an OpenFOAM file header.
Parameters
----------
file : str
Name (path) of OpenFOAM file.
Returns
-------
info : dictionary
The information in the file header.
"""
with open(file, 'r') as f:
content = f.read()
info = {}
info['file'] = file
# read logo
logo_info = _read_logo(content)
info['foam_version'] = logo_info['Version']
info['website'] = logo_info['Website']
# read header
header_info = _read_header_info(content)
info['foam_class'] = header_info['foam_class']
info['name'] = header_info['name']
info['location'] = header_info['location']
return info
| 5,341,572
|
def set_object_show_name(object, show=True):
""" Display the name of an object.
Parameters:
object (obj): Object to display name.
show (bool): True or False.
Returns:
None
"""
object.show_name = show
| 5,341,573
|
def write_batch_preds_to_csv(predictions, csv_fpath: Path) -> None:
"""print batch preds to csv"""
    raise NotImplementedError(f"Not implemented for type {type(predictions)}")
| 5,341,574
|
def point_from_b58(b):
"""Return b58 decoded P."""
x, y = [int_from_b58(t) for t in b.split(",")]
return ECC.EccPoint(x=x, y=y, curve=CURVE)
| 5,341,575
|
def _cp_embeds_into(cp1, cp2):
"""Check that any state in ComplexPattern2 is matched in ComplexPattern1.
"""
# Check that any state in cp2 is matched in cp1
# If the thing we're matching to is just a monomer pattern, that makes
# things easier--we just need to find the corresponding monomer pattern
# in cp1
if cp1 is None or cp2 is None:
return False
cp1 = as_complex_pattern(cp1)
cp2 = as_complex_pattern(cp2)
if len(cp2.monomer_patterns) == 1:
mp2 = cp2.monomer_patterns[0]
# Iterate over the monomer patterns in cp1 and see if there is one
# that has the same name
for mp1 in cp1.monomer_patterns:
if _mp_embeds_into(mp1, mp2):
return True
return False
| 5,341,576
|
def get_tags_for_message(khoros_object, msg_id):
"""This function retrieves the tags for a given message.
.. versionadded:: 2.8.0
:param khoros_object: The core :py:class:`khoros.Khoros` object
:type khoros_object: class[khoros.Khoros]
:param msg_id: The Message ID for the message from which to retrieve tags
:type msg_id: str, int
:returns: A list of tags associated with the message
"""
tag_list = []
query = f"SELECT text FROM tags WHERE messages.id = '{msg_id}'" # nosec
response = liql.perform_query(khoros_object, liql_query=query, verify_success=True)
entries = api.get_items_list(response)
for entry in entries:
tag_list.append(entry['text'])
return tag_list
| 5,341,577
|
def _build_hierarchical_histogram_computation(
lower_bound: float, upper_bound: float, num_bins: int,
aggregation_factory: factory.UnweightedAggregationFactory):
"""Utility function creating tff computation given the parameters and factory.
Args:
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
aggregation_factory: The aggregation factory used to construct the federated
computation.
Returns:
A tff federated computation function.
"""
@computations.tf_computation(computation_types.SequenceType(tf.float32))
def client_work(client_data):
return _discretized_histogram_counts(client_data, lower_bound, upper_bound,
num_bins)
aggregator = aggregation_factory.create(client_work.type_signature.result)
@computations.federated_computation(
computation_types.at_clients(client_work.type_signature.parameter))
def hierarchical_histogram_computation(federated_client_data):
# Work done at clients.
client_histogram = intrinsics.federated_map(client_work,
federated_client_data)
# Aggregation to server.
return aggregator.next(aggregator.initialize(), client_histogram).result
return hierarchical_histogram_computation
| 5,341,578
|
def test_fst_sta_start_session_no_add_params(dev, apdev, test_params):
"""FST STA start session - no add params"""
fst_start_session(apdev, test_params, bad_param_session_add_no_params,
False)
| 5,341,579
|
def detect_intent_texts(project_id, session_id, language_code):
"""Returns the result of detect intent with texts as inputs.
Using the same `session_id` between requests allows continuation
of the conversation."""
import dialogflow_v2 as dialogflow
session_client = dialogflow.SessionsClient()
    session_id = '1234567'  # NOTE: overrides the session_id argument with a fixed test session
session = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session))
import pandas as pd
with open('AskUbuntu/test-ubuntu_with-labels', 'rb') as fp:
TEST_DATA = pickle.load(fp)
correct_count = 0
total_count = 0
predictions = []
true_labels = []
predicted_entities = dict()
true_labels_for_testing = list()
ne_class_list = set()
sample_count = 10000
with open('log.txt', 'w') as log:
for index, example in enumerate(TEST_DATA):
text, label, ent_dict = example
if len(text) >= 256:
continue
entities = ent_dict
ent_labels = dict()
for ent in entities:
start, stop, ent_type = ent
ent_type = ent_type.replace('_', '')
ne_class_list.add(ent_type)
if ent_type in ent_labels:
ent_labels[ent_type].append((start, stop))
else:
ent_labels[ent_type] = [(start, stop)]
true_labels_for_testing.append(ent_labels)
# print(true_labels_for_testing)
true_labels.append(label)
intent = str(label)
total_count += 1
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input)
print('=' * 20)
# print(response)
intent_predicted = response.query_result.fulfillment_text
if intent_predicted.strip():
predictions.append(int(intent_predicted))
else:
predictions.append(-1)
if intent == intent_predicted:
correct_count += 1
else:
log.write('True:' + intent + '\n')
log.write('Predicted:' + intent_predicted + '\n\n')
parameters = response.query_result.parameters.fields
# print(parameters)
entity_dict = dict()
for ent in parameters:
for item in parameters[ent].list_value:
try:
entity_text_len = len(item)
entity_dict[entity_text_len].add((item, ent))
                    except KeyError:
entity_dict[entity_text_len] = {(item, ent)}
predicted_entities[index] = entity_dict
print('Query text: {}'.format(response.query_result.query_text))
# print('Query Entities: {0}'.format(parameters))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text))
# sleep(0.25)
print("Total count:{}, Correct count:{}".format(total_count, correct_count))
if index == sample_count:
break
print("Accuracy:{}".format(correct_count/total_count))
pred_df = pd.Series(predictions)
true_df = pd.Series(true_labels)
pred_df.reset_index(drop=True, inplace=True)
true_df.reset_index(drop=True, inplace=True)
acc = accuracy_score(true_df, pred_df)
print("Accuracy sklearn:{}".format(acc))
print(f1_score(true_df, pred_df, average='weighted'))
print(precision_score(true_df, pred_df, average='weighted'))
print(recall_score(true_df, pred_df, average='weighted'))
# print(f1_score(true_df, pred_df, average='micro'))
# print(precision_score(true_df, pred_df, average='micro'))
# print(recall_score(true_df, pred_df, average='micro'))
test_segments = list()
for idx, example in enumerate(TEST_DATA):
raw_text, _, _ = example
if len(raw_text) >= 256:
continue
entity_dict = predicted_entities[idx]
entities_list = list()
result = []
for index in sorted(entity_dict, reverse=True):
for ent, key in entity_dict[index]:
# print(ent, key)
for match in finditer(re.escape(ent), raw_text):
is_new = False
start, stop = match.span()
for low, high in result:
if (low <= start <= high) or (low <= stop <= high):
break
else:
is_new = True
result.append((start, stop))
if is_new:
entities_list.append((start, stop, key))
# For evaluation
test_segments.append((raw_text, entities_list))
if idx == sample_count:
break
results_of_prediction = list()
for example in test_segments:
_, entities = example
ent_labels = dict()
for ent in entities:
start, stop, ent_type = ent
ent_type = ent_type.replace('_', '')
if ent_type in ent_labels:
ent_labels[ent_type].append((start, stop))
else:
ent_labels[ent_type] = [(start, stop)]
results_of_prediction.append(ent_labels)
print(len(results_of_prediction), len(true_labels_for_testing))
from eval.quality import calculate_prediction_quality
f1, precision, recall, results = \
calculate_prediction_quality(true_labels_for_testing,
results_of_prediction,
tuple(ne_class_list))
print(f1, precision, recall, results)
| 5,341,580
|
def check_endpoint(func):
"""Check available endpoint."""
@wraps(func)
def wrapper(*args, **kwargs):
sig = inspect.signature(func)
args_value = sig.bind(*args, **kwargs)
endpoint = args_value.arguments["endpoint"]
if endpoint not in AVAILABLE_ENDPOINTS:
raise ClientException(f"Unavailable endpoints: {endpoint}")
return func(*args, **kwargs)
return wrapper
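# A minimal usage sketch of the decorator above. The fetch() function and the
# "markets" endpoint name are illustrative assumptions; the decorator itself
# still requires the module-level AVAILABLE_ENDPOINTS and ClientException.
@check_endpoint
def fetch(endpoint, params=None):
    return f"GET /{endpoint}"

# fetch(endpoint="markets")  # raises ClientException unless "markets" is in AVAILABLE_ENDPOINTS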
| 5,341,581
|
def read_sql_one(id):
"""
This function responds to a request for api/reviews/{id}
with one matching review from reviews
:param id: id of the review
:return: review matching the id
"""
response = Response.query.filter_by(id=id).one_or_none()
if response is not None:
# serialize the data for the response
response_schema = ResponseSchema()
return response_schema.dump(response).data
else:
abort(404, f"Review {id} not found.")
| 5,341,582
|
def ds_to_numpy(ds: Dataset) -> Tuple[np.ndarray, np.ndarray]:
"""Transform torch dataset to numpy arrays
Parameters
----------
ds : Dataset
COVID dataset
Returns
-------
Tuple[np.ndarray, np.ndarray]
Flattened images + labels
"""
imgs = []
labels = []
for img, label in ds:
imgs.append(img.detach().cpu().numpy().flatten()[np.newaxis, ])
labels.append(label)
return np.concatenate(imgs), np.array(labels)
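# Usage sketch: a plain list of (image, label) pairs stands in for the COVID
# Dataset here; the 1x28x28 image shape and binary labels are illustrative only.
demo_ds = [(torch.rand(1, 28, 28), i % 2) for i in range(8)]
X, y = ds_to_numpy(demo_ds)
print(X.shape, y.shape)  # (8, 784) (8,)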
| 5,341,583
|
def _b64(b):
"""Helper function base64 encode for jose spec."""
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
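# Quick sanity check: the canonical JOSE/JWT header encodes without padding.
print(_b64(b'{"alg":"RS256"}'))  # eyJhbGciOiJSUzI1NiJ9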
| 5,341,584
|
def load_from_arff(filename, label_count, label_location="end",
input_feature_type='float', encode_nominal=True, load_sparse=False,
return_attribute_definitions=False):
"""Method for loading ARFF files as numpy array
Parameters
----------
filename : str
path to ARFF file
    label_count: integer
        number of labels in the ARFF file
    label_location: str {"start", "end"} (default is "end")
        whether the ARFF file contains labels at the beginning of the
        attributes list ("start", MEKA format)
        or at the end ("end", MULAN format)
input_feature_type: numpy.type as string (default is "float")
the desire type of the contents of the return 'X' array-likes,
default 'i8', should be a numpy type,
see http://docs.scipy.org/doc/numpy/user/basics.types.html
encode_nominal: bool (default is True)
whether convert categorical data into numeric factors - required
for some scikit classifiers that can't handle non-numeric
input features.
load_sparse: boolean (default is False)
whether to read arff file as a sparse file format, liac-arff
breaks if sparse reading is enabled for non-sparse ARFFs.
return_attribute_definitions: boolean (default is False)
whether to return the definitions for each attribute in the
dataset
Returns
-------
X : :mod:`scipy.sparse.lil_matrix` of `input_feature_type`, shape=(n_samples, n_features)
input feature matrix
y : :mod:`scipy.sparse.lil_matrix` of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
names of attributes : List[str]
list of attribute names from ARFF file
"""
if not load_sparse:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.DENSE
)
matrix = sparse.csr_matrix(
arff_frame['data'], dtype=input_feature_type
)
else:
arff_frame = arff.load(
open(filename, 'r'), encode_nominal=encode_nominal, return_type=arff.COO
)
data = arff_frame['data'][0]
row = arff_frame['data'][1]
col = arff_frame['data'][2]
matrix = sparse.coo_matrix(
(data, (row, col)), shape=(max(row) + 1, max(col) + 1)
)
if label_location == "start":
X, y = matrix.tocsc()[:, label_count:].tolil(), matrix.tocsc()[:, :label_count].astype(int).tolil()
feature_names = arff_frame['attributes'][label_count:]
label_names = arff_frame['attributes'][:label_count]
elif label_location == "end":
X, y = matrix.tocsc()[:, :-label_count].tolil(), matrix.tocsc()[:, -label_count:].astype(int).tolil()
feature_names = arff_frame['attributes'][:-label_count]
label_names = arff_frame['attributes'][-label_count:]
else:
        # unknown label_location
return None
if return_attribute_definitions:
return X, y, feature_names, label_names
else:
return X, y
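# Runnable sketch on a tiny hand-written ARFF (2 features, 2 trailing labels);
# the relation name and data values are made up purely for illustration.
import tempfile, os

_demo_arff = """@RELATION demo
@ATTRIBUTE f1 NUMERIC
@ATTRIBUTE f2 NUMERIC
@ATTRIBUTE label1 {0,1}
@ATTRIBUTE label2 {0,1}
@DATA
1.0,2.0,1,0
0.5,1.5,0,1
"""
with tempfile.NamedTemporaryFile('w', suffix='.arff', delete=False) as tmp:
    tmp.write(_demo_arff)
X, y = load_from_arff(tmp.name, label_count=2, label_location='end')
print(X.shape, y.shape)  # (2, 2) (2, 2)
os.remove(tmp.name)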
| 5,341,585
|
def gapfill_to_ensemble(model, iterations=1, universal=None, lower_bound=0.05,
penalties=None, exchange_reactions=False,
demand_reactions=False, integer_threshold=1e-6):
"""
Performs gapfilling on model, pulling reactions from universal.
Any existing constraints on base_model are maintained during gapfilling, so
these should be set before calling gapfill_to_ensemble (e.g. secretion of
metabolites, choice of objective function etc.).
Currently, only iterative solutions are supported with accumulating
penalties (i.e. after each iteration, the penalty for each reaction
doubles).
Parameters
----------
    model : cobra.Model
        The model to perform gap filling on.
    iterations : int, 1
        The number of gapfilling iterations to perform; the resulting solutions
        are used to construct the returned ensemble.
universal : cobra.Model
A universal model with reactions that can be used to complete the
model.
lower_bound : float, 0.05
The minimally accepted flux for the objective in the filled model.
penalties : dict, None
A dictionary with keys being 'universal' (all reactions included in
the universal model), 'exchange' and 'demand' (all additionally
added exchange and demand reactions) for the three reaction types.
Can also have reaction identifiers for reaction specific costs.
Defaults are 1, 100 and 1 respectively.
integer_threshold : float, 1e-6
The threshold at which a value is considered non-zero (aka
integrality threshold). If gapfilled models fail to validate,
you may want to lower this value. However, picking a threshold that is
too low may also result in reactions being added that are not essential
to meet the imposed constraints.
exchange_reactions : bool, False
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool, False
Consider adding demand reactions for all metabolites.
Returns
-------
ensemble : medusa.core.Ensemble
The ensemble object created from the gapfill solutions.
"""
gapfiller = GapFiller(model, universal=universal,
lower_bound=lower_bound, penalties=penalties,
demand_reactions=demand_reactions,
exchange_reactions=exchange_reactions,
integer_threshold=integer_threshold)
solutions = gapfiller.fill(iterations=iterations)
print("finished gap-filling. Constructing ensemble...")
ensemble = _build_ensemble_from_gapfill_solutions(model,solutions,
universal=universal)
return ensemble
| 5,341,586
|
def reparameterize(mu, logvar, n_samples=1):
"""Reparameterization trick.
Args:
mu (torch.Tensor): Mean.
        logvar (torch.Tensor): Logarithm of the variance.
n_samples (int): The number of samples.
Returns:
torch.Tensor: Samples drawn from the given Gaussian distribution.
The shape is equal to mu if n_samples is 1,
and (n_samples, *mu.shape) if n_samples is larger than 1.
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn(n_samples, *std.size(), device=std.device)
z = mu + eps * std
return z.squeeze(0)
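# Usage sketch (illustrative shapes): draw 5 samples per element from N(mu, exp(logvar)).
mu = torch.zeros(4, 3)
logvar = torch.zeros(4, 3)  # unit variance
z = reparameterize(mu, logvar, n_samples=5)
print(z.shape)  # torch.Size([5, 4, 3]); with n_samples=1 the result has shape (4, 3)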
| 5,341,587
|
def rotationMatrixFromNormals(v0,v1,tol=1e-20):
"""
Performs the minimum number of rotations to define a rotation from the direction indicated by the vector n0 to the direction indicated by n1.
The axis of rotation is n0 x n1
https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
:param numpy.array v0: vector of length 3
:param numpy.array v1: vector of length 3
:param tol = 1e-20: tolerance. If the norm of the cross product between the two vectors is below this, no rotation is performed
:rtype: numpy.array, 3x3
:return: rotation matrix which rotates the frame so that n0 is aligned with n1
"""
# ensure both n0, n1 are vectors of length 1
assert len(v0) == 3, "Length of n0 should be 3"
assert len(v1) == 3, "Length of n1 should be 3"
# ensure both are true normals
n0 = v0*1./np.linalg.norm(v0)
n1 = v1*1./np.linalg.norm(v1)
n0dotn1 = n0.dot(n1)
# define the rotation axis, which is the cross product of the two vectors
rotAx = crossProd(n0,n1)
if np.linalg.norm(rotAx) < tol:
return np.eye(3,dtype=float)
rotAx *= 1./np.linalg.norm(rotAx)
cosT = n0dotn1/(np.linalg.norm(n0)*np.linalg.norm(n1))
sinT = np.sqrt(1.-n0dotn1**2)
ux = np.array([[0., -rotAx[2], rotAx[1]], [rotAx[2], 0., -rotAx[0]], [-rotAx[1], rotAx[0], 0.]],dtype=float)
return np.eye(3,dtype=float) + sinT*ux + (1.-cosT)*(ux.dot(ux))
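# Usage sketch; assumes the module-level crossProd helper used above behaves
# like np.cross. Rotate the +z axis onto the +x axis and check the result.
R = rotationMatrixFromNormals(np.array([0., 0., 1.]), np.array([1., 0., 0.]))
print(np.allclose(R.dot(np.array([0., 0., 1.])), np.array([1., 0., 0.])))  # True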
| 5,341,588
|
def test_temporal():
"""
Show that the kronecker product of a spherical harmonic
covariance and a temporal covariance projected into
flux space is just the elementwise product of the
spherical harmonic covariance projected into flux
space and the temporal covariance.
"""
np.random.seed(0)
# Dimensions
ydeg = 5
N = (ydeg + 1) ** 2
K = 10
# Random flux design matrix
A = np.random.randn(K, N)
# Random Ylm covariance
Ly = np.tril(np.random.randn(N, N))
Sy = Ly @ Ly.T
# Random temporal covariance
Lt = np.tril(np.random.randn(K, K))
St = Lt @ Lt.T
# Two ways of computing the same thing
cov1 = (A @ Sy @ A.T) * St
cov2 = block_diag(*A) @ np.kron(St, Sy) @ block_diag(*A).T
assert np.allclose(cov1, cov2)
| 5,341,589
|
def test_original(ssf_app):
"""Test an unmodified call, without function calls."""
original = open_url('/', application=ssf_app)
assert (original.SimpleGrid.SimpleGrid.shape == (2, 3))
| 5,341,590
|
def P_to_array(P: NestedDicts) -> np.array:
""" Converts a transition matrix in nested dictionary format to a numpy array.
P is usually given as starting state -> action -> ending state w/ data, we reorder this to
action -> starting state -> ending state -> transition probability.
"""
# Action, Starting State, Ending State, value is probability
out = np.zeros(shape=(len(P[0]), len(P), len(P)))
for start_state, actions in P.items():
for action, results in actions.items():
for prob, end_state, _, __ in results:
out[action, start_state, end_state] += prob
return out
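# Usage sketch with a tiny 2-state, 1-action MDP in the gym-style nested-dict
# format: P[state][action] = [(prob, next_state, reward, done), ...].
# The numbers below are illustrative only.
P_demo = {
    0: {0: [(0.7, 0, 0.0, False), (0.3, 1, 1.0, False)]},
    1: {0: [(1.0, 1, 0.0, True)]},
}
T = P_to_array(P_demo)
print(T.shape)   # (1, 2, 2) -> (action, start_state, end_state)
print(T[0, 0])   # [0.7 0.3]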
| 5,341,591
|
def write_dataframe(df, fileName):
"""write_dataframe
Writes a DataFrame into a .csv file.
Input:
df -- the DataFrame to write
fileName -- the name of the file (will be saved in ./data/)
"""
path = dataPath + fileName
df.to_csv(path)
| 5,341,592
|
def get_available_smc_versions():
"""
    Return the list of available SMC image repo tags. SMC versioning is encoded
    in the tag, e.g. d70/smc:v6.1.2 (the version is the part after the colon).
"""
return [repotag for image in get_images(filter='d70/smc')
for repotag in image.get('RepoTags')]
| 5,341,593
|
def pprint(obj, cols=4, columnwise=True, gap=4):
"""
Print the given list in evenly-spaced columns.
Parameters
----------
obj : list
The list to be printed.
cols : int
The number of columns in which the list should be printed.
columnwise : bool, default=True
If True, the items in the list will be printed column-wise.
If False the items in the list will be printed row-wise.
gap : int
The number of spaces that should separate the longest column
item/s from the next column. This is the effective spacing
between columns based on the maximum len() of the list items.
"""
sobj = [str(item) for item in obj]
if cols > len(sobj):
cols = len(sobj)
max_len = max([len(item) for item in sobj])
if columnwise:
cols = int(math.ceil(float(len(sobj)) / float(cols)))
plist = [sobj[i: i+cols] for i in range(0, len(sobj), cols)]
if columnwise:
        if len(plist[-1]) != cols:
            plist[-1].extend([''] * (cols - len(plist[-1])))
plist = zip(*plist)
printer = '\n'.join([
''.join([c.ljust(max_len + gap) for c in p])
for p in plist])
    print(printer)
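# Usage sketch: lay out 1..7 in 3 columns, column-wise (the default).
pprint(list(range(1, 8)), cols=3)
# 1    4    7
# 2    5
# 3    6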
| 5,341,594
|
def boolean_fn2(a, b, c):
""" Return the truth value of (a ∧ b) ∨ (-a ∧ -b) """
return a and b or not a and not b
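# Quick truth-table check (the unused argument c is passed as None here):
for a in (False, True):
    for b in (False, True):
        print(a, b, boolean_fn2(a, b, None))  # True exactly when a == b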
| 5,341,595
|
def get_ica_gds_configuration() -> libgds.Configuration:
"""
    Get the configuration object for ICA GDS
:return:
"""
from utils.ica_base import get_configuration
return get_configuration(libgds.Configuration)
| 5,341,596
|
def retry_load_images(image_paths, retry=10, backend="pytorch"):
"""
This function is to load images with support of retrying for failed load.
Args:
image_paths (list): paths of images needed to be loaded.
retry (int, optional): maximum time of loading retrying. Defaults to 10.
backend (str): `pytorch` or `cv2`.
Returns:
imgs (list): list of loaded images.
"""
for i in range(retry):
imgs = []
for image_path in image_paths:
with g_pathmgr.open(image_path, "rb") as f:
img_str = np.frombuffer(f.read(), np.uint8)
img = cv2.imdecode(img_str, flags=cv2.IMREAD_COLOR)
imgs.append(img)
if all(img is not None for img in imgs):
if backend == "pytorch":
imgs = torch.as_tensor(np.stack(imgs))
return imgs
else:
logger.warn("Reading failed. Will retry.")
time.sleep(1.0)
if i == retry - 1:
raise Exception("Failed to load images {}".format(image_paths))
| 5,341,597
|
def b2p(exts):
"""Convert two points of a polygon into its bounding box.
(Rectangular polygon parallel with axes.)
"""
p0x = exts[0][0]
p0y = exts[0][1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = exts[0][2]
p1y = exts[0][3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
i = []
if exts[1] is not None:
for h in exts[1]:
p0x = h[0]
p0y = h[1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = h[2]
p1y = h[3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
return e, i
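# Usage sketch: exterior ring from the bbox (xmin, ymin, xmax, ymax) = (0, 0, 2, 1),
# with no interior rings (the second element of exts is None).
e, i = b2p([(0, 0, 2, 1), None])
print(e)  # 0 0 0.0 2 0 0.0 2 1 0.0 0 1 0.0 0 0 0.0
print(i)  # []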
| 5,341,598
|
def unix_to_human_time(utime, alt_format=0):
"""convert Unix time to Human readable time"""
try:
fraction = utime - int(utime)
except OverflowError as err:
t = 'Unix time %s too long to convert, substituting 0' % utime
# TODO log this time issue
        print('NEED TO LOG THIS TIME ISSUE:', t)
fraction = utime = 0
# handle special case of -1 (not handled correctly by 'date')
    if int(utime) == -1:
return 1969, 12, 31, 23, 59, 59
cmd = 'date -u -d "1970-01-01 %d sec" +"%%Y %%m %%d %%H %%M %%S"' % int(utime)
try:
result = getoutput(cmd)
# s = split(result)
s = result.split()
# s[5] = atoi(s[5]) + fraction
s[5] = int(s[5]) + fraction
except ValueError as err:
t = 'date conversion error\ndate command was: %sdate command returned: %s' % (cmd, result)
# TODO log this time issue
        print('NEED TO LOG THIS TIME ISSUE:', t)
raise ValueError(err)
if alt_format == 1:
return "%s_%s_%s_%s_%s_%06.3f" % tuple(s)
elif alt_format == 0:
return "%s/%s/%s %s:%s:%06.3f" % tuple(s)
else: # i.e. alt_format == 2
        s[0:5] = list(map(int, s[0:5]))
return tuple(s)
| 5,341,599
|