# content | id — dataset column header left over from extraction; not part of the code.
def open_tar(path_or_file, *args, **kwargs):
    """
    A with-context for tar files. Extra positional and keyword arguments are
    forwarded to tarfile.open.

    If path_or_file is an already-open file object, the caller remains
    responsible for closing it; only the tarfile wrapper is closed here.
    """
    if isinstance(path_or_file, Compatibility.string):
        path, fileobj = path_or_file, None
    else:
        path, fileobj = None, path_or_file
    with closing(tarfile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
        yield tar
def min_rank(series, ascending=True):
    """Rank *series* using the "min" tie-breaking method.

    Equivalent to ``series.rank(method='min', ascending=ascending)``: tied
    values all receive the smallest rank of the group.

    Args:
        series: column to rank.

    Kwargs:
        ascending (bool): rank in ascending order (default is ``True``).
    """
    return series.rank(method="min", ascending=ascending)
def write_config_key(stream, envconfig, key, intro_comment=""):
    """Write the YAML representation of a single key.

    Removes ``key`` from ``envconfig`` and writes its YAML representation to
    the output stream. Does nothing when the key is absent.

    Parameters
    ----------
    stream
        Output stream.
    envconfig : dict
        Configuration dictionary; mutated (the key is popped).
    key
        Key from the dictionary.
    intro_comment : str, optional
        Introduction comment written above the key.
    """
    if key not in envconfig:
        return
    value = envconfig.pop(key)
    if intro_comment:
        safe_write(stream, "\n# %s\n\n" % intro_comment)
    write_config(stream, {key: value})
def glCurrentViewport(x=None, y=None, width=None, height=None):
    """ Returns a (x, y, width, height)-tuple with the current viewport bounds.
        If x, y, width and height are given, set the viewport bounds.
    """
    # Switching between an onscreen canvas (e.g. 256x256) and an offscreen
    # buffer of a different size (e.g. 1024x1024) requires resetting the
    # viewport, otherwise part of the buffer's information would be lost.
    requested = (x, y, width, height)
    if None not in requested:
        glViewport(x, y, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(x, width, y, height, -1, 1)
        glMatrixMode(GL_MODELVIEW)
    current = (GLint * 4)()
    glGetIntegerv(GL_VIEWPORT, current)
    return tuple(current)
def test_version():
    """Check that we're using Boreutil's implementation."""
    result = check_version("head")
    assert result
def algorithms():
    """Return the names of the available stemming algorithms.

    Currently only the "english" (porter2) algorithm is supported.
    """
    return ["english"]
def load_yaml(filepath):
    """Import a YAML config file.

    Returns the parsed content, or None (after printing the error) when the
    file is not valid YAML.
    """
    with open(filepath, "r") as stream:
        try:
            parsed = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
        else:
            return parsed
def zipmerge(name, srcs):
    """Macro wrapper for zipmerge

    Defines a genrule that merges several zip archives into a single
    ``name.zip`` output using the bundled //third_party/libzip:zipmerge tool.

    Args:
      name: Name to be used for this rule. It produces name.zip
      srcs: List of zips to be combined.
    """
    native.genrule(
        name = name,
        srcs = srcs,
        outs = ["%s.zip" % name],
        tools = ["//third_party/libzip:zipmerge"],
        message = "Combining following zips: %s" % ",".join(srcs),
        # $@ expands to the single declared output (name.zip), followed by all
        # input archives. NOTE(review): confirm the meaning of the -s flag
        # against the zipmerge man page.
        cmd = (
            "$(location //third_party/libzip:zipmerge) -s $@ %s" % (" ".join(srcs))
        ),
    )
def trial_4_detect_plaintext_type_from_otp():
    """
    Trial 4
    -------
    Testing ability to recognize the type of text when the cipher is a real OTP (random key for each specimen)
    The task of the ML is to recognize the difference between english text and binary data
    Proven to be impossible when the keys are single use and true random, so we expect this to fail

    ML model - FC NN
    Training rounds = 50
    Plaintext = Binary / Text
    Ciphers = OTP (XOR with random key for each record)
    Result = Failure
    """
    # Classify the PLAINTEXT_TYPE field across all supported plaintext types.
    evaluation_field = EncryptionDatastoreConstants.PLAINTEXT_TYPE
    possible_values = EncryptionDatastoreConstants.POSSIBLE_PLAINTEXT_TYPES
    plaintext_generators = None  # use default
    # XOR in ECB mode with a fresh key per record emulates a one-time pad.
    # Assumes the 2000-byte key covers every record — TODO confirm.
    encryption_generators = {
        (EncryptionMethod.XOR, BlockMode.ECB): EncryptionManager(
            EncryptionMethod.XOR,
            encryption_key_size=2000)
    }
    model_creator = KerasFullyConnectedNNModelCreator(len(possible_values))
    train_and_evaluate(model_creator, evaluation_field, possible_values, plaintext_generators, encryption_generators)
def parse_json(json_path):
    """
    Parse a JSON file.

    Args:
        json_path: input json file path

    Returns:
        The parsed JSON content (typically a dict), or None when the file
        cannot be read or does not contain valid JSON.
    """
    try:
        with open(json_path) as json_file:
            json_dict = json.load(json_file)
    except Exception as exc:
        # The previous message ("json file load error !") discarded both the
        # offending path and the cause, making failures undiagnosable.
        # Lazy %-formatting keeps the logging call cheap when suppressed.
        logging.error("json file load error: %s (%s)", json_path, exc)
    else:
        return json_dict
def validate_project_with_external_validator(args, project_path):
    """
    Try to use an external validator (typically tng-sdk-validation)
    to validate the given service project.

    Uses ``args.validation_level`` and ``args.workspace``. Silently skips
    validation when the validator package is not installed.

    Throws TangoValidationException on validation error.
    """
    # check if external validator is available; an import failure only
    # disables validation instead of failing the whole command
    try:
        from tngsdk.validation import cli as v_cli
        from tngsdk.validation.validator import Validator
    except BaseException as ex:
        LOG.error("Skipping validation: tng-sdk-validate not installed?")
        LOG.debug(ex)
        return
    # ok! let us validate ...
    v = Validator()
    # define validation_level: single-letter levels become short options
    # (-s / -i / -t), longer names become long options (--...)
    if len(args.validation_level) == 1:
        validation_level = "-"+args.validation_level
    else:
        validation_level = "--"+args.validation_level
    # define arguments for validator
    v_args = v_cli.parse_args([
        validation_level,  # levels -s / -i / -t
        "--debug",  # temporary
        "--project", project_path,  # path to project
        "--workspace", args.workspace  # workspace path
    ])
    v_cli.dispatch(v_args, v)
    # check validation result
    # - warnings: only logged, do not abort
    if v.warning_count > 0:
        LOG.warning("There have been {} tng-validate warnings"
                    .format(v.warning_count))
        LOG.warning("tng-validate warnings: '{}'".format(v.warnings))
    # - errors: abort with an exception carrying the validator's findings
    if v.error_count > 0:
        raise TangoValidationException("tng-validate error(s): '{}'"
                                      .format(v.errors))
def _get_ex_msg(obj):
""" Get exception message """
return obj.value.message if hasattr(obj, 'value') else obj.message | 5,324,511 |
def __getattr__(name: str) -> Any:
    """Lazily imports modules and items within them.

    Module-level ``__getattr__``: only invoked when ``name`` is not found by
    normal module attribute lookup, so successful imports are cached on the
    module and resolved here at most once.

    Args:
        name (str): name of sourdough module or item.

    Raises:
        AttributeError: if there is no module or item matching 'name'.

    Returns:
        Any: a module or item stored within a module.
    """
    # 'importables' maps public names to their defining modules; resolution is
    # deferred to lazily_import at first attribute access.
    return lazily_import(name = name,
                         package = __name__,
                         mapping = importables)
def check_availabel_resourses(target_fname):
    """Check availability of resources before starting the spider.

    Reads the spider targets from ``target_fname`` and validates each one:
    option keys, to-text convertor names, default handling by URL extension,
    and local-filesystem availability.

    Returns:
        (all_right, rpt): ``all_right`` is False if any check failed;
        ``rpt`` is a list of human-readable report messages.
    """
    rpt = []
    all_right = True
    target_generator = parser_target_for_spider(target_fname)
    for at in target_generator:
        info = at[0]
        head_rpt_msg = 'Name node: ['+info[0]+']\nUrl: ['+info[1]+']\n'
        # Validate option keys against the configured whitelist.
        # NOTE(review): the inner loop reuses the name 'at', shadowing the
        # outer loop variable — harmless (the outer 'for' rebinds it on the
        # next iteration) but easy to misread.
        params = json.loads(info[3])
        available_keys = tools.get_app_cfg()['App']['Spider']['available_keys']
        for at in params:
            if at not in available_keys:
                all_right = False
                rpt.append(head_rpt_msg+"\tError: find desabled key params.")
        # Validate the to-text convertor: it must be registered, or at least
        # follow the std_/custom_ naming convention.
        if 'to_text' in params:
            available_convertors = tools.get_app_cfg()['App']['Spider']['to_text_convertors']
            if params['to_text'] not in available_convertors:
                all_right = False
                rpt.append(head_rpt_msg+"\tError: no registred to-text-convertor - "+
                           params['to_text']+
                           ". May be registred in"+
                           " /app-cfgs/spider_cfg.yaml file.")
            else:
                if 'std_' not in params['to_text'] and 'custom_' not in params['to_text']:
                    all_right = False
                    rpt.append(head_rpt_msg+"\tError: bad name to-text-convertor - "+
                               params['to_text']+
                               ". Must begin with std_ or custom_ prefix.")
        # Default convertor check: with no explicit params the URL extension
        # must be one of the auto-detected types.
        url = info[1]
        extenton = url.split('.')[-1]
        if not params:
            auto_detected_urls = tools.get_app_cfg()['App']['Spider']['auto_detected_urls']
            if extenton not in auto_detected_urls:
                all_right = False
                rpt.append(head_rpt_msg+"\tError: url не распознан и пользовательски настройки не задны")
        # Resource availability check (local filesystem path).
        # NOTE(review): this branch runs when the path was NOT found locally,
        # but the (Russian) message says it WAS found — wording looks
        # inverted; confirm the intended text.
        url_exist = os.path.exists(url)
        if not url_exist:
            all_right = False
            rpt.append(head_rpt_msg+"\tError: url найден в пределах операционной системы."+
                       " Если это сетевой адрес задайте пареметры [external_url: yes get(or post) add params]")
        # External (network) address availability: not implemented yet, so
        # flagged as a failure with a warning message.
        if 'external_url' in params:
            all_right = False
            rpt.append(head_rpt_msg+"\tWarning: проверка внешних адресов не реализована")
    # All checks done
    return all_right, rpt
def set_R_path(path=""):
    """
    Explicitly set path to `R` installation dir.

    If R is not available on the path, then it can be explicitly
    specified here.

    Use path="" to reset to default system path.

    :param path: directory of the R installation, or "" for the system default.
    """
    # Stored on the module-wide settings object; consumed wherever R is invoked.
    settings._R_path = path
def remove_last_period(mm):
    """
    Shorten the planning horizon of the model by one period.

    Finds the largest period index among the 'harv' variables, then removes
    every variable ('harv', 'age') and constraint ('harv', 'age', 'env')
    belonging to that final period.
    """
    m = mm.model
    harv_vars = pg.get_variables(m, "harv")
    # Period index is the last comma-separated field of the variable name,
    # with its trailing bracket stripped.
    last_period = max(float(hv.varName.split(",")[-1][:-1]) for hv in harv_vars)
    for var_group in ["harv", "age"]:
        for var in pg.get_variables(m, var_group, filter_values={-1: last_period}):
            m.remove(var)
    for con_group in ["harv", "age", "env"]:
        for con in pg.get_constraints(m, con_group, filter_values={-1: last_period}):
            m.remove(con)
    m.update()
def _validate_isofactor(isofactor, signed):
""" [Docstring]
"""
if isofactor[0] == 0.0:
return (False, "Error: 'isovalue' cannot be zero")
if isofactor[1] <= 1.0:
return (False, "Error: 'factor' must be greater than one")
if not signed and isofactor[0] < 0:
return (False, "Error: Negative 'isovalue' in absolute "
"thresholding mode")
return (True, "") | 5,324,516 |
def get_response_comments(request, comment_id, page, page_size, requested_fields=None):
    """
    Return the list of comments for the given thread response.

    Arguments:

        request: The django request object used for build_absolute_uri and
          determining the requesting user.

        comment_id: The id of the comment/response to get child comments for.

        page: The page number (1-indexed) to retrieve

        page_size: The number of comments to retrieve per page

        requested_fields: Indicates which additional fields to return for
        each child comment. (i.e. ['profile_image'])

    Returns:

        A paginated result containing a list of comments

    Raises:

        CommentNotFoundError: the comment service reported a request error.
        PageNotFoundError: ``page`` is beyond the available results.
    """
    try:
        # Fetch the comment first to learn its thread, then fetch the whole
        # thread with all responses so the parent response can be located.
        cc_comment = Comment(id=comment_id).retrieve()
        cc_thread, context = _get_thread_and_context(
            request,
            cc_comment["thread_id"],
            retrieve_kwargs={
                "with_responses": True,
                "recursive": True,
            }
        )
        # Question threads keep responses in two separate buckets
        # (endorsed / non-endorsed); discussion threads use "children".
        if cc_thread["thread_type"] == "question":
            thread_responses = itertools.chain(cc_thread["endorsed_responses"], cc_thread["non_endorsed_responses"])
        else:
            thread_responses = cc_thread["children"]
        response_comments = []
        for response in thread_responses:
            if response["id"] == comment_id:
                response_comments = response["children"]
                break
        # Pagination is done in-process by slicing the already-fetched list.
        response_skip = page_size * (page - 1)
        paged_response_comments = response_comments[response_skip:(response_skip + page_size)]
        if not paged_response_comments and page != 1:
            raise PageNotFoundError("Page not found (No results on this page).")
        results = _serialize_discussion_entities(
            request, context, paged_response_comments, requested_fields, DiscussionEntity.comment
        )
        comments_count = len(response_comments)
        # Ceiling division; an empty result set still reports one page.
        num_pages = (comments_count + page_size - 1) // page_size if comments_count else 1
        paginator = DiscussionAPIPagination(request, page, num_pages, comments_count)
        return paginator.get_paginated_response(results)
    except CommentClientRequestError:
        raise CommentNotFoundError("Comment not found")
def GetParentStatementNode(
    node: AST.Node,
) -> Optional[AST.Node]:
    """\
    Returns the statement that is the logical parent of this node.

    This code attempts to handle the complexities of embedded phrases (for example, a statement that
    is made up of other phrases) where this node may be nested multiple levels below what ultimately
    constitutes its parent.
    """
    ancestor = node.Parent
    while ancestor is not None:
        ancestor_type = ancestor.Type
        # The first ancestor whose type name ends in "Statement" wins.
        if ancestor_type is not None and ancestor_type.Name.endswith("Statement"):
            break
        ancestor = ancestor.Parent
    return cast(Optional[AST.Node], ancestor)
def count_possibilities(dic):
    """
    Counts how many unique names can be created from the
    combinations of each lists contained in the passed dictionary.

    Args:
        dic: mapping of category -> sequence of alternatives.

    Returns:
        int: the product of the lengths of all values. An empty dict yields
        1 (the empty product); any empty value yields 0.
    """
    # Only the values matter; iterating .items() and discarding the key was
    # wasteful and obscured intent.
    total = 1
    for options in dic.values():
        total *= len(options)
    return total
def user_update():
    """Update the authenticated user's profile from a JSON request body.

    Expects 'username', 'email' and 'password' keys; the password is only
    changed when a truthy value is supplied. Returns a JSON payload with
    code 200 on success or 400 on any failure.
    """
    username = request.get_json()['username']
    email = request.get_json()['email']
    password = request.get_json()['password']
    try:
        # g.user is the currently authenticated user (set by the auth layer).
        id = g.user.id
        currentuser = User.query.get(id)
        # Only overwrite the password when a non-empty one was supplied.
        if password:
            currentuser.password = password
        currentuser.username = username
        # NOTE(review): 'email' is read above but never assigned to the user —
        # looks like a missing `currentuser.email = email`; confirm.
        db.session.add(currentuser)
        db.session.commit()
        msg = "success"
        res = {
            'code': 200,
            'msg': msg
        }
        return jsonify(res)
    except Exception as e:
        # NOTE(review): the exception 'e' is swallowed without logging and the
        # session is not rolled back — consider db.session.rollback() and
        # logging the cause.
        msg = "fail"
        res = {
            'code': 400,
            'msg': msg
        }
        return jsonify(res)
def feature_matrix_hdf5(smis: Iterable[str], size: int, *,
                        featurizer: Featurizer = Featurizer(),
                        name: str = 'fps',
                        path: str = '.') -> Tuple[str, Set[int]]:
    """Precalculate the feature matrix of smis with the given featurizer and
    store the matrix in an HDF5 file

    Parameters
    ----------
    smis : Iterable[str]
        the SMILES strings for which to generate the feature matrix
    size : int
        the length of the iterable
    featurizer : Featurizer, default=Featurizer()
        an object that encodes inputs from an identifier representation to
        a feature representation
    name : str (Default = 'fps')
        the name of the output HDF5 file
    path : str (Default = '.')
        the path under which the HDF5 file should be written

    Returns
    -------
    fps_h5 : str
        the filename of an hdf5 file containing the feature matrix of the
        representations generated from the molecules in the input file.
        The row ordering corresponds to the ordering of smis
    invalid_idxs : Set[int]
        the set of indices in smis containing invalid inputs
    """
    fps_h5 = str(Path(path)/f'{name}.h5')
    # fingerprint = featurizer.fingerprint
    # radius = featurizer.radius
    # length = featurizer.length
    # Parallelism (batch sizing) is derived from the ray cluster, so ray must
    # already be initialized by the caller.
    ncpu = int(ray.cluster_resources()['CPU'])
    with h5py.File(fps_h5, 'w') as h5f:
        CHUNKSIZE = 512
        # NOTE(review): the dataset is created without maxshape, so the
        # resize() call at the end should raise for any invalid input —
        # confirm against the h5py docs / runtime behavior.
        fps_dset = h5f.create_dataset(
            'fps', (size, len(featurizer)),
            chunks=(CHUNKSIZE, len(featurizer)), dtype='int8'
        )
        batch_size = CHUNKSIZE * 2 * ncpu
        n_batches = size//batch_size + 1
        invalid_idxs = set()
        # i counts valid rows written; offset counts invalid inputs skipped,
        # so i + offset is the index in the original input ordering.
        i = 0
        offset = 0
        for smis_batch in tqdm(batches(smis, batch_size), total=n_batches,
                               desc='Precalculating fps', unit='batch'):
            fps = feature_matrix(smis_batch, featurizer)
            for fp in tqdm(fps, total=batch_size, smoothing=0., leave=False):
                if fp is None:
                    invalid_idxs.add(i+offset)
                    offset += 1
                    continue
                # fp = next(fps)
                fps_dset[i] = fp
                i += 1
        # original dataset size included potentially invalid xs
        valid_size = size - len(invalid_idxs)
        if valid_size != size:
            fps_dset.resize(valid_size, axis=0)
    return fps_h5, invalid_idxs
def xpath_error(code, message=None, token=None, prefix='err'):
    """
    Returns an XPath error instance related with a code. An XPath/XQuery/XSLT error code
    (ref: https://www.w3.org/2005/xqt-errors/) is an alphanumeric token starting with four
    uppercase letters and ending with four digits.

    :param code: the error code.
    :param message: an optional custom additional message.
    :param token: an optional token instance.
    :param prefix: the namespace prefix to apply to the error code, defaults to 'err'.
    """
    if ':' not in code:
        pcode = '%s:%s' % (prefix, code) if prefix else code
    elif not prefix or not code.startswith(prefix + ':'):
        raise ElementPathValueError('%r is not an XPath error code' % code)
    else:
        pcode = code
        code = code[len(prefix) + 1:]

    # Lookup table mapping each known code to (exception class, default
    # message). XPST*/XPDY*/XPTY* are XPath 2.0 parser/dynamic/type errors
    # (https://www.w3.org/TR/xpath20/#id-errors); FO* are data type and
    # function errors.
    error_table = {
        'XPST0001': (ElementPathValueError, 'Parser not bound to a schema'),
        'XPST0003': (ElementPathValueError, 'Invalid XPath expression'),
        'XPDY0002': (MissingContextError, 'Dynamic context required for evaluate'),
        'XPTY0004': (ElementPathTypeError, 'Type is not appropriate for the context'),
        'XPST0005': (ElementPathValueError, 'A not empty sequence required'),
        'XPST0008': (ElementPathNameError, 'Name not found'),
        'XPST0010': (ElementPathNameError, 'Axis not found'),
        'XPST0017': (ElementPathTypeError, 'Wrong number of arguments'),
        'XPTY0018': (ElementPathTypeError, 'Step result contains both nodes and atomic values'),
        'XPTY0019': (ElementPathTypeError, 'Intermediate step contains an atomic value'),
        'XPTY0020': (ElementPathTypeError, 'Context item is not a node'),
        'XPDY0050': (ElementPathTypeError, 'Type does not match sequence type'),
        'XPST0051': (ElementPathNameError, 'Unknown atomic type'),
        'XPST0080': (ElementPathNameError, 'Target type cannot be xs:NOTATION or xs:anyAtomicType'),
        'XPST0081': (ElementPathNameError, 'Unknown namespace'),
        'FOER0000': (ElementPathError, 'Unidentified error'),
        'FOAR0001': (ElementPathValueError, 'Division by zero'),
        'FOAR0002': (ElementPathValueError, 'Numeric operation overflow/underflow'),
        'FOCA0001': (ElementPathValueError, 'Input value too large for decimal'),
        'FOCA0002': (ElementPathValueError, 'Invalid lexical value'),
        'FOCA0003': (ElementPathValueError, 'Input value too large for integer'),
        'FOCA0005': (ElementPathValueError, 'NaN supplied as float/double value'),
        'FOCA0006': (ElementPathValueError,
                     'String to be cast to decimal has too many digits of precision'),
        'FOCH0001': (ElementPathValueError, 'Code point not valid'),
        'FOCH0002': (ElementPathLocaleError, 'Unsupported collation'),
        'FOCH0003': (ElementPathValueError, 'Unsupported normalization form'),
        'FOCH0004': (ElementPathValueError, 'Collation does not support collation units'),
        'FODC0001': (ElementPathValueError, 'No context document'),
        'FODC0002': (ElementPathValueError, 'Error retrieving resource'),
        'FODC0003': (ElementPathValueError, 'Function stability not defined'),
        'FODC0004': (ElementPathValueError, 'Invalid argument to fn:collection'),
        'FODC0005': (ElementPathValueError, 'Invalid argument to fn:doc or fn:doc-available'),
        'FODT0001': (ElementPathValueError, 'Overflow/underflow in date/time operation'),
        'FODT0002': (ElementPathValueError, 'Overflow/underflow in duration operation'),
        'FODT0003': (ElementPathValueError, 'Invalid timezone value'),
        'FONS0004': (ElementPathKeyError, 'No namespace found for prefix'),
        'FONS0005': (ElementPathValueError, 'Base-uri not defined in the static context'),
        'FORG0001': (ElementPathValueError, 'Invalid value for cast/constructor'),
        'FORG0002': (ElementPathValueError, 'Invalid argument to fn:resolve-uri()'),
        'FORG0003': (ElementPathValueError,
                     'fn:zero-or-one called with a sequence containing more than one item'),
        'FORG0004': (ElementPathValueError,
                     'fn:one-or-more called with a sequence containing no items'),
        'FORG0005': (ElementPathValueError,
                     'fn:exactly-one called with a sequence containing zero or more than one item'),
        'FORG0006': (ElementPathTypeError, 'Invalid argument type'),
        'FORG0008': (ElementPathValueError,
                     'The two arguments to fn:dateTime have inconsistent timezones'),
        'FORG0009': (ElementPathValueError,
                     'Error in resolving a relative URI against a base URI in fn:resolve-uri'),
        'FORX0001': (ElementPathValueError, 'Invalid regular expression flags'),
        'FORX0002': (ElementPathValueError, 'Invalid regular expression'),
        'FORX0003': (ElementPathValueError, 'Regular expression matches zero-length string'),
        'FORX0004': (ElementPathValueError, 'Invalid replacement string'),
        'FOTY0012': (ElementPathValueError, 'Argument node does not have a typed value'),
    }

    entry = error_table.get(code)
    if entry is None:
        raise ElementPathValueError(message or 'Unknown XPath error code %r.' % code, token=token)
    error_class, default_message = entry
    return error_class(message or default_message, pcode, token)
def upgrade(new_rpm_path, local_config_dir=None, overwrite=False):
    """
    Copy and upgrade a new presto-server rpm to all of the nodes in the
    cluster. Retains existing node configuration.

    The existing topology information is read from the config.json file.
    Unlike install, there is no provision to supply topology information
    interactively.

    The existing cluster configuration is collected from the nodes on the
    cluster and stored on the host running presto-admin. After the
    presto-server packages have been upgraded, presto-admin pushes the
    collected configuration back out to the hosts on the cluster.

    Note that the configuration files in the presto-admin configuration
    directory are not updated during upgrade.

    :param new_rpm_path - The path to the new Presto RPM to
                          install
    :param local_config_dir - (optional) Directory to store the cluster
                              configuration in. If not specified, a temp
                              directory is used.
    :param overwrite - (optional) if set to True then existing
                       configuration will be overwritten.
    :param --nodeps - (optional) Flag to indicate if server upgrade
                      should ignore checking Presto rpm package
                      dependencies. Equivalent to adding --nodeps
                      flag to rpm -U.
    """
    # Stop all servers before touching packages or configuration.
    stop()
    # Snapshot each node's config so it can be restored after the upgrade.
    temp_config_tar = configure_cmds.gather_config_directory()
    package.deploy_upgrade(new_rpm_path)
    # Push the previously collected configuration back out to the cluster.
    configure_cmds.deploy_config_directory(temp_config_tar)
def _setPropertyValue(self, name, value, typeString = ''):
    """Set the typed value of a property by its name, creating a child element
    to hold the property if needed.

    Dispatches to a type-specific handler ("_setPropertyValue<Type>") chosen
    by the runtime type of *value*.
    """
    handler_name = "_setPropertyValue" + getTypeString(value)
    handler = getattr(self.__class__, handler_name)
    return handler(self, name, value, typeString)
def build_toy_input_feature_values(features,
                                   use_rank_two=False,
                                   has_catset=False):
  """Create a set of input features values.

  These examples will fall respectively in the nodes 6, 5, 3, 2 of
  _build_toy_random_forest.

  Args:
    features: Dictionary of input feature tensors. If None, the features are
      indexed by name (used in tf2).
    use_rank_two: Should the feature be passed as one or two ranked tensors.
    has_catset: Add two categorical-set features to the dataspec.

  Returns:
    Dictionary of feature values.
  """
  is_tf2 = features is None

  def shape(x):
    # Rank-2 mode wraps each scalar in its own row; tf2 additionally wraps
    # the result in a constant tensor, while tf1 keeps plain Python lists.
    if use_rank_two:
      y = [[v] for v in x]
    else:
      y = x
    if is_tf2:
      return tf.constant(y)
    else:
      return y

  if is_tf2:
    # In tf2 mode there is no feature-tensor dict, so make features[k]
    # return the key itself: the result dict is keyed by feature name.
    class Identity:

      def __getitem__(self, key):
        return key

    features = Identity()
  feature_values = {
      features["a"]: shape([2, 2, 0, 0]),
      features["b"]: shape(["x", "z", "x", "z"]),
      features["c"]: shape([1, 2, 1, 2]),
      features["bool_feature"]: shape([1, 0, 1, 1])
  }
  if has_catset:
    # tf1 needs the eager RaggedTensorValue variant (constant_value).
    ragged_constant = tf.ragged.constant if is_tf2 else tf.ragged.constant_value
    feature_values[features["d"]] = ragged_constant(
        [["x"], ["y"], ["y", "z"], [""]], dtype=tf.string)
    feature_values[features["e"]] = ragged_constant(
        [[11, 12], [], [14, 15, 16], [-1]], dtype=tf.int32)
  return feature_values
def parse_arguments():
    """Read arguments from a command line."""
    parser = argparse.ArgumentParser(description="Arguments get parsed via --commands")
    parser.add_argument(
        "-v",
        metavar="verbosity",
        type=int,
        default=4,
        help="Verbosity of logging: 0 -critical, 1- error, 2 -warning, 3 -info, 4 -debug",
    )
    args = parser.parse_args()
    # Map the numeric -v option onto the logging module's level constants.
    levels = {
        0: logging.CRITICAL,
        1: logging.ERROR,
        2: logging.WARNING,
        3: logging.INFO,
        4: logging.DEBUG,
    }
    logging.basicConfig(format="%(message)s", level=levels[args.v], filename="output/errors.log")
    return args
def light_head_preprocess_for_train(image, labels, bboxes,
                                    out_shape, data_format='NHWC',
                                    scope='light_head_preprocess_train'):
    """Preprocesses the given image for training.

    Applies random canvas expansion, random patch sampling, random horizontal
    flip, resizing, and random color distortion, then rescales to the
    VGG-style whitened range.

    Args:
        image: A `Tensor` representing an image of arbitrary size.
        labels: per-object class labels aligned with `bboxes`.
        bboxes: ground-truth bounding boxes (normalized coordinates).
        out_shape: target (height, width) of the preprocessed image.
        data_format: 'NHWC' (default) or 'NCHW' output layout.
        scope: name scope wrapping the preprocessing ops.

    Returns:
        A preprocessed (image, labels, bboxes) tuple.
    """
    fast_mode = False
    with tf.name_scope(scope, 'light_head_preprocess_train', [image, labels, bboxes]):
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')
        # Convert to float scaled [0, 1].
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        tf_summary_image(image, bboxes, 'image_with_bboxes_0')
        # With probability ~0.7, paste the image onto a larger random canvas
        # (expansion ratio drawn from [2, 3)) to simulate smaller objects.
        # image, bboxes = control_flow_ops.cond(tf.random_uniform([1], minval=0., maxval=1., dtype=tf.float32)[0] < 0.5, lambda: (image, bboxes), lambda: tf_image.ssd_random_expand(image, bboxes, 2))
        image, bboxes = control_flow_ops.cond(tf.random_uniform([1], minval=0., maxval=1., dtype=tf.float32)[0] < 0.3, lambda: (image, bboxes), lambda: tf_image.ssd_random_expand(image, bboxes, tf.random_uniform([1], minval=2, maxval=3, dtype=tf.int32)[0]))
        tf_summary_image(image, bboxes, 'image_on_canvas_1')
        # Distort image and bounding boxes: sample a random patch covering at
        # least the given object-overlap ratios.
        #print(image, labels, bboxes)
        random_sample_image, labels, bboxes = tf_image.ssd_random_sample_patch(image, labels, bboxes, ratio_list=[0.4, 0.6, 0.8, 1.])
        tf_summary_image(random_sample_image, bboxes, 'image_shape_distorted_2')
        # Randomly flip the image horizontally.
        random_sample_flip_image, bboxes = tf_image.random_flip_left_right(random_sample_image, bboxes)
        random_sample_flip_resized_image = tf_image.resize_image(random_sample_flip_image, out_shape,
                                                                 method=tf.image.ResizeMethod.BILINEAR,
                                                                 align_corners=False)
        tf_summary_image(random_sample_flip_resized_image, bboxes, 'image_fliped_and_resized_3')
        # Randomly distort the colors. There are 4 ways to do it.
        dst_image = apply_with_random_selector(
            random_sample_flip_resized_image,
            lambda x, ordering: distort_color(x, ordering, fast_mode),
            num_cases=4)
        tf_summary_image(dst_image, bboxes, 'image_color_distorted_4')
        # Rescale to VGG input scale: [0,1] -> [0,2], then subtract the
        # channel means expressed on the same scale (mean/127.5).
        image = dst_image * 2.
        image.set_shape([None, None, 3])
        image = tf_image_whitened(image, [_R_MEAN/127.5, _G_MEAN/127.5, _B_MEAN/127.5])
        # Image data format.
        if data_format == 'NCHW':
            image = tf.transpose(image, perm=(2, 0, 1))
        return image, labels, bboxes
def get_dgs(align_dg_dict):
    """
    Function that creates inverse dictionary of align_dg_dict

    align_dg_dict: dict. Dictionary of alignments and clustering DG
        assignments, as produced by get_spectral(graph) or get_cliques(graph).

    Returns dg_align_dict: dict, k=dg_id, v=[alignids]
    """
    # Single O(n) pass instead of rescanning the whole mapping once per DG
    # (previously O(n * #DGs)). Alignment order within each group follows the
    # iteration order of align_dg_dict, exactly as before.
    dg_align_dict = {}
    for align_id, dg in align_dg_dict.items():
        dg_align_dict.setdefault(dg, []).append(align_id)
    return dg_align_dict
#test case: | 5,324,528 |
def DateTime_GetBeginDST(*args, **kwargs):
    """DateTime_GetBeginDST(int year=Inv_Year, int country=Country_Default) -> DateTime"""
    # Thin SWIG-generated wrapper delegating to the native _misc_ extension.
    return _misc_.DateTime_GetBeginDST(*args, **kwargs)
def apply_voigt1d(fid, pks, snorms, a0, b0, a, b, zp, bw, flo=None,
                  up=True, link_gg=True, link_ll=True, dx_snr=None, dx_snr_mode='outside',
                  f_cutoff=0., ftype='voigt', outfile=''):
    """ Apply Voigt-1D window function, fit the spectrum, and return the treated SnR & FWHM

    :arguments
        fid: np1darray          FID signal
        pks: list of float      peak positions
        snorms: float           The normalized intensities of the peaks
        a0: float               FID initial a0
        b0: float               FID initial b0
        a: float                Voigt-1D window parameter a
        b: float                Voigt-1D window parameter b
        zp: int                 zero-padding length (None -> derived from
                                a0, b0 via find_fid_tr_len)
        bw: float               chirp bandwidth
        flo: float              LO frequency (required when outfile is set;
                                it is formatted into the file header)
        up: bool                is chirp going up
        link_gg: bool           link all Gaussian FWHM
        link_ll: bool           link all Lorentzian FWHM
        dx_snr: float           +/- dx_snr from pk to calculate noise & SnR
        dx_snr_mode: str
            'inside'    take pk - dx_snr < x < pk + dx_snr as noise range
            'outside'   take x < pk - dx_snr || x > pk + dx_snr as noise range
        f_cutoff: float         low frequency cutoff
        outfile: str            filename to save fit
        ftype: str              lineshape function type
            'voigt'
            'gaussian'
            'lorentzian'
            'complex-voigt'

    :returns
        snr: float      SnR of the spectral line
        vv_fit: float   FWHM of the spectral line determined by the fit
    """
    # Time axis; assumes the 1e-3 factor converts sample index to the unit
    # used by the a/b window parameters — TODO confirm units.
    t = np.arange(len(fid)) * 1e-3
    if a == 0 and b == 0:
        # No apodization. NOTE: the windowed branch multiplies by an extra
        # factor of t that this branch omits.
        wf = np.ones_like(t)
    else:
        wf = np.exp(- a * t**2 - b * t) * t
    if isinstance(zp, type(None)):
        zp = find_fid_tr_len(a0, b0)
    # Magnitude spectrum of the windowed, zero-padded FID.
    y = np.abs(np.fft.rfft(fid * wf, zp))
    x = np.fft.rfftfreq(zp) * 1e3
    # convert to IF normalized spectrum
    xc, yc = to_mol_freq(x, y / np.max(y), bw, up=up, flo=flo, f_cutoff=f_cutoff)
    # initial guess: Gaussian/Lorentzian FWHM implied by the combined
    # window + FID decay parameters.
    gg0 = 2 * np.sqrt(np.log(2) * (a + a0)) / np.pi
    ll0 = max((b + b0) / np.pi, 0)
    vv_ab = fwhm_ab(a0 + a, b0 + b)
    if dx_snr:
        # NOTE(review): idx starts all-True. The 'outside' branch narrows it
        # with logical_and, but the 'inside' branch uses logical_or against an
        # all-True array, which never narrows anything — looks like a bug
        # (idx should start all-False for 'inside'); confirm.
        idx = np.array(np.ones_like(xc), dtype='bool')
        if dx_snr_mode == 'outside':
            for _pk in pks:
                idx = np.logical_and(idx, np.logical_or(xc < _pk - dx_snr, xc > _pk + dx_snr))
            # fit this xc & yc
            res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                               link_ll=link_ll, ftype=ftype)
            noise = np.std(res.residual[idx])
        elif dx_snr_mode == 'inside':    # if SnR mode is inside, we only need to fit the data inside
            for _pk in pks:
                idx = np.logical_or(idx, np.logical_and(xc > _pk - dx_snr, xc < _pk + dx_snr))
            # cut xc & yc
            xc = xc[idx]
            yc = yc[idx]
            # fit this xc & yc
            res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                               link_ll=link_ll, ftype=ftype)
            noise = np.std(res.residual)
        else:
            raise ValueError('Unknown dx_snr_mode string')
    else:
        res = fit_spectrum(xc, yc, pks, snorms, gg0, ll0, link_gg=link_gg,
                           link_ll=link_ll, ftype=ftype)
        noise = np.std(res.residual)
    # SnR: peak height above the fitted baseline (p0) over the noise level.
    snr = (np.max(yc) - res.params['p0'].value) / noise
    # Human-readable summary of every fitted parameter (stderr may be None
    # when the fit does not produce uncertainties).
    par_list = []
    for name, p in res.params.items():
        if isinstance(p.stderr, float):
            par_list.append('{:s}={:>8.4f}({:>8.4f})'.format(name, p.value, p.stderr))
        else:
            par_list.append('{:s}={:>8.4f}({:^8s})'.format(name, p.value, 'nan'))
    if isinstance(res.redchi, float):
        par_list.append('redchi={:>7.4f}'.format(res.redchi))
    else:
        par_list.append('redchi=nan')
    # FWHM extracted according to the chosen lineshape.
    if ftype == 'voigt':
        vv_fit = fwhm_voigt_fit(res.params['ll0'].value, res.params['gg0'].value)
    elif ftype == 'complex-voigt':
        vv_fit = fwhm_complex_voigt_fit(res.params['ll0'].value, res.params['gg0'].value)
    elif ftype == 'gaussian':
        vv_fit = res.params['gg0'].value
    elif ftype == 'lorentzian':
        vv_fit = res.params['ll0'].value
    else:
        raise ValueError('Unknown function type')
    print('a={:>6.4f}'.format(a), 'b={:>7.4f}'.format(b), 'vv_ab={:>6.4f}'.format(vv_ab),
          'vv_fit={:>6.4f}'.format(vv_fit), 'snr={:>6.2f}'.format(snr), ' '.join(par_list))
    if outfile:     # save fit
        # Reconstruct each fitted peak profile for the output file.
        yfits = []      # a list of yfit for each peak
        for i in range(len(pks)):
            if ftype == 'voigt':
                _x0 = res.params['x'+str(i)].value
                _gg = res.params['gg'+str(i)].value
                _ll = res.params['ll'+str(i)].value
                _s = res.params['s'+str(i)].value
                yfits.append(voigt(xc - _x0, _gg, _ll) * _s)
            elif ftype == 'complex-voigt':
                _x0 = res.params['x' + str(i)].value
                _gg = res.params['gg' + str(i)].value
                _ll = res.params['ll' + str(i)].value
                _s = res.params['s' + str(i)].value
                yfits.append(complex_voigt(xc - _x0, _gg, _ll) * _s)
            elif ftype == 'gaussian':
                _x0 = res.params['x' + str(i)].value
                _gg = res.params['gg' + str(i)].value
                _s = res.params['s' + str(i)].value
                _y = np.exp(-((xc - _x0) / _gg)**2 * 4 * np.log(2)) * _s
                yfits.append(_y)
            elif ftype == 'lorentzian':
                _x0 = res.params['x' + str(i)].value
                _ll = res.params['ll' + str(i)].value
                _s = res.params['s' + str(i)].value
                _y = _ll / (2 * np.pi * ((xc - _x0)**2 + _ll**2 / 4))
                yfits.append(_y)
            else:
                raise ValueError('Unknown function type')
        if len(pks) == 1:
            outdata = np.column_stack((xc, yc, res.residual))
            outfmt = ['%6.2f', '%9.6f', '%9.6f']
        else:
            outdata = np.column_stack((xc, yc, res.residual, *yfits))
            outfmt = ['%6.2f', '%9.6f', '%9.6f'] + ['%9.6f'] * len(pks)
        # NOTE(review): flo defaults to None but is formatted with {:g} here,
        # which raises — flo is effectively required whenever outfile is set.
        hd_list = ['a0={:>6.4f} b0={:>6.4f} FLO={:g}MHz {:s}'.format(a0, b0, flo, 'UP' if up else 'DOWN'),
                   'Voigt-1D: a={:>6.4f} b={:>6.4f}'.format(a, b),
                   'SNR={:>6.2f} FWHM_AB={:>6.4f} FWHM_FIT={:>6.4f}'.format(snr, vv_ab, vv_fit),
                   ' | '.join(par_list),
                   '{:>5s} {:^10s} {:^10s}'.format('freq', 'inten', 'residual')
                   ]
        np.savetxt(outfile, outdata, fmt=outfmt, header='\n'.join(hd_list))
    return snr, vv_fit
def show_mail(event=None):
    """Show the full mail in a pop-up window when its list entry is clicked.

    Resolves the clicked widget's trailing name component to a mail id,
    loads the mail from the logged-in user's table and renders sender,
    date, subject and body in a read-only Toplevel window.
    """
    global primary_bg_color, secondary_bg_color, fg_color, entry_config, highlight, win
    # The widget path ends in the mail id; only react to entries whose
    # name is a long numeric id (> 12 digits), i.e. actual mail rows.
    value = str(event.widget).split('.')[-1]
    if value.isnumeric() and len(value) > 12:
        # NOTE(review): these queries interpolate values directly into SQL.
        # `value` is digit-checked above and `user_id` comes from this app,
        # but parameterized queries would still be safer — confirm whether
        # connect_to_sql supports placeholders.
        table_name = connect_to_sql(f"select email from user_data where unique_id='{user_id}'")[0][0]
        mail = connect_to_sql(f"select * from `{table_name}` where mail_id='{value}'")[0][1:]
        from_, to_, subject, mssg, date = mail
        mailbox = tk.Toplevel(win, width=960, height=480, bg=secondary_bg_color)
        mailbox.title(subject)
        mailbox.pack_propagate(False)  # keep the fixed 960x480 window size
        tk.Label(mailbox, text=from_, bg=primary_bg_color,
                 fg=fg_color, font=('calibri', 18, 'bold'), width=74, anchor=tk.W).place(x=0, y=0)
        tk.Label(mailbox, text=date, bg=primary_bg_color,
                 fg=fg_color, font=('calibri', 18, 'bold'), width=74, anchor=tk.W).place(x=0, y=36)
        tk.Label(mailbox, text=subject, bg=primary_bg_color,
                 fg=fg_color, font=('calibri', 18, 'bold'), width=74, anchor=tk.W).place(x=0, y=72)
        # Message body goes into a Text widget so it can scroll; disabled
        # afterwards so the user cannot edit it.
        text_ = tk.Text(mailbox, height=16, width=96, bg=primary_bg_color, font=('calibri', 14), borderwidth=0,
                        selectbackground='blue', fg=fg_color, highlightcolor=highlight, relief='flat')
        text_.place(x=0, y=110)
        text_.insert("1.0", mssg)
        text_.config(state=tk.DISABLED)
        text_.pack_propagate(False)
        scrollbar = tk.Scrollbar(text_, orient="vertical", command=text_.yview)
        text_.configure(yscrollcommand=scrollbar.set)
        scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
def parse_options(dict_in: Optional[dict], defaults: Optional[dict] = None):
    """Prepare an options dict (e.g. kwargs) for safe local mutation.

    Makes a shallow copy of ``dict_in`` so the caller's dict is never
    changed, maps ``None`` to an empty dict (useful because mutable
    dicts must not be argument defaults), and fills in any missing keys
    from ``defaults``.

    Parameters
    ----------
    dict_in
        The options to copy, or ``None`` for "no options".
    defaults
        Optional fallback values, applied only for absent keys.

    Returns
    -------
    dict
        A fresh dict, safe to modify.
    """
    result = {} if dict_in is None else dict(dict_in)
    # setdefault keeps explicitly passed values and only fills gaps.
    for key, value in (defaults or {}).items():
        result.setdefault(key, value)
    return result
def build_optimizer(model, optimizer_cfg):
    """Build an optimizer for ``model`` from a config dict.

    Args:
        model (:obj:`nn.Module`): The model whose parameters are optimized.
        optimizer_cfg (dict): Optimizer config. Required fields are
            ``type`` (optimizer class name) and ``lr`` (base learning
            rate). Any further optimizer keyword arguments (e.g.
            ``weight_decay``, ``momentum``) may be given. Two special
            fields are popped before construction:

            - ``constructor``: name of the optimizer-constructor class
              (default ``'DefaultOptimizerConstructor'``).
            - ``paramwise_options``: dict mapping parameter-name regexes
              to per-parameter options (lr, lr_mult, momentum,
              momentum_mult, weight_decay, weight_decay_mult).

    Returns:
        torch.optim.Optimizer: The initialized optimizer.
    """
    # Deep-copy so popping the special keys never mutates the caller's cfg.
    cfg = copy.deepcopy(optimizer_cfg)
    constructor_type = cfg.pop('constructor', 'DefaultOptimizerConstructor')
    paramwise_cfg = cfg.pop('paramwise_options', None)
    constructor = build_optimizer_constructor(
        dict(
            type=constructor_type,
            optimizer_cfg=cfg,
            paramwise_cfg=paramwise_cfg,
        ))
    return constructor(model)
def test_normal_sw(data):
    """Shapiro-Wilk normality test on standardised data.

    Standardises ``data`` by subtracting its mean and dividing by the
    standard error (std / sqrt(n)), then runs scipy's Shapiro-Wilk test
    and returns its (statistic, p-value) result.
    """
    # NOTE(review): the divisor is the standard error, not the standard
    # deviation — presumably intentional; Shapiro-Wilk should be invariant
    # to affine rescaling, so the result is unaffected either way (verify).
    norm_data = (data - np.mean(data))/(np.std(data)/np.sqrt(len(data)))
    return st.shapiro(norm_data)
def dict_to_namespace(cfg_dict: Dict[str, Any]) -> 'Namespace':
    """Recursively convert a nested dictionary into a nested namespace.

    Dict values are converted only when all their keys are strings;
    dict elements inside list values are converted element-wise. The
    input dict is deep-copied first and never mutated.
    """
    def _is_str_keyed(obj) -> bool:
        # Only str-keyed dicts can become Namespace attributes.
        return isinstance(obj, dict) and all(isinstance(k, str) for k in obj)

    def _to_ns(mapping) -> 'Namespace':
        converted = {}
        for key, value in mapping.items():
            if _is_str_keyed(value):
                converted[key] = _to_ns(value)
            elif isinstance(value, list):
                converted[key] = [
                    _to_ns(item) if _is_str_keyed(item) else item
                    for item in value
                ]
            else:
                converted[key] = value
        return Namespace(**converted)

    return _to_ns(deepcopy(cfg_dict))
def country_currency(code, country_name):
    """Print currency information (name, symbol, optionally short code) for a country."""
    # return_country yields a 4-tuple whose last three items are
    # (currency name, short code, symbol), or a falsy value for no match.
    _data = return_country(currency_data, country_name)
    if _data:
        _, currency_name, the_code, symbol = _data
    else:
        # secho returns None, so this also terminates the command early.
        return click.secho('Country does not exist. Perhaps, write the full name?', fg='red')
    click.secho("The currency is: {}({})".format(currency_name, symbol), fg='green')
    if code:
        # `code` is presumably a CLI flag that enables this extra line — TODO confirm.
        click.secho("The currency short code is: {}".format(the_code), fg='green')
def decompose_matrix(matrix):
    """Return sequence of transformations from transformation matrix.
    matrix : array_like
        Non-degenerative homogeneous transformation matrix
    Return tuple of:
        scale : vector of 3 scaling factors
        shear : list of shear factors for x-y, x-z, y-z axes
        angles : list of Euler angles about static x, y, z axes
        translate : translation vector along x, y, z axes
        perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerative.
    >>> T0 = translation_matrix((1, 2, 3))
    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
    >>> T1 = translation_matrix(trans)
    >>> numpy.allclose(T0, T1)
    True
    >>> S = scale_matrix(0.123)
    >>> scale, shear, angles, trans, persp = decompose_matrix(S)
    >>> scale[0]
    0.123
    >>> R0 = euler_matrix(1, 2, 3)
    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
    >>> R1 = euler_matrix(*angles)
    >>> numpy.allclose(R0, R1)
    True
    """
    # Work on a transposed float64 copy so rows of M are the matrix columns.
    M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
    if abs(M[3, 3]) < _EPS:
        raise ValueError("M[3, 3] is zero")
    M /= M[3, 3]  # normalize so the homogeneous coordinate is exactly 1
    # P is M with the perspective column cleared; it must be invertible.
    P = M.copy()
    P[:, 3] = 0, 0, 0, 1
    if not numpy.linalg.det(P):
        raise ValueError("Matrix is singular")
    scale = numpy.zeros((3, ), dtype=numpy.float64)
    shear = [0, 0, 0]
    angles = [0, 0, 0]
    # Extract the perspective partition (if present), then clear it from M.
    if any(abs(M[:3, 3]) > _EPS):
        perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
        M[:, 3] = 0, 0, 0, 1
    else:
        perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
    # Translation lives in the last row (because of the transpose above).
    translate = M[3, :3].copy()
    M[3, :3] = 0
    # Gram-Schmidt on the 3x3 part: peel off scale and shear factors
    # while orthonormalising the rows.
    row = M[:3, :3].copy()
    scale[0] = vector_norm(row[0])
    row[0] /= scale[0]
    shear[0] = numpy.dot(row[0], row[1])
    row[1] -= row[0] * shear[0]
    scale[1] = vector_norm(row[1])
    row[1] /= scale[1]
    shear[0] /= scale[1]
    shear[1] = numpy.dot(row[0], row[2])
    row[2] -= row[0] * shear[1]
    shear[2] = numpy.dot(row[1], row[2])
    row[2] -= row[1] * shear[2]
    scale[2] = vector_norm(row[2])
    row[2] /= scale[2]
    # list / numpy scalar broadcasts to an ndarray, which is then
    # slice-assigned back into the list element-wise.
    shear[1:] /= scale[2]
    # If the remaining basis is left-handed, flip all signs (negative scale).
    if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
        scale *= -1
        row *= -1
    # Euler angles about static x, y, z axes from the orthonormal rows.
    angles[1] = math.asin(-row[0, 2])
    if math.cos(angles[1]):
        angles[0] = math.atan2(row[1, 2], row[2, 2])
        angles[2] = math.atan2(row[0, 1], row[0, 0])
    else:
        # Gimbal lock: cos(angles[1]) == 0, the third angle is set to 0.
        #angles[0] = math.atan2(row[1, 0], row[1, 1])
        angles[0] = math.atan2(-row[2, 1], row[1, 1])
        angles[2] = 0.0
    return scale, shear, angles, translate, perspective
def test_integration_with_orion(clean_mongo, clean_crate, entity):
    """
    Make sure QL correctly handles headers in Orion's notification.

    End-to-end flow: subscribe QuantumLeap to Orion, create an entity in
    Orion under a Fiware-Service/ServicePath, wait for the notification to
    propagate, then verify the entity is queryable in QL only with the
    matching service headers.
    """
    h = {
        'Content-Type': 'application/json',
        'Fiware-Service': 'myservice',
        'Fiware-ServicePath': '/',
    }
    # Subscribe QL to Orion
    params = {
        'orionUrl': ORION_URL,
        'quantumleapUrl': QL_URL,
    }
    r = requests.post("{}/subscribe".format(QL_URL), params=params, headers=h)
    assert r.status_code == 201
    # Insert values in Orion with Service and ServicePath
    data = json.dumps(entity)
    r = requests.post('{}/entities'.format(ORION_URL), data=data, headers=h)
    assert r.ok
    # Wait notification to be processed (fixed delay; no polling here)
    time.sleep(2)
    # Query WITH headers
    url = "{qlUrl}/entities/{entityId}".format(
        qlUrl=QL_URL,
        entityId=entity['id'],
    )
    query_params = {
        'type': entity['type'],
    }
    r = requests.get(url, params=query_params, headers=h)
    assert r.status_code == 200, r.text
    obtained = r.json()
    assert obtained['entityId'] == entity['id']
    # Query WITHOUT headers: data stored under a service must be invisible
    # to requests that carry no Fiware-Service header.
    r = requests.get(url, params=query_params)
    assert r.status_code == 404, r.text
def record_load(name:str, data:dict, connection_timeout:float=1.0) -> None:
    """
    Load data into epics process variables for given location.

    Parameters
    ----------
    name: str
        location name
    data: dict
        location data
        {TYPE:str, FLAG:int, JOIN:int, RISE:int, TIME:float, *:float}
    connection_timeout: float
        timeout (seconds) passed to every epics.caput call

    Returns
    -------
    None
    """
    # Top-level location PVs: H:<name>:<KEY>.
    # dict.get returns None for absent keys, matching the old behavior.
    for key in ('TYPE', 'FLAG', 'JOIN', 'RISE', 'TIME'):
        epics.caput(f'H:{name}:{key}', data.get(key),
                    connection_timeout=connection_timeout)
    # Model PVs: H:<name>:MODEL:<KEY>. Same write order as before.
    for key in ('BX', 'AX', 'FX', 'BY', 'AY', 'FY',
                'SIGMA_BX', 'SIGMA_AX', 'SIGMA_FX',
                'SIGMA_BY', 'SIGMA_AY', 'SIGMA_FY'):
        epics.caput(f'H:{name}:MODEL:{key}', data.get(key),
                    connection_timeout=connection_timeout)
def GetMaxHarmonic( efit ):
    """Determine the highest order of harmonic amplitudes in an ellipse-fit
    object.

    Columns named "ai3_err", "ai4_err", ..., "aiM_err" are assumed to
    exist in efit.colNames; the maximum harmonic number M is returned.
    """
    # Bug fix: the previous code used cname.rstrip("_err"), but rstrip
    # strips a *character set* ({'_', 'e', 'r'}), not the literal suffix,
    # so it could silently eat trailing characters of other names.
    # Slicing off the exact "ai" prefix and "_err" suffix is unambiguous.
    momentNums = [int(cname[2:-4]) for cname in efit.colNames
                  if cname.startswith("ai") and cname.endswith("_err")]
    return max(momentNums)
def gzip_open_to(fin_gz, fout):
    """Unzip a file.gz file.

    Decompresses ``fin_gz`` into ``fout`` and deletes the archive on
    success.
    """
    # Stream in 1 MiB chunks instead of loading the whole decompressed
    # file into memory (the old code did zstrm.read() in one shot).
    with gzip.open(fin_gz, 'rb') as zstrm:
        with open(fout, 'wb') as ostrm:
            for chunk in iter(lambda: zstrm.read(1 << 20), b''):
                ostrm.write(chunk)
    # NOTE(review): assert is stripped under `python -O`; kept for
    # backward compatibility (callers may catch AssertionError).
    assert os.path.isfile(fout), "COULD NOT GUNZIP({G}) TO FILE({F})".format(G=fin_gz, F=fout)
    os.remove(fin_gz)
def export_results(process_results, references, output_dir,
                   results_file_prefix=""):
    """
    Exports an xlsx file with results over all run simulations and an html file
    showing plots of the results.
    The plot shows the source demand of all settings. If a reference time
    series has been set there will also be plots showing relative and absolute
    deviations to the references data where it is given.
    :param process_results: The results mapped over the simulations; each
        entry is an (info_series, result_frame) pair where the frame holds
        a 'Demand Heat Source' column.
    :param references: A reference file (DataFrame), or None.
    :param output_dir: Where shall the result files be saved?
    :param results_file_prefix: A prefix added to all result files.
    """
    # Base path (directory + prefix) shared by the .xlsx and .html outputs.
    # Bug fix: `filename` was computed but never used, so output names were
    # effectively hard-coded.
    filename = f'{output_dir + os.path.sep + results_file_prefix}'
    info_rows = []
    res_values = pd.DataFrame()
    for info, frame in process_results:
        info_rows.append(info)
        res_values.loc[:, info.name] = frame.loc[:, 'Demand Heat Source']
    # Building the frame from collected rows replaces the deprecated
    # DataFrame.append-in-a-loop pattern.
    res_info = pd.DataFrame(info_rows)
    # Bug fix: initialize from `references` so the plotting code below never
    # touches res_abs_dev / res_rel_dev_percent when no reference was given
    # (previously references=None raised a NameError at plot time).
    no_references = references is None
    with pd.ExcelWriter(f'{filename}_results.xlsx') as xlsx:
        res_info.to_excel(xlsx, index=False, sheet_name='Info')
        if not no_references:
            common_cols = res_values.columns.intersection(references.columns)
            if not common_cols.empty:
                res_rel_dev_percent = (res_values.loc[:, common_cols].div(
                    references.loc[:, common_cols], axis=1) - 1) * 100
                res_abs_dev = res_values.loc[:, common_cols] - \
                    references.loc[:, common_cols]
                pd.DataFrame(res_rel_dev_percent). \
                    to_excel(xlsx, index=False, sheet_name='Rel_dev')
                pd.DataFrame(res_abs_dev). \
                    to_excel(xlsx, index=False, sheet_name='Abs_dev')
            else:
                # No overlapping columns -> nothing to compare against.
                no_references = True
    # Use Bokeh to print plots and export as html file
    plots = []
    line_width = 1.5
    output_file(f'{filename}_results.html')
    # Plot absolute deviations and actual demand
    plot = figure(width=1600, height=500, x_axis_type='datetime',
                  title='Resulting heat source demands')
    time = res_values.index.values
    for n in res_values:
        i = res_values.columns.get_loc(n)
        if not no_references and n in res_abs_dev:
            plot.line(x=time, y=res_abs_dev.loc[:, n],
                      legend_label=n + ' deviation to given reference [W]',
                      line_color=Spectral10[i],
                      line_width=line_width, line_alpha=0.9)
        plot.line(x=time, y=res_values.loc[:, n], legend_label=n + ' [W]',
                  line_color=Spectral10[i], line_width=line_width,
                  line_alpha=0.9)
    plot.xaxis.formatter = DatetimeTickFormatter(days=['%B %d'],
                                                 hours=['%H:%M'])
    plot.legend.location = "top_right"
    plot.legend.click_policy = "hide"
    plots.append(plot)
    # Plot relative deviations
    if not no_references:
        plot = figure(width=1600, height=300, x_axis_type='datetime',
                      title='Results compared to given references in percent')
        for n in res_rel_dev_percent:
            i = res_values.columns.get_loc(n)
            plot.line(x=time, y=res_rel_dev_percent.loc[:, n],
                      legend_label=n + ' [%]', line_color=Spectral10[i],
                      line_width=line_width, line_alpha=0.9)
        plot.xaxis.formatter = DatetimeTickFormatter(days=['%B %d'],
                                                     hours=['%H:%M'])
        plot.legend.location = "top_right"
        plot.legend.click_policy = "hide"
        plots.append(plot)
    show(gridplot(children=plots, sizing_mode='stretch_width', ncols=1))
def quaternary_tournament(population, scores, next_gen_number, random_seed=42):
    """Select the next generation via single-objective quaternary tournaments.

    For each of ``next_gen_number`` slots, four candidates are drawn at
    random (with replacement) from the combined parent+child population,
    and the one with the lowest score wins the slot.

    Args:
        population: Members of the current population (parents and
            children combined).
        scores: Score of each member, ordered like ``population``
            (lower is better).
        next_gen_number: How many members to select.
        random_seed: Seed for numpy's random number generator.

    Returns:
        A list with the selected members of the next generation.
    """
    np.random.seed(random_seed)
    candidate_indices = np.array(range(len(population)))
    next_generation = []
    for _ in range(next_gen_number):
        # Draw four contenders; ties keep the earliest-drawn contender.
        contenders = np.random.choice(candidate_indices, size=4)
        winner, winner_score = None, math.inf
        for idx in contenders:
            if scores[idx] < winner_score:
                winner_score = scores[idx]
                winner = population[idx]
        next_generation.append(winner)
    return next_generation
def ctrl(browser, k):
    """Send the key combination Ctrl+(k) to *browser*."""
    combo = "control-{}".format(k)
    trigger_keystrokes(browser, combo)
def empty_list():
    """Return a fresh, empty list."""
    return list()
def snapshot():
    """Creates a default initialized startup snapshot.

    deno_core requires this, although it does not document this outside of a
    few random mentions in issues.
    """
    runtime = Runtime(will_snapshot=True)
    return runtime.snapshot()
def sample_user(email='riti2874@gmail.com', password='Riti#2807'):
    """Create and return a sample user (via Django's user model) for tests."""
    return get_user_model().objects.create_user(email, password)
def is_chinese_prior_leap_month(m_prime, m):
    """Return True if there is a Chinese leap month on or after lunar
    month starting on fixed day, m_prime and at or before
    lunar month starting at fixed date, m."""
    # Recursion walks month-by-month backwards from m towards m_prime:
    # either the current month has no major solar term (a leap month),
    # or we check the month starting at the previous new moon.
    return ((m >= m_prime) and
            (is_chinese_no_major_solar_term(m) or
             is_chinese_prior_leap_month(m_prime, chinese_new_moon_before(m))))
async def botinfo():
    """Get Info About The Bot.

    Builds a single embed with bot metadata and helpful links and sends
    it to the current channel.
    """
    em = discord.Embed(color=0xea7938) #0xea7938 is the color code
    em.add_field(name='Bot Info', value="This bot was created with the library Discord.py\n If you need any help or questions, join the [devs' server](https://discord.gg/km9Yudr)")
    em.add_field(name='Total Commands', value=(len(bot.commands)))
    em.add_field(name = 'Add Me On Steam', value = '[Steam](http://steamcommunity.com/id/RageKickedGamer/)')
    em.add_field(name = 'Invite Me!', value = '[Click Here](https://discordbots.org/bot/386637469438836738)')
    em.set_footer(text="Rage Bot v1.0")
    # bot.say is the discord.py v0.x API; relies on the module-level `bot`.
    await bot.say(embed = em)
def print_video_id_with_car_events(annotation_dir):
    """Print car event stats in the Stanford dataset.

    Args:
        annotation_dir: Annotation dir.

    Returns:
    """
    annotations = filter_annotation_by_label(
        io_util.load_stanford_campus_annotation(annotation_dir))
    # Deduplicate video ids and report them in sorted order.
    video_ids = sorted(set(annotations['videoid']))
    print('There are {} videos with car events.'.format(len(video_ids)))
    print(json.dumps(video_ids, indent=4))
def main():
    """CLI entry point: fetch tokens from .json map files and write them,
    regrouped, to a compact .json map.

    Parses command-line arguments, loads the given maps, optionally
    combines all tokens onto one output map, and saves the result(s).
    """
    print(" ===================")
    print("   Token Collector  ")
    print(" ===================")
    print("Press ctrl+c or close window to quit.\n")
    print("Will take any number of .json map files as command line arguments:")
    print("  > map_to_tokens.exe <map-1 path> <map-2 path>...<map-n path>")
    print("      OR")
    print("  > python map_to_tokens.py <map-1 path> <map-2 path>...<map-n path>\n")
    # add arguments to parser
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("maps", metavar="<map path>", help="Combine each map's tokens into a single output file.", nargs='*')
    parser.add_argument("-c", "--combine", help="Combine each map's tokens into a single output file.", action="store_true")
    parser.add_argument("-sc", "--skipconfirm", help="Skip confirmation prompt for output destination", action="store_true")
    parser.add_argument("-p", "--padding", type=int, default=0, metavar="<integer>", help="Padding between tokens")
    parser.add_argument("-mp", "--mappadding", type=int, default=3, metavar="<integer>", help="Padding between tokens grouped from separate maps")
    parser.add_argument("-fu", "--fillunder", help="Place a colored (filled) cell under each token", action="store_true")
    parser.add_argument("-a", "--align", help="Align along which edge of the tokens (when handling tokens larger than 1x1).", choices=['top', 'bottom', 'left', 'right'], default='bottom')
    parser.add_argument("-d", "--destination", metavar="<path>", help="Output destination path for .json file.")
    parser.add_argument("-nep", "--noexitpause", help='Skip "Press Enter to Exit..."', action="store_true")
    args = parser.parse_args()
    print(f'Command Line Arguments, as parsed:\n  {vars(args)}\n')
    # get maps from command line (delegates file loading/validation)
    shmaps_list = uihelper.get_shmaps(args.maps)
    # set output destination: --destination wins, else cwd, with an
    # interactive override unless --skipconfirm was given
    outdest = Path(args.destination).resolve() if args.destination else Path.cwd()
    print(f"\nOutput destination currently set to:\n  {outdest}")
    if not args.skipconfirm:
        user_outdest = input("Enter to continue, or enter full path to set output destination: ")
        if user_outdest:
            outdest = Path(user_outdest)
    # generate maps
    output_maps = {}
    if args.combine:
        output_maps.update({'tokens_map': group_tokens_on_map(shmaps_list, args.padding, args.mappadding, args.align, args.fillunder)})
    else:
        # process maps separately
        for shmap in shmaps_list:
            outmap = group_tokens_on_map([shmap], args.padding, args.mappadding, args.align, args.fillunder)
            output_maps.update({shmap.name: outmap})
    # save maps to disk
    for name, shmap in output_maps.items():
        print(shmap.export_to(outdest, filenamestem=name))
    if not args.noexitpause:
        # pause before exiting - necessary for pyinstaller
        input("Press Enter to Exit...")
def parse_wrapper(max_iters, driver, data, file):
    """Parse each scraped url and append title/abstract/doi to a text file.

    Iterates over the first ``max_iters`` urls in ``data``, refreshes the
    browser between urls, extracts the record via ``extractor`` and writes
    one line per url to ``file``.

    Arguments:
        max_iters - total number of scraped urls to be parsed
        driver - desired webdriver
        data - text file (DataFrame) containing a list of the scraped urls
        file - the open output text file to append results to
    """
    for index in range(max_iters):
        print('On url ', index)
        driver.refresh()
        # Give the page time to load before extracting.
        time.sleep(2)
        parsed = str(extractor(data.iloc[index, 0], driver, 3))
        file.write(parsed)
        file.write('\n')
def read_config_file(fname):
    """Read a JSON or YAML config file and return the parsed content.

    The loader is picked from the file extension (.yaml/.yml/.json);
    anything else raises TypeError. A missing file raises
    FileNotFoundError with the offending path in the message.
    """
    if fname.endswith((".yaml", ".yml")):
        try:
            # Prefer FullLoader on PyYAML >= 5; older versions lack it.
            reader = partial(yaml.load, Loader=yaml.FullLoader)
        except AttributeError:
            reader = yaml.load
    elif fname.endswith(".json"):
        reader = json.load
    else:
        raise TypeError("Did not understand file type {}.".format(fname))
    try:
        with open(fname, "r") as handle:
            return reader(handle)
    except FileNotFoundError:
        raise FileNotFoundError("No config file found at {}.".format(fname))
async def insert_submission(submission: Submission, config: Config = CONFIG):
    """
    Store a Submission object into metadata store.

    Args:
        submission: Submission object
        config: Runtime configuration
    """
    # A fresh DB client is opened per call and closed afterwards; the
    # target collection comes from the configured database name.
    client = await get_db_client(config)
    collection = client[config.db_name][COLLECTION_NAME]
    await collection.insert_one(submission)
    client.close()
def plot_wo(wo, legend=True, **plot_kwargs):
    """Plot a water observation bit flag image.

    Parameters
    ----------
    wo : xr.DataArray
        A DataArray containing water observation bit flags.
    legend : bool
        Whether to plot a legend. Default True.
    plot_kwargs : dict
        Keyword arguments passed on to DataArray.plot.

    Returns
    -------
    plot
    """
    # One color per bit-flag combination, in the same order as `bounds`.
    cmap = mcolours.ListedColormap([
        np.array([150, 150, 110]) / 255,    # dry - 0
        np.array([0, 0, 0]) / 255,          # nodata, - 1
        np.array([119, 104, 87]) / 255,     # terrain - 16
        np.array([89, 88, 86]) / 255,       # cloud_shadow - 32
        np.array([216, 215, 214]) / 255,    # cloud - 64
        np.array([242, 220, 180]) / 255,    # cloudy terrain - 80
        np.array([79, 129, 189]) / 255,     # water - 128
        np.array([51, 82, 119]) / 255,      # shady water - 160
        np.array([186, 211, 242]) / 255,    # cloudy water - 192
    ])
    bounds=[
        0,
        1,
        16,
        32,
        64,
        80,
        128,
        160,
        192,
        255,
    ]
    # Shift bin edges down by 0.1 so each exact flag value falls strictly
    # inside its own bin.
    norm = mcolours.BoundaryNorm(np.array(bounds) - 0.1, cmap.N)
    cblabels = ['dry', 'nodata', 'terrain', 'cloud shadow', 'cloud', 'cloudy terrain', 'water', 'shady water', 'cloudy water']
    # DataArray.plot.imshow works for 2D arrays; fall back to .plot for
    # objects that don't expose the imshow accessor.
    try:
        im = wo.plot.imshow(cmap=cmap, norm=norm, add_colorbar=legend, **plot_kwargs)
    except AttributeError:
        im = wo.plot(cmap=cmap, norm=norm, add_colorbar=legend, **plot_kwargs)
    if legend:
        # The colorbar attribute name differs between return types.
        try:
            cb = im.colorbar
        except AttributeError:
            cb = im.cbar
        # Center the text labels inside each color band.
        ticks = cb.get_ticks()
        cb.set_ticks(ticks + np.diff(ticks, append=256) / 2)
        cb.set_ticklabels(cblabels)
    return im
def _get_tweets(db_path="tweets.json"):
    """Get Trump's tweets, caching in the module-level TWEETS after first use."""
    global TWEETS
    # Fast path: already cached from an earlier call.
    if TWEETS is not None:
        return TWEETS
    if os.path.exists(db_path):
        # Load from the saved file on disk.
        with open(db_path) as f:
            TWEETS = json.load(f)
    else:
        # Fall back to fetching via the API, which also writes the file.
        from thedonald import tweets
        TWEETS = tweets.write_tweets_to_file(db_path)
    return TWEETS
def int_list_data():
    """Item data wrapping an int list: [1, 2, 3, 4, 5, 6]."""
    return easymodel.ListItemData([1, 2, 3, 4, 5, 6])
def test_handle_unknown() -> None:
    """
    Tests correct output when prediction data is not present in the unseen data.
    """
    transformer = DatetimeOneHotEncoderTransformer()
    data = DatetimeIndex([datetime(2019, 1, 1, 1, 0, 0)])
    unseen_data = DatetimeIndex([datetime(2020, 2, 2, 2, 0, 0)])
    # Every component of the unseen timestamp differs from the fitted one,
    # so each one-hot column must be zero.
    correct_output: ndarray[Any, dtype[Any]] = array([[0, 0, 0, 0, 0]])
    transformer.fit(data, True, True, True, True, True)
    output = transformer.predict(unseen_data)
    assert array_equal(output, correct_output)
def check_transaction_threw(receipt: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Check whether a transaction reverted, based on its receipt.

    Returns the receipt when its status equals the failure code, and
    None when the transaction executed successfully.

    Raises ValueError for receipts without a status field (old clients).
    """
    if "status" not in receipt:
        raise ValueError(
            "Transaction receipt does not contain a status field. Upgrade your client"
        )
    failed = receipt["status"] == RECEIPT_FAILURE_CODE
    return receipt if failed else None
def button(style: ButtonStyle, label: str, **kwargs) -> Callable[..., Button]:
    """A decorator used to create buttons.

    This should be decorating the buttons callback.

    Parameters
    ----------
    style: :class:`.ButtonStyle`
        The styling to use for the button
    label: :class:`str`
        The label of the button
    custom_id: Optional[:class:`str`]
        The custom id to set for the button
    disabled: Optional[:class:`bool`]
        Whether or not the button should be marked as disabled
    emoji: Optional[:class:`.Emoji`]
        The emoji to set for the button
    url: Optional[:class:`str`]
        The url to use for url styled buttons

    Returns
    -------
    :class:`.Button`
        The created button instance
    """
    def inner(func: Coroutine) -> Button:
        button = Button(style, label, **kwargs)
        # Bind the button itself as the callback's first argument so the
        # handler receives the Button instance when invoked.
        button.callback = functools.partial(func, button)  # type: ignore
        return button
    return inner
def measure2(rep=REPETITIONS, timeout=TIMEOUT):
    """
    The whole thing:
    Start processes.
    Measure memory.
    Hammer servers.
    Measure memory.
    Kill processes.
    """
    # All child output is redirected into a single log file.
    f=open("logging.txt", "w")
    stdout=stderr=f
    processes=startProcessesTotallyIndependent(stdout=stdout, stderr=stderr)
    measureMemory(processes)
    hammerLocalhost(processes, rep=rep, timeout=timeout)
    for i in range(len(processes)):processes[i][2]="" # we don't need the urls anymore
    # NOTE: Python 2 print statement — this module is Python 2 only.
    print "\nNow after all those calls, measure memory again ... ",
    measureMemory(processes)
    killEm(processes)
    f.close()
def yaml_export(args, vault):
    """Print the vault group's YAML export to stdout.

    :returns: None
    """
    exported = vault.group.yaml_export()
    print(exported)
def notify(header: str, body: str, urgency: str="normal") -> None:
    """Show a desktop notification via notify-send.

    header -- the notification header
    body -- the notification body
    urgency -- the urgency level (default 'normal') [low|normal|critical]
    """
    # Silently do nothing when notify-send is not installed.
    if not which("notify-send"):
        return
    subprocess.run(["notify-send", "-a", header, "-u", urgency, body])
def cover():
    """Run the unit tests with coverage and emit text + HTML reports."""
    for command in ('coverage run manage.py run_test',
                    'coverage report',
                    'coverage html'):
        os.system(command)
def _check_groups_docker():
    """
    Check that the current user belongs to the required groups to both run S2E and build S2E images.
    """
    # Both running S2E and building images go through Docker, so membership
    # in the 'docker' group is the only requirement checked here.
    if not _user_belongs_to('docker'):
        _raise_group_error('docker')
def AuxSource_Cast(*args):
    """
    Cast(BaseObject o) -> AuxSource
    AuxSource_Cast(Seiscomp::Core::BaseObjectPtr o) -> AuxSource
    """
    # SWIG-generated wrapper: delegates the downcast to the C++ extension.
    return _DataModel.AuxSource_Cast(*args)
def to_byte_array(int_value):
    """Split an integer representing CODESIZE-byte-wide data into a list
    of bytes, honoring the module-level BIG_ENDIAN flag."""
    # Byte i is bits [8*i, 8*i+8) of the value, little-endian first.
    ordered = [(int_value >> (8 * shift)) & 0xff for shift in range(CODESIZE)]
    if BIG_ENDIAN:
        ordered.reverse()
    return ordered
def no_op(loss_tensors):
    """Identity pass-through: return *loss_tensors* unchanged."""
    return loss_tensors
def get_example_two_cube(session, varcodes, selected_year=None):
    """ Create 2D cube of flight routes per selector variable description, per date selector. """
    cubes_api = aa.CubesApi(session.api_client)
    # If no selected year, no underlying base query
    if selected_year is None:
        query = create_query()
    # Else, only return flight counts for the selected year
    else:
        year_rule = aa.Rule(
            create_clause(session, REPORTING_PERIOD_YEARS_CODE, [selected_year])
        )
        query = create_query(rule=year_rule)
    # One cube dimension per requested variable code, plus a single measure.
    dimensions = [create_dimension(code) for code in varcodes]
    measure = create_measure()
    cube = create_cube(query, dimensions, [measure])
    # Synchronous call: blocks until the cube has been calculated.
    cube_result = cubes_api.cubes_calculate_cube_synchronously(
        session.data_view, session.system, cube=cube
    )
    return cube_result
def svd_reduce(imgs, n_jobs):
    """Reduce data using svd.

    Work done in parallel across subjects.

    Parameters
    ----------
    imgs : array of str, shape=[n_subjects, n_sessions]
        Element i, j of the array is a path to the data of subject i
        collected during session j.
        Data are loaded with numpy.load and expected shape is
        [n_voxels, n_timeframes]
        n_timeframes and n_voxels are assumed to be the same across subjects
        n_timeframes can vary across sessions
        Each voxel's timecourse is assumed to have mean 0 and variance 1
        imgs can also be a list of list of arrays where element i, j of
        the array is a numpy array of shape [n_voxels, n_timeframes] that
        contains the data of subject i collected during session j.
        imgs can also be a list of arrays where element i of the array is
        a numpy array of shape [n_voxels, n_timeframes] that contains the
        data of subject i (number of sessions is implicitly 1)
    n_jobs : integer
        The number of CPUs to use to do the computation.
        -1 means all CPUs, -2 all CPUs but one, and so on.

    Returns
    -------
    reduced_data : np array shape=(n_subjects, n_timeframes, n_timeframes)
    """
    def svd_i(img):
        # Map each session to its slice of the concatenated time axis.
        n_voxels = get_safe_shape(img[0])[0]
        slices = []
        t_i = 0
        for i in range(len(img)):
            n_voxels, n_timeframes = get_safe_shape(img[i])
            slices.append(slice(t_i, t_i + n_timeframes))
            t_i = t_i + n_timeframes
        total_timeframes = t_i
        # First compute X^TX
        C = np.zeros((total_timeframes, total_timeframes))
        for i in range(len(img)):
            Xi = safe_load(img[i])
            slice_i = slices[i]
            # Diagonal block at half weight: C + C.T below doubles it back.
            C[slice_i, slice_i] = Xi.T.dot(Xi) / 2
            for j in range(i + 1, len(img)):
                Xj = safe_load(img[j])
                slice_j = slices[j]
                # Only the upper triangle is filled; symmetry is restored
                # by the C + C.T step.
                C[slice_i, slice_j] = Xi.T.dot(Xj)
                del Xj
            del Xi  # free session data before loading the next one
        C = C + C.T
        # Then compute SVD
        V, S, Vt = np.linalg.svd(C)
        X_reduced = (
            np.sqrt(S.reshape(-1, 1)) * Vt
        )  # X_reduced = np.diag(np.sqrt(S)).dot(V)
        return X_reduced
    X = Parallel(n_jobs=n_jobs)(delayed(svd_i)(img) for img in imgs)
    return X
def parse_project(record: Record, add_info: dict) -> Tuple[Pair, List[Pair], List[Pair]]:
    """Parse the project record in airtable to Billinge group format.

    Return a key-value pair of the project and a list of key-value pairs of the people doc and institution doc
    in the project. The record should be denormalized at first.

    Parameters
    ----------
    record : Record
        The record from the airtable.
    add_info : dict
        A dictionary of the additional information.

    Returns
    -------
    project : tuple
        The key-value pair of project document.
    people : list
        The list of the key-value pairs of the people in the collaborators list.
    institutions : list
        The list of the key-value pairs of the institutions of those collaborators.
    """
    record = tools.get_data(record)
    # Each collaborator parses into a (person, institution) pair; entries
    # that failed to parse (None) are dropped.
    pairs = list(map(parse_person, record.get('Collaborators', [])))
    people = [pair[0] for pair in pairs if pair[0] is not None]
    institutions = [pair[1] for pair in pairs if pair[1] is not None]
    key = record.get('Name')
    value = {
        'begin_date': record.get('Start Date'),
        'collaborators': tools.get_keys(people),
        'description': record.get('Notes'),
        'grants': record.get('Grant'),
        'group_members': [tools.gen_person_id(record.get('Project Lead'))],
        'lead': tools.gen_person_id(record.get('Project Lead')),
        'log_url': _retrieve(add_info, 'log_url'),
        'ana_repo_url': record.get('Link to Analysis'),
        'man_repo_url': record.get('Link to Paper'),
        # Deliverable and milestones are auto-generated from the start date.
        'deliverable': autogen.auto_gen_deliverable(record.get('Start Date')),
        'milestones': autogen.auto_gen_milestons(record.get('Start Date')),
        'name': _retrieve(add_info, 'name'),
        'pi_id': _retrieve(add_info, 'pi_id'),
        'status': record.get('Status')
    }
    project = (key, value)
    return project, people, institutions
def forbidden_handler_exc(message):
    """More flexible handling of 403-like errors. Used by raising ForbiddenError.

    Args:
        message (str) - Custom message to display

    Returns:
        tuple: rendered 403 template and the 403 status code.
    """
    return render_template('errors/403.html', message=message), 403
def run_test():
    """Run the unit tests (no coverage); return 0 on success, 1 on failure."""
    suite = unittest.TestLoader().discover('./tests', pattern='test*.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit-code convention: 0 means all tests passed.
    return 0 if outcome.wasSuccessful() else 1
def resnet34(num_classes: int = 1000, class_type: str = "single") -> ResNet:
    """
    Standard ResNet 34 implementation;
    expected input shape is (B, 3, 224, 224)

    :param num_classes: the number of classes to classify
    :param class_type: one of [single, multi] to support multi class training;
        default single
    :return: The created ResNet Module
    """
    # (num_blocks, in_channels, out_channels, downsample) per section --
    # the canonical 3-4-6-3 layout of ResNet-34.
    layout = [
        (3, 64, 64, False),
        (4, 64, 128, True),
        (6, 128, 256, True),
        (3, 256, 512, True),
    ]
    sec_settings = [
        ResNetSectionSettings(
            num_blocks=blocks,
            in_channels=in_ch,
            out_channels=out_ch,
            downsample=down,
        )
        for blocks, in_ch, out_ch, down in layout
    ]
    return ResNet(
        sec_settings=sec_settings, num_classes=num_classes, class_type=class_type
    )
def linspace(start, stop, num=50):
    """Return a sorted linspace grid augmented with four extra points.

    The extras sit 0.1 and 0.5 grid-steps from zero and from ``stop``.

    NOTE(review): the two low-end extras are offsets from 0, not from
    ``start`` -- for ``start != 0`` they can fall outside the interval.
    Presumably callers always use ``start == 0``; confirm before changing.
    """
    step = (stop - start) / (num - 1)
    extras = [0.1 * step, 0.5 * step, stop - 0.1 * step, stop - 0.5 * step]
    points = list(np.linspace(start, stop, num)) + extras
    return sorted(points)
def strip_html(markdown):
    """
    Strip HTML tags from a markdown string.

    Entities present in the markdown will be escaped.

    Parameters:
        markdown: A :term:`native string` to be stripped and escaped.

    Returns:
        An escaped, stripped :term:`native string`.
    """
    class Parser(HTMLParser):
        def __init__(self):
            # convert_charrefs=False so the entity/charref handlers below
            # actually fire; with the default (True) they were dead code and
            # numeric references such as "&#65;" were silently decoded.
            super().__init__(convert_charrefs=False)
            # Instance-level list (was a shared class attribute).
            self.text_parts = []

        def handle_data(self, data):
            # Escape raw text so the result is safe to re-embed in HTML.
            self.text_parts.append(
                data
                .replace("&", "&amp;")
                .replace("<", "&lt;")
                .replace(">", "&gt;")
                .replace('"', "&quot;")
            )

        def handle_entityref(self, name):
            # Preserve named entities verbatim (already escaped).
            self.text_parts.append("&" + name + ";")

        def handle_charref(self, name):
            # Preserve numeric character references verbatim.
            self.text_parts.append("&#" + name + ";")

    parser = Parser()
    parser.feed(markdown)
    parser.close()  # flush any buffered trailing data
    return "".join(parser.text_parts)
def test_check_case():
    """Test check_case with non-default options."""
    result = check_case('TEST', lower=False)
    assert result == 'TEST'
def find_character_occurences(processed_txt):
    """Count PERSON-entity occurrences in a processed document.

    Args:
        processed_txt: a parsed document exposing ``len()`` and ``.ents``,
            where each entity carries ``label_`` and ``lemma_`` attributes
            (presumably a spaCy ``Doc`` -- confirm with callers).

    Returns:
        Counter: entity lemma -> number of occurrences.
    """
    # Fixed: original read the undefined name ``processed_text`` (NameError)
    # and used a Python-2 print statement.
    total_len = len(processed_txt)
    characters = Counter()
    # Print a '*' roughly every 10% of the tokens as a coarse progress bar;
    # integer step avoids float modulo and guards against total_len < 10.
    progress_step = total_len // 10
    for index, ent in enumerate(processed_txt.ents):
        if ent.label_ == 'PERSON':
            characters[ent.lemma_] += 1
        if progress_step and index % progress_step == 0:
            print('*', end=' ')
    return characters
def parents_similarity(subject1, subject2, values):
    """Crossover operator biasing offspring toward bits the parents share.

    Both parents are binary-encoded.  Where the parents agree on a bit, the
    child keeps that bit with probability 0.9; where they disagree, the bit
    comes from either parent with probability 0.5.  Two children are built,
    the second with the parents' roles swapped.  For example, with parents
    '11000' and '11101', with high probability the children are '11100' and
    '11001' (middle and last bits chosen randomly).

    :param subject1: one subject of the population
    :param subject2: another subject of the population
    :param values: list or dict; all values a subject in the population can have
    :return: a tuple of two new subjects
    """
    def mix(primary, secondary):
        # Exactly one random.random() draw per bit position, same as before.
        offspring = []
        for p_bit, s_bit in zip(primary, secondary):
            roll = random.random()
            threshold = 0.9 if p_bit == s_bit else 0.5
            offspring.append(p_bit if roll < threshold else s_bit)
        return ''.join(offspring)

    encoded1 = seq_to_binary_string(subject1, values)
    encoded2 = seq_to_binary_string(subject2, values)
    child1 = mix(encoded1, encoded2)
    child2 = mix(encoded2, encoded1)
    return binary_string_to_seq(child1, values), binary_string_to_seq(child2, values)
def generic_fftshift(x, axis=(-2, -1), inverse=False):
    """
    Fourier shift to center the low frequency components.

    Parameters
    ----------
    x : torch Tensor
        Input array
    axis : sequence of int, optional
        Dimensions to shift; defaults to the last two.  (Fixed: the default
        was a mutable list; lists passed by callers still work.)
    inverse : bool
        whether the shift is for fft or ifft

    Returns
    -------
    shifted array
    """
    if len(axis) > len(x.shape):
        raise ValueError('Not enough axis to shift around!')
    y = x
    for axe in axis:
        dim_size = x.shape[axe]
        shift = dim_size // 2
        # For odd sizes the inverse shift must move one element further so
        # that ifftshift exactly undoes fftshift.
        if inverse and dim_size % 2 != 0:
            shift += 1
        y = torch.roll(y, shift, axe)
    return y
async def test_flow_user_invalid_auth(hass: HomeAssistant) -> None:
    """Test user initialized flow with invalid authentication."""
    # First attempt: the Tautulli client raises an auth error, so the form
    # is shown again with an "invalid_auth" error.
    with patch_config_flow_tautulli(AsyncMock()) as tautullimock:
        tautullimock.side_effect = exceptions.PyTautulliAuthenticationException
        first_result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=CONF_DATA
        )
    assert first_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert first_result["step_id"] == "user"
    assert first_result["errors"]["base"] == "invalid_auth"

    # Second attempt with a working client: the config entry is created.
    with patch_config_flow_tautulli(AsyncMock()):
        retry_result = await hass.config_entries.flow.async_configure(
            first_result["flow_id"],
            user_input=CONF_DATA,
        )
        await hass.async_block_till_done()
    assert retry_result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert retry_result["title"] == NAME
    assert retry_result["data"] == CONF_DATA
def eval_ner(gold, sen, labels):
    """Evaluate NER predictions against gold labels.

    Entities of each type (LOC, MISC, ORG, PER) are extracted span-wise
    from both sequences and compared exactly.

    Args:
        gold: gold label sequence.
        sen: predicted label sequence (must match ``gold`` in length).
        labels: mapping from tag name (e.g. "B-LOC") to label id.

    Returns:
        tuple: (true positives, false positives, false negatives).

    Raises:
        ValueError: if the sequences differ in length.  (Fixed: the
            original raised a bare string, which is a TypeError in Python 3.)
    """
    if len(gold) != len(sen):
        print(len(gold), len(sen))
        raise ValueError("lengths not equal")
    tp = 0
    fp = 0
    fn = 0
    list_en = ["LOC", "MISC", "ORG", "PER"]
    for en in list_en:
        g = list_entities(gold, labels["B-" + en], labels["I-" + en])
        s = list_entities(sen, labels["B-" + en], labels["I-" + en])
        # Gold spans found in the prediction are hits; missing ones are misses.
        for loc in g:
            if loc in s:
                tp += 1
            else:
                fn += 1
        # Predicted spans absent from gold are false alarms.
        for loc in s:
            if loc not in g:
                fp += 1
    return (tp, fp, fn)
def generate_data(m: int = 1000, n: int = 30, sigma: int = 40):
    """Generate a synthetic regression problem.

    To experiment with your own data, just replace the contents of this
    function with code that loads your dataset.

    Args
    ----
    m : int
        The number of examples.
    n : int
        The number of features per example.
    sigma : positive float
        The standard deviation of the additive noise.

    Returns
    -------
    X : np.array
        Featurized examples of shape (m, n).
    Y : np.array
        Observed labels of shape (m,).
    beta_star : np.array
        The true parameter we are trying to estimate.
    """
    beta_star = np.random.randn(n)
    # Build an ill-conditioned design matrix: draw a Gaussian matrix, then
    # replace its singular values with a spread from 30 down to 1.
    raw = np.random.randn(m, n)
    U, _, V = np.linalg.svd(raw)
    k = min(m, n)
    S = np.zeros((m, n))
    S[:k, :k] = np.diag(np.linspace(30, 1, k))
    X = U @ (S @ V)
    # Observations: linear model corrupted by additive Gaussian noise.
    Y = X @ beta_star + np.random.normal(0, sigma, size=m)
    return X, Y, beta_star
def handle_leave(message, text_message=None, failure_answer=None):
    """
    Leave the chat if the requesting user is in the userbase and leaving
    is possible.

    Args:
        message: incoming chat message object.
        text_message: unused; kept for interface compatibility.
        failure_answer: reply to send when the request is refused.  Defaults
            to a fresh random negation each call.  (Fixed: the old default
            called ``choice(...)`` at definition time, so every refusal
            reused the single phrase picked at import.)
    """
    if failure_answer is None:
        failure_answer = choice(DICTBASE['negation'])
    admin = False
    if (message.chat.type != 'private' and
            any(user['name'] == message.from_user.username for user in USERBASE)):
        answer = choice(DICTBASE['obedience'])
        admin = True
    else:
        answer = failure_answer
    grach.reply(message, grach.send_message, answer)
    if admin:
        grach.leave_chat(message.chat.id)
def read_datafile(path: str):
    """Read a flight data file and unpack it into an AltAccDump record.

    Logs a warning on an unexpected file size and raises ValueError when
    the stored checksum does not match the recomputed one.
    """
    with open(path, 'rb') as handle:
        raw = handle.read()
    if len(raw) != altacc_format.size:
        logging.warning(f"invalid data file length, {len(raw)} bytes!")
    flight = AltAccDump._make(altacc_format.unpack(raw))
    # Checksum covers everything but the trailing 4 bytes, modulo 16 bits.
    expected = sum(raw[:-4]) % 0x10000
    if flight.CkSum != expected:
        raise ValueError(f"checksum mismatch datafile={flight.CkSum} computed:{expected}")
    return flight
def get_compiler_option():
    """Determine the compiler used to build extension modules.

    Returns
    -------
    compiler : str
        The compiler option specified for the build, build_ext, or
        build_clib command; or the default compiler for the platform if
        none was specified.
    """
    chosen = get_distutils_build_option('compiler')
    if chosen is not None:
        return chosen
    return ccompiler.get_default_compiler()
def create_output_from_files(data_file_path: str, sheet_name: str, yaml_file_path: str, wikifier_filepath: str, output_filepath: str = None, output_format: str = "json"):
    """Create knowledge-graph output from input files, optionally saving it.

    Convenience wrapper equivalent to ``KnowledgeGraph.generate_from_files``
    followed by one of the save functions; also returns the generated data
    so the caller can examine or process it (same as
    ``KnowledgeGraph.get_output``).

    Args:
        data_file_path (str): location of the spreadsheet file
        sheet_name (str): name of the sheet being used; for csv files, name of the file
        yaml_file_path (str): location of the yaml file describing the region and template
        wikifier_filepath (str): location of the wikifier file used to create the item table
        output_filepath (str, optional): location to save output. Defaults to None.
        output_format (str, optional): accepts "json", "tsv" (or "kgtk"). Defaults to "json".

    Returns:
        str: the output data rendered in ``output_format``
    """
    graph = KnowledgeGraph.generate_from_files(
        data_file_path, sheet_name, yaml_file_path, wikifier_filepath)
    rendered = graph.get_output(output_format)
    if output_filepath:
        with open(output_filepath, 'w', encoding="utf-8") as out_file:
            out_file.write(rendered)
    return rendered
def delete_article(index_id, task_id, document_id):
    """Delete one document (or all of a task's documents) from ElasticSearch.

    Args:
        index_id: target index.
        task_id: target task.
        document_id: target document id, or the sentinel "_all" to clear
            every document belonging to the task.
    """
    # Resolved unconditionally, matching the original call order.
    doc_id = DocumentTools.get_doc_id(index_id, task_id, document_id)
    if document_id == "_all":
        BaseTaskController.clear_all_docs(index_id, task_id)
        return
    Search(index=index_id).query("match", doc_id=doc_id).delete()
def test_creation_single_row():
    """ test the creation of DataClass objects from dicts or arrays;
    single row only"""
    # ground truth tables - compare against these tables
    # Cases cover: plain values, fully-united values, mixed units/plain,
    # strings mixed with numbers.
    ground_truth_1 = QTable([[1], [2], [3]], names=('aa', 'bb', 'cc'))
    ground_truth_2 = QTable([[1]*u.m, [2]*u.kg, [3]*u.cm/u.s],
                            names=('aa', 'bb', 'cc'))
    ground_truth_3 = QTable([[1]*u.m, [2], [3]*u.kg], names=('aa', 'bb', 'cc'))
    ground_truth_4 = QTable([[1], ['stuff'], [3]], names=('aa', 'bb', 'cc'))
    ground_truth_5 = QTable([[1]*u.km, [2], ['test']],
                            names=('aa', 'bb', 'cc'))

    # test DataClass.from_dict for different cases
    test_dict_1 = DataClass.from_dict(OrderedDict([('aa', 1), ('bb', 2),
                                                   ('cc', 3)]))
    test_dict_2 = DataClass.from_dict(OrderedDict([('aa', 1*u.m),
                                                   ('bb', 2*u.kg),
                                                   ('cc', 3*u.cm/u.s)]))
    test_dict_3 = DataClass.from_dict(OrderedDict([('aa', 1*u.m), ('bb', 2),
                                                   ('cc', 3*u.kg)]))
    test_dict_4 = DataClass.from_dict(OrderedDict([('aa', 1), ('bb', 'stuff'),
                                                   ('cc', 3)]))
    test_dict_5 = DataClass.from_dict(OrderedDict([('aa', 1*u.km), ('bb', 2),
                                                   ('cc', 'test')]))
    assert test_dict_1.table == ground_truth_1
    assert test_dict_2.table == ground_truth_2
    assert test_dict_3.table == ground_truth_3
    assert test_dict_4.table == ground_truth_4
    assert test_dict_5.table == ground_truth_5

    # test DataClass.from_rows for different cases (same ground truths)
    test_array_1 = DataClass.from_rows([1, 2, 3],
                                       names=('aa', 'bb', 'cc'))
    test_array_2 = DataClass.from_rows([1*u.m, 2*u.kg, 3*u.cm/u.s],
                                       names=('aa', 'bb', 'cc'))
    test_array_3 = DataClass.from_rows([1*u.m, 2, 3*u.kg],
                                       names=('aa', 'bb', 'cc'))
    test_array_4 = DataClass.from_rows([1, 'stuff', 3],
                                       names=('aa', 'bb', 'cc'))
    test_array_5 = DataClass.from_rows([1*u.km, 2, 'test'],
                                       names=('aa', 'bb', 'cc'))
    assert test_array_1.table == ground_truth_1
    assert test_array_2.table == ground_truth_2
    assert test_array_3.table == ground_truth_3
    assert test_array_4.table == ground_truth_4
    assert test_array_5.table == ground_truth_5

    # test single row, single column
    ground_truth_6 = QTable([[1]], names=('aa',))
    test_dict_6 = DataClass.from_dict({'aa': 1})
    assert test_dict_6.table == ground_truth_6
    test_array_6 = DataClass.from_rows([1], names='aa')
    assert test_array_6.table == ground_truth_6

    # test units parameter: plain values plus a separate units tuple must
    # match the directly-united ground truths
    test_array_2b = DataClass.from_rows([1, 2, 3],
                                        names=('aa', 'bb', 'cc'),
                                        units=(u.m, u.kg, u.cm/u.s))
    test_array_3b = DataClass.from_rows([1, 2, 3],
                                        names=('aa', 'bb', 'cc'),
                                        units=(u.m, None, u.kg))
    assert test_array_2b.table == ground_truth_2
    assert test_array_3b.table == ground_truth_3

    ground_truth_7 = QTable([[1]*u.kg], names=('aa',))
    test_array_7 = DataClass.from_rows([1], names='aa', units='kg')
    assert test_array_7.table == ground_truth_7

    # mismatched names/units lengths must raise
    with pytest.raises(DataClassError):
        DataClass.from_rows([1], names='aa', units=('m', 'kg'))

    # test single row starting with string
    ground_truth_8 = QTable(rows=[['a', 1]], names=('aa', 'bb'))
    test_array_8 = DataClass.from_rows(['a', 1], names=('aa', 'bb'))
    assert ground_truth_8 == test_array_8.table
def make_new_images(dataset, imgs_train, imgs_val):
    """
    Split a COCO-style dataset's images and annotations into train and val
    parts according to the img ids in imgs_train, imgs_val.

    Args:
        dataset: dict with 'info', 'licenses', 'images', 'annotations' and
            'categories' keys; each annotation carries an 'image_id'.
        imgs_train: iterable of image ids for the training split.
        imgs_val: iterable of image ids for the validation split.

    Returns:
        tuple: (dataset_train, dataset_val) dicts with the same keys.
    """
    from collections import defaultdict

    table_imgs = {x['id']: x for x in dataset['images']}
    # Group ALL annotations per image.  The previous dict comprehension kept
    # only the last annotation per image_id, silently dropping the rest, and
    # raised KeyError for images with no annotations at all.
    table_anns = defaultdict(list)
    for ann in dataset['annotations']:
        table_anns[ann['image_id']].append(ann)

    def _subset(img_ids):
        # One split; info/licenses/categories are shared with the source.
        return {
            'info': dataset['info'],
            'licenses': dataset['licenses'],
            'categories': dataset['categories'],
            'images': [table_imgs[i] for i in img_ids],
            'annotations': [a for i in img_ids for a in table_anns[i]],
        }

    return _subset(imgs_train), _subset(imgs_val)
def test_similarity_sample_multiprocess(pk_target):
    """Test similarity cutoff filter"""
    def quartic_weight(T):
        return T ** 4

    similarity_filter = SimilaritySamplingFilter(sample_size=10, weight=quartic_weight)
    pk_target.react_targets = True
    pk_target.filters.append(similarity_filter)
    pk_target.transform_all(processes=2, generations=2)
    # Filter must return less compounds than non-filter
    # Non-deterministic results, so no exact value can be used
    assert len(pk_target.compounds) < 1452
def compute_maximum_ts_map(ts_map_results):
    """
    Compute maximum TS map across a list of given ts maps.

    Parameters
    ----------
    ts_map_results : list
        List of `~gammapy.image.SkyImageCollection` objects.

    Returns
    -------
    images : `~gammapy.image.SkyImageCollection`
        Images (ts, niter, amplitude)
    """
    # Stack the per-scale maps along a new third axis.
    ts = np.dstack([r.ts for r in ts_map_results])
    niter = np.dstack([r.niter for r in ts_map_results])
    amplitude = np.dstack([r.amplitude for r in ts_map_results])
    scales = [r.scale for r in ts_map_results]

    ts_max = np.max(ts, axis=2)
    shape = ts.shape[:-1]
    scale_max = np.zeros(shape)
    niter_max = np.zeros(shape)
    amplitude_max = np.zeros(shape)
    # Record, per pixel, the niter/amplitude of the scale that achieved the
    # maximum.  On ties the last matching scale wins (original loop order).
    for i, scale in enumerate(scales):
        winners = np.where(ts[:, :, i] == ts_max)
        scale_max[winners] = scale
        niter_max[winners] = niter[:, :, i][winners]
        amplitude_max[winners] = amplitude[:, :, i][winners]

    meta = {'MORPH': (ts_map_results[0].morphology, 'Source morphology assumption')}
    return SkyImageCollection(ts=ts_max, niter=niter_max, amplitude=amplitude_max,
                              meta=meta)
async def test_form_uuid_error(hass: HomeAssistant) -> None:
    """Test we handle error from bad uuid."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert init_result["errors"] is None

    # Submit user input while the bridge returns a system payload carrying a
    # malformed uuid; the flow should surface this as "cannot_connect".
    with patch("systembridgeconnector.websocket_client.WebSocketClient.connect"), patch(
        "systembridgeconnector.websocket_client.WebSocketClient.get_data"
    ), patch(
        "systembridgeconnector.websocket_client.WebSocketClient.receive_message",
        return_value=FIXTURE_DATA_SYSTEM_BAD,
    ):
        submit_result = await hass.config_entries.flow.async_configure(
            init_result["flow_id"], FIXTURE_USER_INPUT
        )
        await hass.async_block_till_done()

    assert submit_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert submit_result["step_id"] == "user"
    assert submit_result["errors"] == {"base": "cannot_connect"}
def compute_dist(array1, array2, type='euclidean'):
    """Compute the euclidean or cosine distance of all pairs.

    Args:
        array1: numpy array with shape [m1, n]
        array2: numpy array with shape [m2, n]
        type: one of ['cosine', 'euclidean']

    Returns:
        numpy array with shape [m1, m2]

    NOTE(review): for ``type='cosine'`` this returns the cosine
    *similarity* (dot product of L2-normalized rows), not a distance --
    kept as-is for caller compatibility.
    """
    assert type in ['cosine', 'euclidean']
    if type == 'cosine':
        # Row-wise L2 normalization.  (Fixed: the original called
        # ``self.normalize`` inside a free function, which is a NameError.)
        array1 = array1 / np.linalg.norm(array1, axis=1, keepdims=True)
        array2 = array2 / np.linalg.norm(array2, axis=1, keepdims=True)
        return np.matmul(array1, array2.T)
    # Squared euclidean via the expansion |a-b|^2 = |a|^2 + |b|^2 - 2ab.
    square1 = np.sum(np.square(array1), axis=1)[..., np.newaxis]   # [m1, 1]
    square2 = np.sum(np.square(array2), axis=1)[np.newaxis, ...]   # [1, m2]
    squared_dist = -2 * np.matmul(array1, array2.T) + square1 + square2
    # Clamp tiny negatives caused by floating-point cancellation.
    squared_dist[squared_dist < 0] = 0
    return np.sqrt(squared_dist)
def clicr_to_concept_txt(train_file="/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/train1.0.json", out_file="/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/train1.0_concepts.txt"):
    """
    Prepare a single txt file with entities marked as @ent_a_b.
    One sentence per line, lowercased.
    """
    def _write_marked_lines(out_f, text):
        # Emit each non-empty line with entity markers applied, lowercased.
        # (Shared by the title and context loops, which were duplicated.)
        for line in text.split("\n"):
            if not line.strip():
                continue
            out_f.write(to_entities(line, ent_marker="@ent_").lower() + "\n")

    dataset = load_json(train_file)
    with open(out_file, "w") as out_f:
        for datum in dataset[DATA_KEY]:
            _write_marked_lines(out_f, datum[DOC_KEY][TITLE_KEY])
            _write_marked_lines(out_f, datum[DOC_KEY][CONTEXT_KEY])
            for qa in datum[DOC_KEY][QAS_KEY]:
                q = qa[QUERY_KEY]
                for a in qa[ANS_KEY]:
                    if a["origin"] == "dataset":
                        q = q.replace("@placeholder", a[TXT_KEY])
                    # NOTE(review): the query line is emitted once per answer
                    # (and never for answerless questions), as in the
                    # original -- confirm the duplication is intended.
                    out_f.write(to_entities(q, ent_marker="@ent_").lower() + "\n")
def timer(step, callback, *args):
    """Start a Twisted TimerService firing ``callback(*args)`` every
    ``step`` seconds and return the running service."""
    service = internet.TimerService(step, callback, *args)
    service.startService()
    return service
def clean_tfreocrds(input_file, output_file):
    """Copy TFRecord examples whose labels are not all identical.

    Examples whose 'label' float list holds a single distinct value are
    dropped (their labels are printed); everything else is written verbatim
    to ``output_file``.

    Note: the function name carries a historical typo ("tfreocrds"); it is
    kept for backward compatibility with existing callers.
    """
    dropped = 0
    # Context manager guarantees the writer is flushed and closed even if
    # iteration raises (the original leaked it on error).
    with tf.python_io.TFRecordWriter(output_file) as writer:
        for example in tf.python_io.tf_record_iterator(input_file):
            result = tf.train.Example.FromString(example)
            labels = result.features.feature['label'].float_list.value
            # the labels should have at least 2 different values
            if len(set(labels)) != 1:
                writer.write(example)
            else:
                print(labels)
                dropped += 1
    print(str(dropped) + ' examples has the same labels')
def download(date_array, tag, sat_id, data_path, user=None, password=None):
    """
    Download SuperDARN data from Virginia Tech organized for loading by pysat.

    Parameters
    ----------
    date_array : iterable of datetime
        Days to download.
    tag : str
        Data product tag used in the remote directory and file names.
    sat_id : str
        Unused here; kept for the pysat download interface.
    data_path : str
        Local directory in which to store downloaded files.
    user, password : str, optional
        SFTP credentials; default to the DBREADUSER / DBREADPASS
        environment variables.
    """
    import bz2
    import os
    import shutil
    import sys

    import pysftp
    import davitpy

    if user is None:
        user = os.environ['DBREADUSER']
    if password is None:
        password = os.environ['DBREADPASS']

    with pysftp.Connection(
            os.environ['VTDB'],
            username=user,
            password=password) as sftp:
        for date in date_array:
            myDir = '/data/' + date.strftime("%Y") + '/grdex/' + tag + '/'
            fname = date.strftime("%Y%m%d") + '.' + tag + '.grdex'
            local_fname = fname + '.bz2'
            saved_fname = os.path.join(data_path, local_fname)
            full_fname = os.path.join(data_path, fname)
            try:
                print('Downloading file for ' + date.strftime('%D'))
                sys.stdout.flush()
                sftp.get(myDir + local_fname, saved_fname)
                # Decompress with the stdlib instead of shelling out to
                # bunzip2/rm: portable, and no shell interpretation of the
                # file paths.
                with bz2.open(saved_fname, 'rb') as src, \
                        open(full_fname, 'wb') as dst:
                    shutil.copyfileobj(src, dst)
                os.remove(saved_fname)
            except IOError:
                print('File not available for ' + date.strftime('%D'))
    return
def select_file(title: str) -> str:
    """Show a file-open dialog and return the path the user selected."""
    import tkinter as tk
    from tkinter import filedialog

    root = tk.Tk()
    root.withdraw()  # hide the bare root window; only the dialog is shown
    return filedialog.askopenfilename(title=title)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.