max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
manage.py | LimaGuilherme/flask-boilerplate | 0 | 12772751 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
import sys
from app import initialize
manager = Manager(initialize.web_app)
def register_migrate(manager):
    """Attach Flask-Migrate's 'db' command group to the given manager.

    Imports the models lazily so the database metadata is only loaded when a
    migration command is actually requested. Returns the Migrate instance so
    the caller can keep a reference to it.
    """
    from app import models
    migration = Migrate(initialize.web_app, models.db)
    manager.add_command('db', MigrateCommand)
    return migration
if __name__ == '__main__':
    # Only wire up Flask-Migrate when a 'db' subcommand was requested on the
    # command line; otherwise run the manager with the plain command set.
    if 'db' in sys.argv:
        migrate = register_migrate(manager)
    manager.run()
| 2.09375 | 2 |
tests/generators/finality/main.py | jmyllyla/eth2.0-specs | 1 | 12772752 | from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
altair_mods = phase_0_mods # No additional Altair specific finality tests
merge_mods = phase_0_mods # No additional Merge specific finality tests
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
MERGE: spec_merge,
}
run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)
| 1.648438 | 2 |
keras-yolo3-Traffic Sign/detect.py | qqww12345/001_AI_project | 0 | 12772753 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'detect.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated PyQt5 UI class (from detect.ui, pyuic5 5.12).

    Do not hand-edit the widget layout here; regenerate from the .ui file
    instead — manual changes will be lost on the next generation.
    """

    def setupUi(self, MainWindow):
        """Creates and lays out all widgets on the given QMainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Button that triggers the image-open dialog.
        self.openimage = QtWidgets.QPushButton(self.centralwidget)
        self.openimage.setGeometry(QtCore.QRect(20, 180, 75, 23))
        self.openimage.setObjectName("openimage")
        # Label used as the display surface for the loaded image.
        self.showimage = QtWidgets.QLabel(self.centralwidget)
        self.showimage.setGeometry(QtCore.QRect(100, 20, 401, 451))
        self.showimage.setObjectName("showimage")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Applies (translatable) display strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        # Button caption is Chinese for "open image" (runtime string — kept).
        self.openimage.setText(_translate("MainWindow", "打开图片"))
        self.showimage.setText(_translate("MainWindow", "TextLabel"))
| 1.851563 | 2 |
model_search/phoenix.py | LinqCod/model_search | 0 | 12772754 | <filename>model_search/phoenix.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Phoenix estimator builder."""
import functools
import inspect
from absl import logging
import keras_tuner
from model_search import controller
from model_search import hparam as hp
from model_search import loss_fns
from model_search import metric_fns
from model_search import task_manager
from model_search.architecture import architecture_utils
from model_search.generators import base_tower_generator
from model_search.generators import trial_utils
from model_search.meta import transfer_learning
from model_search.metadata import ml_metadata_db
from model_search.proto import phoenix_spec_pb2
from model_search.proto import transfer_learning_spec_pb2
import numpy as np
import tensorflow.compat.v2 as tf
# File name under the model directory where the replay spec is written.
REPLAY_CONFIG_FILENAME = "replay_config.pbtxt"

# Maps transfer-learning type enum values to the hook class implementing them.
_TL_HOOKS = {
    transfer_learning_spec_pb2.TransferLearningSpec
    .UNIFORM_AVERAGE_TRANSFER_LEARNING:
        transfer_learning.UniformAverageTransferLearningHook,
    transfer_learning_spec_pb2.TransferLearningSpec
    .LOSS_WEIGHTED_AVERAGE_TRANSFER_LEARNING:
        transfer_learning.LossWeightedAverageTransferLearningHook,
}
def aggregate_initial_architecture(hparams):
  """Helper function to aggregate initial architecture into an array hparam.

  Args:
    hparams: A dict of hyperparameter values, possibly containing flattened
      "initial_architecture_<i>" entries for consecutive i starting at 0.

  Returns:
    A shallow copy of `hparams` with an added "initial_architecture" list
    assembled from the indexed entries (absent when there are none). The
    input dict is not mutated.
  """
  output = hparams.copy()
  # Fix: the original comprehension variable was named `hp`, shadowing the
  # `hp` (hparam) module imported at file level.
  initial_architecture_size = len(
      [name for name in hparams.keys()
       if name.startswith("initial_architecture_")])
  if initial_architecture_size:
    output["initial_architecture"] = [
        hparams["initial_architecture_{}".format(i)]
        for i in range(initial_architecture_size)
    ]
  return output
def _merge_hparams(original_hparams, overrides):
  """Merges two hp.HParams objects, with `overrides` taking precedence.

  Keys already present in `original_hparams` are overridden; unknown keys are
  added as new hparams. Neither input is mutated.
  """
  merged = hp.HParams(**original_hparams.values())
  to_override = {}
  for key, value in overrides.values().items():
    if key in merged:
      to_override[key] = value
    else:
      merged.add_hparam(key, value)
  merged.override_from_dict(to_override)
  return merged
def _default_predictions_fn(logits,
                            mode=tf.estimator.ModeKeys.TRAIN,
                            temperature=1.0):
  """Converts logits to a predictions dict. Assumes classification.

  In PREDICT mode a non-unit temperature rescales the logits before the
  softmax; the raw logits are always returned unscaled under "logits".
  """
  scaled_logits = logits
  if mode == tf.estimator.ModeKeys.PREDICT and temperature != 1.0:
    assert temperature > 0
    inverse_temperature = tf.constant(
        1 / temperature, name="softmax_temperature_const")
    logging.info("Applying temperature to logits")
    scaled_logits = tf.multiply(
        logits, inverse_temperature, name="softmax_temperature_mul")
  return {
      "predictions": tf.math.argmax(input=scaled_logits, axis=-1),
      "probabilities": tf.nn.softmax(scaled_logits),
      "log_probabilities": tf.nn.log_softmax(scaled_logits),
      "logits": logits,
  }
class Estimator(tf.estimator.Estimator):
  """Estimator wrapper to add reporting to metadata storage after evaluation."""

  def __init__(self,
               model_fn,
               model_dir=None,
               config=None,
               params=None,
               warm_start_from=None,
               metadata=None):
    # Disable tf.estimator's member-override assertion so this subclass may
    # override `evaluate` below.
    tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod(  # pylint: disable=protected-access
        lambda _: None)
    super(Estimator, self).__init__(
        model_fn=model_fn,
        config=config,
        params=params,
        warm_start_from=warm_start_from)
    self._metadata = metadata
    # NOTE(review): the `model_dir` argument is accepted but never used; the
    # directory is always taken from `config.model_dir` — confirm intended.
    self._model_dir = config.model_dir

  def evaluate(self,
               input_fn,
               steps=None,
               hooks=None,
               checkpoint_path=None,
               name=None):
    """Runs evaluation, then reports the metrics to metadata storage."""
    eval_results = super(Estimator, self).evaluate(
        input_fn=input_fn,
        steps=steps,
        hooks=hooks,
        checkpoint_path=checkpoint_path,
        name=name)
    if self._metadata is not None:
      # Convert numpy scalar metric values to native Python types via .item()
      # so the metadata backend receives plain ints/floats.
      native_results = {k: v.item() for k, v in eval_results.items()}
      logging.info("Saving the following evaluation dictionary.")
      logging.info(native_results)
      self._metadata.report(native_results, self._model_dir)
    return eval_results
class Phoenix(object):
"""Phoenix: A smart search AutoML algorithm."""
def __init__(self,
             phoenix_spec,
             input_layer_fn,
             study_owner,
             study_name,
             head=None,
             logits_dimension=None,
             label_vocabulary=None,
             loss_fn=None,
             metric_fn=None,
             predictions_fn=None,
             metadata=None,
             optimizer_fn=None):
  """Constructs a Phoenix instance.

  Args:
    phoenix_spec: A `PhoenixSpec` proto with the spec for the run.
    input_layer_fn: A function that converts feature Tensors to input layer.
      See learning.autolx.model_search.data.Provider.get_input_layer_fn
      for details.
    study_owner: A string holding the ldap of the study owner. We use tuner
      platforms to conduct the various architectures training. This field
      specifies the study owner.
    study_name: A string holding the study name.
    head: A head to use with Phoenix for creating the loss and eval metrics.
      If no head is given, Phoenix falls back to using the loss_fn and
      metric_fn. N.B.: Phoenix creates its own EstimatorSpec so everything
      besides the loss and eval metrics returned by head will be ignored.
    logits_dimension: An int holding the dimension of the output. Must be
      provided if head is None. Will be ignored if head is not None.
    label_vocabulary: List or tuple with labels vocabulary. Needed only if
      the labels are of type string. This list is used by the loss function
      if loss_fn is not provided. It is also used in the metric function to
      create the accuracy metric ops. Use only with multiclass
      classification problems.
    loss_fn: A function to compute the loss. Ignored if `head` is not None.
      Must accept as inputs a `labels` Tensor, a `logits` Tensor, and
      a `weights` Tensor. `weights` must either be rank 0 or have
      the same rank as labels. If None, Phoenix defaults to using softmax
      cross-entropy.
      Additional `params` holding the hparameters of the trial can be added
      as input to the signature.
      For multitask, you have the option to pass a dict of functions keyed
      by the task name to apply different loss functions for different
      tasks.
    metric_fn: Metrics for Tensorboard. Ignored if `head` is not None.
      metric_fn takes `label` and `predictions` as input, and outputs a
      dictionary of (tensor, update_op) tuples. `label` is a Tensor (in the
      single task case) or a dict of Tensors (in the case of multi-task,
      where the key of the dicts correspond to the task names).
      `predictions` is a dict of Tensors. In the single task case, it
      consists of `predictions`, `probabilities`, and `log_probabilities`.
      In the multi-task case, it consists of the same keys as that of the
      single task case, but also those corresponding to each task (e.g.,
      predictions/task_name_1). See `metric_fns` for more detail. If
      `metric_fn` is None, it will include a metric for the number of
      parameters, accuracy (if logit_dimensions >= 2), and AUC metrics (if
      logit_dimensions == 2).
    predictions_fn: A function to convert eval logits to the
      `predictions` dictionary passed to metric_fn. If `None`, defaults to
      computing 'predictions', 'probabilities', and 'log_probabilities'.
      For multitask, you have the option to pass a dict of functions keyed
      by the task name to apply different prediction functions for
      different tasks.
    metadata: An object that implements metadata api in
      learning.adanets.phoenix.metadata.Metadata
    optimizer_fn: A function that follows two possible signatures: 1. takes
      `params` as args and returns a tensorflow v1 optimizer instance.
      2. A function with no args that returns a tensorflow v1 optimizer.
      Please keep as None to use our default optimizers (i.e. let the
      search choose an optimizer).
  """
  # Check Phoenix preconditions and fail early if any of them are broken.
  if phoenix_spec.multi_task_spec:
    # TODO(b/172564129): Add support for head and custom loss_fns in
    # multi-task.
    assert not head, "head is not supported for multi-task."
  if head:
    # head already carries the output dimension, vocabulary, loss and
    # metrics, so none of these may be specified alongside it.
    msg = "Do not specify {} when using head as head already contains it."
    assert not logits_dimension, msg.format("logits_dimension")
    assert not label_vocabulary, msg.format("label_vocabulary")
    assert not loss_fn, msg.format("loss_fn")
    assert not metric_fn, msg.format("metric_fn")

  # Check ensemble search / distillation preconditions: when both are
  # configured (and not intermixed), identical pool sizes make distillation
  # unreachable — warn rather than fail.
  ensemble_spec = phoenix_spec.ensemble_spec
  distillation_spec = phoenix_spec.distillation_spec
  if trial_utils.has_distillation(
      distillation_spec) and trial_utils.has_ensemble_search(
          ensemble_spec
      ) and not trial_utils.is_intermixed_ensemble_search(ensemble_spec):
    ensemble_search_spec = (
        ensemble_spec.nonadaptive_search
        if trial_utils.is_nonadaptive_ensemble_search(ensemble_spec) else
        ensemble_spec.adaptive_search)
    if (distillation_spec.minimal_pool_size ==
        ensemble_search_spec.minimal_pool_size):
      logging.warning("minimal_pool_size is the same for ensemble spec and "
                      "distillation spec, so distillation will be ignored.")

  self._phoenix_spec = phoenix_spec
  self._input_layer_fn = input_layer_fn
  self._study_owner = study_owner
  self._study_name = study_name
  self._head = head
  # When a head is supplied its logits dimension wins over the argument.
  self._logits_dimension = (
      self._head.logits_dimension if head else logits_dimension)
  self._label_vocabulary = label_vocabulary
  if self._label_vocabulary:
    assert self._logits_dimension == len(self._label_vocabulary)
  # Default loss: multi-class softmax cross-entropy over the vocabulary.
  self._loss_fn = loss_fn or loss_fns.make_multi_class_loss_fn(
      label_vocabulary=label_vocabulary)
  self._user_specified_metric_fn = metric_fn
  self._predictions_fn = (predictions_fn or _default_predictions_fn)
  if metadata is None:
    self._metadata = ml_metadata_db.MLMetaData(phoenix_spec, study_name,
                                               study_owner)
  else:
    self._metadata = metadata
  self._task_manager = task_manager.TaskManager(
      phoenix_spec=phoenix_spec,
      logits_dimension=logits_dimension,
      loss_fn=self._loss_fn,
      head=self._head)
  self._controller = controller.InProcessController(
      phoenix_spec=phoenix_spec, metadata=self._metadata)
  self._user_optimizer_fn = optimizer_fn
@property
def metadata(self):
  """Returns the metadata store used to record and read trial results."""
  return self._metadata
def keras_compile(self, towers, hparams):
  """Compiles the keras model based on hparams.

  Builds the optimizer (learning-rate schedule, gradient clipping, choice of
  optimizer class) from `hparams` and compiles the search generator's first
  tower with a sparse categorical cross-entropy loss.
  """
  optimizer_args = dict()
  # Learning rate
  lr = hparams.learning_rate
  if getattr(hparams, "exponential_decay_rate", None) is not None:
    max_times = self._phoenix_spec.learning_spec.max_decay_times
    steps = hparams.exponential_decay_steps
    decay = hparams.exponential_decay_rate
    # NOTE(review): values starts at lr * decay**1, so the undecayed lr is
    # never used as the schedule's first value — confirm intended.
    lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries=[steps * j for j in range(1, max_times)],
        values=[lr * (decay**i) for i in range(1, max_times + 1)])
  optimizer_args["learning_rate"] = lr
  # Gradient Clip
  if getattr(hparams, "gradient_max_norm", None) is not None:
    optimizer_args["clipnorm"] = hparams.gradient_max_norm
  # L2 Error
  if getattr(hparams, "l2_regularization", None) is not None:
    logging.error("Keras mode doesn't support L2 regularization.")
  # Optimizer
  optimizer = None
  if hparams.optimizer == "sgd":
    optimizer = tf.keras.optimizers.SGD(**optimizer_args)
  elif hparams.optimizer == "momentum":
    optimizer = tf.keras.optimizers.SGD(**optimizer_args, momentum=0.9)
  elif hparams.optimizer == "adam":
    optimizer = tf.keras.optimizers.Adam(**optimizer_args)
  elif hparams.optimizer == "adagrad":
    optimizer = tf.keras.optimizers.Adagrad(**optimizer_args)
  elif hparams.optimizer == "rmsprop":
    optimizer = tf.keras.optimizers.RMSprop(**optimizer_args)
  elif hparams.optimizer == "lazy_adam":
    # Keras has no LazyAdam; degrade gracefully to Adam.
    logging.error("Lazy adam is not implemented in Keras. Falling back to "
                  "Adam")
    optimizer = tf.keras.optimizers.Adam(**optimizer_args)
  # Keras mode only compiles the search generator's single tower.
  towers["search_generator"][0].compile(
      optimizer=optimizer,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
  return towers["search_generator"][0]
def keras_model_builder(self,
                        hparams,
                        run_config=None,
                        is_training=None,
                        input_layer_fn=None,
                        compile_model=True):
  """Builds a keras model based on hparams.

  When `compile_model` is True (pure keras mode) this returns a compiled
  keras model; otherwise it returns `(towers, trials)` for estimator-mode
  callers (see `_make_model_fn`).
  """
  if compile_model:
    # Validate keras mode config is correct: ensembling and distillation are
    # unsupported in keras mode, so strip them from the spec.
    if self._phoenix_spec.HasField("ensemble_spec"):
      logging.error("Cannot run ensembling in keras mode.")
      self._phoenix_spec.ClearField("ensemble_spec")
    if self._phoenix_spec.HasField("distillation_spec"):
      logging.error("Cannot run distillation in keras mode.")
      self._phoenix_spec.ClearField("distillation_spec")
  run_config_ = run_config
  if run_config is None:
    run_config_ = tf.estimator.RunConfig()
  # Normalize the three accepted hparams flavors (hp.HParams, keras_tuner
  # HyperParameters, plain dict-like) to an hp.HParams instance.
  if isinstance(hparams, hp.HParams):
    params = hparams
  elif isinstance(hparams,
                  keras_tuner.engine.hyperparameters.HyperParameters):
    parameter_values = aggregate_initial_architecture(hparams.values)
    params = hp.HParams(**parameter_values)
  else:
    parameter_values = aggregate_initial_architecture(hparams.values())
    params = hp.HParams(**parameter_values)
  logging.info(run_config_)
  logging.info(run_config_.model_dir)
  my_id = architecture_utils.DirectoryHandler.get_trial_id(
      run_config_.model_dir, self._phoenix_spec)
  # Get all information we have so far.
  trials = []
  # TODO(b/172564129): Only the chief needs the trials. Test to see if
  # workers need them
  if not self._phoenix_spec.HasField("replay"):
    trials = self._metadata.get_completed_trials()
  else:
    # Replay mode: force the hparams and architecture recorded for this
    # trial instead of consulting completed trials.
    params = _merge_hparams(
        params,
        hp.HParams.from_proto(self._phoenix_spec.replay.towers[my_id -
                                                               1].hparams))
    params.set_hparam(
        "initial_architecture",
        self._phoenix_spec.replay.towers[my_id - 1].architecture[:])
  # Update our database - clean up and sync ops.
  if run_config_.is_chief:
    self._metadata.before_generating_trial_model(my_id, run_config_.model_dir)
  # Determine whether to do ensemble search or distillation on this trial.
  trial_mode = trial_utils.get_trial_mode(
      self._phoenix_spec.ensemble_spec, self._phoenix_spec.distillation_spec,
      my_id)
  generators = self._controller.get_generators(my_id, trials)
  towers = {}
  for name, generator in generators.items():
    logging.info(generators)
    tower = generator.instance.generate(
        input_layer_fn=input_layer_fn,
        trial_mode=trial_mode,
        logits_dimension=self._logits_dimension,
        hparams=params,
        run_config=run_config_,
        is_training=is_training,
        trials=generator.relevant_trials)
    towers[name] = tower
  # Wire the input layer: either one shared input for all towers, or one
  # per tower that does not already have an input tensor.
  if input_layer_fn is not None and self._phoenix_spec.is_input_shared:
    shared_input_tensor, shared_lengths = input_layer_fn(
        is_training=is_training, scope_name="Phoenix/SharedInput")
    for generator_towers in towers.values():
      for tower in generator_towers:
        tower.add_feature_columns_input_layer(shared_input_tensor,
                                              shared_lengths)
  elif input_layer_fn is not None:
    logging.info(towers)
    for generator_towers in towers.values():
      for tower in generator_towers:
        if not tower.has_input_tensor():
          input_tensor, lengths = input_layer_fn(
              is_training=is_training,
              scope_name="{}/input".format(tower.name))
          tower.add_feature_columns_input_layer(input_tensor, lengths)
  if compile_model:
    # Keras mode: Search only
    return self.keras_compile(towers, params)
  return towers, trials
def _make_model_fn(self, run_config, train_steps, use_tpu=False):
  """Returns a model_fn for the estimator.

  The returned closure builds the towers via `keras_model_builder`, creates
  the task model spec, installs transfer-learning hooks, and produces either
  a TPUEstimatorSpec or an EstimatorSpec depending on `use_tpu`.
  """

  def model_fn(features, labels, mode, params):
    """Model function that wraps the model specified."""
    self._metric_fn = self._user_specified_metric_fn
    # Default metrics: accuracy for multi-class, AUC metrics for binary.
    self._default_metric_fn_list = []
    if self._logits_dimension >= 2:
      self._default_metric_fn_list.append(
          metric_fns.make_accuracy_metric_fn(self._label_vocabulary))
    if self._logits_dimension == 2:
      self._default_metric_fn_list += [
          metric_fns.make_auc_roc_metric_fn(self._label_vocabulary),
          metric_fns.make_auc_pr_metric_fn(self._label_vocabulary)
      ]
    my_id = architecture_utils.DirectoryHandler.get_trial_id(
        run_config.model_dir, self._phoenix_spec)
    # Create a copy of hparams
    hparams = params
    # Early trials may take user-suggested hparams from the spec.
    if my_id <= len(self._phoenix_spec.user_suggestions):
      hparams = _merge_hparams(
          params,
          hp.HParams.from_proto(
              self._phoenix_spec.user_suggestions[my_id - 1].hparams))
    # When predicting for RNN, we might not need the length.
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    lengths_feature_name = self._phoenix_spec.lengths_feature_name
    if mode == tf.estimator.ModeKeys.PREDICT:
      if isinstance(features, dict) and lengths_feature_name not in features:
        lengths_feature_name = ""
    # Bind features (and hparams, when accepted) into the input layer fn.
    if "params" in inspect.signature(self._input_layer_fn).parameters:
      input_layer_fn = functools.partial(
          self._input_layer_fn,
          features=features,
          params=hparams,
          lengths_feature_name=lengths_feature_name)
    else:
      input_layer_fn = functools.partial(
          self._input_layer_fn,
          features=features,
          lengths_feature_name=lengths_feature_name)
    towers, trials = self.keras_model_builder(
        hparams=hparams,
        is_training=is_training,
        run_config=run_config,
        input_layer_fn=input_layer_fn,
        compile_model=False)
    # Call each tower once to build its graph.
    for generator_towers in towers.values():
      for tower in generator_towers:
        tower(None, training=is_training)
    trial_mode = trial_utils.get_trial_mode(
        self._phoenix_spec.ensemble_spec,
        self._phoenix_spec.distillation_spec, my_id)
    training_hooks = []
    # TODO(b/172564129): Figure out how to handle transfer learning for multi
    # task. Install transfer learning hook.
    if (is_training and
        self._phoenix_spec.transfer_learning_spec.transfer_learning_type in
        _TL_HOOKS):
      tower_name = base_tower_generator.SEARCH_GENERATOR
      vars_to_warm_start = architecture_utils.get_tower_variables(tower_name)
      if vars_to_warm_start:
        hook_fn = _TL_HOOKS[
            self._phoenix_spec.transfer_learning_spec.transfer_learning_type]
        tl_spec = self._phoenix_spec.transfer_learning_spec
        tl_hook = hook_fn(
            vars_to_warm_start=vars_to_warm_start,
            current_trial_id=my_id,
            completed_trials=trials,
            discount_factor=tl_spec.previous_trials_discount_factor,
            max_completed_trials=tl_spec.max_completed_trials,
            model_dir=run_config.model_dir)
        training_hooks.append(tl_hook)
    # Extract only the learning-rate-related hparams for the task manager.
    learning_rate_spec_keys = [
        "learning_rate", "l2_regularization", "gradient_max_norm",
        "exponential_decay_steps", "exponential_decay_rate"
    ]
    learning_rate_spec = {
        key: value
        for key, value in hparams.values().items()
        if key in learning_rate_spec_keys
    }
    tower_name = None
    # Create the metric_fn if it wasn't specified.
    if not self._metric_fn:
      metric_fn = metric_fns.create_num_parameters_metric_fn(tower_name)
      self._default_metric_fn_list.append(metric_fn)
      self._metric_fn = metric_fns.combine_metric_fns(
          self._default_metric_fn_list)
    model_spec = self._task_manager.create_model_spec(
        features=features,
        params=hparams,
        learning_rate_spec=learning_rate_spec,
        use_tpu=use_tpu,
        trial_mode=trial_mode,
        towers=towers,
        labels=labels,
        mode=mode,
        my_id=my_id,
        model_directory=run_config.model_dir,
        predictions_fn=self._predictions_fn,
        optimizer_fn=self._user_optimizer_fn)
    # Chief-only bookkeeping: mark the trial as generated and persist the
    # replay spec for reproducibility.
    if run_config.is_chief:
      self._metadata.after_generating_trial_model(my_id)
      search_architecture = [["no_search"]]
      if base_tower_generator.SEARCH_GENERATOR in towers.keys():
        search_architecture = [
            t.architecture
            for t in towers[base_tower_generator.SEARCH_GENERATOR]
        ]
      trial_utils.write_replay_spec(
          model_dir=run_config.model_dir,
          filename=REPLAY_CONFIG_FILENAME,
          original_spec=self._phoenix_spec,
          search_architecture=search_architecture[0],
          hparams=hparams)
    # No need to add train op for the eval graph.
    train_op = None
    if is_training:
      train_op = self._increment_global_step(
          model_spec.train_op, train_steps,
          base_tower_generator.SEARCH_GENERATOR)
    # For multi-task dict labels, keep only the labels declared in the spec.
    if (isinstance(labels, dict) and
        not self._phoenix_spec.pass_label_dict_as_is):
      label_names = [
          label_spec.label_name
          for label_spec in self._phoenix_spec.multi_task_spec
      ]
      actual_labels = {
          name: label for name, label in labels.items() if name in label_names
      }
    else:
      actual_labels = labels
    if use_tpu:
      eval_metrics = None
      weights = None
      if self._phoenix_spec.weight_feature_name:
        weights = features[self._phoenix_spec.weight_feature_name]
      if mode != tf.estimator.ModeKeys.PREDICT and not self._head:
        eval_metrics = (self._metric_fn,
                        [actual_labels, model_spec.predictions, weights])
      return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=model_spec.loss,
          predictions=model_spec.predictions,
          train_op=train_op,
          eval_metrics=eval_metrics,
          training_hooks=training_hooks)
    eval_metric_ops = None
    if mode != tf.estimator.ModeKeys.PREDICT and not self._head:
      weights = None
      if self._phoenix_spec.weight_feature_name:
        weights = features[self._phoenix_spec.weight_feature_name]
      eval_metric_ops = self._metric_fn(actual_labels, model_spec.predictions,
                                        weights)
    if self._head:
      # Head path: only the loss/metrics from the head's spec are relevant.
      return self._head.create_estimator_spec(
          features,
          mode,
          model_spec.eval_logits,
          labels,
          train_op_fn=lambda _: train_op)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=model_spec.loss,
        predictions=model_spec.predictions,
        train_op=train_op,
        training_hooks=training_hooks + model_spec.train_hooks,
        eval_metric_ops=eval_metric_ops)

  return model_fn
# TODO(b/172564129): Move increment_global_step to TaskManager.
# TODO(b/172564129): Figure out how to set train steps for multi-task.
def _increment_global_step(self, train_op, train_steps, tower_name):
  """Increments the global step based on the tower size.

  N.B. if the tower size does not divide evenly into the train_steps, it will
  train for longer than required.

  Args:
    train_op: The train_op to execute before incrementing the global_step.
    train_steps: The total number of steps to train for.
    tower_name: The name of the tower which is currently training.

  Returns:
    An tf.Op which increments the global step by the required amount.
  """
  # Synchronous optimizers manage the global step themselves.
  if self._phoenix_spec.use_synchronous_optimizer:
    return train_op
  increment_amount = 1
  tower_size = architecture_utils.get_architecture_size(tower_name)
  if (self._phoenix_spec.use_parameter_scaled_training and tower_size):
    # Scale step increments so smaller towers consume proportionally fewer
    # global steps per real training step.
    train_step_per_block = max(
        int(train_steps // self._phoenix_spec.maximum_depth), 1)
    tower_train_steps = tower_size * train_step_per_block
    increment_amount = max(int(train_steps // tower_train_steps), 1)
  with tf.control_dependencies([train_op]):
    global_step = tf.compat.v1.train.get_or_create_global_step()
    return tf.compat.v1.assign_add(global_step, increment_amount)
def get_estimator(self, run_config, hparams, train_steps):
  """Returns a Phoenix `Estimator` for train and evaluation.

  Args:
    run_config: `RunConfig` object to configure the runtime settings.
    hparams: `HParams` instance defining custom hyperparameters.
    train_steps: The total number of training steps.

  Returns:
    Returns an `Estimator`.

  Raises:
    ValueError: in case flatten is used as a search block or is missing from
      the initial architecture.
  """
  # Flatten blocks are placed automatically; they may not be searched over.
  if not all("FLATTEN" not in block for block in hparams.new_block_type):
    raise ValueError("Flatten cannot be a search block type")
  return Estimator(
      model_fn=self._make_model_fn(
          run_config=run_config, train_steps=train_steps, use_tpu=False),
      config=run_config,
      params=hparams,
      metadata=self._metadata)
def get_tpu_estimator(self,
                      run_config,
                      hparams,
                      train_steps,
                      train_batch_size,
                      eval_on_tpu,
                      embedding_config_spec=None,
                      eval_batch_size=None):
  """Returns a Phoenix `Estimator` for train and evaluation.

  Args:
    run_config: `RunConfig` object to configure the runtime settings.
    hparams: `HParams` instance defining custom hyperparameters.
    train_steps: The total number of training steps.
    train_batch_size: batch size for train.
    eval_on_tpu: whether to use tpu for evaluation.
    embedding_config_spec: (Optional) Embedding config spec instance.
    eval_batch_size: (Optional) if not set, we use train batch size.

  Returns:
    Returns an `TPUEstimator`.

  Raises:
    ValueError: in case flatten is used as a search block or is missing from
      the initial architecture.
  """
  # Same search-space precondition as get_estimator.
  if not all("FLATTEN" not in block for block in hparams.new_block_type):
    raise ValueError("Flatten cannot be a search block type")
  return tf.compat.v1.estimator.tpu.TPUEstimator(
      model_fn=self._make_model_fn(
          run_config=run_config, train_steps=train_steps, use_tpu=True),
      config=run_config,
      use_tpu=True,
      params=hparams,
      train_batch_size=train_batch_size,
      eval_batch_size=(eval_batch_size or train_batch_size),
      embedding_config_spec=embedding_config_spec,
      eval_on_tpu=eval_on_tpu)
@staticmethod
def get_keras_hyperparameters_space(phoenix_spec, train_steps):
  """Gets the Phoenix search space as keras Hyperparameters object."""
  hp_space = keras_tuner.HyperParameters()
  hp_space.merge(
      architecture_utils.get_blocks_search_space(phoenix_spec.blocks_to_use))
  hp_space.Float("learning_rate", 1e-6, 0.01, sampling="log")
  hp_space.Choice("new_block_type", phoenix_spec.blocks_to_use)
  # Try different optimizers.
  hp_space.Choice("optimizer",
                  ["momentum", "sgd", "adagrad", "adam", "rmsprop"])
  # Search for the best tower of depth phoenix_spec.minimum_depth
  # Used for initial structure (before evolution + going deeper).
  for i in range(phoenix_spec.minimum_depth):
    hp_space.Choice("initial_architecture_{}".format(i),
                    phoenix_spec.blocks_to_use)
  learning_spec = phoenix_spec.learning_spec
  # Exponential decay.
  if learning_spec.apply_exponential_decay:
    hp_space.Float("exponential_decay_rate",
                   learning_spec.min_learning_rate_decay_rate,
                   learning_spec.max_learning_rate_decay_rate)
    # NOTE(review): `train_steps // i` raises ZeroDivisionError if
    # min_decay_times is 0, and the range excludes max_decay_times — confirm
    # both are intended.
    decay_steps = [
        train_steps // i for i in range(learning_spec.min_decay_times,
                                        learning_spec.max_decay_times)
    ]
    # De-duplicate while preserving order (small train_steps can collide).
    seen = set()
    unique_decay_steps = [
        x for x in decay_steps if not (x in seen or seen.add(x))
    ]
    hp_space.Choice("exponential_decay_steps", unique_decay_steps)
  # Gradient clipping
  if learning_spec.apply_gradient_clipping:
    # NOTE(review): this samples an integer max-norm; a Float range may be
    # intended for clipping norms — confirm.
    hp_space.Int("gradient_max_norm",
                 learning_spec.min_gradient_norm_when_clipping,
                 learning_spec.max_gradient_norm_when_clipping)
  # L2 regularization
  if learning_spec.apply_l2_regularization:
    hp_space.Float("l2_regularization", learning_spec.min_l2_regularization,
                   learning_spec.max_l2_regularization)
  # Apply dropout between blocks. Here -1 wouldn't apply any dropouts.
  if phoenix_spec.apply_dropouts_between_blocks:
    assert learning_spec.min_dropout < learning_spec.max_dropout
    # Ten evenly spaced dropout rates across the configured range.
    step = (learning_spec.max_dropout - learning_spec.min_dropout) / 10
    dropout = np.arange(learning_spec.min_dropout, learning_spec.max_dropout,
                        step)
    hp_space.Choice("dropout_rate", [-1.0] + dropout.tolist())
  return hp_space
| 1.796875 | 2 |
test/test_authentication.py | zaclochhead/flask_calendar | 0 | 12772755 | import pytest
from flask_calendar.authentication import Authentication
EXISTING_USERNAME = "a_username"
CORRECT_PASSWORD = "<PASSWORD>"
@pytest.fixture
def authentication() -> Authentication:
    """Authentication backed by test fixture data, with no failed-login delay."""
    return Authentication(
        data_folder="test/fixtures", password_salt="<PASSWORD>", failed_login_delay_base=0)
# Cases: unknown user, known user with wrong password, known user with
# correct password.
@pytest.mark.parametrize("username, password, expected", [
    ("an_irrelevant_username", "an_irrelevant_password", False),
    (EXISTING_USERNAME, "an_irrelevant_password", False),
    (EXISTING_USERNAME, CORRECT_PASSWORD, True),
])
def test_is_valid_authentication(authentication: Authentication, username: str, password: str, expected: bool) -> None:
    """is_valid accepts exactly the (known user, correct password) pair."""
    assert authentication.is_valid(username=username, password=password) is expected
def test_retrieve_user_data(authentication: Authentication) -> None:
    """A known user's record exists and carries the expected populated fields."""
    user = authentication.user_data(username=EXISTING_USERNAME)
    assert user is not None
    expected_keys = ("username", "password", "default_calendar")
    for expected_key in expected_keys:
        assert expected_key in user and user[expected_key] is not None
def test_password_is_not_stored_plain(authentication: Authentication) -> None:
    """The stored password is the salted hash, never the plain-text password."""
    user = authentication.user_data(username=EXISTING_USERNAME)
    # Bug fix: the original compared against the bare `<PASSWORD>` placeholder
    # (a syntax error left by secret scrubbing); the intended comparand is the
    # plain-text password constant.
    assert user["password"] != CORRECT_PASSWORD
    assert user["password"] == authentication._hash_password(CORRECT_PASSWORD)
| 3.078125 | 3 |
calculate.py | cu-swe4s-fall-2019/version-control-shla9937 | 0 | 12772756 | import argparse
import math_lib as ml
# CLI definition: two required integer operands.
parser = argparse.ArgumentParser(description='do math')
parser.add_argument('first_number', type=int, help='First Number')
parser.add_argument('second_number', type=int, help='Second Number')
# NOTE(review): parse_args() runs at import time, so importing this module
# without valid CLI args will exit — consider moving it under the guard.
args = parser.parse_args()

if __name__ == '__main__':
    print(ml.add(args.first_number, args.second_number))
print(ml.div(args.first_number, args.second_number)) | 3.078125 | 3 |
gehomesdk/erd/values/oven/erd_closed_loop_cooking_devices_status.py | willhayslett/gehome | 17 | 12772757 | #WIP
import enum
#0x5771
#0x5772
#0x5773
#0x5774
#0x5775
#0x5776
#0x5777
class ErdCloseLoopCookingDevicesStatus:
    """Parses a closed-loop cooking device status payload (ERDs 0x5771-0x5777).

    Expected layout of the hex-string payload `value`:
      [0:2]   device type code ("00".."03")
      [2:4]   hex byte stored in `r0`
      [4:8]   four-character status string
      [8:12]  optional hex word stored in `t0` (when len(value) >= 16)
      [12:16] optional hex word stored in `u0` (when len(value) >= 16)
    """

    class CookingDeviceType(enum.Enum):
        DEFAULT = "Default"
        HESTANT_CUE_PAN = "HestanCuePan"
        HESTANT_CUE_POT = "HestanCuePot"
        PARAGON_SENSOR = "Paragon"
        POPUP_SENSOR = "PopupSensor"

    # Maps a status ERD to its companion ERD code.
    _CODE_MAPPING = {
        "0x5771": "0x5778",
        "0x5772": "0x5779",
        "0x5773": "0x577a",
        "0x5774": "0x577b",
        "0x5775": "0x577c",
        "0x5776": "0x577d",
        "0x5777": "0x577e"
    }

    # Fallback t0/u0 values applied to pop-up sensors with short payloads.
    _y0 = 100
    _z0 = 450

    def __init__(self, value: str, code: str):
        self.code = ErdCloseLoopCookingDevicesStatus._CODE_MAPPING[code]
        self.t0 = -1
        self.u0 = -1
        # Safe defaults; overwritten below when the payload parses cleanly.
        self.status = "NoData"
        self.device_type = self.CookingDeviceType.DEFAULT
        self.r0 = 0
        if not value:
            # Bug fix: the original fell through and re-parsed the empty
            # payload instead of returning early.
            return
        try:
            self.device_type = self._initialize_device_type(value)
            # Bug fix: the original used tuple indices (value[2,4]) which
            # raise TypeError on strings; slices are intended.
            self.r0 = int(value[2:4], 16)
            self.status = value[4:8]
            if len(value) >= 16:
                self.t0 = int(value[8:12], 16)
                self.u0 = int(value[12:16], 16)
            elif self.device_type == self.CookingDeviceType.POPUP_SENSOR:
                # Bug fix: the original referenced nonexistent class
                # attributes y0/z0 (they are named _y0/_z0).
                self.t0 = ErdCloseLoopCookingDevicesStatus._y0
                self.u0 = ErdCloseLoopCookingDevicesStatus._z0
        except ValueError:
            # Malformed payload: restore safe defaults (narrowed from the
            # original bare except, which also hid programming errors).
            self.device_type = self.CookingDeviceType.DEFAULT
            self.r0 = 0

    def _initialize_device_type(self, value: str):
        """Returns the CookingDeviceType for the payload's two-char prefix."""
        # Bug fix: the original looked members up via self.device_type, which
        # does not exist yet when this helper runs from __init__.
        return {
            "00": self.CookingDeviceType.POPUP_SENSOR,
            "01": self.CookingDeviceType.PARAGON_SENSOR,
            "02": self.CookingDeviceType.HESTANT_CUE_PAN,
            "03": self.CookingDeviceType.HESTANT_CUE_POT
        }.get(value[:2], self.CookingDeviceType.DEFAULT)
| 2.546875 | 3 |
pymetabolism/tests/test_parsers.py | Midnighter/pymetabolism | 1 | 12772758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============
Parsers Tests
=============
:Authors:
<NAME>
<NAME>
:Date:
2011-04-10
:Copyright:
Copyright(c) 2011 Jacobs University of Bremen. All rights reserved.
:File:
test_parsers.py
"""
import os
import nose.tools as nt
from ..miscellaneous import OptionsManager
from .. import parsers
from ..metabolism import metabolism as pymet
def test_nothing():
    """Placeholder keeping the test module importable until real tests return."""
    return None
#class TestSBMLParser(object):
#
# def __init__(self):
# self.options = OptionsManager.get_instance()
# self.options.reversible_suffix = "r"
# self.parser = parsers.SBMLParser()
# self.system = self.parser.parse(os.path.join(os.path.dirname(__file__),
# "data", "Ec_core_flux1.xml"))
#
# def test_compartments(self):
# nt.assert_equal(len(self.system.compartments), 3)
# for comp in self.system.compartments:
# nt.assert_true(isinstance(comp, pymet.SBMLCompartment))
# nt.assert_true(comp.name)
#
# def test_compounds(self):
# nt.assert_equal(len(self.system.compounds), 77)
# for cmpd in self.system.compounds:
# nt.assert_true(isinstance(cmpd, pymet.SBMLCompartmentCompound))
# nt.assert_true(cmpd.name)
# nt.assert_true(cmpd.identifier)
# nt.assert_true(len(cmpd.extended_name) > 0)
#
# def test_reactions(self):
# nt.assert_equal(len(self.system.reactions), 77)
# for rxn in self.system.reactions:
# nt.assert_true(isinstance(rxn, pymet.SBMLReaction))
# nt.assert_true(rxn.name)
# nt.assert_true(rxn.identifier)
# nt.assert_true(len(rxn.extended_name) > 0)
| 2.3125 | 2 |
cell.py | DFrye333/DynamicMaze | 0 | 12772759 | <reponame>DFrye333/DynamicMaze
'''
Module: cell
Author: <NAME>
Description: Contains the Cell class.
'''
from utility import Direction
class Cell:
    '''
    Class: Cell
    Description: Represents an individual cell in a maze.
    '''
    # The print character for a visited cell.
    VISITED_STRING = " "
    # The print character for an unvisited cell.
    UNVISITED_STRING = "/"
    # The print character for a horizontal wall.
    WALL_HORIZONTAL_STRING = "-"
    # The print character for a vertical wall.
    WALL_VERTICAL_STRING = "|"

    def __init__(self, content, position):
        '''
        Method: __init__
        Description: Cell constructor.
        Parameters: content, position
            content: String - A string visually representing the cell
            position: 2-Tuple - The cell's (x, y) position in the maze that
                owns it
        Return: None
        '''
        self.m_content = content
        self.m_position = position
        self.m_visited = False
        # Every cell starts with all four walls closed.
        self.m_walls = {
            Direction.NORTH : True,
            Direction.EAST : True,
            Direction.SOUTH : True,
            Direction.WEST : True
        }

    def visit(self):
        '''
        Method: visit
        Description: Marks the cell visited and updates its display string.
        Return: None
        '''
        self.m_visited = True
        self.m_content = self.VISITED_STRING

    def unvisit(self):
        '''
        Method: unvisit
        Description: Marks the cell unvisited and updates its display string.
        Return: None
        '''
        self.m_visited = False
        self.m_content = self.UNVISITED_STRING

    def is_visited(self):
        '''
        Method: is_visited
        Description: Returns whether the cell is in the visited state.
        Return: Boolean
        '''
        return self.m_visited

    def get_content(self):
        '''
        Method: get_content
        Description: Returns the cell's display string.
        Return: String
        '''
        return self.m_content

    def get_position_x(self):
        '''
        Method: get_position_x
        Description: Returns the cell's x-position within the owning maze.
        Return: Int
        '''
        return self.m_position[0]

    def get_position_y(self):
        '''
        Method: get_position_y
        Description: Returns the cell's y-position within the owning maze.
        Return: Int
        '''
        return self.m_position[1]

    def get_wall(self, direction):
        '''
        Method: get_wall
        Description: Returns whether the wall in the given Direction exists.
        Parameters: direction
            direction: Direction - Direction of the desired wall
        Return: Boolean (or None for an unknown direction)
        '''
        return self.m_walls.get(direction)

    def set_content(self, content):
        '''
        Method: set_content
        Description: Sets the cell's display string.
        Parameters: content
            content: String - A string visually representing the cell
        Return: None
        '''
        self.m_content = content

    def set_position_x(self, x):
        '''
        Method: set_position_x
        Description: Sets the cell's x-position attribute.
        Parameters: x
            x: Int - Cell's x-position within the maze that owns it
        Return: None
        '''
        # Bug fix: m_position is documented (and constructed) as a 2-tuple,
        # and tuples do not support item assignment; rebuild the pair
        # instead of mutating it in place.
        self.m_position = (x, self.m_position[1])

    def set_position_y(self, y):
        '''
        Method: set_position_y
        Description: Sets the cell's y-position attribute.
        Parameters: y
            y: Int - Cell's y-position within the maze that owns it
        Return: None
        '''
        # See set_position_x: rebuild the pair rather than item-assign.
        self.m_position = (self.m_position[0], y)

    def set_wall(self, direction, value):
        '''
        Method: set_wall
        Description: Sets whether the wall in the given Direction exists.
        Parameters: direction, value
            direction: Direction - Direction of the wall
            value: Boolean - Whether the wall exists or not
        Return: None
        '''
        self.m_walls[direction] = value
rects.py | prakharchoudhary/faceCV | 3 | 12772760 | import cv2
import numpy as np
import utils
def outlineRect(image, rect, color):
    """Draw the outline of ``rect`` = (x, y, w, h) onto ``image`` in ``color``.

    A ``None`` rect is a no-op.
    """
    if rect is None:
        return
    left, top, width, height = (int(v) for v in rect)
    cv2.rectangle(image, (left, top), (left + width, top + height), color)
def copyRect(src, dst, srcRect, dstRect, mask=None,
             interpolation=cv2.INTER_LINEAR):
    """Copy part of the source to part of the destination.

    The source sub-rectangle (x, y, w, h) is resized to the destination
    sub-rectangle's size and written into ``dst``.  When ``mask`` is given,
    only pixels where the (resized) mask is non-zero are overwritten.
    """
    x0, y0, w0, h0 = [int(i) for i in srcRect]
    x1, y1, w1, h1 = [int(j) for j in dstRect]
    # Resize the contents of the source sub-rectangle
    # Put the result in the destination subrectangle
    if mask is None:
        dst[y1:y1 + h1, x1:x1 + w1] = cv2.resize(src[y0:y0 + h0, x0:x0 + w0], (w1, h1),
                                                 interpolation=interpolation)
    else:
        if not utils.isGray(src):
            # Convert the mask to 3 channels, like the image.
            # NOTE(review): the mask is expanded using the *source* tile size
            # (h0, w0) and then resized to (w1, h1); this assumes the mask
            # matches the source sub-rectangle -- TODO confirm with callers.
            mask = mask.repeat(3).reshape(h0, w0, 3)
        # Perform the copy, with the mask applied: where the resized mask is
        # truthy take the resized source tile, elsewhere keep dst unchanged.
        dst[y1:y1 + h1, x1:x1 + w1] = np.where(cv2.resize(mask,
                                                          (w1, h1),
                                                          interpolation=cv2.INTER_LINEAR),
                                               cv2.resize(src[y0:y0 + h0, x0:x0 + w0], (w1, h1),
                                                          interpolation=interpolation),
                                               dst[y1:y1 + h1, x1:x1 + w1])
def swapRects(src, dst, rects, masks=None,
              interpolation=cv2.INTER_LINEAR):
    """Copy the source with two or more sub-rectangles swapped.

    The rectangles are rotated by one position: rect[i+1] receives the
    content of rect[i], and rect[0] receives the content of the last
    rectangle.  ``masks``, when given, holds one mask per rectangle.
    """
    if dst is not src:
        dst[:] = src

    numRects = len(rects)
    if numRects < 2:
        # Nothing to swap.
        return

    if masks is None:
        masks = [None] * numRects

    # Copy the contents of last rectangle into temporary storage, since it
    # will be overwritten before it can be written to the first rectangle.
    x, y, w, h = rects[numRects - 1]
    temp = src[y:y + h, x:x + w].copy()

    # Copy the contents of each rectangle into next, walking backwards so
    # that nothing is clobbered before it has been copied.
    i = numRects - 2
    while i >= 0:
        copyRect(src, dst, rects[i], rects[i + 1], masks[i],
                 interpolation)
        i -= 1

    # Copy the temporarily stored content into the first rectangle
    copyRect(temp, dst, (0, 0, w, h), rects[0], masks[numRects - 1],
             interpolation)
| 3.453125 | 3 |
tests/unit/drivers/core/test_core_cisco_iosxr_driver.py | carlmontanari/ssh2net | 10 | 12772761 | import pytest
from ssh2net.core.cisco_iosxr.driver import IOSXRDriver, PRIVS
from tests.unit.drivers.base_driver_unit_tests import BaseDriverUnitTest
class TestIOSXR(BaseDriverUnitTest):
    """IOSXR driver unit tests; the shared cases come from BaseDriverUnitTest."""

    def setup_method(self):
        # Fresh driver and privilege map per test to avoid cross-test state.
        self.privs = PRIVS
        self.driver = IOSXRDriver()

    def test__determine_current_priv_exec(self):
        # This base-class test does not apply to the IOSXR platform.
        pytest.skip("no privilege exec on iosxr")
| 2.203125 | 2 |
ipawac_assistant/assistant-modules/linphoneCalling.py | shreyashag/ipawac_assistant | 0 | 12772762 | <gh_stars>0
# -*- coding: utf-8-*-
import re
WORDS = ["CALL"]
def handle(text, mic, profile, linphone):
    """
    Places or ends a linphone VoIP call based on the spoken command.

    Arguments:
        text -- user-input, typically transcribed speech
        mic -- used to interact with the user (for both input and output)
        profile -- contains information related to the user (e.g., phone
                   number); currently unused by this module
        linphone -- linphone client used to create and end calls
    """
    try:
        if re.search(r'\bEnd call\b', text, re.IGNORECASE):
            linphone.end_call()
            # Bug fix: `speaker` was never defined in this module; per the
            # docstring, `mic` is the output channel.
            # TODO(review): confirm mic.say is the correct output API.
            mic.say("Alright, I have ended the call")
        if re.search('aman', text, re.IGNORECASE):
            mic.say("Calling Aman!")
            linphone.create_call('amansalehjee')
        elif re.search('shreyash', text, re.IGNORECASE):
            mic.say("Calling shreyash!")
            linphone.create_call('shreyash23')
        elif re.search('nikhil', text, re.IGNORECASE):
            mic.say("Calling nikhil!")
            linphone.create_call('nikhil93')
    except KeyboardInterrupt:
        # Hang up cleanly if the user interrupts mid-call.
        linphone.end_call()
def isValid(text):
    """
    Return True when the transcription mentions placing or ending a call.

    Arguments:
        text -- user-input, typically transcribed speech
    """
    match = re.search(r'call', text, re.IGNORECASE)
    return match is not None
| 3.171875 | 3 |
tools/import_wikihow_elasticsearch.py | bigabig/wikihow-qa | 6 | 12772763 | import json,sys,io,os
from argparse import ArgumentParser
from elasticsearch import Elasticsearch
from argparse import ArgumentParser
def main():
    """Index every .json file in a dataset directory into Elasticsearch.

    CLI: -d/--data <dataset dir>, -i/--index <elasticsearch index>.
    Prints the usage help and returns when either option is missing.
    """
    parser = ArgumentParser()
    parser.add_argument("-d", "--data", dest="data", help="path to dataset", metavar="DATA")
    parser.add_argument("-i", "--index", dest="index", help="elasticsearch index", metavar="INDEX")
    args = parser.parse_args()

    if not args.data or not args.index:
        parser.print_help()
        return

    es = Elasticsearch(['localhost'], port=9200)

    dataset_path = os.path.normpath(args.data)
    doc_id = 0
    for entry in os.scandir(dataset_path):
        # Only plain .json files are indexed; everything else is skipped.
        if not entry.name.endswith(".json"):
            continue
        print("Indexing " + entry.name)
        with io.open(entry.path, 'r', encoding="utf8") as fh:
            document = json.load(fh)
            # Remember which file the document came from.
            document['filename'] = entry.name
            es.index(index=args.index, doc_type="_doc", id=doc_id, body=document)
            doc_id += 1


if __name__ == "__main__":
    main()
| 2.9375 | 3 |
stanCode_Projects/my_photoshop/stanCodoshop.py | juneyochen/sc-projects | 0 | 12772764 | """
SC101 - Assignment3
Adapted from <NAME>'s Ghost assignment by
<NAME>.
-----------------------------------------------
This code helps users combine the photos to get a best photo.
"""
import os
import sys
from simpleimage import SimpleImage
def get_pixel_dist(pixel, red, green, blue):
    """
    Returns the Euclidean color distance between pixel and mean RGB value.

    (The original docstring claimed the *squared* distance, but the code
    takes the square root.  Since sqrt is monotonic, the nearest-pixel
    comparison in get_best_pixel is unaffected either way.)

    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images

    Returns:
        dist (float): Euclidean distance between red, green, and blue
        pixel values
    """
    # define the color distance from each pixel to the average pixel
    color_distance = ((red - pixel.red)**2 + (green - pixel.green)**2 + (blue - pixel.blue)**2) ** 0.5
    return color_distance
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, green, and blue values.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged

    Returns:
        rgb (List[int]): average [red, green, blue] values across pixels
        (each truncated to int)
    """
    count = len(pixels)
    red_total = sum(p.red for p in pixels)
    green_total = sum(p.green for p in pixels)
    blue_total = sum(p.blue for p in pixels)
    # Truncating division, matching the original int() behavior.
    return [int(red_total / count), int(green_total / count), int(blue_total / count)]
def get_best_pixel(pixels):
    """
    Given a list of pixels, returns the pixel with the smallest
    distance from the average red, green, and blue values across all pixels.
    Ties keep the earliest pixel, as before.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged and compared

    Returns:
        best (Pixel): pixel closest to the RGB averages
    """
    avg_red, avg_green, avg_blue = get_average(pixels)
    # min() returns the first element with the minimal key, matching the
    # original strict-less-than scan.
    return min(pixels, key=lambda p: get_pixel_dist(p, avg_red, avg_green, avg_blue))
def solve(images):
    """
    Given a list of image objects, compute and display a Ghost solution image
    based on these images. There will be at least 3 images and they will all
    be the same size.

    Input:
        images (List[SimpleImage]): list of images to be processed
    """
    width = images[0].width
    height = images[0].height
    result = SimpleImage.blank(width, height)
    ######## YOUR CODE STARTS HERE #########
    # For each coordinate, compare the pixels of all photos at that position.
    for x in range(width):
        for y in range(height):
            pixels = []  # collects the pixels at (x, y) across all images
            for image in images:
                same_position_pixel = image.get_pixel(x, y)
                pixels.append(same_position_pixel)
            best_pixel = get_best_pixel(pixels)  # pixel closest to the average
            # Write the chosen pixel's channels into the result image.
            result_pixel = result.get_pixel(x, y)
            result_pixel.red = best_pixel.red
            result_pixel.green = best_pixel.green
            result_pixel.blue = best_pixel.blue
    ######## YOUR CODE ENDS HERE ###########
    print("Displaying image!")
    result.show()
def jpgs_in_dir(dir):
    """
    (provided, DO NOT MODIFY)
    Given the name of a directory, returns a list of the .jpg filenames
    within it.

    Input:
        dir (string): name of directory

    Returns:
        filenames(List[string]): names of jpg files in directory
    """
    # NOTE: `dir` shadows the builtin of the same name; left as-is because
    # this helper is marked "DO NOT MODIFY".
    filenames = []
    for filename in os.listdir(dir):
        if filename.endswith('.jpg'):
            # Return paths joined with the directory, not bare names.
            filenames.append(os.path.join(dir, filename))
    return filenames
def load_images(dir):
    """
    (provided, DO NOT MODIFY)
    Given a directory name, reads all the .jpg files within it into memory and
    returns them in a list. Prints the filenames out as it goes.

    Input:
        dir (string): name of directory

    Returns:
        images (List[SimpleImages]): list of images in directory
    """
    images = []
    jpgs = jpgs_in_dir(dir)
    for filename in jpgs:
        # Progress output, since loading many images can take a while.
        print("Loading", filename)
        image = SimpleImage(filename)
        images.append(image)
    return images
def main():
    """Entry point: load all images from the folder given on the CLI and solve."""
    # (provided, DO NOT MODIFY)
    args = sys.argv[1:]
    # We just take 1 argument, the folder containing all the images.
    # The load_images() capability is provided above.
    images = load_images(args[0])
    solve(images)


if __name__ == '__main__':
    main()
| 4.1875 | 4 |
clean_architecture_basic_classes/basic_domain/basic_entity.py | aberriel/clean_architecture_basic_classes | 0 | 12772765 | from clean_architecture_basic_classes.basic_domain.basic_value import \
BasicValue
from marshmallow import Schema, fields
from uuid import uuid4
def missing_id():
    """Generate a fresh UUID4 string for entities created without an id."""
    return str(uuid4())
class BasicEntity(BasicValue):
    """Persistable entity identified by `entity_id`, delegating storage to an
    adapter attached via set_adapter()."""

    def __init__(self, entity_id=None):
        # A missing id is replaced by a freshly generated UUID4 string.
        self.entity_id = entity_id or str(uuid4())
        self.adapter = None

    def set_adapter(self, adapter):
        """Attach the persistence adapter used by save/update/delete."""
        self.adapter = adapter

    def save(self):
        """Persist this entity's JSON form and return the adapter's id."""
        my_id = self.adapter.save(self.to_json())
        return my_id

    def update(self):
        """Alias of save(): the adapter upserts by id.

        (Fixed: previously a copy-pasted duplicate of save(); delegating
        keeps the two code paths from drifting apart.)
        """
        return self.save()

    def delete(self):
        """Remove this entity from storage by its id."""
        self.adapter.delete(self.entity_id)

    def __eq__(self, other):
        # Entities are identified solely by their id.
        return self.entity_id == other.entity_id

    def __hash__(self):
        return hash(self.entity_id)

    class Schema(Schema):
        # Marshmallow schema; a missing entity_id gets a generated UUID.
        entity_id = fields.String(required=False,
                                  allow_none=True,
                                  missing=missing_id)
| 2.71875 | 3 |
pyvoxel/node.py | zx013/pyvoxel | 0 | 12772766 | <reponame>zx013/pyvoxel
# -*- coding: utf-8 -*-
from pyvoxel.log import Log
# 类中attr属性改变时触发on_attr事件,同时同步改变关联的值
class Node(object):
    """Observable tree node: setting an attribute fires an `on_<name>`
    handler (if present) and propagates the value to bound attributes on
    related nodes via the _trigger/_reflex tables."""

    def __new__(cls):  # subclasses do not need to call super's initializer
        self = super().__new__(cls)
        # Bug fix: these were assigned onto *cls*, so every instance shared
        # (and each instantiation reset) the same trigger map and children
        # list -- e.g. all nodes saw each other's children.  They are now
        # per-instance state.
        self._trigger = {}
        self._reflex = {}
        self.parent = None
        self.children = []
        print('node new')
        return self

    def __setattr__(self, name, value):
        """Store the value, then fire change handlers; handler errors are
        logged rather than raised so assignment always succeeds."""
        ovalue = self.__dict__.get(name, None)
        self.__dict__[name] = value
        try:
            self._on_func(name, ovalue, value)
        except Exception as ex:
            Log.error(ex)

    # Invoke the instance's on_<name> handler (if any) and notify bound nodes.
    def _on_func(self, name, ovalue, value):
        on_name = 'on_' + name
        if on_name in self.__dict__:
            on_func = self.__dict__[on_name]
            on_func(ovalue, value)

        if name in self._trigger:
            for node, nname in self._trigger[name]:
                node._update_value(nname, (self, name), value)

    # Re-evaluate a bound expression after one of its inputs changed.
    def _update_value(self, name, base, basev):
        try:
            expr, pattern, local = self._reflex[name]
            local[pattern[base]] = basev
            value = eval(expr, None, local)
            setattr(self, name, value)
        except Exception as ex:
            Log.error(ex)

    def add_node(self, node):
        """Append `node` as a child of self and re-parent it onto self."""
        self.children.append(node)
        if node.parent:
            Log.warning('{node} already has parent'.format(node=node))
        node.parent = self
if __name__ == '__main__':
    # Manual smoke test for attribute binding between nodes.
    # NOTE(review): Node defines no `bind` method in this module, so this
    # demo raises AttributeError as written -- presumably `bind` was planned
    # or lives elsewhere.  TODO confirm before relying on this script.
    t1 = Node()
    t2 = Node()
    t3 = Node()
    t4 = Node()
    # Build the chain t1 -> t2 -> t3 -> t4.
    t1.add_node(t2)
    t2.add_node(t3)
    t3.add_node(t4)

    t1.testa = 1
    t1.testb = 2
    t3.testc = 3
    t4.test = 10
    # `p` refers to the parent node, `cN` to the N-th child, `self` to the
    # bound node itself (per the expression vocabulary used below).
    t2.bind('testd', 'p.testa + p.testb * p.testb - c0.testc')
    t4.bind('teste', 'p.p.testd + p.p.p.testa + self.test')
    print(t2.testd, t4.teste)
    t1.testa = 4
    print(t2.testd, t4.teste)
    t1.testb = 5
    print(t2.testd, t4.teste)
    t3.testc = 6
    print(t2.testd, t4.teste)
| 2.4375 | 2 |
openchroma/utils.py | alvii147/OpenChroma | 0 | 12772767 | <gh_stars>0
import numpy as np
def is_array_like(arr):
    '''
    Check if object is array-like (list, tuple or numpy array).

    Parameters
    ----------
    arr : any
        Object to be tested.

    Returns
    -------
    bool
        Whether the given object is array-like.
    '''
    return isinstance(arr, (list, tuple, np.ndarray))
def require_array_like(arr, var_name='Array', exception=TypeError):
    '''
    Raise `exception` unless the object is array-like.

    Parameters
    ----------
    arr : any
        Object to be tested.
    var_name : str, optional
        Variable name used in the exception message.
    exception : Exception, optional
        Exception type raised when the test fails.
    '''
    if is_array_like(arr):
        return
    raise exception(f'`{var_name}` must be an array-like object')
def is_shape(arr, shape):
    '''
    Check if array has the desired shape.

    Parameters
    ----------
    arr : array-like
        Array to be tested.
    shape : int or array-like
        Desired shape; a scalar is treated as a 1-tuple.

    Returns
    -------
    bool
        Whether the given array has the desired shape.
    '''
    expected = (shape,) if np.isscalar(shape) else shape
    return np.array_equal(np.shape(arr), expected)
def require_shape(arr, shape, var_name='Array', exception=ValueError):
    '''
    Raise `exception` unless the array has the desired shape.

    Parameters
    ----------
    arr : any
        Array to be tested.
    shape : int or array-like
        Desired shape.
    var_name : str, optional
        Variable name used in the exception message.
    exception : Exception, optional
        Exception type raised when the test fails.
    '''
    if is_shape(arr, shape):
        return
    raise exception(f'`{var_name}` must be of shape {shape}')
def is_dim(arr, dim):
    '''
    Check if array has the desired number of dimensions.

    Parameters
    ----------
    arr : array-like
        Array to be tested.
    dim : int
        Desired dimension.

    Returns
    -------
    bool
        Whether the given array has the desired dimension.
    '''
    return len(np.shape(arr)) == dim
def require_dim(arr, dim, var_name='Array', exception=ValueError):
    '''
    Raise `exception` unless the array has the desired dimension.

    Parameters
    ----------
    arr : any
        Array to be tested.
    dim : int
        Desired dimension.
    var_name : str, optional
        Variable name used in the exception message.
    exception : Exception, optional
        Exception type raised when the test fails.
    '''
    if is_dim(arr, dim):
        return
    raise exception(f'`{var_name}` must be {dim}-dimensional')
def is_axis_size(arr, size, axis=0):
    '''
    Check if the given axis of the array has the desired size.

    Parameters
    ----------
    arr : any
        Array to be tested.
    size : int
        Desired size.
    axis : int, optional
        Axis of the array to test.

    Returns
    -------
    bool
        Whether the given axis of the array has the desired size.
    '''
    return np.shape(arr)[axis] == size
def require_axis_size(
    arr,
    size,
    axis=0,
    var_name='Array',
    exception=ValueError,
):
    '''
    Raise `exception` unless the given axis of the array has the desired size.

    Parameters
    ----------
    arr : any
        Array to be tested.
    size : int
        Desired size.
    axis : int, optional
        Axis of the array to test.
    var_name : str, optional
        Variable name used in the exception message.
    exception : Exception, optional
        Exception type raised when the test fails.
    '''
    if is_axis_size(arr, size, axis=axis):
        return
    raise exception(f'`{var_name}` must be of size {size} at axis {axis}')
| 3.453125 | 3 |
python/wrappergen.py | ju6ge/rbdl-orb | 12 | 12772768 | <filename>python/wrappergen.py<gh_stars>10-100
#!/usr/bin/python
import sys, re, os
def usage(arg0):
    """Print the command-line usage string and abort with exit code -1."""
    message = "Usage: {} <input.template.pyx> <output.pyx>".format(arg0)
    print(message)
    sys.exit(-1)
wrapper_command_strings = {
"ClassDefinitions" : """cdef class _%PARENT%_%MEMBER%_%TYPE%_VectorWrapper:
cdef crbdl.%PARENT% *parent
def __cinit__ (self, uintptr_t ptr):
self.parent = <crbdl.%PARENT% *> ptr
def __getitem__(self, key):
if isinstance( key, slice ) :
#Get the start, stop, and step from the slice
return [%TYPE%.fromPointer (<uintptr_t> &(self.parent.%MEMBER%[i])) for i in xrange (*key.indices(len(self)))]
else:
return %TYPE%.fromPointer (<uintptr_t> &(self.parent.%MEMBER%[key]))
def __setitem__(self, key, value):
if isinstance( key, slice ) :
#Get the start, stop, and step from the slice
src_index = 0
for i in xrange (*key.indices(len(self))):
assert isinstance (value[src_index], %TYPE%), "Invalid type! Expected %TYPE%, but got " + str(type(value[src_index])) + "."
self.parent.%MEMBER%[i] = (<%TYPE%> value[src_index]).thisptr[0]
src_index = src_index + 1
else:
assert isinstance (value, %TYPE%), "Invalid type! Expected %TYPE%, but got " + str(type(value)) + "."
self.parent.%MEMBER%[key] = (<%TYPE%> value).thisptr[0]
def __len__(self):
return self.parent.%MEMBER%.size()
""",
"MemberDefinitions" : """ cdef _%PARENT%_%MEMBER%_%TYPE%_VectorWrapper %MEMBER%""",
"CInitCode" : """ self.%MEMBER% = _%PARENT%_%MEMBER%_%TYPE%_VectorWrapper (<uintptr_t> self.thisptr)""",
"AddProperty" : """ property %MEMBER%:
def __get__ (self):
vector_size = self.thisptr.%MEMBER%.size()
prop = [%TYPE% (address=<uintptr_t> &(self.thisptr.%MEMBER%[i])) for i in range(vector_size)]
return prop
"""
}
def parse_line(line_str):
    """Parse one template line for a %VectorWrapper<Command>(ARG=VAL, ...)% directive.

    Returns
    -------
    (command, args):
        The command name and an {ARG: VAL} dict when a directive is found;
        (False, None) otherwise (including commented-out directives).
    """
    command = ""
    args = {}

    # Everything after '#' is a comment and never part of a directive.
    line_str = line_str.split("#")[0]

    wrapper_line_str_match = re.search(r"%VectorWrapper(\S*)\s*\((.*)\).*%", line_str)
    if wrapper_line_str_match:
        command = wrapper_line_str_match.group(1)
        arg_str = wrapper_line_str_match.group(2)
        arg_match = re.findall(r"(\s*(\S*)\s*=\s*(\w*)\s*,?)", arg_str)
        for arg in arg_match:
            if len(arg) != 3:
                # Bug fix: the original formatted an undefined `line_number`
                # here, so this error path itself raised NameError.
                print("Invalid command args in line '{}'".format(line_str))
                sys.exit(-1)
            args[arg[1]] = arg[2]
        return command, args

    return False, None
if __name__ == "__main__":
    # Exactly two arguments: the template input and the generated output.
    if len(sys.argv) != 3:
        usage (sys.argv[0])

    infilename = sys.argv[1]
    outfilename = sys.argv[2]

    # print ("Processing {} to generate {}".format (infilename, outfilename))

    # NOTE(review): neither file handle is explicitly closed; acceptable for
    # a short-lived script, but worth tightening with `with` blocks.
    infile = open(infilename)
    outfile = open(outfilename, "w")

    outfile.write ("""# WARNING!
#
# This file was automatically created from {} using {}.
# Do not modify this file directly. Edit original source instead!!
""".format (os.path.basename(infilename), os.path.basename(sys.argv[0])))

    template = infile.read()
    template_lines = template.split ("\n")

    # Pass 1: find the classes that will contain generated code and collect
    # each parent's AddProperty members for later template expansion.
    generated_parent_classes = []
    generated_parent_members = {}
    for line_number, line_str in enumerate (template_lines):
        command, args = parse_line (line_str)
        if command:
            if args["PARENT"] not in generated_parent_classes:
                generated_parent_classes.append (args["PARENT"])
                generated_parent_members[args["PARENT"]] = []
            if command=="AddProperty":
                generated_parent_members[args["PARENT"]].append ({
                    "TYPE": args["TYPE"],
                    "MEMBER": args["MEMBER"]
                    })

    # Pass 2: generate code, copying non-directive lines through unchanged
    # and expanding each directive from wrapper_command_strings.
    for line_number, line_str in enumerate (template_lines):
        command, args = parse_line (line_str)
        if not command:
            outfile.write (line_str + "\n")
        else:
            if command in wrapper_command_strings.keys():
                parent = args["PARENT"]
                if command == "AddProperty":
                    # AddProperty expands once, for its own member.
                    content_type = args["TYPE"]
                    member_name = args["MEMBER"]
                    command_code = wrapper_command_strings[command][:]
                    command_code = command_code.replace (
                            "%PARENT%", parent).replace (
                            "%MEMBER%", member_name).replace (
                            "%TYPE%", content_type)
                    outfile.write (command_code + "\n")
                else:
                    # All other commands expand once per collected member.
                    for member in generated_parent_members[parent]:
                        content_type = member["TYPE"]
                        member_name = member["MEMBER"]
                        command_code = wrapper_command_strings[command][:]
                        command_code = command_code.replace (
                                "%PARENT%", parent).replace (
                                "%MEMBER%", member_name).replace (
                                "%TYPE%", content_type)
                        outfile.write (command_code + "\n")
| 2.09375 | 2 |
regression-tests/exampletests/cumsum_doc_test.py | lewisc/spark-tk | 34 | 12772769 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" THIS TEST REQUIRES NO THIRD PARTY APPLICATIONS OTHER THAN THE SPARKTK
THIS TEST IS TO BE MAINTAINED AS A SMOKE TEST FOR THE ML SYSTEM
"""
import unittest
from sparktk import TkContext
class CumSumTest(unittest.TestCase):
    """Smoke test: cumulative_sum on a sorted sparktk frame.

    NOTE(review): this file uses Python 2 syntax (`print frame.inspect()`)
    and the Python-2-only `assertItemsEqual`; it targets a Python 2 runtime.
    """

    def test_frame_basic(self):
        """Documentation test for classifiers"""
        # The general workflow will be build a frame, run some analytics
        # on the frame

        # First Step, construct a frame

        # Construct a frame to be uploaded, this is done using plain python
        # lists uploaded to the server

        # The following frame could represent some ordered list (such as
        # customer orders) and a value associated with the order.
        # The order is sorted on, and then the order value is accumulated
        # Cumulative sum finds the sum up to and including a given order

        # Create context
        tc = TkContext()

        # Create the frame using a list object
        frame = tc.frame.create(data=[[0, 100],
                                      [3, 20],
                                      [1, 25],
                                      [2, 90]],
                                schema=[("order", int),
                                        ("value", int)])
        print frame.inspect()

        # Sort on order, note this is a side effect based operation
        frame.sort('order')

        # calculate the cumulative sum
        frame.cumulative_sum('value')
        print frame.inspect()

        # Fetch the results, and validate they are what you would expect
        # (rows may come back in any order; assertItemsEqual ignores order)
        result = frame.take(frame.count())
        self.assertItemsEqual(
            result, [[0, 100, 100],
                     [3, 20, 235],
                     [1, 25, 125],
                     [2, 90, 215]])


if __name__ == '__main__':
    unittest.main()
| 2.8125 | 3 |
tests/mock/MockGuild.py | ephreal/rollbot | 2 | 12772770 | # -*- coding: utf-8 -*-
"""
This software is licensed under the License (MIT) located at
https://github.com/ephreal/rollbot/Licence
Please see the license for any restrictions or rights granted to you by the
License.
"""
# A mock guild object for testing
class MockGuild():
    """Minimal guild double for tests; stores the given attributes verbatim."""

    def __init__(self, name=None, text_channels=None, users=None):
        # The attributes are independent, so assignment order is irrelevant.
        self.users = users
        self.text_channels = text_channels
        self.name = name
| 1.773438 | 2 |
Aula 12 - Condições Aninhadas/desafio040.py | josue-rosa/Python---Curso-em-Video | 3 | 12772771 | <reponame>josue-rosa/Python---Curso-em-Video
"""
Programa que leia duas notas de um aluno e calcule sua média,
mostrando uma mensagem no final, de acordo com a média atingida.
- media abaixo de 5.0:
reprovado
- media entre 5.0 e 6.9:
recuperação
- media 7.0 ou superior:
aprovado
"""
nota1 = float(input('Informe a primeira nota: '))
nota2 = float(input('Informe a segunda nota: '))
media = (nota1 + nota2) / 2
if media < 5.0:
print(f'Sua média foi {media}. Por isso, você está REPROVADO')
elif media <= 6.9:
print(f'Sua média foi {media}. Você ficou em RECUPERAÇÃO')
else:
print(f'Sua média foi {media}. Você está APROVADO')
| 3.78125 | 4 |
src/utils/helpers.py | PanyalaSainathReddy/authorization | 0 | 12772772 | <reponame>PanyalaSainathReddy/authorization<gh_stars>0
def get_first_matching_attr(obj, *attrs, default=None):
    """Return the value of the first attribute in *attrs* that `obj` defines,
    or *default* when none of them exist."""
    matching = (getattr(obj, name) for name in attrs if hasattr(obj, name))
    return next(matching, default)
def get_error_message(exc) -> str:
    """Extract a human-readable message from an exception: prefer a
    Django-style `message_dict`, then `message`/`messages` (lists are
    joined), finally falling back to str(exc)."""
    if hasattr(exc, 'message_dict'):
        return exc.message_dict

    msg = get_first_matching_attr(exc, 'message', 'messages')
    if isinstance(msg, list):
        msg = ', '.join(msg)

    return str(exc) if msg is None else msg
file_manager/migrations/0039_auto_20211021_1551.py | xiaofengxie128/Proteomic-Data-Manager | 0 | 12772773 | # Generated by Django 3.2.7 on 2021-10-21 21:51
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django (see header); once applied, schema changes belong
# in a *new* migration rather than edits to this one.
class Migration(migrations.Migration):

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('file_manager', '0038_auto_20211021_1513'),
    ]

    operations = [
        # content_type + object_id together form a generic foreign key
        # target on RawFile.
        migrations.AddField(
            model_name='rawfile',
            name='content_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='contenttypes.contenttype'),
        ),
        migrations.AddField(
            model_name='rawfile',
            name='object_id',
            field=models.PositiveIntegerField(default=5),
        ),
        migrations.AlterField(
            model_name='rawfile',
            name='note_file',
            field=models.ManyToManyField(blank=True, to='file_manager.NoteFile'),
        ),
    ]
| 1.5625 | 2 |
setup.py | david-lindner/idrl | 9 | 12772774 | <filename>setup.py
import setuptools
setuptools.setup(
    name="active-reward-learning",
    # Bug fix: the original was missing the comma after `author`, which
    # juxtaposed two keyword arguments and made the file a SyntaxError.
    author="<NAME>",
    version="0.1dev",
    description="",
    long_description=open("README.md").read(),
    classifiers=[
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    install_requires=[
        "numpy>=1.16.1",
        "scipy==1.5.4",  # other versions lead to problems in optimizing highway
        "matplotlib",
        "gym==0.17.3",
        "sacred==0.8.2",
        "cvxopt",
        "networkx",
        "wrapt",
        "seaborn",
        "frozendict",
        "gast==0.2.2",
        "torch",
        "stable_baselines3",
        "tensorboard",
        "opencv-python",
    ],
    setup_requires=["pytest-runner"],
    extras_require={
        "interactive_environments": ["pygame"],
        "web_interface": ["flask"],
        "mobile_experiment_notifications": ["python-telegram-bot"],
    },
    tests_require=["pytest", "pytest-cov"],
    packages=setuptools.find_packages(),
    zip_safe=True,
    entry_points={},
    test_suite="active_reward_learning.tests",
)
| 1.265625 | 1 |
python/tests/test_TermSim_write_read_Resnik_CC.py | paulbible/ggtk | 2 | 12772775 | <filename>python/tests/test_TermSim_write_read_Resnik_CC.py<gh_stars>1-10
#/*=============================================================================
#Copyright (c) 2016 <NAME>
#
#Distributed under the Boost Software License, Version 1.0. (See accompanying
#file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#==============================================================================*/
import unittest
import ggtk
from ggtk.TermMaps import TermInformationContentMap
from ggtk.TermSimilarity import ResnikSimilarity
from ggtk.TermSimilarity import TermSimilarityWriter
from ggtk.TermSimilarity import PrecomputedMatrixTermSimilarity
class TermSimWriterResnikCC_TestCase(unittest.TestCase):
    """Shared fixture: builds a Resnik similarity over the GO graph, writes a
    cellular_component similarity matrix, and loads it back through
    PrecomputedMatrixTermSimilarity for the subclass test cases."""

    @classmethod
    def setUpClass(self):
        # Parsing the full GO graph + human GOA annotations and writing the
        # matrix is expensive, so it runs once per test class on purpose.
        self.graph = ggtk.GoParser.parse("../../example_graphs/go-basic.obo","obo")
        self.annos = ggtk.AnnotationParser.parse("../../example_annotations/goa_human.gaf")
        self.ic_map = TermInformationContentMap(self.graph, self.annos)
        self.sim = ResnikSimilarity(self.graph, self.ic_map)
        self.writer = TermSimilarityWriter(self.graph, self.annos)
        # Write the CC-ontology matrix to disk, then reload it so the tests
        # can compare on-the-fly vs. precomputed similarities.
        self.mat_file = "matrix_files/test_cc_mat.txt"
        self.onto_code = ggtk.go_enums.ontologyStringToCode("cellular_component")
        self.writer.writeSimilarityMatrix(self.sim, self.mat_file, self.onto_code)
        self.mat_sim_cc = PrecomputedMatrixTermSimilarity(self.mat_file)
class CoreMethodsTests(TermSimWriterResnikCC_TestCase):
    ##########################################################################
    # Test types
    ##########################################################################
    def test_sim_writer_type(self):
        """
        Check that the writer and the matrix-backed similarity object
        have the expected types.
        """
        self.assertEqual(type(self.writer), TermSimilarityWriter)
        self.assertEqual(type(self.mat_sim_cc), PrecomputedMatrixTermSimilarity)
    ##########################################################################
    # Test PrecomputedMatrixTermSimilarity raises error on bad filename
    ##########################################################################
    def test_precomputed_matrix_bad_file(self):
        """
        Constructing PrecomputedMatrixTermSimilarity with a nonexistent
        file must raise IOError.
        """
        with self.assertRaises(IOError):
            p = PrecomputedMatrixTermSimilarity('fake_file.txt')
    ##########################################################################
    # Non-existent terms used as input
    ##########################################################################
    def test_similarity_Resnik_bad_ids(self):
        """
        Test similarity between two bad terms.
        """
        self.assertEqual(self.sim.calculateTermSimilarity("bad_id","bad_id2"),0)
        self.assertEqual(self.mat_sim_cc.calculateTermSimilarity("bad_id","bad_id2"),0)
    def test_similarity_Resnik_1_bad_1_good_id(self):
        """
        Test similarity between one good term and one bad term.
        """
        self.assertEqual(self.sim.calculateTermSimilarity("GO:0032991","bad_id2"),0)
        self.assertEqual(self.mat_sim_cc.calculateTermSimilarity("GO:0032991","bad_id2"),0)
    ##########################################################################
    # Normalized Similarity [0-1], on CC terms
    ##########################################################################
    def test_normalized_similarity_Resnik_CC_reflexive_sim(self):
        """
        Test normalized similarity between two terms in the CC ontology.
        GO:0043234 -> protein complex
        GO:0043234 -> protein complex
        """
        rs_val = self.sim.calculateNormalizedTermSimilarity("GO:0043234", "GO:0043234")
        mat_val = self.mat_sim_cc.calculateNormalizedTermSimilarity("GO:0043234", "GO:0043234")
        #assert similarity up to 6 places
        self.assertAlmostEqual(rs_val, mat_val, 6)
    def test_normalized_similarity_Resnik_CC(self):
        """
        Test normalized similarity between two terms in the CC ontology.
        GO:0043234 -> protein complex
        GO:0000791 -> euchromatin
        """
        rs_val = self.sim.calculateNormalizedTermSimilarity("GO:0043234", "GO:0000791")
        mat_val = self.mat_sim_cc.calculateNormalizedTermSimilarity("GO:0043234", "GO:0000791")
        #assert similarity up to 6 places
        self.assertAlmostEqual(rs_val, mat_val, 6)
    def test_normalized_similarity_Resnik_CC_1_good_1_root(self):
        """
        Test normalized similarity between two terms in the CC ontology.
        GO:0005575 -> cellular_component
        GO:0043234 -> protein complex
        """
        rs_val = self.sim.calculateNormalizedTermSimilarity("GO:0043234", "GO:0005575")
        mat_val = self.mat_sim_cc.calculateNormalizedTermSimilarity("GO:0043234", "GO:0005575")
        #assert similarity up to 6 places
        self.assertAlmostEqual(rs_val, mat_val, 6)
if __name__ == '__main__':
    # Run only the core method tests, with verbose per-test output.
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromTestCase(CoreMethodsTests)
    unittest.TextTestRunner(verbosity=2).run(tests)
| 2.203125 | 2 |
mediapipe/calculators/tensorflow/tensorflow_inference_calculator_pb2.py | mengfu188/simple-mediapipe | 5 | 12772776 | <reponame>mengfu188/simple-mediapipe<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto
# NOTE: regenerate with protoc from the .proto above rather than editing by hand.
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
# Resolve calculator_options_pb2 through either of the module layouts protoc may emit.
try:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
  mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto',
  package='mediapipe',
  syntax='proto2',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\nFmediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\xc6\x02\n$TensorFlowInferenceCalculatorOptions\x12\x16\n\x0esignature_name\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\x05\x12&\n\x18\x61\x64\x64_batch_dim_to_tensors\x18\x03 \x01(\x08:\x04true\x12\x1a\n\x12recurrent_tag_pair\x18\x04 \x03(\t\x12\'\n\x18skip_on_missing_features\x18\x05 \x01(\x08:\x05\x66\x61lse\x12&\n\x1bmax_concurrent_session_runs\x18\x06 \x01(\x05:\x01\x30\x32]\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x8b\xe1\x9f\x36 \x01(\x0b\x32/.mediapipe.TensorFlowInferenceCalculatorOptions'
  ,
  dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
# Message descriptor; the field metadata below mirrors the .proto definition.
_TENSORFLOWINFERENCECALCULATOROPTIONS = _descriptor.Descriptor(
  name='TensorFlowInferenceCalculatorOptions',
  full_name='mediapipe.TensorFlowInferenceCalculatorOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  _descriptor.FieldDescriptor(
    name='signature_name', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.signature_name', index=0,
    number=1, type=9, cpp_type=9, label=1,
    has_default_value=False, default_value=b"".decode('utf-8'),
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
    name='batch_size', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.batch_size', index=1,
    number=2, type=5, cpp_type=1, label=1,
    has_default_value=False, default_value=0,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
    name='add_batch_dim_to_tensors', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.add_batch_dim_to_tensors', index=2,
    number=3, type=8, cpp_type=7, label=1,
    has_default_value=True, default_value=True,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
    name='recurrent_tag_pair', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.recurrent_tag_pair', index=3,
    number=4, type=9, cpp_type=9, label=3,
    has_default_value=False, default_value=[],
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
    name='skip_on_missing_features', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.skip_on_missing_features', index=4,
    number=5, type=8, cpp_type=7, label=1,
    has_default_value=True, default_value=False,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  _descriptor.FieldDescriptor(
    name='max_concurrent_session_runs', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.max_concurrent_session_runs', index=5,
    number=6, type=5, cpp_type=1, label=1,
    has_default_value=True, default_value=0,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=False, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  _descriptor.FieldDescriptor(
    name='ext', full_name='mediapipe.TensorFlowInferenceCalculatorOptions.ext', index=0,
    number=113766539, type=11, cpp_type=10, label=1,
    has_default_value=False, default_value=None,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=True, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=124,
  serialized_end=450,
)
DESCRIPTOR.message_types_by_name['TensorFlowInferenceCalculatorOptions'] = _TENSORFLOWINFERENCECALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class generated from the descriptor above.
TensorFlowInferenceCalculatorOptions = _reflection.GeneratedProtocolMessageType('TensorFlowInferenceCalculatorOptions', (_message.Message,), {
  'DESCRIPTOR' : _TENSORFLOWINFERENCECALCULATOROPTIONS,
  '__module__' : 'mediapipe.calculators.tensorflow.tensorflow_inference_calculator_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.TensorFlowInferenceCalculatorOptions)
  })
_sym_db.RegisterMessage(TensorFlowInferenceCalculatorOptions)
# Register the 'ext' extension on mediapipe.CalculatorOptions so these options
# can be embedded in a CalculatorOptions proto.
_TENSORFLOWINFERENCECALCULATOROPTIONS.extensions_by_name['ext'].message_type = _TENSORFLOWINFERENCECALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_TENSORFLOWINFERENCECALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| 1.375 | 1 |
binvox_gen.py | wangyida/voxel-dcgan | 1 | 12772777 | <gh_stars>1-10
"""Run the ``thinvox`` post-processing step over every voxelized
ShapeNet model (``model_normalized.binvox``).

The voxelization itself (``binvox -d 32 -cb -e <model>.obj``) is assumed
to have been run already over the same tree.
"""
import glob
import os
import shutil
import subprocess
import sys  # NOTE(review): unused here; kept to preserve the module's imports

paths = glob.glob("/media/wangyida/D0-P1/database/ShapeNetCore.v2/*/*/*/model_normalized.binvox")
print("number of data:", len(paths))
with open(os.devnull, 'w') as devnull:
    for i, path in enumerate(paths):
        try:
            # Pass an argument list instead of cmd.split(' ') so paths that
            # contain spaces are handled correctly.
            subprocess.check_call(["thinvox", path], stdout=devnull, stderr=devnull)
            # thinvox always writes ./thinned.binvox; move it next to the
            # source file. shutil.move works across filesystems, unlike
            # os.rename/os.replace.
            shutil.move("thinned.binvox", path + ".thinned")
        except (subprocess.CalledProcessError, OSError):
            # check_call raises on a non-zero exit status, so the original
            # `ret != 0` comparison could never fire; report failures here.
            print("error", i, path)
        else:
            print(i, path)
| 2.234375 | 2 |
pycacher/batcher.py | garindra/pycacher | 1 | 12772778 | class Batcher(object):
"""
Batcher enables developers to batch multiple retrieval requests.
Example usage #1::
from pycacher import Cacher
cacher = Cacher()
batcher = cacher.create_batcher()
batcher.add('testkey')
batcher.add('testkey1')
batcher.add('testkey2')
values = batcher.batch()
It is possible to use batcher as context manager. Inside the context manager,
developers can call the `.register` method of cached functions
to register its keys to the currently active batcher for later batching. Then,
when the actual cached functions that were registered earlier inside the
context manager were actually called, it will seek its value from the batcher
context.
Example usage #2::
from pycacher import Cacher
cacher = Cacher()
batcher = cacher.create_batcher()
with batcher:
cached_func.register(1, 2)
cached_func_2.register(1, 2)
cached_func_3.register(3, 5)
batcher.batch()
#Later..
with batcher:
cached_func(1, 2) #will look for its value from the batcher
cached_func_2(1, 2)
cached_func(3, 5)
#You can also do this:
batcher.register(cached_func, 1, 2)
batcher.register(cached_func_2, 1, 2)
"""
def __init__(self, cacher=None):
self.cacher = cacher
self._keys = set()
self._last_batched_values = None
self._autobatch_flag = False
self._hooks = {'register':[], 'call':[]}
def add(self, key):
if isinstance(key, list):
for k in key:
self._keys.add(k)
else:
self._keys.add(key)
def reset(self):
self._keys = set()
def batch(self):
self._last_batched_values = self.cacher.backend.multi_get(self._keys)
return self._last_batched_values
def has_batched(self):
return self._last_batched_values is not None
def register(self, decorated_func, *args):
cache_key = decorated_func.build_cache_key(*args)
self.add(cache_key)
#run the hooks on the batcher first
self.trigger_hooks('register', cache_key, self)
self.cacher.trigger_hooks('register', cache_key, self)
    def register_list(self, decorated_list_func, *args, **kwargs):
        """Registers a cached list function.

        Queues every ranged cache key for the requested window and notifies
        the 'register' hooks with the function's root cache key.

        Requires the keyword arguments ``skip`` and ``limit``; a missing one
        raises KeyError.
        """
        skip = kwargs['skip']
        limit = kwargs['limit']
        #add the ranged cache keys to the actual internal key batch list.
        for ranged_cache_key in decorated_list_func.get_ranged_cache_keys(skip=skip, limit=limit, *args):
            self.add(ranged_cache_key)
        #Build the "root" cache key to be passed to the hook functions.
        #Note that we do not pass the ranged cache key to the hook functions,
        #it's completely for internal use.
        cache_key = decorated_list_func.build_cache_key(*args)
        #run the hooks on the batcher first
        self.trigger_hooks('register', cache_key, self)
        self.cacher.trigger_hooks('register', cache_key, self)
    def get_last_batched_values(self):
        """Return the dict produced by the most recent batch(), or None."""
        return self._last_batched_values
    def get_values(self):
        """Alias for :meth:`get_last_batched_values`."""
        return self.get_last_batched_values()
    def get_keys(self):
        """Return the set of keys queued for the next batch."""
        return self._keys
    def get(self, key):
        """Return the batched value for *key*, or None.

        NOTE: also returns None when the last batch result was an empty
        dict, because the truthiness check below treats {} as "no batch".
        """
        if self._last_batched_values:
            return self._last_batched_values.get(key)
        return None
    def is_batched(self, key):
        """Checks whether a key is included in the latest batch.
        Example:
            self.batcher.add('test-1')
            self.batcher.batch()
            self.batcher.is_batched('test-1')
            >> True
        """
        if self._last_batched_values:
            return key in self._last_batched_values
        return False
    def autobatch(self):
        """Enable automatic batching of the queued keys at the end of the
        next context-manager block.
        Example Usage::
            with batcher.autobatch():
                expensive_function.register(1, 2)
            #is similar to:
            with batcher:
                expensive_function.register(1, 2)
            batcher.batch()
        """
        self._autobatch_flag = True
        return self
    def __enter__(self):
        """On context-manager enter, push this Batcher onto the parent
        cacher's batcher stack so that decorated functions registering
        inside the block know which batcher to register to."""
        self.cacher.push_batcher(self)
    def __exit__(self, type, value, traceback):
        """On exit, run the pending batch if autobatch() was requested, then
        pop this batcher from the cacher's stack.

        Returns None, so any exception raised inside the block propagates.
        NOTE(review): the parameter `type` shadows the builtin; conventional
        names are exc_type/exc_value/traceback.
        """
        if self._autobatch_flag:
            self.batch()
            self._autobatch_flag = False
        self.cacher.pop_batcher()
def add_hook(self, event, fn):
""" Add hook function to be executed on event.
Example usage::
def on_cacher_invalidate(key):
pass
cacher.add_hook('invalidate', on_cacher_invalidate)
"""
if event not in ('invalidate', 'call', 'register'):
raise InvalidHookEventException(\
"Hook event must be 'invalidate', 'call', or 'register'")
self._hooks[event].append(fn)
def trigger_hooks(self, event, *args, **kwargs):
if event not in ('invalidate', 'call', 'register'):
raise InvalidHookEventException(\
"Hook event must be 'invalidate', 'call', or 'register'")
for fn in self._hooks[event]:
fn(*args, **kwargs)
def remove_all_hooks(self, event):
if event not in ('invalidate', 'call', 'register'):
raise InvalidHookEventException(\
"Hook event must be 'invalidate', 'call', or 'register'")
#reset
self._hooks[event] = []
| 3.625 | 4 |
venv/Lib/site-packages/pyrogram/errors/exceptions/bad_request_400.py | iamgeorgiy/heroku-userbot | 0 | 12772779 | <filename>venv/Lib/site-packages/pyrogram/errors/exceptions/bad_request_400.py
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from ..rpc_error import RPCError
# Base class for all HTTP-400-family Telegram RPC errors. NAME is taken from
# the class docstring, so the docstrings in this module are runtime data.
class BadRequest(RPCError):
    """Bad Request"""
    CODE = 400
    """``int``: RPC Error Code"""
    NAME = __doc__
# Generated-style RPC error subclasses. Each class docstring doubles as the
# runtime error MESSAGE (MESSAGE = __doc__), so do not edit docstrings casually.
class FirstnameInvalid(BadRequest):
    """The first name is invalid"""
    ID = "FIRSTNAME_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class LastnameInvalid(BadRequest):
    """The last name is invalid"""
    ID = "LASTNAME_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberInvalid(BadRequest):
    """The phone number is invalid"""
    ID = "PHONE_NUMBER_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneCodeHashEmpty(BadRequest):
    """phone_code_hash is missing"""
    ID = "PHONE_CODE_HASH_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneCodeEmpty(BadRequest):
    """phone_code is missing"""
    ID = "PHONE_CODE_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneCodeExpired(BadRequest):
    """The confirmation code has expired"""
    ID = "PHONE_CODE_EXPIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneCodeInvalid(BadRequest):
    """The confirmation code is invalid"""
    ID = "PHONE_CODE_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ApiIdInvalid(BadRequest):
    """The api_id/api_hash combination is invalid"""
    ID = "API_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberOccupied(BadRequest):
    """The phone number is already in use"""
    ID = "PHONE_NUMBER_OCCUPIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberUnoccupied(BadRequest):
    """The phone number is not yet being used"""
    ID = "PHONE_NUMBER_UNOCCUPIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsersTooFew(BadRequest):
    """Not enough users (to create a chat, for example)"""
    ID = "USERS_TOO_FEW"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsersTooMuch(BadRequest):
    """The maximum number of users has been exceeded (to create a chat, for example)"""
    ID = "USERS_TOO_MUCH"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class TypeConstructorInvalid(BadRequest):
    """The type constructor is invalid"""
    ID = "TYPE_CONSTRUCTOR_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartInvalid(BadRequest):
    """The file part number is invalid. The value is not between 0 and 3999"""
    ID = "FILE_PART_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartsInvalid(BadRequest):
    """Invalid number of parts. The value is not between 1 and 4000"""
    ID = "FILE_PARTS_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartMissing(BadRequest):
    """Part {x} of the file is missing from storage"""
    ID = "FILE_PART_X_MISSING"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class Md5ChecksumInvalid(BadRequest):
    """The file's checksum did not match the md5_checksum parameter"""
    ID = "MD5_CHECKSUM_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhotoInvalidDimensions(BadRequest):
    """The photo dimensions are invalid"""
    ID = "PHOTO_INVALID_DIMENSIONS"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FieldNameInvalid(BadRequest):
    """The field with the name FIELD_NAME is invalid"""
    ID = "FIELD_NAME_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FieldNameEmpty(BadRequest):
    """The field with the name FIELD_NAME is missing"""
    ID = "FIELD_NAME_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MsgWaitFailed(BadRequest):
    """A waiting call returned an error"""
    ID = "MSG_WAIT_FAILED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PeerIdInvalid(BadRequest):
    """The id/access_hash combination is invalid"""
    ID = "PEER_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageEmpty(BadRequest):
    """The message sent is empty"""
    ID = "MESSAGE_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class EncryptedMessageInvalid(BadRequest):
    """The special binding message (bind_auth_key_inner) contains invalid data"""
    ID = "ENCRYPTED_MESSAGE_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class InputMethodInvalid(BadRequest):
    """The method called is invalid"""
    ID = "INPUT_METHOD_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PasswordHashInvalid(BadRequest):
    """Two-step verification password is invalid"""
    ID = "PASSWORD_HASH_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsernameNotOccupied(BadRequest):
    """The username is not occupied by anyone"""
    ID = "USERNAME_NOT_OCCUPIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsernameInvalid(BadRequest):
    """The username is invalid"""
    ID = "USERNAME_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageIdInvalid(BadRequest):
    """The message id is invalid"""
    ID = "MESSAGE_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageNotModified(BadRequest):
    """The message was not modified because you tried to edit it using the same content"""
    ID = "MESSAGE_NOT_MODIFIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class EntityMentionUserInvalid(BadRequest):
    """The mentioned entity is not an user"""
    ID = "ENTITY_MENTION_USER_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageTooLong(BadRequest):
    """The message text is over 4096 characters"""
    ID = "MESSAGE_TOO_LONG"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class AccessTokenExpired(BadRequest):
    """The bot token is invalid"""
    ID = "ACCESS_TOKEN_EXPIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class BotMethodInvalid(BadRequest):
    """The method can't be used by bots"""
    ID = "BOT_METHOD_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class QueryTooShort(BadRequest):
    """The query is too short"""
    ID = "QUERY_TOO_SHORT"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class SearchQueryEmpty(BadRequest):
    """The query is empty"""
    ID = "SEARCH_QUERY_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChatIdInvalid(BadRequest):
    """The chat id is invalid"""
    ID = "CHAT_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class DateEmpty(BadRequest):
    """The date argument is empty"""
    ID = "DATE_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PersistentTimestampEmpty(BadRequest):
    """The pts is empty"""
    ID = "PERSISTENT_TIMESTAMP_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class CdnMethodInvalid(BadRequest):
    """The method can't be used on CDN DCs"""
    ID = "CDN_METHOD_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
# Generated-style RPC error subclasses (continued); docstrings are runtime
# MESSAGE strings (MESSAGE = __doc__) — do not edit them casually.
class VolumeLocNotFound(BadRequest):
    """The volume location can't be found"""
    ID = "VOLUME_LOC_NOT_FOUND"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FileIdInvalid(BadRequest):
    """The file id is invalid"""
    ID = "FILE_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class LocationInvalid(BadRequest):
    """The file address is invalid"""
    ID = "LOCATION_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChatAdminRequired(BadRequest):
    """The method requires chat admin privileges"""
    ID = "CHAT_ADMIN_REQUIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberBanned(BadRequest):
    """The phone number is banned"""
    ID = "PHONE_NUMBER_BANNED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class AboutTooLong(BadRequest):
    """The about text is too long"""
    ID = "ABOUT_TOO_LONG"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MultiMediaTooLong(BadRequest):
    """The album contains more than 10 items"""
    ID = "MULTI_MEDIA_TOO_LONG"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsernameOccupied(BadRequest):
    """The username is already in use"""
    ID = "USERNAME_OCCUPIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class BotInlineDisabled(BadRequest):
    """The inline feature of the bot is disabled"""
    ID = "BOT_INLINE_DISABLED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class InlineResultExpired(BadRequest):
    """The inline bot query expired"""
    ID = "INLINE_RESULT_EXPIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class InviteHashInvalid(BadRequest):
    """The invite link hash is invalid"""
    ID = "INVITE_HASH_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserAlreadyParticipant(BadRequest):
    """The user is already a participant of this chat"""
    ID = "USER_ALREADY_PARTICIPANT"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class TtlMediaInvalid(BadRequest):
    """The media does not support self-destruction"""
    ID = "TTL_MEDIA_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MaxIdInvalid(BadRequest):
    """The max_id parameter is invalid"""
    ID = "MAX_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChannelInvalid(BadRequest):
    """The channel parameter is invalid"""
    ID = "CHANNEL_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class DcIdInvalid(BadRequest):
    """The dc_id parameter is invalid"""
    ID = "DC_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class LimitInvalid(BadRequest):
    """The limit parameter is invalid"""
    ID = "LIMIT_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class OffsetInvalid(BadRequest):
    """The offset parameter is invalid"""
    ID = "OFFSET_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class EmailInvalid(BadRequest):
    """The email provided is invalid"""
    ID = "EMAIL_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserIsBot(BadRequest):
    """A bot cannot send messages to other bots or to itself"""
    ID = "USER_IS_BOT"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class WebpageCurlFailed(BadRequest):
    """Telegram server could not fetch the provided URL"""
    ID = "WEBPAGE_CURL_FAILED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class StickersetInvalid(BadRequest):
    """The requested sticker set is invalid"""
    ID = "STICKERSET_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PeerFlood(BadRequest):
    """The method can't be used because your account is limited"""
    ID = "PEER_FLOOD"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MediaCaptionTooLong(BadRequest):
    """The media caption is longer than 1024 characters"""
    ID = "MEDIA_CAPTION_TOO_LONG"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserNotMutualContact(BadRequest):
    """The user is not a mutual contact"""
    ID = "USER_NOT_MUTUAL_CONTACT"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserChannelsTooMuch(BadRequest):
    """The user is already in too many channels or supergroups"""
    ID = "USER_CHANNELS_TOO_MUCH"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ApiIdPublishedFlood(BadRequest):
    """You are using an API key that is limited on the server side"""
    ID = "API_ID_PUBLISHED_FLOOD"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserNotParticipant(BadRequest):
    """The user is not a member of this chat"""
    ID = "USER_NOT_PARTICIPANT"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChannelPrivate(BadRequest):
    """The channel/supergroup is not accessible"""
    ID = "CHANNEL_PRIVATE"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageIdsEmpty(BadRequest):
    """The requested message doesn't exist"""
    ID = "MESSAGE_IDS_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class WebpageMediaEmpty(BadRequest):
    """The URL doesn't contain any valid media"""
    ID = "WEBPAGE_MEDIA_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class QueryIdInvalid(BadRequest):
    """The callback query id is invalid"""
    ID = "QUERY_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MediaEmpty(BadRequest):
    """The media is invalid"""
    ID = "MEDIA_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserIsBlocked(BadRequest):
    """The user blocked you"""
    ID = "USER_IS_BLOCKED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class YouBlockedUser(BadRequest):
    """You blocked this user"""
    ID = "YOU_BLOCKED_USER"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class AdminsTooMuch(BadRequest):
    """The chat has too many administrators"""
    ID = "ADMINS_TOO_MUCH"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class BotsTooMuch(BadRequest):
    """The chat has too many bots"""
    ID = "BOTS_TOO_MUCH"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserAdminInvalid(BadRequest):
    """The action requires admin privileges"""
    ID = "USER_ADMIN_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class InputUserDeactivated(BadRequest):
    """The target user has been deactivated"""
    ID = "INPUT_USER_DEACTIVATED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PasswordRecoveryNa(BadRequest):
    """The password recovery e-mail is not available"""
    ID = "PASSWORD_RECOVERY_NA"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
# Generated-style RPC error subclasses (continued); docstrings are runtime
# MESSAGE strings (MESSAGE = __doc__) — do not edit them casually.
class PasswordEmpty(BadRequest):
    """The password entered is empty"""
    ID = "PASSWORD_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhoneNumberFlood(BadRequest):
    """This number has tried to login too many times"""
    ID = "PHONE_NUMBER_FLOOD"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class TakeoutInvalid(BadRequest):
    """The takeout id is invalid"""
    ID = "TAKEOUT_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class TakeoutRequired(BadRequest):
    """The method must be invoked inside a takeout session"""
    ID = "TAKEOUT_REQUIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessagePollClosed(BadRequest):
    """You can't interact with a closed poll"""
    ID = "MESSAGE_POLL_CLOSED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MediaInvalid(BadRequest):
    """The media is invalid"""
    ID = "MEDIA_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class BotScoreNotModified(BadRequest):
    """The bot score was not modified"""
    ID = "BOT_SCORE_NOT_MODIFIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserBotRequired(BadRequest):
    """The method can be used by bots only"""
    ID = "USER_BOT_REQUIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ImageProcessFailed(BadRequest):
    """The server failed to process your image"""
    ID = "IMAGE_PROCESS_FAILED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UsernameNotModified(BadRequest):
    """The username was not modified"""
    ID = "USERNAME_NOT_MODIFIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class CallAlreadyAccepted(BadRequest):
    """The call is already accepted"""
    ID = "CALL_ALREADY_ACCEPTED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class CallAlreadyDeclined(BadRequest):
    """The call is already declined"""
    ID = "CALL_ALREADY_DECLINED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class PhotoExtInvalid(BadRequest):
    """The photo extension is invalid"""
    ID = "PHOTO_EXT_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ExternalUrlInvalid(BadRequest):
    """The external media URL is invalid"""
    ID = "EXTERNAL_URL_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChatNotModified(BadRequest):
    """The chat settings were not modified"""
    ID = "CHAT_NOT_MODIFIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ResultsTooMuch(BadRequest):
    """The result contains too many items"""
    ID = "RESULTS_TOO_MUCH"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ResultIdDuplicate(BadRequest):
    """The result contains items with duplicated identifiers"""
    ID = "RESULT_ID_DUPLICATE"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class AccessTokenInvalid(BadRequest):
    """The bot access token is invalid"""
    ID = "ACCESS_TOKEN_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class InviteHashExpired(BadRequest):
    """The chat invite link is no longer valid"""
    ID = "INVITE_HASH_EXPIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class UserBannedInChannel(BadRequest):
    """You are limited, check @SpamBot for details"""
    ID = "USER_BANNED_IN_CHANNEL"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MessageEditTimeExpired(BadRequest):
    """You can no longer edit this message"""
    ID = "MESSAGE_EDIT_TIME_EXPIRED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FolderIdInvalid(BadRequest):
    """The folder id is invalid"""
    ID = "FOLDER_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MegagroupPrehistoryHidden(BadRequest):
    """The action failed because the supergroup has the pre-history hidden"""
    ID = "MEGAGROUP_PREHISTORY_HIDDEN"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ChatLinkExists(BadRequest):
    """The action failed because the supergroup is linked to a channel"""
    ID = "CHAT_LINK_EXISTS"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class LinkNotModified(BadRequest):
    """The chat link was not modified because you tried to link to the same target"""
    ID = "LINK_NOT_MODIFIED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class BroadcastIdInvalid(BadRequest):
    """The channel is invalid"""
    ID = "BROADCAST_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class MegagroupIdInvalid(BadRequest):
    """The supergroup is invalid"""
    ID = "MEGAGROUP_ID_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ButtonDataInvalid(BadRequest):
    """The button callback data contains invalid data or exceeds 64 bytes"""
    ID = "BUTTON_DATA_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class StartParamInvalid(BadRequest):
    """The start parameter is invalid"""
    ID = "START_PARAM_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ArticleTitleEmpty(BadRequest):
    """The article title is empty"""
    ID = "ARTICLE_TITLE_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartTooBig(BadRequest):
    """The size limit (512 KB) for the content of the file part has been exceeded"""
    ID = "FILE_PART_TOO_BIG"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartEmpty(BadRequest):
    """The file part sent is empty"""
    ID = "FILE_PART_EMPTY"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartSizeInvalid(BadRequest):
    """512 KB cannot be evenly divided by part_size"""
    ID = "FILE_PART_SIZE_INVALID"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FilePartSizeChanged(BadRequest):
    """The part size is different from the size of one of the previous parts in the same file"""
    ID = "FILE_PART_SIZE_CHANGED"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class FileMigrate(BadRequest):
    """The file is in Data Center No. {x}"""
    ID = "FILE_MIGRATE_X"
    """``str``: RPC Error ID"""
    MESSAGE = __doc__
class ResultTypeInvalid(BadRequest):
"""The result type is invalid"""
ID = "RESULT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoThumbUrlEmpty(BadRequest):
"""The photo thumb URL is empty"""
ID = "PHOTO_THUMB_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoThumbUrlInvalid(BadRequest):
"""The photo thumb URL is invalid"""
ID = "PHOTO_THUMB_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoContentUrlEmpty(BadRequest):
"""The photo content URL is empty"""
ID = "PHOTO_CONTENT_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PhotoContentTypeInvalid(BadRequest):
"""The photo content type is invalid"""
ID = "PHOTO_CONTENT_TYPE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentInvalid(BadRequest):
"""The web document is invalid"""
ID = "WEBDOCUMENT_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentUrlEmpty(BadRequest):
"""The web document URL is empty"""
ID = "WEBDOCUMENT_URL_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentUrlInvalid(BadRequest):
"""The web document URL is invalid"""
ID = "WEBDOCUMENT_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class WebdocumentMimeInvalid(BadRequest):
"""The web document mime type is invalid"""
ID = "WEBDOCUMENT_MIME_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ButtonUrlInvalid(BadRequest):
"""The button url is invalid"""
ID = "BUTTON_URL_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AuthBytesInvalid(BadRequest):
"""The authorization bytes are invalid"""
ID = "AUTH_BYTES_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class UserIdInvalid(BadRequest):
"""The user ID is invalid"""
ID = "USER_ID_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ChannelsTooMuch(BadRequest):
"""You have joined too many channels or supergroups"""
ID = "CHANNELS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AdminRankInvalid(BadRequest):
"""The custom administrator title is invalid or is longer than 16 characters"""
ID = "ADMIN_RANK_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class AdminRankEmojiNotAllowed(BadRequest):
"""Emojis are not allowed in custom administrator titles"""
ID = "ADMIN_RANK_EMOJI_NOT_ALLOWED"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileReferenceEmpty(BadRequest):
"""The file reference is empty"""
ID = "FILE_REFERENCE_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FileReferenceInvalid(BadRequest):
"""The file reference is invalid"""
ID = "FILE_REFERENCE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class ReplyMarkupTooLong(BadRequest):
"""The reply markup is too long"""
ID = "REPLY_MARKUP_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class SecondsInvalid(BadRequest):
"""The seconds interval is invalid, for slow mode try with 0 (off), 10, 30, 60 (1m), 300 (5m), 900 (15m) or 3600 (1h)"""
ID = "SECONDS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizMultipleInvalid(BadRequest):
"""A quiz can't have multiple answers"""
ID = "QUIZ_MULTIPLE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswersEmpty(BadRequest):
"""The correct answers of the quiz are empty"""
ID = "QUIZ_CORRECT_ANSWERS_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswerInvalid(BadRequest):
"""The correct answers of the quiz are invalid"""
ID = "QUIZ_CORRECT_ANSWER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class QuizCorrectAnswersTooMuch(BadRequest):
"""The quiz contains too many correct answers"""
ID = "QUIZ_CORRECT_ANSWERS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class OptionsTooMuch(BadRequest):
"""The poll options are too many"""
ID = "OPTIONS_TOO_MUCH"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollAnswersInvalid(BadRequest):
"""The poll answers are invalid"""
ID = "POLL_ANSWERS_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PollQuestionInvalid(BadRequest):
"""The poll question is invalid"""
ID = "POLL_QUESTION_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class FreshChangeAdminsForbidden(BadRequest):
"""Recently logged-in users cannot change admins"""
ID = "FRESH_CHANGE_ADMINS_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class BroadcastPublicVotersForbidden(BadRequest):
"""Polls with public voters cannot be sent in channels"""
ID = "BROADCAST_PUBLIC_VOTERS_FORBIDDEN"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class InputFilterInvalid(BadRequest):
"""The filter is invalid for this query"""
ID = "INPUT_FILTER_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmoticonEmpty(BadRequest):
"""The emoticon parameter is empty"""
ID = "EMOTICON_EMPTY"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class EmoticonInvalid(BadRequest):
"""The emoticon parameter is invalid"""
ID = "EMOTICON_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class VideoFileInvalid(BadRequest):
"""The video file is invalid"""
ID = "VIDEO_FILE_INVALID"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
class PrivacyTooLong(BadRequest):
"""Your privacy exception list has exceeded the maximum capacity"""
ID = "PRIVACY_TOO_LONG"
"""``str``: RPC Error ID"""
MESSAGE = __doc__
| 2.703125 | 3 |
from yaml.constructor import SafeConstructor
# Create a custom safe constructor class that inherits from SafeConstructor.
class MySafeConstructor(SafeConstructor):
    # Handle YAML boolean-tagged scalars by returning the raw scalar string
    # instead of a Python bool, so the literal text is preserved as-is.
    def add_bool(self, node):
        return self.construct_scalar(node)

# Register the handler above for the standard YAML bool tag on the custom
# constructor class, overriding the default bool construction.
MySafeConstructor.add_constructor('tag:yaml.org,2002:bool',
        MySafeConstructor.add_bool)
| 2.703125 | 3 |
from __future__ import annotations
import time
from datetime import datetime, timezone
from lazy.models.timez import TimeCls
from .config import TimeZoneConfigz
from .base_imports import *
def _require_pytz():
    # Lazily (re)import pytz via the project's Lib helper.
    pytz = Lib.import_lib('pytz')
    Lib.reload_module(pytz)
def _require_dateparser():
    # Lazily (re)import dateparser via the project's Lib helper.
    dateparser = Lib.import_lib('dateparser')
    Lib.reload_module(dateparser)
if _pytz_available:
    import pytz
    api_timezone = pytz.timezone(TimeZoneConfigz.desired)
    utc_timezone = pytz.timezone("UTC")
    dtime_now_tz = lambda: datetime.now(api_timezone)
else:
    # NOTE(review): the requirer function is assigned where a tzinfo/callable
    # is expected, so first use triggers the lazy import (or fails) —
    # confirm this fallback is intended.
    api_timezone = _require_pytz
    utc_timezone = _require_pytz
    dtime_now_tz = _require_pytz
if _dateparser_available:
    import dateparser
dtime_now = lambda: datetime.now()
dtime_now_utc = lambda: datetime.now(timezone.utc)
def timer(s: float = None):
    """Return the current epoch time, or the seconds elapsed since *s*.

    Args:
        s: an earlier ``time.time()`` value; when omitted, the current time
           is returned so it can be fed back in on a later call.

    Returns:
        float: ``time.time()`` when *s* is None, otherwise ``time.time() - s``.
    """
    # `is None` instead of truthiness: a start time of exactly 0.0 (the epoch)
    # should still count as a start time, not as "no argument".
    return time.time() if s is None else (time.time() - s)
def dtime_parse(timeframe: str = '30 mins', future: bool = False):
    """Parse a relative timeframe string into an aware UTC datetime.

    With ``future=True`` the timeframe is read as "in <timeframe>"; otherwise
    it is read as "<timeframe> ago".
    """
    if not _dateparser_available: _require_dateparser()
    phrase = ('in ' + timeframe) if future else (timeframe + ' ago')
    prefer = 'future' if future else 'past'
    return dateparser.parse(phrase, settings={'PREFER_DATES_FROM': prefer, 'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True})
def dtime_diff(dtime: datetime = None, timeframe: str = '30 mins', future: bool = False, secs_only: bool = False):
    """Difference between *dtime* (default: UTC now) and a parsed timeframe.

    Returns a ``timedelta`` unless ``secs_only`` is set, in which case the
    total seconds (float) is returned.
    """
    anchor = dtime or datetime.now(timezone.utc)
    parsed = dtime_parse(timeframe=timeframe, future=future)
    # local renamed from `dtime_diff` to avoid shadowing the function name
    delta = (parsed - anchor) if future else (anchor - parsed)
    return delta.total_seconds() if secs_only else delta
def get_dtime(dtime: datetime = None, start: datetime = None, timeframe: str = None, future: bool = False) -> datetime:
    """Resolve a datetime (or timedelta) from the given combination of inputs.

    Priority:
        1. ``dtime`` and ``start``     -> ``start - dtime`` (a timedelta)
        2. ``dtime`` and ``timeframe`` -> difference against the parsed timeframe
        3. ``timeframe`` alone         -> the parsed (past or future) datetime
        4. nothing                     -> current UTC time
    """
    if dtime and start: return start - dtime
    # bugfix: dtime_diff has no `dtobj` parameter -- the original
    # `dtime_diff(dtobj=dtime, ...)` raised TypeError; the keyword is `dtime`
    if dtime and timeframe: return dtime_diff(dtime=dtime, timeframe=timeframe, future=future)
    if timeframe: return dtime_parse(timeframe=timeframe, future=future)
    return dtime_now_utc()
def get_dtime_str(dtime: datetime = None, start: datetime = None, timeframe: str = None, future: bool = False, tz_format: bool = True, dt_format: str = None):
    """Resolve a datetime via ``get_dtime`` and render it as a string.

    ``tz_format`` (the default) uses the configured timezone format string;
    otherwise ``dt_format`` is used when given, falling back to ISO-8601 with
    a 'T' separator.
    """
    resolved = get_dtime(dtime=dtime, start=start, timeframe=timeframe, future=future)
    if tz_format:
        return resolved.strftime(TimeZoneConfigz.tz_format)
    return resolved.strftime(dt_format) if dt_format else resolved.isoformat('T')
def get_dtime_iso(dtime_str: str, z_break: str = 'Z', z_repl: str = '.000000+00:00'):
    """Parse an ISO timestamp, normalising a marker substring first.

    When ``z_break`` is non-empty, every occurrence of it in ``dtime_str`` is
    replaced with ``z_repl`` before parsing.  Otherwise the parsed datetime is
    converted to UTC (rancher clusters report central time, per the original
    note, so CST is converted to UTC).
    """
    if not z_break:
        return datetime.fromisoformat(dtime_str).astimezone(utc_timezone)
    normalized = dtime_str.replace(z_break, z_repl)
    return datetime.fromisoformat(normalized)
def get_date(timeframe: str = None, future: bool = False):
    """Return the current UTC time, or a datetime parsed from *timeframe*."""
    if timeframe:
        return dtime_parse(timeframe=timeframe, future=future)
    return dtime_now_utc()
def get_dtime_secs(dtime: datetime = None, start: datetime = None, as_cls: bool = False):
    """Seconds between two datetimes, or seconds elapsed since *dtime*.

    Args:
        dtime: the earlier datetime.
        start: the later datetime; when given, ``start - dtime`` is returned.
        as_cls: wrap the elapsed seconds (against a naive "now") in TimeCls.

    Returns:
        float seconds, or a TimeCls when ``as_cls`` is set.
    """
    if dtime and start: return (start - dtime).total_seconds()
    if as_cls: return TimeCls((dtime_now() - dtime).total_seconds())
    # Subtracting a naive datetime from an aware one raises TypeError;
    # fall back to a naive "now" in that case.  Narrowed from a bare
    # `except:` so unrelated errors are no longer swallowed.
    try: return (dtime_now_utc() - dtime).total_seconds()
    except TypeError: return (dtime_now() - dtime).total_seconds()
# Short aliases / backwards-compatible names for the helpers defined above.
dtime = get_dtime
dtstr = get_dtime_str
dtsecs = get_dtime_secs
dtnow = dtime_now
dtnow_utc = dtime_now_utc
get_timestamp = dtime_now
get_timestamp_tz = dtime_now_tz
get_timestamp_utc = dtime_now_utc
dtiso = get_dtime_iso
# Public API of this module.
__all__ = [
    'time',
    'datetime',
    'dtime_now',
    'dtime_now_tz',
    'dtime_now_utc',
    'get_timestamp',
    'get_timestamp_tz',
    'get_timestamp_utc',
    'api_timezone',
    'utc_timezone',
    # NOTE(review): 'timezone_format' is not defined anywhere in this module;
    # unless it is star-imported from .base_imports, `from ... import *`
    # will raise AttributeError -- confirm and drop if undefined.
    'timezone_format',
    'timer',
    'dtime_parse',
    'dtime_diff',
    'get_dtime',
    'get_dtime_str',
    'get_dtime_iso',
    'get_date',
    'get_dtime_secs',
    'TimeCls',
]
main.py | Israel7777/plant_disease_experements | 26 | 12772782 | <reponame>Israel7777/plant_disease_experements
import os
import argparse
import subprocess
import numpy as np
from PIL import Image
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.inception_v3 import preprocess_input
# species names
APPLE = 'apple'
BEAN = 'bean'
BLUEBERRY = 'blueberry'
CHERRY = 'cherry'
CORN = 'corn'
GRAPE = 'grape'
GRAPEFRUIT = 'grapefruit'
ORANGE = 'orange'
PEACH = 'peach'
PEPPER = 'pepper'
POTATO = 'potato'
RASPBERRY = 'raspberry'
SORGHUM = 'sorghum'
SOYBEAN = 'soybean'
SQUASH = 'squash'
STRAWBERRY = 'strawberry'
SUGARCANE = 'sugarcane'
TOMATO = 'tomato'
# all species and supported species names
SPECIES = [APPLE, BEAN, BLUEBERRY, CHERRY, CORN, GRAPE, GRAPEFRUIT, ORANGE, PEACH,
PEPPER, POTATO, RASPBERRY, SORGHUM, SOYBEAN, SQUASH, STRAWBERRY, SUGARCANE, TOMATO]
DISEASE_SUPPORTED_SPECIES = {APPLE, CHERRY, CORN, GRAPE, PEACH, PEPPER, POTATO, STRAWBERRY, SUGARCANE, TOMATO, }
# classes for each species
APPLE_CLASSES = ['Apple___Apple_scab', 'Apple___Black_rot', 'Apple___Cedar_apple_rust', 'Apple___healthy']
CHERRY_CLASSES = ['Cherry_(including_sour)___Powdery_mildew', 'Cherry_(including_sour)___healthy']
CORN_CLASSES = ['Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot', 'Corn_(maize)___Common_rust_',
'Corn_(maize)___Northern_Leaf_Blight', 'Corn_(maize)___healthy']
GRAPE_CLASSES = ['Grape___Black_rot', 'Grape___Esca_(Black_Measles)', 'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)',
'Grape___healthy']
PEACH_CLASSES = ['Peach___Bacterial_spot', 'Peach___healthy']
PEPPER_CLASSES = ['Pepper,_bell___Bacterial_spot', 'Pepper,_bell___healthy']
POTATO_CLASSES = ['Potato___Early_blight', 'Potato___Late_blight', 'Potato___healthy']
STRAWBERRY_CLASSES = ['Strawberry___Leaf_scorch', 'Strawberry___healthy']
SUGARCANE_CLASSES = ['Sugarcane leaf spot', 'Sugarcane aphid', 'Sugarcane coal fouling']
TOMATO_CLASSES = ['Tomato___Bacterial_spot', 'Tomato___Early_blight', 'Tomato___Late_blight', 'Tomato___Leaf_Mold',
'Tomato___Septoria_leaf_spot', 'Tomato___Spider_mites Two-spotted_spider_mite',
'Tomato___Target_Spot', 'Tomato___Tomato_Yellow_Leaf_Curl_Virus', 'Tomato___Tomato_mosaic_virus',
'Tomato___healthy']
# all species classes with their species name as key
PLANT_CLASSES = {
APPLE: APPLE_CLASSES,
CHERRY: CHERRY_CLASSES,
CORN: CORN_CLASSES,
GRAPE: GRAPE_CLASSES,
PEACH: PEACH_CLASSES,
PEPPER: PEPPER_CLASSES,
POTATO: POTATO_CLASSES,
STRAWBERRY: STRAWBERRY_CLASSES,
SUGARCANE: SUGARCANE_CLASSES,
TOMATO: TOMATO_CLASSES,
}
# types of models to be used for predictions
VGG_ARCHITECTURE = 'vgg'
INCEPTIONV3_ARCHITECTURE = 'inceptionv3'
SUPPORTED_MODEL_TYPES = {VGG_ARCHITECTURE, INCEPTIONV3_ARCHITECTURE}
# modes of detection i.e detecting plant disease or species
DISEASE_DETECTION = 'disease_detection'
SPECIES_DETECTION = 'species_detection'
# image target sizes for our supported model architectures
TARGET_IMAGE_SIZES = {
VGG_ARCHITECTURE: {
DISEASE_DETECTION: (64, 64),
SPECIES_DETECTION: (100, 100),
},
INCEPTIONV3_ARCHITECTURE: {
DISEASE_DETECTION: (100, 100),
SPECIES_DETECTION: (100, 100),
}
}
# vgg models to be used with their species name as key
VGG_MODELS = {
APPLE: 'Apple_0.9395_VGG.h5',
CHERRY: 'Cherry_0.9873_VGG.h5',
CORN: 'Corn_0.8926_VGG.h5',
GRAPE: 'Grape_0.9293_VGG.h5',
PEACH: 'Peach_97_VGG.h5',
TOMATO: 'Tomato_0.8675_VGG.h5',
PEPPER: 'pepper_95.90.h5',
POTATO: 'potato_90.62.h5',
STRAWBERRY: 'starwberry_99.h5',
SUGARCANE: 'Sugarcane_0.8356_VGG.h5'
}
# inceptionv3 models to be used with their species name as key
INCEPTIONV3_MODELS = {
APPLE: 'InceptionV3-scratch_segApple.h5',
CHERRY: 'InceptionV3-scratch_segCherry.h5',
CORN: 'InceptionV3-scratch_segCorn.h5',
GRAPE: 'InceptionV3-scratch_segGrape.h5',
PEACH: 'InceptionV3-scratch_segPeach.h5',
TOMATO: 'InceptionV3-scratch_segTomato.h5',
PEPPER: 'InceptionV3-scratch_segPepper.h5',
POTATO: 'InceptionV3-scratch_segPotato.h5',
STRAWBERRY: 'InceptionV3-scratch_segStrawberry.h5',
SUGARCANE: 'InceptionV3-scratch_segSugarcane.h5'
}
# base path from where models will be loaded
MODEL_STORAGE_BASE = 'Plant_Disease_Detection_Benchmark_models/Models'
def get_classes(species_name):
    """
    Get classes of disease for a species

    Args:
        species_name: name of species (must be a key of PLANT_CLASSES)

    Returns:
        a list of disease classes for a specific species

    Raises:
        KeyError: if species_name is not present in PLANT_CLASSES
    """
    return PLANT_CLASSES[species_name]
def get_disease_model(species, model_type):
    """
    Resolve the disease-classifier model file name for a species/architecture pair.

    Args:
        species: species name to identify which species model should be used
        model_type: type of model to be used for prediction

    Returns:
        disease classifier model file name

    Raises:
        ValueError: if the species has no disease model, or the model type
            is not a supported architecture
    """
    if species not in DISEASE_SUPPORTED_SPECIES:
        raise ValueError("`{}` species has no disease model yet.\n"
                         "Species tha have disease models are {}".format(species, DISEASE_SUPPORTED_SPECIES))

    # dispatch table instead of an if/elif chain
    models_by_architecture = {
        VGG_ARCHITECTURE: VGG_MODELS,
        INCEPTIONV3_ARCHITECTURE: INCEPTIONV3_MODELS,
    }
    if model_type not in models_by_architecture:
        raise ValueError("No such `{}` model type is supported.\n"
                         "Supported model types are {}".format(model_type, SUPPORTED_MODEL_TYPES))
    return models_by_architecture[model_type][species]
def get_species_model(model_type):
    """
    Get appropriate species classifier model file name

    Args:
        model_type: type of model to be used for prediction

    Returns:
        species classifier model file name

    Raises:
        ValueError: if model_type is not a supported architecture
    """
    if model_type == VGG_ARCHITECTURE:
        return 'VGG_all_100p_94.h5'
    elif model_type == INCEPTIONV3_ARCHITECTURE:
        return 'InceptionV3-scratch_segspecies.h5'
    else:
        raise ValueError("No such `{}` model type is supported.\n"
                         "Supported model types are {}".format(model_type, SUPPORTED_MODEL_TYPES))
def get_predictions(model_path, img_path, img_target_size):
    """
    Loads model and image and make predictions using them

    Args:
        model_path: filesystem path of model
        img_path: filesystem path of image
        img_target_size: target image size to reshape the image if necessary

    Returns:
        a tuple of:
            1. array of prediction values by the model for all classes
            2. array of indices that can sort the classes from best prediction to worst

    Raises:
        ValueError: if no model file exists at model_path
    """
    if not os.path.exists(model_path):
        raise ValueError('No such `{}` file found\n'
                         'Please, checkout the readme of the project '
                         'on github and download required models'.format(model_path))

    model = load_model(model_path)

    # get image as array and resize it if necessary
    pil_img = Image.open(img_path)
    if pil_img.size != img_target_size:
        pil_img = pil_img.resize(img_target_size)
    img = image.img_to_array(pil_img)

    # if alpha channel found, discard it
    if img.shape[2] == 4:
        img = img[:, :, :3]

    # preprocess image: add a batch dimension, then apply the scaling of
    # keras' inception_v3 preprocess_input (imported at the top of the file)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)

    preds = model.predict(img).flatten()
    # get predictions index sorted based on the best predictions
    # (argsort ascending, reversed for descending confidence)
    value_ = preds.argsort()
    sorted_preds_index = value_[::-1]

    return preds, sorted_preds_index
def segment_image(img_path):
    """
    Segment leaf from an image and create new segmented image file

    The segmentation is delegated to ``leaf-image-segmentation/segment.py``,
    which writes the segmented copy next to the input with a ``_marked``
    suffix (the name computed below).

    Args:
        img_path: filesystem path of an image

    Returns:
        segmented image file name

    Raises:
        subprocess.CalledProcessError: if the segmentation script exits non-zero
    """
    image_name, extension = os.path.splitext(img_path)
    segmented_image_name = image_name + "_marked" + extension  # the future segmented image name to be
    # The script's stdout is not needed (previously bound to an unused local);
    # check_output is kept so a non-zero exit status raises instead of being
    # silently ignored.
    subprocess.check_output(['python', "leaf-image-segmentation/segment.py", "-s", img_path])
    print('Info: Input image segmented.')
    return segmented_image_name
def segment_and_predict_species(img_path, model_type=VGG_ARCHITECTURE, do_print=True):
    """
    Given image path, first segment the image and predict species on the segmented image

    Args:
        img_path: filesystem path of an image
        do_print: print information about the prediction
        model_type: type of model to be used for prediction

    Returns:
        a tuple of:
            1. the top one predicted species
            2. segmented image path
    """
    segmented_image_name = segment_image(img_path)

    # species classification runs on the segmented image, not the original
    model_path = os.path.join(MODEL_STORAGE_BASE, get_species_model(model_type))
    target_image_size = TARGET_IMAGE_SIZES[model_type][SPECIES_DETECTION]
    preds, sorted_preds_index = get_predictions(model_path, segmented_image_name, target_image_size)

    if do_print:
        print("Plant Species :")
        for i in sorted_preds_index:
            print("\t - " + str(SPECIES[i]) + ": \t" + str(preds[i]))

    return str(SPECIES[sorted_preds_index[0]]), segmented_image_name
def predict_species(img_path, model_type=VGG_ARCHITECTURE, do_print=True):
    """
    Given an image path, predict the species on the raw image without segmenting

    Args:
        img_path: filesystem path of an image
        do_print: print information about the prediction
        model_type: type of model to be used for prediction

    Returns:
        the top one predicted species
    """
    species_model = get_species_model(model_type)
    model_location = os.path.join(MODEL_STORAGE_BASE, species_model)
    input_size = TARGET_IMAGE_SIZES[model_type][SPECIES_DETECTION]
    scores, ranked = get_predictions(model_location, img_path, input_size)

    if do_print:
        print("Plant Species :")
        for idx in ranked:
            print(f"\t - {SPECIES[idx]!s}: \t{scores[idx]!s}")

    return str(SPECIES[ranked[0]])
def predict_disease(img_path, species, model_type=VGG_ARCHITECTURE, do_print=True):
    """
    Given an image path and species of the image, predict the disease on the raw image without segmenting

    Args:
        img_path: filesystem path of an image
        species: name of species
        model_type: type of model to be used for prediction
        do_print: print information about the prediction

    Returns:
        the top one predicted disease or None if the species is not supported(has no disease model yet for the species)

    Raises:
        ValueError: if species is not a known species name
    """
    if species not in SPECIES:
        raise ValueError("No such `{}` species is supported.\n"
                         "Supported species are {}".format(species, SPECIES))
    if species not in DISEASE_SUPPORTED_SPECIES:
        # known species, but no disease model exists for it yet
        print("Info: For `{}` species, a disease can not be predicted "
              "since its disease model is not implemented yet.".format(species))
        return None
    else:
        SPECIES_CLASSES = get_classes(species)
        model_path = os.path.join(MODEL_STORAGE_BASE, get_disease_model(species, model_type))

        target_image_size = TARGET_IMAGE_SIZES[model_type][DISEASE_DETECTION]
        preds, sorted_preds_index = get_predictions(model_path, img_path, target_image_size)

        if do_print:
            print("Plant Disease : ")
            for i in sorted_preds_index:
                print("\t-" + str(SPECIES_CLASSES[i]) + ": \t" + str(preds[i]))

        return str(SPECIES_CLASSES[sorted_preds_index[0]])
def get_cmd_args():
    """
    Get command line arguments if found or use default ones

    Returns:
        list of command line arguments

    Note: `image` is a required positional; `--model`, `--segment` and
    `--species` are optional and lower-cased on input where applicable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("image", type=str, help='Image file path')
    parser.add_argument('--model', type=str.lower, default=VGG_ARCHITECTURE,
                        choices=[VGG_ARCHITECTURE, INCEPTIONV3_ARCHITECTURE],
                        help='Type of model to user for prediction')
    parser.add_argument("--segment", action='store_true', help='Perform segmentation before prediction')
    parser.add_argument("--species", type=str.lower, default='', help='Species Name if known')

    args = parser.parse_args()
    return args
if __name__ == "__main__":
    args = get_cmd_args()

    # Dispatch on (segment?, species known?).  Bug fixed: the original
    # segment-without-species branch dropped args.model when calling
    # predict_disease, silently falling back to the default architecture
    # even when --model inceptionv3 was requested.  The trailing
    # "Make Sure Your Command is Correct" branch was unreachable (the four
    # boolean combinations are exhaustive) and has been removed.
    if args.segment:
        if args.species:
            # segment first, then classify disease for the known species
            image_name = segment_image(args.image)
            predict_disease(image_name, args.species, args.model)
        else:
            # segment, detect the species, then classify its disease
            species, image_name = segment_and_predict_species(args.image, args.model)
            predict_disease(image_name, species, args.model)
    else:
        if args.species:
            predict_disease(args.image, args.species, args.model)
        else:
            species = predict_species(args.image, args.model)
            predict_disease(args.image, species, args.model)
#!/usr/bin/env python3
# coding=utf-8
from rdflib import Graph, Namespace, URIRef, BNode, Literal, RDF
from rdflib.namespace import SKOS, XSD, OWL, DC
from rdflib.namespace import DCTERMS as DCT
from SPARQLWrapper import SPARQLWrapper, SPARQLExceptions
import socket
import time
from pymarc import Record, Field, XMLWriter, MARCReader, parse_xml_to_array
from lxml import etree as ET
import shutil
import pickle
import os
import argparse
import hashlib
import unicodedata
from configparser import ConfigParser, ExtendedInterpolation
import sys
import logging
from datetime import datetime, date, timedelta
import subprocess
import urllib
from collections import namedtuple
from collections.abc import Sequence
from html.parser import HTMLParser
# globaalit muuttujat
CONVERSION_PROCESS = "Finto SKOS to MARC 1.03"
CONVERSION_URI = "https://www.kiwi.fi/x/XoK6B" # konversio-APIn uri tai muu dokumentti, jossa kuvataan konversio
CREATOR_AGENCY = "FI-NL" # Tietueen luoja/omistaja & luetteloiva organisaatio, 040-kentat
DEFAULTCREATIONDATE = "1980-01-01"
KEEPMODIFIEDAFTER = "ALL"
KEEPDEPRECATEDAFTER = "ALL"
ENDPOINT_ADDRESS = "http://api.dev.finto.fi/sparql"
ENDPOINTGRAPHS = [] # palvelupisteen graafien osoitteet, jotka ladataan läpikäytäviin muihin graafeihin
IGNOREOTHERGRAPHWARNINGS = False # lokitetaanko virheet muissa, kuin prosessoitavassa graafissa
NORMALIZATION_FORM = "NFD" # käytetään UTF8-merkkien dekoodauksessa
YSO=Namespace('http://www.yso.fi/onto/yso/')
YSOMETA=Namespace('http://www.yso.fi/onto/yso-meta/')
YSOPAIKATGRAPH=Namespace("http://www.yso.fi/onto/yso-paikat/")
YSA=Namespace('http://www.yso.fi/onto/ysa/')
YSAMETA=Namespace('http://www.yso.fi/onto/ysa-meta/')
ALLARS=Namespace('http://www.yso.fi/onto/allars/')
ALLARSMETA=Namespace("http://www.yso.fi/onto/allars-meta/")
KOKO=Namespace('http://www.yso.fi/onto/koko/')
LCSH=Namespace("http://id.loc.gov/authorities/subjects/")
LCGF=Namespace("http://id.loc.gov/authorities/genreForms/")
RDAU=Namespace('http://rdaregistry.info/Elements/u/')
ISOTHES=Namespace('http://purl.org/iso25964/skos-thes#')
SKOSEXT=Namespace('http://purl.org/finnonto/schema/skosext#')
SLM=Namespace("http://urn.fi/URN:NBN:fi:au:slm:")
UDC=Namespace("http://udcdata.info/")
WIKIDATA=Namespace("http://www.wikidata.org/entity/")
LANGUAGES = {
'fi': 'fin',
'sv': 'swe',
'en': 'eng',
'de': 'ger',
'et': 'est',
'fr': 'fre',
'it': 'ita',
'ru': 'rus',
'sme': 'sme', # pohjoissaame
'sma': 'sma', # eteläsaame
'smn': 'smn', # inarinsaame
'sms': 'sms', # koltansaame
'smj': 'smj', # luulajansaame
}
#LCSH mäpättävät 1xx-kentät
LCSH_1XX_FIELDS = ["100", "110", "111", "130", "147", "148", "150", "151", "155", "162", "180", "181", "182", "185"]
TRANSLATIONS = {
SKOSEXT.partOf: {
"fi": "osa kokonaisuutta/käsitettä",
"sv": "är en del av",
"en": "is part of"
},
"682iDEFAULT": {
"fi": "Käytöstä poistetun termin korvaava termi",
"sv": "Termen som ersättar den avlagda termen",
"en": "Term replacing the deprecated term"
},
"688aCREATED": {
"fi": "Luotu",
"sv": "Skapad",
"en": "Created"
},
"688aMODIFIED": {
"fi": "Viimeksi muokattu",
"sv": "Senast editerad",
"en": "Last modified"
}
}
# arvot tulevat osakentan $w 1. merkkipaikkaan
SEEALSOPROPS = {
SKOS.broader : 'g',
SKOS.narrower : 'h',
SKOS.related : 'n',
RDAU.P60683 : 'a',
RDAU.P60686 : 'b',
SKOSEXT.partOf : 'i',
ISOTHES.broaderPartitive : "g",
ISOTHES.narrowerPartitive : "h"
}
SORT_5XX_W_ORDER = {
'g': '001',
'h': '002',
'n': '003',
'i': '004',
'a': '005',
'b': '006'
}
# paikka 5, 'n' = uusi, 'c' = muuttunut/korjattu, d = poistettu (ei seuraajia), x = 1 seuraaja, s = >= 2 seuraajaa
LEADERNEW = '00000nz a2200000n 4500'
LEADERCHANGED = '00000cz a2200000n 4500'
LEADERDELETED0 = '00000dz a2200000n 4500'
LEADERDELETED1 = '00000xz a2200000n 4500'
LEADERDELETED2 = '00000sz a2200000n 4500'
CATALOGCODES = '|n|anznnbabn | ana '
CATALOGCODES_NA = '|n|enznnbbbn | ana '
GROUPINGCLASSES = [ISOTHES.ConceptGroup, ISOTHES.ThesaurusArray, SKOS.Collection, YSOMETA.Hierarchy]
# tuple helpottamaan getValues-apufunktion arvojen käsittelyä
ValueProp = namedtuple("ValueProp", ['value', 'prop'])
# apufunktiot
def readCommandLineArguments():
    """Build and parse the CLI arguments for the SKOS-to-MARC converter.

    Only --vocabulary_code is required; values from a --config file are
    overridden by explicit CLI options (per the --config help text).
    """
    parser = argparse.ArgumentParser(description="Program for converting Finto SKOS-vocabularies into MARC (.mrcx).")
    parser.add_argument("-c", "--config",
        help="Config file location. The key/value pairs defined in the config file are overwritten with possible CLI key/value pairs.")
    parser.add_argument("-cs", "--config_section",
        help="Config section identifier. Set if vocabulary code is different from section identifier.")
    parser.add_argument("-e", "--endpoint", help="Endpoint address to be used for querying linked concepts.")
    parser.add_argument("-eg", "--endpoint_graphs",
        help="The graphs one wants to query from the endpoint, e.g., http://www.yso.fi/onto/yso/. In case of multiple, separate them with space.")
    parser.add_argument("-ignoreOtherGraphWarnings", "--ignore_other_graph_warnings",
        help="Do you want ignore warnings produced whilst processing other graphs? Set this flag only if you want to ignore.", action="store_true")
    parser.add_argument("-i", "--input", help="Input file location, e.g., yso-skos.ttl")
    parser.add_argument("-if", "--input_format", help="Input file format. Default: turtle")
    parser.add_argument("-o", "--output", help="Output file name, e.g., yso.mrcx.")
    parser.add_argument("-vocId", "--vocabulary_code", help="MARC code used in tag 040 subfield f.", required=True)
    parser.add_argument("-lang", "--languages",
        help="The RDF language tag of the language one is willing to convert. In case of multiple, separate them with space.")
    parser.add_argument("-m", "--multilanguage_vocabulary", action='store_true',
        help="Is the vocabulary using language specified vocabulary codes, e.g., yso/fin? Set this flag only if it is.")
    parser.add_argument("-gc", "--grouping_classes",
        help="Types of classes not meant for describing/cataloging items in the vocabulary, e.g, hierarchical ones. In case of multiple, seperate them with space.")
    parser.add_argument("-log", "--log_file", help="Log file location.")
    parser.add_argument("-locDir", "--loc_directory",
        help="Library of Congress directory from which to look for and download to LoC marcxml files. One shall not set if one does not want LoC links.")
    parser.add_argument("-pv", "--pickle_vocabulary",
        help="File location for the vocabulary in Python's pickle format for faster execution. \
            If file's modification date is earlier than today, the file is overwritten. Else the vocabulary is loaded from this file.")
    parser.add_argument("-modificationDates", "--modification_dates",
        help="File location for pickle file, which contains latest modification dates for concepts (e. g. {'concept uri': 'YYYY-MM-DD'}) \
            The file is updated after new records are created, if keepModifiedAfter is left out of command line arguments")
    parser.add_argument("-keepModifiedAfter", "--keep_modified_after",
        help="Create separate batch of MARC21 files for concepts modified after the date given (set in YYYY-MM-DD format).")
    parser.add_argument("-defaultCreationDate", "--default_creation_date",
        help="Default creation date (set in YYYY-MM-DD format) for a concept if it has not been declared explicitly. Default: " + DEFAULTCREATIONDATE)
    parser.add_argument("-keepDeprecatedAfter", "--keep_deprecated_after",
        help="Keep deprecated concepts deprecated after (not inclusive) the date given (set in YYYY-MM-DD format). Set to 'ALL' for no limits and 'NONE' to discard all.")
    parser.add_argument("-keepGroupingClasses", "--keep_grouping_classes",
        help="Keep grouping classes defined in config file.")
    args = parser.parse_args()
    return args
def readEndpointGraphs(settings):
    """Query every configured endpoint graph for concept data.

    For each comma-separated graph IRI in ``settings["endpointGraphs"]``, a
    SPARQL CONSTRUCT is run against ``settings["endpoint"]`` collecting
    prefLabels, schemes, deprecation flags and rdf:types of skos:Concepts.

    Args:
        settings: mapping with keys "endpoint" (SPARQL endpoint URL) and
            "endpointGraphs" (comma-separated graph IRIs).

    Returns:
        Graph: merged rdflib graph of all successfully queried graphs; may be
        empty if the endpoint is unreachable or every query fails (all
        failures are logged as warnings rather than raised).
    """
    sparql = SPARQLWrapper(settings.get("endpoint"))
    queryStart = """
    PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    CONSTRUCT {
    ?concept skos:prefLabel ?prefLabel .
    ?concept skos:inScheme ?inScheme .
    ?concept owl:deprecated ?deprecated .
    ?concept a ?types .
    }"""
    queryEnd = """
    WHERE {
    ?concept a skos:Concept .
    ?concept skos:prefLabel ?prefLabel .
    ?concept a ?types .
    OPTIONAL {?concept skos:inScheme ?inScheme .}
    OPTIONAL {?concept owl:deprecated ?deprecated .}
    }
    """
    # (removed an unused `queryFrom = ""` local; the FROM clause is built
    # inline per graph below)
    ret = Graph()
    for endpointGraphIRI in settings.get("endpointGraphs").split(","):
        sparql.setQuery(queryStart + "\nFROM <" + str(endpointGraphIRI) + ">" + queryEnd)
        sparql.setMethod("GET")
        sparql.setTimeout(600)
        ret_length = len(ret)
        try:
            ret += sparql.query().convert()
            if ret_length == len(ret):
                # query succeeded but contributed nothing new
                logging.warning("Querying graph <" + str(endpointGraphIRI) +
                "> from endpoint " + settings.get("endpoint") +
                " returned 0 triples. Continuing.")
        except (SPARQLExceptions.SPARQLWrapperException) as err:
            logging.warning("Whilst querying endpoint " + settings.get("endpoint") +
            " for graph <" + str(endpointGraphIRI) +
            "> the following error occurred: " + err.__class__.__name__ + ": " + err.msg +
            ". Skipping the graph.")
        except (urllib.error.HTTPError, urllib.error.URLError) as err:
            # endpoint itself is unreachable: no point trying further graphs
            logging.warning("SPARQL endpoint not found in url " + settings.get("endpoint") +
            ". Skipping querying linked concepts.")
            break
        except socket.timeout:
            # NOTE(review): unlike the HTTPError branch this does not break,
            # although the log message says all linked-concept querying is
            # skipped -- confirm whether a break was intended here.
            logging.warning("SPARQL endpoint now answering within timeout limit. " +
            "Skipping querying linked concepts.")
    return ret
# Helper for reading and tidying complex strings found in configuration files.
def readConfigVariable(string, separator=None):
    """Return *string* stripped, or — when a truthy *separator* is given —
    the list of non-empty, stripped pieces obtained by splitting on it."""
    if not separator:
        return string.strip()
    pieces = (part.strip() for part in string.split(separator))
    return [part for part in pieces if part]
# Helper that folds decomposed å/ä/ö letters (base letter + combining mark)
# back into their composed Unicode characters (decomposed -> composed).
def decomposedÅÄÖtoUnicodeCharacters(string):
    """Recompose decomposed å, ä, ö (upper and lower case) in *string*."""
    recompositions = {
        "A\u030a": "Å", "a\u030a": "å",
        "A\u0308": "Ä", "a\u0308": "ä",
        "O\u0308": "Ö", "o\u0308": "ö",
    }
    for decomposed, composed in recompositions.items():
        string = string.replace(decomposed, composed)
    return string
def getValues(graph, target, props, language=None, literal_datatype=None):
    """Collect the object values of *target* for the given properties, in the
    order in which those properties were defined.

    Args:
        graph (Graph): The graph searched for the properties of the target.
        target (URIRef|BNode): Concept whose values are collected.
        props (URIRef|sequence(URIRef)): Property or list of properties to look up.
        language (str, optional): Restrict plain literals to this language.
            None (default) accepts any language; "" accepts only untagged literals.
        literal_datatype (URIRef, optional): Restrict datatyped literals to this
            datatype. None (default) accepts any datatype.

    Returns:
        list(ValueProp): ValueProp namedtuples with fields
            prop (URIRef): matched property
            value (URIRef|BNode|Literal): object value found for that property

    Raises:
        ValueError: If parameters do not respect the required types
    """
    if isinstance(props, URIRef):
        # a single property is handled as a one-element sequence
        props = [props]
    if not isinstance(target, (URIRef, BNode)):
        raise ValueError("Parameter 'target' must be of type URIRef or BNode.")
    elif isinstance(props, str) or not isinstance(props, Sequence):
        raise ValueError(
            "Type of parameter 'props' must be a URIRef or sequence; got %s." % (type(props)))
    elif language is not None and not isinstance(language, str):
        raise ValueError("Parameter 'language' must be string if set.")
    elif literal_datatype is not None and not isinstance(literal_datatype, URIRef):
        raise ValueError("Parameter 'datatype' must be URIRef if set.")

    def _language_ok(literal):
        # "" means "no language tag at all", None means "any language"
        if language is None:
            return True
        if language == '':
            return literal.language is None
        return literal.language == language

    def _datatype_ok(literal):
        if literal_datatype is None:
            return True
        return literal.datatype == literal_datatype

    collected = []
    for prop in props:
        if not isinstance(prop, URIRef):
            raise ValueError(
                "Types of properties must be URIRefs; got %s from property '%s'." % (type(prop), str(prop)))
        for obj in graph.objects(target, prop):
            # resource values always pass; literals go through the
            # language/datatype restrictions
            if isinstance(obj, (URIRef, BNode)):
                accepted = True
            elif obj.datatype is None:
                accepted = _language_ok(obj)
            else:
                accepted = _datatype_ok(obj)
            if accepted:
                collected.append(ValueProp(value=obj, prop=prop))
    return collected
# Helper for picking URLs out of a sequence of words.
# To be reconsidered if these ever get a structured representation.
def getURLs(string):
    """Return the http(s) URLs found among the words of *string* (an
    iterable of tokens); a single surrounding (...) or [...] pair is
    stripped off before parsing."""
    found = []
    for token in string:
        if len(token) < 10:
            continue
        if token[0] in ("(", "["):
            token = token[1:-1]
        parsed = urllib.parse.urlparse(token)
        looks_like_http = parsed.scheme in ("http", "https")
        if looks_like_http and len(parsed.netloc) > 3 and "." in parsed.netloc:
            found.append(token)
    return found
class ConvertHTMLYSOATags(HTMLParser):
    '''
    Replaces possible YSO links (<a href="...">) with the $a subfield marker so
    that the used term stays visible; the rest of the text ends up in $i
    subfields. Used e.g. for MARC field 680.
    TODO: error handling and handling of HTML special entities/comments
    '''
    # class-level defaults kept for backward compatibility; real per-instance
    # state is (re)created by initialize()
    merkkijono = ["$i"]
    in_a_yso = False
    ended_a_yso = False

    def __init__(self, *args, **kwargs):
        # give every instance its own state right away instead of sharing the
        # mutable class-level list above until initialize() is first called
        super().__init__(*args, **kwargs)
        self.initialize()

    def initialize(self):
        # reset parser state; must be called between feeds of separate values
        self.merkkijono = ["$i"]
        self.in_a_yso = False
        self.ended_a_yso = False

    def handle_starttag(self, tag, attrs):
        # an <a> pointing into YSO opens a $a subfield; any other tag is
        # reproduced verbatim into the current text
        if tag == "a":
            for attr in attrs:
                if attr[0] == "href":
                    link = attr[1]
                    if link.startswith(YSO):
                        self.in_a_yso = True
                        self.merkkijono[-1] = self.merkkijono[-1].rstrip()
                        self.merkkijono.append("$a")
                        return
        self.merkkijono.append("<" + tag)
        for attr in attrs:
            self.merkkijono.append(" " + attr[0] + "='" + attr[1] + "'")
        self.merkkijono.append(">")

    def handle_endtag(self, tag):
        # closing a YSO link switches back to $i mode on the next data chunk
        if tag == "a" and self.in_a_yso:
            self.in_a_yso = False
            self.ended_a_yso = True
        else:
            self.merkkijono.append("</" + tag + ">")

    def handle_data(self, data):
        if self.ended_a_yso:
            self.merkkijono.append("$i")
            self.ended_a_yso = False
        # fix handling of ordinary '<' characters found in the text
        # TODO: check whether a similar fix is needed for the functions
        # defined below as well
        if self.merkkijono[-1] != "$i" and self.merkkijono[-1] != "$a":
            self.merkkijono[-1] += data
        else:
            # ordinary case - just append the processed text as a new section
            self.merkkijono.append(data)

    def handle_comment(self, data):
        self.merkkijono.append(data)

    def handle_entityref(self, name):
        # TODO: check what these example snippets do
        #c = chr(name2codepoint[name])
        self.merkkijono.append(name)

    def handle_charref(self, name):
        # TODO: check what these example snippets do
        #if name.startswith('x'):
        #    c = chr(int(name[1:], 16))
        #else:
        #    c = chr(int(name))
        self.merkkijono.append(name)

    def handle_decl(self, data):
        self.merkkijono.append(data)
# pääfunktio
def convert(cs, vocabulary_name, language, g, g2):
# kääntää graafin (g) kielellä (language) ConfigParser-sektion (cs) ohjeiden mukaisesti MARCXML-muotoon
# g2 sisältää vieraat graafit (poislukien mahdolliset lcsh & lcgf-viitteet), joista etsitään
# käytettyjä termejä 7XX kenttiin
# vocabulary_name-parametriä tarvitaan tunnistamaan, että kyseessä YSO-paikat-ontolohgia, joihin tehdään 670-kenttiä
vocId = cs.get("vocabulary_code")
# variable for a bit complicated constants and casting/converting them to appropiate types
helper_variables = {
"vocCode" : (cs.get("vocabulary_code") + "/" + LANGUAGES[language] \
if cs.getboolean("multilanguage", fallback=False) \
else vocId),
"groupingClasses" : [URIRef(x) for x in cs.get("groupingClasses", fallback=",".join(GROUPINGCLASSES)).split(",")],
"groupingClassesDefault" : [URIRef(x) for x in cs.parser.get("DEFAULT", "groupingClasses", fallback=",".join(GROUPINGCLASSES)).split(",")],
'modificationDates': cs.get("modificationDates", fallback=None),
'keepModified' : cs.get("keepModifiedAfter", fallback=None),
'keepDeprecated' : cs.get("keepDeprecatedAfter", fallback=KEEPDEPRECATEDAFTER).lower() != "none",
'keepGroupingClasses' : cs.getboolean("keepGroupingClasses", fallback=False),
'write688created' : cs.get("defaultCreationDate", fallback=None) != None,
'defaultOutputFileName' : "yso2marc-" + cs.name.lower() + "-" + language + ".mrcx"
}
if helper_variables['keepModified']:
helper_variables['keepModifiedLimit'] = False \
if cs.get("keepModifiedAfter", fallback=KEEPMODIFIEDAFTER).lower() == "all" \
else datetime.date(datetime.strptime(cs.get("keepModifiedAfter"), "%Y-%m-%d"))
if helper_variables['keepDeprecated']:
helper_variables['keepDeprecatedLimit'] = False \
if cs.get("keepDeprecatedAfter", fallback=KEEPDEPRECATEDAFTER).lower() == "all" \
else datetime.date(datetime.strptime(cs.get("keepDeprecatedAfter"), "%Y-%m-%d"))
if cs.get("output", fallback=None):
parts = cs.get("languages").split(",")
if len(parts) > 1:
output = cs.get("output")
if len(output.split(".")) > 1:
helper_variables["outputFileName"] = ".".join(output.split(".")[:-1]) + "-" + language + "." + output.split(".")[-1]
else:
helper_variables["outputFileName"] = output + "-" + language
if not "outputFileName" in helper_variables:
helper_variables["outputFileName"] = cs.get("output", fallback=helper_variables["defaultOutputFileName"])
#modified_dates on dict-objekti, joka sisältää tietueen id:n avaimena ja
#arvona tuplen, jossa on tietueen viimeinen muokkauspäivämäärä ja tietueen sisältö MD5-tiivisteenä
if helper_variables['modificationDates']:
if os.path.isfile(helper_variables['modificationDates']):
with open(helper_variables['modificationDates'], 'rb') as pickle_file:
try:
modified_dates = pickle.load(pickle_file)
except EOFError:
logging.error("The file %s for modification dates is empty "%helper_variables['modificationDates'])
sys.exit(2)
else:
modified_dates = {}
logging.info("Processing vocabulary with vocabulary code '%s' in language '%s'" % (vocId, language))
incrementor = 0
deprecated_counter = 0
writer_records_counter = 0
ysoATagParser = ConvertHTMLYSOATags()
ET_namespaces = {"marcxml": "http://www.loc.gov/MARC21/slim",
"atom": "http://www.w3.org/2005/Atom"}
handle = open(cs.get("output", fallback=helper_variables["defaultOutputFileName"]), "wb")
writer = XMLWriter(handle)
pref_labels = set()
for conc in g.subjects(RDF.type, SKOS.Concept):
pref_label = g.preferredLabel(conc, lang=language)
if pref_label:
pref_labels.add(str(pref_label[0][1]).lower())
concs = []
# haetaan Kongressin kirjaston päivitykset viimeisen viikon ajalta,
# jos ohjelmaa ei ole ajettu aiemmin samana päivänä
loc_update_dict = {}
update_loc_concepts = True
loc_update_file = os.path.join(cs.get("locDirectory"), "updates.pkl")
if os.path.exists(loc_update_file):
timestamp = os.path.getmtime(loc_update_file)
file_date = date.fromtimestamp(timestamp)
if file_date == date.today():
update_loc_concepts = False
with open(loc_update_file, 'rb') as input_file:
try:
loc_update_dict = pickle.load(input_file)
except EOFError:
logging.error("EOFError in "%loc_update_file)
limit_date = date.today() - timedelta(days=7)
lc_namespaces = [LCGF, LCSH]
feed_prefix = "feed/"
for ns in lc_namespaces:
limit_reached = False
for idx in range(1,100):
if limit_reached:
break
file_path = os.path.join(str(ns), feed_prefix, str(idx))
try:
with urllib.request.urlopen(file_path, timeout=5) as atom_xml:
recordNode = ET.parse(atom_xml)
root = recordNode.getroot()
for entry in root.findall("atom:entry", ET_namespaces):
label = None
for updated in entry.findall("atom:updated", ET_namespaces):
updated = datetime.strptime(updated.text[:10], "%Y-%m-%d").date()
if updated >= limit_date:
for link in entry.findall("atom:link", ET_namespaces):
if not 'type' in link.attrib:
uri = link.attrib['href']
if uri in loc_update_dict:
if loc_update_dict[uri]['date'] < updated:
loc_update_dict[uri]['date'] = updated
loc_update_dict[uri]['updatable'] = True
else:
limit_reached = True
except ET.ParseError as e:
logging.warning("Failed to parse Library of Congress update feed")
if not limit_reached:
logging.warning("More than 10 000 updates in Library of Congress feed %s"%ns)
if helper_variables['keepModified']:
# käydään läpi vain muuttuneet käsitteet
for uri in modified_dates:
if modified_dates[uri][0] >= helper_variables['keepModifiedLimit']:
concs.append(URIRef(uri))
else:
concs = g.subjects(RDF.type, SKOS.Concept)
for concept in sorted(concs):
incrementor += 1
if incrementor % 1000 == 0:
logging.info("Processing %sth concept" % (incrementor))
# skipataan deprekoidut, jos niitä ei haluta mukaan. Jos haetaan muuttuneita käsitteitä, tulostetaan kaikki
if not helper_variables['keepModified'] and (concept, OWL.deprecated, Literal(True)) in g:
if not helper_variables['keepDeprecated']:
deprecated_counter += 1
continue
#skipataan ryhmittelevät käsitteet
if not helper_variables['keepGroupingClasses']:
if any (conceptType in helper_variables["groupingClasses"] for conceptType in g.objects(concept, RDF.type)):
continue
rec = Record()
deprecatedString = ""
loc_concept_downloaded = False
# dct:modified -> 005 EI TULOSTETA, 688
# tutkitaan, onko käsite muuttunut vai alkuperäinen
# ja valitaan leader sen perusteella
mod = g.value(concept, DCT.modified, None)
if mod is None:
rec.leader = cs.get("leaderNew", fallback=LEADERNEW)
else:
rec.leader = cs.get("leaderChanged", fallback=LEADERCHANGED)
modified = mod.toPython() # datetime.date or datetime.datetime object
if not type(modified) in [date, datetime]:
logging.error("Modification date invalid in concept %s "%concept)
modified = None
# dct:created -> 008
crt = g.value(concept, DCT.created, None)
if crt is None:
created = datetime.date(datetime.strptime(cs.get("defaultCreationDate", fallback=DEFAULTCREATIONDATE), "%Y-%m-%d"))
else:
created = crt.toPython() # datetime.date or datetime.datetime object
if not type(created) in [date, datetime]:
logging.error("Creation date invalid in concept %s "%concept)
created = datetime.date(datetime.strptime(cs.get("defaultCreationDate", fallback=DEFAULTCREATIONDATE), "%Y-%m-%d"))
code = cs.get("catalogCodes", fallback=CATALOGCODES)
# asetetaan kuvailukielto käsitteelle, jos tyypiä ryhmittelevä käsite
for conceptType in g.objects(concept, RDF.type):
if conceptType in helper_variables["groupingClasses"]:
code = cs.get("catalogCodes_na", fallback=CATALOGCODES_NA)
break
# jos kyseessä on poistettu käsite, asetetaan leaderit ja koodit asianmukaisesti
if (concept, OWL.deprecated, Literal(True)) in g:
replacers = sorted(g.objects(concept, DCT.isReplacedBy))
if len(replacers) == 0:
rec.leader = cs.get("leaderDeleted0", fallback=LEADERDELETED0)
elif len(replacers) == 1:
rec.leader = cs.get("leaderDeleted1", fallback=LEADERDELETED1)
else:
rec.leader = cs.get("leaderDeleted2", fallback=LEADERDELETED2)
code = cs.get("catalogCodes_na", fallback=CATALOGCODES_NA)
# jos on lisäksi asetettu jokin päivämäärärajoite
if helper_variables['keepDeprecatedLimit']:
# mikäli scopeNote puuttuu, poistettu tulkitaan uudeksi poistoksi ja sen tulkitaan
# "ylittävän" asetetun limitin eli jää tulosjoukkoon
for valueProp in sorted(getValues(g, concept, SKOS.scopeNote, language=""),
key=lambda o: str(o.value)):
if valueProp.value.startswith("deprecated on"):
deprecatedString = str(valueProp.value)
break
if deprecatedString:
deprecatedDateString = deprecatedString.split(" ")[-1]
try:
# yritetään parsia päivämäärä kahdessa eri formaatissa
deprecatedDate = datetime.date(datetime.strptime(deprecatedDateString, "%d.%m.%Y"))
if helper_variables['keepDeprecatedLimit'] > deprecatedDate:
deprecated_counter += 1
continue # skipataan ennen vanhentamisrajaa vanhennetut termit
except ValueError:
try:
deprecatedDate = datetime.date(datetime.strptime(deprecatedDateString, "%Y-%m-%d"))
if helper_variables['keepDeprecatedLimit'] > deprecatedDate:
deprecated_counter += 1
continue # skipataan ennen vanhentamisrajaa vanhennetut termit
except ValueError:
logging.warning("Converting deprecated date failed for concept %s. Proceeding." %
(concept))
if not created and not helper_variables["write688created"]:
logging.warning("No explicit creation date defined for concept %s. Using default value '%s' for character positions 00-05 in tag 008." % (
concept, datetime.date(datetime.strptime(DEFAULTCREATIONDATE, "%Y-%m-%d")).strftime('%y%m%d')))
rec.add_field(
Field(
tag='008',
data=created.strftime('%y%m%d') + code
)
)
# 024 muut standarditunnukset - käsitteen URI tallennetaan tähän
rec.add_field(
Field(
tag='024',
indicators = ['7', ' '],
subfields = [
'a', concept,
'2', "uri"
]
)
)
# 034 paikkojen koordinaatit - yso-paikat?
# 035 yso-tietueen numero?
# 040 luetteloiva organisaatio
rec.add_field(
Field(
tag='040',
indicators = [' ', ' '],
subfields = [
'a', cs.get("creatorAgency", fallback=CREATOR_AGENCY),
'b', LANGUAGES[language],
'f', helper_variables["vocCode"]
]
)
)
# 043 - ysopaikat, käytetäänkö
# http://marc21.kansalliskirjasto.fi/aukt/01X-09X.htm#043
# 045 - yso-ajanjaksot, käytetäänkö
# http://marc21.kansalliskirjasto.fi/aukt/01X-09X.htm#045
# 046 - erikoiskoodatut ajanjaksot?
# 052 - maantieteellinen luokitus
# 7#$a(480)$2udc$0http://udcdata.info/004604
# jos 151 kaytossa, pitaisiko kayttaa? Jarmo: UDC-luokitus, Suomi "(480)"
#ConceptGroup / skos:member -> 065 yso-aihealuekoodi
# vain siina tapauksessa, kun ne halutaan mukaan Asteriin
# jos luokkanumeroa ei löydy, ei tulosteta
# vain jos vocId = "yso", tehdään tämä
if vocId == "yso":
for group in sorted(g.subjects(SKOS.member, concept)):
if not helper_variables['keepDeprecated'] and \
(group, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated group concepts
if (group, RDF.type, ISOTHES.ConceptGroup) not in g:
continue
# ryhmätunnuksen ekstraktointi: yritä ensin kaivaa skos:notationista, muuten prefLabelista
groupno = g.value(group, SKOS.notation, None)
if groupno is None:
valueProps = sorted(getValues(g, group, SKOS.prefLabel, language=language),
key=lambda o: o.value)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s." %
(group, language, SKOS.member, concept))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for concept %s in language %s. Taking the first only." %
(concept, language))
groupname = str(valueProps[0].value)
try:
groupno = str(groupname[0:groupname.index(" ")])
groupname = str(groupname[len(groupno) + 1:])
except ValueError:
logging.warning("Tried to parse group number for group %s from concept %s in language %s but failed." %
(group, valueProps[0].value, language))
continue
rec.add_field(
Field(
tag='065',
indicators = [' ', ' '],
subfields = [
'a', groupno,
'c', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, groupname)),
#'c', groupname,
'0', group,
'2', vocId
]
)
)
# 080 - UDK-luokka. Asiasanaan liittyva UDK-luokka
# 147 Tapahtuman nimi. Ei kayteta?
# 148 Aikaa merkitseva termi. Selvitetaan.
# skos:prefLabel -> 150 aihetta ilmaiseva termi
valueProps = sorted(getValues(g, concept, SKOS.prefLabel, language=language),
key=lambda o: o.value)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for concept %s in language %s. Skipping the whole concept." %
(concept, language))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for concept %s in language %s. Choosing the first." %
(concept, language))
# tunnistetaan käsitteen tyyppi (aika, yleinen, paikka, genre)
# -> 148, 150, 151, 155, 162
# tukee tällä hetkellä tavallisia asiasanoja (150), YSO-paikkoja (151) & SLM:ää (155)
tag = "150"
if (concept, SKOS.inScheme, YSO.places) in g:
tag = "151"
elif vocId == "slm":
tag = "155"
rec.add_field(
Field(
tag=tag,
indicators = [' ', ' '],
subfields=[
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProps[0].value)))
#'a', str(valueProps[0].value)
]
)
)
# skos:altLabel -> 447, 448, 450, 451, 455
# 450 katso-viittaus
# poistetaan toisteiset skos:hiddenLabelit
# jätetään tuottamatta 45X-kentät, jotka ovat toisessa käsitteessä 15X-kenttinä, paitsi altLabelein kohdalla
# OLETUS: poistettujen käsitteiden seuraajien tietoihin EI merkitä poistetun käsitteen
# skos:prefLabelia näihin kenttiin, sillä sen oletetaan jo olevan skos:altLabelina kun siihen
# on haluttu viitata vanhalla muodolla
seen_values = set()
for valueProp in sorted(getValues(g, concept, [SKOS.altLabel, YSOMETA.singularPrefLabel,
YSOMETA.singularAltLabel, SKOS.hiddenLabel], language=language),
key=lambda o: str(o.value)):
# singularPrefLabel, singularAltLabel ja hiddenLabel jätetään pois 45X-kentistä,
# jos ne kirjainkoosta riippumatta ovat jossain 15X-kentässä
if valueProp.prop != SKOS.altLabel and str(valueProp.value.lower()) in pref_labels:
continue
if valueProp.prop == SKOS.hiddenLabel:
if str(valueProp.value) in seen_values:
continue
seen_values.add(str(valueProp.value))
tag = "450"
if (concept, SKOS.inScheme, YSO.places) in g:
tag = "451"
elif vocId == "slm":
tag = "455"
rec.add_field(
Field(
tag = tag,
indicators = [' ', ' '],
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value)))
#'a', str(valueProp.value)
]
)
)
# broader/narrower/related/successor/predecessor/skosext:partOf
# -> 550 "katso myos" viittaus
# HUOM: Objektit vain olioita
# TODO: ysoon lisätään myöhemmin partOf-suhteiden käänteinen suhde
# TODO: useat erityyppiset i-kentät eivät toimi tällä hetkellä
fields = list()
for prop, wval in SEEALSOPROPS.items():
for target in sorted(g.objects(concept, prop)):
if not helper_variables['keepDeprecated'] and \
(target, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated concepts
valueProps = getValues(g, target, SKOS.prefLabel, language=language)
if len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language %s. Skipping property %s target for concept %s." %
(target, language, prop, concept))
continue
elif len(valueProps) != 1:
logging.warning("Multiple prefLabels detected for target %s in language %s. Choosing the first." %
(target, language))
label = valueProps[0].value
tag = "550" # alustetaan 550-arvoon
if (target, SKOS.inScheme, YSO.places) in g:
tag = "551"
elif vocId == "slm":
tag = "555"
subfields = []
#TODO: YSOn mahdolliset SKOSEXT-ominaisuudet?
#TODO: tarkista tämä YSOn tietomalliuudistusta varten
if wval == "i":
if (target, SKOS.inScheme, YSO.places) in g:
if prop == SKOSEXT.partOf:
subfields.extend(('w', 'g'))
elif prop == SKOSEXT.hasPart:
subfields.extend(('w', 'h'))
else:
subfields.extend(('w', wval,
"i", TRANSLATIONS[prop][language]
))
else:
subfields.extend(('w', wval,
"i", TRANSLATIONS[prop][language]
))
else:
subfields.extend(('w', wval))
subfields.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label)))
#str(label)
))
subfields.extend(('0', target))
# yso-paikoissa on sekä ISOTHES.broaderPartitive, että
# SKOS.broader redundanttina,
# samoin ISOTHES.narrowerPartitive - SKOS.narrower
# Otetaan kuitenkin toistaiseksi varmuudeksi kummatkin konversioon mukaan
# ja poistetaan tuplakentät tässä
see_also_field = Field(
tag = tag,
indicators = [' ', ' '],
subfields = subfields
)
if not any(str(see_also_field) == str(f) for f in fields):
fields.append(see_also_field)
# järjestä 5XX-kentät ja lisää ne tietueeseen
for sorted_field in sorted(fields, key=lambda o: (
o.tag,
SORT_5XX_W_ORDER[o.get_subfields("w")[0]] if o.get_subfields("w") else "999",
o.get_subfields('a')[0]
)):
rec.add_field(sorted_field)
# TODO: JS: laitetaan 667 kenttään SLM:n käsiteskeemat jokaiselle käsitteelle
# dc:source -> 670 kasitteen tai kuvauksen lahde
# tulostetaan vain yso-paikkojen kohdalla url, joka closeMatchissa
# haetaan ensin lähdetiedoista Maanmittauslaitoksen paikannimirekisterin tyyppitieto
# liitetään se paikkatieto-URIin closeMatchissa, jos kumpiakin on vain yksi
if vocabulary_name == "YSO-PAIKAT":
subfield_list = []
subfield_b = None
geographical_types = set()
for valueProp in sorted(getValues(g, concept, DC.source, language=language), key=lambda o: str(o.value)):
if "Maanmittauslaitoksen paikannimirekisteri; " in valueProp.value:
geographical_type = valueProp.value.split("; ")
if len(geographical_type) > 1:
geographical_type = geographical_type[1]
geographical_types.add(geographical_type)
elif not any(substring in valueProp.value for substring in ["Wikidata",
"Sijaintitietojen lähde",
"Källa för positionsinformation"]):
# dc:sourcessa on ollut myös URLeja. Siivotaan ne tässä pois
if not valueProp.value.startswith("http"):
subfield_list.append([
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, valueProp.value))
])
if len(geographical_types) == 1:
subfield_b = next(iter(geographical_types))
# ruotsinkieliseen sanastoon ei laiteta paikanninimirekisterilinkkejä, koska ruotsinkielinen selite puuttuu
for valueProp in sorted(getValues(g, concept, SKOS.closeMatch, language=language), key=lambda o: str(o.value)):
if "http://paikkatiedot.fi" in valueProp.value:
if subfield_b:
subfield_list.append([
'a', 'Maanmittauslaitoksen paikannimirekisteri',
'b', subfield_b,
'u', valueProp.value
])
for subfields in subfield_list:
rec.add_field(
Field(
tag='670',
indicators = [' ', ' '],
subfields = subfields
)
)
# skos:definition -> 677 huomautus määritelmästä
# määritelmän lähde voidaan merkitä osakenttään $v
# sitä varten tulee sopia tavasta merkitä tämä lähde, jotta
# se voidaan koneellisesti erottaa tekstistä
# JS ehdottaa: jos tekstissä on merkkijono ". Lähde: ",
# kaikki sen perässä oleva teksti merkitään osakenttään $v
# entä jos linkki lähteen perässä?
# JS ehdottaa: linkki aivan viimeisenä sanana
# 4.5.2018 - palataan myöhemmin tähän
# 6.8.2018 - ei vielä käsitelty
# 5.9.2018 - määritelmän lähde tulee määritelmän jälkeen kahdella tavuviivalla (--) erotettuna
# jätetään toistaiseksi paikalleen (13 kpl)
for valueProp in sorted(getValues(g, concept, SKOS.definition, language=language),
key=lambda o: str(o.value)):
subfields = [
'a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value)))
#str(valueProp.value)
]
# TODO: linkkien koodaus tarkistetaan/tehdään myöhemmin
#urls = getURLs(valueProp.value)
#for url in urls:
# subfields.append("u")
# subfields.append(url)
rec.add_field(
Field(
tag='677',
indicators = [' ', ' '],
subfields = subfields
)
)
# skos:note -> 680 yleinen huomautus, julkinen
for valueProp in sorted(getValues(g, concept, [SKOS.note, SKOS.scopeNote, SKOS.example], language=language),
key=lambda o: str(o.value)):
ysoATagParser.initialize()
ysoATagParser.feed(valueProp.value)
if len(ysoATagParser.merkkijono)%2 == 1:
logging.warning("Parsing the property %s for concept %s into seperate subfields failed. Continuing with complete value." % (valueProp.prop, concept))
subfieldCodeValuePair = ("i", valueProp.value.strip())
if len(subfieldCodeValuePair[1]) == 0:
subfieldCodeValuePair = []
else:
subfieldCodeValuePair = [[x[1], ysoATagParser.merkkijono[ind+1].strip()] for (ind,x) in enumerate(ysoATagParser.merkkijono) if ind%2 == 0]
# poistetaan viimeinen i-tägi, jos se on vain 1 merkin mittainen (loppupisteet)
if subfieldCodeValuePair[-1][0] == "i" and len(subfieldCodeValuePair[-1][1]) <= 1 and len(subfieldCodeValuePair) > 1:
subfieldCodeValuePair[-2][1] = subfieldCodeValuePair[-2][1] + subfieldCodeValuePair[-1][1]
subfieldCodeValuePair = subfieldCodeValuePair[:-1]
subfield_values = []
for subfield in subfieldCodeValuePair:
subfield_values.extend(
(subfield[0], decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, subfield[1])))
#(subfield[0], subfield[1])
)
rec.add_field(
Field(
tag='680',
indicators = [' ', ' '],
subfields = subfield_values
)
)
# mahdollinen deprekointitieto lisätään erikseen
if deprecatedString:
rec.add_field(
Field(
tag='680',
indicators = [' ', ' '],
subfields = ['i', deprecatedString]
)
)
# owl:deprecated -> 682 Huomautus poistetusta otsikkomuodosta (ei toistettava)
# Ohjaus uuteen/uusiin käsitteisiin
# seuraaja-suhde
# a-kenttään seuraajan preflabel, 0-kenttään URI, i selite
# TODO: onko seuraajaa vai ei, lisäksi mietittävä deprekoidun käsitteen
# tyyppi (onko hierarkia jne.). Deprekaattorin huomautustekstiä kehitettävä
# (kentät mietittävä uudelleen - EI skos:scopeNote kuten nyt on 4.5.2018)
# 2018-12-05 Huomattiin, että ei ole toistettavissa --> ongelma useiden korvaajien tapauksessa ($0)
# kongressin kirjasto on työstämässä parhaista käytännöistä $0-kentän toistettavuudesta vielä tämän vuoden aikana
# päätettiin jättää tässä vaiheessa $0-kentät kokonaan pois
if (concept, OWL.deprecated, Literal(True)) in g:
target = None
labels = []
for target in sorted(g.objects(concept, DCT.isReplacedBy)):
if not helper_variables['keepDeprecated'] and \
(target, OWL.deprecated, Literal(True)) in g:
continue # skip deprecated concepts
valueProps = sorted(getValues(g, target, SKOS.prefLabel, language=language), key=lambda o: str(o.value))
replacedByURIRef = URIRef(target)
if len(valueProps) > 1:
logging.warning("Multiple prefLabels detected for target %s in language %s. Choosing the first." %
(target, language))
elif len(valueProps) == 0:
logging.warning("Could not find preflabel for target %s in language: %s. Skipping property %s target for concept %s." %
(target, language, DCT.isReplacedBy, concept))
continue
label = valueProps[0].value
labels.append(valueProps[0].value)
#rec.add_field(
# Field(
# tag = '682',
# indicators = [' ', ' '],
# subfields = [
# 'i', TRANSLATIONS["682iDEFAULT"][language],
# 'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label))),
# #'a', str(label),
# '0', target
# ]
# )
#)
if len(labels) > 0:
subfield_values = ['i', TRANSLATIONS["682iDEFAULT"][language]]
for label in labels[:-1]:
subfield_values.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(label) + ","))
#str(label)
))
subfield_values.extend(('a',
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(labels[-1])))
#str(label)
))
#subfield_values.extend(('0', target)) #TODO: seurataan kongressin kirjaston tulevia ohjeistuksia
rec.add_field(
Field(
tag='682',
indicators = [' ', ' '],
subfields = subfield_values
)
)
if helper_variables["write688created"]:
rec.add_field(
Field(
tag = '688',
indicators = [' ', ' '],
subfields = [
'a', TRANSLATIONS["688aCREATED"][language] + ": " + created.strftime('%Y-%m-%d')
]
)
)
if mod and modified:
rec.add_field(
Field(
tag = '688',
indicators = [' ', ' '],
subfields = [
'a', TRANSLATIONS["688aMODIFIED"][language] + ": " + modified.strftime('%Y-%m-%d')
]
)
)
# all skos:match*es -> 7XX linkkikenttiin
# halutaan linkit kaikkiin kieliversioihin
# lisäksi saman sanaston erikieliset preflabelit tulevat tänne
# graafit on haettu etukäteen ohjelman muistiin ohjelman alussa
# 750 $a label, $4 relaatiotyyppi, $2 sanastolahde, $0 uri
# miten $w? JS: ei oteta mukaan ollenkaan
# 2.5.2018-kokouksessa päätettiin, että DCT.spatialia ei käännetä
# MARC-muotoon
# 13.8.2018 LCSH/LCGF käsitellään erikseen; niille on tehty oma kansio, joka
# on tallennettu locDirectory-muuttujaan. Puuttuvat loc-linkit haetaan
# dynaamisesti tarvittaessa ja lisätään kansioon, josta ne sitten luetaan ohjelman käyttöön
valueProps = getValues(g, concept, [SKOS.prefLabel, SKOS.exactMatch, SKOS.closeMatch,
SKOS.broadMatch, SKOS.narrowMatch,
SKOS.relatedMatch])
fields = list() # kerätään kentät tähän muuttujaan, joka sitten lopuksi järjestetään
for valueProp in valueProps:
if valueProp.prop == SKOS.prefLabel:
# suodatetaan samankieliset, jotka menivät jo 1xx-kenttiin
# valueProp.value sisältää tässä poikkeuksellisesti jo halutun literaalin
# (vrt. kun muissa on solmu)
if valueProp.value.language == language:
continue
matchURIRef = URIRef(concept)
else:
# tehdään osumasta URIRef
matchURIRef = URIRef(valueProp.value)
#if not helper_variables['keepDeprecated'] and \
if (matchURIRef, OWL.deprecated, Literal(True)) in g2:
# skip deprecated matches
# 19.12.2018 käyty keskustelua tästä - päätetty tässä vaiheessa
# olla seuraamatta dct:isReplacedBy-suhteita ja lisäämättä näitä
# TODO-listalle?
continue
# 27.12.2018 pitäisikö tarkistaa myös groupingClassesien varalta?
# Ratkaisu: Ei - nämä on merkitty omissa tietueissaan ei-käytettäviksi
second_indicator = "7"
tag = "750"
loc_object = None
if (matchURIRef, SKOS.inScheme, YSO.places) in g2 or \
(matchURIRef, SKOS.inScheme, YSO.places) in g: #or matchType == DCT.spatial:
tag = "751"
# TODO: nimetyt graafit, kohdista kyselyt niihin?
# Comment: if we want to direct queries to spesific graphs, one per vocab,
# that graph needs to be selected here based on the void:uriSpace
sub0 = concept
sub2 = ""
if matchURIRef.startswith(LCSH):
second_indicator = "0"
loc_object = {"prefix": str(LCSH), "id": matchURIRef.split("/")[-1]}
elif matchURIRef.startswith(LCGF):
sub2 = "lcgft"
loc_object = {"prefix": str(LCGF), "id": matchURIRef.split("/")[-1]}
elif matchURIRef.startswith(ALLARS):
if (matchURIRef, RDF.type, ALLARSMETA.GeographicalConcept) in g2: #or matchType == DCT.spatial:
tag = "751"
sub2 = "allars"
#continue
elif matchURIRef.startswith(KOKO):
continue # skip KOKO concepts
elif matchURIRef.startswith(SLM):
tag = "755"
sub2 = "slm"
elif matchURIRef.startswith(YSA):
if (matchURIRef, RDF.type, YSAMETA.GeographicalConcept) in g2: #or matchType == DCT.spatial:
tag = "751"
sub2 = "ysa"
#continue
elif matchURIRef.startswith(YSO):
sub2 = "yso"
else:
second_indicator = "4"
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Matched target %s did not belong to any known vocabulary" % (str(matchURIRef)))
# do not put subfield 2 in this case
if not ((matchURIRef, None, None) in g or
(matchURIRef, None, None) in g2):
if not loc_object and not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Matched target %s did not belong to any known vocabulary. Skipping." % (str(matchURIRef)))
continue
sub4 = ""
if valueProp.prop == SKOS.broadMatch:
sub4 = "BM"
elif valueProp.prop == SKOS.narrowMatch:
sub4 = "NM"
elif valueProp.prop == SKOS.exactMatch:
sub4 = "EQ"
elif valueProp.prop == SKOS.prefLabel:
sub4 = "EQ"
# kovakoodattu yso ja slm - muuten niiden tulisi olla jossain globaalissa muuttujassa
if sub2 == "yso" or sub2 == "slm" or cs.getboolean("multilanguage", fallback=False):
sub2 = sub2 + "/" + LANGUAGES[valueProp.value.language]
# englanninkielisten YSO-paikkojen prefLabelit ovat Wikidatasta peräisin
if tag == "751" and LANGUAGES[valueProp.value.language] in ["en", "eng"]:
wdEntities = []
closeMatches = getValues(g, concept, [SKOS.closeMatch])
for closeMatch in closeMatches:
if closeMatch.value.startswith(WIKIDATA):
wdEntities.append(URIRef(closeMatch.value))
if len(wdEntities) == 1:
sub0 = wdEntities[0]
sub2 = "wikidata"
sub4 = "~EQ"
else:
sub2 = None
sub4 = None
if sub2 and sub4:
fields.append(
Field(
tag=tag,
indicators = [' ', second_indicator],
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(valueProp.value))),
'4', sub4,
'2', sub2,
'0', sub0
]
)
)
continue
elif valueProp.prop == SKOS.closeMatch:
sub4 = "~EQ"
else:
sub4 = "RM"
# library of congress -viitteet käsitellään erikseen
if loc_object:
if cs.get("locDirectory", fallback=None) == None:
continue
recordNode = None
local_loc_source = os.path.join(cs.get("locDirectory"), loc_object["id"] + ".marcxml.xml")
if matchURIRef not in loc_update_dict:
loc_update_dict[matchURIRef] = {'date': date.today()}
if os.path.exists(local_loc_source):
loc_update_dict[matchURIRef]['updatable'] = False
else:
loc_update_dict[matchURIRef]['updatable'] = True
if not loc_update_dict[matchURIRef]['updatable'] and os.path.exists(local_loc_source):
try:
with open(local_loc_source, encoding="utf-8") as f:
recordNode = ET.parse(f)
except ET.ParseError as e:
logging.warning("Failed to parse the following file: %s. Skipping the property for concept %s." %
(local_loc_source, concept))
elif update_loc_concepts:
try:
# Kongressin kirjastosta voi ladata 120 käsitettä minuutissa, varmistetaan aikaviiveellä, ettei raja ylity
time.sleep(0.5)
with urllib.request.urlopen(loc_object["prefix"] + loc_object["id"] + ".marcxml.xml", timeout=5) as marcxml, \
open(local_loc_source, 'wb') as out_file:
shutil.copyfileobj(marcxml, out_file)
logging.info("Downloaded LCSH link to %s." %
(local_loc_source))
loc_concept_downloaded = True
loc_update_dict[matchURIRef]['updatable'] = False
except urllib.error.URLError as e:
logging.warning('Unable to load the marcxml for %s. Reason: %s. Skipping the property for concept %s.' %
(loc_object["id"], e.reason, concept))
except OSError as e:
logging.warning("Failed to create a file for %s under %s directory. Skipping the property for concept %s." %
(loc_object["id"], cs.get("locDirectory"), concept))
if loc_concept_downloaded:
try:
with open(local_loc_source, encoding="utf-8") as f:
recordNode = ET.parse(f)
except OSError as e:
logging.warning("Failed to read the file for %s under %s directory. Skipping the property for concept %s" %
(loc_object["id"], cs.get("locDirectory"), concept))
except ET.ParseError as e:
logging.warning("Failed to parse the following file: %s. Skipping the property for concept %s." %
(local_loc_source, concept))
if recordNode:
tagNode = None
for tagNumber in LCSH_1XX_FIELDS:
tagNode = recordNode.find("./marcxml:datafield[@tag='" + tagNumber + "']", ET_namespaces)
if tagNode is not None:
# otetaan ensimmäinen
break
if tagNode is not None:
tag = "7" + tagNode.attrib["tag"][1:]
first_indicator = tagNode.attrib["ind1"]
subfields = []
for child in tagNode:
subfields.extend((child.attrib["code"],
decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(child.text)))
#str(child.text)
))
subfields.extend(("4", sub4))
if second_indicator == "7":
subfields.extend(("2", sub2))
subfields.extend(("0", str(matchURIRef)))
fields.append(
Field(
tag = tag,
indicators = [first_indicator, second_indicator],
subfields = subfields
)
)
else:
logging.warning("Could not find any marcxml:datafield objects with a tag number in the following list: %s for the following record: %s. %s" %
(LCSH_1XX_FIELDS, loc_object["id"], "Skipping the property for concept " + concept + "."))
#continue
else:
#käsitellään kaikki muut sanastot paitsi lcsh & lcgf
prefLabel = None
multipleLanguages = False
languagesEncountered = set()
sortedPrefLabels = sorted(g2.preferredLabel(matchURIRef,
labelProperties=(SKOS.prefLabel)))
for label in sortedPrefLabels:
languagesEncountered.add(label[1].language)
if len(languagesEncountered) > 1:
multipleLanguages = True
break
processedLanguages = set()
for type2, prefLabel in sortedPrefLabels:
prefLabelLanguage = prefLabel.language if prefLabel.language != None else ""
if prefLabelLanguage:
if LANGUAGES.get(prefLabelLanguage):
pass
else:
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("LANGUAGES dictionary has no key for language '%s' found from the skos:prefLabel %s of target %s. Skipping." %
(prefLabelLanguage, matchURIRef, concept))
continue
if prefLabelLanguage in processedLanguages:
if not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Multiple prefLabels detected for target %s in language %s. Skipping prefLabel %s." %
(matchURIRef, prefLabelLanguage, prefLabel))
continue
processedLanguages.add(prefLabelLanguage)
subfields = [
'a', decomposedÅÄÖtoUnicodeCharacters(unicodedata.normalize(NORMALIZATION_FORM, str(prefLabel))),
#'a', str(prefLabel),
'4', sub4
]
if prefLabelLanguage == "":
multipleLanguagesEnd = ""
else:
# kovakoodattu yso & slm tännekin
multipleLanguagesEnd = "/" + LANGUAGES[prefLabel.language] if sub2 in ["yso", "slm"] or multipleLanguages else ""
if second_indicator != "4":
subfields.extend(("2",
sub2 + multipleLanguagesEnd
))
subfields.extend(("0", str(matchURIRef)))
fields.append(
Field(
tag=tag,
indicators = [' ', second_indicator],
subfields = subfields
)
)
if not prefLabel and not cs.getboolean("ignoreOtherGraphWarnings", fallback=IGNOREOTHERGRAPHWARNINGS):
logging.warning("Could not find preflabel for target %s. Skipping property %s target for concept %s." %
(str(matchURIRef), str(valueProp.prop), concept))
#continue
# sort fields and add them
for sorted_field in sorted(fields, key=lambda o: (
o.tag,
o.value().lower()
)):
rec.add_field(sorted_field)
writer_records_counter += 1
writer.write(rec)
if helper_variables['modificationDates']:
md5 = hashlib.md5()
md5.update(str.encode(str(rec)))
hash = md5.hexdigest()
if str(concept) in modified_dates:
if not hash == modified_dates[str(concept)][1] or loc_concept_downloaded:
modified_dates[str(concept)] = (date.today(), hash)
else:
modified_dates[str(concept)] = (date.today(), hash)
if handle is not sys.stdout:
writer.close()
if helper_variables['modificationDates']:
with open(helper_variables['modificationDates'], 'wb') as output:
pickle.dump(modified_dates, output, pickle.HIGHEST_PROTOCOL)
with open(loc_update_file, 'wb') as output:
pickle.dump(loc_update_dict, output, pickle.HIGHEST_PROTOCOL)
# tuotetaan tuotetaan lopuksi käsitteet laveassa XML-muodossa
parser = ET.XMLParser(remove_blank_text=True,strip_cdata=False)
file_path = helper_variables["outputFileName"]
tree = ET.parse(file_path, parser)
e = tree.getroot()
handle = open(cs.get("output", fallback=helper_variables["defaultOutputFileName"]), "wb")
handle.write(ET.tostring(e, encoding='UTF-8', pretty_print=True, xml_declaration=True))
if handle is not sys.stdout:
handle.close()
# lokitetaan vähän tietoa konversiosta
if helper_variables['keepDeprecated']:
logging.info(
"Processed %s concepts, from which %s were left out because of deprecation. Wrote %s MARCXML records." %
(incrementor, deprecated_counter, writer_records_counter)
)
else:
logging.info(
"Processed %s concepts. Wrote %s MARCXML records." %
(incrementor, writer_records_counter)
)
if cs.get("outputSpecified", fallback=None) == None:
outputChannel = sys.stdout.buffer
with open(cs.get("output", fallback=helper_variables['defaultOutputFileName']), "rb") as f:
shutil.copyfileobj(f, outputChannel)
if cs.get("outputSpecified", fallback=None) == None:
os.remove(cs.get("output", fallback=helper_variables['defaultOutputFileName']))
logging.info("Conversion completed: %s"%datetime.now().replace(microsecond=0).isoformat())
# MAIN
def main():
    """Command-line entry point.

    Parses CLI arguments and the optional INI configuration, normalizes the
    settings into one config section (named after the vocabulary code unless
    overridden), loads the vocabulary RDF graph (optionally from a same-day
    pickle cache), pulls auxiliary graphs from a SPARQL endpoint, and finally
    runs the MARCXML conversion once per requested language.
    """
    settings = ConfigParser(interpolation=ExtendedInterpolation())
    args = readCommandLineArguments()
    if args.config:
        settings.read(args.config)
    else:
        settings.add_section(args.vocabulary_code.upper())
    # for extracting meaningful leading/trailing spaces
    # (removing double quotes around the string)
    for sec in settings.sections():
        for (key, val) in settings.items(sec):
            if len(val) > 0 and val[-1] == '"' and val[0] == '"':
                settings.set(sec, key, val[1:-1])
    cs = args.vocabulary_code.upper()  # default config section to vocabulary code
    settings.set("DEFAULT", "vocabulary_code", cs.lower())
    # Used in MARC code used in tag 040 subfield f
    # and 7XX foreign language prefLabels
    other_graphs = Graph()
    if args.config_section:
        # override default config section
        cs = args.config_section.upper()
    # prepare settings
    # configure logging
    loglevel = logging.INFO
    # NOTE(review): logFormatter is never attached to a handler, so the
    # format string currently has no effect -- confirm whether it should be
    # wired into basicConfig/a handler.
    logFormatter = logging.Formatter('%(levelname)s - %(message)s')
    if args.log_file:
        logging.basicConfig(filename=args.log_file, filemode="w")
    logger = logging.getLogger()
    logger.setLevel(loglevel)
    logger.propagate = False
    logging.info("Conversion started: %s"%datetime.now().replace(microsecond=0).isoformat())
    if args.endpoint:
        settings.set(cs, "endpoint", args.endpoint)
    # normalize endpoint graphs: CLI (space-separated) overrides config
    # (comma-separated), which overrides the built-in default list
    if args.endpoint_graphs:
        settings.set(cs, "endpointGraphs", ",".join(readConfigVariable(args.endpoint_graphs, " ")))
    elif settings.get(cs, "endpointGraphs", fallback=None) != None:
        settings.set(cs, "endpointGraphs", ",".join(readConfigVariable(settings.get(cs, "endpointGraphs"), ",")))
    else:
        settings.set(cs, "endpointGraphs", ",".join(ENDPOINTGRAPHS))
    if args.ignore_other_graph_warnings:
        settings.set(cs, "ignoreOtherGraphWarnings", "true")
    if args.grouping_classes:
        settings.set(cs, "groupingClasses", ",".join(readConfigVariable(args.grouping_classes, " ")))
    elif settings.get(cs, "groupingClasses", fallback=None) != None:
        settings.set(cs, "groupingClasses", ",".join(readConfigVariable(settings.get(cs, "groupingClasses"), ",")))
    else:
        settings.set(cs, "groupingClasses", "")
    if not args.input:
        logging.error("Input is required.")
        sys.exit(2)
    if args.input_format:
        settings.set(cs, "inputFormat", args.input_format)
    graphi = Graph()
    graph_loaded = False
    if args.pickle_vocabulary:
        pickleFile = args.pickle_vocabulary
    else:
        pickleFile = settings.get(cs, "pickleVocabulary", fallback=None)
    if pickleFile:
        if os.path.isfile(pickleFile):
            timestamp = os.path.getmtime(pickleFile)
            file_date = date.fromtimestamp(timestamp)
            # the pickle cache is only trusted if it was written today
            if file_date == date.today():
                with open(pickleFile, 'rb') as input_file:
                    try:
                        graphi = pickle.load(input_file)
                        graph_loaded = True
                    except EOFError:
                        # BUG FIX: was '"EOFError in "%pickleFile', which
                        # raises TypeError because the format string has no
                        # conversion specifier.
                        logging.error("EOFError in %s", pickleFile)
    if not graph_loaded:
        graphi += Graph().parse(args.input, format=settings.get(cs, "inputFormat", fallback="turtle"))
        if pickleFile:
            # refresh the cache for subsequent same-day runs
            with open(pickleFile, 'wb') as output:
                pickle.dump(graphi, output, pickle.HIGHEST_PROTOCOL)
    if args.output:
        settings.set(cs, "output", args.output)
        settings.set(cs, "outputSpecified", "true")
    if args.languages != None:
        settings.set(cs, "languages", ",".join(readConfigVariable(args.languages, " ")))
    elif settings.get(cs, "languages", fallback=None) == None:
        logging.error("Language is required. Set with --languages.")
        sys.exit(2)
    else:
        settings.set(cs, "languages", ",".join(readConfigVariable(settings.get(cs, "languages"), ",")))
    if args.multilanguage_vocabulary:
        settings.set(cs, "multilanguage", "true")
    if args.loc_directory:
        settings.set(cs, "locDirectory", args.loc_directory)
    if args.keep_modified_after and not args.modification_dates:
        logging.error('Arguments required with --keep_modified_after: --modification_dates')
        sys.exit(2)
    if args.modification_dates:
        settings.set(cs, "modificationDates", args.modification_dates)
    if args.keep_modified_after:
        settings.set(cs, "keepModifiedAfter", args.keep_modified_after)
        # accepted values: "ALL", "NONE" or an ISO 8601 date
        modifiedLimit = settings.get(cs, "keepModifiedAfter")
        if modifiedLimit.lower() == "all":
            pass
        elif modifiedLimit.lower() == "none":
            pass
        else:
            try:
                datetime.date(datetime.strptime(modifiedLimit, "%Y-%m-%d"))
            except ValueError:
                logging.error("Cannot interpret 'keepModifiedAfter' value set in configuration file or given as a CLI parameter. Possible values are 'ALL', 'NONE' and ISO 8601 format for dates.")
                sys.exit(2)
    if args.default_creation_date:
        settings.set(cs, "defaultCreationDate", args.default_creation_date)
    if settings.get(cs, "defaultCreationDate", fallback=None) != None:
        try:
            datetime.date(datetime.strptime(settings.get(cs, "defaultCreationDate"), "%Y-%m-%d"))
        except ValueError:
            logging.error("Cannot interpret 'defaultCreationDate' value set in configuration file or given as a CLI parameter. Possible values: ISO 8601 format for dates.")
            sys.exit(2)
    if args.keep_deprecated_after:
        settings.set(cs, "keepDeprecatedAfter", args.keep_deprecated_after)
    if settings.get(cs, "keepDeprecatedAfter", fallback=None) != None:
        # accepted values: "ALL", "NONE" or an ISO 8601 date
        deprecationLimit = settings.get(cs, "keepDeprecatedAfter")
        if deprecationLimit.lower() == "all":
            pass
        elif deprecationLimit.lower() == "none":
            pass
        else:
            try:
                datetime.date(datetime.strptime(deprecationLimit, "%Y-%m-%d"))
            except ValueError:
                logging.error("Cannot interpret 'keepDeprecatedAfter' value set in configuration file or given as a CLI parameter. Possible values are 'ALL', 'NONE' and ISO 8601 format for dates.")
                sys.exit(2)
    if settings.get(cs, "endpointGraphs"):
        if settings.get(cs, "endpoint", fallback=None) == None:
            logging.warning("No endpoint address for endpoint graphs (set with --endpoint). Skipping endpoint graphs.")
        else:
            other_graphs += readEndpointGraphs(settings[cs])
            pass
    # run the conversion once per configured language
    for lang in settings.get(cs, "languages").split(","):
        convert(settings[cs], cs, lang, graphi, other_graphs)
# Entry point guard: run the conversion and route any uncaught error into
# the configured log instead of a bare traceback.
# NOTE(review): BaseException also captures KeyboardInterrupt/SystemExit --
# presumably intentional for unattended batch runs; confirm.
if __name__ == "__main__":
    try:
        main()
    except BaseException as e:
        logging.exception(e)
| 1.867188 | 2 |
Turnauswertung-py3/common/urls.py | naechtner/turn-events | 0 | 12772784 | from django.conf.urls import url
from common import views
# Route table for the "common" app: landing page plus CRUD views for
# disciplines and performances.
# NOTE(review): the discipline detail route captures the key as "id" while
# every other route uses "pk" -- looks deliberate (function-based view vs
# class-based views), but worth confirming.
urlpatterns = [
    # Landing page.
    url(r'^$', views.index, name='home'),
    # Disciplines: list, create, detail (slugged URL), edit, delete.
    url(r'^disciplines/?$', views.disciplines_index, name='disciplines.index'),
    url(r'^disciplines/new$',
        views.DisciplineCreateView.as_view(), name='disciplines.new'),
    url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<id>\d+)$',
        views.discipline_detail, name='disciplines.detail'),
    url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/edit$',
        views.DisciplineUpdateView.as_view(), name='disciplines.edit'),
    url(r'^disciplines/(?P<slug>[-\w\d]*)-(?P<pk>\d+)/delete$',
        views.DisciplineDeleteView.as_view(), name='disciplines.delete'),
    # Performances: list, create, detail, edit, delete (pk-only URLs).
    url(r'^performances/?$',
        views.performances_index, name='performances.index'),
    url(r'^performances/new$',
        views.PerformanceCreateView.as_view(), name='performances.new'),
    url(r'^performances/(?P<pk>\d+)$',
        views.PerformanceDetailView.as_view(), name='performances.detail'),
    url(r'^performances/(?P<pk>\d+)/edit$',
        views.PerformanceUpdateView.as_view(), name='performances.edit'),
    url(r'^performances/(?P<pk>\d+)/delete$',
        views.PerformanceDeleteView.as_view(), name='performances.delete'),
]
| 1.882813 | 2 |
scripts/utils.py | akx/mapnificent_generator | 6 | 12772785 | <reponame>akx/mapnificent_generator
import errno
import re
import os
import yaml
NON_WORD_RE = re.compile('[^\w-]')
def slugify(s):
return NON_WORD_RE.sub('-', s)
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: build intermediate directories and
    silently succeed when the directory already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow the error when the path already exists as a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def ydump(e):
    """Serialize *e* as a UTF-8 encoded YAML bytestring in block style,
    keeping non-ASCII characters readable and avoiding line wrapping."""
    options = dict(
        allow_unicode=True,
        default_flow_style=False,
        encoding='utf-8',
        width=10000,
    )
    return yaml.safe_dump(e, **options)
| 2.515625 | 3 |
onto/domain_model.py | billyrrr/onto | 1 | 12772786 | <gh_stars>1-10
from onto.primary_object import PrimaryObject
class DomainModel(PrimaryObject):
    """Base class for domain models: the objects that encapsulate the
    application's business logic.
    """

    @classmethod
    def _datastore(cls):
        # Imported at call time rather than module load time -- presumably
        # to avoid an import cycle with onto.context; confirm.
        from onto.context import Context
        return Context.db
| 2.28125 | 2 |
setup.py | drivet/pylint-server | 17 | 12772787 | """
pylint-server
----
A small Flask application to keep keep track of pylint reports and ratings
on a per-repository basis.
"""
from setuptools import setup
setup(
name='pylint-server',
version='0.1',
url='https://github.com/drivet/pylint-server/',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='A Flask application to keep keep track of pylint information',
long_description=__doc__,
py_modules=['pylint-server'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'TravisPy'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Version Control',
],
)
| 1.460938 | 1 |
vyperlogix/__init__.py | raychorn/chrome_gui | 1 | 12772788 | __copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
from .misc import _utils
from .misc._utils import DeCamelCaseMethods
class Str(str):
    """str subclass adding camel-case splitting on top of the built-in
    string type.
    """

    def de_camel_case(self,delim=' ',method=DeCamelCaseMethods.default):
        # Delegate to the shared implementation in misc._utils; *delim* is
        # inserted between the words recovered from the CamelCase input.
        return _utils.de_camel_case(self,delim=delim,method=method)
# BEGIN: The following code causes problems for isinstance(foo,str)...
#import __builtin__
#__builtin__.str = Str
# END! The following code causes problems for isinstance(foo,str)...
# Package version string (maintained by hand, not derived from metadata).
__version__ = '1.0.0.0'
#print 'Loaded... %s v%s' % (str(__name__).capitalize(),__version__)
| 2.21875 | 2 |
ckanext/dalrrd_emc_dcpr/cli/utils.py | ricardogsilva/ckanext-dalrrd-emc-dcpr | 0 | 12772789 | import enum
import logging
import typing
import click
from ckan import model
from ckan.logic import NotFound
from ckan.lib import jinja_extensions
from ckan.plugins import toolkit
from flask_babel import gettext as flask_ugettext, ngettext as flask_ungettext
from jinja2 import Environment
logger = logging.getLogger(__name__)
class DatasetCreationResult(enum.Enum):
    # Outcome of create_single_dataset(): the package was either newly
    # created, or skipped because one with the same name already existed.
    CREATED = "created"
    NOT_CREATED_ALREADY_EXISTS = "already_exists"
def get_jinja_env():
    """Build a standalone Jinja2 environment configured like CKAN's own
    template environment (same extensions, i18n callables and filters).
    """
    jinja_env = Environment(**jinja_extensions.get_jinja_env_options())
    # Wire Flask-Babel's gettext/ngettext into Jinja's i18n extension.
    jinja_env.install_gettext_callables(flask_ugettext, flask_ungettext, newstyle=True)
    # custom filters
    # Trim whitespace inside {% trans %} blocks (Jinja i18n "trimmed" policy).
    jinja_env.policies["ext.i18n.trimmed"] = True
    jinja_env.filters["empty_and_escape"] = jinja_extensions.empty_and_escape
    # jinja_env.filters["ungettext"] = flask_ungettext
    return jinja_env
def create_single_dataset(
    user: typing.Dict, dataset: typing.Dict, close_session: bool = False
) -> DatasetCreationResult:
    """Create the CKAN package described by *dataset* unless one with the
    same name already exists; report which of the two happened.
    """
    try:
        toolkit.get_action("package_show")(
            {"user": user["name"]}, data_dict={"id": dataset["name"]}
        )
    except toolkit.ObjectNotFound:
        # No package with this name yet -- create it now.
        toolkit.get_action("package_create")(
            {"user": user["name"]}, data_dict=dataset
        )
        outcome = DatasetCreationResult.CREATED
    else:
        outcome = DatasetCreationResult.NOT_CREATED_ALREADY_EXISTS
    if close_session:
        model.Session.remove()
    return outcome
def create_org_user(
    user_id: str,
    user_password: str,
    *,
    organization_memberships: typing.List[typing.Dict[str, str]],
    user_email: typing.Optional[str] = None,
) -> typing.Dict:
    """Create a CKAN user and register it as a member of each organization.

    Each item of ``organization_memberships`` is expected to carry an
    ``org_id`` and a ``role`` key (as read below).  Returns the dict
    produced by CKAN's ``user_create`` action.
    """
    # Act as the CKAN site user so creation is always authorized.
    creator = toolkit.get_action("get_site_user")({"ignore_auth": True}, {})
    # NOTE(review): "<EMAIL>" and "<PASSWORD>" below look like redaction
    # placeholders left by source scrubbing -- they are not valid Python.
    # Restore the original e-mail f-string (presumably built from user_id)
    # and pass user_password before using this function.
    user_details = toolkit.get_action("user_create")(
        context={
            "user": creator["name"],
        },
        data_dict={
            "name": user_id,
            "email": user_email or f"{<EMAIL>",
            "password": <PASSWORD>,
        },
    )
    for membership in organization_memberships:
        org_details = toolkit.get_action("organization_show")(
            data_dict={"id": membership["org_id"]}
        )
        # NOTE(review): organization_show normally returns a dict, so the
        # attribute access `org_details.name` below looks like it should be
        # `org_details["name"]` -- confirm. member_details is also unused.
        member_details = toolkit.get_action("organization_member_create")(
            context={
                "user": creator["name"],
            },
            data_dict={
                "id": org_details.name,
                "username": user_id,
                "role": membership["role"],
            },
        )
    return user_details
def maybe_create_organization(
    name: str,
    title: typing.Optional[str] = None,
    description: typing.Optional[str] = None,
    close_session: bool = False,
) -> typing.Tuple[typing.Dict, bool]:
    """Fetch the organization called *name*, creating it first when it does
    not exist yet.

    Returns ``(organization, created)`` where *created* tells whether a new
    organization had to be made.
    """
    show_payload = {
        "id": name,
        "include_users": True,
        "include_datasets": False,
        "include_dataset_count": False,
        "include_groups": False,
        "include_tags": False,
        "include_followers": False,
    }
    try:
        org = toolkit.get_action("organization_show")(data_dict=show_payload)
        was_created = False
    except NotFound:
        # Organization is missing -- create it as the site user, sending
        # only the fields the caller actually provided.
        site_user = toolkit.get_action("get_site_user")({"ignore_auth": True}, {})
        create_payload = {
            key: value
            for key, value in {
                "name": name,
                "title": title,
                "description": description,
            }.items()
            if value is not None
        }
        org = toolkit.get_action("organization_create")(
            context={"user": site_user["name"]},
            data_dict=create_payload,
        )
        was_created = True
    if close_session:
        model.Session.remove()
    return org, was_created
class ClickLoggingHandler(logging.Handler):
    """Logging handler that renders records through click's colored output."""

    # levelno -> (foreground, background) pair handed to click.secho().
    # Levels not listed here are printed without colors.
    _COLORS = {
        logging.DEBUG: ("black", "bright_white"),
        logging.INFO: ("bright_blue", None),
        logging.WARNING: ("bright_magenta", None),
        logging.ERROR: ("bright_white", "red"),
        logging.CRITICAL: ("bright_red", None),
    }

    def emit(self, record: logging.LogRecord) -> None:
        fg, bg = self._COLORS.get(record.levelno, (None, None))
        click.secho(self.format(record), bg=bg, fg=fg)
| 1.976563 | 2 |
nextdl/postprocessor/execafterdownload.py | devenu85/nextdl | 1 | 12772790 | """
MIT License
Copyright (c) 2021 nextdl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import unicode_literals
import subprocess
from ..compat import compat_shlex_quote
from ..utils import PostProcessingError, encodeArgument
from .common import PostProcessor
class ExecAfterDownloadPP(PostProcessor):
    """Post-processor that runs a user-supplied shell command once a file
    has finished downloading.
    """

    def __init__(self, downloader, exec_cmd):
        super(ExecAfterDownloadPP, self).__init__(downloader)
        # Command template; every "{}" marks where the downloaded path goes.
        self.exec_cmd = exec_cmd

    def run(self, information):
        template = self.exec_cmd
        if "{}" not in template:
            # No placeholder given: append the file path at the end.
            template = template + " {}"
        # The path is shell-quoted; the command itself is deliberately run
        # through the shell (shell=True) so users can write full pipelines.
        cmd = template.replace("{}", compat_shlex_quote(information["filepath"]))

        self._downloader.to_screen("[exec] Executing command: %s" % cmd)
        retCode = subprocess.call(encodeArgument(cmd), shell=True)
        if retCode != 0:
            raise PostProcessingError("Command returned error code %d" % retCode)

        return [], information
| 2 | 2 |
sqlalchemy_imageattach/store.py | jpmn/sqlalchemy-imageattach | 83 | 12772791 | """:mod:`sqlalchemy_imageattach.store` --- Image storage backend interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module declares a common interface for physically agnostic storage
backends. Whatever a way to implement a storage, it needs only common
operations of the interface. This consists of some basic operations
like writing, reading, deletion, and finding urls.
Modules that implement the storage interface inside
:mod:`sqlalchemy_imageattach.storages` package might help to implement
a new storage backend.
"""
import io
import numbers
import shutil
from .file import FileProxy, SeekableFileProxy
__all__ = 'Store',
class Store(object):
"""The interface of image storage backends. Every image storage
backend implementation has to implement this.
"""
def put_file(self, file, object_type, object_id, width, height, mimetype,
reproducible):
"""Puts the ``file`` of the image.
:param file: the image file to put
:type file: file-like object, :class:`file`
:param object_type: the object type of the image to put
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to put
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to put
:type width: :class:`numbers.Integral`
:param height: the height of the image to put
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to put
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:param reproducible: :const:`True` only if it's reproducible by
computing e.g. resized thumbnails.
:const:`False` if it cannot be reproduced
e.g. original images
:type reproducible: :class:`bool`
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`store()` method instead of this.
"""
raise NotImplementedError('put_file() has to be implemented')
def delete_file(self, object_type, object_id, width, height, mimetype):
"""Deletes all reproducible files related to the image.
It doesn't raise any exception even if there's no such file.
:param object_type: the object type of the image to put
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to put
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to delete
:type width: :class:`numbers.Integral`
:param height: the height of the image to delete
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to delete
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
"""
raise NotImplementedError('delete_file() has to be implemented')
def get_file(self, object_type, object_id, width, height, mimetype):
"""Gets the file-like object of the given criteria.
:param object_type: the object type of the image to find
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to find
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to find
:type width: :class:`numbers.Integral`
:param height: the height of the image to find
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to find
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:returns: the file of the image
:rtype: file-like object, :class:`file`
:raise IOError: when such file doesn't exist
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`open()` method instead of this.
"""
raise NotImplementedError('get_file() has to be implemented')
def get_url(self, object_type, object_id, width, height, mimetype):
"""Gets the file-like object of the given criteria.
:param object_type: the object type of the image to find
e.g. ``'comics.cover'``
:type object_type: :class:`str`
:param object_id: the object identifier number of the image to find
:type object_id: :class:`numbers.Integral`
:param width: the width of the image to find
:type width: :class:`numbers.Integral`
:param height: the height of the image to find
:type height: :class:`numbers.Integral`
:param mimetype: the mimetype of the image to find
e.g. ``'image/jpeg'``
:type mimetype: :class:`str`
:returns: the url locating the image
:rtype: :class:`str`
.. note::
This is an abstract method which has to be implemented
(overridden) by subclasses.
It's not for consumers but implementations, so consumers
should use :meth:`locate()` method instead of this.
"""
raise NotImplementedError('get_url() has to be implemented')
def store(self, image, file):
"""Stores the actual data ``file`` of the given ``image``.
::
with open(imagefile, 'rb') as f:
store.store(image, f)
:param image: the image to store its actual data file
:type image: :class:`sqlalchemy_imageattach.entity.Image`
:param file: the image file to put
:type file: file-like object, :class:`file`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif not callable(getattr(file, 'read', None)):
raise TypeError('file must be a readable file-like object that '
'implements read() method, not ' + repr(file))
self.put_file(file, image.object_type, image.object_id,
image.width, image.height, image.mimetype,
not image.original)
def delete(self, image):
"""Delete the file of the given ``image``.
:param image: the image to delete
:type image: :class:`sqlalchemy_imageattach.entity.Image`
"""
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
self.delete_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
def open(self, image, use_seek=False):
    """Opens the file-like object of the given ``image``.
    Returned file-like object guarantees:
    - context manager protocol
    - :class:`collections.abc.Iterable` protocol
    - :class:`collections.abc.Iterator` protocol
    - :meth:`~io.RawIOBase.read()` method
    - :meth:`~io.IOBase.readline()` method
    - :meth:`~io.IOBase.readlines()` method
    To sum up: you definitely can read the file, in :keyword:`with`
    statement and :keyword:`for` loop.
    Plus, if ``use_seek`` option is :const:`True`:
    - :meth:`~io.IOBase.seek()` method
    - :meth:`~io.IOBase.tell()` method
    For example, if you want to make a local copy of
    the image::
        import shutil
        with store.open(image) as src:
            with open(filename, 'wb') as dst:
                shutil.copyfileobj(src, dst)
    :param image: the image to get its file
    :type image: :class:`sqlalchemy_imageattach.entity.Image`
    :param use_seek: whether the file should seekable.
                     if :const:`True` it maybe buffered in the memory.
                     default is :const:`False`
    :type use_seek: :class:`bool`
    :returns: the file-like object of the image, which is a context
              manager (plus, also seekable only if ``use_seek``
              is :const:`True`)
    :rtype: :class:`file`, :class:`~sqlalchemy_imageattach.file.FileProxy`,
            file-like object
    :raise IOError: when such file doesn't exist
    """
    from .entity import Image
    # Validate the argument before touching the backend.
    if not isinstance(image, Image):
        raise TypeError('image must be a sqlalchemy_imageattach.entity.'
                        'Image instance, not ' + repr(image))
    elif image.object_id is None:
        raise TypeError('image.object_id must be set; it is currently '
                        'None however')
    elif not isinstance(image.object_id, numbers.Integral):
        raise TypeError('image.object_id must be integer, not ' +
                        repr(image.object_id))
    f = self.get_file(image.object_type, image.object_id,
                      image.width, image.height, image.mimetype)
    # Sanity-check that the backend honored the get_file() contract.
    for method in 'read', 'readline', 'readlines':
        if not callable(getattr(f, method, None)):
            raise TypeError(
                '{0!r}.get_file() must return file-like object which '
                'has {1}() method, not {2!r}'.format(self, method, f)
            )
    # True when the backend file already supports the context-manager
    # protocol, so no proxy wrapper is needed for `with` support.
    ctxt = (callable(getattr(f, '__enter__', None)) and
            callable(getattr(f, '__exit__', None)))
    if use_seek:
        if not callable(getattr(f, 'seek', None)):
            # Backend stream is not seekable: buffer it fully in memory.
            f2 = io.BytesIO()
            shutil.copyfileobj(f, f2)
            f2.seek(0)
            return f2
        if ctxt:
            return f
        # Seekable but not a context manager: wrap to add `with` support.
        return SeekableFileProxy(f)
    if ctxt:
        return f
    return FileProxy(f)
def locate(self, image):
    """Return the public URL of ``image`` with a cache-busting timestamp.

    :param image: the image to get its url
    :type image: :class:`sqlalchemy_imageattach.entity.Image`
    :returns: the url of the image
    :rtype: :class:`str`
    """
    from .entity import Image
    if not isinstance(image, Image):
        raise TypeError('image must be a sqlalchemy_imageattach.entity.'
                        'Image instance, not ' + repr(image))
    url = self.get_url(image.object_type, image.object_id,
                       image.width, image.height, image.mimetype)
    # Append the creation timestamp as a query argument, choosing the
    # separator depending on whether the url already has a query string.
    timestamp_fmt = '{0}&_ts={1}' if '?' in url else '{0}?_ts={1}'
    return timestamp_fmt.format(
        url, image.created_at.strftime('%Y%m%d%H%M%S%f'))
| 2.796875 | 3 |
costprediction/predict.py | Guillaume-Docquier/python-azure-func-tutorial | 1 | 12772792 | import tensorflow as tf
import numpy as np
import os
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
MODEL_PATH = os.path.join(SCRIPT_DIR, "model/model.h5")
MODEL = None
INPUT_SIZE = 7 * 12
OUTPUT_SIZE = 1
def _load_model():
    """
    Load the TensorFlow model if it is not loaded in the current context
    Azure functions often preserve their contexts between executions
    https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python#global-variables
    """
    global MODEL
    # Only hit the disk once per warm worker; subsequent invocations reuse
    # the module-level MODEL.
    if MODEL is None:
        MODEL = tf.keras.models.load_model(MODEL_PATH)
def normalize(costs):
    """Map raw costs into log space as ``log(costs + 1)``."""
    shifted = costs + 1
    return np.log(shifted)
def denormalize(costs):
    """Invert :func:`normalize`: return ``exp(costs) - 1``."""
    expanded = np.exp(costs)
    return expanded - 1
def make_subsequences(data, subsequence_size):
    """
    Create sliding windows of length ``subsequence_size`` over ``data``.

    Example
    -------
    >>> make_subsequences(np.array([1, 2, 3, 4]), 2)
    array([
        [1, 2],
        [2, 3],
        [3, 4],
    ])
    """
    window_count = data.shape[0] - subsequence_size + 1
    windows = [data[start:start + subsequence_size]
               for start in range(window_count)]
    return np.array(windows)
def predict_costs(actual_costs):
    """Predict future costs from a history of actual costs.

    ``actual_costs`` is log-normalized, split into INPUT_SIZE-long sliding
    windows, fed through the cached Keras model, and the predictions are
    mapped back to the original cost scale.

    :param actual_costs: sequence of past cost values;
        must contain at least INPUT_SIZE entries so that at least one
        subsequence can be formed.
    :returns: list of denormalized predicted costs, one per window.
    """
    _load_model()
    normalized_costs = normalize(np.array(actual_costs))
    subsequences = make_subsequences(normalized_costs, INPUT_SIZE)
    # batch_size = number of subsequences: run all windows in one batch.
    predictions = MODEL.predict(subsequences, subsequences.shape[0]).flatten()
    predictions = denormalize(predictions)
    return predictions.tolist()
| 2.734375 | 3 |
angr_seek.py | ucsb-seclab/hacrs-input-generator | 3 | 12772793 | import json
import re
import sys
import angr
import IPython
import os
import logging
import time
import simuvex
import select
from angr.exploration_techniques import ExecuteAddressGoal
from config import *
#base_dir = KUBERNETES_BASE_DIR
base_dir = DESKTOP_BASE_DIR
def heardEnter():
    """Non-blocking poll: did the user press enter on stdin?

    Uses select() with a near-zero timeout so the caller's loop is not
    blocked. When a line is available it is consumed (and discarded) so
    the next poll starts fresh.

    Returns
    -------
    bool
        True when a line was waiting on stdin, False otherwise.
    """
    readable, _, _ = select.select([sys.stdin], [], [], 0.0001)
    for stream in readable:
        if stream == sys.stdin:
            # Consume the pending line; do not shadow the builtin `input`.
            sys.stdin.readline()
            return True
    # Explicit False instead of the original implicit-None fall-through
    # (both are falsy, so callers are unaffected).
    return False
def read_crash_addr(crash_info_path):
    """Extract the crashing block address from a ``.crash_info`` JSON file.

    The file's ``crash_trace`` field is expected to start with a line of
    the form ``Trace 0x... [HEXADDR]``; HEXADDR is returned as an int.
    """
    with open(crash_info_path) as crash_file:
        crash_info = json.load(crash_file)
    match = re.match(r'Trace 0x[0-9a-fA-F]* \[([0-9a-fA-F]*)\]',
                     crash_info['crash_trace'])
    return int(match.group(1), base=16)
def replay_pov(event, challenge, pov_name):
    """Symbolically explore a challenge binary toward a crash address.

    Loads the challenge binary, then uses angr's Director exploration
    technique to steer paths toward ``crash_addr``, reporting paths that
    reach it and dropping into IPython when a new crash is found.

    NOTE(review): this function appears corrupted/unfinished — see the
    inline notes below; it cannot run as written.
    """
    l = logging.getLogger(name="angr.analyses.cfg_accurate")
    #l.setLevel(logging.DEBUG)
    directory = os.path.join(get_results_dir(base_dir, event), challenge)
    assert os.path.isdir(directory)
    # NOTE(review): crash address is hard-coded; the intended call to
    # read_crash_addr() on the pov's .crash_info file is commented out.
    #crash_addr = read_crash_addr(os.path.join(directory, 'pov', pov_name + '.crash_info'))
    crash_addr = 0x080500ae
    proj = angr.Project(os.path.join(directory, 'bin', challenge), load_options={'auto_load_libs': False})
    state = proj.factory.full_init_state(add_options=simuvex.o.unicorn, remove_options={simuvex.o.LAZY_SOLVES})
    pg = proj.factory.path_group(state)
    # Steer exploration toward the crash address.
    execute_crash_addr_goal = ExecuteAddressGoal(crash_addr)
    director_explorer = angr.exploration_techniques.Director(goals=[execute_crash_addr_goal])
    pg.use_technique(director_explorer)
    while len(pg.active) > 0:
        print "step, {}".format({key: len(val) for key, val in pg.stashes.iteritems()})
        pg.step()
        # NOTE(review): bare name `reached_` is a truncated statement and
        # raises NameError at runtime.
        reached_
        for stash in pg.stashes:
            for path in pg.stashes[stash]:
                if crash_addr in path.history._addr:
                    print "Path {} reached the target address".format(path)
        #if any([crash_addr in path.history._addr for path in pg.active] + [crash_add])
        # NOTE(review): `c`, `num_crashes` and `before_time` are never
        # defined before first use — NameError on this branch.
        if heardEnter() or len(c.crashes) != num_crashes:
            after_time = time.time()
            print "Finding a new crash took {} seconds".format(after_time - before_time)
            IPython.embed()
            before_time = time.time()
            num_crashes = len(c.crashes)
    after_time = time.time()
    print "Ran out of paths after {} seconds".format(after_time - before_time)
    IPython.embed()
if __name__ == '__main__':
    import sys
    import signal
    if len(sys.argv) < 4:
        print "Usage: {} <examples|finals|qualifiers> <challenge_name> <pov_name>".format(sys.argv[0])
        sys.exit(1)
    # Kill the whole run with SIGALRM after 10 minutes as a hard timeout.
    minute = 60
    hour = 60 * minute
    day = 24 * hour
    signal.alarm(10 * minute)
    replay_pov(sys.argv[1], sys.argv[2], sys.argv[3])
| 2.015625 | 2 |
ogkeeper.py | paulober/OGKeeper | 0 | 12772794 | #!/usr/bin/python3
import json
import os
import shutil
import sys
import time
def main(config_file="/etc/ogkeeper/config.json"):
    """Restore watched files from their originals after a delay.

    Reads the JSON config, sleeps ``countdownInMinutesNotFloatingpoint``
    minutes, copies every ``og`` (original) file over its ``newfile``
    target, then runs the configured service restart command.

    Parameters
    ----------
    config_file : str
        Path to the ogkeeper JSON configuration file.
    """
    # Parse the config directly instead of joining readlines() by hand.
    with open(config_file, 'r') as f:
        config = json.load(f)
    time.sleep(int(config["countdownInMinutesNotFloatingpoint"]) * 60)
    for entry in config["keeping"]:
        # 'wb' already truncates the target, so no explicit truncate()
        # call is needed before the copy.
        with open(entry["og"], 'rb') as fsrc, \
                open(entry["newfile"], 'wb') as fdst:
            shutil.copyfileobj(fsrc, fdst)
    # NOTE(review): runs through the shell — the config must be trusted.
    os.system(str(config["serviceRestartCmd"]))
if __name__ == '__main__':
    # When run as a script, an explicit config path argument is required.
    assert len(sys.argv) > 1
    main(sys.argv[1])
| 2.421875 | 2 |
Scripts/patser_list_parallel.py | colinwalshbrown/CWB_utils | 0 | 12772795 | <gh_stars>0
#!/usr/bin/env python
import sys
import re
import patser_tools
import xgrid_tools
CHUNKSIZE=10000
class seq_scanner():
    """Container pairing sequences with PWM matrices for scanning."""

    def __init__(self, seqs=None, matrices=None):
        # Inputs for the scan; results/header are populated by runXgrid().
        self.seqs = seqs
        self.matrices = matrices
        self.results = None
        self.header = None

    def runXgrid(self):
        """Run scan_seqs over the stored inputs and cache the output."""
        (self.results, self.header) = scan_seqs(self.seqs, self.matrices)
        return (self.results, self.header)
def _main(args):
    """CLI entry point: scan stdin FASTA against matrix files in ``args``.

    NOTE(review): the parallel branch of this function is truncated in the
    source (bare name ``jobs`` and an incomplete ``if`` expression), so
    the file does not parse as-is.
    """
    parallel = False
    if len(args) < 1:
        print "usage: patser_list.py --parallel [mtx1][mtx2] ... < seqs.fa"
        sys.exit(1)
    # Accept either short or long flag for parallel (xgrid) mode.
    if "-p" in args:
        parallel = True
        args.remove("-p")
    elif "--parallel" in args:
        parallel = True
        args.remove("--parallel")
    matrices = args
    seqs = sys.stdin
    if not parallel:
        (header,result) = scan_seqs(seqs,matrices)
        print header
        # NOTE(review): `results` is undefined here — presumably `result`
        # was intended.
        print results
    else:
        seqlist = []
        # NOTE(review): truncated code below — `jobs` is a bare name and
        # the `if` expression is incomplete.
        jobs
        for (i,s) in enumerate(seqs):
            if (i % CHUNKSIZE) ==
def scan_seqs(seqs, matrices):
    """Scan FASTA-format sequences against each patser matrix.

    Parses ``seqs`` (an iterable of FASTA text lines), runs patser for
    every matrix on every sequence, keeps the single best-scoring hit per
    (sequence, matrix) pair, and renders two tab-separated strings.

    Returns a ``(header, result)`` tuple of TSV strings.
    """
    # --- parse FASTA lines into {name: {'seq': sequence}} ---
    hits = {}
    name = ""
    seq = ""
    for s in seqs:
        nameres = re.search(">(\S+)", s)
        if nameres:
            if name != "":
                hits[name] = {'seq': seq}
            name = nameres.group(0)
            seq = ""
        else:
            seq += s[:-1]  # drop trailing newline
    if name != "":
        # BUGFIX: the final record used to be dropped because records were
        # only stored when the *next* header line appeared.
        hits[name] = {'seq': seq}
    # --- keep the best-scoring hit per matrix for each sequence ---
    mtx_names = []
    for (name, d) in hits.iteritems():
        for mtx in matrices:
            hit_annot = patser_tools.makePatserAnnotation(
                sequence=d['seq'], matrix=mtx, seqname=name, scorecut=-100)
            features = hit_annot.getAllFeatures()
            if len(features) > 0:
                best = features[0]
                for x in features:
                    if x.tags['score'] > best.tags['score']:
                        best = x
                hit = best
            else:
                print >> sys.stderr, "No hit for matrix %s in %s" % (mtx, d['seq'])
                continue
            d[hit.tags['motif_name']] = hit
            if hit.tags['motif_name'] not in mtx_names:
                mtx_names.append(hit.tags['motif_name'])
    # --- render TSV strings ---
    # BUGFIX: these accumulators used to be built as tuples via stray
    # trailing commas, and the row accumulator was initialized under the
    # misspelled name `results` (NameError on `result +=`).
    header = "name\tsequence\t"
    for x in mtx_names:
        header += "%s_score\t%s_pval\t" % (x, x)
    result = ""
    for (name, h) in hits.iteritems():
        row_matrices = [x for x in h.keys() if not x == 'seq']
        result += "%s\t%s\t" % (name, h['seq'])
        for x in row_matrices:
            result += str(h[x].tags['score']) + "\t"
            if 'pval' in h[x].tags.keys():
                result += str(h[x].tags['pval']) + "\t"
            else:
                result += "-"
        result += "\n"  # one row per sequence
    return (header, result)
if __name__ == "__main__":
_main(sys.argv[1:])
| 2.6875 | 3 |
layers.py | anish9/Deblur-Network | 6 | 12772796 | import os
import cv2
import numpy as np
from tensorflow.keras.layers import *
import tensorflow as tf
from tensorflow.keras.layers import add
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.image import load_img,img_to_array
from tensorflow.keras.optimizers import Nadam,Adam
from tensorflow.keras.initializers import Initializer
def conv_global(x, t, stride=False):
    """3x3 Conv -> BatchNorm -> ReLU block.

    Parameters
    ----------
    x : tensor
        Input feature map.
    t : int or str
        Suffix used to build a unique layer name ("convg_<t>").
    stride : tuple or False, optional
        When given, used as the convolution strides; otherwise the
        default stride of (1, 1) applies.

    Returns
    -------
    tensor
        Activated feature map.
    """
    # BUGFIX: the original built the conv twice when `stride` was given,
    # reusing the same layer name "convg_<t>" — a duplicate-name error in
    # Keras — and discarded the first conv's output. Build it once, with
    # the stride folded into the single layer.
    strides = stride if stride else (1, 1)
    xin = Conv2D(64, 3, padding="same", strides=strides,
                 name="convg_" + str(t))(x)
    xin = BatchNormalization(axis=-1)(xin)
    xin = Activation("relu")(xin)
    return xin
def RDBlocks(x,name , count = 6 , g=32):
    """Residual Dense Block: densely connected convs + local residual.

    Each of `count` 3x3 convs (growth rate `g`) sees the concatenation of
    the input and all previous conv outputs; a final 1x1 conv fuses the
    features back to 64 channels and the block input is added residually.
    """
    li = [x]
    pas = Convolution2D(filters=g, kernel_size=(3,3), strides=(1, 1), padding='same' , activation='relu' , name = name+'_conv1')(x)
    for i in range(2 , count+1):
        li.append(pas)
        out = Concatenate(axis = -1)(li) # concatenated output of all previous layers
        pas = Convolution2D(filters=g, kernel_size=(3,3), strides=(1, 1), padding='same' , activation='relu', name = name+'_conv'+str(i))(out)
    # feature extractor from the dense net
    li.append(pas)
    out = Concatenate(axis = -1)(li)
    # 1x1 local feature fusion back to 64 channels, then residual add.
    feat = Convolution2D(filters=64, kernel_size=(1,1), strides=(1, 1), padding='same',activation='relu' , name = name+'_Local_Conv')(out)
    feat = Add()([feat , x])
    return feat
def tensor_depth_to_space(imag, block_size, names):
    """Wrap tf.depth_to_space so it can be called from a Lambda layer."""
    rearranged = tf.depth_to_space(imag, block_size, name=names)
    return rearranged
def tf_subpixel_conv(tensor,block_size,filters):
    """Sub-pixel (pixel-shuffle) upsampling: conv, depth_to_space, PReLU.

    NOTE(review): depth_to_space requires the conv's channel count
    (`filters`) to be divisible by block_size**2 — confirm at call sites.
    """
    x = Conv2D(filters,3,strides=(1,1),padding="same")(tensor)
    x = Lambda(lambda x : tensor_depth_to_space(x,block_size,names="subpixel_conv"))(x)
    # PReLU shared across spatial axes (one slope per channel).
    x = PReLU(shared_axes=[1, 2])(x)
    return x
| 2.796875 | 3 |
metadata/resources/xrt.py | bmampaey/SDA | 0 | 12772797 | # Generated by command write_metadata_files version 1
from metadata.models import Xrt
from .base_metadata import BaseMetadataResource
__all__ = ['XrtResource']
class XrtResource(BaseMetadataResource):
    '''RESTful resource for model Xrt'''
    class Meta(BaseMetadataResource.Meta):
        # Concrete resource exposing every Xrt row under 'metadata_xrt'.
        abstract = False
        queryset = Xrt.objects.all()
        resource_name = 'metadata_xrt'
| 2 | 2 |
lambda_module.py | sernst/HelloLambda | 0 | 12772798 | <gh_stars>0
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import os
import sys
#*******************************************************************************
# Initialize the python environment by adding the necessary local paths to
# Python's search path.
MY_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
# ADD TO PATH: Local project files
sys.path.append(os.path.join(MY_DIRECTORY, 'src'))
# ADD TO PATH: Local site packages (virtualenv bundled with the lambda zip)
sys.path.append(os.path.join(
    MY_DIRECTORY,
    'venv_py27', 'lib', 'python2.7', 'site-packages'))
#*******************************************************************************
# hello_lambda only becomes importable after the sys.path additions above.
import hello_lambda
def handler(event, context):
    """The lambda function entry point.

    :param event:
        Dictionary containing parameters related to the function call
    :param context:
        Runtime execution and environmental information for this event
    """
    results = hello_lambda.run(event)
    preview = ['{}'.format(entry[0]) for entry in results[:20]]
    print('\n'.join(preview))
    return 'Jargon Count: {}'.format(len(results))
| 2.65625 | 3 |
homeassistant/components/webostv/trigger.py | MrDelik/core | 30,023 | 12772799 | """webOS Smart TV trigger dispatcher."""
from __future__ import annotations
from typing import cast
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .triggers import TriggersPlatformModule, turn_on
TRIGGERS = {
"turn_on": turn_on,
}
def _get_trigger_platform(config: ConfigType) -> TriggersPlatformModule:
    """Return trigger platform."""
    # Platform is encoded as "<domain>.<trigger_name>".
    platform_split = config[CONF_PLATFORM].split(".", maxsplit=1)
    if len(platform_split) >= 2 and platform_split[1] in TRIGGERS:
        return cast(TriggersPlatformModule, TRIGGERS[platform_split[1]])
    raise ValueError(
        f"Unknown webOS Smart TV trigger platform {config[CONF_PLATFORM]}"
    )
async def async_validate_trigger_config(
    hass: HomeAssistant, config: ConfigType
) -> ConfigType:
    """Validate config."""
    # Delegate validation to the matching trigger platform's schema.
    platform = _get_trigger_platform(config)
    return cast(ConfigType, platform.TRIGGER_SCHEMA(config))
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
    """Attach trigger of specified platform."""
    platform = _get_trigger_platform(config)
    assert hasattr(platform, "async_attach_trigger")
    # Dispatch via getattr to the platform module; the awaited result is
    # the detach callback returned to the automation framework.
    return cast(
        CALLBACK_TYPE,
        await getattr(platform, "async_attach_trigger")(
            hass, config, action, automation_info
        ),
    )
| 2.1875 | 2 |
ares/inference/FitGlobal21cm.py | eklem1/ares | 0 | 12772800 | """
ModelFit.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon May 12 14:01:29 MDT 2014
Description:
"""
import signal
import numpy as np
from ..util.PrintInfo import print_fit
from ..util.Pickling import write_pickle_file
from ..physics.Constants import nu_0_mhz
import gc, os, sys, copy, types, time, re
from .ModelFit import ModelFit, LogLikelihood, FitBase
from ..simulations import Global21cm as simG21
from ..analysis import Global21cm as anlGlobal21cm
from ..simulations import Global21cm as simGlobal21cm
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
def_kwargs = {'verbose': False, 'progress_bar': False}
class loglikelihood(LogLikelihood):
    def __init__(self, xdata, ydata, error, turning_points):
        """
        Computes log-likelihood at given step in MCMC chain.

        Parameters
        ----------
        xdata, ydata, error :
            Forwarded to the LogLikelihood base class.
        turning_points :
            Falsy, or a list of turning-point labels (e.g. ['B','C','D'])
            to fit instead of the full signal.
        """
        LogLikelihood.__init__(self, xdata, ydata, error)
        self.turning_points = turning_points

    def __call__(self, sim):
        """
        Compute log-likelihood for model generated via input parameters.

        Returns
        -------
        Tuple: (log likelihood, blobs)
        """
        # Compute the likelihood if we've made it this far
        if self.turning_points:
            tps = sim.turning_points
            try:
                # Model prediction: frequencies and temperatures of the
                # requested turning points.
                nu = [nu_0_mhz / (1. + tps[tp][0]) \
                    for tp in self.turning_points]
                T = [tps[tp][1] for tp in self.turning_points]
            except KeyError:
                # A requested turning point did not occur in this model:
                # rule the model out.
                return -np.inf
            yarr = np.array(nu + T)
            assert len(yarr) == len(self.ydata)
        else:
            # Fit the full brightness-temperature curve, interpolated
            # onto the data frequencies.
            yarr = np.interp(self.xdata, sim.history['nu'], sim.history['dTb'])
        if np.any(np.isnan(yarr)):
            return -np.inf
        # Gaussian log-likelihood with per-point errors.
        lnL = -0.5 * (np.sum((yarr - self.ydata)**2 \
            / self.error**2 + np.log(2. * np.pi * self.error**2)))
        return lnL + self.const_term
class FitGlobal21cm(FitBase):
    """Fit the global 21-cm signal, or its turning points, to data."""

    @property
    def loglikelihood(self):
        # Lazily construct the likelihood from the stored data/errors.
        if not hasattr(self, '_loglikelihood'):
            self._loglikelihood = loglikelihood(self.xdata, self.ydata,
                self.error, self.turning_points)
        return self._loglikelihood

    @property
    def turning_points(self):
        # Defaults to False: fit the full signal rather than extrema.
        if not hasattr(self, '_turning_points'):
            self._turning_points = False
        return self._turning_points

    @turning_points.setter
    def turning_points(self, value):
        # Accepts: bool (True -> ['B','C','D']), tuple/list of labels,
        # or a string of one or more labels (e.g. 'BCD').
        if type(value) == bool:
            if value:
                self._turning_points = list('BCD')
            else:
                self._turning_points = False
        elif type(value) == tuple:
            self._turning_points = list(value)
        elif type(value) == list:
            self._turning_points = value
        elif isinstance(value, basestring):
            if len(value) == 1:
                self._turning_points = [value]
            else:
                self._turning_points = list(value)

    @property
    def frequencies(self):
        if not hasattr(self, '_frequencies'):
            raise AttributeError('Must supply frequencies by hand!')
        return self._frequencies

    @frequencies.setter
    def frequencies(self, value):
        self._frequencies = value

    @property
    def data(self):
        if not hasattr(self, '_data'):
            raise AttributeError('Must set data by hand!')
        return self._data

    @data.setter
    def data(self, value):
        """
        Set x and ydata at the same time, either by passing in
        a simulation instance, a dictionary of parameters, or a
        sequence of brightness temperatures corresponding to the
        frequencies defined in self.frequencies (self.xdata).
        """
        if type(value) == dict:
            # Dict of parameters: run a fresh simulation.
            kwargs = value.copy()
            kwargs.update(def_kwargs)
            sim = simGlobal21cm(**kwargs)
            sim.run()
            self.sim = sim
        elif isinstance(value, simGlobal21cm) or \
            isinstance(value, anlGlobal21cm):
            # Already a (simulated/analyzed) signal instance.
            sim = self.sim = value
        elif type(value) in [list, tuple]:
            # Raw values supplied directly; no simulation available.
            sim = None
        else:
            # Array-like of brightness temperatures on self.frequencies.
            assert len(value) == len(self.frequencies)
            assert not self.turning_points
            self.xdata = self.frequencies
            self.ydata = value
            return
        # NOTE(review): turning_points defaults to False (not None), so
        # this branch is taken even when turning-point fitting is off.
        if self.turning_points is not None:
            self.xdata = None
            if sim is not None:
                # Extract (frequency, temperature) of each turning point.
                z = [sim.turning_points[tp][0] for tp in self.turning_points]
                T = [sim.turning_points[tp][1] for tp in self.turning_points]
                nu = nu_0_mhz / (1. + np.array(z))
                self.ydata = np.array(list(nu) + T)
            else:
                # Raw values: one frequency + one temperature per point.
                assert len(value) == 2 * len(self.turning_points)
                self.ydata = value
        else:
            self.xdata = self.frequencies
            if hasattr(self, 'sim'):
                nu = self.sim.history['nu']
                dTb = self.sim.history['dTb']
                # Synthetic data: interpolated model plus optional noise.
                self.ydata = np.interp(self.xdata, nu, dTb).copy() \
                    + self.noise

    @property
    def noise(self):
        # Defaults to zero noise on the data frequencies.
        if not hasattr(self, '_noise'):
            self._noise = np.zeros_like(self.xdata)
        return self._noise

    @noise.setter
    def noise(self, value):
        # `value` is the Gaussian standard deviation of the noise draw.
        self._noise = np.random.normal(0., value, size=len(self.frequencies))

    @property
    def error(self):
        if not hasattr(self, '_error'):
            raise AttributeError('Must set errors by hand!')
        return self._error

    @error.setter
    def error(self, value):
        if type(value) is dict:
            # Dict keyed by turning point: (freq_err, temp_err) pairs.
            nu = [value[tp][0] for tp in self.turning_points]
            T = [value[tp][1] for tp in self.turning_points]
            self._error = np.array(nu + T)
        else:
            if hasattr(self, '_data'):
                assert len(value) == len(self.data), \
                    "Data and errors must have same shape!"
            self._error = value

    def _check_for_conflicts(self):
        """
        Hacky at the moment. Preventative measure against is_log=True for
        spectrum_logN. Could generalize.
        """
        for i, element in enumerate(self.parameters):
            if re.search('spectrum_logN', element):
                if self.is_log[i]:
                    raise ValueError('spectrum_logN is already logarithmic!')
| 2.0625 | 2 |
abaqus_inside.py | rodrigo1392/abaqus_macros | 4 | 12772801 | """Functions to manage Abaqus objects and actions internally.
Intended to be used by the Abaqus CAE Python interpreter.
Developed by <NAME>.
https://github.com/rodrigo1392
"""
from abaqus import *
from abaqusConstants import *
from driverUtils import *
import odbAccess
# Flexibilize for Abaqus viewer
try:
from caeModules import *
except:
pass
import ast
import os
import sys
from tools_submodule import filesystem_tools as ft
# Abaqus mesh-stats keywords.
ELEMENT_TYPES = ['numLineElems', 'numMeshedRegions', 'numNodes',
'numPointElems', 'numPyramidElems', 'numQuadElems',
'numTetBoundaryElems', 'numTetElems', 'numTriElems',
'numWedgeElems']
def assign_2d_parts_properties(model_name, section_name,
                               first_letters=None):
    """Assign section properties to parts in a current database model.

    Parameters
    ----------
    model_name : str
        Name of model of interest.
    section_name : str
        Name of section properties object.
    first_letters : str, optional
        If given, filter parts by initial substring.

    Returns
    -------
    None
    """
    candidate_parts = list(mdb.models[model_name].parts.values())
    if first_letters:
        candidate_parts = [part for part in candidate_parts
                           if part.name.startswith(first_letters)]
    for part in candidate_parts:
        # Select the face region and register it as the 'BODY' set, then
        # attach the section to it.
        body_faces = part.faces.getSequenceFromMask(mask=('[#1 ]',), )
        body_region = part.Set(faces=body_faces, name='BODY')
        part.SectionAssignment(region=body_region, sectionName=section_name,
                               offset=0.0, offsetType=MIDDLE_SURFACE,
                               offsetField='', thicknessAssignment=FROM_SECTION)
def clean_parts_properties(model_name, first_letters=None):
    """Deletes section properties assignments of parts in a model.

    Parameters
    ----------
    model_name : str
        Name of model of interest.
    first_letters : str, optional
        If given, filter parts by initial substring.

    Returns
    -------
    None
    """
    candidate_parts = list(mdb.models[model_name].parts.values())
    if first_letters:
        candidate_parts = [part for part in candidate_parts
                           if part.name.startswith(first_letters)]
    for part in candidate_parts:
        # Deleting index 0 repeatedly drains the assignments repository.
        removal_count = len(part.sectionAssignments)
        for _ in range(removal_count):
            del part.sectionAssignments[0]
def export_parts_iges(model_name, output_path, first_letters=None):
    """Export parts in a model as iges files.

    Parameters
    ----------
    model_name : str
        Name of model of interest.
    output_path : Path-like str
        Path to export iges parts to.
    first_letters : str, optional
        If given, filter parts by initial substring.

    Returns
    -------
    None
    """
    candidate_parts = list(mdb.models[model_name].parts.values())
    if first_letters:
        candidate_parts = [part for part in candidate_parts
                           if part.name.startswith(first_letters)]
    for part in candidate_parts:
        # One IGES file per part, named after the part itself.
        part.writeIgesFile(fileName=output_path + r"\\" + part.name + '.igs',
                           flavor=STANDARD)
def extract_set_mesh_nodes(odb, set_name):
    """Get mesh nodes labels and coordinates of a set of points.

    Builds a dict keyed by (set name, instance name) tuples whose values
    are lists of (node label, node coordinates) tuples, for every
    instance participating in ``set_name``.

    Parameters
    ----------
    odb : Odb object
        To read from.
    set_name : str
        Name of set of points of interest.

    Returns
    -------
    Dict
        {set name: instance name} : [(node labels : nodes coordinates)].
    """
    print('Extracting nodes...')
    odb = normalize_odb_object(odb)
    node_set = odb.rootAssembly.nodeSets[set_name]
    output = {}
    # node_set.nodes is parallel to node_set.instanceNames.
    for num, instance_name in enumerate(node_set.instanceNames):
        node_records = [(node.label, node.coordinates)
                        for node in node_set.nodes[num]]
        output[(set_name, instance_name)] = node_records
    return output
def get_folder_calc_time(odbs_folder, show=True, recursive=False,
                         close_odbs=True):
    """Get job calculation time from all Odb objects in folder.

    Parameters
    ----------
    odbs_folder : Path
        Folder containing Odb objects.
    show : bool, optional
        If True, print Odb calculation time.
    recursive : bool, optional
        If True, list Odb files recursively, including subfolders.
    close_odbs : bool, optional
        If True, close current session Odb objects at the end of script.

    Returns
    -------
    dict
        Odb names : Dict of calculation times pairs.
    """
    # List odb full paths.
    odb_list = ft.list_files_with_extension(root_path=odbs_folder,
                                            extension='.odb',
                                            full_path=True,
                                            recursively=recursive)
    print(len(odb_list), 'Odb objects found')
    # Iterate trough Odb files, open them and extract calculation time.
    output = {}
    for job_key in odb_list:
        odb = normalize_odb_object(job_key)
        output[job_key] = get_odb_calc_time(odb, show)
    # Optionally, close all opened Odb objects in current session.
    if close_odbs:
        # Local import: abaqusMacros is only needed (and available) here.
        from abaqusMacros import close_all_odbs
        close_all_odbs()
    return output
def get_odb_calc_time(odb, show=True):
    """Get job calculation time from a Odb object.

    Parameters
    ----------
    odb : Odb object
        To read data from.
    show : bool, optional
        If True, print Odb calculation time.

    Returns
    -------
    dict
        Seconds as values and systemTime, userTime and wallclockTime
        objects as keys.
    """
    # Normalize input to Odb object.
    odb = normalize_odb_object(odb)
    calc_time = odb.diagnosticData.jobTime
    # Get calculation time and convert time to dict: the [1:-1] slice
    # presumably strips surrounding parentheses from the Abaqus repr so
    # literal_eval can parse it — confirm against the jobTime repr format.
    output = ast.literal_eval(str(calc_time)[1:-1])
    # Print calculation time.
    if show:
        odb_name = (os.path.splitext(os.path.basename(odb.name))[0])
        print(odb_name, ': ', str(calc_time))
    return output
def log_message(input_string):
    """Set an output message to pass to Popen subprocess stdout method.

    Parameters
    ----------
    input_string : str
        Message to be output.

    Returns
    -------
    None
    """
    # Python 2 print-redirect syntax (the Abaqus kernel runs Python 2);
    # writes to the real stdout even if sys.stdout has been redirected.
    print >>sys.__stdout__, input_string
def normalize_odb_object(odb_ish):
    """Return an opened Odb object from the current session.

    If the input is already an Odb object it is returned unchanged. If it
    is a string, the matching opened Odb is looked up in the session, and
    opened from disk when not found.

    Parameters
    ----------
    odb_ish : Odb object or string-like Path
        Odb object identifier.

    Returns
    -------
    Odb object
        Opened Odb object from current session.
    """
    # Non-strings are assumed to already be Odb objects.
    if not isinstance(odb_ish, str):
        return odb_ish
    try:
        return session.odbs[odb_ish]
    except KeyError:
        # Not yet opened in this session: open it now.
        return session.openOdb(odb_ish, readOnly=False)
def print_model_mesh_stats(model_name, total_stats=False):
    """Show amount of mesh elements, instance by instance.

    Parameters
    ----------
    model_name : str
        Name of model of interest.
    total_stats : bool, optional
        If True, show whole model stats in addition to instances stats.

    Returns
    -------
    None

    NOTE(review): `stats` and `tot_stats` are never assigned in this
    function, so eval() raises NameError — which `except AttributeError`
    does NOT catch. Presumably a getMeshStats() call on the instance /
    assembly was meant; confirm against the Abaqus scripting API.
    """
    # Iterate trough model instances and show mesh stats by
    # element type.
    print('*****', '\nModel:', model_name, '\n')
    model = mdb.models[model_name]
    for inst_key, instance in model.rootAssembly.instances.items():
        print('\nInstance:', inst_key)
        try:
            for element_type in ELEMENT_TYPES:
                # NOTE(review): `stats` is undefined here.
                number_of_elements = eval('stats.' + element_type)
                if number_of_elements:
                    print(element_type, ':', number_of_elements)
        except AttributeError:
            pass
    # Optionally, show whole model mesh stats.
    if total_stats:
        print('\n--- TOTAL ---')
        try:
            for element_type in ELEMENT_TYPES:
                # NOTE(review): `tot_stats` is undefined here.
                number_of_elements = eval('tot_stats.' + element_type)
                if number_of_elements:
                    print(element_type, ':', number_of_elements)
        except AttributeError:
            pass
def rename_model(model_name, new_name):
    """Assign a new name to a model in current database.

    Parameters
    ----------
    model_name : str
        Current model name.
    new_name : str
        New name for model.

    Returns
    -------
    None
    """
    # changeKey is the Abaqus repository's rename operation.
    mdb.models.changeKey(fromName=model_name, toName=new_name)
def rename_set(set_name, new_set_name):
    """Rename a set of points in all models from current database.

    Parameters
    ----------
    set_name : str
        Current set name.
    new_set_name : str
        New name for set.

    Returns
    -------
    None
    """
    # Iterate trough models and change set name. The keys are not used,
    # so walk values() directly (the original iterated items() and
    # discarded the key).
    for model in mdb.models.values():
        model.rootAssembly.sets.changeKey(fromName=set_name,
                                          toName=new_set_name)
def retrieve_odb_name(number, show_all=False):
    """Get Odb name from session Odbs list, depending on its position.

    Orders the session Odbs keys alphabetically and returns the one at
    ``number``. Useful when an Odb name is not known, but its position is.

    Parameters
    ----------
    number : int
        Position of Odb object in session Odbs list.
    show_all : bool, optional
        If True, print name of all opened Odbs.

    Returns
    -------
    str
        Name of opened Odb object from current session.
    """
    sorted_keys = sorted(session.odbs.keys())
    chosen_key = sorted_keys[number]
    if show_all:
        print('Currently opened Odbs', sorted_keys)
    return chosen_key
def retrieve_odb_set_name(odb, number, show_all=False):
    """Get set name from a Odb object, depending on its position.

    Orders the Odb node-set keys alphabetically and returns the one at
    ``number``. Useful when set name is not known, but its position is.

    Parameters
    ----------
    odb : Odb object or string.like Path
        Odb object identifier.
    number : int
        Position of set object in Odb sets list.
    show_all : bool, optional
        If True, print name of all Odb sets name.

    Returns
    -------
    str
        Name of set name in a Odb object from current session.
    """
    odb = normalize_odb_object(odb)
    sorted_keys = sorted(odb.rootAssembly.nodeSets.keys())
    chosen_key = sorted_keys[number]
    if show_all:
        print('Available node sets', sorted_keys)
    return chosen_key
def upgrade_odbs_folder(odbs_folder, recursive=False, print_every=1):
    """Upgrade version of all Odb objects in a folder.

    Parameters
    ----------
    odbs_folder : Path
        Folder containing Odb objects.
    recursive : bool, optional
        If True, list Odb files recursively, including subfolders.
    print_every : int, optional
        If given, reduces printing reports frequency.

    Returns
    -------
    None
    """
    # List Odb paths, filter only old versioned and report.
    odb_list = ft.list_files_with_extension(odbs_folder, '.odb', 1, recursive)
    upgradable_odb_list = [i for i in
                           odb_list if odbAccess.isUpgradeRequiredForOdb(i)]
    print(len(odb_list), 'Odb objects found', len(upgradable_odb_list),
          'require upgrade')
    # Set temporary names and iterate over old versioned Odbs.
    temp_name = os.path.join(odbs_folder, 'temp_odb_name.odb')
    for job_number, job_key in enumerate(upgradable_odb_list):
        # Optionally, report less times.
        if divmod(job_number, print_every)[1] == 0:
            print('Processing', job_key,
                  job_number + 1, 'of', len(upgradable_odb_list))
        # Upgrade into the temp file, keep the original as '-old', then
        # move the upgraded file into the original name.
        new_name = job_key
        old_name = job_key.replace('.odb', '-old.odb')
        session.upgradeOdb(job_key, temp_name)
        os.rename(job_key, old_name)
        os.rename(temp_name, new_name)
    print('DONE')
    return
| 2.359375 | 2 |
dnt_handle/Main.py | mybluedog24/dragonnest_skill_simulator | 0 | 12772802 | <gh_stars>0
import os
from OutputForSim import*
from OutputCSV import*
from FileIO import*
from OutputJS import*
def upate(dnt_list, class_list, exclusive_skills, json_str_dict, class_dict, uistring, skilltreetable, jobtable, playerleveltable, weapondict, folderCSV,folderDNT,folderJSON,folderSKILLJS):
    """Interactive update wizard for the skill simulator data pipeline.

    Walks the operator through every regeneration stage (level cap CSV,
    skill CSVs, skill JS, skill-tree JSON) via y/n prompts; any negative
    or invalid answer aborts the whole run.

    NOTE: the function name keeps the historical typo ("upate") because
    the module-level driver below calls it by this exact name.
    NOTE(review): ``update_level_cap`` is invoked with the module-level
    global ``lvlcap`` defined in the __main__ block — confirm this is
    intentional rather than a missing parameter.
    """
    # Message printed on every abort path.  (Shadows the builtin `exit`,
    # but only locally.)
    exit = "Booooo! Program exited, bye! Have a nice day!"
    # --- Pre-flight: dnt files must already be in place. ---
    print("======================")
    ans = input("Did you copy dnt files to <verXXX/dnt> folder? y/n")
    if ans.lower() != "y":
        print(exit)
        return
    # --- Confirm new classes have been wired into the code/assets. ---
    print("======================")
    ans = input("Did any new class added? y/n")
    if ans.lower() == "y":
        ans = input("If so, did you update all dicts and lists in this program?\nAnd did you add their images and skill_tree_background? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    elif ans.lower() != 'n':
        print("Wait, what did you put in here? Wrong answer!")
        print(exit)
        return
    # --- Confirm new exclusive skills are reflected in the lists. ---
    print("======================")
    ans = input("Did any new exclusive skills added to classes? y/n")
    if ans.lower() == "y":
        ans = input("If so, did you update the exclusive skill list in this program? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    elif ans.lower() != 'n':
        print("Wait, what did you put in here? Wrong answer!")
        print(exit)
        return
    # --- Stage 1: level-cap SP table. ---
    print("======================")
    ans = input("update level cap csv? y/n")
    if ans.lower() == "y":
        update_level_cap(playerleveltable, jobtable, lvlcap, folderCSV)
        print("========================")
        print("Update level cap ended.")
        print("Copy the printed information to sim_silveredge.js")
        print("========================")
        ans = input("Continue? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    # --- Stage 2: per-class skill-info CSVs. ---
    print("======================")
    ans = input("update csv files? y/n")
    if ans.lower() == "y":
        update_csv(skilltreetable, jobtable, uistring, dnt_list, weapondict, folderDNT, folderCSV)
        print("========================")
        print("Update csv files ended.")
        print("========================")
        ans = input("Continue? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    # --- Stage 3: pve/pvp skill-string JS files. ---
    print("======================")
    ans = input("update skillJS files (xxx_pve.js, xxx_pvp.js)? y/n")
    if ans.lower() == "y":
        update_skillJS(json_str_dict, folderSKILLJS, folderCSV)
        print("========================")
        print("Update skillJS files ended.")
        print("========================")
        ans = input("Continue? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    # --- Stage 4: skill-tree JSON strings. ---
    print("======================")
    ans = input("update json_str files (for represent skill tree)? y/n")
    if ans.lower() == "y":
        update_json_str(json_str_dict, class_list, class_dict, exclusive_skills, lvlcap, folderJSON, folderCSV)
        print("========================")
        print("Update json_str files ended.")
        print("========================")
        ans = input("Continue? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    # --- Manual asset / page checklist. ---
    print("======================")
    ans = input("Do you need to update skill tree background and icons? y/n")
    if ans.lower() == "y":
        ans = input("If so, did you update it? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    elif ans.lower() != 'n':
        print("Wait, what did you put in here? Wrong answer!")
        print(exit)
        return
    print("======================")
    ans = input("Do you need to update html pages? y/n")
    if ans.lower() == "y":
        ans = input("If so, did you update it? y/n")
        if ans.lower() != "y":
            print(exit)
            return
    elif ans.lower() != 'n':
        print("Wait, what did you put in here? Wrong answer!")
        print(exit)
        return
    # --- Final deployment confirmation. ---
    print("======================")
    ans = input("Finally did you copy all new files to uploadfiles folder? y/n")
    if ans.lower() == "n":
        print(exit)
        return
    elif ans.lower() != 'y':
        print("Wait, what did you put in here? Wrong answer!")
        print(exit)
        return
    print("Congratulations! Update done.")
##================================================
##====== Run this when level cap increases =======
##================================================
##==================== csv =======================
##================================================
## 1. Copy the printed information to sim_silveredge.js
## 2. increase the lvlcap in write_levelSP_csv
## 3. Check playerleveltable to see if the ratio of SP changed
def update_level_cap(playerleveltable, jobtable, lvlcap, folderCSV):
    """Regenerate the level/SP CSV and print the snippet for sim_silveredge.js."""
    sp_csv = folderCSV + "levelSP.csv"
    write_levelSP_csv(playerleveltable, jobtable, lvlcap, sp_csv)
    print_SP_for_simJS(lvlcap, sp_csv)
##================================================
##====== Run this when game is updated ===========
##================================================
##==================== csv =======================
##================================================
def update_csv(skilltreetable, jobtable, uistring, dnt_list, weapondict, folderDNT, folderCSV):
    """Rebuild the skill-info CSV for every (skillleveltable, skilltable) pair."""
    for level_table, skill_table in dnt_list:
        write_skillinfo_csv(folderDNT + level_table, folderCSV, uistring,
                            folderDNT + skill_table, skilltreetable,
                            jobtable, weapondict)
##================================================
##====== Run this when game is updated ===========
##================================================
##================== skillJS =====================
##================================================
def update_skillJS(json_str_dict, folderSKILLJS, folderCSV):
    """Regenerate the per-class pve/pvp skill-string JS files from the CSVs."""
    for base in json_str_dict:
        # Same output for both balance modes; only the file suffix differs.
        for mode in ('pve', 'pvp'):
            level_csv = folderCSV + "skillinfo_skillleveltable_character" + base + mode + ".csv"
            change_csv = folderCSV + "skillinfo_changeskill_skillleveltable_character" + base + mode + ".csv"
            output_skillstringJS(level_csv, change_csv, base + "_" + mode, folderSKILLJS)
##================================================
##====== Run this only if skill trees changed ====
##================================================
##================= json_str =====================
##================================================
## 1. name must be uppercase and second class
## 2. guess no need for both pve and pvp since it's
## just for tree slot and skill lvl stuff, not
## explanation.
## 3. Add exclusive skill to darkavenger.
def update_json_str(json_str_dict, class_list, class_dict, exclusive_skills, lvlcap, folderJSON, folderCSV):
    """Emit the skill-tree JSON string for every second class of every base class."""
    cid_dict = cids(class_list)
    for base, subclasses in json_str_dict.items():
        # Tree layout data only needs the pve skillleveltable CSV.
        csv_path = folderCSV + "skillinfo_skillleveltable_character" + base + "pve.csv"
        for subclass in subclasses:
            output_json_str(csv_path, subclass, class_dict, cid_dict,
                            lvlcap, exclusive_skills, folderJSON)
if __name__ == "__main__":
lvlcap = 90
ver = "versions/ver515/"
print("Current version folder is \n" + ver)
ans = input("Update version folder? y/n")
if ans == "y":
ans = input("Input folder name(verXXX): \n")
ver = "versions/" + ans + "/"
if not os.path.exists(ver):
os.makedirs(ver)
print("========================")
print("Update version folder ended.")
print("========================")
print("======================")
print("Current level cap is \n" + str(lvlcap))
ans = input("Update level cap? y/n")
if ans == "y":
ans = input("Input level cap: \n")
lvlcap = int(ans)
print("========================")
print("Update level cap ended.")
print("========================")
folderCSV = ver+"csv/"
folderDNT = ver+"dnt/"
folderJSON = ver+"json_str/"
folderSKILLJS = ver+"skillJS/"
folders = [folderCSV,folderDNT,folderJSON,folderSKILLJS]
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder)
skilltreetable = folderDNT + "skilltreetable.dnt"
jobtable = folderDNT + "jobtable.dnt"
playerleveltable = folderDNT+"playerleveltable.dnt"
uistring = messages(folderDNT+"uistring.xml")
exclusive_skills = [('7521','7522'),('7523','7524'),('7525','7526'),('7527','7528')]
weapondict = {"-1":"", "0":"Greatsword", "1":"Gauntlet", "2":"Axe", "3":"Hammer",
"4":"Shortbow", "5":"Bow", "6":"Crossbow", "11":"Mace", "12":"Flail",
"13":"Wand", "14":"Shield", "15":"Quiver", "16":"Cannon", "17":"Bubble Blaster",
"23":"Dagger", "25":"Spear", "27":"Bracelet", "30":"Knuckle"}
class_list = ['TINKERER', 'ENGINEER', 'ALCHEMIST', 'SHOOTINGSTAR', 'GEARMASTER', 'ADEPT', 'PHYSICIAN',
'ARCHER', 'SHARPSHOOTER', 'ACROBAT', 'SNIPER', 'WARDEN', 'TEMPEST', 'WINDWALKER',
'ASSASSIN', 'SHINOBI', 'TAOIST', 'RAVEN', 'REAPER', 'LIGHTBRINGER', 'ABYSSWALKER',
'CLERIC', 'PALADIN', 'PRIEST', 'GUARDIAN', 'CRUSADER', 'SAINT', 'INQUISITOR',
'KALI', 'SCREAMER', 'DANCER', 'DARKSUMMONER', 'SOULEATER', 'BLADEDANCER', 'SPIRITDANCER',
'LENCEA', 'LANCER', 'DRAGOON', 'VALKYRIE',
'SORCERESS', 'ELEMENTALIST', 'MYSTIC', 'PYROMANCER', 'ICEWITCH', 'WARMAGE', 'CHAOSMAGE',
'WARRIOR', 'SWORDSMAN', 'MERCENARY', 'GLADIATOR', 'LUNARKNIGHT', 'BARBARIAN', 'DESTROYER',
'AVENGER', 'DARKAVENGER',
"MACHINA", "PATRONA", "RUINA", "DEFENSIO"
]
class_dict = {"GEARMASTER":["TINKERER", "ENGINEER", "GEARMASTER"],
"SHOOTINGSTAR":["TINKERER", "ENGINEER", "SHOOTINGSTAR"],
"ADEPT":["TINKERER", "ALCHEMIST", "ADEPT"],
"PHYSICIAN":["TINKERER", "ALCHEMIST", "PHYSICIAN"],
"SNIPER":["ARCHER", "SHARPSHOOTER", "SNIPER"],
"WARDEN":["ARCHER", "SHARPSHOOTER", "WARDEN"],
"TEMPEST":["ARCHER", "ACROBAT", "TEMPEST"],
"WINDWALKER":["ARCHER", "ACROBAT", "WINDWALKER"],
"RAVEN":["ASSASSIN", "SHINOBI", "RAVEN"],
"REAPER":["ASSASSIN", "SHINOBI", "REAPER"],
"LIGHTBRINGER":["ASSASSIN", "TAOIST", "LIGHTBRINGER"],
"ABYSSWALKER":["ASSASSIN", "TAOIST", "ABYSSWALKER"],
"GUARDIAN":["CLERIC", "PALADIN", "GUARDIAN"],
"CRUSADER":["CLERIC", "PALADIN", "CRUSADER"],
"SAINT":["CLERIC", "PRIEST", "SAINT"],
"INQUISITOR":["CLERIC", "PRIEST", "INQUISITOR"],
"DARKSUMMONER":["KALI", "SCREAMER", "DARKSUMMONER"],
"SOULEATER":["KALI", "SCREAMER", "SOULEATER"],
"BLADEDANCER":["KALI", "DANCER", "BLADEDANCER"],
"SPIRITDANCER":["KALI", "DANCER", "SPIRITDANCER"],
"DRAGOON":["LENCEA", "LANCER", "DRAGOON"],
"VALKYRIE":["LENCEA", "LANCER", "VALKYRIE"],
"PYROMANCER":["SORCERESS", "ELEMENTALIST", "PYROMANCER"],
"ICEWITCH":["SORCERESS", "ELEMENTALIST", "ICEWITCH"],
"WARMAGE":["SORCERESS", "MYSTIC", "WARMAGE"],
"CHAOSMAGE":["SORCERESS", "MYSTIC", "CHAOSMAGE"],
"GLADIATOR":["WARRIOR", "SWORDSMAN", "GLADIATOR"],
"LUNARKNIGHT":["WARRIOR", "SWORDSMAN", "LUNARKNIGHT"],
"BARBARIAN":["WARRIOR", "MERCENARY", "BARBARIAN"],
"DESTROYER":["WARRIOR", "MERCENARY", "DESTROYER"],
"DARKAVENGER":["WARRIOR", "AVENGER", "DARKAVENGER"],
"RUINA":["MACHINA", "PATRONA", "RUINA"],
"DEFENSIO":["MACHINA", "PATRONA", "DEFENSIO"]
}
dnt_list = [("skillleveltable_characteracademicpvp.dnt", "skilltable_characteracademic.dnt"),
("skillleveltable_characterarcherpvp.dnt", "skilltable_characterarcher.dnt"),
("skillleveltable_characterassassinpvp.dnt", "skilltable_characterassassin.dnt"),
("skillleveltable_characterclericpvp.dnt", "skilltable_charactercleric.dnt"),
("skillleveltable_characterkalipvp.dnt", "skilltable_characterkali.dnt"),
("skillleveltable_characterlenceapvp.dnt", "skilltable_characterlencea.dnt"),
("skillleveltable_charactersoceresspvp.dnt", "skilltable_charactersoceress.dnt"),
("skillleveltable_characterwarriorpvp.dnt", "skilltable_characterwarrior.dnt"),
("skillleveltable_characteracademicpve.dnt", "skilltable_characteracademic.dnt"),
("skillleveltable_characterarcherpve.dnt", "skilltable_characterarcher.dnt"),
("skillleveltable_characterassassinpve.dnt", "skilltable_characterassassin.dnt"),
("skillleveltable_characterclericpve.dnt", "skilltable_charactercleric.dnt"),
("skillleveltable_characterkalipve.dnt", "skilltable_characterkali.dnt"),
("skillleveltable_characterlenceapve.dnt", "skilltable_characterlencea.dnt"),
("skillleveltable_charactersoceresspve.dnt", "skilltable_charactersoceress.dnt"),
("skillleveltable_characterwarriorpve.dnt", "skilltable_characterwarrior.dnt"),
("skillleveltable_charactermachinapve.dnt", "skilltable_charactermachina.dnt"),
("skillleveltable_charactermachinapvp.dnt", "skilltable_charactermachina.dnt")
]
json_str_dict = {"academic" : ["SHOOTINGSTAR", "GEARMASTER", "ADEPT", "PHYSICIAN"],
"archer" : ["SNIPER", "WARDEN", "TEMPEST", "WINDWALKER"],
"assassin" : ["RAVEN", "REAPER", "LIGHTBRINGER", "ABYSSWALKER"],
"cleric" : ["GUARDIAN", "CRUSADER", "SAINT", "INQUISITOR"],
"kali" : ["DARKSUMMONER", "SOULEATER", "BLADEDANCER", "SPIRITDANCER"],
"lencea" : ["DRAGOON", "VALKYRIE"],
"soceress" : ["PYROMANCER", "ICEWITCH", "WARMAGE", "CHAOSMAGE"],
"warrior" : ["GLADIATOR", "LUNARKNIGHT", "BARBARIAN", "DESTROYER", "DARKAVENGER"],
"machina" : ["RUINA", "DEFENSIO"]
}
upate(dnt_list, class_list, exclusive_skills, json_str_dict, class_dict, uistring, skilltreetable, jobtable, playerleveltable, weapondict, folderCSV,folderDNT,folderJSON,folderSKILLJS)
#####################################################################################
#####################################################################################
#####################################################################################
#####################################################################################
#####################################################################################
##================================================
##====== Run this when level cap increases =======
##================================================
##==================== csv =======================
##================================================
## 1. Copy the printed information to sim_silveredge.js
## 2. increase the lvlcap in write_levelSP_csv
## 3. Check playerleveltable to see if the ratio of SP changed
#write_levelSP_csv(playerleveltable, jobtable, lvlcap, folderCSV+"levelSP.csv")
#print_SP_for_simJS(lvlcap, folderCSV+"levelSP.csv")
##================================================
##====== Run this when game is updated ===========
##================================================
##==================== csv =======================
##================================================
import time
tStart = time.time()
for i in dnt_list:
skillleveltable = folderDNT+i[0]
skilltable = folderDNT+i[1]
write_skillinfo_csv(skillleveltable, folderCSV, uistring, skilltable, skilltreetable, jobtable, weapondict)
tEnd = time.time()
print(tEnd - tStart)
##================================================
##====== Run this when game is updated ===========
##================================================
##================== skillJS =====================
##================================================
#for i in json_str_dict:
#name1 = "skillinfo_skillleveltable_character" + i + "pve.csv"
#name2 = "skillinfo_changeskill_skillleveltable_character" + i + "pve.csv"
#name3 = i + "_pve"
#output_skillstringJS(folderCSV+name1, folderCSV+name2, name3, folderSKILLJS)
#name1 = "skillinfo_skillleveltable_character" + i + "pvp.csv"
#name2 = "skillinfo_changeskill_skillleveltable_character" + i + "pvp.csv"
#name3 = i + "_pvp"
#output_skillstringJS(folderCSV+name1, folderCSV+name2, name3, folderSKILLJS)
##================================================
##====== Run this only if skill trees changed ====
##================================================
##================= json_str =====================
##================================================
## 1. name must be uppercase and second class
## 2. guess no need for both pve and pvp since it's
## just for tree slot and skill lvl stuff, not
## explanation.
## 3. Add exclusive skill to darkavenger.
#cid_dict = cids(class_list)
#for i in json_str_dict:
#name1 = "skillinfo_skillleveltable_character" + i + "pve.csv"
#for j in json_str_dict[i]:
#output_json_str(folderCSV+name1, j, class_dict, cid_dict, lvlcap, exclusive_skills, folderJSON)
| 2.78125 | 3 |
vows/pil_engine_vows.py | kilinger/thumbor | 0 | 12772803 | <filename>vows/pil_engine_vows.py<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
from pyvows import Vows, expect
ctx = Vows.Context
import thumbor.engines.pil as PIL
class MockImage:
    """Minimal PIL-image stand-in that records which methods were called.

    `calls` keeps the method names in invocation order so tests can assert
    on call ordering (e.g. convert-before-resize).
    """

    def __init__(self, mode, width, height):
        self.calls = []
        self.mode = mode
        self.width = width
        self.height = height

    def convert(self, mode):
        """Switch to *mode*, record the call, and return self (PIL-style)."""
        self.mode = mode
        self.calls.append('convert')
        return self

    def resize(self, size, filter):
        """Adopt the new (width, height), record the call, and return self."""
        self.width = size[0]
        self.height = size[1]
        self.calls.append('resize')
        return self
@Vows.batch
class PilEngineVows(ctx):
    """PyVows specs for the PIL thumbor engine's resize/paste behavior."""

    class ResizedPaletteImage(ctx):
        # Palette ('P') images must be promoted to RGBA before resizing.
        def topic(self):
            engine = PIL.Engine(None)
            engine.image = MockImage('P', 640, 480)
            engine.resize(320, 240)
            return engine.image

        def should_convert_p_to_rgba(self, image):
            expect(image.mode).to_equal('RGBA')
            expect((image.width, image.height)).to_equal((320, 240))
            # The conversion must happen before the resize call.
            expect(image.calls.index('convert') < image.calls.index('resize')).to_be_true()

    class ResizedNonPaletteImage(ctx):
        # Non-palette modes are resized as-is, without any conversion.
        def topic(self):
            engine = PIL.Engine(None)
            engine.image = MockImage('other', 640, 480)
            engine.resize(160, 120)
            return engine.image

        def should_not_convert_non_palette_images(self, image):
            expect(image.mode).to_equal('other')  # unchanged
            expect((image.width, image.height)).to_equal((160, 120))
            expect(image.calls).Not.to_include(['convert'])

    class ShouldRaiseIfFiltersNotAvailable(ctx):
        # paste(..., merge=True) requires compiled filters; simulate their
        # absence by toggling the module flag and restoring it afterwards.
        @Vows.capture_error
        def topic(self):
            FILTERS_AVAILABLE_BAK = PIL.FILTERS_AVAILABLE
            PIL.FILTERS_AVAILABLE = False
            engine = PIL.Engine(None)
            try:
                return engine.paste(None, None, True)
            finally:
                PIL.FILTERS_AVAILABLE = FILTERS_AVAILABLE_BAK

        def should_be_an_error(self, topic):
            expect(topic).to_be_an_error()
            expect(topic).to_be_an_error_like(RuntimeError)
            expected = 'You need filters enabled to use paste with merge. Please reinstall thumbor with proper ' + \
                'compilation of its filters.'
            expect(topic).to_have_an_error_message_of(expected)
publications/leveraging_domain_knowledge/2_false_merge_detection/evaluate/copy_and_crop.py | constantinpape/cluster_tools | 28 | 12772804 | <gh_stars>10-100
#! /bin/python
import os
import sys
import json
import luigi
import z5py
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Validation measure Tasks
#
class CopyAndCropBase(luigi.Task):
    """CopyAndCrop base class.

    Luigi task that copies a region of interest from an input n5/zarr
    dataset to an output dataset via a single cluster job.
    """
    task_name = 'copy_and_crop'
    src_file = os.path.abspath(__file__)
    allow_retry = False

    # Path/key of the source dataset.
    input_path = luigi.Parameter()
    input_key = luigi.Parameter()
    # Path/key of the destination dataset.
    output_path = luigi.Parameter()
    output_key = luigi.Parameter()
    # Region of interest: start coordinates and extent per axis.
    roi_start = luigi.ListParameter()
    roi_size = luigi.ListParameter()

    def run_impl(self):
        # get the global config and init configs
        shebang = self.global_config_values()[0]
        self.init(shebang)

        # load the task config and forward the task parameters to the job.
        config = self.get_task_config()
        config.update({'input_path': self.input_path, 'input_key': self.input_key,
                       'output_path': self.output_path, 'output_key': self.output_key,
                       'roi_start': self.roi_start, 'roi_size': self.roi_size})

        # The whole crop is done by a single job.
        n_jobs = 1
        self.prepare_jobs(n_jobs, None, config)
        self.submit_jobs(n_jobs)

        # wait till jobs finish and check for job success
        self.wait_for_jobs()
        self.check_jobs(n_jobs)
class CopyAndCropLocal(CopyAndCropBase, LocalTask):
    """ CopyAndCrop on local machine

    Mixes the task logic with the local-execution backend.
    """
    pass
class CopyAndCropSlurm(CopyAndCropBase, SlurmTask):
    """ CopyAndCrop on slurm cluster

    Mixes the task logic with the Slurm submission backend.
    """
    pass
class CopyAndCropLSF(CopyAndCropBase, LSFTask):
    """ CopyAndCrop on lsf cluster

    Mixes the task logic with the LSF submission backend.
    """
    pass
#
# Implementation
#
def copy_and_crop(job_id, config_path):
    """Copy a region of interest from the input dataset into a fresh output
    dataset, recording the segmentation's max id as a ``maxId`` attribute.

    All paths, keys and the ROI are read from the JSON job config.
    """
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)

    # Read the job configuration written by the luigi task.
    with open(config_path) as cfg_file:
        cfg = json.load(cfg_file)
    in_path, in_key = cfg['input_path'], cfg['input_key']
    out_path, out_key = cfg['output_path'], cfg['output_key']
    start, size = cfg['roi_start'], cfg['roi_size']
    n_threads = cfg.get('threads_per_job', 1)

    # Translate the ROI into a per-axis slicing bounding box.
    roi = tuple(slice(off, off + extent)
                for off, extent in zip(start, size))

    src = z5py.File(in_path)[in_key]
    src.n_threads = n_threads
    cropped = src[roi]
    seg_max = int(cropped.max())

    dst = z5py.File(out_path).require_dataset(out_key, shape=cropped.shape,
                                              chunks=src.chunks,
                                              compression='gzip',
                                              dtype='uint64')
    dst.n_threads = n_threads
    dst[:] = cropped
    dst.attrs['maxId'] = seg_max

    fu.log_job_success(job_id)
if __name__ == '__main__':
    # The cluster framework invokes this script with the job-config path;
    # the job id is encoded as the trailing "_<id>" of the config filename.
    path = sys.argv[1]
    assert os.path.exists(path), path
    job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
    copy_and_crop(job_id, path)
| 1.945313 | 2 |
ascension/profiler.py | drekels/ascension | 0 | 12772805 | from util import Singleton
import datetime as dt
import logging
import pyglet
from ascension.settings import AscensionConf
# Module-level logger and report/format templates for the profiler.
LOG = logging.getLogger(__name__)


# Warning emitted when a profiled section exceeds its configured target time.
SLOW_FRAME_MESSAGE = (
    "Last execution of '{profiler_label}' lasted {time_passed}, which is over the target "
    "{target_time} by {time_over}!"
)
# Multi-line summary printed by ProfilerBlock.report().
REPORT_MESSAGES = [
    "PROFILER REPORT: {name}",
    "    {time_passed} passed since last report",
    "    {run_count} times ran",
    "    {max_time} max",
    "    {min_time} min",
    "    {average_time} average",
    "    {total_time} total",
    "    {share} share of time passed",
]
# BUGFIX: the literal previously ended in "\xce" (the first byte of the
# UTF-8 encoding of the Greek mu, rendering as 'Î'); use the micro sign
# so durations read e.g. "1s  23ms 456µs".
TIME_FORMAT = "{}s {:>3}ms {:>3}\xb5s"
def get_time_string(t):
    """Format a datetime.timedelta via TIME_FORMAT (seconds / ms / µs).

    Returns the placeholder "NO_VALUE" when *t* is None (no samples yet).
    BUGFIX: the original used truthiness, so a genuine zero-length
    timedelta was also reported as "NO_VALUE"; an explicit None check
    formats zero durations correctly.  Integer division (``//``) keeps the
    millisecond field an int under Python 3 as well.
    """
    if t is None:
        return "NO_VALUE"
    return TIME_FORMAT.format(t.seconds, t.microseconds // 1000, t.microseconds % 1000)
class ProfilerBlock(object):
    """Accumulates wall-clock timings for one named code section.

    Callers bracket the section with start()/stop(); a periodic pyglet
    timer emits a summary via report() every ``report_every`` seconds.
    ``targets`` is a list of (log_level, target_time) pairs: when a single
    run exceeds ``target_time`` a SLOW_FRAME_MESSAGE is logged at that level.
    """

    def __init__(self, name, targets=None, report_every=5):
        self.name = name
        self.targets = targets or []
        self.report_every = report_every
        self.start_time = None
        self.schedule_report()

    def reset_metrics(self):
        """Clear the statistics for the next reporting window."""
        self.report_start = dt.datetime.now()
        self.count = 0
        self.maximum = None
        self.minimum = None
        # BUGFIX: total was previously only initialized by the first stop()
        # of a window, leaving it stale (or missing) after a quiet window.
        self.total = dt.timedelta(0)

    def schedule_report(self):
        """Reset the metrics and schedule periodic report() calls."""
        self.reset_metrics()
        pyglet.clock.schedule_interval(self.report, self.report_every)

    def report(self, *args):
        """Log a summary of the current window, then reset it."""
        report_end = dt.datetime.now()
        time_passed_num = report_end - self.report_start
        # BUGFIX: with no start/stop cycles in this window, the average and
        # share computations below would divide by zero.
        if self.count == 0:
            self.reset_metrics()
            return
        time_passed = get_time_string(time_passed_num)
        max_time = get_time_string(self.maximum)
        min_time = get_time_string(self.minimum)
        average_time = get_time_string(self.total / self.count)
        total_time = get_time_string(self.total)
        share = "{0:.0f}%".format(self.total.total_seconds()*100 / time_passed_num.total_seconds())
        for message in REPORT_MESSAGES:
            LOG.info(message.format(
                name=self.name, time_passed=time_passed, run_count=self.count,
                max_time=max_time, min_time=min_time, average_time=average_time,
                share=share, total_time=total_time
            ))
        # reset_metrics also restarts the window clock (report_start).
        self.reset_metrics()

    def start(self):
        """Mark the beginning of a timed run; must be paired with stop()."""
        if self.start_time:
            raise KeyError(
                "Cannot start profiler '{}', it was not stopped since last call".format(self.name)
            )
        self.start_time = dt.datetime.now()

    def stop(self):
        """Finish a timed run, fold it into the window stats, check targets."""
        if not self.start_time:
            # BUGFIX: the message previously said "Cannot start ... it was
            # not stopped" (copy-paste of the start() error).
            raise KeyError(
                "Cannot stop profiler '{}', it was not started since last call".format(self.name)
            )
        stop_time = dt.datetime.now()
        time_passed = stop_time - self.start_time
        self.start_time = None
        self.count += 1
        if self.count == 1:
            self.maximum = time_passed
            self.minimum = time_passed
        else:
            self.maximum = max(self.maximum, time_passed)
            self.minimum = min(self.minimum, time_passed)
        self.total += time_passed
        for log_level, target_time in self.targets:
            if not target_time or time_passed > target_time:
                # BUGFIX: a falsy target (None) used to raise TypeError on
                # the subtraction below; treat it as a zero target.
                time_over = time_passed - (target_time or dt.timedelta(0))
                getattr(LOG, log_level.lower())(SLOW_FRAME_MESSAGE.format(
                    profiler_label=self.name, time_passed=get_time_string(time_passed),
                    time_over=get_time_string(time_over), target_time=get_time_string(target_time),
                ))
                break
class ProfilerManager(object):
    """Registry of named ProfilerBlock instances.

    NOTE(review): ``__metaclass__`` only has an effect under Python 2; on
    Python 3 this class is NOT a singleton — confirm the intended runtime.
    """
    __metaclass__ = Singleton

    def __init__(self):
        # name -> ProfilerBlock
        self.profilers = {}

    def start_draw(self):
        # Called once per frame; measures the time between draws.
        loopend = dt.datetime.now()
        if hasattr(self, "loopstart"):
            # NOTE(review): record_loop is not defined anywhere in this
            # class or module — presumably provided by a subclass/mixin;
            # as written this raises AttributeError on the second frame.
            self.record_loop()
        self.loopstart = loopend

    def add_profiler(self, profiler):
        # NOTE(review): this only guards against duplicates but never
        # actually registers the profiler (and the message has a typo);
        # confirm whether registration is missing.
        if profiler.name in self.profilers:
            raise KeyError(
                "ProfilerManager already has a prfiler named '{}'".format(profiler.name)
            )

    def start(self, name, targets=None, report_every=5):
        """Start (lazily creating) the profiler *name*; no-op if disabled."""
        if name in AscensionConf.disabled_profilers:
            return
        if name not in self.profilers:
            self.profilers[name] = ProfilerBlock(name, targets=targets, report_every=report_every)
        self.profilers[name].start()

    def stop(self, name):
        """Stop the profiler *name*; no-op if disabled, KeyError if unknown."""
        if name in AscensionConf.disabled_profilers:
            return
        if name not in self.profilers:
            raise KeyError("No such profiler '{}' to stop".format(name))
        self.profilers[name].stop()
| 2.453125 | 2 |
samples/through_put_test/receiver.py | LuigiVanin/ZigBee-Test-Tool | 2 | 12772806 | <gh_stars>1-10
from digi.xbee.devices import ZigBeeDevice
from zigbee_tool.core.tests.throughput import throughput_receiver
PORT_RECEIVER = "/dev/ttyUSB1"
BAUD_RATE = 115200
device_receiver = ZigBeeDevice(PORT_RECEIVER, BAUD_RATE)
try:
device_receiver.open()
print(device_receiver.get_node_id())
throughput_receiver(device_receiver)
finally:
if device_receiver is not None and device_receiver.is_open():
device_receiver.close() | 2.46875 | 2 |
golomb.py | damian1996/Information-Coding | 0 | 12772807 | import math as m
def is_power_of_2(num):
    """Return True iff *num* is a power of two (zero is not).

    A power of two has exactly one set bit, so clearing the lowest set
    bit (num & (num - 1)) leaves zero.
    """
    if num == 0:
        return False
    return (num & (num - 1)) == 0
def code(num, mod):
    """Golomb-encode *num* with parameter *mod* and return the bit string.

    The quotient num // mod is written in unary ('1' * q followed by '0');
    the remainder follows in binary.  For a power-of-two *mod* (Rice
    coding) the remainder takes exactly log2(mod) bits; otherwise the
    standard truncated-binary rule is used: remainders below the cutoff
    take b-1 bits, the rest take b bits shifted up by the cutoff, where
    b = ceil(log2(mod)).

    Fixes vs. the original: a leftover debug ``print`` of the partial code
    is removed, and bit widths are computed exactly with int.bit_length()
    instead of floating-point math.log (which can misround).
    """
    quotient, reminder = divmod(num, mod)
    bits = '1' * quotient + '0'
    if (mod & (mod - 1)) == 0:
        # Rice case: fixed-width remainder of log2(mod) bits.
        width = mod.bit_length() - 1
        bits += bin(reminder)[2:].zfill(width)
    else:
        # (mod - 1).bit_length() == ceil(log2(mod)) for non-powers of two.
        width = (mod - 1).bit_length()
        cutoff = (1 << width) - mod
        if reminder < cutoff:
            bits += bin(reminder)[2:].zfill(width - 1)
        else:
            bits += bin(reminder + cutoff)[2:].zfill(width)
    return bits
def decode(code, mod):
    """Decode a Golomb bit string produced with the same parameter *mod*.

    The unary prefix (ones up to the first '0') gives the quotient; the
    remaining bits hold the remainder, undoing the truncated-binary rule
    for non-power-of-two *mod* (a full-width remainder was shifted up by
    the cutoff during encoding).

    Fix vs. the original: the remainder bit width is computed exactly with
    int.bit_length() instead of floating-point math.log/ceil.
    """
    idx = code.find('0')
    num = mod * idx
    rem_part = code[idx + 1:]
    if (mod & (mod - 1)) == 0:
        # Rice case: remainder is plain fixed-width binary.
        num += int(rem_part, 2)
    else:
        width = (mod - 1).bit_length()   # ceil(log2(mod))
        cutoff = (1 << width) - mod
        if len(rem_part) == width:
            num += int(rem_part, 2) - cutoff
        else:
            num += int(rem_part, 2)
    return num
# Demo: encode/decode 42 and 47 with Golomb parameters 10 and 16 and show
# that decoding round-trips each code word.
N, N2, M1, M2 = 42, 47, 10, 16
c1 = code(N, M2)
c2 = code(N2, M2)
c3 = code(N, M1)
c4 = code(N2, M1)
dc1 = decode(c1, M2)
dc2 = decode(c2, M2)
dc3 = decode(c3, M1)
dc4 = decode(c4, M1)
print(N, c1, dc1)
print(N2, c2, dc2)
print(N, c3, dc3)
# BUGFIX: the last line printed c3/dc3 again instead of N2's code/decoding.
print(N2, c4, dc4)
nova/tests/api/openstack/test_versions.py | armaan/nova | 0 | 12772808 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import feedparser
import json
import stubout
import webob
from lxml import etree
from nova import context
from nova import test
from nova.api.openstack import versions
from nova.api.openstack import views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/compute/api/v1.1'
}
VERSIONS = {
"v1.0": {
"id": "v1.0",
"status": "DEPRECATED",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": "http://docs.rackspacecloud.com/"
"servers/api/v1.0/cs-devguide-20110125.pdf",
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": "http://docs.rackspacecloud.com/"
"servers/api/v1.0/application.wadl",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute-v1.0+xml",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute-v1.0+json",
},
],
},
"v1.1": {
"id": "v1.1",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "application/pdf",
"href": "http://docs.rackspacecloud.com/"
"servers/api/v1.1/cs-devguide-20110125.pdf",
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": "http://docs.rackspacecloud.com/"
"servers/api/v1.1/application.wadl",
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute-v1.1+xml",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute-v1.1+json",
},
],
},
}
class VersionsTest(test.TestCase):
    def setUp(self):
        """Install auth stubs and replace the real VERSIONS with the fixture."""
        super(VersionsTest, self).setUp()
        self.context = context.get_admin_context()
        self.stubs = stubout.StubOutForTesting()
        fakes.stub_out_auth(self.stubs)
        # Stub out VERSIONS so tests see the module-level fixture above.
        self.old_versions = versions.VERSIONS
        versions.VERSIONS = VERSIONS
    def tearDown(self):
        """Restore the real VERSIONS table.

        NOTE(review): self.stubs is never UnsetAll()'d here — confirm the
        base TestCase handles stub cleanup.
        """
        versions.VERSIONS = self.old_versions
        super(VersionsTest, self).tearDown()
    def test_get_version_list(self):
        """GET / returns a JSON list of both API versions with self links."""
        req = webob.Request.blank('/')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        versions = json.loads(res.body)["versions"]
        expected = [
            {
                "id": "v1.0",
                "status": "DEPRECATED",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.0/",
                    }],
            },
            {
                "id": "v1.1",
                "status": "CURRENT",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/",
                    }],
            },
        ]
        self.assertEqual(versions, expected)
    def test_get_version_1_0_detail(self):
        """GET /v1.0/ returns the full JSON detail, links and media types."""
        req = webob.Request.blank('/v1.0/')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = json.loads(res.body)
        expected = {
            "version": {
                "id": "v1.0",
                "status": "DEPRECATED",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.0/",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/pdf",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.0/cs-devguide-20110125.pdf",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/vnd.sun.wadl+xml",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.0/application.wadl",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/"
                                "vnd.openstack.compute-v1.0+xml",
                    },
                    {
                        "base": "application/json",
                        "type": "application/"
                                "vnd.openstack.compute-v1.0+json",
                    },
                ],
            },
        }
        self.assertEqual(expected, version)
    def test_get_version_1_1_detail(self):
        """GET /v1.1/ returns the full JSON detail, links and media types."""
        req = webob.Request.blank('/v1.1/')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = json.loads(res.body)
        expected = {
            "version": {
                "id": "v1.1",
                "status": "CURRENT",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/pdf",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.1/cs-devguide-20110125.pdf",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/vnd.sun.wadl+xml",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.1/application.wadl",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/"
                                "vnd.openstack.compute-v1.1+xml",
                    },
                    {
                        "base": "application/json",
                        "type": "application/"
                                "vnd.openstack.compute-v1.1+json",
                    },
                ],
            },
        }
        self.assertEqual(expected, version)
    def test_get_version_1_0_detail_xml(self):
        """GET /v1.0/ as XML validates against the schema and matches VERSIONS."""
        req = webob.Request.blank('/v1.0/')
        req.accept = "application/xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/xml")
        version = etree.XML(res.body)
        xmlutil.validate_schema(version, 'version')

        expected = VERSIONS['v1.0']
        self.assertTrue(version.xpath('/ns:version', namespaces=NS))
        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.compare_media_types(media_types,
                                             expected['media-types']))
        for key in ['id', 'status', 'updated']:
            self.assertEqual(version.get(key), expected[key])
        links = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(links,
            [{'rel': 'self', 'href': 'http://localhost/v1.0/'}]
            + expected['links']))
def test_get_version_1_1_detail_xml(self):
    """GET /v1.1/ as XML returns a schema-valid version document that
    matches the VERSIONS['v1.1'] fixture."""
    req = webob.Request.blank('/v1.1/')
    req.accept = "application/xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 200)
    self.assertEqual(res.content_type, "application/xml")
    version = etree.XML(res.body)
    xmlutil.validate_schema(version, 'version')
    expected = VERSIONS['v1.1']
    self.assertTrue(version.xpath('/ns:version', namespaces=NS))
    media_types = version.xpath('ns:media-types/ns:media-type',
                                namespaces=NS)
    self.assertTrue(common.compare_media_types(media_types,
                                               expected['media-types']))
    for key in ['id', 'status', 'updated']:
        self.assertEqual(version.get(key), expected[key])
    # The served document adds a self link in front of the fixture links.
    links = version.xpath('atom:link', namespaces=NS)
    self.assertTrue(common.compare_links(links,
        [{'rel': 'self', 'href': 'http://localhost/v1.1/'}]
        + expected['links']))
def test_get_version_list_xml(self):
    """GET / as XML lists both API versions.

    Validates the response against the 'versions' schema and checks each
    version element's id/status/updated attributes and its self link.
    """
    req = webob.Request.blank('/')
    req.accept = "application/xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 200)
    self.assertEqual(res.content_type, "application/xml")
    root = etree.XML(res.body)
    # NOTE: a leftover debug `print res.body` was removed here; it polluted
    # test output and was Python-2-only syntax.
    xmlutil.validate_schema(root, 'versions')
    self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
    versions = root.xpath('ns:version', namespaces=NS)
    self.assertEqual(len(versions), 2)
    for i, v in enumerate(['v1.0', 'v1.1']):
        version = versions[i]
        expected = VERSIONS[v]
        for key in ['id', 'status', 'updated']:
            self.assertEqual(version.get(key), expected[key])
        (link,) = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(link,
            [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
def test_get_version_1_0_detail_atom(self):
    """GET /v1.0/ as Atom returns a schema-valid feed describing v1.0."""
    req = webob.Request.blank('/v1.0/')
    req.accept = "application/atom+xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 200)
    self.assertEqual("application/atom+xml", res.content_type)
    xmlutil.validate_schema(etree.XML(res.body), 'atom')
    f = feedparser.parse(res.body)
    self.assertEqual(f.feed.title, 'About This Version')
    self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(f.feed.id, 'http://localhost/v1.0/')
    self.assertEqual(f.feed.author, 'Rackspace')
    self.assertEqual(f.feed.author_detail.href,
                     'http://www.rackspace.com/')
    self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.0/')
    self.assertEqual(f.feed.links[0]['rel'], 'self')
    self.assertEqual(len(f.entries), 1)
    entry = f.entries[0]
    self.assertEqual(entry.id, 'http://localhost/v1.0/')
    self.assertEqual(entry.title, 'Version v1.0')
    self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(len(entry.content), 1)
    self.assertEqual(entry.content[0].value,
                     'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)')
    # One self link plus two 'describedby' documentation links.
    self.assertEqual(len(entry.links), 3)
    self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/')
    self.assertEqual(entry.links[0]['rel'], 'self')
    self.assertEqual(entry.links[1], {
        'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'
                'cs-devguide-20110125.pdf',
        'type': 'application/pdf',
        'rel': 'describedby'})
    self.assertEqual(entry.links[2], {
        'href': 'http://docs.rackspacecloud.com/servers/api/v1.0/'
                'application.wadl',
        'type': 'application/vnd.sun.wadl+xml',
        'rel': 'describedby'})
def test_get_version_1_1_detail_atom(self):
    """GET /v1.1/ as Atom returns a schema-valid feed describing v1.1."""
    req = webob.Request.blank('/v1.1/')
    req.accept = "application/atom+xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 200)
    self.assertEqual("application/atom+xml", res.content_type)
    xmlutil.validate_schema(etree.XML(res.body), 'atom')
    f = feedparser.parse(res.body)
    self.assertEqual(f.feed.title, 'About This Version')
    self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(f.feed.id, 'http://localhost/v1.1/')
    self.assertEqual(f.feed.author, 'Rackspace')
    self.assertEqual(f.feed.author_detail.href,
                     'http://www.rackspace.com/')
    self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/')
    self.assertEqual(f.feed.links[0]['rel'], 'self')
    self.assertEqual(len(f.entries), 1)
    entry = f.entries[0]
    self.assertEqual(entry.id, 'http://localhost/v1.1/')
    self.assertEqual(entry.title, 'Version v1.1')
    self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(len(entry.content), 1)
    self.assertEqual(entry.content[0].value,
                     'Version v1.1 CURRENT (2011-01-21T11:33:21Z)')
    # One self link plus two 'describedby' documentation links.
    self.assertEqual(len(entry.links), 3)
    self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/')
    self.assertEqual(entry.links[0]['rel'], 'self')
    self.assertEqual(entry.links[1], {
        'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'
                'cs-devguide-20110125.pdf',
        'type': 'application/pdf',
        'rel': 'describedby'})
    self.assertEqual(entry.links[2], {
        'href': 'http://docs.rackspacecloud.com/servers/api/v1.1/'
                'application.wadl',
        'type': 'application/vnd.sun.wadl+xml',
        'rel': 'describedby'})
def test_get_version_list_atom(self):
    """GET / as Atom lists both API versions as feed entries."""
    req = webob.Request.blank('/')
    req.accept = "application/atom+xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 200)
    self.assertEqual(res.content_type, "application/atom+xml")
    f = feedparser.parse(res.body)
    self.assertEqual(f.feed.title, 'Available API Versions')
    self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(f.feed.id, 'http://localhost/')
    self.assertEqual(f.feed.author, 'Rackspace')
    self.assertEqual(f.feed.author_detail.href,
                     'http://www.rackspace.com/')
    self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
    self.assertEqual(f.feed.links[0]['rel'], 'self')
    self.assertEqual(len(f.entries), 2)
    # First entry: deprecated v1.0.
    entry = f.entries[0]
    self.assertEqual(entry.id, 'http://localhost/v1.0/')
    self.assertEqual(entry.title, 'Version v1.0')
    self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(len(entry.content), 1)
    self.assertEqual(entry.content[0].value,
                     'Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)')
    self.assertEqual(len(entry.links), 1)
    self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.0/')
    self.assertEqual(entry.links[0]['rel'], 'self')
    # Second entry: current v1.1.
    entry = f.entries[1]
    self.assertEqual(entry.id, 'http://localhost/v1.1/')
    self.assertEqual(entry.title, 'Version v1.1')
    self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
    self.assertEqual(len(entry.content), 1)
    self.assertEqual(entry.content[0].value,
                     'Version v1.1 CURRENT (2011-01-21T11:33:21Z)')
    self.assertEqual(len(entry.links), 1)
    self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/')
    self.assertEqual(entry.links[0]['rel'], 'self')
def test_multi_choice_image(self):
    """An unversioned /images/1 request yields 300 Multiple Choices
    listing both API versions (JSON body)."""
    req = webob.Request.blank('/images/1')
    req.accept = "application/json"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 300)
    self.assertEqual(res.content_type, "application/json")
    # Choices are ordered newest-first: v1.1 (CURRENT), then v1.0.
    expected = {
        "choices": [
            {
                "id": "v1.1",
                "status": "CURRENT",
                "links": [
                    {
                        "href": "http://localhost/v1.1/images/1",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.1+xml"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.1+json"
                    },
                ],
            },
            {
                "id": "v1.0",
                "status": "DEPRECATED",
                "links": [
                    {
                        "href": "http://localhost/v1.0/images/1",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.0+xml"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.0+json"
                    },
                ],
            },
        ], }
    self.assertDictMatch(expected, json.loads(res.body))
def test_multi_choice_image_xml(self):
    """An unversioned /images/1 request yields 300 Multiple Choices
    listing both API versions (XML body)."""
    req = webob.Request.blank('/images/1')
    req.accept = "application/xml"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 300)
    self.assertEqual(res.content_type, "application/xml")
    root = etree.XML(res.body)
    self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
    choices = root.xpath('ns:version', namespaces=NS)
    self.assertEqual(len(choices), 2)
    # Newest-first ordering: v1.1 (CURRENT), then v1.0 (DEPRECATED).
    expectations = [
        ('v1.1', 'CURRENT', 'http://localhost/v1.1/images/1'),
        ('v1.0', 'DEPRECATED', 'http://localhost/v1.0/images/1'),
    ]
    for node, (version_id, status, href) in zip(choices, expectations):
        self.assertEqual(node.get('id'), version_id)
        self.assertEqual(node.get('status'), status)
        found_media_types = node.xpath('ns:media-types/ns:media-type',
                                       namespaces=NS)
        self.assertTrue(common.compare_media_types(
            found_media_types, VERSIONS[version_id]['media-types']))
        found_links = node.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(
            found_links, [{'rel': 'self', 'href': href}]))
def test_multi_choice_server_atom(self):
    """A 300 Multiple Choices response must not be served as
    application/atom+xml; it falls back to the JSON default."""
    request = webob.Request.blank('/servers/2')
    request.accept = "application/atom+xml"
    response = request.get_response(fakes.wsgi_app())
    self.assertEqual(response.status_int, 300)
    self.assertEqual(response.content_type, "application/json")
def test_multi_choice_server(self):
    """An unversioned /servers/2 request yields 300 Multiple Choices
    listing both API versions (JSON body)."""
    req = webob.Request.blank('/servers/2')
    req.accept = "application/json"
    res = req.get_response(fakes.wsgi_app())
    self.assertEqual(res.status_int, 300)
    self.assertEqual(res.content_type, "application/json")
    # Choices are ordered newest-first: v1.1 (CURRENT), then v1.0.
    expected = {
        "choices": [
            {
                "id": "v1.1",
                "status": "CURRENT",
                "links": [
                    {
                        "href": "http://localhost/v1.1/servers/2",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.1+xml"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.1+json"
                    },
                ],
            },
            {
                "id": "v1.0",
                "status": "DEPRECATED",
                "links": [
                    {
                        "href": "http://localhost/v1.0/servers/2",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.0+xml"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.0+json"
                    },
                ],
            },
        ], }
    self.assertDictMatch(expected, json.loads(res.body))
class VersionsViewBuilderTests(test.TestCase):
    """Tests for the versions view builder."""

    def test_view_builder(self):
        """build_versions() wraps raw version data and adds a self link."""
        root = "http://example.org/"
        raw_versions = {
            "v3.2.1": {
                "id": "3.2.1",
                "status": "CURRENT",
                "updated": "2011-07-18T11:30:00Z",
            }
        }
        want = {
            "versions": [
                {
                    "id": "3.2.1",
                    "status": "CURRENT",
                    "updated": "2011-07-18T11:30:00Z",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://example.org/3.2.1/",
                        },
                    ],
                }
            ]
        }
        got = views.versions.ViewBuilder(root).build_versions(raw_versions)
        self.assertEqual(got, want)

    def test_generate_href(self):
        """generate_href() joins the base URL and version number with a
        trailing slash."""
        href = views.versions.ViewBuilder(
            "http://example.org/app/").generate_href("v1.4.6")
        self.assertEqual(href, "http://example.org/app/v1.4.6/")
class VersionsSerializerTests(test.TestCase):
    """Tests for the XML and Atom serializers of version documents."""

    def test_versions_list_xml_serializer(self):
        """index() serializes a version list to schema-valid XML."""
        versions_data = {
            'versions': [
                {
                    "id": "2.7.1",
                    "updated": "2011-07-18T11:30:00Z",
                    "status": "DEPRECATED",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/2.7.1",
                        },
                    ],
                },
            ]
        }
        serializer = versions.VersionsXMLSerializer()
        response = serializer.index(versions_data)
        root = etree.XML(response)
        xmlutil.validate_schema(root, 'versions')
        self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
        version_elems = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(len(version_elems), 1)
        version = version_elems[0]
        self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
        self.assertEqual(version.get('status'),
                         versions_data['versions'][0]['status'])
        # The serializer is expected to add an atom+xml type to the link.
        (link,) = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(link, [{
            'rel': 'self',
            'href': 'http://test/2.7.1',
            'type': 'application/atom+xml'}]))

    def test_versions_multi_xml_serializer(self):
        """multi() serializes a 300 Multiple Choices document to XML."""
        versions_data = {
            'choices': [
                {
                    "id": "2.7.1",
                    "updated": "2011-07-18T11:30:00Z",
                    "status": "DEPRECATED",
                    "media-types": VERSIONS['v1.1']['media-types'],
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/2.7.1/images",
                        },
                    ],
                },
            ]
        }
        serializer = versions.VersionsXMLSerializer()
        response = serializer.multi(versions_data)
        root = etree.XML(response)
        self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
        (version,) = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
        self.assertEqual(version.get('status'),
                         versions_data['choices'][0]['status'])
        # The first child of a version element must be the media-types node.
        media_types = list(version)[0]
        media_type_nodes = list(media_types)
        self.assertEqual(media_types.tag.split('}')[1], "media-types")
        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.compare_media_types(media_types,
                                    versions_data['choices'][0]['media-types']))
        (link,) = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(link,
                                    versions_data['choices'][0]['links']))

    def test_version_detail_xml_serializer(self):
        """show() serializes a full version description to XML with the
        v1.1 namespace, a media-types node, and Atom links."""
        version_data = {
            "version": {
                "id": "v1.0",
                "status": "CURRENT",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.0/",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/pdf",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.0/cs-devguide-20110125.pdf",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/vnd.sun.wadl+xml",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.0/application.wadl",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.0+xml",
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.0+json",
                    },
                ],
            },
        }
        serializer = versions.VersionsXMLSerializer()
        response = serializer.show(version_data)
        root = etree.XML(response)
        self.assertEqual(root.tag.split('}')[1], "version")
        self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11)
        # Children layout: media-types first, then the three Atom links.
        children = list(root)
        media_types = children[0]
        media_type_nodes = list(media_types)
        links = (children[1], children[2], children[3])
        self.assertEqual(media_types.tag.split('}')[1], 'media-types')
        for i, media_node in enumerate(media_type_nodes):
            self.assertEqual(media_node.tag.split('}')[1], 'media-type')
            for key, val in version_data['version']['media-types'][i].items():
                self.assertEqual(val, media_node.get(key))
        for i, link in enumerate(links):
            self.assertEqual(link.tag.split('}')[0].strip('{'),
                             'http://www.w3.org/2005/Atom')
            self.assertEqual(link.tag.split('}')[1], 'link')
            for key, val in version_data['version']['links'][i].items():
                self.assertEqual(val, link.get(key))

    def test_versions_list_atom_serializer(self):
        """index() of the Atom serializer renders a feed with one entry
        per version."""
        versions_data = {
            'versions': [
                {
                    "id": "2.9.8",
                    "updated": "2011-07-20T11:40:00Z",
                    "status": "CURRENT",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/2.9.8",
                        },
                    ],
                },
            ]
        }
        serializer = versions.VersionsAtomSerializer()
        response = serializer.index(versions_data)
        f = feedparser.parse(response)
        self.assertEqual(f.feed.title, 'Available API Versions')
        self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
        self.assertEqual(f.feed.id, 'http://test/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://test/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')
        self.assertEqual(len(f.entries), 1)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://test/2.9.8')
        self.assertEqual(entry.title, 'Version 2.9.8')
        self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
                         'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
        self.assertEqual(len(entry.links), 1)
        self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
        self.assertEqual(entry.links[0]['rel'], 'self')

    def test_version_detail_atom_serializer(self):
        """show() of the Atom serializer renders a single-entry feed with
        the version's self and describedby links."""
        versions_data = {
            "version": {
                "id": "v1.1",
                "status": "CURRENT",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v1.1/",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/pdf",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.1/cs-devguide-20110125.pdf",
                    },
                    {
                        "rel": "describedby",
                        "type": "application/vnd.sun.wadl+xml",
                        "href": "http://docs.rackspacecloud.com/"
                                "servers/api/v1.1/application.wadl",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute-v1.1+xml",
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute-v1.1+json",
                    }
                ],
            },
        }
        serializer = versions.VersionsAtomSerializer()
        response = serializer.show(versions_data)
        f = feedparser.parse(response)
        self.assertEqual(f.feed.title, 'About This Version')
        self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(f.feed.id, 'http://localhost/v1.1/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v1.1/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')
        self.assertEqual(len(f.entries), 1)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://localhost/v1.1/')
        self.assertEqual(entry.title, 'Version v1.1')
        self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
                         'Version v1.1 CURRENT (2011-01-21T11:33:21Z)')
        self.assertEqual(len(entry.links), 3)
        self.assertEqual(entry.links[0]['href'], 'http://localhost/v1.1/')
        self.assertEqual(entry.links[0]['rel'], 'self')
        self.assertEqual(entry.links[1], {
            'rel': 'describedby',
            'type': 'application/pdf',
            'href': 'http://docs.rackspacecloud.com/'
                    'servers/api/v1.1/cs-devguide-20110125.pdf'})
        self.assertEqual(entry.links[2], {
            'rel': 'describedby',
            'type': 'application/vnd.sun.wadl+xml',
            'href': 'http://docs.rackspacecloud.com/'
                    'servers/api/v1.1/application.wadl',
        })
| 1.28125 | 1 |
src/Instructions/beq.py | martydill/PyCIL | 3 | 12772809 | <filename>src/Instructions/beq.py<gh_stars>1-10
# beq.py
# The CIL beq instruction
# Copyright 2010 <NAME> - see LICENSE for details
from Instruction import Instruction
import unittest
from Instructions.ldc import ldc
from Instructions.Instruction import register
from Variable import Variable
class beq(Instruction):
    """The CIL ``beq`` instruction: branch to a target label when the two
    topmost stack values are equal.

    Supports the long form (``beq <target>``, opcode 0x3B) and the short
    form (``beq.s <target>``, opcode 0x2E).
    """

    def __init__(self, args):
        self.name = 'beq.' + args
        # Only the short form starts with 's' followed by a space.  The
        # previous check, args.startswith('s'), misparsed any long-form
        # target label beginning with 's' (e.g. 'beq start' became the
        # short form with target 'art').
        if args.startswith('s '):
            self.suffix = '.s'
            self.target = args[2:]
            self.opcode = 0x2E
        else:
            self.opcode = 0x3B
            self.suffix = ''
            self.target = args

    def execute(self, vm):
        """Pop two values; jump to the target label if they are equal."""
        # FIXME: should verify there are at least two values on the stack.
        value2 = vm.stack.pop()
        value1 = vm.stack.pop()
        if value1.value == value2.value:
            index = vm.find_instruction_pointer_by_label(self.target)
            vm.current_stack_frame().instructionPointer = index
        # FIXME: check for null objects


register('beq', beq)
class beqTest(unittest.TestCase):
    """Unit tests for the beq (branch-if-equal) instruction."""

    def _run_beq(self, args, first, second):
        """Build a four-instruction method whose last instruction is
        labelled 'asdf', push *first* and *second*, execute ``beq args``,
        and return the resulting VM for inspection."""
        from VM import VM
        from MethodDefinition import MethodDefinition
        vm = VM()
        m = MethodDefinition()
        m.maxStack = 3
        filler = ldc('i4.1')
        m.instructions.append(filler)
        m.instructions.append(filler)
        m.instructions.append(filler)
        dest = ldc('i4.3')
        dest.label = 'asdf'
        m.instructions.append(dest)
        vm.set_current_method(m)
        vm.stack.push(Variable(first))
        vm.stack.push(Variable(second))
        beq(args).execute(vm)
        return vm

    def test_execute_true(self):
        """Equal operands: the long form branches to the label (index 3)."""
        vm = self._run_beq('asdf', 1, 1)
        self.assertEqual(3, vm.get_instruction_pointer())
        self.assertEqual(vm.stack.count(), 0)

    def test_execute_false(self):
        """Unequal operands: the long form does not branch."""
        vm = self._run_beq('asdf', 0, 987)
        self.assertEqual(0, vm.get_instruction_pointer())
        self.assertEqual(vm.stack.count(), 0)

    def test_execute_true_s(self):
        """Equal operands: the short form branches to the label."""
        vm = self._run_beq('s asdf', 222, 222)
        self.assertEqual(3, vm.get_instruction_pointer())
        self.assertEqual(vm.stack.count(), 0)

    def test_execute_false_s(self):
        """Unequal operands: the short form does not branch."""
        vm = self._run_beq('s asdf', 0, 987)
        self.assertEqual(0, vm.get_instruction_pointer())
        self.assertEqual(vm.stack.count(), 0)
env.example.py | linnil1/sync-ytchat-to-discord | 2 | 12772810 | DISCORD_TOKEN="<PASSWORD>"
| 1.054688 | 1 |
fah_xchem/analysis/structures.py | choderalab/fah-xchem | 3 | 12772811 | """
Tools for extracting snapshots and structures from core22 FAH trajectories.
Limitations:
* The reference structure (`natoms_reference`) must share the same atom ordering as the first `natoms_reference` atoms of the trajectory.
For now, this means that the SpruceTK prepared structure (`Mpro-x10789_0_bound-protein-thiolate.pdb`) is used
Dependencies:
* mdtraj >= 1.9.4 (conda-forge)
"""
from functools import partial
import logging
import pathlib
import multiprocessing
import os
import tempfile
from typing import Dict, List, Optional
from pydantic import BaseModel, Field
import joblib
import mdtraj as md
from ..schema import TransformationAnalysis, AnalysisConfig
class SnapshotArtifactory(BaseModel):
    """Structural snapshot creator.

    Extracts snapshots from core22 FAH trajectories, aligns them to a
    reference fragment structure, and writes protein/ligand/complex files
    per RUN.
    """

    # Analysis settings (reference structure location, target name, etc.).
    config: AnalysisConfig
    project_dir: pathlib.Path = Field(
        description="Path to project directory (e.g. '/home/server/server2/projects/13422')"
    )
    project_data_dir: pathlib.Path = Field(
        description="Path to project data directory (e.g. '/home/server/server2/data/SVR314342810/PROJ13422')"
    )
    cache_dir: pathlib.Path = Field(
        None,
        description="If specified, cache relevant parts of 'htf.npz' file in a local directory of this name",
    )

    @staticmethod
    def _transformation_to_file_mapping(output_dir, run_id, ligand):
        """Return the output file paths written for one ligand ('old' or
        'new') of the given RUN: protein PDB, complex PDB, and ligand SDF."""
        fnames = [
            f"{ligand}_protein.pdb",
            f"{ligand}_complex.pdb",
            f"{ligand}_ligand.sdf",
        ]
        outfiles = [
            os.path.join(output_dir, f"RUN{run_id}", f"{fname}") for fname in fnames
        ]
        return outfiles

    @staticmethod
    def load_trajectory(
        project_dir: str, project_data_dir: str, run: int, clone: int, gen: int
    ) -> md.Trajectory:
        """
        Load the trajectory from the specified PRCG.

        Parameters
        ----------
        project_dir : str
            Path to project directory (e.g. '/home/server/server2/projects/13422')
        project_data_dir : str
            Path to project data directory (e.g. '/home/server/server2/data/SVR314342810/PROJ13422')
        run : int
            Run (e.g. 0)
        clone : int
            Clone (e.g. 0)
        gen : int
            Gen (e.g. 0)

        Returns
        -------
        trajectory : mdtraj.Trajectory
            The trajectory

        Raises
        ------
        ValueError
            If either the topology PDB or the XTC trajectory cannot be read.
        """
        # The RUN's hybrid-complex PDB provides the topology for the XTC.
        pdbfile_path = os.path.join(
            project_dir, "RUNS", f"RUN{run}", "hybrid_complex.pdb"
        )
        # TODO: Reuse path logic from fah_xchem.lib
        trajectory_path = os.path.join(
            project_data_dir,
            f"RUN{run}",
            f"CLONE{clone}",
            f"results{gen}",
            "positions.xtc",
        )
        try:
            pdbfile = md.load(pdbfile_path)
        except OSError as e:
            raise ValueError(f"Failed to load PDB file: {e}")
        try:
            trajectory = md.load(trajectory_path, top=pdbfile.top)
        except OSError as e:
            raise ValueError(f"Failed to load trajectory: {e}")
        return trajectory

    @staticmethod
    def load_fragment(
        structure_path: pathlib.Path,
        target_name: str,
        fragment_id: str,
        annotations: str,
        component: str,
    ) -> md.Trajectory:
        """
        Load the reference fragment structure

        Parameters
        ----------
        structure_path : pathlib.Path
            Path to reference structure directory.
        target_name : str
            Name of target (e.g. 'Mpro').
        fragment_id : str
            Fragment ID (e.g. 'x10789').
        annotations : str
            Additional characters in the reference file name (e.g. '_0A_bound').
        component : str
            Component of the system the reference corresponds to (e.g. 'protein')

        Returns
        -------
        fragment : mdtraj.Trajectory
            The fragment structure
        """
        # several components here: path, target name, fragment id, annotations (e.g. "0A_bound"), and component (e.g. "protein", "ligand")
        # separated by hyphens
        # TODO: Put this in the covid-moonshot path, or generalize to an arbitrary file
        # fragment = md.load(
        #     f"/home/server/server2/projects/available/covid-moonshot/receptors/monomer/Mpro-{fragment_id}_0A_bound-protein.pdb"
        # )
        fragment = md.load(
            f"{structure_path}/{target_name}-{fragment_id}{annotations}-{component}.pdb"
        )
        return fragment

    def _mdtraj_to_oemol(self, snapshot: md.Trajectory):
        """
        Create an OEMol from an MDTraj file by writing and reading

        NOTE: This uses terrible heuristics

        Parameters
        ----------
        snapshot : mdtraj.Trajectory
            MDTraj Trajectory with a single snapshot

        Returns
        -------
        oemol : openeye.oechem.OEMol
            The OEMol
        """
        from openeye import oechem

        # Round-trip through a temporary PDB file because no direct
        # MDTraj -> OpenEye conversion is used here.
        with tempfile.TemporaryDirectory() as tmpdir:
            filename = os.path.join(tmpdir, "tmp.pdb")
            # Write the PDB file
            snapshot.save(filename)
            # Read it with OpenEye
            with oechem.oemolistream(filename) as ifs:
                for mol in ifs.GetOEGraphMols():
                    return mol

    def extract_snapshot(
        self,
        project_dir: str,
        project_data_dir: str,
        run: int,
        clone: int,
        gen: int,
        frame: int,
        fragment_id: str,
    ):
        """
        Extract the specified snapshot, align it to the reference fragment, and write protein and ligands to separate PDB files

        Parameters
        ----------
        project_dir : str
            Path to project directory (e.g. '/home/server/server2/projects/13422')
        project_data_dir : str
            Path to project data directory (e.g. '/home/server/server2/data/SVR314342810/PROJ13422')
        run : str or int
            Run (e.g. '0')
        clone : str or int
            Clone (e.g. '0')
        gen : str or int
            Gen (e.g. '0')
        frame : int
            Frame index within the trajectory to extract.
        fragment_id : str
            Fragment ID (e.g. 'x10789')

        Returns
        -------
        sliced_snapshot : dict of str : mdtraj.Trajectory
            sliced_snapshot[name] is the Trajectory for name in ['protein', 'old_ligand', 'new_ligand', 'old_complex', 'new_complex']
        components : dict of str : oechem.OEMol
            components[name] is the OEMol for name in ['protein', 'old_ligand', 'new_ligand']
        """
        # Load the trajectory
        trajectory = self.load_trajectory(
            project_dir, project_data_dir, run, clone, gen
        )
        # Load the fragment
        fragment = self.load_fragment(structure_path=self.config.structure_path,
                                      target_name=self.config.target_name,
                                      fragment_id=fragment_id,
                                      annotations=self.config.annotations,
                                      component=self.config.component)
        # Align the trajectory to the fragment (in place)
        # trajectory.image_molecules(inplace=True)  # No need to image molecules anymore now that perses adds zero-energy bonds between protein and ligand!
        # trajectory.superpose(fragment, atom_indices=fragment.top.select("name CA"))
        # TODO: fix this hardcode for *MPro*!
        # NOTE(review): the residue selection below hard-codes Mpro active
        # site residues; other targets will silently align on the wrong
        # atoms -- confirm before reusing for new targets.
        trajectory.superpose(
            fragment,
            atom_indices=fragment.top.select(
                "(name CA) and (residue 145 or residue 41 or residue 164 or residue 165 or residue 142 or residue 163)"
            ),
        )  # DEBUG : Mpro active site only
        # Extract the snapshot
        snapshot = trajectory[frame]
        # Slice out old or new state
        sliced_snapshot = self.slice_snapshot(
            snapshot, project_dir, run, self.cache_dir
        )
        # Convert to OEMol
        # NOTE: This uses heuristics, and should be replaced once we start storing actual chemical information
        components = dict()
        for name in ["protein", "old_ligand", "new_ligand"]:
            components[name] = self._mdtraj_to_oemol(sliced_snapshot[name])
        return sliced_snapshot, components

    @staticmethod
    def _get_stored_atom_indices(project_dir: str, run: int):
        """
        Load hybrid topology file and return relevant atom indices.

        Returns a dict mapping each component name ('protein', 'old_ligand',
        'new_ligand', 'old_complex', 'new_complex') to the heavy-atom
        indices of that component within the stored positions.xtc frames.
        """
        import numpy as np

        path = os.path.join(project_dir, "RUNS", f"RUN{run}")
        htf = np.load(os.path.join(path, "htf.npz"), allow_pickle=True)[
            "arr_0"
        ].tolist()

        # Determine mapping between hybrid topology and stored atoms in the positions.xtc
        # <xtcAtoms v="solute"/> eliminates waters
        nonwater_atom_indices = htf.hybrid_topology.select("not water")
        hybrid_to_stored_map = {
            nonwater_atom_indices[index]: index
            for index in range(len(nonwater_atom_indices))
        }

        # Get all atom indices from the hybrid system
        # Omit hydrogens
        protein_atom_indices = htf.hybrid_topology.select("protein and (mass > 1.1)")
        hybrid_ligand_atom_indices = htf.hybrid_topology.select(
            "resn MOL and (mass > 1.1)"
        )

        # Identify atom index subsets for the old and new ligands from the hybrid system
        old_ligand_atom_indices = [
            index
            for index in hybrid_ligand_atom_indices
            if index in htf._old_to_hybrid_map.values()
        ]
        new_ligand_atom_indices = [
            index
            for index in hybrid_ligand_atom_indices
            if index in htf._new_to_hybrid_map.values()
        ]

        # Compute sliced atom indices using atom indices within positions.xtc
        return {
            "protein": [hybrid_to_stored_map[index] for index in protein_atom_indices],
            "old_ligand": [
                hybrid_to_stored_map[index] for index in old_ligand_atom_indices
            ],
            "new_ligand": [
                hybrid_to_stored_map[index] for index in new_ligand_atom_indices
            ],
            "old_complex": [
                hybrid_to_stored_map[index]
                for index in list(protein_atom_indices) + list(old_ligand_atom_indices)
            ],
            "new_complex": [
                hybrid_to_stored_map[index]
                for index in list(protein_atom_indices) + list(new_ligand_atom_indices)
            ],
        }

    def slice_snapshot(
        self,
        snapshot: md.Trajectory,
        project_dir: str,
        run: int,
        cache_dir: Optional[str],
    ) -> Dict[str, md.Trajectory]:
        """
        Slice snapshot to specified state in-place

        .. TODO ::
           The htf.npz file is very slow to load.
           Replace this with a JSON file containing relevant ligand indices only

        Parameters
        ----------
        snapshot : mdtraj.Trajectory
           Snapshot to slice
        project_dir : str
           Path to project directory (e.g. '/home/server/server2/projects/13422')
        run : int
           Run (e.g. '0')
        cache_dir : str or None
           If specified, cache relevant parts of "htf.npz" file in a local directory of this name

        Returns
        -------
        sliced_snapshot : dict of str : mdtraj.Trajectory
          sliced_snapshot[x] where x is one of ['protein', 'old_ligand', 'new_ligand', 'old_complex', 'new_complex']
        """
        # NOTE(review): 'cachedir' is the legacy joblib.Memory keyword
        # (renamed to 'location' in joblib >= 0.12) -- confirm the pinned
        # joblib version still accepts it.
        get_stored_atom_indices_cached = (
            self._get_stored_atom_indices
            if cache_dir is None
            else joblib.Memory(cachedir=cache_dir, verbose=0).cache(
                self._get_stored_atom_indices
            )
        )
        stored_atom_indices = get_stored_atom_indices_cached(project_dir, run)
        sliced_snapshot = dict()
        for key, atom_indices in stored_atom_indices.items():
            sliced_snapshot[key] = md.Trajectory(
                snapshot.xyz[:, atom_indices, :], snapshot.topology.subset(atom_indices)
            )
        return sliced_snapshot

    def generate_representative_snapshot(
        self,
        transformation: TransformationAnalysis,
        output_dir: str,
        overwrite: bool = False,
    ) -> None:
        r"""
        Generate representative snapshots for old and new ligands.

        Illustration of frames:

            old ---[0]\             /[3]
                       \           /
            new         \[1]---[2]/

        Parameters
        ----------
        transformation: TransformationAnalysis
            The transformation record to operate on.
        output_dir : str
            Path where snapshots will be written.
        overwrite : bool
            If `True`, write over existing output files if present.
            Otherwise, skip writing output files for a given transformation when already present.
            Assumes that for a given `run_id` the output files do not ever change;
            does *no* checking that files wouldn't be different if inputs for a given `run_id` have changed.

        Returns
        -------
        None
        """
        max_binding_free_energy = self.config.max_binding_free_energy
        # create output directory if not present
        run_id = transformation.transformation.run_id
        os.makedirs(os.path.join(output_dir, f"RUN{run_id}"), exist_ok=True)
        # TODO: Cache results and only update RUNs for which we have received new data
        # Skip transformations whose estimated affinity is too weak to be
        # worth rendering.
        if (
            max_binding_free_energy is not None
            and transformation.binding_free_energy.point > max_binding_free_energy
        ):
            logging.info(
                "Skipping snapshot for RUN %d. Binding free energy estimate %g exceeds threshold %g",
                transformation.transformation.run_id,
                transformation.binding_free_energy.point,
                max_binding_free_energy,
            )
            return None
        # Flatten all (gen, work) pairs for the complex phase so the
        # lowest-work trajectory can be selected per ligand.
        gen_works = [
            (gen, work)
            for gen in transformation.complex_phase.gens
            for work in gen.works
        ]
        for ligand in ["old", "new"]:
            # check if output files all exist; if so, skip unless we are told not to
            if not overwrite:
                outfiles = self._transformation_to_file_mapping(
                    output_dir, run_id, ligand
                )
                if all(map(os.path.exists, outfiles)):
                    continue
            # Choose the work pair with minimal reverse (old) or forward
            # (new) work, and the trajectory frame belonging to that state.
            if ligand == "old":
                gen_work = min(gen_works, key=lambda gen_work: gen_work[1].reverse)
                frame = 3  # TODO: Magic numbers
            else:
                gen_work = min(gen_works, key=lambda gen_work: gen_work[1].forward)
                frame = 1  # TODO: Magic numbers
            gen_analysis, workpair = gen_work
            # Extract representative snapshot
            try:
                sliced_snapshots, components = self.extract_snapshot(
                    project_dir=self.project_dir,
                    project_data_dir=self.project_data_dir,
                    run=run_id,
                    clone=workpair.clone,
                    gen=gen_analysis.gen,
                    frame=frame,
                    fragment_id=transformation.transformation.xchem_fragment_id,
                )
                # Write protein PDB
                name = f"{ligand}_protein"
                sliced_snapshots["protein"].save(
                    os.path.join(output_dir, f"RUN{run_id}", f"{name}.pdb")
                )
                # Write old and new complex PDBs
                name = f"{ligand}_complex"
                sliced_snapshots[name].save(
                    os.path.join(output_dir, f"RUN{run_id}", f"{name}.pdb")
                )
                # Write ligand SDFs
                from openeye import oechem

                name = f"{ligand}_ligand"
                with oechem.oemolostream(
                    os.path.join(output_dir, f"RUN{run_id}", f"{name}.sdf")
                ) as ofs:
                    oechem.OEWriteMolecule(ofs, components[name])
            except Exception as e:
                # NOTE(review): errors are printed to stdout and swallowed
                # so a single bad RUN does not abort the batch -- consider
                # logging.exception instead of print.
                print(
                    f"\nException occurred extracting snapshot from {self.project_dir} data {self.project_data_dir} run {run_id} clone {gen_work[1].clone} gen {gen_work[0].gen}"
                )
                print(e)

    def generate_representative_snapshots(
        self,
        transformations: List[TransformationAnalysis],
        output_dir: str,
        num_procs: Optional[int],
        overwrite: bool = False,
    ) -> None:
        """Generate snapshots for many transformations in a process pool,
        showing progress; `num_procs=None` uses all available CPUs."""
        from rich.progress import track

        with multiprocessing.Pool(num_procs) as pool:
            result_iter = pool.imap_unordered(
                partial(
                    self.generate_representative_snapshot,
                    output_dir=output_dir,
                    overwrite=overwrite,
                ),
                transformations,
            )
            for _ in track(
                result_iter,
                total=len(transformations),
                description="Generating representative snapshots",
            ):
                pass
| 2.0625 | 2 |
import json
class Parser:
"""A parser for converting a set of tokens into an abstract syntax tree.
"""
def __init__(self, statement_defs : dict, types : list, settings : dict):
"""Initializes the parser with a set of pre-defined statements and types
Arguments:
statement_defs {dict} -- Statement definitions.
types {list} -- List of different types handled by the language.
settings {dict} -- Parser settings. Change at Data/Config.json
"""
self.statement_defs = statement_defs
self.types = types
self.settings = settings
self.ast = {}
def get_token_type(self, token : str) -> str:
return token.split(":", 1)[0]
def get_token_value(self, token : str) -> str:
return token.split(":", 1)[1]
def create_expression(self, tokens : list, line_counter : int) -> dict:
expression = {}
order_of_ops = ["func", ""]
if len(tokens) > 1:
pass
else:
value = self.statement_defs["Value"]
_type = self.get_token_type(tokens[0])
if _type in self.types:
value["type"] = _type
else:
raise UnexpectedTokenError(token[0], line_counter)
return
def process_line(self, line : list, line_num : int) -> dict:
statement = {}
statement["type"] = ""
if "assign" in line:
# Handles Variable Declarations
if line[0] in self.types:
statement = self.statement_defs["Variable Declaration"]
statement["declaration"]["type"] = line[0]
statement["declaration"]["id"] = self.get_token_value(line[1])
# Handles Variable Assignments
elif self.get_token_type(line[0]) == "id":
statement = self.statement_defs["Variable Assignment"]
statement["assignment"]["id"] = self.get_token_value(line[0])
# Handles out of place assignment operators
else:
raise UnexpectedTokenError(line[0], line_num)
return statement
def parse(self, tokens : list) -> dict:
self.ast["program"] = {}
self.ast["program"]["body"] = []
line = []
line_counter = 0
for token in tokens:
if token == "newline":
if len(line) > 0:
self.ast["program"]["body"].append(self.process_line(line, line_counter))
line = []
line_counter += 1
else:
line.append(token)
if self.settings["Print AST"]:
print(f"AST: {self.ast}")
return self.ast
class ExpressionTreeError(Exception):
    """Raised when an expression tree cannot be built for a source line."""

    def __init__(self, line : int):
        self.line = line

    def __str__(self):
        return "Expression tree error on line {}".format(self.line)
class UnexpectedTokenError(Exception):
    """Raised when the parser meets a token that is invalid in its position."""

    def __init__(self, token : str, line : int):
        self.token = token
        self.line = line

    def __str__(self):
        # BUG FIX: a stray data artifact ("| 3.953125 | 4 |") was fused onto
        # this line and made the module unparseable; removed.
        return f"Unexpected token {self.token} at line {self.line}"
spreukbot/generator.py | mivdnber/spreukbot | 1 | 12772813 | <gh_stars>1-10
from collections import defaultdict
import random
import difflib
import io
import textwrap
import urllib.parse
import logging
import requests
from PIL import Image, ImageDraw, ImageFont
import click
import spreukbot.rendering as rendering
import spreukbot.facebook as facebook
import spreukbot.config as config
import spreukbot.pixabay as pixabay
logger = logging.getLogger('spreukbot')
def ngram(n, text):
    """Yield every run of `n` consecutive whitespace-separated words of
    `text` as a tuple (sliding window)."""
    words = text.split()
    return zip(*[words[x:] for x in range(n)])


EMOJI = [
    '😂',
    '😍',
    '😜',
    '😩',
    '😠',
    '💩',
    '💘',
    '💔',
]


def get_random_emoji():
    """Return 0-3 copies of one random emoji; biased (4/7) towards none."""
    count = random.choice([0, 0, 0, 0, 1, 2, 3])
    return random.choice(EMOJI) * count


class SpreukGenerator:
    """Second-order Markov-chain quote generator trained on a line-per-quote
    corpus file."""

    def __init__(self, source):
        """Read the corpus at `source` and build the trigram transition table."""
        with open(source) as f:
            self.lines = [l for l in f]
        self.mc = defaultdict(list)
        for line in self.lines:
            for a, b, c in ngram(3, line):
                # Index both the original-case and lower-cased bigram so the
                # chain can also continue from mid-sentence (lower-case) words.
                self.mc[(a, b)].append(c)
                self.mc[(a.lower(), b.lower())].append(c)

    @property
    def starting_words(self):
        """Bigrams whose first word is title-cased (likely sentence starts)."""
        return [k for k in self.mc.keys() if k[0].istitle()]

    def random_start(self):
        """Pick a random sentence-start bigram to seed the walk."""
        print(len(set(self.starting_words)), 'possible starting words')
        return random.choice(self.starting_words)

    def generate(self, max_iterations=30, max_retries=5, min_length=9, accept_length=14):
        """Generate one quote of at least `min_length` words.

        Rejects and retries (up to `max_retries` times) when the walk runs
        out of iterations or the result is too close to a corpus line.
        """
        output = ''
        while len(output.split()) < min_length:
            state = self.random_start()
            output = ' '.join(state)
            while self.mc[state] and max_iterations:
                word = random.choice(self.mc[state])
                output += ' ' + word
                print(output)
                state = (state[1], word)
                if word.endswith(('.', '!')) and len(output.split()) > accept_length:
                    print("ending")
                    break
                max_iterations -= 1
            if not max_iterations or not self.unique(output):
                # BUG FIX: the original recursed unconditionally, so an
                # exhausted retry budget recursed forever (until
                # RecursionError). Give up and return the best effort once
                # the budget is spent. Also forward the length settings,
                # which were silently reset to defaults on every retry.
                if max_retries <= 0:
                    return output.strip()
                print(f'Rejected; trying {max_retries} more time(s)')
                return self.generate(max_retries=max_retries - 1,
                                     min_length=min_length,
                                     accept_length=accept_length)
        return output.strip()

    def unique(self, spreuk):
        """True when `spreuk` differs enough from every corpus line (fuzzy
        ratio below 0.55 and not a verbatim substring)."""
        return all(
            difflib.SequenceMatcher(a=spreuk, b=line).ratio() < .55 \
            and spreuk not in line
            for line in self.lines
        )
)
@click.command()
@click.option('--post/--no-post', default=False)
@click.option('--file', default=None)
@click.option('--show/--no-show', default=False)
@click.option('--count', default=None, type=int)
def main(post, file, show, count):
    """Generate one quote image and optionally save it to --file, display it
    (--show), or post it to Facebook (--post). With --count, just print that
    many text-only quotes and exit."""
    # BUG FIX: `sys` was used below (sys.exit) but never imported anywhere in
    # this module, which raised NameError whenever --count was given.
    import sys

    gen = SpreukGenerator('spreukbot/corpus.txt')
    if count is not None:
        for i in range(count):
            text = gen.generate()
            print(text)
        sys.exit(0)
    logger.info('getting pixabay image')
    image, w, h = pixabay.random_pixabay()
    logger.info('generating text')
    text = gen.generate()
    logger.info('rendering image')
    emoji = get_random_emoji()
    png = rendering.render(image, w, h, text, emoji=emoji)
    if file is not None:
        logger.info('writing to file')
        with open(file, 'wb') as f:
            f.write(png)
    if show:
        logger.info('showing image')
        Image.open(io.BytesIO(png)).show()
    if post:
        facebook.post_update(png)

if __name__ == '__main__':
    main()
| 2.703125 | 3 |
plot.py | scivision/ascii2hdf5-examples | 0 | 12772814 | <reponame>scivision/ascii2hdf5-examples
"""
plot HDF5 proprietary data converted by ascii2hdf5.py
"""
from pathlib import Path
import xarray
import argparse
import numpy as np
from matplotlib.pyplot import subplots, show
from matplotlib.colors import LogNorm
def plot(dat: xarray.DataArray, title=None):
    """Plot two polar speed spectra from a 3-D DataArray: one azimuth cut at
    ~0 degrees elevation and one elevation cut at ~0 degrees azimuth.

    Parameters
    ----------
    dat : xarray.DataArray
        3-D array with coordinates `azimuth`, `speed`, `elevation`; indexing
        below assumes dimension order (azimuth, speed, elevation) -- TODO
        confirm against the converter output.
    title : str, optional
        Figure title. BUG FIX: the original read the module-level `filename`
        global, which only exists when this file runs as a script; callers
        importing `plot` got a NameError. Pass a title explicitly, or omit it
        to keep the old script behavior.
    """
    if dat.ndim != 3:
        raise ValueError("Expected 3-D array")
    iel = dat.elevation.size // 2 - 1  # 0 degrees
    iaz = dat.azimuth.size // 2 - 1  # 0 degrees
    speed = dat.speed
    azr = np.radians(dat.azimuth)
    elr = np.radians(dat.elevation)
    fg, ax = subplots(nrows=1, ncols=2, subplot_kw=dict(projection="polar"))
    # Left panel: speed vs azimuth at the fixed mid elevation.
    h = ax[0].pcolormesh(azr, speed, dat[:, :, iel].T, shading="nearest", norm=LogNorm())
    ax[0].set_thetalim(thetamin=dat.azimuth[0], thetamax=dat.azimuth[-1])
    ax[0].set_xlabel("speed [km/sec]", rotation=dat.azimuth[0].item())
    fg.colorbar(h, ax=ax[0], shrink=0.25)
    # Right panel: speed vs elevation at the fixed mid azimuth.
    h = ax[1].pcolormesh(elr, speed, dat[iaz, :, :], shading="nearest", norm=LogNorm())
    ax[1].set_thetalim(thetamin=dat.elevation[0], thetamax=dat.elevation[-1])
    ax[1].set_xlabel("speed [km/sec]", rotation=dat.elevation[0].item())
    fg.colorbar(h, ax=ax[1], shrink=0.25)
    fg.suptitle(title if title is not None else filename.stem)
if __name__ == "__main__":
    # Command-line entry point: load the HDF5/NetCDF file named on the
    # command line and show the plots.
    parser = argparse.ArgumentParser()
    parser.add_argument("filename")
    args = parser.parse_args()

    # NOTE: `filename` stays a module-level global because plot() reads it
    # for the figure title.
    filename = Path(args.filename).expanduser()
    dat = xarray.load_dataarray(filename)

    plot(dat)
    show()
| 2.8125 | 3 |
1303.py | liuwenxiang1981/lwx | 0 | 12772815 | <reponame>liuwenxiang1981/lwx<filename>1303.py
import requests

# NOTE(review): leftover from a refactor -- the flat script below apparently
# used to live inside this function. Kept for reference.
# def get_imageNet_data(path):
# Spoof a desktop-browser User-Agent; many image hosts reject bare requests.
headers = {
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
}
# Fetch the image URL list for one ImageNet synset (wnid n02127808).
# The endpoint returns one URL per line, CRLF-separated.
response = requests.get('http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02127808',headers=headers)
ImageUrls = response.text
split_urls = ImageUrls.split('\r\n')
for url in split_urls:
    try:
        # Short timeout and no redirects: mirrored ImageNet URLs are often
        # dead or redirect to placeholder pages.
        picture = requests.get(url,headers=headers,timeout=3,allow_redirects=False)
    except Exception as i:
        # Best-effort download: report the failure and move on to the next URL.
        print(i)
    else:
        # picture = requests.get(url=url,headers=headers)
        # Use the last path component of the URL as the file name.
        # NOTE(review): the destination directory is hard-coded and the name
        # may still contain query strings -- TODO confirm before reuse.
        imageName = url.split('/')[-1]
        with open('/home/weifang/Desktop/liuwenxiang/{}.jpg'.format(imageName),mode='wb') as f:
            f.write(picture.content)
| 2.796875 | 3 |
tests/unit/lib/aquilon/worker/commands/test_compile_hostname.py | ned21/aquilon | 7 | 12772816 | <reponame>ned21/aquilon
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from unittest import mock
except ImportError:
# noinspection PyUnresolvedReferences
import mock
# As these are unit tests, we do not need the full broker capability,
# we can thus mock the DbFactory in order for it not to try and open
# the database (which is not required anyway)
with mock.patch('aquilon.aqdb.db_factory.DbFactory', autospec=True):
from aquilon.worker.commands import compile_hostname
class TestCommandCompileHostname(unittest.TestCase):
    """Unit tests for the `compile --hostname` broker command's render()."""
    @staticmethod
    @mock.patch.object(compile_hostname.CommandCompileHostname, '__init__')
    def get_command_instance(mock_init):
        """Return a command object whose real __init__ (broker wiring) is
        patched out for the duration of construction."""
        mock_init.return_value = None
        command = compile_hostname.CommandCompileHostname()
        return command
    @mock.patch.object(compile_hostname.CommandCompileHostname,
                       '_preprocess')
    def test_render_passes_correct_pancexclude_and_pancinclude_to_compile(
            self, mock_preprocess):
        # This test is to ensure that correct values of pancexclude and
        # pancinclude are used to call TemplateDomain.compile() for
        # pancdebug equal to both True and False.
        command = self.get_command_instance()
        mock_template_domain = mock.Mock()
        mock_plenary = mock.Mock()
        # render() uses the plenary key as a context manager, so __enter__
        # and __exit__ must be stubbed explicitly (Mock does not autospec
        # dunder methods).
        mock_plenary_key = mock.Mock()
        mock_plenary_key.__enter__ = mock.Mock()
        mock_plenary_key.__exit__ = mock.Mock(return_value=False)
        mock_plenary.get_key.return_value = mock_plenary_key
        mock_preprocess.return_value = mock_template_domain, mock_plenary
        # Fixed include/exclude patterns that render() is expected to
        # substitute when pancdebug is requested.
        expected_pancinclude = r'.*'
        expected_pancexclude = r'components/spma/functions.*'
        self.assertEqual(mock_template_domain.compile.call_count, 0)
        # Arguments pancexclude and pancinclude passed to render() should be
        # passed to TemplateDomain.compile() unchanged when pancdebug is false.
        command.render('session', 'logger', 'hostname',
                       'include', 'exclude', pancdebug=False,
                       cleandeps=True)
        self.assertEqual(mock_template_domain.compile.call_count, 1)
        # call_args_list[0][1] is the kwargs dict of the first compile() call.
        keywords = mock_template_domain.compile.call_args_list[0][1]
        self.assertEqual(keywords['panc_debug_exclude'], 'exclude')
        self.assertEqual(keywords['panc_debug_include'], 'include')
        # Arguments pancexclude and pancinclude passed to render() should be
        # ignored when pancdebug is true. Instead, expected_pancinclude and
        # expected_pancexclude should be passed to TemplateDomain.compile().
        command.render('session', 'logger', 'hostname',
                       'include', 'exclude', pancdebug=True,
                       cleandeps=True)
        self.assertEqual(mock_template_domain.compile.call_count, 2)
        keywords = mock_template_domain.compile.call_args_list[1][1]
        self.assertEqual(keywords['panc_debug_exclude'], expected_pancexclude)
        self.assertEqual(keywords['panc_debug_include'], expected_pancinclude)
| 2.0625 | 2 |
spacy_dbpedia_spotlight/util.py | oroszgy/spacy-dbpedia-spotlight | 51 | 12772817 | <reponame>oroszgy/spacy-dbpedia-spotlight
# Prefer the stdlib importlib.metadata (Python 3.8+); fall back to the
# importlib_metadata backport on older interpreters.
try:  # Python 3.8
    import importlib.metadata as importlib_metadata
except ImportError:
    import importlib_metadata  # noqa: F401

# BUG FIX: a dataset artifact ("| 1.65625 | 2 |") was fused onto the line
# below, leaving a trailing "|" that made the module unparseable; removed.
# Distribution metadata (name, version, ...) of the top-level package.
pkg_meta = importlib_metadata.metadata(__name__.split(".")[0])
TMB/scrape.py | clayton-rossiter/TakeMyBins | 0 | 12772818 | <reponame>clayton-rossiter/TakeMyBins
from bs4 import BeautifulSoup
from bs4.element import Tag
import requests
from TMB.result import ScrapeResult
class Scraper:
    # Bin-day lookup page for one fixed address on the Adur & Worthing site.
    URL:str = r"https://www.adur-worthing.gov.uk/bin-day/?brlu-selected-address=100061879527&return-url=%2Fbin-day%2F"

    def scrape(self) -> list[ScrapeResult]:
        """Fetch the council page and parse the bin-collection table.

        Raises ValueError when the page does not respond with HTTP 200.
        """
        response = requests.get(self.URL)
        if response.status_code != 200:
            raise ValueError("Could not access website")
        page = BeautifulSoup(response.text, "lxml")
        table = page.find_all('table')[0]

        # Skip the header row and the trailing summary row; parse the rest.
        body_rows = table.find_all('tr')[1:-1]
        return [parse_row(row) for row in body_rows]
def parse_row(row: Tag) -> ScrapeResult:
    """Turn one table row into a ScrapeResult.

    The <th> holds the bin type; the first <td> the colour; the second <td>'s
    bare text nodes (non-Tag children) are the collection days.
    """
    bin_type = row.find_all('th')[0].text
    cells = row.find_all('td')
    bin_colour = cells[0].text.strip()
    collection_days = [
        str(node) for node in cells[1].contents if not isinstance(node, Tag)
    ]
    return ScrapeResult(bin_type, bin_colour, collection_days)
| 3.5 | 4 |
"""Implements a fully blocking kernel client.
Useful for test suites and blocking terminal interfaces.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.utils.traitlets import Type
from IPython.kernel.client import KernelClient
from .channels import (
BlockingIOPubChannel, BlockingHBChannel,
BlockingShellChannel, BlockingStdInChannel
)
#-----------------------------------------------------------------------------
# Blocking kernel manager
#-----------------------------------------------------------------------------
class BlockingKernelClient(KernelClient):
    """Kernel client whose channels block on send/receive.

    Useful for test suites and synchronous (e.g. terminal) frontends; the
    only difference from KernelClient is the blocking channel classes below.
    """
    # The classes to use for the various channels
    shell_channel_class = Type(BlockingShellChannel)
    iopub_channel_class = Type(BlockingIOPubChannel)
    stdin_channel_class = Type(BlockingStdInChannel)
    hb_channel_class = Type(BlockingHBChannel)
| 1.757813 | 2 |
from typing import Text, List, Set
from pyfilter.src.filters import _BaseFilter
from pyfilter.src.filter_context import FilterContext
class _AllMatchFilter(_BaseFilter):
    """Filter that passes an input only when every configured keyword appears
    in it (logical AND over the keyword list)."""

    def __init__(self, keywords: List[Text]):
        super().__init__(keywords)

    def filter(self, input_string: Text, ctx: FilterContext) -> bool:
        """
        Run a single input through the all-match filters.
        :param input_string: The value to run through the filters.
        :param ctx: A context with metadata pertaining to this filter request.
        :return: True if all of the AllMatchFilter keywords were matched, otherwise False
        """
        if not self.keywords or not self.enabled:
            return True
        # PERF FIX: casefold the haystack once instead of re-folding it on
        # every loop iteration (str.casefold is idempotent, so behavior is
        # unchanged).
        haystack = input_string.casefold() if ctx.casefold else input_string
        for whitelist_keyword in self.keywords:
            if ctx.casefold:
                whitelist_keyword = whitelist_keyword.casefold()
            if whitelist_keyword not in haystack:
                return False
        return True

    def get_all_matching_keywords(self, input_string: Text, ctx: FilterContext) -> Set[Text]:
        """
        Returns all keywords which are seen in the input string.
        :param input_string: The value to run through the filter.
        :param ctx: A context with metadata pertaining to this filter request.
        :return: A set of whitelist_keywords existing in the input_string and the filter's keywords
        """
        seen = set()
        # Same loop-invariant hoist as in filter().
        haystack = input_string.casefold() if ctx.casefold else input_string
        for whitelist_keyword in self.keywords:
            if ctx.casefold:
                whitelist_keyword = whitelist_keyword.casefold()
            if whitelist_keyword in haystack:
                seen.add(whitelist_keyword)
        return seen

    def all_match(self, keywords: Set[Text]) -> bool:
        """
        Checks whether an input set matches all keywords
        :param keywords: A set of items to check
        :return: True if the sets are identical, otherwise False
        """
        return keywords == set(self.keywords)

    @property
    def __name(self) -> Text:  # pragma: no cover
        return 'AllMatchFilter'
| 2.984375 | 3 |
mindspore/python/mindspore/scipy/utils_const.py | AK391/mindspore | 0 | 12772821 | <gh_stars>0
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""internal graph-compatible utility functions"""
from collections.abc import Iterable
from ..ops.primitive import constexpr
from .._c_expression import typing
@constexpr
def _callable_const(x):
    """Returns true if x is a function in graph mode."""
    # NOTE: `typing` here is MindSpore's internal typing module (imported
    # from .._c_expression above), not the stdlib `typing`.
    return isinstance(x, typing.Function)
@constexpr
def _type_convert(new_type, obj):
    """
    Convert `obj` to type `new_type` by calling the type's constructor.
    """
    return new_type(obj)
@constexpr
def _raise_value_error(info, *param):
    """
    Raise ValueError in both graph/pynative mode

    Args:
        info(str): info string to display
        param(tuple): any object that can be recognized by graph mode. All
            param's value will be appended to info. Default is an empty tuple.
    """
    message = info + "".join(f"{extra}" for extra in param)
    raise ValueError(message)
@constexpr
def _raise_type_error(info, *param):
    """
    Raise TypeError in both graph/pynative mode

    Args:
        info(str): info string to display
        param(tuple): any object that can be recognized by graph mode. All
            param's value will be appended to info. Default is an empty tuple.
    """
    message = info + "".join(f"{extra}" for extra in param)
    raise TypeError(message)
@constexpr
def _type_check(arg_name, arg_value, valid_types, prim_name=None):
    """
    Checks whether a value is instance of some types.
    The same as mindspore._checkparam.Validator.check_value_type.
    This copy is to make it work in graph mode.

    Returns `arg_value` unchanged on success; raises TypeError otherwise.
    """
    # Normalize a single type into a one-element tuple so isinstance() and
    # the message builder can treat both cases uniformly.
    valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
    def raise_error_msg():
        """func for raising error message when check failed"""
        type_names = [t.__name__ if hasattr(t, '__name__') else str(t) for t in valid_types]
        num_types = len(valid_types)
        msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
        raise TypeError(f'{msg_prefix} type of `{arg_name}` should be {"one of " if num_types > 1 else ""}'
                        f'{type_names if num_types > 1 else type_names[0]}, '
                        f'but got \'{arg_value}\' with type {type(arg_value).__name__}.')
    # Notice: bool is subclass of int, so `check_value_type('x', True, [int])` will check fail, and
    #         `check_value_type('x', True, [bool, int])` will check pass
    if isinstance(arg_value, bool) and bool not in tuple(valid_types):
        raise_error_msg()
    if not isinstance(arg_value, tuple(valid_types)):
        raise_error_msg()
    return arg_value
| 1.929688 | 2 |
src/settings/_sqlite.py | Freonius/tranquillity | 0 | 12772822 | <filename>src/settings/_sqlite.py
from typing import Union, Dict
from sqlite3 import connect, Connection, Cursor, ProgrammingError
from .__interface import ISettings
class Sqlite(ISettings):
    """Settings backend that persists key/value pairs in a SQLite table.

    Table and column names are configurable; on construction the table is
    created if missing and every stored row is loaded into the in-memory
    configuration handled by the ISettings base class.
    """
    # Live connection to `_db_file` (re-opened by connect()).
    _connection: Connection
    # Name of the key column in `_tbl`.
    _key_col: str
    # Name of the value column in `_tbl`.
    _val_col: str
    # Name of the settings table.
    _tbl: str
    # Path (or URI) of the SQLite database file.
    _db_file: str
    # pylint: disable=too-many-arguments
    def __init__(self,
                 db_file: Union[str, None] = None,
                 table: Union[str, None] = None,
                 key_column: Union[str, None] = None,
                 value_column: Union[str, None] = None,
                 default_column: Union[str, None] = None,
                 raise_on_missing: bool = True,
                 read_only: bool = False) -> None:
        """Open (creating if needed) the settings table and preload all rows.

        All name arguments fall back to the defaults chosen by
        `create_stmt_if_not_exists`; `raise_on_missing` and `read_only` are
        forwarded to the base-class `_config` call.
        """
        super().__init__()
        _vals: Dict[str, str] = Sqlite.create_stmt_if_not_exists(
            db_file, table, key_column, value_column, default_column)
        self._db_file = _vals['db_file']
        self._connection = connect(self._db_file)
        self._connection.execute(_vals['create_statement'])
        self._key_col = _vals['key_column']
        self._val_col = _vals['value_column']
        self._tbl = _vals['table']
        # Load every stored key/value pair (coerced to str) into the
        # base-class configuration.
        _curs: Cursor = self._connection.execute(
            f'SELECT {self._key_col}, {self._val_col} FROM {self._tbl};')
        _d: Dict[str, str] = {str(x[0]): str(x[1]) for x in _curs.fetchall()}
        self._config(_d, raise_on_missing=raise_on_missing,
                     read_only=read_only)
    @staticmethod
    def create_stmt_if_not_exists(db_file: Union[str, None] = None,
                                  table: Union[str, None] = None,
                                  key_column: Union[str, None] = None,
                                  value_column: Union[str, None] = None,
                                  default_column: Union[str, None] = None) -> Dict[str, str]:
        """Fill in defaults for every missing name and build the
        CREATE TABLE IF NOT EXISTS statement; returns all resolved names plus
        the statement in one dict."""
        if db_file is None:
            # Default: shared in-memory cache database (SQLite URI syntax).
            db_file = './settings.db:cachedb?mode=memory&cache=shared'
        if table is None:
            table = 'settings'
        if key_column is None:
            key_column = 'key_column'
        if value_column is None:
            value_column = 'value_column'
        if default_column is None:
            default_column = 'default_column'
        _stmt: str = f'''
        CREATE TABLE IF NOT EXISTS {table}
        (
            {key_column} TEXT PRIMARY KEY NOT NULL,
            {value_column} TEXT NOT NULL,
            {default_column} TEXT
        );
        '''
        return {
            'db_file': db_file,
            'table': table,
            'key_column': key_column,
            'value_column': value_column,
            'default_column': default_column,
            'create_statement': _stmt
        }
    # pylint: enable=too-many-arguments
    def close(self) -> None:
        """Close the underlying SQLite connection."""
        self._connection.close()
    def connect(self) -> None:
        """Re-open the connection to the configured database file."""
        self._connection = connect(self._db_file)
    @property
    def is_connected(self) -> bool:
        """True while the connection is usable; sqlite3 raises
        ProgrammingError when cursor() is called on a closed connection."""
        try:
            self._connection.cursor()
            return True
        except ProgrammingError:
            return False
    def _update(self, key: str, val: str) -> None:
        """Insert or overwrite one key/value pair (SQLite UPSERT via
        ON CONFLICT ... DO UPDATE); values are bound as parameters, only the
        configured table/column names are interpolated."""
        _f: str = f'''INSERT INTO {self._tbl} ({self._key_col}, {self._val_col})
        VALUES(?, ?)
        ON CONFLICT ({self._key_col}) DO UPDATE SET
        {self._val_col}=?;'''
        self._connection.execute(_f, (key, val, val))
        self._connection.commit()
| 2.875 | 3 |
chalmers/commands/add.py | Anaconda-Platform/chalmers | 11 | 12772823 | <reponame>Anaconda-Platform/chalmers
'''
Add a program without running it
eg:
chalmers add --name server1 -- python /path/to/myserver.py
or:
chalmers add --name server1 -c "python /path/to/myserver.py"
Note that this does not run the program by default. To run your program,
run `chalmers start NAME` or use the run-now option eg. `chalmers add --run-now ...`
'''
from __future__ import unicode_literals, print_function
from argparse import RawDescriptionHelpFormatter
import logging
import os
import shlex
from chalmers import errors
from chalmers.program import Program
log = logging.getLogger('chalmers.add')
def main(args):
    """Entry point for `chalmers add`: validate the command arguments,
    capture any requested environment variables, register the program, and
    optionally start it right away.

    Raises:
        errors.ChalmersError: when both -c and positional COMMAND are given,
            or when neither is.
    """
    if args.cmd and args.command:
        # BUG FIX: corrected typo in the user-facing message
        # ("Unknow" -> "Unknown").
        raise errors.ChalmersError('Unknown arguments %r' % args.command)
    elif not (args.cmd or args.command):
        raise errors.ChalmersError('Must specify a command to add')

    if args.cmd:
        args.command = args.cmd

    # Default the program name to its executable.
    if not args.name:
        args.name = args.command[0]

    env = {}
    for env_var in args.save_env:
        if env_var in os.environ:
            env[env_var] = os.environ[env_var]
        else:
            # logging's warn() is deprecated; warning() is the supported
            # spelling (same behavior).
            log.warning("Environment variable %s does not exist (from -e/--save-env)" % env_var)

    program = Program.add(
        args.name, args.command,
        paused=args.paused, cwd=args.cwd,
        stdout=args.stdout, stderr=args.stderr,
        daemon_log=args.daemon_log, redirect_stderr=args.redirect_stderr,
        env=env
    )

    log.info('Added program {args.name}'.format(args=args))

    if args.run_now:
        log.info('Running program {args.name}'.format(args=args))
        # With --wait the program runs in the foreground; otherwise it is
        # daemonized.
        program.start(daemon=not args.wait)
program.start(daemon=not args.wait)
def add_parser(subparsers):
    """Register the `add` sub-command and all of its options on the given
    argparse subparsers object; wires `main` above as the handler."""
    description = 'Add a command to run'
    parser = subparsers.add_parser('add',
                                   help=description, description=description,
                                   epilog=__doc__,
                                   formatter_class=RawDescriptionHelpFormatter
                                   )
    #===============================================================================
    # Starting state: paused/unpaused and run-now/run-later are mutually
    # exclusive choices within one group.
    #===============================================================================
    group = parser.add_argument_group('Starting State') \
                  .add_mutually_exclusive_group()
    group.add_argument('--off', '--paused', action='store_true', dest='paused',
                       help="Don't start program automatically at system start (exclude from `chalmers start --all`)",
                       default=False)
    group.add_argument('--on', '--un-paused', action='store_false', dest='paused',
                       help="Start program automatically at system start (include in `chalmers start --all`)")
    group.add_argument('-r', '--run-now', action='store_true', default=False, dest='run_now',
                       help="Start program Right now (default: %(default)s)")
    group.add_argument('-l', '--dont-run-now', '--run-later', action='store_false', dest='run_now',
                       help="Start the program later with `chalmers start ...`")
    #===========================================================================
    # Process output: where stdout/stderr/daemon logs go.
    #===========================================================================
    group = parser.add_argument_group('Process Output:')
    group.add_argument('--stdout',
                       help='Filename to log stdout to')
    group.add_argument('--stderr',
                       help='Filename to log stderr to')
    group.add_argument('--daemon-log',
                       help='Filename to log meta information about this process to')
    group.add_argument('--redirect-stderr', action='store_true', default=True,
                       dest='redirect_stderr',
                       help='Store stdout and stderr in the same log file (default: %(default)s)')
    group.add_argument('--dont-redirect-stderr', action='store_false',
                       dest='redirect_stderr',
                       help='Store stdout and stderr in seporate log files')
    #===========================================================================
    # The command itself: either positional COMMAND or -c "..." (validated
    # as mutually exclusive in main()).
    #===========================================================================
    parser.add_argument('-n', '--name',
                        help='Set the name of this program for future chalmers commands')
    parser.add_argument('-w', '--wait', action='store_true', default=False, dest='wait',
                        help="Wait until program exits to return (default: %(default)s) (--run-now is implyed)")
    parser.add_argument('--cwd', default=os.curdir,
                        help='Set working directory of the program (default: %(default)s)')
    parser.add_argument('command', nargs='*', metavar='COMMAND',
                        help='Command to run')
    # On Windows shlex must run in non-POSIX mode to keep backslashes intact.
    split = lambda item: shlex.split(item, posix=os.name == 'posix')
    parser.add_argument('-c', metavar='COMMAND', type=split, dest='cmd',
                        help='Command to run')
    parser.add_argument('-e', '--save-env', metavar='ENV_VAR', action='append', default=[],
                        help='Save a current environment variable to be run( Eg. --save-env PATH)')
    parser.set_defaults(main=main, state='pause')
| 2.796875 | 3 |
spea2/algorithm/individual.py | togulcan/SPEA2-ICoptimizer | 4 | 12772824 | <reponame>togulcan/SPEA2-ICoptimizer
from .fitness import Fitness
class Individual:
    """One member of the SPEA2 population: a circuit plus its fitness records.

    The class attributes (TARGETS, CONSTRAINTS, ...) are population-wide
    configuration, expected to be assigned once before individuals are used.
    """
    # Mapping of circuit attribute name -> 'max' or 'min'.
    TARGETS = {}
    # Mapping of circuit attribute name -> constraint spec.
    CONSTRAINTS = {}
    constraint_operations = []
    constraint_constants = []

    def __init__(self, circuit, N):
        self.circuit = circuit
        self.fitness = Fitness(N)
        self.arch_fitness = Fitness(N)
        self.status = 'not simulated'

    def __hash__(self):
        # Identity follows the wrapped circuit, consistent with __eq__.
        return hash(self.circuit)

    def __eq__(self, other):
        # BUG FIX: return NotImplemented for foreign types instead of
        # raising AttributeError on `other.circuit`.
        if not isinstance(other, Individual):
            return NotImplemented
        return self.circuit == other.circuit

    @property
    def targets(self):
        """Objective values; 'min' targets are inverted (1/x) so that larger
        is always better for the optimizer.

        Raises:
            ValueError: when a TARGETS operation is neither 'max' nor 'min'.
        """
        t = []
        for target_name, operation in self.TARGETS.items():
            if operation == 'max':
                t.append(getattr(self.circuit, target_name))
            elif operation == 'min':
                t.append(1 / getattr(self.circuit, target_name))
            else:
                # BUG FIX: message typo -- was "'max or 'min'".
                raise ValueError(f"Operation should be 'max' or 'min' "
                                 f"but given {operation}")
        return t

    @property
    def constraint_values(self):
        """Raw circuit attribute values for each configured constraint."""
        return [getattr(self.circuit, cons_name)
                for cons_name in self.CONSTRAINTS.keys()]

    def reset_arch_fitness(self, N):
        """Discard the archive fitness and start a fresh one of size N."""
        self.arch_fitness = Fitness(N)
| 2.671875 | 3 |
model.py | lFatality/tensorflow2caffe | 115 | 12772825 | <reponame>lFatality/tensorflow2caffe
from tflearn import input_data, conv_2d, max_pool_2d, fully_connected, dropout, Momentum, regression, DNN
def _vgg_conv_stack(network, n_filters, n_convs):
    """Append `n_convs` 3x3 ReLU conv layers of `n_filters` filters followed
    by one 2x2 max-pool (stride 2) -- the repeating VGG building block."""
    for _ in range(n_convs):
        network = conv_2d(network, n_filters, 3, activation='relu',
                          regularizer='L2', weight_decay=5e-4)
    return max_pool_2d(network, 2, strides=2)


# model of vgg-19
def vgg_net_19(width, height):
    """Build the VGG-19 network (configuration E) for width x height RGB
    inputs, compiled with momentum SGD and categorical cross-entropy.

    The five conv stacks (2x64, 2x128, 4x256, 4x512, 4x512) were previously
    written out as 16 near-identical lines; they are factored through
    _vgg_conv_stack with identical layer parameters.
    """
    network = input_data(shape=[None, height, width, 3], name='input')
    for n_filters, n_convs in ((64, 2), (128, 2), (256, 4), (512, 4), (512, 4)):
        network = _vgg_conv_stack(network, n_filters, n_convs)

    # Classifier head: two dropout-regularized FC layers and a softmax output.
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)

    # learning_rate=0: this model is intended for loading pre-trained
    # weights, not for training.
    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')

    model = DNN(network, checkpoint_path='', max_checkpoints=1,
                tensorboard_verbose=2, tensorboard_dir='')
    return model
# model of vgg-19 for testing of the activations
# rename the output you want to test, connect it to the next layer and change the output layer at the bottom (model = DNN(...))
# make sure to use the correct test function (depending if your output is a tensor or a vector)
# NOTE: the layer-by-layer duplication below is intentional -- each line is
# meant to be hand-edited (given a name like network1/network2) so any
# intermediate activation can be wired to the DNN output.
def vgg_net_19_activations(width, height):
    """VGG-19 variant whose DNN output is an intermediate layer (currently
    network1, the first 64-filter conv) for inspecting activations."""
    network = input_data(shape=[None, height, width, 3], name='input')
    network1 = conv_2d(network, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network2 = conv_2d(network1, 64, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network2, 2, strides=2)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation = 'relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    opt = Momentum(learning_rate=0, momentum = 0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    # DNN is wired to network1 (not the softmax head): running the model
    # returns that layer's activations.
    model = DNN(network1, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
    return model
| 2.53125 | 3 |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
# NOTE: the "#! [name] ... #! [name]" pairs delimit snippet regions that are
# extracted into the documentation; keep them matched when editing.
#! [auto_compilation]
import openvino.runtime as ov
compiled_model = ov.compile_model("model.xml")
#! [auto_compilation]
#! [properties_example]
core = ov.Core()
input_a = ov.opset8.parameter([8])
res = ov.opset8.absolute(input_a)
model = ov.Model(res, [input_a])
compiled = core.compile_model(model, "CPU")
print(model.inputs)
print(model.outputs)
print(compiled.inputs)
print(compiled.outputs)
#! [properties_example]
#! [tensor_basics]
data_float64 = np.ones(shape=(2,8))
tensor = ov.Tensor(data_float64)
assert tensor.element_type == ov.Type.f64
data_int32 = np.ones(shape=(2,8), dtype=np.int32)
tensor = ov.Tensor(data_int32)
assert tensor.element_type == ov.Type.i32
#! [tensor_basics]
#! [tensor_shared_mode]
data_to_share = np.ones(shape=(2,8))
shared_tensor = ov.Tensor(data_to_share, shared_memory=True)
# Editing of the numpy array affects Tensor's data
data_to_share[0][2] = 6.0
assert shared_tensor.data[0][2] == 6.0
# Editing of Tensor's data affects the numpy array
shared_tensor.data[0][2] = 0.6
assert data_to_share[0][2] == 0.6
#! [tensor_shared_mode]
# Shared request/input used by several of the snippets below.
infer_request = compiled.create_infer_request()
data = np.random.randint(-5, 3 + 1, size=(8))
#! [passing_numpy_array]
# Passing inputs data in form of a dictionary
infer_request.infer(inputs={0: data})
# Passing inputs data in form of a list
infer_request.infer(inputs=[data])
#! [passing_numpy_array]
#! [getting_results]
# Get output tensor
results = infer_request.get_output_tensor().data
# Get tensor with CompiledModel's output node
results = infer_request.get_tensor(compiled.outputs[0]).data
# Get all results with special helper property
results = list(infer_request.results.values())
#! [getting_results]
#! [sync_infer]
# Simple call to InferRequest
results = infer_request.infer(inputs={0: data})
# Extra feature: calling CompiledModel directly
results = compiled_model(inputs={0: data})
#! [sync_infer]
#! [asyncinferqueue]
core = ov.Core()
# Simple model that adds two inputs together
input_a = ov.opset8.parameter([8])
input_b = ov.opset8.parameter([8])
res = ov.opset8.add(input_a, input_b)
model = ov.Model(res, [input_a, input_b])
compiled = core.compile_model(model, "CPU")
# Number of InferRequests that AsyncInferQueue holds
jobs = 4
infer_queue = ov.AsyncInferQueue(compiled, jobs)
# Create data
data = [np.array([i] * 8, dtype=np.float32) for i in range(jobs)]
# Run all jobs
for i in range(len(data)):
    infer_queue.start_async({0: data[i], 1: data[i]})
infer_queue.wait_all()
#! [asyncinferqueue]
#! [asyncinferqueue_access]
results = infer_queue[3].get_output_tensor().data
#! [asyncinferqueue_access]
#! [asyncinferqueue_set_callback]
data_done = [False for _ in range(jobs)]
def f(request, userdata):
    print(f"Done! Result: {request.get_output_tensor().data}")
    data_done[userdata] = True
infer_queue.set_callback(f)
for i in range(len(data)):
    infer_queue.start_async({0: data[i], 1: data[i]}, userdata=i)
infer_queue.wait_all()
assert all(data_done)
#! [asyncinferqueue_set_callback]
# NOTE(review): "unt8_data" looks like a typo for "uint8_data"; left as-is
# because renaming would change the rendered documentation snippets.
unt8_data = np.ones([100])
#! [packing_data]
from openvino.helpers import pack_data
packed_buffer = pack_data(unt8_data, ov.Type.u4)
# Create tensor with shape in element types
t = ov.Tensor(packed_buffer, [1, 128], ov.Type.u4)
#! [packing_data]
#! [unpacking]
from openvino.helpers import unpack_data
unpacked_data = unpack_data(t.data, t.element_type, t.shape)
assert np.array_equal(unpacked_data , unt8_data)
#! [unpacking]
#! [releasing_gil]
import openvino.runtime as ov
import cv2 as cv
from threading import Thread
input_data = []
# Processing input data will be done in a separate thread
# while compilation of the model and creation of the infer request
# is going to be executed in the main thread.
def prepare_data(input, image_path):
    image = cv.imread(image_path)
    h, w = list(input.shape)[-2:]
    # NOTE(review): cv.resize expects dsize=(width, height); passing (h, w)
    # is only correct for square inputs — confirm intent.
    image = cv.resize(image, (h, w))
    image = image.transpose((2, 0, 1))
    image = np.expand_dims(image, 0)
    input_data.append(image)
core = ov.Core()
model = core.read_model("model.xml")
# Create thread with prepare_data function as target and start it
thread = Thread(target=prepare_data, args=[model.input(), "path/to/image"])
thread.start()
# The GIL will be released in compile_model.
# It allows a thread above to start the job,
# while main thread is running in the background.
compiled = core.compile_model(model, "GPU")
# After returning from compile_model, the main thread acquires the GIL
# and starts create_infer_request which releases it once again.
request = compiled.create_infer_request()
# Join the thread to make sure the input_data is ready
thread.join()
# running the inference
request.infer(input_data)
#! [releasing_gil]
| 1.976563 | 2 |
src/ext/glbinding/codegeneration/scripts/gen_extensions.py | carl-221b/gen_planete | 18 | 12772827 | <filename>src/ext/glbinding/codegeneration/scripts/gen_extensions.py<gh_stars>10-100
from binding import *
from context import Context
def genExtensionContexts(extensions):
    """Build a template context dict for every extension.

    Each context carries the binding identifier, raw name, the GL core
    version (if the extension was folded into core), and the contexts of
    the commands the extension requires.
    """
    contexts = []
    for ext in extensions:
        required_commands = Context.listContext(
            [{"identifier": functionBID(cmd), "name": cmd.name} for cmd in ext.reqCommands],
            sortKey=lambda cmd: cmd["identifier"],
        )
        contexts.append({
            "identifier": extensionBID(ext),
            "name": ext.name,
            "incore": ext.incore,
            "incoreMajor": ext.incore.major if ext.incore else None,
            "incoreMinor": ext.incore.minor if ext.incore else None,
            "reqCommands": required_commands,
        })
    return contexts
| 1.953125 | 2 |
amphi-4.py | helionjf/TelloEduSwarmSearch | 0 | 12772828 | from fly_tello import FlyTello
my_tellos = list()
# English translation of the (stray, no-op) French string literal below:
# "Scenario specific to the MPG lecture hall. Initial position: 1 2 3 4
# (all drones at 12 o'clock), separated by 1 m. TO DO: finish."
'''
// scenario specifique amphi MPG
// position initiale : 1 2 3 4 (tous les drones à 12h)
// Séparés de 1 m
// TO DO : à terminer
'''
#
# MAIN FLIGHT CONTROL LOGIC
#
# Define the Tello's we're using, in the order we want them numbered
my_tellos.append('0TQDG2KEDB4FH3')  # drone number 1 == DC5CE0
my_tellos.append('0TQDG2KEDBWK3X')  # drone number 2 == DC5F6C
my_tellos.append('0TQDFCHEDB3F86')  # drone number 3 == D3FCE4
my_tellos.append('0TQDG2KEDB04T1')  # drone number 4 == DC5CF3
#my_tellos.append('0TQDFCHEDBY3H0')  # drone number 5 == D3F926
#my_tellos.append('0TQDG2KEDBPE19')  # drone number 6 == DC5F05
# Control the flight
with FlyTello(my_tellos, get_status=True) as fly:
    # TO DO : battery_check 20
    fly.get_battery()
    fly.print_status(sync=True)
    # all drones take off 2 seconds apart
    for i in range(1,5):
        fly.takeoff(i)
        fly.pause(2)
    fly.print_status(sync=True)
    # all drones climb 1 m
    fly.up(100)
    # 180-degree rotation for all
    fly.rotate_cw(180)
    # lateral movement
    fly.left(100,1)
    fly.left(100,2)
    fly.right(100,3)
    fly.right(100,4)
    # staircase movement
    fly.up(80,1)
    fly.up(20,2)
    fly.up(20,3)
    fly.up(80,4)
    #fly.up(40,5)
    #fly.up(60,6)
    # forward movement of 400 (disabled)
    #fly.forward(100)
    # 90-degree rotation
    fly.rotate_cw(90,1)
    fly.rotate_cw(90,2)
    fly.rotate_ccw(90,3)
    fly.rotate_ccw(90,4)
    # forward movement of 200
    fly.right(50,2)
    fly.right(50,3)
    fly.forward(200,1)
    fly.forward(400,2)
    fly.forward(400,3)
    fly.forward(200,4)
    # 90-degree rotation
    fly.rotate_cw(90,1)
    fly.rotate_cw(90,2)
    fly.rotate_ccw(90,3)
    fly.rotate_ccw(90,4)
    # lateral movement of 400 (disabled)
    #fly.forward(400)
    # 180-degree rotation
    fly.rotate_ccw(180)
    fly.forward(50,2)
    # NOTE(review): down(2) requests a 2 cm descent for ALL drones, which is
    # below the Tello minimum move distance (20 cm); was this meant to be a
    # descent for drone number 2, e.g. fly.down(20, 2)? Confirm.
    fly.down(2)
    # front flip
    for i in range(1,5):
        fly.flip("forward",i)
        fly.pause(2)
    #fly.flip("forward")
    #fly.print_status(sync=True)
    # landing
    # lateral movement back toward the start positions
    fly.left(50,1)
    fly.left(350,2)
    fly.right(250,3)
    fly.right(100,4)
    fly.land()
    fly.get_battery(sync=True)
    fly.get_sn()
source/setup.py | hytalo-bassi/aeternah | 1 | 12772829 | from importlib import import_module, invalidate_caches
from inspect import getmembers, isfunction
from aiogram import Dispatcher, Bot
from os.path import abspath
from ujson import loads
from os import environ
import logging
# Log INFO and above to a local ".log" file, truncated on every start.
logging.basicConfig(level=logging.INFO, filename=".log", filemode="w+")
# Localization strings loaded from langs.json.
_langs = loads(open('source/langs.json', 'r').read())
# Configuration comes from environment variables; API_TOKEN must be set.
conf = environ
bot = Bot(conf['API_TOKEN'], parse_mode='HTML')
dp = Dispatcher(bot)
# Plugin descriptors from plugins.json: each names a module_path, a handler
# func, and either a "commands" list or a "regex" trigger (plus a "doc").
plugins = loads(open('source/plugins.json').read())["plugins"]
# Accumulates {"commands", "doc"} entries for command-based plugins.
helper = []
# Import each plugin module and register its handler function with the
# dispatcher. Plugins that declare a "commands" list become command handlers
# (and contribute a help entry); plugins without one fall back to a "regex"
# trigger.
for plugin in plugins:
    _tmp = import_module(plugin['module_path'])
    for member in getmembers(_tmp, isfunction):
        if member[0] == plugin['func']:
            try:
                helper.append({
                    "commands": plugin["commands"],
                    "doc": plugin['doc'],
                })
                dp.register_message_handler(member[1], commands=plugin["commands"])
            # Only a missing "commands"/"doc" key means "use the regex
            # trigger instead". The previous bare `except:` also swallowed
            # unrelated errors (e.g. aiogram registration failures) and
            # silently re-registered the handler with a regex.
            except KeyError:
                dp.register_message_handler(member[1], regexp=plugin["regex"])
# Clear importlib caches after dynamic imports.
invalidate_caches()
| 2.125 | 2 |
kingfisher_scrapy/spiders/kenya_makueni.py | open-contracting/kingfisher-collect | 7 | 12772830 | <reponame>open-contracting/kingfisher-collect<gh_stars>1-10
from kingfisher_scrapy.base_spider import SimpleSpider
from kingfisher_scrapy.util import handle_http_error, parameters
class KenyaMakueni(SimpleSpider):
    """
    Domain
      Makueni County
    Swagger API documentation
      https://opencontracting.makueni.go.ke/swagger-ui/#/ocds-controller
    """
    name = 'kenya_makueni'

    # BaseSpider
    root_path = 'item'

    # SimpleSpider
    data_type = 'release_package'

    def start_requests(self):
        # Pagination starts at page 0.
        yield from self.request_page(0)

    @handle_http_error
    def parse(self, response):
        yield from super().parse(response)
        # A non-empty page means there may be more data: queue the next page.
        if response.json():
            current_page = response.request.meta['page']
            yield from self.request_page(current_page + 1)

    def request_page(self, page):
        # Fetch one 1000-item page of release packages.
        url = (
            'https://opencontracting.makueni.go.ke/api/ocds/package/all'
            f'?pageSize=1000&pageNumber={page}'
        )
        yield self.build_request(url, parameters('pageNumber'), meta={'page': page})
| 2.5 | 2 |
GamesKeeper/models/guild.py | ThatGuyJustin/GamesKeeper | 0 | 12772831 | from GamesKeeper.db import BaseModel
from peewee import (BigIntegerField, IntegerField, TextField, BooleanField,
DoesNotExist)
from playhouse.postgres_ext import BinaryJSONField, ArrayField
@BaseModel.register
class Guild(BaseModel):
    """Per-guild settings row.

    Stores the bot configuration for a single Discord guild; the set of
    enabled games is kept as a bitmask in ``enabled_games``.
    """

    guild_id = BigIntegerField(primary_key=True)
    owner_id = BigIntegerField(null=False)
    prefix = TextField(default="+", null=False)
    games_category = BigIntegerField(null=True)
    spectator_roles = ArrayField(BigIntegerField, null=True, index=False)
    enabled_games = IntegerField()
    referee_role = BigIntegerField(null=True)
    role_allow_startgames = BigIntegerField(null=True)
    booster_perks = BooleanField(default=False)
    commands_disabled_channels = ArrayField(
        BigIntegerField, null=True, index=False
    )
    logs_enabled = BooleanField(default=True)
    log_channel = BigIntegerField(null=True)

    # Single source of truth for the game bitmask values and their emotes /
    # display names (previously duplicated across four methods).
    # Bits 1 << 4 (2048) and 1 << 5 (Trivia) are reserved but not supported.
    GAME_EMOTES = {
        1 << 0: "<:uno:594231154098438153>",  # Uno
        1 << 1: "<:connectfour:594231155172179985>",  # Connect4
        1 << 2: "<:tictactoe:594231153830133761>",  # TicTacToe
        1 << 3: "<:hangman:594231153914019840>",  # Hangman
    }
    GAME_NAMES = {
        1 << 0: "Uno",
        1 << 1: "Connect4",
        1 << 2: "TicTacToe",
        1 << 3: "HangMan",
    }
    GAME_FLAGS = {
        'uno': 1 << 0,
        'c4': 1 << 1,
        'ttt': 1 << 2,
        'hm': 1 << 3,
    }

    class Meta:
        db_table = 'guilds'

    @classmethod
    def get_settings(cls, guild_id):
        """Return the Guild row for ``guild_id``, or None if it doesn't exist."""
        try:
            return Guild.get(guild_id=guild_id)
        except Guild.DoesNotExist:
            return None

    @classmethod
    def using_id(cls, guild_id):
        """Return the Guild row for ``guild_id`` (raises DoesNotExist if missing)."""
        return Guild.get(guild_id=guild_id)

    def enabled_games_emotes(self):
        """List the emotes of all enabled games, or ['`None`'] when none are."""
        if self.enabled_games == 0:
            return ['`None`']
        # Iterating the known flags (instead of range(10)) avoids a KeyError
        # when a reserved/unknown bit happens to be set in enabled_games.
        return [emote for flag, emote in self.GAME_EMOTES.items()
                if self.enabled_games & flag]

    def disabled_games_emotes(self):
        """List the emotes of all supported games that are NOT enabled."""
        return [emote for flag, emote in self.GAME_EMOTES.items()
                if not self.enabled_games & flag]

    def enabled_games_strings(self):
        """List the display names of all enabled games."""
        return [name for flag, name in self.GAME_NAMES.items()
                if self.enabled_games & flag]

    def check_if_listed(self, game, check_type):
        """Return whether ``game`` ('uno', 'c4', 'ttt', 'hm') is enabled/disabled.

        ``check_type`` must be 'enabled' or 'disabled'; any other value
        returns None (matching the previous implicit behavior).
        """
        enabled = bool(self.enabled_games & self.GAME_FLAGS[game])
        if check_type == 'enabled':
            return enabled
        if check_type == 'disabled':
            return not enabled
        return None
| 2.25 | 2 |
mrs_plugin/tests/unit/test_core.py | mike-lischke/mysql-shell-plugins | 11 | 12772832 | <reponame>mike-lischke/mysql-shell-plugins<filename>mrs_plugin/tests/unit/test_core.py
# Copyright (c) 2021, 2022, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from ... core import *
@pytest.mark.usefixtures("init_mrs")
def test_get_current_service():
    """Without current objects the service is None; with them it is resolved."""
    set_current_objects()
    assert get_current_service() is None

    set_current_objects(service_id=1, schema_id=1, content_set_id=1)
    expected = {
        'id': 1,
        'enabled': 1,
        'url_protocol': 'HTTP',
        'url_host_name': 'localhost',
        'url_context_root': '/test',
        'is_default': 1,
        'comments': 'Test service',
        'host_ctx': 'localhost/test',
    }
    current_service = get_current_service()
    assert current_service is not None
    assert current_service == expected
@pytest.mark.usefixtures("init_mrs")
def test_get_current_content_set():
    """With no current objects set, there is no current content set."""
    set_current_objects()
    assert get_current_content_set() is None
@pytest.mark.usefixtures("init_mrs")
def test_get_current_schema():
    """Without current objects the schema is None; with them it is resolved."""
    set_current_objects()
    assert get_current_schema() is None

    set_current_objects(service_id=1, schema_id=1, content_set_id=1)
    expected = {
        'id': 1,
        'name': 'PhoneBook',
        'service_id': 1,
        'request_path': '/test_schema',
        'requires_auth': 0,
        'enabled': 1,
        'items_per_page': 20,
        'comments': 'test schema',
        'host_ctx': 'localhost/test',
    }
    current_schema = get_current_schema()
    assert current_schema is not None
    assert current_schema == expected
@pytest.mark.usefixtures("init_mrs")
def test_get_interactive_default():
    """The interactive default must be a concrete boolean."""
    value = get_interactive_default()
    assert value is not None
    assert isinstance(value, bool)
@pytest.mark.usefixtures("init_mrs")
def test_get_current_session():
    """A current session is always available after init."""
    assert get_current_session() is not None
@pytest.mark.usefixtures("init_mrs")
def test_split_sql_script():
    """An empty script splits into a single empty statement."""
    result = split_sql_script("")
    assert result == ['']
@pytest.mark.usefixtures("init_mrs")
def test_get_current_config():
    """The current config reflects the default current object ids."""
    expected = {
        'current_service_id': 1,
        'current_schema_id': 1,
        'current_content_set_id': 1,
    }
    config = get_current_config()
    assert config is not None
    assert config == expected
@pytest.mark.usefixtures("init_mrs")
def test_analyze_service_path():
    """analyze_service_path resolves a path into (service, schema, content_set)."""
    # No path: nothing is resolved.
    assert analyze_service_path() == (None, None, None)

    expected_service = {
        'id': 1,
        'enabled': 1,
        'url_protocol': 'HTTP',
        'url_host_name': 'localhost',
        'url_context_root': '/test',
        'is_default': 1,
        'comments': 'Test service',
        'host_ctx': 'localhost/test',
    }
    expected_schema = {
        'id': 1,
        'name': 'PhoneBook',
        'service_id': 1,
        'request_path': '/test_schema',
        'requires_auth': 0,
        'enabled': 1,
        'items_per_page': 20,
        'comments': 'test schema',
        'host_ctx': 'localhost/test',
    }
    service, schema, content_set = analyze_service_path("localhost/test/test_schema")
    assert service == expected_service
    assert schema == expected_schema
    assert content_set is None

    # Unknown schema/content-set path segment.
    with pytest.raises(ValueError) as exc_info:
        analyze_service_path("localhost/test/schema")
    assert str(exc_info.value) == "The given schema or content set was not found."

    # Unknown host/service.
    with pytest.raises(ValueError) as exc_info:
        analyze_service_path("127.0.0.1/test")
    assert str(exc_info.value) == "The given MRS service was not found."
| 1.78125 | 2 |
dynamics_learning/networks/baseline/planet_baseline.py | wuphilipp/replay-overshooting | 0 | 12772833 | <reponame>wuphilipp/replay-overshooting
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any, List, Optional, Tuple, overload
import torch
from fannypack.nn import resblocks
from fannypack.utils import Buddy
from torch import nn as nn
from dynamics_learning.data.datasets import VisData, VisDataIMG
from dynamics_learning.networks.estimator import Estimator, EstimatorConfig
from dynamics_learning.networks.image_models.kvae import (
GrayImageVAE,
_2d_latent_viz,
_3d_latent_viz,
_img_seq,
)
from dynamics_learning.utils.log_utils import log_basic, log_image
from dynamics_learning.utils.net_utils import (
batch_eye,
gaussian_log_prob,
reparameterize_gauss,
)
class OverShoot(Enum):
    """Overshoot method type.

    Selects how multi-step ("overshooting") predictions contribute to the
    training loss (see ``PlaNetBaselineFilter.loss``).
    """

    NONE = 1  # no overshooting term
    LATENT = 2  # KL between multi-step prior and filtered posterior
    OBSERVATION = 3  # reconstruction loss on multi-step predicted observations
@dataclass(frozen=True)
class LossFeatures:
    """Holds features for computing the loss.

    Accumulates, per time step, the Gaussian parameters of the posterior
    (from measurement updates) and of the prior (from dynamics transitions).
    The dataclass is frozen, but the contained lists are mutated in place.
    """

    q_mu_posterior_list: List[torch.Tensor]
    q_log_var_posterior_list: List[torch.Tensor]
    p_mu_prior_list: List[torch.Tensor]
    p_log_var_prior_list: List[torch.Tensor]

    @staticmethod
    def create_empty() -> "LossFeatures":
        """Create a LossFeatures with four fresh empty lists."""
        return LossFeatures(
            q_mu_posterior_list=[],
            q_log_var_posterior_list=[],
            p_mu_prior_list=[],
            p_log_var_prior_list=[],
        )
def make_feature_extractor(in_dim: int, out_dim: int, hidden_units: int) -> nn.Module:
    """Build a small residual MLP mapping ``in_dim`` -> ``out_dim`` features."""
    layers = [
        nn.Linear(in_dim, hidden_units),
        nn.LeakyReLU(inplace=True),
        resblocks.Linear(hidden_units, activation="leaky_relu"),
        resblocks.Linear(hidden_units, activation="leaky_relu"),
        resblocks.Linear(hidden_units, activation="leaky_relu"),
        nn.Linear(hidden_units, out_dim),
    ]
    return nn.Sequential(*layers)
class PlaNetBaselineFilter(Estimator):
"""Baseline model.
Observations are embedded with an MLP.
These are then passed through an RNN.
RNN latent states are decoded to produce predicted observations.
TODO: Document this class better.
"""
    def __init__(self, config: "PlaNetBaselineFilterConfig") -> None:
        """Create a PlaNetBaselineFilter.

        Builds the observation encoder/decoder (MLP for vector observations,
        a GrayImageVAE-based pair for image observations), the deterministic
        GRU dynamics, the prior/posterior heads, and a learned initial state.
        """
        super(PlaNetBaselineFilter, self).__init__(config)

        self._cond_channels = 0  # for kvae plotting # TODO support conditioning
        self.units = config.hidden_units
        self.obs_latent_dim = config.latent_obs_dim
        self.latent_dim = config.latent_dim
        self._is_image = False
        # A 1-D obs_shape means vector observations; anything else is an image.
        if len(self.config.dataset.obs_shape) == 1:
            self.obs_dim = self.config.dataset.obs_shape[0]

            # define obs encoder
            self.obs_encoder = make_feature_extractor(
                self.obs_dim, self.obs_latent_dim, self.units
            )

            # define obs decoder: outputs mean and log-variance (2 * obs_dim)
            # from the concatenated (deterministic, stochastic) state.
            self.obs_decoder = make_feature_extractor(
                2 * self.latent_dim, self.obs_dim * 2, self.units
            )
        else:
            # Account for images.
            self._image_model = GrayImageVAE(
                self.config.dataset.obs_shape[1],
                self.config.dataset.obs_shape[2],
                self.config.dataset.obs_shape[0],
                self.obs_latent_dim,
                pixel_res=self.config.dataset.pixel_res,  # type: ignore
                use_mmd=False,
                use_dsd=True,
                cond_channels=0,
            )

            # define obs encoder (reuses the VAE encoder, then projects down)
            self.obs_encoder = nn.Sequential(
                self._image_model._encoder,
                nn.Linear(self.obs_latent_dim * 2, self.obs_latent_dim),
            )

            # define obs decoder (projects the full latent, then VAE decoder)
            self.obs_decoder = nn.Sequential(
                nn.Linear(self.latent_dim * 2, self.obs_latent_dim),
                self._image_model._decoder,
            )
            self._is_image = True

        # Create deterministic dynamics
        self.rnn_layers = nn.GRUCell(self.units, hidden_size=self.latent_dim)

        # Create feature extractors
        # dynamics_encoder: (stochastic state, control) -> GRU input features
        self.dynamics_encoder = make_feature_extractor(
            self.latent_dim + self.config.ctrl_dim, self.units, self.units
        )
        # dynamics_decoder: deterministic state -> prior (mu, log_var)
        self.dynamics_decoder = make_feature_extractor(
            self.latent_dim, self.latent_dim * 2, self.units
        )
        # posterior_encoder: (deterministic state, encoded obs) -> posterior (mu, log_var)
        self.posterior_encoder = make_feature_extractor(
            self.latent_dim + self.obs_latent_dim, self.latent_dim * 2, self.units
        )

        # Learned initial deterministic state.
        z0 = torch.zeros(self.latent_dim, dtype=torch.float)
        self._z0 = torch.nn.Parameter(z0)
def get_initial_hidden_state(
self, batch_size: int, z0: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""See parent class."""
# create initial hidden state
device = next(self.parameters()).device
eye = batch_eye(self.latent_dim, batch_size, device=device)
z0 = eye @ self._z0
return torch.cat((z0, torch.zeros_like(z0)), dim=-1)
def _dynamics_transition(
self, t: torch.Tensor, zd_t: torch.Tensor, zs_t: torch.Tensor, u_t: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""See reference.
https://github.com/google-research/planet/blob/cbe77fc011299becf6c3805d6007c5bf58012f87/planet/models/rssm.py#L96
"""
# belief, rnn_state are the same
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/rnn/python/ops/gru_ops.py#L197-L214.
a_t = torch.cat((zs_t, u_t), dim=-1)
_hidden_z = self.dynamics_encoder(a_t)
zd_t_1 = self.rnn_layers(_hidden_z, zd_t)
zs_t_1, mu_zs, log_var_zs = self._stochastic_state_distribution(t, zd_t_1)
return zd_t_1, zs_t_1, mu_zs, log_var_zs
def _stochastic_state_distribution(
self, t: torch.Tensor, zd_t: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
stochastic_distribution = self.dynamics_decoder(zd_t)
mu_zs_t = stochastic_distribution[..., : self.latent_dim]
log_var_zs_t = stochastic_distribution[..., self.latent_dim :]
zs_t = reparameterize_gauss(mu_zs_t, log_var_zs_t, log_flag=True)
return zs_t, mu_zs_t, log_var_zs_t
def _measurement_update(
self, t: torch.Tensor, zd_t_prior: torch.Tensor, y_t: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""See reference.
https://github.com/google-research/planet/blob/cbe77fc011299becf6c3805d6007c5bf58012f87/planet/models/rssm.py#L124-L128
"""
hidden = torch.cat([zd_t_prior, y_t], dim=-1)
hidden_distribution = self.posterior_encoder(hidden)
mu_zs = hidden_distribution[..., : self.latent_dim]
log_var_zs = hidden_distribution[..., self.latent_dim :]
zs_t_posterior = reparameterize_gauss(mu_zs, log_var_zs, log_flag=True)
return zs_t_posterior, mu_zs, log_var_zs
    # Typing-only overloads: with return_loss_cache=True, forward additionally
    # returns the LossFeatures cache used by loss().
    @overload
    def forward(
        self,
        t: torch.Tensor,
        y: torch.Tensor,
        u: torch.Tensor,
        z0: torch.Tensor,
        return_hidden: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """See forward below."""

    @overload
    def forward(
        self,
        t: torch.Tensor,
        y: torch.Tensor,
        u: torch.Tensor,
        z0: torch.Tensor,
        return_hidden: bool,
        return_loss_cache: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor, LossFeatures]:
        """See forward below."""
    def forward(
        self, t, y, u, z0, return_hidden=False, return_loss_cache=False,
    ):
        """See parent class.

        Note: z_cov is ignored since it has no meaning for this model.

        z0 is of size 2 * latent_dim. The first latent_dim points are the deterministic
        state, the second set of latent_dim points is the stochastic state.

        Runs the RSSM-style filter: at each step, a measurement update yields
        the posterior stochastic state, then a dynamics transition advances
        the deterministic state and produces the next prior. Posterior and
        prior Gaussian parameters are accumulated in a LossFeatures cache.
        """
        T, B = y.shape[:2]

        # encode observations (flatten time/batch, encode, restore (T, B, -1))
        y_shape = y.shape
        encoded_obs_features = self.obs_encoder(y.reshape(-1, *y_shape[2:])).reshape(
            T, B, -1
        )
        loss_cache = LossFeatures.create_empty()

        # throw away the stochastic state since it has no meaning.
        z0 = z0[..., : self.latent_dim]
        # Initial prior is decoded from the (learned) deterministic state.
        zs_0, mu_zs_0, log_var_zs_0 = self._stochastic_state_distribution(
            torch.tensor(0), z0
        )
        zd_list = [z0]
        zs_list = []
        loss_cache.p_mu_prior_list.append(mu_zs_0)
        loss_cache.p_log_var_prior_list.append(log_var_zs_0)
        zd_next = z0
        for t_i, y_i, u_i in zip(t[:-1], encoded_obs_features[:-1], u[:-1]):
            # Posterior from the current observation...
            (
                zs_next,
                mu_zs_t_posterior,
                log_var_zs_t_posterior,
            ) = self._measurement_update(t_i, zd_next, y_i)
            # ...then advance the deterministic state and get the next prior.
            (
                zd_next,
                _unused_zs_t_1,
                mu_zs_t_1_prior,
                log_var_zs_t_1_prior,
            ) = self._dynamics_transition(t_i, zd_next, zs_next, u_i)

            # store hidden states and distributions at time i
            zs_list.append(zs_next)
            zd_list.append(zd_next)
            loss_cache.q_mu_posterior_list.append(mu_zs_t_posterior)
            loss_cache.q_log_var_posterior_list.append(log_var_zs_t_posterior)
            loss_cache.p_mu_prior_list.append(mu_zs_t_1_prior)
            loss_cache.p_log_var_prior_list.append(log_var_zs_t_1_prior)

        # add trailing measurement update
        zs_next, mu_zs_t_posterior, log_var_zs_t_posterior = self._measurement_update(
            t[-1], zd_next, encoded_obs_features[-1]
        )
        zs_list.append(zs_next)
        loss_cache.q_mu_posterior_list.append(mu_zs_t_posterior)
        loss_cache.q_log_var_posterior_list.append(log_var_zs_t_posterior)

        # Full latent state is (deterministic, stochastic) concatenated.
        zd = torch.stack(zd_list)
        zs = torch.stack(zs_list)
        z_all = torch.cat((zd, zs), dim=-1)

        # decode predictions
        if return_hidden:
            # Covariance is an identity placeholder (no meaningful covariance).
            if return_loss_cache:
                return z_all, torch.eye(self.latent_dim).repeat(T, B, 1, 1), loss_cache
            else:
                return z_all, torch.eye(self.latent_dim).repeat(T, B, 1, 1)
        else:
            if return_loss_cache:
                raise NotImplementedError("This should never be reached.")
            return self.latent_to_observation(z_all, None)
    def predict(
        self,
        z0_mu: torch.Tensor,
        z0_cov: torch.Tensor,
        pred_times: torch.Tensor,
        u: torch.Tensor,
        return_hidden: bool = False,
        cond: Any = None,
        with_dist: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """See parent class.

        Note: z_cov is ignored since it has no meaning for this model.

        Rolls out the dynamics forward without observations. With
        ``with_dist=True``, also returns the per-step prior means and
        log-variances (the first entry reuses the initial stochastic state
        and z0_cov as an approximation).
        """
        B = z0_mu.shape[0]
        # Split the concatenated state into (deterministic, stochastic).
        zd_list = [z0_mu[..., : self.latent_dim]]
        zs_list = [z0_mu[..., self.latent_dim :]]
        zmu_list = [zs_list[0]]  # an approximation
        zcov_list = [z0_cov]
        for t_i, u_i in zip(pred_times[:-1], u[:-1, ...]):
            zd_next, zs_next, zs_mu, zs_cov = self._dynamics_transition(
                t_i, zd_list[-1], zs_list[-1], u_i
            )
            zs_list.append(zs_next)
            zd_list.append(zd_next)
            zmu_list.append(zs_mu)
            zcov_list.append(zs_cov)
        zd = torch.stack(zd_list)
        zs = torch.stack(zs_list)
        z_all = torch.cat((zd, zs), dim=-1)

        if with_dist:
            zmu_list = torch.stack(zmu_list)
            zcov_list = torch.stack(zcov_list)
            return z_all, zmu_list, zcov_list

        if return_hidden:
            # Covariance is an identity placeholder (no meaningful covariance).
            return (
                z_all,
                torch.eye(self.latent_dim).repeat(len(pred_times), B, 1, 1),
            )
        else:
            return self.latent_to_observation(z_all, None)
    def latent_to_observation(
        self, z_mu: torch.Tensor, z_cov: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """See parent class.

        Note: z_cov is ignored since it has no meaning for this model.

        NOTE(review): for image models this returns a SINGLE tensor (the
        decoded log-distribution), not the (mu, cov) tuple declared in the
        return annotation — callers special-case this on self._is_image.
        """
        if self._is_image:
            # Flatten leading dims, decode, then restore them before the
            # decoder's trailing (channel, H, W)-like dims.
            shape = z_mu.shape
            log_dsd = self.obs_decoder(z_mu.reshape(-1, shape[-1]))
            log_dsd = log_dsd.reshape(*shape[:-1], *log_dsd.shape[-3:])
            return log_dsd
        else:
            # Decoder emits mean and log-variance; exponentiate the latter
            # into a diagonal covariance.
            decoded_obs_features = self.obs_decoder(z_mu)
            y_mu = decoded_obs_features[..., : self.obs_dim]
            y_cov = torch.diag_embed(
                torch.exp(decoded_obs_features[..., self.obs_dim :])
            )
            return y_mu, y_cov
    def loss(
        self,
        batch_t: torch.Tensor,
        batch_y: torch.Tensor,
        batch_u: torch.Tensor,
        iteration: int,
        avg: bool = True,
    ) -> torch.Tensor:
        """See parent class.

        ELBO-style loss: reconstruction (Gaussian NLL for vectors, image
        reconstruction loss for images) plus the per-step KL between the
        filtered posterior and the dynamics prior, with an optional
        overshooting term selected by config.overshoot = (OverShoot, horizon).
        ``iteration`` is unused here.
        """
        T, B = batch_y.shape[:2]
        z0_p = self.get_initial_hidden_state(B)
        loss_cache: LossFeatures
        z_samples, z_cov, loss_cache = self(
            batch_t, batch_y, batch_u, z0_p, return_hidden=True, return_loss_cache=True
        )
        if self._is_image:
            log_dsd = self.latent_to_observation(z_samples, z_cov)
            reconstruction_loss = self._image_model.get_reconstruction_loss(
                batch_y, log_dsd, avg=False
            ).reshape(T, B)
        else:
            y_mean, y_cov = self.latent_to_observation(z_samples, z_cov)
            reconstruction_loss = -gaussian_log_prob(y_mean, y_cov, batch_y)

        # KL( q(zs | obs) || p(zs | dynamics) ), averaged over latent dims.
        z_mean = torch.stack(loss_cache.q_mu_posterior_list)
        z_log_var = torch.stack(loss_cache.q_log_var_posterior_list)
        z_mean_prior = torch.stack(loss_cache.p_mu_prior_list)
        z_log_var_prior = torch.stack(loss_cache.p_log_var_prior_list)
        z_posterior = torch.distributions.normal.Normal(
            z_mean, torch.exp(z_log_var) ** 0.5 + 1e-6
        )
        z_prior = torch.distributions.normal.Normal(
            z_mean_prior, torch.exp(z_log_var_prior) ** 0.5 + 1e-6
        )
        kl = torch.distributions.kl.kl_divergence(z_posterior, z_prior).mean(-1)
        # Reference for kl divergence computation
        # https://github.com/google-research/planet/blob/cbe77fc011299becf6c3805d6007c5bf58012f87/planet/models/rssm.py#L87-L94

        overshoot_loss = 0
        if self.config.overshoot[0] != OverShoot.NONE:
            # K-step open-loop rollout from each filtered state.
            K = min(self.config.overshoot[1], T - 1)
            for t, (_, z_log_var_t) in enumerate(zip(z_mean[:-K], z_log_var[:-K])):
                z_sample_t_t_k, z_mu_t_t_k, z_log_var_t_t_k = self.predict(
                    z_samples[t],
                    z_log_var_t,
                    batch_t[t : t + K],
                    batch_u[t : t + K, ...],
                    return_hidden=True,
                    with_dist=True,
                )
                if self.config.overshoot[0] == OverShoot.LATENT:
                    # Latent overshoot: KL between filtered posteriors and the
                    # K-step predicted priors.
                    z_posterior = torch.distributions.normal.Normal(
                        z_mean[t : t + K, ...],
                        torch.exp(z_log_var[t : t + K, ...]) ** 0.5 + 1e-6,
                    )
                    z_prior = torch.distributions.normal.Normal(
                        z_mu_t_t_k, torch.exp(z_log_var_t_t_k) ** 0.5 + 1e-6
                    )
                    kl_t_K = torch.distributions.kl.kl_divergence(
                        z_posterior, z_prior
                    ).mean(-1)
                    overshoot_loss += torch.sum(kl_t_K) / (K * B)
                elif self.config.overshoot[0] == OverShoot.OBSERVATION:
                    # Observation overshoot: reconstruction loss on the
                    # K-step predicted observations.
                    if self._is_image:
                        log_dsd = self.latent_to_observation(z_sample_t_t_k, None)
                        overshoot_loss += (
                            self._image_model.get_reconstruction_loss(
                                batch_y[t : t + K], log_dsd, avg=False
                            )
                            .reshape(-1, B)
                            .mean()
                        )
                    else:
                        y_mean, y_cov = self.latent_to_observation(z_sample_t_t_k, None)
                        overshoot_loss += -gaussian_log_prob(
                            y_mean, y_cov, batch_y[t : t + K]
                        ).mean()
            # Average over the number of rollout start times (uses the final
            # loop index t).
            overshoot_loss = overshoot_loss / (t + 1)
        if avg:
            return torch.sum(reconstruction_loss + kl) / (T * B) + overshoot_loss
        else:
            return reconstruction_loss + kl + overshoot_loss
    def log(self, buddy: Buddy, viz: VisData, filter_length: int = 1) -> None:
        """Log information and visualization information.

        Logs scalar eval metrics (NLL, ADE), image/latent visualizations for
        image datasets (basic plots otherwise), and the validation loss.

        Parameters
        ----------
        See parent class.
        """
        nll, ade = self.eval_loss(viz, 10, 20)
        with buddy.log_scope("Eval_Metric"):
            buddy.log_scalar("nll", nll)
            buddy.log_scalar("ade", ade)
        if self._is_image:
            assert isinstance(viz, VisDataIMG)
            # Predict up to twice the filter length, clipped to the data.
            predict_length = min(2 * filter_length, viz.t.shape[0] - filter_length)
            total_len = filter_length + predict_length
            p_img, _ = _img_seq(self, viz, filter_length, predict_length)
            log_image(
                buddy, p_img, "image_trajectories", scope="sequence_visualizations"
            )

            # ---latent visualization--- #
            # smoothed samples
            z0 = self.get_initial_hidden_state(viz.y.shape[1])
            z_mu, _ = self(
                viz.t[:total_len],
                viz.y[:total_len],
                viz.u[:total_len],
                z0,
                return_hidden=True,
            )
            z_samp_s = z_mu.cpu().numpy()
            # Full latent state is 2 * latent_dim wide (deterministic + stochastic).
            if self.latent_dim * 2 == 2:
                p_img_s, _ = _2d_latent_viz(z_samp_s, "Filtered Latent Trajectories")
                ftitle = "filtered_latent_trajectories"
                log_image(buddy, p_img_s, f"{ftitle}", scope="latent_viz")
            else:
                # take the first 3
                # NOTE(review): z_samp_s[:3] slices the first 3 *time steps*
                # (leading axis), not the first 3 latent dims — confirm intent.
                p_img_s, _ = _3d_latent_viz(
                    z_samp_s[:3], "Filtered Latent Trajectories"
                )
                ftitle = "filtered_latent_trajectories"
                log_image(buddy, p_img_s, f"{ftitle}", scope="latent_viz")
        else:
            log_basic(self, buddy, viz, filter_length=filter_length)
        viz_loss = self.loss(viz.t, viz.y, viz.u, 0)  # iteration doesn't matter
        with buddy.log_scope("Validation_BASELINE"):
            buddy.log_scalar("Validation_Loss", viz_loss.item())
    def eval_loss(self, viz: VisData, filt_points: int, pred_points: int) -> Tuple[float, float]:
        """Print and return evaluation losses for the model.

        Filters on the first ``filt_points`` steps of ``viz``, then draws 100
        model rollouts over the next ``pred_points`` steps and scores them
        against the ground truth.

        Parameters
        ----------
        viz : VisData
            The visualization data with which to compute the loss.
        filt_points : int
            The number of points with which to filter.
        pred_points : int
            The desired number of prediction points.

        Returns
        -------
        Tuple[float, float]
            ``(nll, ade)``: negative log-likelihood and average (absolute)
            displacement error of the predictions.
        """
        assert filt_points + pred_points <= len(viz.t)
        # pend_img: image observations are scored under the discrete
        # log-softmax image model.
        if isinstance(viz, VisDataIMG):
            # filtering and prediction time/data
            t_filt = viz.t[:filt_points]
            o_filt = viz.y[:filt_points]
            u_filt = viz.u[:filt_points]
            t_pred = viz.t[(filt_points - 1) : (filt_points + pred_points - 1)]
            o_pred = viz.y[(filt_points - 1) : (filt_points + pred_points - 1)]
            u_pred = viz.u[(filt_points - 1) : (filt_points + pred_points - 1)]
            T, B = o_pred.shape[:2]
            # filtering
            # NOTE(review): the next line immediately overwrites T, B -- the
            # assignment above is dead code.
            T, B = viz.y.shape[:2]
            z0_f = self.get_initial_hidden_state(B)
            z_mu_f, z_cov_f = self(t_filt, o_filt, u_filt, z0_f, return_hidden=True)
            # prediction starts from the last filtered latent belief
            z0_mu_p = z_mu_f[-1]
            z0_cov_p = z_cov_f[-1]
            # prediction
            if self._cond_channels > 0:
                # split off the trailing conditional-context channels
                _o_pred = o_pred[:, :, : -self._cond_channels, 0, 0]
                cond = o_pred[0, :, -self._cond_channels :, 0, 0]  # conditional context
            else:
                _o_pred = o_pred
                cond = None
            # ORIGINAL
            # computing l2 loss over 100 samples
            log_dsd_p = self.predict(z0_mu_p, z0_cov_p, t_pred, u_pred, cond=cond)
            _img_samples = []
            for i in range(100):
                # if i % 10 == 0:
                #     print(f"Sample {i} / 100")
                _img_samples.append(self._image_model.sample_img(log_dsd_p))
            img_samples = torch.stack(_img_samples)
            # sqrt of square == element-wise absolute error, averaged
            loss_ade = torch.mean(torch.sqrt((img_samples - _o_pred.unsqueeze(0)) ** 2))
            # NLL Loss on discrete log-softmax distribution
            o_quant = (_o_pred.squeeze(2) * (self._image_model.pixel_res - 1)).long()
            o_quant = o_quant.reshape(-1, *_o_pred.shape[-2:])
            logits = log_dsd_p.reshape(
                -1, *log_dsd_p.shape[-3:]
            )  # (B, num_cats, img_h, img_w)
            # average loss over batches. dkl already batch-averaged.
            nll_loss_func = nn.NLLLoss(reduction="sum")
            loss_nll = nll_loss_func(logits, o_quant) / logits.shape[0]
            # # reporting the evaluation
            print(
                f"Prediction Loss (filt_pts={filt_points}, pred_pts={pred_points}) \t"
                f"L2 Loss: {loss_ade.item():.5f} \t"
                f"NLL: {loss_nll.item():.3f}"
            )
        # stripped datasets: Gaussian observation model
        else:
            # filtering and prediction time/data
            t_filt = viz.t[:filt_points]
            y_filt = viz.y[:filt_points]
            u_filt = viz.u[:filt_points]
            t_pred = viz.t[(filt_points - 1) : (filt_points + pred_points - 1)]
            y_pred = viz.y[(filt_points - 1) : (filt_points + pred_points - 1)]
            u_pred = viz.u[(filt_points - 1) : (filt_points + pred_points - 1)]
            # filtering
            B = viz.y.shape[1]
            z0_f = self.get_initial_hidden_state(B)
            z_mu_f, z_cov_f = self(t_filt, y_filt, u_filt, z0_f, return_hidden=True)
            # prediction from the last filtered latent belief
            z0_mu_p = z_mu_f[-1]
            z0_cov_p = z_cov_f[-1]
            y_samples = []
            for i in range(100):
                if i % 10 == 0:
                    print(f"Rollout {i} / 100")
                y_mu_p, y_cov_p = self.predict(z0_mu_p, z0_cov_p, t_pred, u_pred)
                y_sample = reparameterize_gauss(y_mu_p, y_cov_p)
                y_samples.append(y_sample)
            y_samples_torch = torch.stack(y_samples)
            mean = y_samples_torch.mean(dim=0)
            var = y_samples_torch.var(dim=0)
            # computing losses (NLL and L2)
            loss_nll = -torch.mean(
                gaussian_log_prob(mean, torch.diag_embed(var), y_pred)
            )
            loss_ade = torch.mean(
                torch.sqrt((y_samples_torch - y_pred.unsqueeze(0)) ** 2)
            )
            # reporting the evaluation
            print(
                f"Prediction Loss (filt_pts={filt_points}, pred_pts={pred_points}) \t"
                f"NLL Loss: {loss_nll.item():.3f} \t ADE Loss: {loss_ade.item():.5f}"
            )
        return loss_nll.item(), loss_ade.item()
def summary_plot(self, viz: VisData, name: str, debug: bool = False) -> None:
"""Produces and saves summary plots. Will produce same plots as log().
See parent class.
"""
# pend_img
if isinstance(viz, VisDataIMG):
DIR_NAME = "summary_plots/" + name
Path(DIR_NAME).mkdir(parents=True, exist_ok=True)
filter_length = 25
pred_length = 50
# ---image sequences--- #
_, seq_fig = _img_seq(self, viz, filter_length, pred_length)
seq_fig.savefig(DIR_NAME + "/img_seq")
else:
super(PlaNetBaselineFilter, self).summary_plot(viz, name, debug)
@dataclass(frozen=True)
class PlaNetBaselineFilterConfig(EstimatorConfig):
    """Baseline specific configuration parameters."""
    # Dimension of the latent observation space.
    latent_obs_dim: int
    # Number of hidden units used by the baseline networks.
    hidden_units: int
    # Overshooting mode and its horizon; disabled by default.
    overshoot: Tuple[OverShoot, int] = (OverShoot.NONE, 0)
    def create(self) -> PlaNetBaselineFilter:
        """Instantiate the estimator described by this config. See parent."""
        return PlaNetBaselineFilter(self)
| 2.421875 | 2 |
colour/utilities/__init__.py | jchwei/colour | 0 | 12772834 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .data_structures import Lookup, Structure, CaseInsensitiveMapping
from .common import (
handle_numpy_errors, ignore_numpy_errors, raise_numpy_errors,
print_numpy_errors, warn_numpy_errors, ignore_python_warnings, batch,
disable_multiprocessing, multiprocessing_pool, is_matplotlib_installed,
is_networkx_installed, is_openimageio_installed, is_pandas_installed,
is_iterable, is_string, is_numeric, is_integer, is_sibling, filter_kwargs,
filter_mapping, first_item, get_domain_range_scale, set_domain_range_scale,
domain_range_scale, to_domain_1, to_domain_10, to_domain_100,
to_domain_degrees, to_domain_int, from_range_1, from_range_10,
from_range_100, from_range_degrees, from_range_int)
from .array import (as_array, as_int_array, as_float_array, as_numeric, as_int,
as_float, as_namedtuple, closest_indexes, closest,
normalise_maximum, interval, is_uniform, in_array, tstack,
tsplit, row_as_diagonal, dot_vector, dot_matrix, orient,
centroid, linear_conversion, lerp, fill_nan, ndarray_write)
from .metrics import metric_mse, metric_psnr
from .verbose import (
ColourWarning, ColourUsageWarning, ColourRuntimeWarning, message_box,
show_warning, warning, runtime_warning, usage_warning, filter_warnings,
suppress_warnings, numpy_print_options, ANCILLARY_COLOUR_SCIENCE_PACKAGES,
ANCILLARY_RUNTIME_PACKAGES, ANCILLARY_DEVELOPMENT_PACKAGES,
ANCILLARY_EXTRAS_PACKAGES, describe_environment)
# Public API of this package, assembled per source submodule.
# Names re-exported from .data_structures.
__all__ = ['Lookup', 'Structure', 'CaseInsensitiveMapping']
# Names re-exported from .common.
__all__ += [
    'handle_numpy_errors', 'ignore_numpy_errors', 'raise_numpy_errors',
    'print_numpy_errors', 'warn_numpy_errors', 'ignore_python_warnings',
    'batch', 'disable_multiprocessing', 'multiprocessing_pool',
    'is_matplotlib_installed', 'is_networkx_installed',
    'is_openimageio_installed', 'is_pandas_installed', 'is_iterable',
    'is_string', 'is_numeric', 'is_integer', 'is_sibling', 'filter_kwargs',
    'filter_mapping', 'first_item', 'get_domain_range_scale',
    'set_domain_range_scale', 'domain_range_scale', 'to_domain_1',
    'to_domain_10', 'to_domain_100', 'to_domain_degrees', 'to_domain_int',
    'from_range_1', 'from_range_10', 'from_range_100', 'from_range_degrees',
    'from_range_int'
]
# Names re-exported from .array.
__all__ += [
    'as_array', 'as_int_array', 'as_float_array', 'as_numeric', 'as_int',
    'as_float', 'as_namedtuple', 'closest_indexes', 'closest',
    'normalise_maximum', 'interval', 'is_uniform', 'in_array', 'tstack',
    'tsplit', 'row_as_diagonal', 'dot_vector', 'dot_matrix', 'orient',
    'centroid', 'linear_conversion', 'fill_nan', 'lerp', 'ndarray_write'
]
# Names re-exported from .metrics.
__all__ += ['metric_mse', 'metric_psnr']
# Names re-exported from .verbose.
__all__ += [
    'ColourWarning', 'ColourUsageWarning', 'ColourRuntimeWarning',
    'message_box', 'show_warning', 'warning', 'runtime_warning',
    'usage_warning', 'filter_warnings', 'suppress_warnings',
    'numpy_print_options', 'ANCILLARY_COLOUR_SCIENCE_PACKAGES',
    'ANCILLARY_RUNTIME_PACKAGES', 'ANCILLARY_DEVELOPMENT_PACKAGES',
    'ANCILLARY_EXTRAS_PACKAGES', 'describe_environment'
]
| 1.65625 | 2 |
telnyx-python/telnyx/aio/api_resources/list_object.py | team-telnyx/telnyx-2fa | 0 | 12772835 | <reponame>team-telnyx/telnyx-2fa
from telnyx import util
from telnyx.six.moves.urllib.parse import quote_plus
from telnyx.telnyx_object import TelnyxObject
class ListObject(TelnyxObject):
    """A paginated collection of Telnyx API resources (async variant).

    Wraps a list response (``data`` items plus ``metadata`` pagination
    info) and provides helpers to fetch further pages and iterate across
    page boundaries.
    """
    async def list(self, **params):
        """Re-fetch this list's URL with the given query ``params``."""
        return await self.request("get", self["url"], params)
    @classmethod
    def empty_list(cls, params, url):
        """Build an empty ListObject used to terminate pagination."""
        list_object = ListObject()
        # NOTE(review): the auto-paging iterators read self._retrieve_params;
        # this relies on TelnyxObject mapping the 'retrieve_params' attribute
        # accordingly -- confirm against the base class.
        list_object.retrieve_params = params
        list_object.url = url
        list_object.data = []
        return list_object
    async def auto_paging_iter(self):
        """Yield every item across all pages using page-number pagination."""
        page = self
        params = dict(self._retrieve_params)
        while True:
            for item in page.data:
                yield item
            if page.empty():
                return
            page = await page.next_page(**params)
    async def auto_paging_iter_by_token(self):
        """Yield every item across all pages using token pagination."""
        page = self
        params = dict(self._retrieve_params)
        while True:
            for item in page.data:
                yield item
            if page.empty():
                return
            page = await page.next_page_by_token(**params)
    def empty(self):
        """Return True when this page carries no items."""
        return len(self.data) == 0
    async def next_page(self, **params):
        """Fetch the page after this one; an empty list when exhausted."""
        if not self.has_more():
            return ListObject.empty_list(params, self.url)
        next_page_number = self.page_number() + 1
        pagination = {"number": next_page_number, "size": self.page_size()}
        params["page"] = pagination
        return await self.list(**params)
    async def next_page_by_token(self, **params):
        """Fetch the next page via the metadata token; empty when absent."""
        if self.token() is None:
            return ListObject.empty_list(params, self.url)
        pagination = {"token": self.token()}
        params["page"] = pagination
        return await self.list(**params)
    async def previous_page(self, **params):
        """Fetch the page before this one (clamped to page 1)."""
        prev_page_number = self.page_number() - 1
        prev_page_number = max(prev_page_number, 1)
        pagination = {"number": prev_page_number, "size": self.page_size()}
        params["page"] = pagination
        return await self.list(**params)
    def has_more(self):
        """Return True when pages exist beyond the current page number."""
        return self.data != [] and self.get("metadata", {}).get(
            "total_pages", 0
        ) > self.get("metadata", {}).get("page_number", 0)
    def token(self):
        """Return the next-page token from metadata, or None."""
        return self.get("metadata", {}).get("next_page_token", None)
    def page_number(self):
        """Return the current page number (defaults to 1)."""
        return self.get("metadata", {}).get("page_number", 1)
    def page_size(self):
        """Return the page size (defaults to 20)."""
        return self.get("metadata", {}).get("page_size", 20)
    async def create(self, **params):
        """POST to this list's URL to create a new resource."""
        return await self.request("post", self["url"], params)
    async def retrieve(self, id, **params):
        """GET a single resource by ``id`` under this list's URL."""
        base = self.get("url")
        id = util.utf8(id)
        extn = quote_plus(id)
        url = "%s/%s" % (base, extn)
        return await self.request("get", url, params)
    def __iter__(self):
        # Iterate directly over the page's items.
        return getattr(self, "data", []).__iter__()
    def __len__(self):
        # Number of items on this page.
        return getattr(self, "data", []).__len__()
| 2.609375 | 3 |
recitations/01_apache_kafka/producer.py | DerekHJH/seai | 1 | 12772836 | <filename>recitations/01_apache_kafka/producer.py
from time import sleep
from json import dumps
from kafka import KafkaProducer
# Kafka producer: JSON-serialize each payload to UTF-8 bytes.
def _serialize(payload):
    return dumps(payload).encode('utf-8')

producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    value_serializer=_serialize,
)

# Publish ten numbered messages, one per second.
for number in range(10):
    producer.send(topic='numtest-kartikri', value={'number': number})
    sleep(1)
invenio_app_rdm/records_ui/views/deposits.py | ntarocco/invenio-app-rdm | 0 | 12772837 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2021 CERN.
# Copyright (C) 2019-2021 Northwestern University.
# Copyright (C) 2021 <NAME>.
#
# Invenio App RDM is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Routes for record-related pages provided by Invenio-App-RDM."""
from flask import current_app, g, render_template
from flask_login import login_required
from invenio_i18n.ext import current_i18n
from invenio_rdm_records.proxies import current_rdm_records
from invenio_rdm_records.resources.config import RDMDraftFilesResourceConfig
from invenio_rdm_records.resources.serializers import UIJSONSerializer
from invenio_rdm_records.services.schemas import RDMRecordSchema
from invenio_rdm_records.services.schemas.utils import dump_empty
from invenio_rdm_records.vocabularies import Vocabularies
from ..utils import set_default_value
from .decorators import pass_draft, service
#
# Helpers
#
def get_form_config(**kwargs):
    """Assemble the React deposit-form configuration.

    Extra keyword arguments are included verbatim in the returned dict.
    """
    form_config = dict(
        vocabularies=Vocabularies.dump(),
        current_locale=str(current_i18n.locale),
        **kwargs
    )
    return form_config
def get_search_url():
    """Return the configured record-search URL."""
    # TODO: this should not be used
    routes = current_app.config["APP_RDM_ROUTES"]
    return routes["record_search"]
def new_record():
    """Build an empty deposit record pre-filled with configured defaults."""
    record = dump_empty(RDMRecordSchema)
    configured = current_app.config.get("APP_RDM_DEPOSIT_FORM_DEFAULTS")
    for path, default in (configured or {}).items():
        set_default_value(record, default, path)
    return record
#
# Views
#
@login_required
def deposit_search():
    """Render the page listing the current user's deposits."""
    searchbar_config = {"searchUrl": get_search_url()}
    return render_template(
        "invenio_app_rdm/records/search_deposit.html",
        searchbar_config=searchbar_config,
    )
@login_required
def deposit_create():
    """Render the deposit form for creating a new record."""
    # A brand-new deposit has no files attached yet.
    empty_files = {
        "default_preview": None,
        "enabled": True,
        "entries": [],
        "links": {},
    }
    return render_template(
        "invenio_app_rdm/records/deposit.html",
        forms_config=get_form_config(createUrl="/api/records"),
        searchbar_config={"searchUrl": get_search_url()},
        record=new_record(),
        files=empty_files,
    )
@login_required
@pass_draft
def deposit_edit(draft=None, pid_value=None):
    """Render the deposit form for editing an existing draft."""
    # Files already attached to the draft.
    draft_files = current_rdm_records.draft_files_service.list_files(
        id_=pid_value,
        identity=g.identity,
        links_config=RDMDraftFilesResourceConfig.links_config,
    )
    # UI-serialized representation of the draft record.
    ui_record = UIJSONSerializer().serialize_object_to_dict(draft.to_dict())
    return render_template(
        "invenio_app_rdm/records/deposit.html",
        forms_config=get_form_config(apiUrl=f"/api/records/{pid_value}/draft"),
        record=ui_record,
        files=draft_files.to_dict(),
        searchbar_config={"searchUrl": get_search_url()},
        permissions=draft.has_permissions_to(['new_version'])
    )
| 1.71875 | 2 |
scripts/alt_evaluation/flair_vs_litbank_gs.py | therosko/Thesis-NER-in-English-novels | 0 | 12772838 | <gh_stars>0
########################################################################################################################
# This script reads in the Litbank gold standard and compairs it to the Flair output files
# The data used here consists of only the 12 overlapping novels with their respecive overlapping
# parts of the text.
#
# Output:
# The script appends a csv with the Precision, Recall, and F1 for the respective book
# and stores the false positives, false negatives, and correct detections
#
# LitBank recognises the following NER tags for the entity type PEOPLE
# B-PER - for the beginning of an entity
# I-PER - for a token inside of an entity
#
# Flair recognises the following NER tags for the entity type PEOPLE
# S-PER - for a single token entity
# B-PER - for the beginning of a multi-token entity
# I-PER - for a token inside of a multi-token entity
# E-PER - for the end of a multi-token entity (due to differences in parsing, sometimes there are two tokens with E-PER)
########################################################################################################################
import pandas as pd
import os
import csv
# import own script
from hyphens import *
from patch_flair_parsing import *
from calculate_metrics import *
# Maps Flair output file stems to the LitBank gold-standard file stems
# (Project Gutenberg id + snake_cased title).
books_mapping = {'AliceInWonderland': '11_alices_adventures_in_wonderland',
                'DavidCopperfield': '766_david_copperfield',
                'Dracula': '345_dracula',
                'Emma': '158_emma',
                'Frankenstein': '84_frankenstein_or_the_modern_prometheus',
                'HuckleberryFinn': '76_adventures_of_huckleberry_finn',
                'MobyDick': '2489_moby_dick',
                'OliverTwist': '730_oliver_twist',
                'PrideAndPrejudice': '1342_pride_and_prejudice',
                'TheCallOfTheWild': '215_the_call_of_the_wild',
                'Ulysses': '4300_ulysses',
                'VanityFair': '599_vanity_fair'}
def check_for_inconsistencies(current_file, gs_lb):
    """Compare tokens of the tagger output against the gold standard.

    Iterates ``current_file`` (two columns: token, tag) in lockstep with
    ``gs_lb`` (columns ``original_word``/``gs``) and reports the first
    position where the tokens diverge.  If ``current_file`` is longer than
    the annotated gold standard, it is cropped to the annotated length.

    Parameters
    ----------
    current_file : pandas.DataFrame
        Tagger output; the token must be the first column.
    gs_lb : pandas.DataFrame
        Gold standard with an ``original_word`` column, indexed like
        ``current_file``.

    Returns
    -------
    pandas.DataFrame
        ``current_file``, cropped to the gold-standard length when it was
        longer; otherwise unchanged.  NOTE(review): the previous version
        truncated into a local variable only, so callers ignoring the
        return value never received the cropped frame -- callers should
        use the returned DataFrame.
    """
    try:
        for index, word, ner in current_file.itertuples(index=True):
            if word != gs_lb["original_word"].loc[index]:
                print("Position ", index, " '", word, "' in current is not the same as '", gs_lb["original_word"].loc[index], "'in gs")
                print(current_file.iloc[index-1:index+4])
                print(gs_lb.iloc[index-1:index+4])
                break
    # Note: some original texts are longer than the annotated files; stop the
    # comparison at the annotated length (Series.loc raises KeyError there).
    except KeyError:
        print("Reached end of annotated file. Cropped currect_file.")
        print("Last word ", word, " in line ", index)
        current_file = current_file.truncate(after=index-1)
    return current_file
def evaluate(merged_flair_litbank):
    """Score Flair predictions ('ner') against gold tags ('gs') row by row.

    Walks the merged DataFrame once, tracking the index range of the entity
    currently being matched, and collects index ranges into three lists:
    correct detections, false positives and false negatives.

    Parameters
    ----------
    merged_flair_litbank : pandas.DataFrame
        Columns ``original_word_x``, ``ner`` (Flair tag),
        ``original_word_y``, ``gs`` (gold tag), aligned on the row index.

    Returns
    -------
    tuple of (list, list, list)
        ``(list_correct, list_false_positives, list_false_negatives)``,
        each a list of lists of row indexes spanning one entity.
    """
    ############################
    # run evaluation
    ############################
    # hold the lines range of the currently detected named entity
    range_ne = []
    # set booleans to keep of track of false positives/negatives of entities spreading over multiple rows
    false_negative_flair = False
    false_positive_flair = False
    # lists to hold mistakes in the detection (used for the recognition of challenges)
    list_false_negatives = []
    list_false_positives = []
    list_correct = []
    # double-check that the merge is correct, calculate incorrect and correct by using lists
    for index, original_word_x, flair, original_word_y, gs in merged_flair_litbank.itertuples(index=True):
        # sanity check: both sides must carry the same token at this row
        if original_word_x != original_word_y:
            print ("Mismatch in row ", index, ": ", original_word_x , " is not the same as ", original_word_y)
            break
        if gs == 'B-PER':
            #single token entity
            if flair == "S-PER" and merged_flair_litbank['gs'].iloc[index+1] == "O":
                list_correct.append([index])
                continue
            #print (index, original_word_x, flair, original_word_y, gs)
            #single token entity (sometimes marked as B-PER nevertheless)
            elif flair == "B-PER" and merged_flair_litbank['ner'].iloc[index+1] == "O" and merged_flair_litbank['gs'].iloc[index+1] == "O":
                list_correct.append([index])
                continue
            # a new gold entity starts: first resolve the entity collected so far
            if len(range_ne) > 0:
                if false_positive_flair == False: #make sure that the mistake is not a false positive, but instead the end of a gold standard entity
                    if merged_flair_litbank.iloc[range_ne[0]]['ner'] == 'B-PER' and merged_flair_litbank.iloc[range_ne[-1]]['ner'] == 'E-PER':
                        for line in range_ne[1:-1]: # check if flair detected it correctly
                            if merged_flair_litbank.iloc[line]['ner'] == 'I-PER':
                                continue
                            else: # if flair didn't detect it
                                false_negative_flair = True
                        if false_negative_flair == True:
                            list_false_negatives.append(range_ne)
                            false_negative_flair = False
                        else:
                            list_correct.append(range_ne)
                        range_ne = []
                        # add the new B-PER to the beginning of an entity
                        range_ne.append(index)
                        continue
                    else:
                        list_false_negatives.append(range_ne)
                        range_ne = []
                        range_ne.append(index)
                        continue
                elif false_positive_flair == True: # if the mistake is a false positive
                    list_false_positives.append(range_ne)
                    range_ne = []
                    false_positive_flair = False
                    range_ne.append(index)
                    continue
            else:
                range_ne.append(index)
                #print (index, original_word_x, flair, original_word_y, gs)
        elif gs == 'I-PER':
            # continuation of the current gold entity
            range_ne.append(index)
        elif gs == 'O':
            if flair in ['S-PER','B-PER','I-PER'] :
                # Flair predicts an entity the gold standard does not have
                if false_positive_flair == False: #first occurence of wrong
                    if len(range_ne) > 0 and false_negative_flair == False: # there was a correct detection immediatelly before
                        list_correct.append(range_ne)
                        range_ne = []
                    # both if statements should be ran!
                    if flair != 'S-PER':
                        false_positive_flair = True
                        range_ne.append(index)
                        continue
                    else: #if S-PER (single token entity)
                        range_ne.append(index)
                        list_false_positives.append(range_ne)
                        range_ne = []
                        continue
                elif false_positive_flair == True:
                    range_ne.append(index)
                    continue
            elif flair == 'E-PER' and false_positive_flair == True:
                # end of a spurious multi-token Flair entity
                range_ne.append(index)
                list_false_positives.append(range_ne)
                range_ne = []
                false_positive_flair = False
                continue
            elif len(range_ne) > 0 and false_positive_flair == False: #if it is the end of a gold standard entity
                if merged_flair_litbank.iloc[range_ne[0]]['ner'] == 'B-PER' and merged_flair_litbank.iloc[range_ne[-1]]['ner'] == 'E-PER':
                    for line in range_ne[1:-1]: # check if flair detected it correctly
                        if merged_flair_litbank.iloc[line]['ner'] == 'I-PER':
                            continue
                        else: # if flair didn't detect it
                            false_negative_flair = True
                    if false_negative_flair == True:
                        list_false_negatives.append(range_ne)
                        false_negative_flair = False
                    else:
                        list_correct.append(range_ne)
                    range_ne = []
                    continue
                else:
                    list_false_negatives.append(range_ne)
                    range_ne = []
                    continue
            elif flair == 'O' and false_positive_flair == True:
                list_false_positives.append(range_ne)
                range_ne = []
                false_positive_flair = False
                continue
            elif flair == 'O' and false_negative_flair == True:
                list_false_negatives.append(range_ne)
                false_negative_flair = False
                range_ne = []
            elif flair == 'O' and false_negative_flair == False and false_positive_flair == False:
                continue
            else:
                # add error handling in case of a mistake
                print ("1. Semantical mistake in analysing line ", index)
                print(merged_flair_litbank.iloc[index-3:index+4])
                break
        else:
            # add error handling in case of a mistake
            print ("Semantical mistake in analysing line ", index)
            break
    # debug output of the collected ranges
    print("list_false_positives",list_false_positives)
    print("list_correct",list_correct)
    print("list_false_negatives",list_false_negatives)
    for i in list_false_positives:
        print(merged_flair_litbank.iloc[i[0]-1:i[-1]+2])
    return list_correct, list_false_positives, list_false_negatives
# Batch evaluation: process every Flair output file in /mnt/flair/.
directory = os.fsencode('/mnt/flair/')
for entry in os.listdir(directory):
    filename = os.fsdecode(entry)
    if not filename.endswith(".tsv"):
        continue
    book_key = str(filename.replace('.tsv', ''))
    litbank_filepath = "/mnt/data/gold_standard/overlap/litbank/" + books_mapping.get(book_key) + ".tsv"
    flair_filepath = '/mnt/flair/' + filename
    print(filename)
    # Read the Flair predictions and normalize tokenization/tags.
    flair_df = pd.read_csv(flair_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=[0, 1])
    flair_df = correct_hyphened(flair_df)
    # Patch parsing inconsistencies between Flair and the gold standard (LitBank).
    flair_df = patch_flair(flair_df, filename)
    flair_df.loc[~flair_df["ner"].isin(['S-PER', 'I-PER', 'B-PER', 'E-PER']), "ner"] = "O"
    # Read the LitBank gold standard, keeping only person tags.
    gold_df = pd.read_csv(litbank_filepath, sep='\t', quoting=csv.QUOTE_NONE, usecols=[0, 1], names=["original_word", "gs"])
    gold_df.loc[~gold_df["gs"].isin(['I-PER', 'B-PER']), "gs"] = "O"
    check_for_inconsistencies(flair_df, gold_df)
    # Align predictions and gold standard row-by-row.
    merged_flair_litbank = pd.merge(flair_df, gold_df, left_index=True, right_index=True)
    print(merged_flair_litbank.head(2))
    # Evaluate and persist metrics plus the detection mistakes.
    list_correct, list_false_positives, list_false_negatives = evaluate(merged_flair_litbank)
    path_evaluation = '/mnt/Git/results/overlap/flair_litbank_evaluation.csv'
    path_fp = '/mnt/Git/results/overlap/flair_litbank_false_positive.csv'
    path_fn = '/mnt/Git/results/overlap/flair_litbank_false_negative.csv'
    get_metrics(merged_flair_litbank, list_correct, list_false_positives, list_false_negatives, path_evaluation, path_fp, path_fn, filename.replace('.tsv', ''))
| 2.421875 | 2 |
api/urls.py | volCommunity/vol-django | 5 | 12772839 | <filename>api/urls.py
from django.conf.urls import url, include
from django.views.generic.base import RedirectView
from rest_framework import routers
from rest_framework.schemas import get_schema_view
from rest_framework_swagger.renderers import OpenAPIRenderer, SwaggerUIRenderer
from . import views
# Swagger/OpenAPI schema view used for interactive API exploration.
schema_view = get_schema_view(title="Vol.community API",
                              renderer_classes=[OpenAPIRenderer, SwaggerUIRenderer]
                              )

# Router generating CRUD routes for each registered viewset (no trailing slash).
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'labels', views.LabelViewSet)
router.register(r'organisations', views.OrganisationViewSet)
router.register(r'sites', views.SiteViewSet)
router.register(r'jobs', views.JobViewSet)

# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    # Allow clients to explore our API using Swagger
    url(r'swagger', schema_view),
    # Redirects to /api to make urls generated by django-rest-api and the swagger
    # plug work as advertised -without the trailing slash urls would look like:
    # "http://127.0.0.1:8000/apijobs" instead of "http://127.0.0.1:8000/api/jobs"
    url(r'^$', RedirectView.as_view(url='/api/', permanent=False), name='index'),
    url(r'/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| 2.125 | 2 |
paths_procedures/show_shortest_path_dialog.py | jamiecook/AequilibraE | 0 | 12772840 | <reponame>jamiecook/AequilibraE
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: Shortest path computation
Purpose: Dialog for computing and displaying shortest paths based on clicks on the map
Original Author: <NAME> (<EMAIL>)
Contributors:
Last edited by: <NAME>
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2016-07-30
Updated: 30/09/2016
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from qgis.core import *
import qgis
from qgis.gui import QgsMapToolEmitPoint
from PyQt4 import QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import uic, QtCore
from random import randint
import sys
import os
from ..common_tools.auxiliary_functions import *
from point_tool import PointTool
from aequilibrae.paths.results import PathResults
# Flag recording whether the compiled AequilibraE path engine imported.
no_binary = False
try:
    from aequilibrae.paths import path_computation
# NOTE(review): bare except also hides non-import failures here --
# consider narrowing to ImportError.
except:
    no_binary = True
    # Fall back to the bundled copy of the package.
    sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/aequilibrae/")

#sys.modules['qgsmaplayercombobox'] = qgis.gui
# Load the dialog's .ui definition generated with Qt Designer.
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_compute_path.ui'))
from ..common_tools import LoadGraphLayerSettingDialog
class ShortestPathDialog(QtGui.QDialog, FORM_CLASS):
    """Dialog computing shortest paths between two nodes clicked on the map.

    The user loads a graph/network, picks origin and destination nodes by
    clicking on the canvas, and the resulting path is rendered as a new
    memory layer.
    """
    def __init__(self, iface):
        """Set up the UI, state holders and signal connections."""
        QDialog.__init__(self)
        # Keep the dialog above the QGIS main window.
        QtGui.QDialog.__init__(self, None, QtCore.Qt.WindowStaysOnTopHint)
        self.iface = iface
        self.setupUi(self)
        self.field_types = {}
        self.centroids = None
        self.node_layer = None
        self.line_layer = None
        self.node_keys = None
        self.node_fields = None
        self.index = None
        self.matrix = None
        # Map tool used to capture canvas clicks.
        self.clickTool = PointTool(self.iface.mapCanvas())
        self.path = standard_path()
        self.node_id = None
        self.res = PathResults()
        self.link_features = None
        # Disabled until a graph has been loaded successfully.
        self.do_dist_matrix.setEnabled(False)
        self.load_graph_from_file.clicked.connect(self.prepare_graph_and_network)
        self.from_but.clicked.connect(self.search_for_point_from)
        self.to_but.clicked.connect(self.search_for_point_to)
        self.do_dist_matrix.clicked.connect(self.produces_path)
    def prepare_graph_and_network(self):
        """Open the graph/layer settings dialog and adopt its results."""
        dlg2 = LoadGraphLayerSettingDialog(self.iface)
        dlg2.show()
        dlg2.exec_()
        if dlg2.error is None and dlg2.graph_ok:
            self.link_features = dlg2.link_features
            self.line_layer = dlg2.line_layer
            self.node_layer = dlg2.node_layer
            self.node_keys = dlg2.node_keys
            self.node_id = dlg2.node_id
            self.node_fields = dlg2.node_fields
            self.index = dlg2.index
            self.graph = dlg2.graph
            self.res.prepare(self.graph)
            self.do_dist_matrix.setEnabled(True)
    def clear_memory_layer(self):
        """Drop the cached link features."""
        self.link_features = None
    def search_for_point_from(self):
        """Arm the click tool to capture the origin node."""
        self.iface.mapCanvas().setMapTool(self.clickTool)
        QObject.connect(self.clickTool, SIGNAL("clicked"), self.fill_path_from)
        self.from_but.setEnabled(False)
    def search_for_point_to(self):
        """Arm the click tool to capture the destination node."""
        self.iface.mapCanvas().setMapTool(self.clickTool)
        QObject.connect(self.clickTool, SIGNAL("clicked"), self.fill_path_to)
        self.to_but.setEnabled(False)
    def search_for_point_to_after_from(self):
        """After the origin was picked, immediately ask for the destination."""
        self.iface.mapCanvas().setMapTool(self.clickTool)
        QObject.connect(self.clickTool, SIGNAL("clicked"), self.fill_path_to)
    def fill_path_to(self):
        """Store the clicked destination node and re-enable its button."""
        self.to_node = self.find_point()
        self.path_to.setText(str(self.to_node))
        self.to_but.setEnabled(True)
    def fill_path_from(self):
        """Store the clicked origin node, then chain to destination picking."""
        self.from_node = self.find_point()
        self.path_from.setText(str(self.from_node))
        self.from_but.setEnabled(True)
        self.search_for_point_to_after_from()
    def find_point(self):
        """Return the network node id nearest to the last canvas click."""
        try:
            point = self.clickTool.point
            nearest = self.index.nearestNeighbor(point, 1)
            self.iface.mapCanvas().setMapTool(None)
            # Fresh tool instance for the next click.
            self.clickTool = PointTool(self.iface.mapCanvas())
            node_id = self.node_keys[nearest[0]]
            index_field = self.node_fields.index(self.node_id)
            node_actual_id = node_id[index_field]
            return node_actual_id
        # NOTE(review): broad except silently returns None on any failure
        # (e.g. nothing loaded yet) -- worth narrowing.
        except:
            pass
    def produces_path(self):
        """Compute the path between the chosen nodes and draw it on the map."""
        self.to_but.setEnabled(True)
        if self.path_from.text().isdigit() and self.path_to.text().isdigit():
            self.res.reset()
            path_computation(int(self.path_from.text()), int(self.path_to.text()), self.graph, self.res)
            if self.res.path is not None:
                ## If you want to do selections instead of new layers, this is how to do it
                # f = self.cb_link_id_field.currentText()
                # t = ''
                # for k in self.res.path[:-1]:
                #     t = t + f + "=" + str(k) + ' or '
                # t = t + f + "=" + str(self.res.path[-1])
                # expr = QgsExpression(t)
                # it = self.line_layer.getFeatures(QgsFeatureRequest(expr))
                #
                # ids = [i.id() for i in it]
                # self.line_layer.setSelectedFeatures(ids)
                # If you want to create new layers
                # This way is MUCH faster
                crs = self.line_layer.dataProvider().crs().authid()
                vl = QgsVectorLayer("LineString?crs={}".format(crs), self.path_from.text() +
                                    " to " + self.path_to.text(), "memory")
                pr = vl.dataProvider()
                # add fields
                pr.addAttributes(self.line_layer.dataProvider().fields())
                vl.updateFields()  # tell the vector layer to fetch changes from the provider
                # add a feature
                all_links = []
                for k in self.res.path:
                    fet = self.link_features[k]
                    all_links.append(fet)
                # add all links to the temp layer
                pr.addFeatures(all_links)
                # add layer to the map
                QgsMapLayerRegistry.instance().addMapLayer(vl)
                # format the layer with a thick line
                registry = QgsSymbolLayerV2Registry.instance()
                lineMeta = registry.symbolLayerMetadata("SimpleLine")
                symbol = QgsLineSymbolV2()
                lineLayer = lineMeta.createSymbolLayer({'width': '1', 'color': self.random_rgb(), 'offset': '0',
                                                        'penstyle': 'solid', 'use_custom_dash': '0',
                                                        'joinstyle': 'bevel', 'capstyle': 'square'})
                symbol.deleteSymbolLayer(0)
                symbol.appendSymbolLayer(lineLayer)
                renderer = QgsSingleSymbolRendererV2(symbol)
                vl.setRendererV2(renderer)
                qgis.utils.iface.mapCanvas().refresh()
            else:
                qgis.utils.iface.messageBar().pushMessage("No path between " + self.path_from.text() +
                                                          ' and ' + self.path_to.text(), '', level=3)
    def random_rgb(self):
        """Return a random 'R,G,B' color component string."""
        rgb = ''
        for i in range(3):
            rgb = rgb + str(randint(0, 255)) + ','
        return rgb[:-1]
    def exit_procedure(self):
        """Close the dialog."""
        self.close()
main.py | PTYin/ESRT | 0 | 12772841 | <filename>main.py
"""Training and testing the hierarchical embedding model for personalized product search
See the following paper for more information on the hierarchical embedding model.
* <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. 2017. Learning a Hierarchical Embedding Model for Personalized ProductSearch. In Proceedings of SIGIR '17
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import yaml
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from esrt import input_feed, utils
from esrt.engine.param_table import ParamTable
# Command-line flags controlling run mode and data locations.
tf.app.flags.DEFINE_boolean("decode", False,
                            "Set to True for testing.")
tf.app.flags.DEFINE_string("test_mode", "product_scores", "Test modes: product_scores -> output ranking results and ranking scores; output_embedding -> output embedding representations for users, items and words. (default is product_scores)")
#tf.app.flags.DEFINE_integer("rank_cutoff", 100,
#						"Rank cutoff for output ranklists.")
tf.app.flags.DEFINE_string("setting_file", "config_common/Musical_Instruments/AEM.yaml", "a yaml contains all model settings.")

# "Automotive", "Cell_Phones_and_Accessories", "Clothing_Shoes_and_Jewelry",
# "Musical_Instruments", "Office_Products", "Toys_and_Games"
# NOTE(review): the help string below duplicates processed_path's; the flag
# actually selects the Amazon category listed above.
tf.app.flags.DEFINE_string("dataset",
                           "Office_Products",
                           "preprocessed path of the raw data")
tf.app.flags.DEFINE_string("processed_path", "/disk/yxk/processed/cf/ordinary/", "preprocessed path of the raw data")

FLAGS = tf.app.flags.FLAGS
def create_model(session, model_name, hparams, forward_only, data_set, model_dir):
    """Create translation model and initialize or load parameters in session."""
    print("Create a learning model %s" % model_name)
    model_cls = utils.find_class(model_name)
    model = model_cls(data_set, hparams, forward_only)
    model.build()
    print("reading ckpt file from ", model_dir)
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt:
        # NOTE(review): assumes model_dir ends with a path separator -- the
        # checkpoint basename is appended by plain string concatenation.
        ckpt_file = model_dir + ckpt.model_checkpoint_path.split('/')[-1]
        print("Reading model parameters from %s" % ckpt_file)
        model.saver.restore(session, ckpt_file)
    else:
        # No checkpoint found: start from freshly initialized variables.
        print("Created model with fresh parameters.")
        session.run(tf.global_variables_initializer())
    return model
def train():
    """Train a product-search embedding model end to end.

    Reads experiment settings from FLAGS.setting_file, constructs the
    dataset / model / input-feed classes named there, then runs an
    epoch-based training loop, saving a checkpoint after every epoch
    and once more after training finishes.
    """
    # parse exp settings file
    aparams, dparams, eparams, hparams = _parse_exp_settings(FLAGS.setting_file)
    data_dir = dparams['data_dir']
    input_train_dir = dparams['input_train_dir']
    # Prepare data.
    print("Reading data in %s" % data_dir)
    # get module(arch) name information
    dataset_str = aparams['dataset_type']
    input_feed_str = aparams['input_feed']
    model_str = aparams['learning_algorithm']
    data_set = utils.find_class(dataset_str)(data_dir, input_train_dir, 'train')
    data_set.sub_sampling(eparams['subsampling_rate'])
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    #config.log_device_placement=True
    with tf.Session(config=config) as sess:
        # Create model.
        print("Creating model")
        model = create_model(sess, model_str, hparams, False, data_set, dparams['model_dir'])
        print("Create a input feed module %s"%input_feed_str)
        # NOTE: this local shadows the imported `input_feed` module inside train().
        input_feed = utils.find_class(input_feed_str)(model, hparams['batch_size'])
        compat_input_feed = CompatInputFeed(input_feed)
        print('Start training')
        words_to_train = float(eparams['max_train_epoch'] * data_set.word_count) + 1
        previous_words = 0.0
        start_time = time.time()
        step_time, loss = 0.0, 0.0
        current_epoch = 0
        current_step = 0
        get_batch_time = 0.0
        training_seq = [i for i in range(data_set.review_size)]
        input_feed.setup_data_set(data_set, words_to_train)
        while True:
            # One pass (epoch) over all reviews in a fresh random order.
            random.shuffle(training_seq)
            input_feed.intialize_epoch(training_seq)
            has_next = True
            while has_next:
                time_flag = time.time()
                batch_input_feed, has_next = input_feed.get_train_batch(debug=False)
                get_batch_time += time.time() - time_flag
                # CompatInputFeed hides per-model differences in feed-dict keys.
                word_idxs = compat_input_feed.word_idxs(batch_input_feed, model)
                learning_rate = compat_input_feed.learning_rate(batch_input_feed, model)
                if len(word_idxs) > 0:
                    time_flag = time.time()
                    step_loss = model.step(sess, batch_input_feed, False)
                    # print(step_loss)
                    #step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
                    loss += step_loss / eparams['steps_per_checkpoint']
                    current_step += 1
                    # print(step_loss)
                    step_time += time.time() - time_flag

                # Once in a while, we print statistics.
                if current_step % eparams['steps_per_checkpoint'] == 0:
                    print("Epoch %d Words %d/%d: lr = %5.3f loss = %6.2f words/sec = %5.2f prepare_time %.2f step_time %.2f\r" %
                        (current_epoch, input_feed.finished_word_num, input_feed.words_to_train, learning_rate, loss,
                            (input_feed.finished_word_num- previous_words)/(time.time() - start_time), get_batch_time, step_time), end="")
                    sys.stdout.flush()
                    step_time, loss = 0.0, 0.0
                    # NOTE(review): current_step is reset to 1 (not 0) here,
                    # presumably so the modulo check does not fire again on the
                    # very next batch — confirm this is intentional.
                    current_step = 1
                    get_batch_time = 0.0

            sys.stdout.flush()
            previous_words = input_feed.finished_word_num
            start_time = time.time()
            current_epoch += 1
            # Checkpoint at the end of every epoch.
            if not os.path.exists(dparams['model_dir']):
                os.makedirs(dparams['model_dir'])
            checkpoint_path_best = os.path.join(dparams['model_dir'], "ProductSearchEmbedding.ckpt")
            model.saver.save(sess, checkpoint_path_best, global_step=model.global_step)
            if current_epoch >= eparams['max_train_epoch']:
                break
        # Final checkpoint after the training loop exits.
        checkpoint_path_best = os.path.join(dparams['model_dir'], "ProductSearchEmbedding.ckpt")
        model.saver.save(sess, checkpoint_path_best, global_step=model.global_step)
def output_embedding():
    """Load a trained model and print a cosine-similarity analysis of user embeddings.

    Compares anchor users against their precomputed neighbors (loaded from
    .npy files under FLAGS.processed_path) and prints the normalised
    per-neighbor similarity scores.
    """
    # parse exp settings file
    aparams, dparams, eparams, hparams = _parse_exp_settings(FLAGS.setting_file)
    data_dir = dparams['data_dir']
    input_train_dir = dparams['input_train_dir']
    # read data
    print("Reading data in %s" % data_dir)
    # get module(arch) name information
    dataset_str = aparams['dataset_type']
    input_feed_str = aparams['input_feed']
    model_str = aparams['learning_algorithm']
    # create dataset object
    data_set = utils.find_class(dataset_str)(data_dir, input_train_dir, 'test')
    data_set.read_train_product_ids(input_train_dir)
    current_step = 0
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Anchor user ids and their neighbor-id lists, produced by an earlier
    # experiment step ("esrt_step2") — assumed to be integer index arrays.
    anchors = np.load(os.path.join(FLAGS.processed_path, FLAGS.dataset, 'experiments', 'anchors_esrt_step2.npy'))
    neighbors = np.load(os.path.join(FLAGS.processed_path, FLAGS.dataset, 'experiments', 'all_neighbors_esrt_step2.npy'))
    def cosine_similarity(a, b):
        """Cosine similarity along the last axis (broadcasts over leading dims)."""
        norm_a = np.sqrt(np.sum(a * a, -1))
        norm_b = np.sqrt(np.sum(b * b, -1))
        dot = np.sum(a * b, -1)
        score = dot / (norm_a * norm_b + 1e-8)
        return score
    def all_cosine_similarity(a, b):
        """Pairwise cosine-similarity matrix between all rows of a and b (currently unused)."""
        norm_a = np.expand_dims(np.sqrt(np.sum(a * a, -1)), -1)
        norm_b = np.expand_dims(np.sqrt(np.sum(b * b, -1)), 0)
        dot = a @ b.T
        score = dot / (norm_a @ norm_b + 1e-8)
        return score
    with tf.Session(config=config) as sess:
        # Create model.
        print("Read model")
        model = create_model(sess, model_str, hparams, True, data_set, dparams['model_dir'])
        user_emb = model.user_emb.eval()
        # word_emb = model.word_emb.eval()
        # user = user_emb[673]
        # np.expand_dims(user, -2)
        # similarity = cosine_similarity(user, word_emb)
        # print('-----------------------------')
        # print(np.array(sorted(tf.nn.softmax(similarity, axis=-1).eval().tolist(), reverse=True))[:20])
        # print('-----------------------------')
        # NOTE(review): [:32] presumably caps the analysis to the first 32
        # anchors/neighbor rows — confirm against the step-2 experiment.
        anchor_embedding = np.expand_dims(user_emb[anchors], -2)[:32]
        neighbor_embeddings = user_emb[neighbors][:32]
        # Sum similarities over anchors, leaving one score per neighbor slot.
        similarity = cosine_similarity(anchor_embedding, neighbor_embeddings).sum(axis=0)
        # all_similarity = all_cosine_similarity(user_emb, user_emb).mean()
        print('-----------------------------')
        # print(similarity.mean(axis=0) / all_similarity)
        # print(tf.nn.softmax(similarity, axis=-1).eval().tolist())
        # Print similarities normalised to sum to 1, tab-separated.
        print('\t'.join(str(round(e, 3)) for e in (similarity / similarity.sum()).tolist()))
        print('-----------------------------')
        # input_feed= utils.find_class(input_feed_str)(model, hparams['batch_size'])
def get_product_scores():
    """Rank products for every (user, query) pair in the test set.

    Restores a trained model, scores all products for each test batch,
    sorts them, and writes the ranked lists (plus scores) through
    ``data_set.output_ranklist``.
    """
    # parse exp settings file
    aparams, dparams, eparams, hparams = _parse_exp_settings(FLAGS.setting_file)
    data_dir = dparams['data_dir']
    input_train_dir = dparams['input_train_dir']
    # read data
    print("Reading data in %s" % data_dir)
    # get module(arch) name information
    dataset_str = aparams['dataset_type']
    input_feed_str = aparams['input_feed']
    model_str = aparams['learning_algorithm']
    # create dataset object
    data_set = utils.find_class(dataset_str)(data_dir, input_train_dir, 'test')
    data_set.read_train_product_ids(input_train_dir)
    current_step = 0
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Create model (forward_only=True: no training ops).
        print("Read model")
        model = create_model(sess, model_str, hparams, True, data_set, dparams['model_dir'])
        input_feed = utils.find_class(input_feed_str)(model, hparams['batch_size'])
        user_ranklist_map = {}
        user_ranklist_score_map = {}
        print('Start Testing')
        words_to_train = float(eparams['max_train_epoch'] * data_set.word_count) + 1
        test_seq = [i for i in range(data_set.review_size)]
        input_feed.setup_data_set(data_set, words_to_train)
        input_feed.intialize_epoch(test_seq)
        input_feed.prepare_test_epoch(debug=True)
        has_next = True
        while has_next:
            batch_input_feed, has_next, uqr_pairs = input_feed.get_test_batch(debug=True)
            # get params
            user_idxs = batch_input_feed[model.user_idxs.name]
            if len(user_idxs) > 0:
                user_product_scores, _ = model.step(sess, batch_input_feed, True)
                current_step += 1
                # Record a ranked product list for each (user, query) pair
                # in this batch.  (A leftover no-op debug loop that printed
                # nothing was removed here.)
                for i in range(len(uqr_pairs)):
                    u_idx, p_idx, q_idx, r_idx = uqr_pairs[i]
                    # Product indexes sorted by descending score for this pair.
                    sorted_product_idxs = sorted(range(len(user_product_scores[i])),
                                        key=lambda k: user_product_scores[i][k], reverse=True)
                    user_ranklist_map[(u_idx, q_idx)],user_ranklist_score_map[(u_idx, q_idx)] = data_set.compute_test_product_ranklist(u_idx,
                                        user_product_scores[i], sorted_product_idxs, eparams['rank_cutoff']) #(product name, rank)
                if current_step % eparams['steps_per_checkpoint']== 0:
                    print("Finish test review %d/%d\r" %
                            (input_feed.cur_uqr_i, len(input_feed.test_seq)), end="")
        data_set.output_ranklist(user_ranklist_map, user_ranklist_score_map, dparams['model_dir'], hparams['similarity_func'], debug=True)
    return
# def output_embedding(exp_settings):
# # parse exp settings file
# aparams, dparams, eparams, hparams = _parse_exp_settings(FLAGS.setting_file)
#
# # Hack the file path when use python -m test.main
# data_dir = os.path.join(os.path.dirname(__file__), '..', eparams.data_dir)
# input_train_dir = os.path.join(os.path.dirname(__file__), '..', eparams.input_train_dir)
#
# # read data
# print("Reading data in %s" % data_dir)
#
# # get module(arch) name information
# dataset_str = aparams.dataset_type
# input_feed_str = aparams.input_feed
# model_str = aparams.learning_algorithm
#
# # create dataset object
# data_set = utils.find_class(dataset_str)(data_dir, input_train_dir, 'test')
# data_set.read_train_product_ids(dparams.input_train_dir)
#
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
# # Create model.
# print("Read model")
# model = create_model(sess, exp_settings['arch']['learning_algorithm'], hparams, True, data_set, dparams.model_dir)
# input_feed = utils.find_class(input_feed_str)(model, hparams.batch_size)
# user_ranklist_map = {}
# print('Start Testing')
# words_to_train = float(eparams.max_train_epoch * data_set.word_count) + 1
# test_seq = [i for i in range(data_set.review_size)]
# input_feed.setup_data_set(data_set, words_to_train)
# input_feed.intialize_epoch(test_seq)
# input_feed.prepare_test_epoch()
# has_next = True
# user_idxs, product_idxs, query_word_idxs, review_idxs, word_idxs, context_word_idxs, learning_rate, has_next, uqr_pairs = input_feed.get_test_batch()
#
# if len(user_idxs) > 0:
# part_1 , part_2 = model.step(sess, learning_rate, user_idxs, product_idxs, query_word_idxs,
# review_idxs, word_idxs, context_word_idxs, True, FLAGS.test_mode)
#
# # record the results
# user_emb = part_1[0]
# product_emb = part_1[1]
# Wu = part_1[2]
# data_set.output_embedding(user_emb, dparams.model_dir + 'user_emb.txt')
# data_set.output_embedding(product_emb, dparams.model_dir + 'product_emb.txt')
# data_set.output_embedding(Wu, dparams.model_dir + 'Wu.txt')
# return
def _parse_exp_settings(settings_file):
    """Parse the experiment yaml file.

    Returns a 4-tuple ``(arch, data, experiment, hparams)`` where the first
    three are plain dicts taken from the yaml's top-level sections and
    ``hparams`` is a ParamTable loaded from the same file.
    """
    hparams = ParamTable()
    hparams.update_from_yaml(settings_file)
    with open(settings_file, 'r') as handle:
        settings = yaml.load(handle, Loader=yaml.SafeLoader)
    return settings['arch'], settings['data'], settings['experiment'], hparams
def main(_):
    """Entry point for ``tf.app.run``: dispatch on the command-line flags.

    ``--decode`` selects test mode (embedding dump or product ranking);
    otherwise the model is trained.  Each branch parses the settings file
    itself, so the previously-unused parse of FLAGS.setting_file here was
    removed.
    """
    if FLAGS.decode:
        if FLAGS.test_mode == 'output_embedding':
            output_embedding()
        else:
            get_product_scores()
    else:
        train()
##################################
###### Helper utils ##############
##################################
class CompatInputFeed():
    """Adapter that hides per-model feed-dict key differences.

    The various input feeds (HEM, LSE, AEM, ZAM, DREM) expose the same
    data under different placeholder names; this wrapper lets the
    training loop fetch them uniformly.  The original one-branch-per-type
    chains were collapsed into isinstance tuples since all non-DREM
    branches returned identical expressions.
    """

    def __init__(self, input_feed):
        self.input_feed = input_feed

    def word_idxs(self, batch_input_feed, model):
        """Return the word-index batch from ``batch_input_feed``.

        DREM stores word indexes inside the model's relation dict;
        every other supported feed exposes a plain ``word_idxs``
        placeholder.  Raises ValueError for unknown feed types.
        """
        if isinstance(self.input_feed, (input_feed.HEMInputFeed,
                                        input_feed.LSEInputFeed,
                                        input_feed.AEMInputFeed)):
            return batch_input_feed[model.word_idxs.name]
        elif isinstance(self.input_feed, input_feed.DREMInputFeed):
            return batch_input_feed[model.relation_dict['word']['idxs'].name]
        elif isinstance(self.input_feed, input_feed.ZAMInputFeed):
            return batch_input_feed[model.word_idxs.name]
        else:
            raise ValueError("The input feed class %s is not defined" % str(self.input_feed))

    def learning_rate(self, batch_input_feed, model):
        """Return the learning-rate value from ``batch_input_feed``.

        All supported feeds use the same ``learning_rate`` placeholder.
        Raises ValueError for unknown feed types.
        """
        if isinstance(self.input_feed, (input_feed.HEMInputFeed,
                                        input_feed.LSEInputFeed,
                                        input_feed.AEMInputFeed,
                                        input_feed.DREMInputFeed,
                                        input_feed.ZAMInputFeed)):
            return batch_input_feed[model.learning_rate.name]
        raise ValueError("The input feed class %s is not defined" % str(self.input_feed))
if __name__ == "__main__":
    # Uncomment to pin specific GPUs for this process:
    # os.environ["CUDA_VISIBLE_DEVICES"] = '2, 3'
    print(__file__)
    # tf.app.run parses the flags defined above and calls main(_).
    tf.app.run()
| 2.5625 | 3 |
services/storage/close_uploader.py | samle-appsbroker/acquire | 21 | 12772842 | <filename>services/storage/close_uploader.py
from Acquire.Storage import DriveInfo


def run(args):
    """Close the uploader for a file - this stops new chunks being uploaded."""
    # Pull and normalise the request arguments up front so a missing key
    # fails before we touch the drive.
    uid_of_drive = str(args["drive_uid"])
    uid_of_file = str(args["file_uid"])
    upload_secret = str(args["secret"])

    target_drive = DriveInfo(drive_uid=uid_of_drive)
    target_drive.close_uploader(file_uid=uid_of_file, secret=upload_secret)

    return True
| 2.59375 | 3 |
dinette/migrations/0001_initial.py | agiliq/Dinette | 17 | 12772843 | <filename>dinette/migrations/0001_initial.py
from south.db import db
from django.db import models
from dinette.models import *
class Migration:
    """South schema migration 0001: create the initial dinette tables.

    Auto-generated South migration (frozen ORM definitions in ``models``).
    The only hand edit is restoring the ``password`` field path in the
    frozen ``auth.user`` model, which had been corrupted to a
    ``<PASSWORD>`` placeholder by an automated sanitizer.
    """

    def forwards(self, orm):
        """Create all dinette tables."""

        # Adding model 'DinetteUserProfile'
        db.create_table('dinette_dinetteuserprofile', (
            ('id', orm['dinette.DinetteUserProfile:id']),
            ('user', orm['dinette.DinetteUserProfile:user']),
            ('last_activity', orm['dinette.DinetteUserProfile:last_activity']),
            ('userrank', orm['dinette.DinetteUserProfile:userrank']),
            ('last_posttime', orm['dinette.DinetteUserProfile:last_posttime']),
            ('photo', orm['dinette.DinetteUserProfile:photo']),
            ('signature', orm['dinette.DinetteUserProfile:signature']),
        ))
        db.send_create_signal('dinette', ['DinetteUserProfile'])

        # Adding model 'Ftopics'
        db.create_table('dinette_ftopics', (
            ('id', orm['dinette.Ftopics:id']),
            ('category', orm['dinette.Ftopics:category']),
            ('subject', orm['dinette.Ftopics:subject']),
            ('slug', orm['dinette.Ftopics:slug']),
            ('message', orm['dinette.Ftopics:message']),
            ('file', orm['dinette.Ftopics:file']),
            ('attachment_type', orm['dinette.Ftopics:attachment_type']),
            ('filename', orm['dinette.Ftopics:filename']),
            ('viewcount', orm['dinette.Ftopics:viewcount']),
            ('replies', orm['dinette.Ftopics:replies']),
            ('created_on', orm['dinette.Ftopics:created_on']),
            ('updated_on', orm['dinette.Ftopics:updated_on']),
            ('posted_by', orm['dinette.Ftopics:posted_by']),
            ('announcement_flag', orm['dinette.Ftopics:announcement_flag']),
            ('is_closed', orm['dinette.Ftopics:is_closed']),
            ('is_sticky', orm['dinette.Ftopics:is_sticky']),
            ('is_hidden', orm['dinette.Ftopics:is_hidden']),
        ))
        db.send_create_signal('dinette', ['Ftopics'])

        # Adding model 'SiteConfig'
        db.create_table('dinette_siteconfig', (
            ('id', orm['dinette.SiteConfig:id']),
            ('name', orm['dinette.SiteConfig:name']),
            ('tag_line', orm['dinette.SiteConfig:tag_line']),
        ))
        db.send_create_signal('dinette', ['SiteConfig'])

        # Adding model 'Category'
        db.create_table('dinette_category', (
            ('id', orm['dinette.Category:id']),
            ('name', orm['dinette.Category:name']),
            ('slug', orm['dinette.Category:slug']),
            ('description', orm['dinette.Category:description']),
            ('ordering', orm['dinette.Category:ordering']),
            ('super_category', orm['dinette.Category:super_category']),
            ('created_on', orm['dinette.Category:created_on']),
            ('updated_on', orm['dinette.Category:updated_on']),
            ('posted_by', orm['dinette.Category:posted_by']),
        ))
        db.send_create_signal('dinette', ['Category'])

        # Adding model 'Reply'
        db.create_table('dinette_reply', (
            ('id', orm['dinette.Reply:id']),
            ('topic', orm['dinette.Reply:topic']),
            ('posted_by', orm['dinette.Reply:posted_by']),
            ('message', orm['dinette.Reply:message']),
            ('file', orm['dinette.Reply:file']),
            ('attachment_type', orm['dinette.Reply:attachment_type']),
            ('filename', orm['dinette.Reply:filename']),
            ('created_on', orm['dinette.Reply:created_on']),
            ('updated_on', orm['dinette.Reply:updated_on']),
        ))
        db.send_create_signal('dinette', ['Reply'])

        # Adding model 'SuperCategory'
        db.create_table('dinette_supercategory', (
            ('id', orm['dinette.SuperCategory:id']),
            ('name', orm['dinette.SuperCategory:name']),
            ('description', orm['dinette.SuperCategory:description']),
            ('ordering', orm['dinette.SuperCategory:ordering']),
            ('created_on', orm['dinette.SuperCategory:created_on']),
            ('updated_on', orm['dinette.SuperCategory:updated_on']),
            ('posted_by', orm['dinette.SuperCategory:posted_by']),
        ))
        db.send_create_signal('dinette', ['SuperCategory'])

    def backwards(self, orm):
        """Drop all dinette tables (reverse of forwards)."""

        # Deleting model 'DinetteUserProfile'
        db.delete_table('dinette_dinetteuserprofile')

        # Deleting model 'Ftopics'
        db.delete_table('dinette_ftopics')

        # Deleting model 'SiteConfig'
        db.delete_table('dinette_siteconfig')

        # Deleting model 'Category'
        db.delete_table('dinette_category')

        # Deleting model 'Reply'
        db.delete_table('dinette_reply')

        # Deleting model 'SuperCategory'
        db.delete_table('dinette_supercategory')

    # Frozen ORM model definitions used by South to reconstruct the schema
    # at this point in migration history.
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dinette.category': {
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderated_by': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cposted'", 'to': "orm['auth.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '110', 'db_index': 'True'}),
            'super_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.SuperCategory']"}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'dinette.dinetteuserprofile': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_activity': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'last_posttime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'signature': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'userrank': ('django.db.models.fields.CharField', [], {'default': "'Junior Member'", 'max_length': '30'})
        },
        'dinette.ftopics': {
            'announcement_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'attachment_type': ('django.db.models.fields.CharField', [], {'default': "'nofile'", 'max_length': '20'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.Category']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'default': "'dummyname.txt'", 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1034', 'db_index': 'True'}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'viewcount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'dinette.reply': {
            'attachment_type': ('django.db.models.fields.CharField', [], {'default': "'nofile'", 'max_length': '20'}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'default': "'dummyname.txt'", 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dinette.Ftopics']"}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'dinette.siteconfig': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tag_line': ('django.db.models.fields.TextField', [], {'max_length': '100'})
        },
        'dinette.supercategory': {
            'accessgroups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']"}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['dinette']
| 2.078125 | 2 |
examples/docs_snippets/docs_snippets/overview/configuration/configured_solid_example.py | AndersonReyes/dagster | 0 | 12772844 | <gh_stars>0
from dagster import Field, configured, solid
@solid(
    config_schema={'iterations': int, 'word': Field(str, is_required=False, default_value='hello')}
)
def example_solid(context):
    """Log the configured word once per configured iteration."""
    word = context.solid_config['word']
    count = context.solid_config['iterations']
    for _ in range(count):
        context.log.info(word)
# returns a solid named 'example_solid'
new_solid = configured(example_solid)({'iterations': 6, 'word': 'wheaties'})
# returns a solid named 'configured_example_solid'
another_new_solid = configured(example_solid, name='configured_example')(
{'iterations': 6, 'word': 'wheaties'}
)
| 2.34375 | 2 |
mlxtend/mlxtend/evaluate/permutation.py | WhiteWolf21/fp-growth | 0 | 12772845 | # <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
#
# Nonparametric Permutation Test
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from itertools import combinations
from math import factorial
try:
    from nose.tools import nottest
except ImportError:
    # Use a no-op decorator if nose is not available
    def nottest(f):
        """Identity decorator: returns the function unchanged."""
        return f
# decorator to prevent nose to consider
# this as a unit test due to "test" in the name
@nottest
def permutation_test(x, y, func='x_mean != y_mean', method='exact',
                     num_rounds=1000, seed=None):
    """
    Nonparametric permutation test

    Parameters
    -------------
    x : list or numpy array with shape (n_datapoints,)
        A list or 1D numpy array of the first sample
        (e.g., the treatment group).
    y : list or numpy array with shape (n_datapoints,)
        A list or 1D numpy array of the second sample
        (e.g., the control group).
    func : custom function or str (default: 'x_mean != y_mean')
        function to compute the statistic for the permutation test.
        - If 'x_mean != y_mean', uses
        `func=lambda x, y: np.abs(np.mean(x) - np.mean(y)))`
         for a two-sided test.
        - If 'x_mean > y_mean', uses
        `func=lambda x, y: np.mean(x) - np.mean(y))`
         for a one-sided test.
        - If 'x_mean < y_mean', uses
        `func=lambda x, y: np.mean(y) - np.mean(x))`
         for a one-sided test.
    method : 'approximate' or 'exact' (default: 'exact')
        If 'exact' (default), all possible permutations are considered.
        If 'approximate' the number of drawn samples is
        given by `num_rounds`.
        Note that 'exact' is typically not feasible unless the dataset
        size is relatively small.
    num_rounds : int (default: 1000)
        The number of permutation samples if `method='approximate'`.
    seed : int or None (default: None)
        The random seed for generating permutation samples if
        `method='approximate'`.

    Returns
    ----------
    p-value under the null hypothesis

    Examples
    -----------
    For usage examples, please see
    http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/

    """
    if method not in ('approximate', 'exact'):
        raise AttributeError('method must be "approximate"'
                             ' or "exact", got %s' % method)

    # Map the string shortcuts onto concrete statistic functions.
    if isinstance(func, str):

        if func not in (
                'x_mean != y_mean', 'x_mean > y_mean', 'x_mean < y_mean'):
            raise AttributeError('Provide a custom function'
                                 ' lambda x,y: ... or a string'
                                 ' in ("x_mean != y_mean", '
                                 '"x_mean > y_mean", "x_mean < y_mean")')

        elif func == 'x_mean != y_mean':
            def func(x, y):
                return np.abs(np.mean(x) - np.mean(y))

        elif func == 'x_mean > y_mean':
            def func(x, y):
                return np.mean(x) - np.mean(y)

        else:
            def func(x, y):
                return np.mean(y) - np.mean(x)

    rng = np.random.RandomState(seed)

    m, n = len(x), len(y)
    combined = np.hstack((x, y))

    more_extreme = 0.
    # Observed test statistic on the original (unpermuted) split.
    reference_stat = func(x, y)

    # Note that whether we compute the combinations or permutations
    # does not affect the results, since the number of permutations
    # n_A specific objects in A and n_B specific objects in B is the
    # same for all combinations in x_1, ... x_{n_A} and
    # x_{n_{A+1}}, ... x_{n_A + n_B}
    # In other words, for any given number of combinations, we get
    # n_A! x n_B! times as many permutations; hoewever, the computed
    # value of those permutations that are merely re-arranged combinations
    # does not change. Hence, the result, since we divide by the number of
    # combinations or permutations is the same, the permutations simply have
    # "n_A! x n_B!" as a scaling factor in the numerator and denominator
    # and using combinations instead of permutations simply saves computational
    # time

    if method == 'exact':
        # Enumerate every way to assign m of the pooled values to group x.
        for indices_x in combinations(range(m + n), m):

            indices_y = [i for i in range(m + n) if i not in indices_x]
            diff = func(combined[list(indices_x)], combined[indices_y])

            # NOTE(review): strict '>' excludes permutations that tie the
            # observed statistic — confirm whether '>=' is intended here.
            if diff > reference_stat:
                more_extreme += 1.

        # Total number of distinct splits (true division yields a float).
        num_rounds = factorial(m + n) / (factorial(m)*factorial(n))

    else:
        # Monte Carlo approximation: shuffle the pooled sample and
        # re-split at position m each round.
        for i in range(num_rounds):
            rng.shuffle(combined)
            if func(combined[:m], combined[m:]) > reference_stat:
                more_extreme += 1.

    return more_extreme / num_rounds
| 3.265625 | 3 |
samples/aci-epg-reports-in-yaml.py | richardstrnad/acitoolkit | 351 | 12772846 | #!/usr/bin/env python
"""
Simple application that logs on to the APIC and displays all
EPGs.
"""
import socket
import yaml
import sys
from acitoolkit import Credentials, Session, Tenant, AppProfile, EPG, Endpoint
def main():
    """
    Main show EPGs routine

    Logs in to the APIC, walks tenants -> app profiles -> EPGs -> endpoints,
    and prints the whole hierarchy as YAML on stdout.

    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the EPGs.')
    creds = Credentials('apic', description)
    args = creds.get()
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')
        return

    # Download all of the tenants, app profiles, and EPGs
    # and store the names as tuples in a list
    tenants = Tenant.get_deep(session)
    tenants_list = []
    for tenant in tenants:
        tenants_dict = {}
        tenants_dict['name'] = tenant.name

        # Optional fields are only included when non-empty.
        if tenant.descr:
            tenants_dict['description'] = tenant.descr

        tenants_dict['app-profiles'] = []
        for app in tenant.get_children(AppProfile):
            app_profiles = {'name': app.name}
            if app.descr:
                app_profiles['description'] = app.descr
            app_profiles['epgs'] = []

            for epg in app.get_children(EPG):
                epgs_info = {'name': epg.name}
                if epg.descr:
                    epgs_info['description'] = epg.descr
                epgs_info['endpoints'] = []

                for endpoint in epg.get_children(Endpoint):
                    endpoint_info = {'name': endpoint.name}
                    # '0.0.0.0' marks an endpoint with no learned IP.
                    if endpoint.ip != '0.0.0.0':
                        endpoint_info['ip'] = endpoint.ip
                        # Best-effort reverse-DNS lookup; failures are ignored.
                        try:
                            hostname = socket.gethostbyaddr(endpoint.ip)[0]
                        except socket.error:
                            hostname = None
                        if hostname:
                            endpoint_info['hostname'] = hostname
                    if endpoint.descr:
                        endpoint_info['description'] = endpoint.descr

                    epgs_info['endpoints'].append(endpoint_info)
                app_profiles['epgs'].append(epgs_info)
            tenants_dict['app-profiles'].append(app_profiles)
        tenants_list.append(tenants_dict)

    tenants_info = {'tenants': tenants_list}
    # Emit the collected hierarchy as human-readable YAML.
    print(yaml.safe_dump(tenants_info, sys.stdout,
                         indent=4, default_flow_style=False))
if __name__ == '__main__':
    # Exit quietly on Ctrl-C instead of printing a traceback.
    try:
        main()
    except KeyboardInterrupt:
        pass
| 2.546875 | 3 |
hello.py | blackswanburst/mikemccllstr-python-minecraft | 3 | 12772847 | <reponame>blackswanburst/mikemccllstr-python-minecraft<filename>hello.py
# Minimal Minecraft Pi demo: report the player's position in chat, then
# teleport the player to (0, 64, 0).
import mcpi.minecraft as minecraft
import mcpi.block as block

# Connect to the running Minecraft game on this machine.
world = minecraft.Minecraft.create()

# Current player coordinates (floats, truncated by %d when printed).
[x,y,z] = world.player.getPos()
world.postToChat( "Position is: %d %d %d" % (x,y,z) )

# Move the player to a fixed spot above ground level.
world.player.setPos( 0, 64, 0 )
| 2.71875 | 3 |
examples/org-website/map_sample.py | tonytheonlypony/hazelcast-python-client | 98 | 12772848 | <reponame>tonytheonlypony/hazelcast-python-client<gh_stars>10-100
import hazelcast

# Start the Hazelcast Client and connect to an already running Hazelcast Cluster on 127.0.0.1
client = hazelcast.HazelcastClient()
# Get the Distributed Map from Cluster.
# blocking() makes map operations return results directly rather than futures.
my_map = client.get_map("my-distributed-map").blocking()
# Standard Put and Get
my_map.put("key", "value")
my_map.get("key")
# Concurrent Map methods, optimistic updating
my_map.put_if_absent("somekey", "somevalue")
my_map.replace_if_same("key", "value", "newvalue")
# Shutdown this Hazelcast Client
client.shutdown()
| 2.734375 | 3 |
tests/test_write.py | sphh/RPLCD | 231 | 12772849 | <reponame>sphh/RPLCD
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import pytest
from RPLCD.gpio import CharLCD
from RPLCD.common import LCD_SETDDRAMADDR
def test_write_simple(mocker, charlcd_kwargs):
    """
    Write "HelloWorld" to the display.
    """
    lcd = CharLCD(**charlcd_kwargs)
    send_data = mocker.patch.object(lcd, '_send_data')

    message = 'HelloWorld'
    lcd.write_string(message)

    # One data byte per character...
    assert send_data.call_count == len(message)
    # ...and each byte is the character's ASCII code, in order.
    sent = [args for args, _ in send_data.call_args_list]
    assert sent == [(ord(char),) for char in message]
def test_caching(mocker, charlcd_kwargs):
    """
    Characters should only be written if they have changed
    """
    lcd = CharLCD(**charlcd_kwargs)
    send_data = mocker.patch.object(lcd, '_send_data')
    send_instruction = mocker.patch.object(lcd, '_send_instruction')

    # First write: every character is new, so all five bytes go out.
    lcd.write_string('hello')
    assert send_data.call_count == 5
    written = [args for args, _ in send_data.call_args_list]
    assert written == [(ord(char),) for char in 'hello']

    lcd.home()
    send_data.reset_mock()
    send_instruction.reset_mock()

    # Second write differs only at positions 2 and 3 ('ll' -> '77'):
    # only those two bytes are sent, plus cursor moves around them.
    lcd.write_string('he77o')
    assert send_data.call_count == 2
    assert send_instruction.call_count == 3
    written = [args for args, _ in send_data.call_args_list]
    moves = [args for args, _ in send_instruction.call_args_list]
    # Skip over the unchanged 'h' and 'e'...
    assert moves[0] == (LCD_SETDDRAMADDR | 1,)
    assert moves[1] == (LCD_SETDDRAMADDR | 2,)
    # ...send the two changed characters...
    assert written == [(ord('7'),), (ord('7'),)]
    # ...and skip the unchanged trailing 'o'.
    assert moves[2] == (LCD_SETDDRAMADDR | 5,)
@pytest.mark.parametrize(['charmap', 'ue'], [
    ('A00', 0b11110101),
    ('A02', 0b11111100),
])
def test_charmap(mocker, charmap, ue, charlcd_kwargs):
    """
    The charmap should be used. The "ü" Umlaut should be encoded correctly.
    """
    lcd = CharLCD(charmap=charmap, **charlcd_kwargs)
    send = mocker.patch.object(lcd, '_send_data')

    lcd.write_string('Züri')

    assert send.call_count == 4, 'call count was %d' % send.call_count
    # 'Z', 'r' and 'i' are plain ASCII; 'ü' maps to the charmap-specific code.
    sent = [args for args, _ in send.call_args_list]
    assert sent == [(ord('Z'),), (ue,), (ord('r'),), (ord('i'),)]
@pytest.mark.parametrize(['rows', 'cols'], [
    (2, 16),
    (4, 20),
])
def test_write_newline(mocker, rows, cols, charlcd_kwargs):
    """
    Write text containing CR/LF chars to the display.
    """
    lcd = CharLCD(rows=rows, cols=cols, **charlcd_kwargs)
    send_data = mocker.patch.object(lcd, '_send_data')
    send_instruction = mocker.patch.object(lcd, '_send_instruction')

    message = '\nab\n\rcd'
    lcd.write_string(message)

    # Every character produces either a data byte or a cursor instruction.
    assert send_data.call_count + send_instruction.call_count == len(message)
    written = [args for args, _ in send_data.call_args_list]
    moves = [args for args, _ in send_instruction.call_args_list]

    # Leading '\n' jumps to the second row (DDRAM 0x40), then 'ab' goes out.
    assert moves[0] == (0x80 + 0x40,), moves
    assert written[0] == (ord('a'),), written
    assert written[1] == (ord('b'),), written
    # '\n' then '\r': a 2-row display wraps back to the first row, while a
    # 4-row display advances to the third row (DDRAM offset = cols).
    row_offset = 0 if rows == 2 else cols
    assert moves[1] == (0x80 + row_offset + 2,), moves
    assert moves[2] == (0x80 + row_offset + 0,), moves
    assert written[2] == (ord('c'),), written
    assert written[3] == (ord('d'),), written
| 2.46875 | 2 |
visualizer/equityparser.py | mrhewitt/genotick-cs | 0 | 12772850 | <reponame>mrhewitt/genotick-cs
#from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import pandas as pd
class EquityParser:
    """Load a profit CSV and plot a (mean-filtered) equity curve.

    The CSV is expected to have three unnamed columns: date, cumulative
    profit and per-trade profit (in ticks).  Both profit columns are
    scaled by *multiplier* (e.g. the tick value) on load.
    """

    def __init__(self, profit_file, multiplier):
        """Read *profit_file* and scale profits by *multiplier*.

        :param profit_file: path to a headerless CSV (date, cumulative, per-trade)
        :param multiplier: scale factor applied to both profit columns
        """
        # Column 0 holds the trade dates and becomes the (parsed) index.
        self.pnl = pd.read_csv(profit_file, header=None,
                               names=['date', 'cummulative', 'pertrade'],
                               index_col=0, parse_dates=True)
        self.multiplier = multiplier
        # Vectorized multiply instead of a per-element .apply(lambda ...).
        self.pnl['cummulative'] *= multiplier
        self.pnl['pertrade'] *= multiplier

    def _apply_rolling_mean(self, period, data):
        """Add a *period*-bar rolling mean of the cumulative profit to *data*.

        Records the generated column name in ``self.mean_column`` so that
        ``_render`` can refer to it.  The first ``period - 1`` values are NaN.
        """
        self.mean_column = '{} Period Rolling Mean'.format(period)
        data[self.mean_column] = data['cummulative'].rolling(window=period).mean()

    def _render(self, df, filename):
        """Plot the raw, rolling-mean and filtered curves and save to *filename*."""
        plot = df[['cummulative', self.mean_column, 'filtered']].plot(figsize=(16, 6))
        plot.legend(['Cummulative Profit', self.mean_column, 'Filtered'])
        plot.set_xlabel(None)
        plot.set_ylabel('Profit In Ticks')
        plot.yaxis.grid(True)
        plot.xaxis.grid(True)
        plot.get_figure().savefig(filename)

    def show_equity_curve(self, period, filtered):
        """Build the filtered equity curve and save the chart as ``equity.png``.

        When *filtered* is truthy, simulate trading only while the raw
        cumulative curve is at or above its *period*-bar rolling mean:
        trading stops after a bar that closes below the mean and resumes
        once a bar closes back at or above it.

        NOTE(review): when *filtered* is falsy this method is a no-op
        (nothing is rendered) — confirm that is intended.
        """
        if not filtered:
            return

        self._apply_rolling_mean(period, self.pnl)

        trading = True          # True while the raw curve sits above its mean
        running_total = 0
        filtered_curve = []
        for _, row in self.pnl.iterrows():
            if trading:
                # Take the trade, then stop trading if it dropped the raw
                # curve below its rolling mean.  (NaN means < NaN is False,
                # so the warm-up bars before the mean exists keep trading.)
                running_total += row['pertrade']
                if row['cummulative'] < row[self.mean_column]:
                    trading = False
            elif row['cummulative'] >= row[self.mean_column]:
                # Raw curve recovered above the mean: resume trading from
                # the next bar onwards (this bar's trade is skipped).
                trading = True
            # One filtered-equity point per bar, whether or not we traded.
            filtered_curve.append(running_total)

        self.pnl = self.pnl.assign(filtered=pd.Series(filtered_curve).values)
        self._render(self.pnl, 'equity.png')
| 3.015625 | 3 |