gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import importlib
import json
import os
import re
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.translation.trans_real import TranslationError
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
try:
from django.utils.translation.trans_real import (compile_messages,
needs_compilation, has_reload_i18n_setting, purge_i18n_caches)
has_dynamic_compile = True
except Exception as ex:
has_dynamic_compile = False
def set_language(request):
    """
    Redirect to a given url while setting the chosen language in the
    session or cookie. The url and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it must
    only be accessed as a POST request. If called as a GET request, it will
    redirect to the page in the request (the 'next' parameter) without changing
    any state.
    """
    # Renamed from `next` so the builtin next() is not shadowed.
    next_url = request.POST.get('next', request.GET.get('next'))
    if not is_safe_url(url=next_url, host=request.get_host()):
        # Unsafe/missing target: fall back to the referrer, then to the root.
        next_url = request.META.get('HTTP_REFERER')
        if not is_safe_url(url=next_url, host=request.get_host()):
            next_url = '/'
    response = http.HttpResponseRedirect(next_url)
    if request.method == 'POST':
        lang_code = request.POST.get('language', None)
        if lang_code and check_for_language(lang_code):
            if hasattr(request, 'session'):
                request.session['_language'] = lang_code
            else:
                # No session backend: persist the choice in a cookie instead.
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
def get_formats():
    """
    Return all format strings required for i18n to work, as a dict mapping
    each format-setting name to a text value (or list of text values) for
    the currently active locale.
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    formats = {}
    # get_format() already resolves each value across settings and the
    # locale's format modules, so the original outer loop over
    # get_format_modules() only repeated the same lookups and was dropped.
    for attr in FORMAT_SETTINGS:
        value = get_format(attr)
        if isinstance(value, (six.string_types, int)):
            formats[attr] = smart_text(value)
        elif isinstance(value, (tuple, list)):
            formats[attr] = [smart_text(v) for v in value]
    return formats
# Django template rendered by render_javascript_catalog(); emits a JS i18n
# library exposing gettext/ngettext/pgettext/npgettext, interpolate and
# get_format, backed by the message catalog and formats injected as JSON.
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render the message catalog and formats into the JS library template."""
    def indent(s):
        # Shift nested JSON lines over so they align inside the generated JS.
        return s.replace('\n', '\n ')

    catalog_str = None
    if catalog:
        catalog_str = indent(json.dumps(catalog, sort_keys=True, indent=2))
    formats_str = indent(json.dumps(get_formats(), sort_keys=True, indent=2))
    context = Context({
        'catalog_str': catalog_str,
        'formats_str': formats_str,
        'plural': plural,
    })
    rendered = Template(js_catalog_template).render(context)
    return http.HttpResponse(rendered, 'text/javascript')
# Minimal JS emitted when catalog generation hits a duplicate message
# definition: log the error in the browser console instead of failing hard.
js_duplicate_key_error_template = """
console.log("{{ error_string }}");
"""
def render_duplicate_error_js(error):
    """Return a JS response that logs ``error`` to the browser console."""
    # Escape newlines so the message stays a single JS string literal.
    # NOTE(review): other characters (e.g. quotes) are left to the template
    # engine's default autoescaping — confirm that is acceptable here.
    message = error.replace("\n", "\\n")
    rendered = Template(js_duplicate_key_error_template).render(
        Context({"error_string": message})
    )
    return http.HttpResponse(rendered, 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
    """
    Build the translation catalog for ``locale``/``domain`` from the given
    packages plus settings.LOCALE_PATHS.

    Returns a ``(catalog, plural)`` tuple: ``catalog`` maps msgid to a
    msgstr (or a list of plural msgstrs), and ``plural`` is the gettext
    plural expression, or None if the catalog does not define one.
    """
    if has_dynamic_compile:
        # Optionally recompile message files on the fly and drop stale caches.
        if has_reload_i18n_setting():
            purge_i18n_caches()
    default_locale = to_locale(settings.LANGUAGE_CODE)
    packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            if has_dynamic_compile:
                if needs_compilation(domain, path, 'en'):
                    compile_messages(domain, path, 'en')
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                if has_dynamic_compile:
                    if needs_compilation(domain, path, default_locale):
                        compile_messages(domain, path, default_locale)
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    if has_dynamic_compile:
                        if needs_compilation(domain, path, locale):
                            compile_messages(domain, path, locale)
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        # The metadata entry (empty msgid) may carry the plural expression.
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            # Plural entries are keyed by (msgid, plural_index).
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # BUG FIX: the original indexed maxcnts with the stale loop variable
        # `msgid` left over from the previous loop; each entry must use its
        # own key `k`, otherwise plural lists get the wrong length.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
    """
    Returns "identity" versions of the JavaScript i18n functions -- i.e.,
    versions that don't actually do anything.
    """
    # No catalog and no plural expression: the template then emits the
    # identity gettext implementations.
    return render_javascript_catalog(catalog=None, plural=None)
def javascript_catalog(request, domain='djangojs', packages=None):
    """
    Returns the selected language catalog as a javascript library.

    Receives the list of packages to check for translations in the
    packages parameter either from an infodict or as a +-delimited
    string from the request. Default is 'django.conf'.

    Additionally you can override the gettext domain for this view,
    but usually you don't want to do that, as JavaScript messages
    go to the djangojs domain. But this might be needed if you
    deliver your JavaScript source from Django templates.
    """
    locale = to_locale(get_language())
    # Allow an explicit ?language=xx override when it is a valid language.
    if request.GET and 'language' in request.GET:
        if check_for_language(request.GET['language']):
            locale = to_locale(request.GET['language'])
    if packages is None:
        packages = ['django.conf']
    if isinstance(packages, six.string_types):
        packages = packages.split('+')
    try:
        catalog, plural = get_javascript_catalog(locale, domain, packages)
    except TranslationError as ex:
        # We know how to handle one specific type of error - so let's do
        # something useful there. Substring test replaces the needlessly
        # anchored regex the original used.
        if 'duplicate message definition' in ex.args[0]:
            return render_duplicate_error_js(ex.args[0])
        # BUG FIX: TranslationError has no `.msg` attribute, so the original
        # `raise Exception(ex.msg)` would itself fail with AttributeError.
        # Re-raise the original exception instead of masking it.
        raise
    return render_javascript_catalog(catalog, plural)
| |
import pickle
from functools import partial
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
import matplotlib as mpl
import sklearn.datasets as datasets
from torch import nn, optim
from fastai.learner import Learner
from fastai.optimizer import OptimWrapper
from fastai.tabular.all import TabularDataLoaders
from fastai.callback.all import EarlyStoppingCallback, SaveModelCallback
import mlflow
import mlflow.fastai
from mlflow.fastai.callback import __MlflowFastaiCallback
from mlflow.utils.autologging_utils import BatchMetricsLogger
from tests.conftest import tracking_uri_mock # pylint: disable=unused-import
from mlflow.tracking.client import MlflowClient
mpl.use("Agg")
np.random.seed(1337)
NUM_EPOCHS = 3
MIN_DELTA = 99999999 # Forces earlystopping
def iris_dataframe():
    """Return the first two iris feature columns as a pandas DataFrame."""
    bunch = datasets.load_iris()
    feature_cols = bunch.feature_names[:2]
    return pd.DataFrame(bunch.data[:, :2], columns=feature_cols)
@pytest.fixture(scope="session")
def iris_data():
    # Session-scoped fixture: builds fastai TabularDataLoaders over the first
    # two iris features, with the class index cast to float32 so the toy
    # model can treat it as a regression target.
    iris = datasets.load_iris()
    X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
    y = pd.Series(iris.target, name="label", dtype=np.float32)
    return TabularDataLoaders.from_df(
        df=pd.concat([X, y], axis=1), cont_names=list(X.columns), y_names="label"
    )
class Model(nn.Module):
    """Tiny two-layer regression net used by the fastai autolog tests."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(2, 3)
        self.linear2 = nn.Linear(3, 1)

    def forward(self, _, x_cont):
        # fastai tabular models receive (categorical, continuous) inputs;
        # only the continuous block is used here.
        return self.linear2(self.linear1(x_cont))
def splitter(model):
    """
    Split model parameters into two groups (one per linear layer) so
    fastai can fine-tune them with separate learning rates.
    """
    params = list(model.parameters())
    first_layer, second_layer = params[:2], params[2:]
    return [first_layer, second_layer]
def fastai_tabular_model(data, **kwargs):
    """Build a fine-tunable Learner over ``data`` using the toy Model."""
    net = Model()
    loss = nn.MSELoss()
    return Learner(data, net, loss_func=loss, splitter=splitter, **kwargs)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
def test_fastai_autolog_ends_auto_created_run(iris_data, fit_variant):
    # Autologging should end the run it implicitly created once fit returns.
    mlflow.fastai.autolog()
    model = fastai_tabular_model(iris_data)
    if fit_variant == "fit_one_cycle":
        model.fit_one_cycle(1)
    elif fit_variant == "fine_tune":
        # NOTE(review): "fine_tune" is not in the parametrize list above, so
        # this branch is currently unreachable — confirm whether the variant
        # should be added to the parametrization or this branch removed.
        model.fine_tune(1, freeze_epochs=1)
    else:
        model.fit(1)
    assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
def test_fastai_autolog_persists_manually_created_run(iris_data, fit_variant):
    # A run opened by the test itself must remain active after autologged fit.
    mlflow.fastai.autolog()
    with mlflow.start_run() as run:
        model = fastai_tabular_model(iris_data)
        if fit_variant == "fit_one_cycle":
            model.fit_one_cycle(NUM_EPOCHS)
        elif fit_variant == "fine_tune":
            # NOTE(review): "fine_tune" is absent from the parametrize list,
            # so this branch is unreachable as written — confirm intent.
            model.fine_tune(NUM_EPOCHS - 1, freeze_epochs=1)
        else:
            model.fit(NUM_EPOCHS)
        assert mlflow.active_run()
        assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def fastai_random_tabular_data_run(iris_data, fit_variant):
    # pylint: disable=unused-argument
    # Runs one autologged training using the indirectly parametrized
    # ``fit_variant`` and returns (model, run), where ``run`` is the most
    # recent run in the default experiment ("0").
    mlflow.fastai.autolog()
    model = fastai_tabular_model(iris_data)
    if fit_variant == "fit_one_cycle":
        model.fit_one_cycle(NUM_EPOCHS)
    elif fit_variant == "fine_tune":
        model.fine_tune(NUM_EPOCHS - 1, freeze_epochs=1)
    else:
        model.fit(NUM_EPOCHS)
    client = mlflow.tracking.MlflowClient()
    return model, client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle", "fine_tune"])
def test_fastai_autolog_logs_expected_data(fastai_random_tabular_data_run, fit_variant):
    # pylint: disable=unused-argument
    model, run = fastai_random_tabular_data_run
    data = run.data
    # Testing metrics are logged
    assert "train_loss" in data.metrics
    assert "valid_loss" in data.metrics
    for o in model.metrics:
        assert o.name in data.metrics
    # Testing explicitly passed parameters are logged correctly
    if fit_variant != "fine_tune":
        assert "n_epoch" in data.params
        assert data.params["n_epoch"] == str(NUM_EPOCHS)
    else:
        assert "epochs" in data.params
        assert data.params["epochs"] == str(NUM_EPOCHS - 1)
    # Testing unwanted parameters are not logged
    assert "cbs" not in data.params
    assert "callbacks" not in data.params
    assert "learn" not in data.params
    # Testing optimizer parameters are logged
    assert "opt_func" in data.params
    assert data.params["opt_func"] == "Adam"
    assert "wd" in data.params
    assert "sqr_mom" in data.params
    if fit_variant == "fit_one_cycle":
        # One-cycle schedules log min/max/init/final stats per hyper-param.
        for param in ["lr", "mom"]:
            for stat in ["min", "max", "init", "final"]:
                assert param + "_" + stat in data.params
    elif fit_variant == "fine_tune":
        # fine_tune logs the frozen phase's params under a "freeze_" prefix.
        freeze_prefix = "freeze_"
        assert freeze_prefix + "wd" in data.params
        assert freeze_prefix + "sqr_mom" in data.params
        assert freeze_prefix + "epochs" in data.params
        assert data.params[freeze_prefix + "epochs"] == str(1)
        for prefix in [freeze_prefix, ""]:
            for param in ["lr", "mom"]:
                for stat in ["min", "max", "init", "final"]:
                    assert prefix + param + "_" + stat in data.params
    else:
        assert "lr" in data.params
        assert "mom" in data.params
    # Testing module_summary.txt is saved (comment previously said
    # "model_summary.txt", which did not match the assertion below)
    client = mlflow.tracking.MlflowClient()
    artifacts = client.list_artifacts(run.info.run_id)
    artifacts = map(lambda x: x.path, artifacts)
    assert "module_summary.txt" in artifacts
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle", "fine_tune"])
def test_fastai_autolog_opt_func_expected_data(iris_data, fit_variant):
    """The wrapped optimizer's class name should be logged as ``opt_func``."""
    # pylint: disable=unused-argument
    mlflow.fastai.autolog()
    model = fastai_tabular_model(iris_data, opt_func=partial(OptimWrapper, opt=optim.Adam))
    if fit_variant == "fit_one_cycle":
        model.fit_one_cycle(NUM_EPOCHS)
    elif fit_variant == "fine_tune":
        model.fine_tune(NUM_EPOCHS - 1, freeze_epochs=1)
    else:
        model.fit(NUM_EPOCHS)
    client = mlflow.tracking.MlflowClient()
    latest_run_id = client.list_run_infos(experiment_id="0")[0].run_id
    params = client.get_run(latest_run_id).data.params
    assert "opt_func" in params and params["opt_func"] == "Adam"
    if fit_variant == "fine_tune":
        # The frozen phase logs under the "freeze_" prefix as well.
        assert "freeze_opt_func" in params and params["freeze_opt_func"] == "Adam"
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_fastai_autolog_log_models_configuration(log_models, iris_data):
    """A "model" artifact is logged iff ``log_models`` is enabled."""
    mlflow.fastai.autolog(log_models=log_models)
    learner = fastai_tabular_model(iris_data)
    learner.fit(NUM_EPOCHS)
    client = mlflow.tracking.MlflowClient()
    run_id = client.list_run_infos(experiment_id="0")[0].run_id
    artifact_paths = [info.path for info in client.list_artifacts(run_id)]
    assert ("model" in artifact_paths) == log_models
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit_one_cycle", "fine_tune"])
def test_fastai_autolog_logs_default_params(fastai_random_tabular_data_run, fit_variant):
    # pylint: disable=unused-argument
    # Checks that per-schedule lr/mom data shows up among the run's
    # artifacts with names like "lr.<ext>". NOTE(review): presumably these
    # are logged as figure/plot artifacts by autolog — confirm against the
    # mlflow.fastai autolog implementation.
    client = mlflow.tracking.MlflowClient()
    run_id = client.list_run_infos(experiment_id="0")[0].run_id
    artifacts = client.list_artifacts(run_id)
    artifacts = list(map(lambda x: x.path, artifacts))
    if fit_variant == "fit_one_cycle":
        for param in ["lr", "mom"]:
            assert any(a.startswith(param + ".") for a in artifacts)
    elif fit_variant == "fine_tune":
        # fine_tune additionally logs the frozen phase under "freeze_".
        freeze_prefix = "freeze_"
        for prefix in [freeze_prefix, ""]:
            for param in ["lr", "mom"]:
                assert any(a.startswith(prefix + param + ".") for a in artifacts)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
def test_fastai_autolog_model_can_load_from_artifact(fastai_random_tabular_data_run):
    """A logged model must be loadable and usable for prediction."""
    run_id = fastai_random_tabular_data_run[1].info.run_id
    client = mlflow.tracking.MlflowClient()
    artifact_paths = [a.path for a in client.list_artifacts(run_id)]
    assert "model" in artifact_paths
    loaded = mlflow.fastai.load_model("runs:/" + run_id + "/model")
    wrapper = mlflow.fastai._FastaiModelWrapper(loaded)
    # Smoke-test prediction on the iris features.
    wrapper.predict(iris_dataframe())
def get_fastai_random_data_run_with_callback(iris_data, fit_variant, callback, patience, tmpdir):
    # pylint: disable=unused-argument
    # Helper (deliberately not a fixture) so tests can invoke it inside a
    # patched context: trains with the requested callback combination and
    # returns (model, most-recent-run).
    mlflow.fastai.autolog()
    model = fastai_tabular_model(iris_data, model_dir=tmpdir)
    if callback == "early":
        # MIN_DELTA is huge, so early stopping fires as soon as patience allows.
        cb = EarlyStoppingCallback(patience=patience, min_delta=MIN_DELTA)
        model.add_cb(cb)
    elif callback == "save_and_early_stop":
        early_cb = EarlyStoppingCallback(patience=patience, min_delta=MIN_DELTA)
        save_cb = SaveModelCallback(min_delta=MIN_DELTA)
        model.add_cbs([save_cb, early_cb])
    if fit_variant == "fit_one_cycle":
        model.fit_one_cycle(NUM_EPOCHS)
    elif fit_variant == "fine_tune":
        model.fine_tune(NUM_EPOCHS - 1, freeze_epochs=1)
    else:
        model.fit(NUM_EPOCHS)
    client = mlflow.tracking.MlflowClient()
    return model, client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.fixture
def fastai_random_data_run_with_callback(iris_data, fit_variant, callback, patience, tmpdir):
    """Fixture wrapper around the callback-run helper."""
    return get_fastai_random_data_run_with_callback(
        iris_data=iris_data,
        fit_variant=fit_variant,
        callback=callback,
        patience=patience,
        tmpdir=tmpdir,
    )
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
@pytest.mark.parametrize("callback", ["save_and_early_stop"])
@pytest.mark.parametrize("patience", [0, 1, 5])
def test_fastai_autolog_save_and_early_stop_logs(fastai_random_data_run_with_callback):
    """With save + early-stop callbacks, each epoch logs valid_loss and the
    persisted model predicts like the in-memory one."""
    model, run = fastai_random_data_run_with_callback
    client = mlflow.tracking.MlflowClient()
    history = client.get_metric_history(run.info.run_id, "valid_loss")
    assert len(history) == len(model.recorder.values)
    model_uri = "runs:/{run_id}/{artifact_path}".format(
        run_id=run.info.run_id, artifact_path="model"
    )
    reloaded = mlflow.fastai.load_model(model_uri=model_uri)
    expected = mlflow.fastai._FastaiModelWrapper(model).predict(iris_dataframe())
    actual = mlflow.fastai._FastaiModelWrapper(reloaded).predict(iris_dataframe())
    np.testing.assert_array_almost_equal(expected, actual)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
def test_fastai_autolog_early_stop_logs(fastai_random_data_run_with_callback, patience):
    """Early-stopping params and per-epoch metrics should be logged."""
    model, run = fastai_random_data_run_with_callback
    params = run.data.params
    expected_params = {
        "early_stop_patience": str(patience),
        "early_stop_monitor": "valid_loss",
        "early_stop_comp": "less",
        "early_stop_min_delta": "-{}".format(MIN_DELTA),
    }
    for name, value in expected_params.items():
        assert name in params
        assert params[name] == value
    client = mlflow.tracking.MlflowClient()
    history = client.get_metric_history(run.info.run_id, "valid_loss")
    assert len(history) == len(model.recorder.values)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
def test_fastai_autolog_early_stop_no_stop_does_not_log(
    fastai_random_data_run_with_callback, patience
):
    """With patience above NUM_EPOCHS the run trains and logs all epochs."""
    model, run = fastai_random_data_run_with_callback
    params = run.data.params
    assert "early_stop_patience" in params
    assert params["early_stop_patience"] == str(patience)
    assert "early_stop_monitor" in params
    assert params["early_stop_monitor"] == "valid_loss"
    assert "early_stop_comp" in params
    assert "early_stop_min_delta" in params
    assert params["early_stop_min_delta"] == "-{}".format(MIN_DELTA)
    client = mlflow.tracking.MlflowClient()
    history = client.get_metric_history(run.info.run_id, "valid_loss")
    # No early stop fired: every one of the NUM_EPOCHS epochs ran and logged.
    assert len(model.recorder.values) == NUM_EPOCHS
    assert len(history) == NUM_EPOCHS
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
def test_fastai_autolog_non_early_stop_callback_does_not_log(fastai_random_data_run_with_callback):
    """Without an early-stop callback no early-stop params/metrics appear."""
    model, run = fastai_random_data_run_with_callback
    for param_name in (
        "early_stop_patience",
        "early_stop_monitor",
        "early_stop_comp",
        "early_stop_min_delta",
    ):
        assert param_name not in run.data.params
    for metric_name in ("stopped_epoch", "restored_epoch"):
        assert metric_name not in run.data.metrics
    client = mlflow.tracking.MlflowClient()
    history = client.get_metric_history(run.info.run_id, "valid_loss")
    # All epochs ran and each logged a valid_loss value.
    assert len(model.recorder.values) == NUM_EPOCHS
    assert len(history) == NUM_EPOCHS
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_one_cycle"])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
def test_fastai_autolog_batch_metrics_logger_logs_expected_metrics(
    fit_variant, callback, patience, tmpdir, iris_data
):
    # Every metric on the run must have passed through
    # BatchMetricsLogger.record_metrics with the same value.
    patched_metrics_data = []
    # Mock patching BatchMetricsLogger.record_metrics()
    # to ensure that expected metrics are being logged.
    original = BatchMetricsLogger.record_metrics
    with patch(
        "mlflow.utils.autologging_utils.BatchMetricsLogger.record_metrics", autospec=True
    ) as record_metrics_mock:

        def record_metrics_side_effect(self, metrics, step=None):
            # Capture what was recorded, then delegate to the real method so
            # the run still receives its metrics.
            patched_metrics_data.extend(metrics.items())
            original(self, metrics, step)

        record_metrics_mock.side_effect = record_metrics_side_effect
        _, run = get_fastai_random_data_run_with_callback(
            iris_data, fit_variant, callback, patience, tmpdir
        )
    patched_metrics_data = dict(patched_metrics_data)
    original_metrics = run.data.metrics
    for metric_name in original_metrics:
        assert metric_name in patched_metrics_data
        assert original_metrics[metric_name] == patched_metrics_data[metric_name]
    assert "train_loss" in original_metrics
    assert "train_loss" in patched_metrics_data
def test_callback_is_picklable():
    """The autolog callback must survive pickling (fastai can copy callbacks)."""
    metrics_logger = BatchMetricsLogger(run_id="1234")
    callback = __MlflowFastaiCallback(
        metrics_logger, log_models=True, is_fine_tune=False
    )
    pickle.dumps(callback)
@pytest.mark.large
def test_autolog_registering_model(iris_data):
    """autolog(registered_model_name=...) must register the trained model."""
    name = "test_autolog_registered_model"
    mlflow.fastai.autolog(registered_model_name=name)
    with mlflow.start_run():
        learner = fastai_tabular_model(iris_data)
        learner.fit(NUM_EPOCHS)
        registered_model = MlflowClient().get_registered_model(name)
        assert registered_model.name == name
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.optimizers.Adam')
class Adam(optimizer_v2.OptimizerV2):
"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments. According to the
reference, the method is 'computationally efficient, has little memory
requirement, invariant to diagonal rescaling of gradients, and is well suited
for problems that are large in terms of data/parameters'.
Note, amsgrad is currently not supported and the argument can only be False.
# References
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
For AMSGrad see [Reddi et al., 2-18]
(https://openreview.net/pdf?id=ryQu7f-RZ)
"""
def __init__(self,
             learning_rate=0.001,
             beta_1=0.9,
             beta_2=0.999,
             epsilon=1e-7,
             amsgrad=False,
             name='Adam',
             **kwargs):
    r"""Construct a new Adam optimizer.

    If amsgrad = False:
      Initialization:
      $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
      $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
      $$t := 0 \text{(Initialize timestep)}$$
      The update rule for `variable` with gradient `g` uses an optimization
      described at the end of section2 of the paper:
      $$t := t + 1$$
      $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
      $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
      $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
      $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$

    If amsgrad = True:
      Initialization:
      $$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
      $$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
      $$v_hat_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
      $$t := 0 \text{(Initialize timestep)}$$
      The update rule for `variable` with gradient `g` uses an optimization
      described at the end of section2 of the paper:
      $$t := t + 1$$
      $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
      $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
      $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
      $$v_hat_t := max(v_hat_{t-1}, v_t)$$
      $$variable := variable - lr_t * m_t / (\sqrt{v_hat_t} + \epsilon)$$

    The default value of 1e-7 for epsilon might not be a good default in
    general. For example, when training an Inception network on ImageNet a
    current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
    formulation just before Section 2.1 of the Kingma and Ba paper rather than
    the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
    hat" in the paper.

    The sparse implementation of this algorithm (used when the gradient is an
    IndexedSlices object, typically because of `tf.gather` or an embedding
    lookup in the forward pass) does apply momentum to variable slices even if
    they were not used in the forward pass (meaning they have a gradient equal
    to zero). Momentum decay (beta1) is also applied to the entire momentum
    accumulator. This means that the sparse behavior is equivalent to the dense
    behavior (in contrast to some momentum implementations which ignore momentum
    unless a variable slice was actually used).

    Args:
      learning_rate: A Tensor or a floating point value. The learning rate.
      beta_1: A float value or a constant float tensor. The exponential decay
        rate for the 1st moment estimates.
      beta_2: A float value or a constant float tensor. The exponential decay
        rate for the 2nd moment estimates.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper.
      amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
        the paper "On the Convergence of Adam and beyond".
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam". @compatibility(eager) When eager execution is
        enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be
        a callable that takes no arguments and returns the actual value to use.
        This can be useful for changing these values across different
        invocations of optimizer functions. @end_compatibility
      **kwargs: keyword arguments. Allowed to be {`decay`}
    """
    super(Adam, self).__init__(name, **kwargs)
    # 'lr' in kwargs is accepted as a backwards-compatible alias for
    # learning_rate.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    # `_initial_decay` is populated by the base class from **kwargs.
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    self._set_hyper('epsilon', epsilon)
    self.amsgrad = amsgrad
def _create_slots(self, var_list):
    """Create 'm' and 'v' slots (plus 'vhat' for AMSGrad) for each variable."""
    # Separate passes keep slot ordering compatible with the V1 optimizer.
    for variable in var_list:
        self.add_slot(variable, 'm')
    for variable in var_list:
        self.add_slot(variable, 'v')
    if not self.amsgrad:
        return
    for variable in var_list:
        self.add_slot(variable, 'vhat')
def set_weights(self, weights):
    """Set optimizer weights, trimming V1-style vhat entries when needed.

    Weights generated by a Keras V1 optimizer include vhat variables even
    without amsgrad (3x + 1 values for x trainable variables), while the V2
    optimizer keeps only 2x + 1. Extra entries are dropped for compatibility.
    """
    params = self.weights
    # Floor division avoids the float round-trip of int((...) / 2).
    num_vars = (len(params) - 1) // 2
    if len(weights) == 3 * num_vars + 1:
        weights = weights[:len(params)]
    super(Adam, self).set_weights(weights)
def _resource_apply_dense(self, grad, var):
    # Dense Adam update for one variable, delegated to the fused
    # training_ops kernels (plain Adam or the AMSGrad variant).
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)  # learning rate after decay schedule
    m = self.get_slot(var, 'm')
    v = self.get_slot(var, 'v')
    beta_1_t = self._get_hyper('beta_1', var_dtype)
    beta_2_t = self._get_hyper('beta_2', var_dtype)
    epsilon = self._get_hyper('epsilon', var_dtype)
    # Bias-correction powers use the 1-based step count.
    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    beta_2_power = math_ops.pow(beta_2_t, local_step)
    if not self.amsgrad:
        return training_ops.resource_apply_adam(
            var.handle,
            m.handle,
            v.handle,
            beta_1_power,
            beta_2_power,
            lr_t,
            beta_1_t,
            beta_2_t,
            epsilon,
            grad,
            use_locking=self._use_locking)
    else:
        # AMSGrad additionally tracks the running max of v in 'vhat'.
        vhat = self.get_slot(var, 'vhat')
        return training_ops.resource_apply_adam_with_amsgrad(
            var.handle,
            m.handle,
            v.handle,
            vhat.handle,
            beta_1_power,
            beta_2_power,
            lr_t,
            beta_1_t,
            beta_2_t,
            epsilon,
            grad,
            use_locking=self._use_locking)
  def _resource_apply_sparse(self, grad, var, indices):
    """Builds the sparse-gradient Adam update for the rows in `indices`.

    Unlike the dense path there is no single fused kernel here: m and v
    are decayed with `assign` and then scatter-added, with explicit
    control dependencies enforcing the decay-before-add ordering.

    Args:
      grad: gradient values for the selected rows.
      var: the resource variable being updated.
      indices: row indices into `var` that `grad` corresponds to.

    Returns:
      A grouped op covering the variable and slot updates.
    """
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    beta_1_t = self._get_hyper('beta_1', var_dtype)
    beta_2_t = self._get_hyper('beta_2', var_dtype)
    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    beta_2_power = math_ops.pow(beta_2_t, local_step)
    epsilon_t = self._get_hyper('epsilon', var_dtype)
    # Bias-corrected step size folded into a single scalar.
    lr = (lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, 'm')
    m_scaled_g_values = grad * (1 - beta_1_t)
    m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
      m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, 'v')
    v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
    v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
    if not self.amsgrad:
      v_sqrt = math_ops.sqrt(v_t)
      var_update = state_ops.assign_sub(
          var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
      return control_flow_ops.group(*[var_update, m_t, v_t])
    else:
      # AMSGrad: vhat tracks the elementwise running maximum of v.
      v_hat = self.get_slot(var, 'vhat')
      v_hat_t = math_ops.maximum(v_hat, v_t)
      with ops.control_dependencies([v_hat_t]):
        v_hat_t = state_ops.assign(
            v_hat, v_hat_t, use_locking=self._use_locking)
      v_hat_sqrt = math_ops.sqrt(v_hat_t)
      var_update = state_ops.assign_sub(
          var,
          lr * m_t / (v_hat_sqrt + epsilon_t),
          use_locking=self._use_locking)
      return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t])
  def _resource_scatter_add(self, x, i, v):
    """Scatter-adds `v` into rows `i` of resource variable `x`.

    Returns `x.value()` gated by a control dependency on the scatter-add,
    so consumers of the returned tensor observe the updated contents.
    """
    with ops.control_dependencies(
        [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
      return x.value()
def get_config(self):
config = super(Adam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self._serialize_hyperparameter('epsilon'),
'amsgrad': self.amsgrad,
})
return config
| |
#!/usr/bin/env python
"""
SYNOPSIS
python dataset.py [-h,--help] [-v,--verbose] [-d,--directory DIRECTORY]
DESCRIPTION
Assert the existence of the GeoLife dataset within the specified DIRECTORY.
If the dataset is not present, this script will first see if a ZIP archive
is within that directory and will unpack it. If no ZIP archive exists, it
will download the GeoLife dataset, unpack it, and confirm the unpacking
resulted in PLX files now existing somewhere under the specified DIRECTORY.
This script can be used as a stand-alone script or imported into another
Python script as a module.
ARGUMENTS
-h, --help show this help message and exit
-v, --verbose verbose output
-d DIRECTORY, --directory DIRECTORY
directory where GeoLife dataset is stored
AUTHOR
Doug McGeehan <doug.mcgeehan@mst.edu>
LICENSE
This script is placed under the MIT License. Please refer to LICENSE file
in the parent directory for more details.
The GeoLife GPS Trajectory dataset is placed under the Microsoft Research
License Agreement that is described in its user guide that is included in
its ZIP archive.
"""
import argparse
import requests
from datetime import datetime
import os
import sys
import glob
import zipfile
import logging
# Module-level logger; handlers are attached by setup_logger() when the
# script is run directly.
logger = logging.getLogger("geolife.dataset")
# Direct link to the GeoLife ZIP archive.
# Valid as of 11 July, 2016.
GEOLIFE_ZIP_ARCHIVE_URL="https://download.microsoft.com/download/F/4/8/F4894AA5-FDBC-481E-9285-D5F8C4C4F039/Geolife%20Trajectories%201.3.zip"
# If the above URL is no longer valid, navigate to this page and manually
# download the dataset.
GEOLIFE_DOWNLOAD_PAGE="https://www.microsoft.com/en-us/download/details.aspx?id=52367"
def verify(directory="."):
    """
    Verify the GeoLife dataset exists in this directory, and if not, make it
    so. Return the dataset's root directory.

    Resolution order: existing PLT files in `directory`, then a local ZIP
    archive to unpack, then a fresh download from Microsoft Research.
    Exits the process if unpacking does not yield PLT files.
    """
    # Check if uncompressed PLX files exist within the specified directory
    try:
        dataset_root = find_geolife_root(directory)
    except PLXNotFound:
        # If no PLX files exist in the directory, then check if a ZIP archive
        # exists. If no ZIP archive exists, download it.
        logger.warning("GeoLife PLX files not found in '{0}'. Checking for ZIP"
                       " archive.".format(directory))
        zip_files = glob.glob(os.path.join(directory, "*.zip"))
        if not zip_files:
            logger.warning("No GeoLife ZIP archive. Proceeding with download.")
            geolife_zip = download(url=GEOLIFE_ZIP_ARCHIVE_URL)
        else:
            geolife_zip = zip_files[0]
            logger.info("GeoLife ZIP archive found at '{0}'".format(
                geolife_zip
            ))
        unpack(archive=geolife_zip, to=directory)
        try:
            dataset_root = find_geolife_root(directory)
        except PLXNotFound:
            # Narrowed from a blanket `except Exception`: only the
            # missing-PLT condition means the archive was not GeoLife data;
            # other errors should surface with their own tracebacks.
            logger.error(
                "Unpacking the ZIP at '{zip}' did not result in PLX files."
                " Perhaps '{zip}' is not a ZIP archive of the GeoLife files.\n"
                "Please visit '{geolife_page}' and manually download the"
                " GeoLife dataset. Make sure to place the ZIP archive in the"
                " directory '{abs_path}' and try executing this script"
                " again.".format(
                    zip=geolife_zip,
                    geolife_page=GEOLIFE_DOWNLOAD_PAGE,
                    abs_path=os.path.abspath(directory)
                ))
            sys.exit(1)
    return dataset_root
def find_geolife_root(directory_to_search, just_downloaded=False):
    """
    Walk down the tree until a PLT file is encountered and return the
    dataset root (two levels above the directory containing the file).
    Raise PLXNotFound if no PLT file exists anywhere under the tree.

    Note: the original implementation's `break` only exited the inner
    file loop, so the whole tree was scanned and the *last* matching
    directory won; the walk now stops at the first PLT file found.

    `just_downloaded` is accepted for interface compatibility but unused.
    """
    directory_containing_plt = None
    for current_dir, _subdirs, files in os.walk(directory_to_search):
        if any(f.lower().endswith(".plt") for f in files):
            directory_containing_plt = current_dir
            break
    if directory_containing_plt is None:
        raise PLXNotFound
    geolife_root = os.path.abspath(
        os.path.dirname(os.path.dirname(directory_containing_plt))
    )
    logger.info("GeoLife dataset found within '{0}'".format(geolife_root))
    return geolife_root
def download(url):
    """
    Download the GeoLife dataset from Microsoft Research.

    Streams the archive to ./geolife.zip and returns that path. A
    progress-bar download is attempted first; if progressbar2 is not
    installed (ImportError from progress_downloader's local imports) a
    plain chunked download is used instead. Any other failure is treated
    as a dead link: an explanatory error is logged and the process exits.
    """
    logger.info("Downloading from '{0}'. Please be patient.".format(url))
    logger.info(
        "After this run, downloading shouldn't have to be performed again"
    )
    save_to = os.path.join(".", "geolife.zip")
    # stream=True so the body is fetched chunk-by-chunk, not all at once.
    downloader = requests.get(url, stream=True)
    try:
        progress_downloader(downloader, save_to=save_to)
    except ImportError:
        # You don't have progressbar2 installed, so you won't get a pretty
        # progress bar to tell you how far along you are in the download.
        # You can install it like so:
        #  $ sudo pip install progressbar2
        size_in_MB = int(downloader.headers.get('content-length')) / 1e6
        logger.warning(
            "File size to download: {0:.2f} MB. This may take some time."
            " Go have a coffee.".format(
            size_in_MB
        ))
        # NOTE(review): chunk_size=4098 is probably a typo for 4096;
        # harmless either way.
        with open(save_to, "wb") as f:
            for chunk in downloader.iter_content(chunk_size=4098):
                if chunk:
                    f.write(chunk)
                    f.flush()
    except Exception:
        logger.error(
            "It appears the download url '{url}' is no longer valid. Please"
            " visit '{geolife_page}' and manually download the GeoLife dataset"
            " from there. Make sure to place the ZIP archive in the directory"
            " '{abs_path}' and try executing this script again.".format(
                url=url, geolife_page=GEOLIFE_DOWNLOAD_PAGE,
                abs_path=os.path.abspath(save_to)
            ))
        sys.exit(1)
    logger.info("Download complete!")
    return save_to
def progress_downloader(downloader, save_to):
    """
    Another downloader function, but with a progress bar so you don't have to
    stare at a blank screen.
    e.g.
     71% |#################      | Elapsed Time: 0:00:45 | ETA: 0:00:15 683.9 KiB/s

    Args:
        downloader: a streaming `requests` response object.
        save_to: path the archive is written to.

    The progressbar imports are deliberately function-local: when
    progressbar2 is missing, the resulting ImportError is caught by
    download() to fall back to a plain download.
    """
    from progressbar import ProgressBar
    from progressbar import Percentage
    from progressbar import Bar
    from progressbar import Timer
    from progressbar import ETA
    from progressbar import AdaptiveTransferSpeed
    download_size = int(downloader.headers.get('content-length'))
    amount_downloaded = 0
    widgets = [
        Percentage(),
        ' ', Bar(),
        ' ', Timer(),
        ' | ', ETA(),
        ' ', AdaptiveTransferSpeed(),
    ]
    download_progress = ProgressBar(widgets=widgets, max_value=download_size)
    download_progress.start()
    with open(save_to, "wb") as f:
        for chunk in downloader.iter_content(chunk_size=4098):
            if chunk:
                f.write(chunk)
                f.flush()
                amount_downloaded += len(chunk)
                download_progress.update(amount_downloaded)
    download_progress.finish()
def unpack(archive, to):
    """
    Unpack the zip archive `archive` into directory `to`.

    Uses a context manager so the archive's file handle is closed even if
    extraction fails partway through (the original leaked the handle on
    error).
    """
    logger.info(
        "Unpacking ZIP archive '{0}' to '{1}'. Please be patient.".format(
            archive, to
    ))
    logger.info(
        "After this run, unpacking shouldn't have to be performed again"
    )
    with zipfile.ZipFile(archive, 'r') as unzipper:
        unzipper.extractall(to)
    logger.info("Unpacking complete!")
class PLXNotFound(IOError):
    """Raised when no GeoLife .plt files exist under a searched directory.

    The explicit __init__ that merely delegated to IOError.__init__ was
    redundant boilerplate and has been removed; behavior is unchanged.
    """
def setup_logger(args):
    """Attach a debug-level file handler and a console handler to the
    module logger; console verbosity follows args.verbose."""
    logger.setLevel(logging.DEBUG)
    # The log file always records everything, down to debug messages.
    file_handler = logging.FileHandler('geolife.dataset.log')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    ))
    # The console only shows debug output when --verbose was given.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    console_handler.setFormatter(logging.Formatter(
        '%(levelname)s - %(message)s'
    ))
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
if __name__ == '__main__':
    # Script entry point: parse arguments, configure logging, verify the
    # dataset, and report total execution time.
    try:
        start_time = datetime.now()
        parser = argparse.ArgumentParser(
            description="Verify, unpack, or download the GeoLife GPS trajectory"
                        " dataset for further processing."
        )
        parser.add_argument('-v', '--verbose', action='store_true',
                            default=False, help='verbose output')
        parser.add_argument('-d', '--directory', dest='directory',
                            default=".",
                            help="directory where GeoLife dataset is stored")
        args = parser.parse_args()
        setup_logger(args)
        logger.debug(start_time)
        verify(args.directory)
        finish_time = datetime.now()
        logger.debug(finish_time)
        logger.debug('Execution time: {time}'.format(
            time=(finish_time - start_time)
        ))
        sys.exit(0)
    except (KeyboardInterrupt, SystemExit):
        # Ctrl-C and sys.exit() propagate unchanged. The old Python-2-only
        # `except X, e: raise e` form was a SyntaxError on Python 3 and
        # discarded the original traceback; a bare `raise` keeps it.
        raise
    except Exception:
        logger.exception("Something happened and I don't know what to do D:")
        sys.exit(1)
| |
"""
NonPhysicsWalker.py is for avatars.
A walker control such as this one provides:
- creation of the collision nodes
- handling the keyboard and mouse input for avatar movement
- moving the avatar
it does not:
- play sounds
- play animations
although it does send messages that allow a listener to play sounds or
animations based on walker events.
"""
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from direct.controls.ControlManager import CollisionHandlerRayStart
from direct.showbase.InputStateGlobal import inputState
from direct.task.Task import Task
from pandac.PandaModules import *
class NonPhysicsWalker(DirectObject.DirectObject):
    """Avatar movement controller that uses collisions but no physics engine.

    Owns the avatar's collision geometry (a wall sphere and a downward
    floor ray), turns keyboard/mouse input state into per-frame motion,
    and moves the avatar node path. It plays no sounds or animations
    itself; it broadcasts messenger events so listeners can do that.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("NonPhysicsWalker")
    wantDebugIndicator = ConfigVariableBool('want-avatar-physics-indicator', False)

    # Ghost mode overrides this:
    slideName = "slide-is-disabled"

    # special methods
    def __init__(self):
        """Initialize speeds, velocity, and collision bookkeeping."""
        DirectObject.DirectObject.__init__(self)
        self.worldVelocity = Vec3.zero()
        self.collisionsActive = 0
        self.speed=0.0
        self.rotationSpeed=0.0
        self.slideSpeed=0.0
        self.vel=Vec3(0.0, 0.0, 0.0)
        self.stopThisFrame = 0

    def setWalkSpeed(self, forward, jump, reverse, rotate):
        """Set the control speeds; `jump` is accepted but unused here."""
        assert self.debugPrint("setWalkSpeed()")
        self.avatarControlForwardSpeed=forward
        #self.avatarControlJumpForce=jump
        self.avatarControlReverseSpeed=reverse
        self.avatarControlRotateSpeed=rotate

    def getSpeeds(self):
        """Return the current (speed, rotationSpeed, slideSpeed) tuple."""
        #assert self.debugPrint("getSpeeds()")
        return (self.speed, self.rotationSpeed, self.slideSpeed)

    def setAvatar(self, avatar):
        """Remember which avatar this walker drives."""
        self.avatar = avatar
        if avatar is not None:
            pass # setup the avatar

    def setAirborneHeightFunc(self, getAirborneHeight):
        # Callback used to query the avatar's height above the floor.
        self.getAirborneHeight = getAirborneHeight

    def setWallBitMask(self, bitMask):
        # From-collide mask applied to the wall sphere.
        self.cSphereBitMask = bitMask

    def setFloorBitMask(self, bitMask):
        # From-collide mask applied to the downward floor ray.
        self.cRayBitMask = bitMask

    def swapFloorBitMask(self, oldMask, newMask):
        """Clear oldMask bits and set newMask bits on the floor ray."""
        self.cRayBitMask = self.cRayBitMask &~ oldMask
        self.cRayBitMask |= newMask

        if self.cRayNodePath and not self.cRayNodePath.isEmpty():
            self.cRayNodePath.node().setFromCollideMask(self.cRayBitMask)

    def initializeCollisions(self, collisionTraverser, avatarNodePath,
            avatarRadius = 1.4, floorOffset = 1.0, reach = 1.0):
        """
        Set up the avatar for collisions

        Args:
            collisionTraverser: traverser the colliders are registered with.
            avatarNodePath: node path the collision solids attach to.
            avatarRadius: radius of the wall sphere.
            floorOffset: lifter offset above detected floor polygons.
            reach: lifter reach below the avatar.
        """
        assert not avatarNodePath.isEmpty()
        self.cTrav = collisionTraverser
        self.avatarNodePath = avatarNodePath

        # Set up the collision sphere
        # This is a sphere on the ground to detect barrier collisions
        self.cSphere = CollisionSphere(0.0, 0.0, 0.0, avatarRadius)
        cSphereNode = CollisionNode('NPW.cSphereNode')
        cSphereNode.addSolid(self.cSphere)
        self.cSphereNodePath = avatarNodePath.attachNewNode(cSphereNode)

        cSphereNode.setFromCollideMask(self.cSphereBitMask)
        cSphereNode.setIntoCollideMask(BitMask32.allOff())

        # Set up the collison ray
        # This is a ray cast from your head down to detect floor polygons.
        # This ray start is arbitrarily high in the air.  Feel free to use
        # a higher or lower value depending on whether you want an avatar
        # that is outside of the world to step up to the floor when they
        # get under valid floor:
        self.cRay = CollisionRay(0.0, 0.0, CollisionHandlerRayStart, 0.0, 0.0, -1.0)
        cRayNode = CollisionNode('NPW.cRayNode')
        cRayNode.addSolid(self.cRay)
        self.cRayNodePath = avatarNodePath.attachNewNode(cRayNode)
        cRayNode.setFromCollideMask(self.cRayBitMask)
        cRayNode.setIntoCollideMask(BitMask32.allOff())

        # set up wall collision mechanism
        self.pusher = CollisionHandlerPusher()
        self.pusher.setInPattern("enter%in")
        self.pusher.setOutPattern("exit%in")

        # set up floor collision mechanism
        self.lifter = CollisionHandlerFloor()
        self.lifter.setInPattern("on-floor")
        self.lifter.setOutPattern("off-floor")
        self.lifter.setOffset(floorOffset)
        self.lifter.setReach(reach)

        # Limit our rate-of-fall with the lifter.
        # If this is too low, we actually "fall" off steep stairs
        # and float above them as we go down. I increased this
        # from 8.0 to 16.0 to prevent this
        self.lifter.setMaxVelocity(16.0)

        self.pusher.addCollider(self.cSphereNodePath, avatarNodePath)
        self.lifter.addCollider(self.cRayNodePath, avatarNodePath)

        # activate the collider with the traverser and pusher
        self.setCollisionsActive(1)

    def deleteCollisions(self):
        """Tear down all collision solids and handlers created above."""
        del self.cTrav

        del self.cSphere
        self.cSphereNodePath.removeNode()
        del self.cSphereNodePath

        del self.cRay
        self.cRayNodePath.removeNode()
        del self.cRayNodePath

        del self.pusher
        del self.lifter

    def setTag(self, key, value):
        # Tag the wall sphere so collision events can identify this avatar.
        self.cSphereNodePath.setTag(key, value)

    def setCollisionsActive(self, active = 1):
        """Add or remove this walker's colliders from the traverser."""
        assert self.debugPrint("setCollisionsActive(active%s)"%(active,))
        if self.collisionsActive != active:
            self.collisionsActive = active
            if active:
                self.cTrav.addCollider(self.cSphereNodePath, self.pusher)
                self.cTrav.addCollider(self.cRayNodePath, self.lifter)
            else:
                self.cTrav.removeCollider(self.cSphereNodePath)
                self.cTrav.removeCollider(self.cRayNodePath)
                # Now that we have disabled collisions, make one more pass
                # right now to ensure we aren't standing in a wall.
                self.oneTimeCollide()

    def placeOnFloor(self):
        """
        Make a reasonable effort to place the avatar on the ground.
        For example, this is useful when switching away from the
        current walker.
        """
        # With these on, getAirborneHeight is not returning the correct value so
        # when we open our book while swimming we pop down underneath the ground
        # self.oneTimeCollide()
        # self.avatarNodePath.setZ(self.avatarNodePath.getZ()-self.getAirborneHeight())
        # Since this is the non physics walker - wont they already be on the ground?
        return

    def oneTimeCollide(self):
        """
        Makes one quick collision pass for the avatar, for instance as
        a one-time straighten-things-up operation after collisions
        have been disabled.
        """
        tempCTrav = CollisionTraverser("oneTimeCollide")
        tempCTrav.addCollider(self.cSphereNodePath, self.pusher)
        tempCTrav.addCollider(self.cRayNodePath, self.lifter)
        tempCTrav.traverse(render)

    def addBlastForce(self, vector):
        # No-op: there is no physics body for a blast force to act on.
        pass

    def displayDebugInfo(self):
        """
        For debug use.
        """
        onScreenDebug.add("controls", "NonPhysicsWalker")

    def _calcSpeeds(self):
        """Translate current input state into speed/slideSpeed/rotationSpeed."""
        # get the button states:
        forward = inputState.isSet("forward")
        reverse = inputState.isSet("reverse")
        turnLeft = inputState.isSet("turnLeft")
        turnRight = inputState.isSet("turnRight")
        slide = inputState.isSet(self.slideName) or 0
        #jump = inputState.isSet("jump")

        # Check for Auto-Run
        if base.localAvatar.getAutoRun():
            forward = 1
            reverse = 0

        # Determine what the speeds are based on the buttons:
        self.speed=(forward and self.avatarControlForwardSpeed or
                    reverse and -self.avatarControlReverseSpeed)
        # Should fSlide be renamed slideButton?
        self.slideSpeed=slide and ((reverse and turnLeft and -self.avatarControlReverseSpeed*(0.75)) or
                (reverse and turnRight and self.avatarControlReverseSpeed*(0.75)) or
                (turnLeft and -self.avatarControlForwardSpeed*(0.75)) or
                (turnRight and self.avatarControlForwardSpeed*(0.75)))
        self.rotationSpeed=not slide and (
                (turnLeft and self.avatarControlRotateSpeed) or
                (turnRight and -self.avatarControlRotateSpeed))

    def handleAvatarControls(self, task):
        """
        Check on the arrow keys and update the avatar.
        """
        if not self.lifter.hasContact():
            # hack fix for falling through the floor:
            messenger.send("walkerIsOutOfWorld", [self.avatarNodePath])

        self._calcSpeeds()

        if __debug__:
            debugRunning = inputState.isSet("debugRunning")
            if debugRunning:
                self.speed*=4.0
                self.slideSpeed*=4.0
                self.rotationSpeed*=1.25

        if self.wantDebugIndicator:
            self.displayDebugInfo()

        # How far did we move based on the amount of time elapsed?
        dt=ClockObject.getGlobalClock().getDt()
        # Check to see if we're moving at all:
        if self.speed or self.slideSpeed or self.rotationSpeed:
            if self.stopThisFrame:
                distance = 0.0
                slideDistance = 0.0
                rotation = 0.0
                self.stopThisFrame = 0
            else:
                distance = dt * self.speed
                slideDistance = dt * self.slideSpeed
                rotation = dt * self.rotationSpeed

            # Take a step in the direction of our previous heading.
            self.vel=Vec3(Vec3.forward() * distance +
                          Vec3.right() * slideDistance)
            if self.vel != Vec3.zero():
                # rotMat is the rotation matrix corresponding to
                # our previous heading.
                rotMat=Mat3.rotateMatNormaxis(self.avatarNodePath.getH(), Vec3.up())
                step=rotMat.xform(self.vel)
                self.avatarNodePath.setFluidPos(Point3(self.avatarNodePath.getPos()+step))
            self.avatarNodePath.setH(self.avatarNodePath.getH()+rotation)
            messenger.send("avatarMoving")
        else:
            self.vel.set(0.0, 0.0, 0.0)

        self.__oldPosDelta = self.avatarNodePath.getPosDelta(render)
        self.__oldDt = dt
        try:
            self.worldVelocity = self.__oldPosDelta*(1/self.__oldDt)
        except:
            # divide by zero
            # NOTE(review): this assigns a scalar 0 where a Vec3 is used
            # elsewhere; callers appear tolerant, but confirm.
            self.worldVelocity = 0
        return Task.cont

    def doDeltaPos(self):
        assert self.debugPrint("doDeltaPos()")

    def reset(self):
        assert self.debugPrint("reset()")

    def getVelocity(self):
        """Return the last per-frame local velocity vector."""
        return self.vel

    def enableAvatarControls(self):
        """
        Activate the arrow keys, etc.
        """
        assert self.debugPrint("enableAvatarControls")
        assert self.collisionsActive

        taskName = "AvatarControls-%s"%(id(self),)
        # remove any old
        taskMgr.remove(taskName)
        # spawn the new task
        taskMgr.add(self.handleAvatarControls, taskName)

    def disableAvatarControls(self):
        """
        Ignore the arrow keys, etc.
        """
        assert self.debugPrint("disableAvatarControls")
        taskName = "AvatarControls-%s"%(id(self),)
        taskMgr.remove(taskName)

    def flushEventHandlers(self):
        """Discard any queued pusher/lifter collision events."""
        if hasattr(self, 'cTrav'):
            self.pusher.flush()
            self.lifter.flush() # not currently defined or needed

    if __debug__:
        def debugPrint(self, message):
            """for debugging"""
            return self.notify.debug(
                str(id(self))+' '+message)
| |
#!/usr/bin/env python
import gflags
import os
import sys
import unittest
from ct.crypto import cert
from ct.crypto import error
from ct.crypto import verify
from ct.proto import client_pb2
from ct.serialization import tls_message
import mock
FLAGS = gflags.FLAGS
# --testdata_dir points at the canned certificates/proofs these tests read.
gflags.DEFINE_string("testdata_dir", "../test/testdata",
                     "Location of test certs")
def read_testdata_file(test_file):
    """Return the raw contents of a file under --testdata_dir."""
    full_path = os.path.join(FLAGS.testdata_dir, test_file)
    with open(full_path, 'rb') as data_file:
        return data_file.read()
class LogVerifierTest(unittest.TestCase):
    """Tests for verify.LogVerifier.

    The class attributes below hold one known-good Signed Tree Head (STH)
    and the matching ECDSA log public key; individual tests mutate copies
    of them to probe signature, ASN.1-encoding, and consistency checks.
    Byte fiddling uses Python 2 idioms ("...".decode("hex"), chr/ord over
    str).
    """
    # A valid STH (tree size, timestamp, root hash, signature) matching
    # default_key_info below.
    default_sth = client_pb2.SthResponse()
    default_sth.tree_size = 42
    default_sth.timestamp = 1348589667204
    default_sth.sha256_root_hash = (
        "18041bd4665083001fba8c5411d2d748e8abbfdcdfd9218cb02b68a78e7d4c23"
        ).decode("hex")
    default_sth.tree_head_signature = (
        "040300483046022100befd8060563763a5e49ba53e6443c13f7624fd6403178113736e"
        "16012aca983e022100f572568dbfe9a86490eb915c4ee16ad5ecd708fed35ed4e5cd1b"
        "2c3f087b4130").decode("hex")
    default_key_info = client_pb2.KeyInfo()
    default_key_info.type = client_pb2.KeyInfo.ECDSA
    default_key_info.pem_key = (
        "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAES0AfBk"
        "jr7b8b19p5Gk8plSAN16wW\nXZyhYsH6FMCEUK60t7pem/ckoPX8hupuaiJzJS0ZQ0SEoJ"
        "GlFxkUFwft5g==\n-----END PUBLIC KEY-----\n")

    def test_verify_sth(self):
        """The untouched default STH verifies against the default key."""
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info)
        self.assertTrue(verifier.verify_sth(LogVerifierTest.default_sth))

    def test_verify_sth_fails_for_bad_signature(self):
        """Flipping any content bit of the signature must be rejected."""
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info)
        default_sth = LogVerifierTest.default_sth
        for i in range(len(default_sth.tree_head_signature)):
            # Skip the bytes that encode ASN.1 lengths: this is covered in a
            # separate test
            if i == 5 or i == 7 or i == 42:
                continue
            sth = client_pb2.SthResponse()
            sth.CopyFrom(default_sth)
            sth.tree_head_signature = (
                default_sth.tree_head_signature[:i] +
                chr(ord(default_sth.tree_head_signature[i]) ^ 1) +
                default_sth.tree_head_signature[i+1:])
            # Encoding- or SignatureError, depending on whether the modified
            # byte is a content byte or not.
            self.assertRaises((error.EncodingError, error.SignatureError),
                              verifier.verify_sth, sth)

    def test_verify_sth_for_bad_asn1_length(self):
        """Corrupting ASN.1 length bytes yields EncodingError (or is sliced)."""
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info)
        default_sth = LogVerifierTest.default_sth
        # The byte that encodes the length of the ASN.1 signature sequence
        i = 5
        # Decreasing the length truncates the sequence and causes a decoding
        # error.
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) - 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertRaises(error.EncodingError, verifier.verify_sth, sth)
        # Increasing the length means there are not enough ASN.1 bytes left to
        # decode the sequence, however the ecdsa module silently slices it.
        # TODO(ekasper): contribute a patch to upstream and make the tests fail
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) + 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertTrue(verifier.verify_sth(sth))
        # The byte that encodes the length of the first integer r in the
        # sequence (r, s). Modifying the length corrupts the second integer
        # offset and causes a decoding error.
        i = 7
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) - 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertRaises(error.EncodingError, verifier.verify_sth, sth)
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) + 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertRaises(error.EncodingError, verifier.verify_sth, sth)
        # The byte that encodes the length of the second integer s in the
        # sequence (r, s). Decreasing this length corrupts the integer, however
        # increased length is silently sliced, as above.
        i = 42
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) - 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertRaises(error.EncodingError, verifier.verify_sth, sth)
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:i] +
            chr(ord(default_sth.tree_head_signature[i]) + 1) +
            default_sth.tree_head_signature[i+1:])
        self.assertTrue(verifier.verify_sth(sth))
        # Trailing garbage is correctly detected.
        sth = client_pb2.SthResponse()
        sth.CopyFrom(default_sth)
        sth.tree_head_signature = (
            default_sth.tree_head_signature[:3] +
            # Correct outer length to include trailing garbage.
            chr(ord(default_sth.tree_head_signature[3]) + 1) +
            default_sth.tree_head_signature[4:]) + "\x01"
        self.assertRaises(error.EncodingError, verifier.verify_sth, sth)

    def test_verify_sth_consistency(self):
        """Consistency check delegates to the Merkle verifier."""
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.tree_size = old_sth.tree_size + 1
        new_sth.timestamp = old_sth.timestamp + 1
        new_sth.sha256_root_hash = "a new hash"
        proof = ["some proof the mock does not care about"]

        mock_merkle_verifier = mock.Mock()
        mock_merkle_verifier.verify_tree_consistency.return_value = True

        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      mock_merkle_verifier)
        self.assertTrue(verifier.verify_sth_consistency(old_sth, new_sth,
                                                        proof))
        mock_merkle_verifier.verify_tree_consistency.assert_called_once_with(
            old_sth.tree_size, new_sth.tree_size, old_sth.sha256_root_hash,
            new_sth.sha256_root_hash, proof)

    def test_verify_sth_temporal_consistency(self):
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.tree_size = old_sth.tree_size + 1
        new_sth.timestamp = old_sth.timestamp + 1
        # Merkle verifier is never used so simply set to None
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      None)
        # Note we do not care about root hash inconsistency here.
        self.assertTrue(verifier.verify_sth_temporal_consistency(
            old_sth, new_sth))

    def test_verify_sth_temporal_consistency_equal_timestamps(self):
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.tree_size = old_sth.tree_size + 1
        # Merkle verifier is never used so simply set to None
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      None)
        self.assertRaises(error.ConsistencyError,
                          verifier.verify_sth_temporal_consistency,
                          old_sth, new_sth)
        new_sth.tree_size = old_sth.tree_size - 1
        self.assertRaises(error.ConsistencyError,
                          verifier.verify_sth_temporal_consistency,
                          old_sth, new_sth)
        # But identical STHs are OK
        self.assertTrue(verifier.verify_sth_temporal_consistency(
            old_sth, old_sth))

    def test_verify_sth_temporal_consistency_reversed_timestamps(self):
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.timestamp = old_sth.timestamp + 1
        new_sth.tree_size = old_sth.tree_size + 1
        # Merkle verifier is never used so simply set to None
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      None)
        self.assertRaises(ValueError,
                          verifier.verify_sth_temporal_consistency,
                          new_sth, old_sth)

    def test_verify_sth_temporal_consistency_newer_tree_is_smaller(self):
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.timestamp = old_sth.timestamp + 1
        new_sth.tree_size = old_sth.tree_size - 1
        # Merkle verifier is never used so simply set to None
        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      None)
        self.assertRaises(error.ConsistencyError,
                          verifier.verify_sth_temporal_consistency,
                          old_sth, new_sth)

    def test_verify_sth_consistency_invalid_proof(self):
        """Errors raised by the Merkle verifier propagate to the caller."""
        old_sth = LogVerifierTest.default_sth
        new_sth = client_pb2.SthResponse()
        new_sth.CopyFrom(old_sth)
        new_sth.tree_size = old_sth.tree_size + 1
        new_sth.timestamp = old_sth.timestamp + 1
        new_sth.sha256_root_hash = "a new hash"
        proof = ["some proof the mock does not care about"]

        mock_merkle_verifier = mock.Mock()
        mock_merkle_verifier.verify_tree_consistency.side_effect = (
            error.ConsistencyError("Evil"))

        verifier = verify.LogVerifier(LogVerifierTest.default_key_info,
                                      mock_merkle_verifier)
        self.assertRaises(error.ConsistencyError,
                          verifier.verify_sth_consistency,
                          old_sth, new_sth, proof)

    def _test_verify_sct(self, proof, chain, fake_timestamp = None):
        """Decode `proof` into an SCT and verify it against `chain`.

        `chain` is a list of PEM file names under --testdata_dir;
        `fake_timestamp`, when given, overwrites the SCT timestamp to
        deliberately break the signature.
        """
        sct = client_pb2.SignedCertificateTimestamp()
        tls_message.decode(read_testdata_file(proof), sct)
        if fake_timestamp is not None:
            sct.timestamp = fake_timestamp

        chain = map(lambda name: cert.Certificate.from_pem_file(
            os.path.join(FLAGS.testdata_dir, name)), chain)

        key_info = client_pb2.KeyInfo()
        key_info.type = client_pb2.KeyInfo.ECDSA
        key_info.pem_key = read_testdata_file('ct-server-key-public.pem')

        verifier = verify.LogVerifier(key_info)

        return verifier.verify_sct(sct, chain)

    def _test_verify_embedded_scts(self, chain):
        """Verify the SCTs embedded in the leaf certificate of `chain`.

        Returns the (sct, verified) pairs produced by the verifier.
        """
        chain = map(lambda name: cert.Certificate.from_pem_file(
            os.path.join(FLAGS.testdata_dir, name)), chain)

        key_info = client_pb2.KeyInfo()
        key_info.type = client_pb2.KeyInfo.ECDSA
        key_info.pem_key = read_testdata_file('ct-server-key-public.pem')

        verifier = verify.LogVerifier(key_info)

        return verifier.verify_embedded_scts(chain)

    def test_verify_sct_valid_signature(self):
        self.assertTrue(self._test_verify_sct(
            'test-cert.proof',
            ['test-cert.pem', 'ca-cert.pem']))

    def test_verify_sct_invalid_signature(self):
        self.assertRaises(error.SignatureError,
                          self._test_verify_sct,
                          'test-cert.proof',
                          ['test-cert.pem', 'ca-cert.pem'],
                          fake_timestamp = 1234567)

    def test_verify_sct_precertificate_valid_signature(self):
        self.assertTrue(self._test_verify_sct(
            'test-embedded-pre-cert.proof',
            ['test-embedded-pre-cert.pem', 'ca-cert.pem']))

    def test_verify_sct_precertificate_invalid_signature(self):
        self.assertRaises(error.SignatureError,
                          self._test_verify_sct,
                          'test-embedded-pre-cert.proof',
                          ['test-embedded-pre-cert.pem', 'ca-cert.pem'],
                          fake_timestamp = 1234567)

    def test_verify_sct_precertificate_with_preca_valid_signature(self):
        self.assertTrue(self._test_verify_sct(
            'test-embedded-with-preca-pre-cert.proof',
            ['test-embedded-with-preca-pre-cert.pem',
             'ca-pre-cert.pem', 'ca-cert.pem']))

    def test_verify_sct_missing_leaf_cert(self):
        self.assertRaises(error.IncompleteChainError,
                          self._test_verify_sct,
                          'test-cert.proof',
                          [])

    def test_verify_sct_missing_issuer_cert(self):
        self.assertRaises(error.IncompleteChainError,
                          self._test_verify_sct,
                          'test-embedded-pre-cert.proof',
                          ['test-embedded-pre-cert.pem'])

    def test_verify_sct_with_preca_missing_issuer_cert(self):
        self.assertRaises(error.IncompleteChainError,
                          self._test_verify_sct,
                          'test-embedded-with-preca-pre-cert.proof',
                          ['test-embedded-with-preca-pre-cert.pem',
                           'ca-pre-cert.pem'])

    def test_verify_embedded_scts_valid_signature(self):
        sct = client_pb2.SignedCertificateTimestamp()
        tls_message.decode(read_testdata_file('test-embedded-pre-cert.proof'),
                           sct)
        result = self._test_verify_embedded_scts(
            ['test-embedded-cert.pem', 'ca-cert.pem'])
        self.assertEqual(result, [(sct, True)])

    def test_verify_embedded_scts_invalid_signature(self):
        result = self._test_verify_embedded_scts(
            ['test-invalid-embedded-cert.pem', 'ca-cert.pem'])
        self.assertFalse(result[0][1])

    def test_verify_embedded_scts_with_preca_valid_signature(self):
        sct = client_pb2.SignedCertificateTimestamp()
        tls_message.decode(
            read_testdata_file('test-embedded-with-preca-pre-cert.proof'),
            sct)
        result = self._test_verify_embedded_scts(
            ['test-embedded-with-preca-cert.pem', 'ca-cert.pem'])
        self.assertEqual(result, [(sct, True)])
if __name__ == "__main__":
    # Let gflags parse and strip its own flags before unittest sees argv.
    sys.argv = FLAGS(sys.argv)
    unittest.main()
| |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Package Installer
@copyright: 2005 MoinMoin:AlexanderSchremmer,
2007-2010 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os, re, sys
import zipfile
from MoinMoin import config, wikiutil, caching, user
from MoinMoin.Page import Page
from MoinMoin.PageEditor import PageEditor
from MoinMoin.logfile import editlog, eventlog
from MoinMoin.util import filesys
# Name of the package control file inside a package archive — presumably
# read by installer code outside this excerpt; confirm against the loader.
MOIN_PACKAGE_FILE = 'MOIN_PACKAGE'
# Highest package-format version this module supports.
MAX_VERSION = 1
# Exceptions
class PackageException(Exception):
    """ Raised if the package is broken. """
    # The trailing `pass` was redundant (a docstring already forms the
    # class body) and has been removed; behavior is unchanged.
class ScriptException(Exception):
    """ Raised when there is a problem in the script. """

    def __unicode__(self):
        """ Return unicode error message """
        # Python 2: byte-string args are decoded using the wiki's
        # configured charset; anything else goes through the default
        # unicode() conversion.
        if isinstance(self.args[0], str):
            return unicode(self.args[0], config.charset)
        else:
            return unicode(self.args[0])
# Distinguishes failures that happen while executing script commands from
# problems detected when parsing the script itself.
class RuntimeScriptException(ScriptException):
    """ Raised when the script problem occurs at runtime. """
# Control-flow exception, not an error: caught by runScript to stop cleanly.
class ScriptExit(Exception):
    """ Raised by the script commands when the script should quit. """
def event_logfile(self, pagename, pagefile):
    """ Appends a SAVENEW entry for `pagename` to the wiki's event log,
    timestamped with the modification time of `pagefile`. """
    log = eventlog.EventLog(self.request)
    timestamp = wikiutil.timestamp2version(os.path.getmtime(pagefile))
    log.add(self.request, 'SAVENEW', {'pagename': pagename}, 1, timestamp)
def edit_logfile_append(self, pagename, pagefile, rev, action, logname='edit-log', comment=u'', author=u"Scripting Subsystem"):
    """ Records `action` in both the global edit log and the page's own
    edit log, then writes the matching event-log entry. """
    global_log = editlog.EditLog(self.request, uid_override=author)
    page_log_path = Page(self.request, pagename).getPagePath(logname, use_underlay=0, isfile=1)
    local_log = editlog.EditLog(self.request, filename=page_log_path,
                                uid_override=author)
    timestamp = wikiutil.timestamp2version(os.path.getmtime(pagefile))
    host = '::1'
    extra = u''
    global_log.add(self.request, timestamp, rev, action, pagename, host, comment)
    local_log.add(self.request, timestamp, rev, action, pagename, host, extra, comment)
    event_logfile(self, pagename, pagefile)
# Parsing and (un)quoting for script files
def packLine(items, separator="|"):
    """ Packs a list of items into a string that is separated by `separator`.

    Backslashes are escaped first, then occurrences of `separator`, so
    unpackLine() can reverse the transformation exactly.

    @param items: sequence of strings to pack
    @param separator: separator character (default "|")
    @return: the packed string

    Bug fix: items are now joined with `separator` instead of a hard-coded
    '|', so packing with a custom separator round-trips through
    unpackLine(string, separator).
    """
    escaped = [item.replace('\\', '\\\\').replace(separator, '\\' + separator)
               for item in items]
    return separator.join(escaped)
def unpackLine(string, separator="|"):
    """ Unpacks a string that was packed by packLine.

    A backslash escapes either another backslash or the separator; a
    backslash before any other character is silently dropped, matching
    the original escape scheme.
    """
    if not string:
        return []
    items = []
    current = ""
    i = 0
    n = len(string)
    while i < n:
        ch = string[i]
        if ch == '\\' and i + 1 < n and string[i + 1] in ('\\', separator):
            # Escaped backslash or separator: take the next char literally.
            current += string[i + 1]
            i += 2
            continue
        if ch == '\\':
            # Lone backslash (bad escape or trailing): drop it.
            i += 1
            continue
        if ch == separator:
            items.append(current)
            current = ""
        else:
            current += ch
        i += 1
    items.append(current)
    return items
def str2boolean(string):
    """
    Converts the parameter to a boolean value by recognising different
    truth literals.
    """
    truth_literals = frozenset(('yes', 'true', '1'))
    return string.lower() in truth_literals
class ScriptEngine:
    """
    The script engine supplies the needed commands to execute the installation
    script.

    Script commands are dispatched by name: a script line "AddRevision|..."
    calls method do_addrevision(...). Progress and errors are accumulated in
    self.msg rather than raised to the caller where possible.
    """

    def _extractToFile(self, source, target):
        """ Extracts source and writes the contents into target. """
        # TODO, add file dates
        target_file = open(target, "wb")
        target_file.write(self.extract_file(source))
        target_file.close()

    def __init__(self):
        # Theme that do_copythemefile installs into (set by do_setthemename).
        self.themename = None
        # When True, RuntimeScriptExceptions do not abort the script.
        self.ignoreExceptions = False
        # Number of upcoming script lines to skip (set by do_ensureversion).
        self.goto = 0
        #Satisfy pylint
        self.msg = getattr(self, "msg", "")
        self.request = getattr(self, "request", None)

    def do_addattachment(self, zipname, filename, pagename, author=u"Scripting Subsystem", comment=u""):
        """
        Installs an attachment
        @param pagename: Page where the file is attached. Or in 2.0, the file itself.
        @param zipname: Filename of the attachment from the zip file
        @param filename: Filename of the attachment (just applicable for MoinMoin < 2.0)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText
            attachments = Page(self.request, pagename).getPagePath("attachments", check_create=1)
            filename = wikiutil.taintfilename(filename)
            zipname = wikiutil.taintfilename(zipname)
            target = os.path.join(attachments, filename)
            page = PageEditor(self.request, pagename, do_editor_backup=0, uid_override=author)
            rev = page.current_rev()
            path = page.getPagePath(check_create=0)
            # Never overwrite an already existing attachment.
            if not os.path.exists(target):
                self._extractToFile(zipname, target)
                if os.path.exists(target):
                    filesys.chmod(target, 0666 & config.umask)
                action = 'ATTNEW'
                edit_logfile_append(self, pagename, path, rev, action, logname='edit-log',
                                    comment=u'%(filename)s' % {"filename": filename}, author=author)
                self.msg += u"%(filename)s attached \n" % {"filename": filename}
            else:
                self.msg += u"%(filename)s not attached \n" % {"filename": filename}
        else:
            self.msg += u"action add attachment: not enough rights - nothing done \n"

    def do_delattachment(self, filename, pagename, author=u"Scripting Subsystem", comment=u""):
        """
        Removes an attachment
        @param pagename: Page where the file is attached. Or in 2.0, the file itself.
        @param filename: Filename of the attachment (just applicable for MoinMoin < 2.0)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText
            attachments = Page(self.request, pagename).getPagePath("attachments", check_create=1)
            filename = wikiutil.taintfilename(filename)
            target = os.path.join(attachments, filename)
            page = PageEditor(self.request, pagename, do_editor_backup=0, uid_override=author)
            rev = page.current_rev()
            path = page.getPagePath(check_create=0)
            if os.path.exists(target):
                os.remove(target)
                action = 'ATTDEL'
                edit_logfile_append(self, pagename, path, rev, action, logname='edit-log',
                                    comment=u'%(filename)s' % {"filename": filename}, author=author)
                self.msg += u"%(filename)s removed \n" % {"filename": filename}
            else:
                self.msg += u"%(filename)s does not exist \n" % {"filename": filename}
        else:
            self.msg += u"action delete attachment: not enough rights - nothing done \n"

    def do_print(self, *param):
        """ Prints the parameters into output of the script. """
        self.msg += '; '.join(param) + "\n"

    def do_exit(self):
        """ Exits the script. """
        raise ScriptExit

    def do_ignoreexceptions(self, boolean):
        """ Sets the ignore exceptions setting. If exceptions are ignored, the
        script does not stop if one is encountered. """
        self.ignoreExceptions = str2boolean(boolean)

    def do_ensureversion(self, version, lines=0):
        """
        Ensures that the version of MoinMoin is greater or equal than
        version. If lines is unspecified, the script aborts. Otherwise,
        the next lines (amount specified by lines) are not executed.
        @param version: required version of MoinMoin (e.g. "1.3.4")
        @param lines: lines to ignore
        """
        _ = self.request.getText
        from MoinMoin.version import release
        version_int = [int(x) for x in version.split(".")]
        # use a regex here to get only the numbers of the release string (e.g. ignore betaX)
        release = re.compile('\d+').findall(release)[0:3]
        release = [int(x) for x in release]
        # List comparison is lexicographic, so [1, 9, 0] < [1, 10, 0] works.
        if version_int > release:
            if lines > 0:
                # Skip the next `lines` script lines instead of aborting.
                self.goto = lines
            else:
                raise RuntimeScriptException(_("The package needs a newer version"
                                               " of MoinMoin (at least %s).") %
                                             version)

    def do_setthemename(self, themename):
        """ Sets the name of the theme which will be altered next. """
        self.themename = wikiutil.taintfilename(str(themename))

    def do_copythemefile(self, filename, ftype, target):
        """ Copies a theme-related file (CSS, PNG, etc.) into a directory of the
        current theme.
        @param filename: name of the file in this package
        @param ftype: the subdirectory of the theme directory, e.g. "css"
        @param target: filename, e.g. "screen.css"
        """
        _ = self.request.getText
        if self.themename is None:
            raise RuntimeScriptException(_("The theme name is not set."))
        from MoinMoin.web.static import STATIC_FILES_PATH as htdocs_dir
        if not os.access(htdocs_dir, os.W_OK):
            raise RuntimeScriptException(_("Theme files not installed! Write rights missing for %s.") % htdocs_dir)
        theme_file = os.path.join(htdocs_dir, self.themename,
                                  wikiutil.taintfilename(ftype),
                                  wikiutil.taintfilename(target))
        theme_dir = os.path.dirname(theme_file)
        if not os.path.exists(theme_dir):
            os.makedirs(theme_dir)
        self._extractToFile(filename, theme_file)

    def do_installplugin(self, filename, visibility, ptype, target):
        """
        Installs a python code file into the appropriate directory.
        @param filename: name of the file in this package
        @param visibility: 'local' will copy it into the plugin folder of the
            current wiki. 'global' will use the folder of the MoinMoin python
            package.
        @param ptype: the type of the plugin, e.g. "parser"
        @param target: the filename of the plugin, e.g. wiki.py
        """
        visibility = visibility.lower()
        ptype = wikiutil.taintfilename(ptype.lower())
        if visibility == 'global':
            basedir = os.path.dirname(__import__("MoinMoin").__file__)
        elif visibility == 'local':
            basedir = self.request.cfg.plugin_dir
        target = os.path.join(basedir, ptype, wikiutil.taintfilename(target))
        self._extractToFile(filename, target)
        # Reset the module-level plugin cache so the new plugin is found.
        wikiutil._wiki_plugins = {}

    def do_installpackage(self, pagename, filename):
        """
        Installs a package.
        @param pagename: Page where the file is attached. Or in 2.0, the file itself.
        @param filename: Filename of the attachment (just applicable for MoinMoin < 2.0)
        """
        _ = self.request.getText
        attachments = Page(self.request, pagename).getPagePath("attachments", check_create=0)
        package = ZipPackage(self.request, os.path.join(attachments, wikiutil.taintfilename(filename)))
        if package.isPackage():
            if not package.installPackage():
                raise RuntimeScriptException(_("Installation of '%(filename)s' failed.") % {
                    'filename': filename} + "\n" + package.msg)
        else:
            raise RuntimeScriptException(_('The file %s is not a MoinMoin package file.') % filename)
        self.msg += package.msg

    def do_addrevision(self, filename, pagename, author=u"Scripting Subsystem", comment=u"", trivial=u"No"):
        """ Adds a revision to a page.
        @param filename: name of the file in this package
        @param pagename: name of the target page
        @param author: user name of the editor (optional)
        @param comment: comment related to this revision (optional)
        @param trivial: boolean, if it is a trivial edit
        """
        _ = self.request.getText
        trivial = str2boolean(trivial)
        if self.request.user.may.write(pagename):
            page = PageEditor(self.request, pagename, do_editor_backup=0)
            try:
                page.saveText(self.extract_file(filename).decode("utf-8"), 0, trivial=trivial, comment=comment)
            except PageEditor.Unchanged:
                # New content is identical to the current revision; no-op.
                pass
            else:
                self.msg += u"%(pagename)s added \n" % {"pagename": pagename}
        else:
            self.msg += u"action add revision: not enough rights - nothing done \n"

    def do_renamepage(self, pagename, newpagename, author=u"Scripting Subsystem", comment=u"Renamed by the scripting subsystem."):
        """ Renames a page.
        @param pagename: name of the target page
        @param newpagename: name of the new page
        @param author: user name of the editor (optional)
        @param comment: comment related to this revision (optional)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText
            page = PageEditor(self.request, pagename, do_editor_backup=0, uid_override=author)
            if not page.exists():
                raise RuntimeScriptException(_("The page %s does not exist.") % pagename)
            newpage = PageEditor(self.request, newpagename)
            page.renamePage(newpage.page_name, comment=u"Renamed from '%s'" % (pagename))
            self.msg += u'%(pagename)s renamed to %(newpagename)s\n' % {
                "pagename": pagename,
                "newpagename": newpagename}
        else:
            self.msg += u"action rename page: not enough rights - nothing done \n"

    def do_deletepage(self, pagename, comment="Deleted by the scripting subsystem."):
        """ Marks a page as deleted (like the DeletePage action).
        @param pagename: page to delete
        @param comment: the related comment (optional)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText
            page = PageEditor(self.request, pagename, do_editor_backup=0)
            if not page.exists():
                raise RuntimeScriptException(_("The page %s does not exist.") % pagename)
            page.deletePage(comment)
        else:
            self.msg += u"action delete page: not enough rights - nothing done \n"

    def do_replaceunderlayattachment(self, zipname, filename, pagename, author=u"Scripting Subsystem", comment=u""):
        """
        overwrite underlay attachments
        @param pagename: Page where the file is attached. Or in 2.0, the file itself.
        @param zipname: Filename of the attachment from the zip file
        @param filename: Filename of the attachment (just applicable for MoinMoin < 2.0)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText
            filename = wikiutil.taintfilename(filename)
            zipname = wikiutil.taintfilename(zipname)
            page = PageEditor(self.request, pagename, do_editor_backup=0, uid_override=author)
            pagedir = page.getPagePath(use_underlay=1, check_create=1)
            attachments = os.path.join(pagedir, 'attachments')
            if not os.path.exists(attachments):
                os.mkdir(attachments)
            target = os.path.join(attachments, filename)
            # Unlike do_addattachment, an existing file IS overwritten here.
            self._extractToFile(zipname, target)
            if os.path.exists(target):
                filesys.chmod(target, 0666 & config.umask)
        else:
            self.msg += u"action replace underlay attachment: not enough rights - nothing done \n"

    def do_replaceunderlay(self, filename, pagename):
        """
        Overwrites underlay pages. Implementational detail: This needs to be
        kept in sync with the page class.
        @param filename: name of the file in the package
        @param pagename: page to be overwritten
        """
        page = Page(self.request, pagename)
        pagedir = page.getPagePath(use_underlay=1, check_create=1)
        revdir = os.path.join(pagedir, 'revisions')
        cfn = os.path.join(pagedir, 'current')
        # Underlay pages always have exactly one revision, number 1.
        revstr = '%08d' % 1
        if not os.path.exists(revdir):
            os.mkdir(revdir)
        currentf = open(cfn, 'w')
        currentf.write(revstr + "\n")
        currentf.close()
        pagefile = os.path.join(revdir, revstr)
        self._extractToFile(filename, pagefile)
        # Clear caches
        # TODO Code from MoinMoin/script/maint/cleancache.py may be used

    def runScript(self, commands):
        """ Runs the commands.
        @param commands: list of strings which contain a command each
        @return True on success
        """
        _ = self.request.getText
        # First line must be the header: MoinMoinPackage|<format revision>.
        headerline = unpackLine(commands[0])
        if headerline[0].lower() != "MoinMoinPackage".lower():
            raise PackageException(_("Invalid package file header."))
        self.revision = int(headerline[1])
        if self.revision > MAX_VERSION:
            raise PackageException(_("Package file format unsupported."))
        lineno = 1
        success = True
        for line in commands[1:]:
            lineno += 1
            # Honour a pending skip request from do_ensureversion.
            if self.goto > 0:
                self.goto -= 1
                continue
            # Skip comments and empty lines.
            if line.startswith("#") or len(line) == 0:
                continue
            elements = unpackLine(line)
            fnname = elements[0].strip().lower()
            if fnname == '':
                continue
            try:
                if fnname in self.request.cfg.packagepages_actions_excluded:
                    self.msg += u"action package %s: excluded \n" % elements[0].strip()
                    success = False
                    continue
                else:
                    # Dispatch to the matching do_<command> method.
                    fn = getattr(self, "do_" + fnname)
            except AttributeError:
                self.msg += u"Exception RuntimeScriptException: %s\n" % (
                    _("Unknown function %(func)s in line %(lineno)i.") %
                    {'func': elements[0], 'lineno': lineno}, )
                success = False
                break
            try:
                fn(*elements[1:])
            except ScriptExit:
                break
            except TypeError, e:
                # Wrong number/kind of arguments for the command.
                self.msg += u"Exception %s (line %i): %s\n" % (e.__class__.__name__, lineno, unicode(e))
                success = False
                break
            except RuntimeScriptException, e:
                if not self.ignoreExceptions:
                    self.msg += u"Exception %s (line %i): %s\n" % (e.__class__.__name__, lineno, unicode(e))
                    success = False
                    break
        return success
class Package:
    """ A package consists of a bunch of files which can be installed.

    Abstract base: subclasses provide the storage backend by implementing
    extract_file, filelist and isPackage.
    """

    def __init__(self, request):
        """ Initialise the package.
        @param request: RequestBase instance
        """
        self.request = request
        # Accumulates human-readable status messages during installation.
        self.msg = ""

    def installPackage(self):
        """ Opens the package and executes the script.
        @return: True on success (result of ScriptEngine.runScript)
        """
        _ = self.request.getText
        if not self.isPackage():
            raise PackageException(_("The file %s was not found in the package.") % MOIN_PACKAGE_FILE)
        commands = self.getScript().splitlines()
        return self.runScript(commands)

    def getScript(self):
        """ Returns the script. """
        # Strip a possible UTF-8 BOM so the header line parses cleanly.
        return self.extract_file(MOIN_PACKAGE_FILE).decode("utf-8").replace(u"\ufeff", "")

    def extract_file(self, filename):
        """ Returns the contents of a file in the package. """
        raise NotImplementedError

    def filelist(self):
        """ Returns a list of all files. """
        raise NotImplementedError

    def isPackage(self):
        """ Returns true if this package is recognised. """
        raise NotImplementedError
class ZipPackage(Package, ScriptEngine):
    """ A package that reads its files from a .zip file. """

    def __init__(self, request, filename):
        """ Initialise the package.
        @param request: RequestBase instance
        @param filename: filename of the .zip file
        """
        Package.__init__(self, request)
        ScriptEngine.__init__(self)
        self.filename = filename
        # self.zipfile is only created when the file really is a zip archive;
        # callers must check isPackage() before using extract_file/filelist.
        self._isZipfile = zipfile.is_zipfile(filename)
        if self._isZipfile:
            self.zipfile = zipfile.ZipFile(filename)
        # self.zipfile.getinfo(name)

    def extract_file(self, filename):
        """ Returns the contents of a file in the package. """
        _ = self.request.getText
        try:
            # Member names are looked up as cp437 bytes (classic zip encoding).
            return self.zipfile.read(filename.encode("cp437"))
        except KeyError:
            raise RuntimeScriptException(_(
                "The file %s was not found in the package.") % filename)

    def filelist(self):
        """ Returns a list of all files. """
        return self.zipfile.namelist()

    def isPackage(self):
        """ Returns true if this package is recognised. """
        return self._isZipfile and MOIN_PACKAGE_FILE in self.zipfile.namelist()
def main():
    """ Command-line entry point: list ('l') or install ('i') a package. """
    args = sys.argv
    # Require exactly 2 or 3 arguments after the program name.
    if len(args)-1 not in (2, 3) or args[1] not in ('l', 'i'):
        print >> sys.stderr, """MoinMoin Package Installer v%(version)i
%(myname)s action packagefile [request URL]
action - Either "l" for listing the script or "i" for installing.
packagefile - The path to the file containing the MoinMoin installer package
request URL - Just needed if you are running a wiki farm, used to differentiate
the correct wiki.
Example:
%(myname)s i ../package.zip
""" % {"version": MAX_VERSION, "myname": os.path.basename(args[0])}
        raise SystemExit
    packagefile = args[2]
    if len(args) > 3:
        request_url = args[3]
    else:
        request_url = None
    # Setup MoinMoin environment
    from MoinMoin.web.contexts import ScriptContext
    request = ScriptContext(url=request_url)
    package = ZipPackage(request, packagefile)
    if not package.isPackage():
        print "The specified file %s is not a package." % packagefile
        raise SystemExit
    if args[1] == 'l':
        print package.getScript()
    elif args[1] == 'i':
        if package.installPackage():
            print "Installation was successful!"
        else:
            print "Installation failed."
    # Show any status messages accumulated during listing/installation.
    if package.msg:
        print package.msg
if __name__ == '__main__':
    # Allow running this module directly as a command-line installer.
    main()
| |
from gym.spaces import Box, Discrete
import numpy as np
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import TorchCategorical, \
TorchDeterministic
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.framework import get_variable
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class ParameterNoise(Exploration):
    """An exploration that changes a Model's parameters.

    Implemented based on:
    [1] https://blog.openai.com/better-exploration-with-parameter-noise/
    [2] https://arxiv.org/pdf/1706.01905.pdf

    At the beginning of an episode, Gaussian noise is added to all weights
    of the model. At the end of the episode, the noise is undone and an action
    diff (pi-delta) is calculated, from which we determine the changes in the
    noise's stddev for the next episode.
    """

    def __init__(self,
                 action_space,
                 *,
                 framework: str,
                 policy_config: dict,
                 model: ModelV2,
                 initial_stddev=1.0,
                 random_timesteps=10000,
                 sub_exploration=None,
                 **kwargs):
        """Initializes a ParameterNoise Exploration object.

        Args:
            initial_stddev (float): The initial stddev to use for the noise.
            random_timesteps (int): The number of timesteps to act completely
                randomly (see [1]).
            sub_exploration (Optional[dict]): Optional sub-exploration config.
                None for auto-detection/setup.
        """
        assert framework is not None
        super().__init__(
            action_space,
            policy_config=policy_config,
            model=model,
            framework=framework,
            **kwargs)
        self.stddev = get_variable(
            initial_stddev, framework=self.framework, tf_name="stddev")
        self.stddev_val = initial_stddev  # Out-of-graph tf value holder.
        # The weight variables of the Model where noise should be applied to.
        # This excludes any variable, whose name contains "LayerNorm" (those
        # are BatchNormalization layers, which should not be perturbed).
        self.model_variables = [
            v for k, v in self.model.variables(as_dict=True).items()
            if "LayerNorm" not in k
        ]
        # Our noise to be added to the weights. Each item in `self.noise`
        # corresponds to one Model variable and holding the Gaussian noise to
        # be added to that variable (weight).
        self.noise = []
        for var in self.model_variables:
            name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
            self.noise.append(
                get_variable(
                    np.zeros(var.shape, dtype=np.float32),
                    framework=self.framework,
                    tf_name=name_,
                    torch_tensor=True,
                    device=self.device))
        # tf-specific ops to sample, assign and remove noise.
        if self.framework == "tf" and not tf.executing_eagerly():
            self.tf_sample_new_noise_op = \
                self._tf_sample_new_noise_op()
            self.tf_add_stored_noise_op = \
                self._tf_add_stored_noise_op()
            self.tf_remove_noise_op = \
                self._tf_remove_noise_op()
            # Create convenience sample+add op for tf.
            with tf1.control_dependencies([self.tf_sample_new_noise_op]):
                add_op = self._tf_add_stored_noise_op()
            with tf1.control_dependencies([add_op]):
                self.tf_sample_new_noise_and_add_op = tf.no_op()
        # Whether the Model's weights currently have noise added or not.
        self.weights_are_currently_noisy = False
        # Auto-detection of underlying exploration functionality.
        if sub_exploration is None:
            # For discrete action spaces, use an underlying EpsilonGreedy with
            # a special schedule.
            if isinstance(self.action_space, Discrete):
                sub_exploration = {
                    "type": "EpsilonGreedy",
                    "epsilon_schedule": {
                        "type": "PiecewiseSchedule",
                        # Step function (see [2]).
                        "endpoints": [(0, 1.0), (random_timesteps + 1, 1.0),
                                      (random_timesteps + 2, 0.01)],
                        "outside_value": 0.01
                    }
                }
            elif isinstance(self.action_space, Box):
                sub_exploration = {
                    "type": "OrnsteinUhlenbeckNoise",
                    "random_timesteps": random_timesteps,
                }
            # TODO(sven): Implement for any action space.
            else:
                raise NotImplementedError
        self.sub_exploration = from_config(
            Exploration,
            sub_exploration,
            framework=self.framework,
            action_space=self.action_space,
            policy_config=self.policy_config,
            model=self.model,
            **kwargs)
        # Whether we need to call `self._delayed_on_episode_start` before
        # the forward pass.
        self.episode_started = False

    @override(Exploration)
    def before_compute_actions(self,
                               *,
                               timestep=None,
                               explore=None,
                               tf_sess=None):
        """Ensures the weights' noise state matches the `explore` setting."""
        explore = explore if explore is not None else \
            self.policy_config["explore"]
        # Is this the first forward pass in the new episode? If yes, do the
        # noise re-sampling and add to weights.
        if self.episode_started:
            self._delayed_on_episode_start(explore, tf_sess)
        # Add noise if necessary.
        if explore and not self.weights_are_currently_noisy:
            self._add_stored_noise(tf_sess=tf_sess)
        # Remove noise if necessary.
        elif not explore and self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def get_exploration_action(self,
                               *,
                               action_distribution,
                               timestep,
                               explore=True):
        # Use our sub-exploration object to handle the final exploration
        # action (depends on the algo-type/action-space/etc..).
        return self.sub_exploration.get_exploration_action(
            action_distribution=action_distribution,
            timestep=timestep,
            explore=explore)

    @override(Exploration)
    def on_episode_start(self,
                         policy,
                         *,
                         environment=None,
                         episode=None,
                         tf_sess=None):
        # We have to delay the noise-adding step by one forward call.
        # This is due to the fact that the optimizer does it's step right
        # after the episode was reset (and hence the noise was already added!).
        # We don't want to update into a noisy net.
        self.episode_started = True

    def _delayed_on_episode_start(self, explore, tf_sess):
        """Performs the noise (re-)sampling deferred from on_episode_start."""
        # Sample fresh noise and add to weights.
        if explore:
            self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
        # Only sample, don't apply anything to the weights.
        else:
            self._sample_new_noise(tf_sess=tf_sess)
        self.episode_started = False

    @override(Exploration)
    def on_episode_end(self,
                       policy,
                       *,
                       environment=None,
                       episode=None,
                       tf_sess=None):
        # Remove stored noise from weights (only if currently noisy).
        if self.weights_are_currently_noisy:
            self._remove_noise(tf_sess=tf_sess)

    @override(Exploration)
    def postprocess_trajectory(self, policy, sample_batch, tf_sess=None):
        """Adapts self.stddev from the noisy-vs-clean action distance."""
        noisy_action_dist = noise_free_action_dist = None
        # Adjust the stddev depending on the action (pi)-distance.
        # Also see [1] for details.
        # TODO(sven): Find out whether this can be scrapped by simply using
        #  the `sample_batch` to get the noisy/noise-free action dist.
        _, _, fetches = policy.compute_actions(
            obs_batch=sample_batch[SampleBatch.CUR_OBS],
            # TODO(sven): What about state-ins and seq-lens?
            prev_action_batch=sample_batch.get(SampleBatch.PREV_ACTIONS),
            prev_reward_batch=sample_batch.get(SampleBatch.PREV_REWARDS),
            explore=self.weights_are_currently_noisy)
        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        else:
            raise NotImplementedError  # TODO(sven): Other action-dist cases.
        if self.weights_are_currently_noisy:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        # Second forward pass with the opposite noise state to obtain the
        # other (noisy or noise-free) distribution for the comparison.
        _, _, fetches = policy.compute_actions(
            obs_batch=sample_batch[SampleBatch.CUR_OBS],
            prev_action_batch=sample_batch.get(SampleBatch.PREV_ACTIONS),
            prev_reward_batch=sample_batch.get(SampleBatch.PREV_REWARDS),
            explore=not self.weights_are_currently_noisy)
        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
        # Deterministic (Gaussian actions, e.g. DDPG).
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
        if noisy_action_dist is None:
            noisy_action_dist = action_dist
        else:
            noise_free_action_dist = action_dist
        delta = distance = None
        # Categorical case (e.g. DQN).
        if policy.dist_class in (Categorical, TorchCategorical):
            # Calculate KL-divergence (DKL(clean||noisy)) according to [2].
            # TODO(sven): Allow KL-divergence to be calculated by our
            #  Distribution classes (don't support off-graph/numpy yet).
            distance = np.nanmean(
                np.sum(
                    noise_free_action_dist *
                    np.log(noise_free_action_dist /
                           (noisy_action_dist + SMALL_NUMBER)), 1))
            current_epsilon = self.sub_exploration.get_info(
                sess=tf_sess)["cur_epsilon"]
            delta = -np.log(1 - current_epsilon +
                            current_epsilon / self.action_space.n)
        elif policy.dist_class in [Deterministic, TorchDeterministic]:
            # Calculate MSE between noisy and non-noisy output (see [2]).
            distance = np.sqrt(
                np.mean(np.square(noise_free_action_dist - noisy_action_dist)))
            current_scale = self.sub_exploration.get_info(
                sess=tf_sess)["cur_scale"]
            delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * \
                current_scale
        # Adjust stddev according to the calculated action-distance.
        if distance <= delta:
            self.stddev_val *= 1.01
        else:
            self.stddev_val /= 1.01
        # Set self.stddev to calculated value.
        if self.framework == "tf":
            self.stddev.load(self.stddev_val, session=tf_sess)
        else:
            self.stddev = self.stddev_val
        return sample_batch

    def _sample_new_noise(self, *, tf_sess=None):
        """Samples new noise and stores it in `self.noise`."""
        if self.framework == "tf":
            tf_sess.run(self.tf_sample_new_noise_op)
        elif self.framework == "tfe":
            self._tf_sample_new_noise_op()
        else:
            # Torch: draw fresh Gaussian noise per stored noise tensor.
            for i in range(len(self.noise)):
                self.noise[i] = torch.normal(
                    mean=torch.zeros(self.noise[i].size()), std=self.stddev)

    def _tf_sample_new_noise_op(self):
        """Builds a tf op that re-samples all noise variables."""
        added_noises = []
        for noise in self.noise:
            added_noises.append(
                tf1.assign(
                    noise,
                    tf.random.normal(
                        shape=noise.shape,
                        stddev=self.stddev,
                        dtype=tf.float32)))
        return tf.group(*added_noises)

    def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
        """Samples new noise and applies it to the weights in one step.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use (tf only).
            override (bool): If True and the weights are currently noisy,
                remove the old noise first before sampling/adding new noise.
        """
        if self.framework == "tf":
            if override and self.weights_are_currently_noisy:
                tf_sess.run(self.tf_remove_noise_op)
            tf_sess.run(self.tf_sample_new_noise_and_add_op)
        else:
            if override and self.weights_are_currently_noisy:
                self._remove_noise()
            self._sample_new_noise()
            self._add_stored_noise()
        self.weights_are_currently_noisy = True

    def _add_stored_noise(self, *, tf_sess=None):
        """Adds the stored `self.noise` to the model's parameters.

        Note: No new sampling of noise here.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to add the
                stored noise to the (currently noise-free) weights.
        """
        # Make sure we only add noise to currently noise-free weights.
        assert self.weights_are_currently_noisy is False
        # Add stored noise to the model's parameters.
        if self.framework == "tf":
            tf_sess.run(self.tf_add_stored_noise_op)
        elif self.framework == "tfe":
            self._tf_add_stored_noise_op()
        else:
            for i in range(len(self.noise)):
                # Add noise to weights in-place.
                self.model_variables[i].add_(self.noise[i])
        self.weights_are_currently_noisy = True

    def _tf_add_stored_noise_op(self):
        """Generates tf-op that assigns the stored noise to weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to apply the already stored noise to the NN.
        """
        add_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            add_noise_ops.append(tf1.assign_add(var, noise))
        ret = tf.group(*tuple(add_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    def _remove_noise(self, *, tf_sess=None):
        """
        Removes the current action noise from the model parameters.

        Args:
            tf_sess (Optional[tf.Session]): The tf-session to use to remove
                the noise from the (currently noisy) weights.
        """
        # Make sure we only remove noise iff currently noisy.
        assert self.weights_are_currently_noisy is True
        # Removes the stored noise from the model's parameters.
        if self.framework == "tf":
            tf_sess.run(self.tf_remove_noise_op)
        elif self.framework == "tfe":
            self._tf_remove_noise_op()
        else:
            for var, noise in zip(self.model_variables, self.noise):
                # Remove noise from weights in-place.
                var.add_(-noise)
        self.weights_are_currently_noisy = False

    def _tf_remove_noise_op(self):
        """Generates a tf-op for removing noise from the model's weights.

        Also used by tf-eager.

        Returns:
            tf.op: The tf op to remve the currently stored noise from the NN.
        """
        remove_noise_ops = list()
        for var, noise in zip(self.model_variables, self.noise):
            remove_noise_ops.append(tf1.assign_add(var, -noise))
        ret = tf.group(*tuple(remove_noise_ops))
        with tf1.control_dependencies([ret]):
            return tf.no_op()

    @override(Exploration)
    def get_info(self, sess=None):
        # Report the current (out-of-graph) noise stddev.
        return {"cur_stddev": self.stddev_val}
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for autotuning performance knobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AutotuneBenchmark(test.Benchmark):
"""Benchmarks for autotuning performance knobs."""
def benchmarkMap(self):
    """Reports the autotune speedup for a plain map(matmul) pipeline."""
    baseline = self._benchmarkMap(autotune=False)
    tuned = self._benchmarkMap(autotune=True)
    print("speedup: %f" % (baseline / tuned))
def _benchmarkMap(self, autotune):
    """Measures per-element latency of a `map(matmul)` tf.data pipeline.

    Args:
        autotune: Whether to enable tf.data autotuning for this run.

    Returns:
        The median wall-clock time (seconds) per `get_next` call.
    """
    k = 1024 * 1024
    # One (1, 4M) x (4M, 1) matmul per element; repeat() makes it infinite.
    dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
                                                np.random.rand(4 * k,
                                                               1))).repeat()
    dataset = dataset.map(
        math_ops.matmul, num_parallel_calls=optimization.AUTOTUNE)
    options = dataset_ops.Options()
    options.experimental_autotune = autotune
    dataset = dataset.with_options(options)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    get_next = iterator.get_next()
    deltas = []
    with session.Session() as sess:
        # Warm up the pipeline before timing.
        for _ in range(5):
            sess.run(get_next.op)
        for _ in range(10000):
            start = time.time()
            sess.run(get_next.op)
            end = time.time()
            deltas.append(end - start)
    print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
          (np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
           np.max(deltas)))
    self.report_benchmark(
        iters=10000,
        wall_time=np.median(deltas),
        name="map" + ("_autotune" if autotune else ""))
    return np.median(deltas)
def benchmarkMapAndBatch(self):
a = self._benchmarkMapAndBatch(autotune=False)
b = self._benchmarkMapAndBatch(autotune=True)
print("speedup: %f" % (a / b))
def _benchmarkMapAndBatch(self, autotune):
batch_size = 16
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.apply(
batching.map_and_batch(
math_ops.matmul,
num_parallel_calls=optimization.AUTOTUNE,
batch_size=batch_size))
options = dataset_ops.Options()
options.experimental_autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(1000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_and_batch" + ("_autotune" if autotune else ""))
return np.median(deltas)
def benchmarkInterleave(self):
a = self._benchmarkInterleave(autotune=False)
b = self._benchmarkInterleave(autotune=True)
print("speedup: %f" % (a / b))
def _benchmarkInterleave(self, autotune):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
cycle_length=10,
num_parallel_calls=optimization.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(10000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="interleave" + ("_autotune" if autotune else ""))
return np.median(deltas)
def benchmarkMapAndInterleave(self):
a = self._benchmarkMapAndInterleave(autotune=False)
b = self._benchmarkMapAndInterleave(autotune=True)
print("speedup: %f" % (a / b))
def _benchmarkMapAndInterleave(self, autotune):
k = 1024 * 1024
a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()
def f1(x, y):
return math_ops.matmul(x, y)
def f2(a, b):
x, y = b
return a, math_ops.matmul(x, y)
dataset = dataset_a
dataset = dataset.map(f1, num_parallel_calls=optimization.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=optimization.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
dataset = dataset.map(f2, num_parallel_calls=optimization.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=optimization.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
dataset = dataset.map(f2, num_parallel_calls=optimization.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next)
for _ in range(1000):
start = time.time()
sess.run(get_next)
end = time.time()
deltas.append(end - start)
print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
(np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
np.max(deltas)))
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_and_interleave" + ("_autotune" if autotune else ""))
return np.median(deltas)
# Run all benchmarks when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
# Copyright (c) 2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Basic Test Class to verify REST API Client code "nexus_restapi_client.py"
"""
import mock
import requests
from networking_cisco.ml2_drivers.nexus import (
constants as const)
from networking_cisco.ml2_drivers.nexus import (
nexus_restapi_network_driver as rest)
from neutron.tests.unit import testlib_api
from oslo_serialization import jsonutils
class TestCiscoNexusRestapiClient(testlib_api.SqlTestCase):
    """Unit tests for Cisco REST API client."""

    def setUp(self):
        # Build the switch credential db, then replace requests.Session
        # with a mock so the driver under test generates no real HTTP
        # traffic; every request goes through the mock's side effects.
        self.nexus_dict = self._build_nexus_switch_db()
        self.mock_Session = mock.Mock()
        mock.patch.object(requests,
                          'Session',
                          return_value=self.mock_Session).start()
        self.r_driver = rest.CiscoNexusRestapiDriver(self.nexus_dict)
        super(TestCiscoNexusRestapiClient, self).setUp()

    def json(self):
        # Canned RESTAPI response body identifying a Nexus9000 chassis;
        # assigned to the mocked response's `json` attribute for GETs.
        return {u'imdata': [{u'eqptCh': {u'attributes': {
            u'descr': u'Nexus9000 C9396PX Chassis'}}}]}

    def json_cli(self):
        # Canned CLI (ins_api) response reporting success for each command
        # of a three-command batch; used for POSTs containing "cli_conf".
        return {u'ins_api': {u'outputs': {u'output': [{
            u'msg': u'Success', u'body': {}, u'code': u'200'}, {
            u'msg': u'Success', u'body': {}, u'code': u'200'}, {
            u'body': u'warning', u'msg': u'Success', u'code': u'200'}]},
            u'version': u'1.0', u'type': u'cli_conf', u'sid': u'eoc'}}

    def json_err(self):
        # Simulates a response body that cannot be decoded as JSON.
        raise Exception("json_err raising an exception")

    def _request_on_count(self, username, password, verify,
                          match_range=None, json_usr=None):
        """Generate side effect for restapi client Session.
        This method verifies the username, password, and verify settings
        are correct as input into restapi client request calls. This
        verifies the credential data base is built correctly and
        arguments are fed into the restapi client apis correctly.
        Additionally, this method will set the status code to 403
        based on count to values in match_range list. This
        status_code indicates the cookie has expired and needs to
        be refreshed to continue chatting with the host.
        Usage Example:
        The following call will verify username and password in the data
        for POST requests trying to do a AAAlogin. For all requests,
        the verify argument is checked against the verify arg passed into
        request. On the 4th request call, it will return 403 which should
        force a cookie refresh.
        _request_on_count('admin', 'Shhh1', '/path/to/cafile.crt', ['4'])
        """
        self.verify = verify

        # Signature mirrors requests.Session.request so the mock receives
        # exactly the keyword arguments the driver passes.
        def _side_effect_request(
            method, url,
            params=None,
            data=None,
            headers=None,
            cookies=None,
            files=None,
            auth=None,
            timeout=None,
            allow_redirects=True,
            proxies=None,
            hooks=None,
            stream=None,
            verify=None,
            cert=None,
            json=None):
            # `count` tracks how many requests were issued across calls so
            # a 403 can be injected at the positions given in match_range.
            if not hasattr(self, "count"):
                self.count = 0
            headers = {'content-type': 'application/json'}
            if self.verify != verify:
                raise Exception("request() 'verify' is incorrect")
            # NOTE(review): `rsp` is the Response *class*, not an instance;
            # status_code/headers/json are set as class attributes shared
            # across calls.  Works for these tests but is shared state.
            rsp = requests.models.Response
            match = False
            if match_range is not None:
                self.count += 1
                match = self.count in match_range
            if match:
                # Simulate an expired cookie: force a 403 on this request.
                rsp.status_code = 403
            elif method == "POST" and "aaaLogin" in url:
                # Login request: verify the credentials the driver sent.
                testdata = jsonutils.loads(data)
                if (password != testdata['aaaUser']['attributes']['pwd']):
                    raise Exception("request() 'password' is incorrect.")
                if (username != testdata['aaaUser']['attributes']['name']):
                    raise Exception("request() 'username' is incorrect.")
                rsp.status_code = 200
                rsp.headers = {'Set-Cookie': 'this is a test'}
            elif method == "POST" and "cli_conf" in data:
                # CLI-style configuration request.
                rsp.status_code = 200
                rsp.headers = headers
                rsp.json = self.json_cli
            elif method == "GET":
                rsp.status_code = 200
                rsp.headers = headers
                rsp.json = self.json
                # Caller-supplied json loader overrides the default body
                # (used to simulate undecodable JSON).
                if json_usr:
                    rsp.json = json_usr
            return rsp
        return _side_effect_request

    def _build_nexus_switch_db(self):
        # Five switch entries exercising the certificate combinations:
        # local cert file, verify-only, no verification, and (4.4.4.4 /
        # 5.5.5.5) entries with IF_PC set to drive the CLI config path.
        nexus_dict = {}
        nexus_dict['1.1.1.1'] = {}
        nexus_dict['1.1.1.1'][const.USERNAME] = 'admin'
        nexus_dict['1.1.1.1'][const.PASSWORD] = 'Shhhh1'
        nexus_dict['1.1.1.1'][const.HTTPS_VERIFY] = True
        nexus_dict['1.1.1.1'][const.HTTPS_CERT] = (
            '/home/caboucha/test_src/openstack-outfiles/nexus.crt')
        nexus_dict['2.2.2.2'] = {}
        nexus_dict['2.2.2.2'][const.USERNAME] = 'admin'
        nexus_dict['2.2.2.2'][const.PASSWORD] = 'Shhhh2'
        nexus_dict['2.2.2.2'][const.HTTPS_VERIFY] = True
        nexus_dict['2.2.2.2'][const.HTTPS_CERT] = None
        nexus_dict['3.3.3.3'] = {}
        nexus_dict['3.3.3.3'][const.USERNAME] = 'admin'
        nexus_dict['3.3.3.3'][const.PASSWORD] = 'Shhhh3'
        nexus_dict['3.3.3.3'][const.HTTPS_VERIFY] = False
        nexus_dict['3.3.3.3'][const.HTTPS_CERT] = None
        nexus_dict['4.4.4.4'] = {}
        nexus_dict['4.4.4.4'][const.USERNAME] = 'admin'
        nexus_dict['4.4.4.4'][const.PASSWORD] = 'Shhhh1'
        nexus_dict['4.4.4.4'][const.HTTPS_VERIFY] = True
        nexus_dict['4.4.4.4'][const.HTTPS_CERT] = (
            '/home/caboucha/test_src/openstack-outfiles/nexus.crt')
        nexus_dict['4.4.4.4'][const.IF_PC] = 'user cmd1;user cmd2'
        nexus_dict['5.5.5.5'] = {}
        nexus_dict['5.5.5.5'][const.USERNAME] = 'admin'
        nexus_dict['5.5.5.5'][const.PASSWORD] = 'Shhhh1'
        nexus_dict['5.5.5.5'][const.HTTPS_VERIFY] = False
        nexus_dict['5.5.5.5'][const.HTTPS_CERT] = None
        nexus_dict['5.5.5.5'][const.IF_PC] = 'user cmd1;user cmd2'
        return nexus_dict

    def _check_get_nexus_type(self, ipaddr, expected_count):
        # Issue get_nexus_type and check both its result and how many
        # Session.request calls it took; resets the mock afterwards.
        nexus_type = self.r_driver.get_nexus_type(ipaddr)
        if nexus_type != const.NEXUS_9K:
            raise Exception("test_verify_with_local_certificate")
        self.assertEqual(expected_count,
                         self.mock_Session.request.call_count,
                         "Expecting call count of 2")
        self.mock_Session.reset_mock()

    def _check_verify(self, ipaddr, username, password, verify):
        # Install a side effect that returns 403 on the 4th request,
        # forcing a cookie refresh on the third get_nexus_type below.
        config = {'request.side_effect':
                  self._request_on_count(username, password,
                                         verify, range(4, 5))}
        self.mock_Session.configure_mock(**config)
        # Request called twice: 1-get initial cookie,
        # 2-get nexus type
        self._check_get_nexus_type(ipaddr, 2)
        # Request called once: only get nexus type since cookie exists
        self._check_get_nexus_type(ipaddr, 1)
        # Request called 3 times: 1-first get nexus type returns 403,
        # 2- get another cookie, 3-successful get nexus type
        self._check_get_nexus_type(ipaddr, 3)

    def test_verify_with_local_certificate(self):
        # Switch configured with a local certificate file path.
        ipaddr = '1.1.1.1'
        self._check_verify(ipaddr,
                           self.nexus_dict[ipaddr][const.USERNAME],
                           self.nexus_dict[ipaddr][const.PASSWORD],
                           self.nexus_dict[ipaddr][const.HTTPS_CERT])

    def test_verify_with_nonlocal_certificate(self):
        # Switch with HTTPS verification enabled but no local cert file.
        ipaddr = '2.2.2.2'
        self._check_verify(ipaddr,
                           self.nexus_dict[ipaddr][const.USERNAME],
                           self.nexus_dict[ipaddr][const.PASSWORD],
                           self.nexus_dict[ipaddr][const.HTTPS_VERIFY])

    def test_verify_no_certificate(self):
        # Switch with HTTPS verification disabled entirely.
        ipaddr = '3.3.3.3'
        self._check_verify(ipaddr,
                           self.nexus_dict[ipaddr][const.USERNAME],
                           self.nexus_dict[ipaddr][const.PASSWORD],
                           self.nexus_dict[ipaddr][const.HTTPS_VERIFY])

    def test_verify_for_cli_with_local_cert(self):
        # Since the config contains const.IF_PC, it will cause
        # _send_cli_config_string to get called as opposed to
        # send_edit_string. This is a different authentication path
        # through the client's send_request since it sends CLI
        # events instead of RESTAPI events to the Nexus.
        ipaddr = '4.4.4.4'
        config = {'request.side_effect':
                  self._request_on_count(
                      self.nexus_dict[ipaddr][const.USERNAME],
                      self.nexus_dict[ipaddr][const.PASSWORD],
                      self.nexus_dict[ipaddr][const.HTTPS_CERT])}
        self.mock_Session.configure_mock(**config)
        self.r_driver._apply_user_port_channel_config(
            '4.4.4.4', 44)
        # The verify value passed in send_request is checked
        # in side_effect handling. If incorrect, exception raised.
        # No need to check again.
        self.mock_Session.reset_mock()

    def test_verify_for_cli_no_cert(self):
        # Since the config contains const.IF_PC, it will cause
        # _send_cli_config_string to get called as opposed to
        # send_edit_string. This is a different authentication path
        # through the client's send_request since it sends CLI
        # events instead of RESTAPI events to the Nexus.
        ipaddr = '5.5.5.5'
        config = {'request.side_effect':
                  self._request_on_count(
                      self.nexus_dict[ipaddr][const.USERNAME],
                      self.nexus_dict[ipaddr][const.PASSWORD],
                      self.nexus_dict[ipaddr][const.HTTPS_VERIFY])}
        self.mock_Session.configure_mock(**config)
        self.r_driver._apply_user_port_channel_config(
            '5.5.5.5', 44)
        # The verify value passed in send_request is checked
        # in side_effect handling. If incorrect, exception raised.
        # No need to check again.
        self.mock_Session.reset_mock()

    def test_bad_json_with_get_nexus_type(self):
        # Feed an undecodable JSON body; the driver should report -1.
        ipaddr = '3.3.3.3'
        config = {'request.side_effect':
                  self._request_on_count(
                      self.nexus_dict[ipaddr][const.USERNAME],
                      self.nexus_dict[ipaddr][const.PASSWORD],
                      self.nexus_dict[ipaddr][const.HTTPS_VERIFY],
                      json_usr=self.json_err)}
        self.mock_Session.configure_mock(**config)
        nexus_type = self.r_driver.get_nexus_type(ipaddr)
        if nexus_type != -1:
            raise Exception("bad json content test failed.")
        self.mock_Session.reset_mock()
| |
# coding: utf-8
from argparse import ArgumentParser
from collections import OrderedDict
import datetime as dt
import logging
import sys
from path_helpers import path
import si_prefix as si
from .. import pformat_dict
from ..commands import (DEFAULT_INDEX_HOST, freeze, get_plugins_directory,
install, SERVER_URL_TEMPLATE, uninstall, search)
from ..hooks import on_plugin_install
logger = logging.getLogger(__name__)
# Default plugin locations; the config file lives next to the plugins dir.
default_plugins_directory = get_plugins_directory()
default_config_path = (default_plugins_directory.parent
                       .joinpath('microdrop.ini'))
# Parsers that may be reused by other modules.
LOG_PARSER = ArgumentParser(add_help=False)
LOG_PARSER.add_argument('-l', '--log-level', default='error',
                        choices=['error', 'debug', 'info'])
# (flags, kwargs) tuple so the same -c/--config-file option can be added to
# more than one parser/mutex group below.
CONFIG_PARSER_ARGS = (('-c', '--config-file'),
                      dict(type=path, help='MicroDrop config file '
                           '(default="{default}").'
                           .format(default=default_config_path)))
CONFIG_PARSER = ArgumentParser(add_help=False)
CONFIG_PARSER.add_argument(*CONFIG_PARSER_ARGS[0], **CONFIG_PARSER_ARGS[1])
SERVER_PARSER = ArgumentParser(add_help=False)
SERVER_PARSER.add_argument('-s', '--server-url',
                           default=DEFAULT_INDEX_HOST, help='MicroDrop '
                           'plugin index URL (default="%(default)s")')
PLUGINS_PARSER = ArgumentParser(add_help=False)
PLUGINS_PARSER.add_argument('plugin', nargs='+')
# Either a config file or an explicit plugins directory, but not both.
PLUGINS_DIR_PARSER = ArgumentParser(add_help=False)
mutex_path = PLUGINS_DIR_PARSER.add_mutually_exclusive_group()
mutex_path.add_argument(*CONFIG_PARSER_ARGS[0], **CONFIG_PARSER_ARGS[1])
mutex_path.add_argument('-d', '--plugins-directory', type=path,
                        help='MicroDrop plugins directory '
                        '(default="{default}").'
                        .format(default=default_plugins_directory))
# Top-level parser with one subcommand per operation.
MPM_PARSER = ArgumentParser(add_help=False, parents=[LOG_PARSER,
                                                     PLUGINS_DIR_PARSER])
subparsers = MPM_PARSER.add_subparsers(help='help for subcommand',
                                       dest='command')
install_parser = subparsers.add_parser('install', help='Install plugins.',
                                       parents=[SERVER_PARSER])
install_parser.add_argument('--no-on-install', action='store_true',
                            help='Do not run `on_plugin_install` hook after '
                            'installing plugin')
# Plugins to install come from a requirements file or the command line.
plugin_group = install_parser.add_mutually_exclusive_group(required=True)
plugin_group.add_argument('-r', '--requirements-file', type=path)
plugin_group.add_argument('plugin', nargs='*', default=[])
search_parser = subparsers.add_parser('search', help='Search server for '
                                      'plugin.', parents=[SERVER_PARSER])
search_parser.add_argument('plugin')
subparsers.add_parser('uninstall', help='Uninstall plugins.',
                      parents=[PLUGINS_PARSER])
subparsers.add_parser('freeze', help='Output installed packages in '
                      'requirements format.')
hook_parser = subparsers.add_parser('hook', help='Execute plugin hook')
hook_parser.add_argument('hook', choices=['on_install'], help='Plugin hook')
hook_parser.add_argument('plugin', nargs='*')
def parse_args(args=None):
    '''Parses arguments, returns ``(options, args)``.

    Parameters
    ----------
    args : list, optional
        Argument strings to parse (excluding the program name).  Defaults
        to ``sys.argv[1:]``.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    '''
    if args is None:
        args = sys.argv[1:]
    parser = ArgumentParser(description='MicroDrop plugin manager',
                            parents=[MPM_PARSER])
    # Bug fix: the `args` parameter was previously ignored --
    # `parser.parse_args()` always read from `sys.argv` regardless of what
    # the caller supplied.  Default behavior (args=None) is unchanged.
    return parser.parse_args(args)
def validate_args(args):
    '''
    Apply custom validation and actions based on parsed arguments.
    Parameters
    ----------
    args : argparse.Namespace
        Result from ``parse_args`` method of ``argparse.ArgumentParser``
        instance.
    Returns
    -------
    argparse.Namespace
        Reference to input ``args``, which have been validated/updated.
    '''
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))
    if getattr(args, 'command', None) == 'install':
        # `install` needs either a readable requirements file or at least
        # one plugin named on the command line.
        if args.requirements_file and not args.requirements_file.isfile():
            print >> sys.stderr, ('Requirements file not available: {}'
                                  .format(args.requirements_file))
            raise SystemExit(-1)
        elif not args.plugin and not args.requirements_file:
            print >> sys.stderr, ('Requirements file or at least one plugin '
                                  'must be specified.')
            raise SystemExit(-2)
    if hasattr(args, 'server_url'):
        logger.debug('Using MicroDrop index server: "%s"', args.server_url)
        # Expand the bare host name into the full plugin index URL.
        args.server_url = SERVER_URL_TEMPLATE % args.server_url
    if all([args.plugins_directory is None,
            args.config_file is None]):
        # Neither explicit directory nor config file given: use default.
        args.plugins_directory = get_plugins_directory()
    elif args.plugins_directory is None:
        # Derive the plugins directory from the given MicroDrop config file.
        args.config_file = args.config_file.realpath()
        args.plugins_directory = get_plugins_directory(config_path=
                                                       args.config_file)
    else:
        args.plugins_directory = args.plugins_directory.realpath()
    return args
def main(args=None):
    '''Entry point: dispatch to the selected subcommand.

    Parameters
    ----------
    args : argparse.Namespace, optional
        Pre-parsed arguments; parsed from ``sys.argv`` when omitted.
    '''
    if args is None:
        args = parse_args()
    args = validate_args(args)
    logger.debug('Arguments: %s', args)
    if args.command == 'freeze':
        # One installed-plugin requirement spec per line.
        print '\n'.join(freeze(plugins_directory=args.plugins_directory))
    elif args.command == 'hook':
        # No plugins named: run the hook for every installed plugin.
        if not args.plugin:
            plugin_paths = args.plugins_directory.dirs()
        else:
            plugin_paths = [args.plugins_directory.joinpath(p)
                            for p in args.plugin]
        print 50 * '*'
        print '# Processing `on_install` hook for: #\n'
        print '\n'.join([' - {}{}'.format(p.name, '' if p.exists()
                                          else ' (not found)')
                         for p in plugin_paths])
        print ''
        if args.hook == 'on_install':
            for plugin_path_i in plugin_paths:
                print 50 * '-'
                if plugin_path_i.exists():
                    on_plugin_install(plugin_path_i)
                else:
                    print >> sys.stderr, '[warning] Skipping missing plugin'
    elif args.command == 'install':
        # Requirements file lines become the plugin list ('#' = comment).
        if args.requirements_file:
            args.plugin = [line.strip() for line in
                           args.requirements_file.lines()
                           if not line.startswith('#')]
        for plugin_i in args.plugin:
            try:
                path_i, meta_i = install(plugin_package=plugin_i,
                                         plugins_directory=
                                         args.plugins_directory,
                                         server_url=args.server_url)
                if not args.no_on_install:
                    on_plugin_install(path_i)
            except KeyError, exception:
                print '[{}] {}'.format(plugin_i, exception.message)
            except ValueError, exception:
                print exception.message
                continue
    elif args.command == 'search':
        try:
            plugin_name, releases = search(plugin_package=args.plugin,
                                           server_url=args.server_url)
            # Build a column-oriented table: name, version, upload time,
            # and human-readable size for each release.
            release_info = OrderedDict()
            release_info['plugin_name'] = [plugin_name] + ((len(releases) - 1)
                                                           * [''])
            release_info['version'] = releases.keys()
            for k in ['upload_time', 'size']:
                release_info[k] = [r[k] for r in releases.values()]
            release_info['upload_time'] = map(lambda timestamp: dt.datetime
                                              .strptime(timestamp,
                                                        r'%Y-%m-%dT'
                                                        r'%H:%M:%S.%f')
                                              .strftime('%Y-%m-%d %H:%M'),
                                              release_info['upload_time'])
            release_info['size'] = map(lambda s:
                                       si.si_format(s, precision=0, format_str=
                                                    '{value} {prefix}B'),
                                       release_info['size'])
            print '\n' + pformat_dict(release_info)
        except KeyError, exception:
            print >> sys.stderr, exception.message
    elif args.command == 'uninstall':
        for plugin_i in args.plugin:
            uninstall(plugin_package=plugin_i,
                      plugins_directory=args.plugins_directory)
| |
'''
Atomix project, _db.py, (TODO: summary)
Copyright (c) 2015 Stanford University
Released under the Apache License v2.0. See the LICENSE file for details.
Author(s): Manu Bansal
'''
import sqlite3
import sys
from _util import *
class DB:
################################################################
# DB select queries
################################################################
def db_select_n_states(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT Count(*) FROM states')
return c.fetchall()
def db_select_n_cores(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT Max(corenum) FROM stateps')
return c.fetchall()
def db_select_states(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM states ORDER BY statenum')
return c.fetchall()
#def db_select_decways(self):
# self.db.row_factory = sqlite3.Row
# c = self.db.cursor()
# c.execute('SELECT * FROM decways JOIN atoms ON decways.instancename = atoms.instancename')
# return c.fetchall()
def db_select_confs(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM confs JOIN atoms ON confs.instancename = atoms.instancename')
return c.fetchall()
def db_select_missingconfs(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM confs JOIN atoms ON confs.instancename = atoms.instancename')
c.execute('''
SELECT * FROM atoms
LEFT OUTER JOIN confs
ON atoms.instancename = confs.instancename
WHERE confs.instancename IS null
AND atoms.hasConf = 1
''')
return c.fetchall()
def db_select_missingwires(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM wires JOIN atoms ON wires.instancename = atoms.instancename')
c.execute('''
SELECT * FROM atoms
LEFT OUTER JOIN wires
ON atoms.instancename = wires.instancename
WHERE wires.instancename IS null
''')
return c.fetchall()
def db_select_stateps(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM stateps JOIN states ON stateps.statename = states.statename')
return c.fetchall()
def db_select_actions(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM actions')
return c.fetchall()
def db_select_action_atomseq(self, atomseqid):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
self.logger.debug('atomseqid=%s' % atomseqid)
##c.execute('''SELECT * FROM
##actionatomseqs JOIN atoms ON actionatomseqs.instancename = atoms.instancename
##WHERE actionatomseqs.atomseqid = %s
##ORDER BY orderid''' % atomseqid)
###''' % atomseqid)
c.execute('''SELECT * FROM
actionatomseqs JOIN atom_wblocks ON actionatomseqs.instancename = atom_wblocks.atomname
WHERE actionatomseqs.atomseqid = %s
ORDER BY orderid''' % atomseqid)
return c.fetchall()
def db_select_state_atomseq(self, atomseqid):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
self.logger.debug('atomseqid=%s' % atomseqid)
#c.execute('''SELECT * FROM
#stateatomseqs JOIN atoms ON stateatomseqs.instancename = atoms.instancename
#WHERE stateatomseqs.atomseqid = %s
#ORDER BY orderid''' % atomseqid)
c.execute('''SELECT * FROM
stateatomseqs JOIN atom_wblocks ON stateatomseqs.instancename = atom_wblocks.atomname
WHERE stateatomseqs.atomseqid = %s
ORDER BY orderid''' % atomseqid)
#''' % atomseqid)
return c.fetchall()
################################################################
def db_select_fifo_buftypes(self, fifoname):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#self.db.execute('Create table fifos_buftypes_many (instancename, atomname, buftype, inpouttype)')
#c.execute('SELECT DISTINCT instancename, buftype FROM fifos_buftypes_many WHERE instancename = "%s" AND buftype != "void"' % fifoname)
c.execute('SELECT DISTINCT instancename, buftype FROM fifos_buftypes_many WHERE instancename = "%s" AND buftype != "void"' % fifoname)
return c.fetchall()
def db_is_fifo_wired_to_optimized_atom(self, fifoname):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT MAX(optimized_atom) FROM fifos_buftypes_many WHERE instancename = "%s"' % fifoname)
dbresp = c.fetchall()
val, = dbresp[0]
return val
def db_select_fifo_buftypes_debug(self, fifoname):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#self.db.execute('Create table fifos_buftypes_many (instancename, atomname, buftype, inpouttype)')
c.execute('SELECT DISTINCT * FROM fifos_buftypes_many WHERE instancename = "%s"' % fifoname)
return c.fetchall()
def db_select_fifos_untyped(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM fifos')
return c.fetchall()
def db_select_fifos_typed_unfilled(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('''
#SELECT * FROM fifos
#LEFT OUTER JOIN fills
#ON fifos.instancename = fills.fifo_name
#WHERE fills.fifo_name IS null''')
c.execute('''
SELECT * FROM
(SELECT * FROM fifos JOIN fifos_buftypes_unique WHERE fifos.instancename = fifos_buftypes_unique.fifoname) AS a
LEFT OUTER JOIN fills
ON a.instancename = fills.fifo_name
WHERE fills.fifo_name IS null''')
return c.fetchall()
def db_select_fifos_typed_filled(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('''
SELECT * FROM fifos
INNER JOIN fills
ON fifos.instancename = fills.fifo_name''')
return c.fetchall()
def db_select_fills(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM fills')
return c.fetchall()
def db_select_links(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('SELECT * FROM links')
#c.execute('SELECT links.*, numbufs AS numbufs1, numbufs AS numbufs2 FROM links, fifos AS fifos1, fifos AS fifos2 \
# WHERE links.fifo_src = fifos1.instancename, links.fifo_dst = fifos2.instancename')
#c.execute('SELECT links.*, numbufs FROM links, fifos WHERE links.fifo_src = fifos.instancename')
c.execute('SELECT links.*, fifos_src.numbufs AS numbufs_src, fifos_src.coreid AS coreid_src, fifos_dst.numbufs AS numbufs_dst FROM links, fifos fifos_src, fifos fifos_dst \
WHERE links.fifo_src = fifos_src.instancename AND links.fifo_dst = fifos_dst.instancename')
return c.fetchall()
def db_select_atom_row(self, atomname):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM atoms WHERE atoms.instancename="%s"' % atomname)
return c.fetchall()
def db_select_blocks(self):
#self.db.execute('Create table atoms (instancename PRIMARY KEY, typename, coreid INTEGER, inptstr, outtstr, conftstr, hasConf INTEGER)')
#self.db.execute('''Create table wires (instancename, fifolist,
# FOREIGN KEY(instancename) REFERENCES atoms(instancename)
# )''')
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('SELECT DISTINCT typename FROM atoms')
c.execute('SELECT wires.*, typename FROM wires, atoms WHERE wires.instancename = atoms.instancename')
return c.fetchall()
def db_select_atoms(self):
# def where(w, *a):
# c = self.db.cursor()
# c.execute('Select * From Users where %s' % w, *a)
# return c.fetchall()
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('SELECT * FROM atoms')
c.execute('SELECT atoms.*, atom_wblocks.wblockname FROM atoms INNER JOIN atom_wblocks ON atoms.instancename = atom_wblocks.atomname')
return c.fetchall()
def db_select_wires(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('SELECT * FROM wires')
c.execute('SELECT wires.*, typename FROM wires, atoms WHERE wires.instancename = atoms.instancename')
return c.fetchall()
def db_select_wires_with_wblocknames(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
#c.execute('SELECT * FROM wires')
#c.execute('SELECT wires.*, typename FROM wires, atoms WHERE wires.instancename = atoms.instancename')
c.execute('SELECT wires.*, wblockname FROM wires, atom_wblocks WHERE wires.instancename = atom_wblocks.atomname')
return c.fetchall()
def db_select_wires_joined_atoms(self):
# self.db.execute('Create table atoms (instancename PRIMARY KEY, typename, coreid INTEGER, inptstr, outtstr, conftstr, hasConf INTEGER)')
# #self.db.execute('Create table fifos (instancename PRIMARY KEY, numbufs INTEGER, bufsize INTEGER, type STRING, coreid INTEGER)')
# self.db.execute('Create table fifos (instancename PRIMARY KEY, numbufs INTEGER, srcdsttype STRING, coreid INTEGER)')
# self.db.execute('Create table fifos_buftypes_many (instancename, atomname, buftype, inpouttype)')
# self.db.execute('Create table fifos_buftypes_unique (fifoname, buftype)')
#
# self.db.execute('Create table fills (fifo_name, data STRING, FOREIGN KEY(fifo_name) REFERENCES fifos(instancename))')
# self.db.execute('''Create table links (fifo_src, fifo_dst, link_owner,
# FOREIGN KEY(fifo_src) REFERENCES fifos(instancename),
# FOREIGN KEY(fifo_dst) REFERENCES fifos(instancename)
# )''')
# self.db.execute('''Create table wires (instancename, fifolist,
# FOREIGN KEY(instancename) REFERENCES atoms(instancename)
# )''')
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * FROM wires INNER JOIN atoms ON wires.instancename = atoms.instancename')
return c.fetchall()
def db_select_paras(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT paras.*, typename FROM paras, atoms WHERE paras.instancename = atoms.instancename')
return c.fetchall()
def db_insert_fifo_access_table(self, fifo_access_table):
self.db.execute('pragma foreign_keys=ON')
c = self.db.cursor()
for row in fifo_access_table:
#fifo_access_row = (fifoname, atomname, inpouttype, fifocoreid, atomcoreid)
c.execute('INSERT INTO fifo_access_table VALUES (?, ?, ?, ?, ?)', row)
def db_select_fifo_access_table(self):
self.db.row_factory = sqlite3.Row
c = self.db.cursor()
c.execute('SELECT * from fifo_access_table')
return c.fetchall()
def db_create(self):
self.db = sqlite3.connect(':memory:')
#self.db.execute('Create table Users (Name, Location, Type)')
#self.db.execute('Create table atoms (instancename PRIMARY KEY, typename, coreid INTEGER)')
self.db.execute('pragma foreign_keys=ON')
#self.db.execute('Create table atoms (instancename PRIMARY KEY, typename, coreid INTEGER, inptstr, outtstr, conftstr, hasConf INTEGER)')
self.db.execute('Create table atoms (instancename PRIMARY KEY, typename, coreid INTEGER, inptstr, outtstr, conftstr, hasConf INTEGER, optimized_atom INTEGER)')
#self.db.execute('Create table fifos (instancename PRIMARY KEY, numbufs INTEGER, bufsize INTEGER, type STRING, coreid INTEGER)')
self.db.execute('Create table fifos (instancename PRIMARY KEY, numbufs INTEGER, srcdsttype STRING, coreid INTEGER)')
self.db.execute('Create table fifos_buftypes_many (instancename, atomname, buftype, inpouttype, optimized_atom INTEGER)')
self.db.execute('Create table fifos_buftypes_unique (fifoname, buftype, fifo_optimized INTEGER)')
self.db.execute('''Create table fifo_access_table (fifoname, atomname, inpouttype, fifocoreid, atomcoreid)''')
self.db.execute('''Create table fifos_whether_multicore (fifoname, is_multicore BOOLEAN,
FOREIGN KEY (fifoname) REFERENCES fifos (instancename)
)''')
self.db.execute('Create table fills (fifo_name, data STRING, FOREIGN KEY(fifo_name) REFERENCES fifos(instancename))')
self.db.execute('''Create table links (fifo_src, fifo_dst, link_owner,
FOREIGN KEY(fifo_src) REFERENCES fifos(instancename),
FOREIGN KEY(fifo_dst) REFERENCES fifos(instancename)
)''')
self.db.execute('''Create table wires (instancename, fifolist,
FOREIGN KEY(instancename) REFERENCES atoms(instancename)
)''')
self.db.execute('''Create table paras (instancename, parslist,
FOREIGN KEY(instancename) REFERENCES atoms(instancename)
)''')
#wblockrow = (atomname, wblockname, blockname, inptstr, outtstr, conftstr, hasConf, fifo_access_signature_string)
self.db.execute('''Create table atom_wblocks (atomname, wblockname, blockname, inptstr, outtstr, conftstr, hasConf, fifo_access_signature_string,
FOREIGN KEY(atomname) REFERENCES atoms(instancename)
)''')
#self.db.execute('Create table actions (axnname PRIMARY KEY NOT NULL, atomseqid INTEGER AUTO_INCREMENT)')
self.db.execute('Create table actions (axnname UNIQUE NOT NULL, atomseqid INTEGER PRIMARY KEY)')
self.db.execute('''Create table states (statename UNIQUE NOT NULL, statenum INTEGER PRIMARY KEY)''')
# self.db.execute('''Create table states (statename UNIQUE NOT NULL, statenum INTEGER, corenum INTEGER, axnname,
# atomseqid INTEGER AUTO_INCREMENT,
# PRIMARY KEY (statenum, corenum),
# FOREIGN KEY (axnname) REFERENCES actions(axnname)
# )''')
self.db.execute('''Create table stateps (statename NOT NULL, corenum INTEGER, axnname,
atomseqid INTEGER PRIMARY KEY,
UNIQUE (statename, corenum),
FOREIGN KEY (axnname) REFERENCES actions(axnname)
)''')
# self.db.execute('''Create table actionatomseqs
# (atomseqid, orderid INTEGER AUTO_INCREMENT, instancename NOT NULL,
# FOREIGN KEY (atomseqid) REFERENCES actions(atomseqid),
# FOREIGN KEY (instancename) REFERENCES atoms(instancename)
# )''')
self.db.execute('''Create table actionatomseqs
(atomseqid, orderid INTEGER PRIMARY KEY, instancename NOT NULL,
FOREIGN KEY (atomseqid) REFERENCES actions(atomseqid),
FOREIGN KEY (instancename) REFERENCES atoms(instancename)
)''')
# self.db.execute('''Create table stateatomseqs
# (atomseqid , orderid INTEGER AUTO_INCREMENT, instancename NOT NULL,
# FOREIGN KEY (atomseqid) REFERENCES states(atomseqid),
# FOREIGN KEY (instancename) REFERENCES atoms(instancename)
# )''')
self.db.execute('''Create table stateatomseqs
(atomseqid, orderid INTEGER PRIMARY KEY, instancename NOT NULL,
FOREIGN KEY (atomseqid) REFERENCES stateps(atomseqid),
FOREIGN KEY (instancename) REFERENCES atoms(instancename)
)''')
# self.db.execute('''Create table decways (instancename, way, statename,
# UNIQUE (instancename, way, statename),
# FOREIGN KEY (instancename) REFERENCES atoms(instancename)
# FOREIGN KEY (statename) REFERENCES states(statename)
# )''')
self.db.execute('''Create table confs (instancename NOT NULL, vals,
UNIQUE (instancename),
FOREIGN KEY (instancename) REFERENCES atoms(instancename)
)''')
def db_select_statep_atoms_core_integrity_query(self):
    """Return every statep joined with its atom-sequence entries and atoms.

    Used to cross-check core assignments between stateps and the atoms
    their sequences reference.
    """
    # sqlite3.Row lets callers address result columns by name.
    self.db.row_factory = sqlite3.Row
    cursor = self.db.cursor()
    cursor.execute('''SELECT * FROM
    stateps
    INNER JOIN stateatomseqs ON stateps.atomseqid = stateatomseqs.atomseqid
    INNER JOIN atoms ON stateatomseqs.instancename = atoms.instancename
    ''')
    return cursor.fetchall()
def db_select_axn_atoms_core_integrity_query(self):
    """Return stateps joined, via their action, to the action's atoms.

    Follows the statep -> action -> actionatomseq -> atom chain so the
    caller can verify core integrity of action atom sequences.
    """
    self.db.row_factory = sqlite3.Row  # name-addressable result columns
    cursor = self.db.cursor()
    cursor.execute('''SELECT * FROM
    stateps
    INNER JOIN actions ON stateps.axnname = actions.axnname
    INNER JOIN actionatomseqs ON actions.atomseqid = actionatomseqs.atomseqid
    INNER JOIN atoms ON actionatomseqs.instancename = atoms.instancename
    ''')
    return cursor.fetchall()
def db_insert_fifo_is_multicore_row(self, row):
    """Insert one (fifoname, is_multicore) pair into fifos_whether_multicore.

    NOTE(review): unlike the other insert helpers this one does not commit;
    presumably a caller commits later -- confirm before relying on it.
    """
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO fifos_whether_multicore VALUES (?, ?)', row)
def db_select_fifos_whether_multicore(self):
    """Return every row of the fifos_whether_multicore table."""
    self.db.row_factory = sqlite3.Row  # name-addressable result columns
    cursor = self.db.cursor()
    cursor.execute('SELECT * FROM fifos_whether_multicore')
    return cursor.fetchall()
def db_insert_atom_wblock_row(self, row):
    """Insert one atom_wblocks row.

    row layout: (atomname, wblockname, blockname, inptstr, outtstr,
    conftstr, hasConf, fifo_access_signature_string)
    """
    self.logger.debug(row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO atom_wblocks VALUES (?, ?, ?, ?, ?, ?, ?, ?)', row)
def db_select_distinct_wblocks(self):
    """Return the distinct wblock definitions recorded in atom_wblocks.

    atomname is deliberately excluded from the projection so identical
    wblocks shared by several atoms collapse to a single row.
    """
    self.db.row_factory = sqlite3.Row
    cursor = self.db.cursor()
    cursor.execute('SELECT DISTINCT wblockname, blockname, inptstr, outtstr, conftstr, hasConf, fifo_access_signature_string FROM atom_wblocks')
    return cursor.fetchall()
def db_select_wblock_row(self, wblockname):
    """Return the distinct wblock definition rows matching *wblockname*.

    Columns: wblockname, blockname, inptstr, outtstr, conftstr, hasConf,
    fifo_access_signature_string (atomname intentionally excluded).
    """
    self.db.row_factory = sqlite3.Row
    c = self.db.cursor()
    # Parameterized query: the previous '... WHERE wblockname="%s"' %
    # interpolation broke on names containing quotes and was SQL-injectable.
    c.execute(
        'SELECT DISTINCT wblockname, blockname, inptstr, outtstr, conftstr, hasConf, fifo_access_signature_string '
        'FROM atom_wblocks WHERE wblockname=?',
        (wblockname,))
    return c.fetchall()
def db_insert_fifos_buftype_unique_row(self, row):
    """Insert one (fifoname, buftype, fifo_optimized) row into fifos_buftypes_unique."""
    self.logger.debug(row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO fifos_buftypes_unique VALUES (?, ?, ?)', row)
def db_insert_fifos_buftypes_many_row(self, row):
    """Insert one (instancename, atomname, buftype, inpouttype, optimized_atom) row."""
    self.logger.debug(row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO fifos_buftypes_many VALUES (?, ?, ?, ?, ?)', row)
def db_insert_action_row(self, action_row):
    """Insert an action name and return its auto-assigned atomseqid."""
    self.logger.debug(action_row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO actions (axnname) VALUES (?)', action_row)
    new_id = cursor.lastrowid  # INTEGER PRIMARY KEY rowid doubles as atomseqid
    self.logger.debug("last actions atomseqid=%d" % (new_id))
    self.db.commit()
    return new_id
def db_insert_state_row(self, state_row):
    """Insert a state name into states and return its auto-assigned statenum."""
    self.logger.debug(state_row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO states (statename) VALUES (?)', state_row)
    new_id = cursor.lastrowid  # INTEGER PRIMARY KEY rowid doubles as statenum
    self.logger.debug("last state's id=%d" % (new_id))
    self.db.commit()
    return new_id
def db_insert_statep_row(self, statep_row):
    """Insert a (statename, corenum, axnname) statep and return its atomseqid."""
    self.logger.debug(statep_row)
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    cursor.execute('INSERT INTO stateps (statename, corenum, axnname) VALUES (?, ?, ?)', statep_row)
    new_id = cursor.lastrowid  # INTEGER PRIMARY KEY rowid doubles as atomseqid
    self.logger.debug("last statep's atomseqid=%d" % (new_id))
    self.db.commit()
    return new_id
def db_insert_actionatomseq_row(self, atomseqid, atomseq):
    """Insert the ';'-terminated atom list *atomseq* for action *atomseqid*.

    Every atom name must be followed by a semicolon; a missing trailing
    ';' is a parse error and aborts the program. An atom name that fails
    the foreign-key check (undeclared atom) also aborts.
    """
    self.logger.debug('%s, %s' % (atomseqid, atomseq))
    self.db.execute('pragma foreign_keys=ON')
    cursor = self.db.cursor()
    parts = atomseq.split(';')
    # A well-formed sequence ends in ';', so the final split piece is ''.
    if parts[-1] != '':
        self.logger.error('PARSE ERROR: Missing a semicolon in atom sequence definition?')
        exit(9)
    for atom in parts[:-1]:
        atom = atom.strip()
        self.logger.debug('Query: c.execute(\'INSERT INTO actionatomseqs (atomseqid, instancename) values (?, ?)\', (atomseqid, atom) with (%s, %s)' % (atomseqid, atom))
        try:
            cursor.execute('INSERT INTO actionatomseqs (atomseqid, instancename) values (?, ?)', (atomseqid, atom))
            orderid = cursor.lastrowid
        except (sqlite3.OperationalError, sqlite3.IntegrityError):
            # Foreign-key failure: the named atom was never declared.
            print_line()
            self.logger.error("error parsing action: Atom named %s is not defined." % atom)
            print_line()
            sys.exit(1)
    self.db.commit()
def db_insert_stateatomseq_row(self, atomseqid, atomseq):
    """Insert the ';'-terminated atom list *atomseq* for statep *atomseqid*.

    Aborts the program on a missing trailing ';' or on an insert failure
    (e.g. an undeclared atom name), matching db_insert_actionatomseq_row.
    """
    self.logger.debug('%s, %s' % (atomseqid, atomseq))
    self.db.execute('pragma foreign_keys=ON')
    c = self.db.cursor()
    atoms = atomseq.split(';')
    # A well-formed sequence ends in ';', so the final split piece is ''.
    if atoms[-1] != '':
        self.logger.error('PARSE ERROR: Missing a semicolon in atom sequence definition?')
        exit(10)
    for atom in atoms[:-1]:
        atom = atom.strip()
        self.logger.debug('Inserting atom "%s" into stateatomseqs' % atom)
        try:
            c.execute('INSERT INTO stateatomseqs (atomseqid, instancename) values (?, ?)', (atomseqid, atom))
            orderid = c.lastrowid
        except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
            # Fix: foreign-key violations raise sqlite3.IntegrityError, which
            # the old `except sqlite3.OperationalError` clause let escape;
            # catch both, as db_insert_actionatomseq_row already does.
            self.logger.error(type(e).__name__)
            self.logger.error(e)
            # Dump what the DB actually holds to help diagnose the bad name.
            # Index positionally: the connection row_factory may not be
            # sqlite3.Row at this point, so row["..."] could itself fail.
            for row in self.db.execute('''SELECT atomseqid FROM states'''):
                self.logger.error('atomseqid: "%s"' % row[0])
            for row in self.db.execute('''SELECT instancename FROM atoms'''):
                self.logger.error('atomname: "%s"' % row[0])
            sys.exit(1)
    self.db.commit()
#def db_insert_decway_row(self, decway_row):
# c = self.db.cursor()
# c.execute('INSERT INTO decways VALUES (?, ?, ?)', decway_row)
# self.db.commit()
#
def db_insert_conf_row(self, conf_row):
    """Insert one (instancename, vals) row into confs and commit."""
    self.logger.debug(conf_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO confs VALUES (?, ?)', conf_row)
    self.db.commit()
# self.db.executemany('Insert into Users values(:Name, :Location, :Type)', [
# dict(Name="Mr. Foo", Location="Boston", Type="Secondary"),
# dict(Name="Mr. Bar", Location="New York", Type="Primary"),
# dict(Name="Mr. Quux", Location="Chicago", Type="Secondary"),
# ])
# self.db.commit()
# self.db.row_factory = sqlite3.Row
#
# and now your in-memory tiny "db" is ready to go. It's no harder to make a DB in a disk file and/or read the initial values from a text file, a CSV, and so forth, of course.
#
# Querying is especially flexible, easy and sweet, e.g., you can mix string insertion and parameter substitution at will...:
#
# def where(w, *a):
# c = self.db.cursor()
# c.execute('Select * From Users where %s' % w, *a)
# return c.fetchall()
def db_insert_fifo_row(self, fifo_row):
    """Insert one (instancename, numbufs, srcdsttype, coreid) fifo row and commit."""
    self.logger.debug(fifo_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO fifos VALUES (?, ?, ?, ?)', fifo_row)
    self.db.commit()
def db_insert_fill_row(self, fill_row):
    """Insert one (fifo_name, data) fill row and commit."""
    self.logger.debug(fill_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO fills VALUES (?, ?)', fill_row)
    self.db.commit()
def db_insert_link_row(self, link_row):
    """Insert one (fifo_src, fifo_dst, link_owner) link row and commit."""
    self.logger.debug(link_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO links VALUES (?, ?, ?)', link_row)
    self.db.commit()
def db_insert_atom_row(self, atom_row):
    """Insert one atoms row and commit.

    atom_row layout: (instancename, typename, coreid, inptstr, outtstr,
    conftstr, hasConf, optimized_atom)
    """
    self.logger.debug(atom_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO atoms VALUES (?, ?, ?, ?, ?, ?, ?, ?)', atom_row)
    self.db.commit()
def db_insert_wire_row(self, wire_row):
    """Insert one (instancename, fifolist) wire row and commit."""
    self.logger.debug(wire_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO wires VALUES (?, ?)', wire_row)
    self.db.commit()
def db_insert_pars_row(self, pars_row):
    """Insert one (instancename, parslist) parameter row into paras and commit."""
    self.logger.debug(pars_row)
    self.db.execute('pragma foreign_keys=ON')
    cur = self.db.cursor()
    cur.execute('INSERT INTO paras VALUES (?, ?)', pars_row)
    self.db.commit()
def __init__(self, logger):
    """Create the design database and its full schema.

    logger: a logging.Logger used by every db_* helper for debug/error output.
    """
    self.logger = logger
    self.db_create()
| |
# firebird/base.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: firebird
:name: Firebird
.. note::
The Firebird dialect within SQLAlchemy **is not currently supported**. The
dialect is not tested within continuous integration and is likely to have
many issues and caveats not currently handled.
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute("select * from table")
row = result.fetchone()
return
Where above, the ``ResultProxy`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print raises.fetchall()
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import exc
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import expression
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BLOB
from sqlalchemy.types import DATE
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INTEGER
from sqlalchemy.types import Integer
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
# Firebird/Interbase keywords that must be quoted when used as identifiers
# (consumed by FBIdentifierPreparer.reserved_words below).
RESERVED_WORDS = set(
    """
    active add admin after all alter and any as asc ascending at auto avg
    before begin between bigint bit_length blob both by
    case cast char character character_length char_length check close
    collate column commit committed computed conditional connect constraint
    containing count create cross cstring current current_connection
    current_date current_role current_time current_timestamp
    current_transaction current_user cursor
    database date day dec decimal declare default delete desc descending
    disconnect distinct do domain double drop
    else end entry_point escape exception execute exists exit external
    extract
    fetch file filter float for foreign from full function
    gdscode generator gen_id global grant group
    having hour
    if in inactive index inner input_type insensitive insert int integer
    into is isolation
    join key
    leading left length level like long lower
    manual max maximum_segment merge min minute module_name month
    names national natural nchar no not null numeric
    octet_length of on only open option or order outer output_type overflow
    page pages page_size parameter password plan position post_event
    precision primary privileges procedure protected
    rdb$db_key read real record_version recreate recursive references
    release reserv reserving retain returning_values returns revoke right
    rollback rows row_count
    savepoint schema second segment select sensitive set shadow shared
    singular size smallint snapshot some sort sqlcode stability start
    starting starts statistics sub_type sum suspend
    table then time timestamp to trailing transaction trigger trim
    uncommitted union unique update upper user using
    value values varchar variable varying view
    wait when where while with work write
    year
    """.split()
)
class _StringType(sqltypes.String):
    """Common base for Firebird string types; adds a ``charset`` option."""

    def __init__(self, charset=None, **kwargs):
        # Remember the Firebird CHARACTER SET before normal String setup;
        # FBTypeCompiler._extend_string reads it back when rendering DDL.
        self.charset = charset
        super(_StringType, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """Firebird VARCHAR type"""

    __visit_name__ = "VARCHAR"

    def __init__(self, length=None, **kwargs):
        # length is forwarded to sqltypes.VARCHAR; a charset keyword may
        # also be given (handled by the _StringType base).
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """Firebird CHAR type"""

    __visit_name__ = "CHAR"

    def __init__(self, length=None, **kwargs):
        # length is forwarded to sqltypes.CHAR; a charset keyword may
        # also be given (handled by the _StringType base).
        super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
    """DateTime that widens plain ``datetime.date`` bind values to ``datetime``."""

    def bind_processor(self, dialect):
        def process(value):
            # Exact-type test on purpose: datetime.datetime is a subclass
            # of datetime.date and must pass through unchanged.
            if type(value) is datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            return value

        return process
# Map generic SQLAlchemy types to their Firebird-specific implementations.
colspecs = {sqltypes.DateTime: _FBDateTime}

# Reflected type-name strings -> SQLAlchemy type classes; consumed by
# FBDialect.get_columns() when interpreting rdb$type_name values.
ischema_names = {
    "SHORT": SMALLINT,
    "LONG": INTEGER,
    "QUAD": FLOAT,
    "FLOAT": FLOAT,
    "DATE": DATE,
    "TIME": TIME,
    "TEXT": TEXT,
    "INT64": BIGINT,
    "DOUBLE": FLOAT,
    "TIMESTAMP": TIMESTAMP,
    "VARYING": VARCHAR,
    "CSTRING": CHAR,
    "BLOB": BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
    """Renders Firebird-specific DDL type names."""

    def visit_boolean(self, type_, **kw):
        # The dialect declares no native boolean; render SMALLINT instead.
        return self.visit_SMALLINT(type_, **kw)

    def visit_datetime(self, type_, **kw):
        return self.visit_TIMESTAMP(type_, **kw)

    def visit_TEXT(self, type_, **kw):
        return "BLOB SUB_TYPE 1"

    def visit_BLOB(self, type_, **kw):
        return "BLOB SUB_TYPE 0"

    def _extend_string(self, type_, basic):
        # Append the optional CHARACTER SET clause when one was given.
        charset = getattr(type_, "charset", None)
        if charset is not None:
            return "%s CHARACTER SET %s" % (basic, charset)
        return basic

    def visit_CHAR(self, type_, **kw):
        rendered = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
        return self._extend_string(type_, rendered)

    def visit_VARCHAR(self, type_, **kw):
        if not type_.length:
            raise exc.CompileError(
                "VARCHAR requires a length on dialect %s" % self.dialect.name
            )
        rendered = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
        return self._extend_string(type_, rendered)
class FBCompiler(sql.compiler.SQLCompiler):
    """Firebird specific idiosyncrasies"""

    ansi_bind_rules = True

    # def visit_contains_op_binary(self, binary, operator, **kw):
    # cant use CONTAINING b.c. it's case insensitive.

    # def visit_notcontains_op_binary(self, binary, operator, **kw):
    # cant use NOT CONTAINING b.c. it's case insensitive.

    def visit_now_func(self, fn, **kw):
        # now() renders as the standard CURRENT_TIMESTAMP.
        return "CURRENT_TIMESTAMP"

    def visit_startswith_op_binary(self, binary, operator, **kw):
        # Firebird spells prefix matching as STARTING WITH.
        return "%s STARTING WITH %s" % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw),
        )

    def visit_notstartswith_op_binary(self, binary, operator, **kw):
        return "%s NOT STARTING WITH %s" % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw),
        )

    def visit_mod_binary(self, binary, operator, **kw):
        # Modulo is rendered as the mod() function, not an operator.
        return "mod(%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw),
        )

    def visit_alias(self, alias, asfrom=False, **kwargs):
        if self.dialect._version_two:
            return super(FBCompiler, self).visit_alias(
                alias, asfrom=asfrom, **kwargs
            )
        else:
            # Override to not use the AS keyword which FB 1.5 does not like
            if asfrom:
                alias_name = (
                    isinstance(alias.name, expression._truncated_label)
                    and self._truncated_identifier("alias", alias.name)
                    or alias.name
                )

                return (
                    self.process(alias.element, asfrom=asfrom, **kwargs)
                    + " "
                    + self.preparer.format_alias(alias, alias_name)
                )
            else:
                return self.process(alias.element, **kwargs)

    def visit_substring_func(self, func, **kw):
        # SUBSTRING(x FROM start [FOR length]) form.
        s = self.process(func.clauses.clauses[0])
        start = self.process(func.clauses.clauses[1])
        if len(func.clauses.clauses) > 2:
            length = self.process(func.clauses.clauses[2])
            return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
        else:
            return "SUBSTRING(%s FROM %s)" % (s, start)

    def visit_length_func(self, function, **kw):
        # char_length() on dialect-2+ servers, legacy strlen() otherwise.
        if self.dialect._version_two:
            return "char_length" + self.function_argspec(function)
        else:
            return "strlen" + self.function_argspec(function)

    # char_length() compiles identically to length().
    visit_char_length_func = visit_length_func

    def function_argspec(self, func, **kw):
        # TODO: this probably will need to be
        # narrowed to a fixed list, some no-arg functions
        # may require parens - see similar example in the oracle
        # dialect
        if func.clauses is not None and len(func.clauses):
            return self.process(func.clause_expr, **kw)
        else:
            return ""

    def default_from(self):
        # Firebird needs an explicit FROM even for table-less selects.
        return " FROM rdb$database"

    def visit_sequence(self, seq, **kw):
        # Sequence values come from the gen_id() function.
        return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)

    def get_select_precolumns(self, select, **kw):
        """Called when building a ``SELECT`` statement, position is just
        before column list Firebird puts the limit and offset right
        after the ``SELECT``...
        """
        result = ""
        if select._limit_clause is not None:
            result += "FIRST %s " % self.process(select._limit_clause, **kw)
        if select._offset_clause is not None:
            result += "SKIP %s " % self.process(select._offset_clause, **kw)
        if select._distinct:
            result += "DISTINCT "
        return result

    def limit_clause(self, select, **kw):
        """Already taken care of in the `get_select_precolumns` method."""
        return ""

    def returning_clause(self, stmt, returning_cols):
        # Render the RETURNING <cols> tail for INSERT/UPDATE/DELETE.
        columns = [
            self._label_select_column(None, c, True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]

        return "RETURNING " + ", ".join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
    """Firebird syntactic idiosyncrasies"""

    def visit_create_sequence(self, create):
        """Generate a ``CREATE GENERATOR`` statement for the sequence."""
        # Generators accept no options; see
        # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
        if create.element.start is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support START WITH"
            )
        if create.element.increment is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support INCREMENT BY"
            )

        # Dialect-2+ servers spell it SEQUENCE; older ones use GENERATOR.
        keyword = "SEQUENCE" if self.dialect._version_two else "GENERATOR"
        return "CREATE %s %s" % (
            keyword,
            self.preparer.format_sequence(create.element),
        )

    def visit_drop_sequence(self, drop):
        """Generate a ``DROP GENERATOR`` statement for the sequence."""
        keyword = "SEQUENCE" if self.dialect._version_two else "GENERATOR"
        return "DROP %s %s" % (
            keyword,
            self.preparer.format_sequence(drop.element),
        )
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
    """Install Firebird specific reserved words."""

    reserved_words = RESERVED_WORDS
    # Identifiers beginning with an underscore also require quoting.
    illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
        ["_"]
    )

    def __init__(self, dialect):
        # omit_schema: schema names are never rendered for this dialect.
        super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
    def fire_sequence(self, seq, type_):
        """Get the next value from the sequence using ``gen_id()``."""
        seq_name = self.dialect.identifier_preparer.format_sequence(seq)
        return self._execute_scalar(
            "SELECT gen_id(%s, 1) FROM rdb$database" % seq_name,
            type_,
        )
class FBDialect(default.DefaultDialect):
    """Firebird dialect"""

    name = "firebird"

    # Firebird identifiers are limited to 31 characters.
    max_identifier_length = 31

    supports_sequences = True
    sequences_optional = False
    supports_default_values = True
    postfetch_lastrowid = False

    supports_native_boolean = False

    # Names are stored case-normalized on the server; normalize_name()/
    # denormalize_name() below translate to/from SQLAlchemy's convention.
    requires_name_normalize = True
    supports_empty_insert = False

    statement_compiler = FBCompiler
    ddl_compiler = FBDDLCompiler
    preparer = FBIdentifierPreparer
    type_compiler = FBTypeCompiler
    execution_ctx_cls = FBExecutionContext

    colspecs = colspecs
    ischema_names = ischema_names
    construct_arguments = []

    # defaults to dialect ver. 3,
    # will be autodetected off upon
    # first connect
    _version_two = True
def initialize(self, connection):
    """Detect server version on first connect and adjust dialect behavior."""
    super(FBDialect, self).initialize(connection)

    # Dialect-3 behavior for Firebird >= 2 or Interbase >= 6; anything
    # older falls back to the legacy dialect-1 handling.
    self._version_two = (
        "firebird" in self.server_version_info
        and self.server_version_info >= (2,)
    ) or (
        "interbase" in self.server_version_info
        and self.server_version_info >= (6,)
    )

    if not self._version_two:
        # TODO: whatever other pre < 2.0 stuff goes here
        self.ischema_names = ischema_names.copy()
        self.ischema_names["TIMESTAMP"] = sqltypes.DATE
        self.colspecs = {sqltypes.DateTime: sqltypes.DATE}

    # RETURNING requires a version-two server; preserve an explicit
    # user-set implicit_returning override already present in __dict__.
    self.implicit_returning = self._version_two and self.__dict__.get(
        "implicit_returning", True
    )
def normalize_name(self, name):
    """Translate a server-side name to SQLAlchemy's case convention."""
    if name is None:
        return None
    # Remove trailing spaces: FB uses a CHAR() type,
    # that is padded with spaces
    name = name.rstrip()
    if name.upper() == name and not (
        self.identifier_preparer._requires_quotes
    )(name.lower()):
        # All-caps, unquoted-safe names map to SQLAlchemy's lower case.
        return name.lower()
    if name.lower() == name:
        # Already-lower-case server names must be quoted to round-trip.
        return quoted_name(name, quote=True)
    return name
def denormalize_name(self, name):
    """Translate a SQLAlchemy-convention name to the server's convention."""
    if name is None:
        return None
    # Lower-case, unquoted-safe names become the server's upper case;
    # anything else passes through untouched.
    if name.lower() == name and not (
        self.identifier_preparer._requires_quotes
    )(name.lower()):
        return name.upper()
    return name
def has_table(self, connection, table_name, schema=None):
    """Return ``True`` if the given table exists, ignoring
    the `schema`."""
    tblqry = """
    SELECT 1 AS has_table FROM rdb$database
    WHERE EXISTS (SELECT rdb$relation_name
    FROM rdb$relations
    WHERE rdb$relation_name=?)
    """
    # Probe rdb$relations under the server-side spelling of the name.
    found = connection.execute(
        tblqry, [self.denormalize_name(table_name)]
    ).first()
    return found is not None
def has_sequence(self, connection, sequence_name, schema=None):
    """Return ``True`` if the given sequence (generator) exists."""
    genqry = """
    SELECT 1 AS has_sequence FROM rdb$database
    WHERE EXISTS (SELECT rdb$generator_name
    FROM rdb$generators
    WHERE rdb$generator_name=?)
    """
    # Probe rdb$generators under the server-side spelling of the name.
    found = connection.execute(
        genqry, [self.denormalize_name(sequence_name)]
    ).first()
    return found is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
    """Return normalized names of all non-system tables."""
    # Tables are non-system relations with no view source (view_blr);
    # this is the query given at the Firebird FAQ among other places:
    # http://www.firebirdfaq.org/faq174/
    # An apparently interchangeable alternative queries
    # rdb$relation_fields instead; see
    # http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
    # and the discussion at [ticket:2898].
    s = """
    select rdb$relation_name
    from rdb$relations
    where rdb$view_blr is null
    and (rdb$system_flag is null or rdb$system_flag = 0);
    """
    names = []
    for rec in connection.execute(s):
        names.append(self.normalize_name(rec[0]))
    return names
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
    """Return normalized names of all non-system views."""
    # Views are non-system relations that DO have a view source;
    # see http://www.firebirdfaq.org/faq174/
    view_query = """
    select rdb$relation_name
    from rdb$relations
    where rdb$view_blr is not null
    and (rdb$system_flag is null or rdb$system_flag = 0);
    """
    return [
        self.normalize_name(rec[0])
        for rec in connection.execute(view_query)
    ]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
    """Return the source text of *view_name*, or None if not found."""
    qry = """
    SELECT rdb$view_source AS view_source
    FROM rdb$relations
    WHERE rdb$relation_name=?
    """
    row = connection.execute(
        qry, [self.denormalize_name(view_name)]
    ).first()
    return row["view_source"] if row else None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    """Return the table's primary-key columns (constraint name unavailable)."""
    # The PK columns are the index segments of the table's
    # PRIMARY KEY relation constraint.
    keyqry = """
    SELECT se.rdb$field_name AS fname
    FROM rdb$relation_constraints rc
    JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
    WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
    """
    result = connection.execute(
        keyqry, ["PRIMARY KEY", self.denormalize_name(table_name)]
    )
    pkfields = []
    for rec in result.fetchall():
        pkfields.append(self.normalize_name(rec["fname"]))
    return {"constrained_columns": pkfields, "name": None}
@reflection.cache
def get_column_sequence(
    self, connection, table_name, column_name, schema=None, **kw
):
    """Return ``{"name": <generator>}`` feeding the column, else None."""
    tablename = self.denormalize_name(table_name)
    colname = self.denormalize_name(column_name)

    # Heuristic-query to determine the generator associated to a PK field:
    # find a trigger on the table whose dependencies are exactly the table
    # plus one generator (the count(*) = 2 subquery enforces "exactly").
    # NOTE(review): the rdb$*_type constants (14, 2, 0, 1) are Firebird
    # system-table codes -- presumably generator/trigger/table/before-insert;
    # confirm against the Firebird documentation before changing them.
    genqry = """
    SELECT trigdep.rdb$depended_on_name AS fgenerator
    FROM rdb$dependencies tabdep
    JOIN rdb$dependencies trigdep
    ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
    AND trigdep.rdb$depended_on_type=14
    AND trigdep.rdb$dependent_type=2
    JOIN rdb$triggers trig ON
    trig.rdb$trigger_name=tabdep.rdb$dependent_name
    WHERE tabdep.rdb$depended_on_name=?
    AND tabdep.rdb$depended_on_type=0
    AND trig.rdb$trigger_type=1
    AND tabdep.rdb$field_name=?
    AND (SELECT count(*)
    FROM rdb$dependencies trigdep2
    WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
    """
    genr = connection.execute(genqry, [tablename, colname]).first()
    if genr is not None:
        return dict(name=self.normalize_name(genr["fgenerator"]))
    # Falls through to an implicit None when no generator matches.
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
    """Reflect the columns of *table_name* into SQLAlchemy column dicts."""
    # Query to extract the details of all the fields of the given table:
    # field name, nullability, type name/subtype, char length (converted
    # from bytes), precision/scale, and the default-value source text.
    tblqry = """
    SELECT r.rdb$field_name AS fname,
    r.rdb$null_flag AS null_flag,
    t.rdb$type_name AS ftype,
    f.rdb$field_sub_type AS stype,
    f.rdb$field_length/
    COALESCE(cs.rdb$bytes_per_character,1) AS flen,
    f.rdb$field_precision AS fprec,
    f.rdb$field_scale AS fscale,
    COALESCE(r.rdb$default_source,
    f.rdb$default_source) AS fdefault
    FROM rdb$relation_fields r
    JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
    JOIN rdb$types t
    ON t.rdb$type=f.rdb$field_type AND
    t.rdb$field_name='RDB$FIELD_TYPE'
    LEFT JOIN rdb$character_sets cs ON
    f.rdb$character_set_id=cs.rdb$character_set_id
    WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
    ORDER BY r.rdb$field_position
    """
    # get the PK, used to determine the eventual associated sequence
    pk_constraint = self.get_pk_constraint(connection, table_name)
    pkey_cols = pk_constraint["constrained_columns"]

    tablename = self.denormalize_name(table_name)
    # get all of the fields for this table
    c = connection.execute(tblqry, [tablename])
    cols = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        name = self.normalize_name(row["fname"])
        orig_colname = row["fname"]

        # get the data type
        colspec = row["ftype"].rstrip()
        coltype = self.ischema_names.get(colspec)
        if coltype is None:
            util.warn(
                "Did not recognize type '%s' of column '%s'"
                % (colspec, name)
            )
            coltype = sqltypes.NULLTYPE
        elif issubclass(coltype, Integer) and row["fprec"] != 0:
            # Integer storage with nonzero precision is really NUMERIC;
            # Firebird stores the scale negated.
            coltype = NUMERIC(
                precision=row["fprec"], scale=row["fscale"] * -1
            )
        elif colspec in ("VARYING", "CSTRING"):
            coltype = coltype(row["flen"])
        elif colspec == "TEXT":
            coltype = TEXT(row["flen"])
        elif colspec == "BLOB":
            # Sub-type 1 is a text blob; everything else is binary.
            if row["stype"] == 1:
                coltype = TEXT()
            else:
                coltype = BLOB()
        else:
            coltype = coltype()

        # does it have a default value?
        defvalue = None
        if row["fdefault"] is not None:
            # the value comes down as "DEFAULT 'value'": there may be
            # more than one whitespace around the "DEFAULT" keyword
            # and it may also be lower case
            # (see also http://tracker.firebirdsql.org/browse/CORE-356)
            defexpr = row["fdefault"].lstrip()
            assert defexpr[:8].rstrip().upper() == "DEFAULT", (
                "Unrecognized default value: %s" % defexpr
            )
            defvalue = defexpr[8:].strip()
            if defvalue == "NULL":
                # Redundant
                defvalue = None
        col_d = {
            "name": name,
            "type": coltype,
            "nullable": not bool(row["null_flag"]),
            "default": defvalue,
            "autoincrement": "auto",
        }

        # Names that are lower-case on the server must stay quoted.
        if orig_colname.lower() == orig_colname:
            col_d["quote"] = True

        # if the PK is a single field, try to see if its linked to
        # a sequence thru a trigger
        if len(pkey_cols) == 1 and name == pkey_cols[0]:
            seq_d = self.get_column_sequence(connection, tablename, name)
            if seq_d is not None:
                col_d["sequence"] = seq_d

        cols.append(col_d)
    return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    """Reflect the foreign keys of *table_name* into SQLAlchemy FK dicts."""
    # Query to extract the details of each UK/FK of the given table:
    # walk from the constraint's own index (ix1) across its foreign_key
    # link to the referenced index (ix2), pairing column segments by
    # position to line up constrained and referred columns.
    fkqry = """
    SELECT rc.rdb$constraint_name AS cname,
    cse.rdb$field_name AS fname,
    ix2.rdb$relation_name AS targetrname,
    se.rdb$field_name AS targetfname
    FROM rdb$relation_constraints rc
    JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
    JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
    JOIN rdb$index_segments cse ON
    cse.rdb$index_name=ix1.rdb$index_name
    JOIN rdb$index_segments se
    ON se.rdb$index_name=ix2.rdb$index_name
    AND se.rdb$field_position=cse.rdb$field_position
    WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
    ORDER BY se.rdb$index_name, se.rdb$field_position
    """
    tablename = self.denormalize_name(table_name)

    c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
    # One dict per constraint name, accumulated row by row.
    fks = util.defaultdict(
        lambda: {
            "name": None,
            "constrained_columns": [],
            "referred_schema": None,
            "referred_table": None,
            "referred_columns": [],
        }
    )

    for row in c:
        cname = self.normalize_name(row["cname"])
        fk = fks[cname]
        if not fk["name"]:
            fk["name"] = cname
            fk["referred_table"] = self.normalize_name(row["targetrname"])
        fk["constrained_columns"].append(self.normalize_name(row["fname"]))
        fk["referred_columns"].append(
            self.normalize_name(row["targetfname"])
        )
    return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.execute(qry, [self.denormalize_name(table_name)])
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row["index_name"]]
if "name" not in indexrec:
indexrec["name"] = self.normalize_name(row["index_name"])
indexrec["column_names"] = []
indexrec["unique"] = bool(row["unique_flag"])
indexrec["column_names"].append(
self.normalize_name(row["field_name"])
)
return list(indexes.values())
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main program for organizing the buildtool.
This module is responsible for determining the configuration
then acquiring and dispatching commands.
Commands are introduced into modules, and modules are explicitly
plugged into the command_modules[] list in main() where they
will be initialized and their commands registered into the registry.
From there this module will be able to process arguments and
dispatch commands.
"""
import argparse
import datetime
import logging
import os
import sys
import time
import yaml
from buildtool.metrics import MetricsManager
from buildtool import (
add_parser_argument,
maybe_log_exception,
GitRunner)
# Maps the --log_level option value to the logging module's numeric level.
STANDARD_LOG_LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR
}
# This is so tests can disable it
# (when False, preprocess_args() skips reading $HOME/.spinnaker/buildtool.yml).
CHECK_HOME_FOR_CONFIG = True
def add_standard_parser_args(parser, defaults):
  """Register the command-independent options on the given parser.

  Args:
    parser: [argparse.Parser]
    defaults: [dict] Default value overrides keyed by option name.
  """
  parser.add_argument(
      'components', nargs='*', default=defaults.get('components', None),
      help='Restrict commands to these components or repository names')

  # Default metrics context id is "<yymmdd>.<pid>" for this process.
  default_invocation_id = '{:%y%m%d}.{}'.format(
      datetime.datetime.utcnow(), os.getpid())

  add_parser_argument(
      parser, 'default_args_file', defaults, None,
      help='Path to YAML file containing command-line option overrides.'
      ' The default is $HOME/.spinnaker/buildtool.yml if present.'
      ' This parameter will overload the defaults. Embedded'
      ' default_args_file will also be read at a lower precedence than'
      ' the containing file.')
  add_parser_argument(
      parser, 'log_level', defaults, 'info',
      choices=STANDARD_LOG_LEVELS.keys(),
      help='Set the logging level')
  add_parser_argument(
      parser, 'output_dir', defaults, 'output',
      help='Directory to write working files.')
  add_parser_argument(
      parser, 'input_dir', defaults, 'source_code',
      help='Directory to cache input files, such as cloning git repos.')
  add_parser_argument(
      parser, 'one_at_a_time', defaults, False, type=bool,
      help='Do not perform applicable concurrency, for debugging.')
  add_parser_argument(
      parser, 'parent_invocation_id', defaults, default_invocation_id,
      help='For identifying the context of the metrics data to be produced.')
def __load_defaults_from_path(path, visited=None):
  """Load option defaults from a YAML file, following embedded includes.

  Args:
    path: [string] Path to the YAML file to load.
    visited: [list of strings] Paths already loaded along this include
        chain; used to detect circular "default_args_file" references.

  Returns:
    [dict] The defaults from the file, with values from any embedded
    "default_args_file" merged in at lower precedence.

  Raises:
    ValueError: if the chain of default_args_file references is circular.
  """
  visited = visited or []
  if path in visited:
    raise ValueError('Circular "default_args_file" dependency in %s' % path)
  visited.append(path)

  with open(path, 'r') as f:
    defaults = yaml.safe_load(f)

  # Allow these files to be recursive
  # So that there can be some overall default file
  # that is then overridden by another file where
  # the override file references the default one
  # and the CLI argument points to the override file.
  base_defaults_file = defaults.get('default_args_file')
  if base_defaults_file:
    # Pass the visited chain down so a cycle spanning multiple files
    # raises ValueError instead of recursing forever (previously the
    # chain was dropped here, so the cycle check never fired).
    base_defaults = __load_defaults_from_path(base_defaults_file, visited)
    base_defaults.update(defaults)  # base is lower precedence.
    defaults = base_defaults  # defaults is what we want to return.
  return defaults
def preprocess_args(args, default_home_path_filename='buildtool.yml'):
  """Preprocess the args to determine the defaults to use.

  This recognizes the --default_args_file override and, if present,
  loads the defaults from it.

  Args:
    args: [list of strings] Raw command-line arguments.
    default_home_path_filename: [string] Name of the per-user config
        file looked up under ~/.spinnaker.

  Returns:
    args, defaults
  Where:
    args are the remaining arguments (with --default_args_file removed)
    defaults are overridden defaults from the default_args_file, if present.
  """
  parser = argparse.ArgumentParser(add_help=False)
  parser.add_argument('--default_args_file', default=None)
  options, args = parser.parse_known_args(args)

  # expanduser('~') avoids a KeyError when HOME is not set in the
  # environment (e.g. some CI environments or Windows).
  home_path = os.path.join(os.path.expanduser('~'), '.spinnaker',
                           default_home_path_filename)
  if CHECK_HOME_FOR_CONFIG and os.path.exists(home_path):
    defaults = __load_defaults_from_path(home_path)
    defaults['default_args_file'] = home_path
  else:
    defaults = {}

  if options.default_args_file:
    # An explicit --default_args_file takes precedence over the home config.
    override_defaults = __load_defaults_from_path(options.default_args_file)
    override_defaults['default_args_file'] = options.default_args_file
    defaults.update(override_defaults)
  return args, defaults
def make_registry(command_modules, parser, defaults):
  """Build the command registry and wire command arguments into the parser.

  Args:
    command_modules: [list of modules] Modules that export
        register_commands(registry, subparsers, defaults); each call
        registers that module's CommandFactory instances.
    parser: [ArgumentParser] Receives a 'command' subparser capturing
        the requested command choice.
    defaults: [dict] Default values to specify when adding arguments.

  Returns:
    [dict] of (<command-name>: <CommandFactory>).
  """
  subparsers = parser.add_subparsers(title='command', dest='command')
  registry = {}
  for module in command_modules:
    module.register_commands(registry, subparsers, defaults)
  return registry
def add_monitoring_context_labels(options):
  """Append version-derived labels to options.monitoring_context_labels.

  The version comes from --git_branch when present, otherwise it is
  derived from the bom version/path by stripping its build suffix.
  Official branches (master, release-*, master-latest-*) also get an
  'official_version' label.
  """
  option_dict = vars(options)
  version_name = option_dict.get('git_branch', None)
  if not version_name:
    bom_name = option_dict.get('bom_version') or option_dict.get('bom_path')
    if bom_name:
      if bom_name.find('-unbuilt') > 0:
        version_name = bom_name[:bom_name.find('-unbuilt')]
      elif bom_name.find('-latest') > 0:
        version_name = bom_name[:bom_name.find('-latest')]
      else:
        # Strip a trailing "-<build>" suffix when one exists. Previously
        # bom_name[:bom_name.rfind('-')] silently dropped the last
        # character when the name contained no '-' (rfind returned -1).
        dash = bom_name.rfind('-')
        version_name = bom_name[:dash] if dash >= 0 else bom_name

  if version_name:
    context_labels = ['version=' + version_name]
    if (version_name == 'master'
        or version_name.startswith('release-')
        or version_name.startswith('master-latest-')):
      context_labels.append('official_version=' + version_name)
    if options.monitoring_context_labels:
      context_labels.append(options.monitoring_context_labels)
    options.monitoring_context_labels = ','.join(context_labels)
def init_options_and_registry(args, command_modules):
  """Register command modules and determine options from commandline.

  These are coupled together for implementation simplicity. Conceptually
  they are unrelated but they share implementation details that can be
  encapsulated by combining them this way.

  Args:
    args: [list of command-line arguments]
    command_modules: See make_registry.

  Returns:
    options, registry
  Where:
    options: [Namespace] From parsed args.
    registry: [dict] of (<command-name>: <CommandFactory>)
  """
  # Pre-scan for --default_args_file so registered options pick up defaults.
  args, defaults = preprocess_args(args)
  parser = argparse.ArgumentParser(prog='buildtool.sh')
  add_standard_parser_args(parser, defaults)
  # Metrics-related options are registered alongside the standard ones.
  MetricsManager.init_argument_parser(parser, defaults)
  registry = make_registry(command_modules, parser, defaults)
  options = parser.parse_args(args)
  options.program = 'buildtool'
  # Determine the version for monitoring purposes.
  # Depending on the options defined, this is either the branch or bom prefix.
  add_monitoring_context_labels(options)
  return options, registry
def main():
  """The main command dispatcher.

  Loads the *_commands modules, parses the command line, configures
  logging and metrics, then runs the requested command. Returns a
  process exit code (0 on success, -1 for an unknown command); other
  failures propagate as exceptions to wrapped_main().
  """
  start_time = time.time()
  from importlib import import_module
  # Each "<name>_commands" module contributes commands via its
  # register_commands() function (see make_registry).
  command_modules = [
      import_module(name + '_commands') for name in [
          'apidocs',
          'bom',
          'changelog',
          'container',
          'debian',
          'halyard',
          'image',
          'rpm',
          'source',
          'spinnaker',
          'inspection',
          'spin',
      ]]
  # Keep git auth credentials out of the inherited environment.
  GitRunner.stash_and_clear_auth_env_vars()
  options, command_registry = init_options_and_registry(
      sys.argv[1:], command_modules)
  logging.basicConfig(
      format='%(levelname).1s %(asctime)s.%(msecs)03d'
      ' [%(threadName)s.%(process)d] %(message)s',
      datefmt='%H:%M:%S',
      level=STANDARD_LOG_LEVELS[options.log_level])
  logging.debug(
      'Running with options:\n %s',
      '\n '.join(yaml.safe_dump(vars(options), default_flow_style=False)
                 .split('\n')))
  factory = command_registry.get(options.command)
  if not factory:
    logging.error('Unknown command "%s"', options.command)
    return -1
  MetricsManager.startup_metrics(options)
  labels = {'command': options.command}
  success = False
  try:
    command = factory.make_command(options)
    command()
    success = True
  finally:
    # Record the outcome and timing even when the command raises.
    labels['success'] = success
    MetricsManager.singleton().observe_timer(
        'BuildTool_Outcome', labels,
        time.time() - start_time)
    MetricsManager.shutdown_metrics()
  return 0
def dump_threads():
  """Log any threads still alive, to help debug a hung shutdown.

  A process once failed to exit even though its log suggested it was
  done — possibly joining on a background thread. This report should
  give a clue if that happens again.
  """
  import threading
  descriptions = [
      ' name={name} daemon={d} id={id}'.format(
          name=t.name, d=t.daemon, id=t.ident)
      for t in threading.enumerate()
  ]
  # Only the main thread is expected; anything more is worth reporting.
  if len(descriptions) > 1:
    logging.info('The following threads still running:\n%s',
                 '\n'.join(descriptions))
def wrapped_main():
  """Run main(), then report any threads still outstanding.

  Returns main()'s exit code, or -1 when main() raised.
  """
  # pylint: disable=broad-except
  retcode = -1
  try:
    retcode = main()
  except Exception as ex:
    sys.stdout.flush()
    maybe_log_exception('main()', ex, action_msg='Terminating')
    logging.error("FAILED")
  dump_threads()
  return retcode
if __name__ == '__main__':
  # Propagate the tool's return code to the shell as the process exit code.
  sys.exit(wrapped_main())
| |
import abc
import numpy as np
import numpy.matlib
# The EKF class contains the framework for an Extended Kalman Filter, but must be subclassed to use.
# A subclass must implement:
# 1) calc_transfer_fun(); see bottom of file for more info.
# 2) __init__() to initialize self.state, self.covar, and self.process_noise appropriately
# Alternatively, the existing implementations of EKF can be used (e.g. EKF2D)
# Sensor classes are optionally used to pass measurement information into the EKF, to keep
# sensor parameters and processing methods for each sensor together.
# Sensor classes have a read() method which takes raw sensor data and returns
# a SensorReading object, which can be passed to the EKF update() method.
# For usage, see run_ekf1d.py in selfdrive/new for a simple example.
# ekf.predict(dt) should be called between update cycles with the time since it was last called.
# Ideally, predict(dt) should be called at a relatively constant rate.
# update() should be called once per sensor, and can be called multiple times between predict steps.
# Access and set the state of the filter directly with ekf.state and ekf.covar.
# Container for a single measurement: the raw data vector, its covariance,
# and the observation model relating filter state to measurement space.
class SensorReading:
  # Given a perfect model and no noise, data = obs_model * state
  def __init__(self, data, covar, obs_model):
    self.obs_model = obs_model
    self.covar = covar
    self.data = data

  def __repr__(self):
    fields = (repr(self.data), repr(self.covar), repr(self.obs_model))
    return "SensorReading(data={}, covar={}, obs_model={})".format(*fields)
# A generic sensor class that does no pre-processing of data
class SimpleSensor:
  # obs_model can be
  #   a full observation model matrix, or
  #   an integer or tuple of indices into ekf.state, indicating which
  #   variables are being directly observed
  # covar can be
  #   a full covariance matrix, or
  #   a float or tuple of individual covars for each component of the reading
  # dims is the number of states in the EKF
  def __init__(self, obs_model, covar, dims):
    # Allow for scalar covar/obs_model
    if not hasattr(obs_model, "__len__"):
      obs_model = (obs_model, )
    if not hasattr(covar, "__len__"):
      covar = (covar, )

    # Full observation model passed
    if dims in np.array(obs_model).shape:
      self.obs_model = np.asmatrix(obs_model)
      self.covar = np.asmatrix(covar)
    # Indices of unit observations passed
    else:
      self.obs_model = np.matlib.zeros((len(obs_model), dims))
      self.obs_model[:, list(obs_model)] = np.identity(len(obs_model))
      if np.asarray(covar).ndim == 2:
        self.covar = np.asmatrix(covar)
      elif len(covar) == len(obs_model):
        self.covar = np.matlib.diag(covar)
      else:
        self.covar = np.matlib.identity(len(obs_model)) * covar

  def read(self, data, covar=None):
    # Compare against None explicitly: `if covar:` raised "truth value is
    # ambiguous" for multi-element covariance matrices and silently
    # ignored a legitimate all-zero covariance.
    if covar is not None:
      self.covar = covar
    return SensorReading(data, self.covar, self.obs_model)
class GPS:
  """Converts lat/lon fixes into planar x/y SensorReadings for the EKF."""
  earth_r = 6371e3  # m, average earth radius

  def __init__(self, xy_idx=(0, 1), dims=2, var=1e4):
    self.obs_model = np.matlib.zeros((2, dims))
    self.obs_model[:, tuple(xy_idx)] = np.matlib.identity(2)
    self.covar = np.matlib.identity(2) * var

  # [lat, lon] in decimal degrees
  def init_pos(self, latlon):
    self.init_lat, self.init_lon = np.radians(np.asarray(latlon[:2]))

  # Compute straight-line distance, in meters, between two lat/long coordinates
  # Input in radians
  def haversine(self, lat1, lon1, lat2, lon2):
    half_dlat = 0.5 * (lat2 - lat1)
    half_dlon = 0.5 * (lon2 - lon1)
    a = (np.sin(half_dlat)**2 +
         np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon)**2)
    return 2 * GPS.earth_r * np.arcsin(np.sqrt(a))

  # Convert decimal degrees into meters relative to the init_pos origin
  def convert_deg2m(self, lat, lon):
    lat_r, lon_r = np.radians([lat, lon])
    return ((lon_r - self.init_lon) * np.cos(self.init_lat) * GPS.earth_r,
            (lat_r - self.init_lat) * GPS.earth_r)

  # Convert meters into decimal degrees
  def convert_m2deg(self, xs, ys):
    lat_r = ys / GPS.earth_r + self.init_lat
    lon_r = xs / (GPS.earth_r * np.cos(self.init_lat)) + self.init_lon
    return np.degrees(lat_r), np.degrees(lon_r)

  # latlon is [lat, long,] as decimal degrees
  # accuracy is as given by Android location service: radius of 68% confidence
  def read(self, latlon, accuracy=None):
    x_dist, y_dist = self.convert_deg2m(latlon[0], latlon[1])
    covar = self.covar if not accuracy else np.matlib.identity(2) * accuracy**2
    return SensorReading(
        np.asmatrix([x_dist, y_dist]).T, covar, self.obs_model)
class EKF:
  """Extended Kalman Filter base class (abstract).

  Subclasses must implement calc_transfer_fun() and initialize
  self.state, self.covar, and self.process_noise.
  """
  __metaclass__ = abc.ABCMeta
  def __init__(self, debug=False):
    # When DEBUG is set, update() prints its intermediate matrices.
    self.DEBUG = debug
  def __str__(self):
    return "EKF(state={}, covar={})".format(self.state, self.covar)
  # Measurement update
  # Reading should be a SensorReading object with data, covar, and obs_model attributes
  def update(self, reading):
    """Fold one measurement into the state estimate (standard EKF update)."""
    # Potential improvements:
    # deal with negative covars
    # add noise to really low covars to ensure stability
    # use mahalanobis distance to reject outliers
    # wrap angles after state updates and innovation
    # y = z - H*x
    innovation = reading.data - reading.obs_model * self.state
    if self.DEBUG:
      print "reading:\n",reading.data
      print "innovation:\n",innovation
    # S = H*P*H' + R
    innovation_covar = reading.obs_model * self.covar * reading.obs_model.T + reading.covar
    # K = P*H'*S^-1
    kalman_gain = self.covar * reading.obs_model.T * np.linalg.inv(
        innovation_covar)
    if self.DEBUG:
      print "gain:\n", kalman_gain
      print "innovation_covar:\n", innovation_covar
      print "innovation: ", innovation
      print "test: ", self.covar * reading.obs_model.T * (
          reading.obs_model * self.covar * reading.obs_model.T + reading.covar *
          0).I
    # x = x + K*y
    self.state += kalman_gain*innovation
    # print "covar", np.diag(self.covar)
    #self.state[(roll_vel, yaw_vel, pitch_vel),:] = reading.data
    # Standard form: P = (I - K*H)*P
    # self.covar = (self.identity - kalman_gain*reading.obs_model) * self.covar
    # Use the Joseph form for numerical stability: P = (I-K*H)*P*(I - K*H)' + K*R*K'
    aux_mtrx = (self.identity - kalman_gain * reading.obs_model)
    self.covar = aux_mtrx * self.covar * aux_mtrx.T + kalman_gain * reading.covar * kalman_gain.T
    if self.DEBUG:
      print "After update"
      print "state\n", self.state
      print "covar:\n",self.covar
  def update_scalar(self, reading):
    """Measurement update specialized for a scalar reading."""
    # like update() but knowing that the measurement is a scalar;
    # this avoids matrix inversions and (surprisingly) speeds up drived.py a lot
    # innovation = reading.data - np.matmul(reading.obs_model, self.state)
    # innovation_covar = np.matmul(np.matmul(reading.obs_model, self.covar), reading.obs_model.T) + reading.covar
    # kalman_gain = np.matmul(self.covar, reading.obs_model.T)/innovation_covar
    # self.state += np.matmul(kalman_gain, innovation)
    # aux_mtrx = self.identity - np.matmul(kalman_gain, reading.obs_model)
    # self.covar = np.matmul(aux_mtrx, np.matmul(self.covar, aux_mtrx.T)) + np.matmul(kalman_gain, np.matmul(reading.covar, kalman_gain.T))
    # written without np.matmul (einsum strings spell out the same products)
    es = np.einsum
    ABC_T = "ij,jk,lk->il"
    AB_T = "ij,kj->ik"
    AB = "ij,jk->ik"
    innovation = reading.data - es(AB, reading.obs_model, self.state)
    innovation_covar = es(ABC_T, reading.obs_model, self.covar,
                          reading.obs_model) + reading.covar
    kalman_gain = es(AB_T, self.covar, reading.obs_model) / innovation_covar
    self.state += es(AB, kalman_gain, innovation)
    # Joseph-form covariance update, as in update().
    aux_mtrx = self.identity - es(AB, kalman_gain, reading.obs_model)
    self.covar = es(ABC_T, aux_mtrx, self.covar, aux_mtrx) + \
        es(ABC_T, kalman_gain, reading.covar, kalman_gain)
  # Prediction update
  def predict(self, dt):
    """Advance the state estimate by dt using the subclass transfer function."""
    es = np.einsum
    ABC_T = "ij,jk,lk->il"
    AB = "ij,jk->ik"
    # State update
    transfer_fun, transfer_fun_jacobian = self.calc_transfer_fun(dt)
    # self.state = np.matmul(transfer_fun, self.state)
    # self.covar = np.matmul(np.matmul(transfer_fun_jacobian, self.covar), transfer_fun_jacobian.T) + self.process_noise * dt
    # x = f(x, u), written in the form x = A(x, u)*x
    self.state = es(AB, transfer_fun, self.state)
    # P = J*P*J' + Q
    self.covar = es(ABC_T, transfer_fun_jacobian, self.covar,
                    transfer_fun_jacobian) + self.process_noise * dt #!dt
    #! Clip covariance to avoid explosions
    self.covar = np.clip(self.covar,-1e10,1e10)
  @abc.abstractmethod
  def calc_transfer_fun(self, dt):
    """Return a tuple with the transfer function and transfer function jacobian.

    The transfer function and jacobian should both be a numpy matrix of size DIMSxDIMS.
    The transfer function matrix A should satisfy the state-update equation
      x_(k+1) = A * x_k
    The jacobian J is the direct jacobian of A*x_k. For linear systems J=A.
    Current implementations calculate A and J as functions of state. Control input
    can be added trivially by adding a control parameter to predict() and
    calc_transfer_fun(), and using it during calculation of A and J.
    """
class FastEKF1D(EKF):
  """Fast version of EKF for 1D problems with scalar readings.

  State is [position, velocity]; the symmetric 2x2 covariance is stored
  flat as [var_pos, var_vel, cov_pos_vel].
  """
  def __init__(self, dt, var_init, Q):
    super(FastEKF1D, self).__init__(False)
    self.state = [0, 0]
    self.covar = [var_init, var_init, 0]
    # Process Noise, pre-scaled by the nominal time step.
    self.dtQ0 = dt * Q[0]
    self.dtQ1 = dt * Q[1]
  def update(self, reading):
    # Only scalar readings are supported; use update_scalar().
    raise NotImplementedError
  def update_scalar(self, reading):
    """Scalar measurement update without any matrix algebra."""
    # TODO(mgraczyk): Delete this for speed.
    # assert np.all(reading.obs_model == [1, 0])
    rcov = reading.covar[0, 0]
    x = self.state
    S = self.covar
    innovation = reading.data - x[0]
    innovation_covar = S[0] + rcov
    k0 = S[0] / innovation_covar
    k1 = S[2] / innovation_covar
    x[0] += k0 * innovation
    x[1] += k1 * innovation
    mk = 1 - k0
    # Joseph-form update written out elementwise for the 2x2 case.
    S[1] += k1 * (k1 * (S[0] + rcov) - 2 * S[2])
    S[2] = mk * (S[2] - k1 * S[0]) + rcov * k0 * k1
    S[0] = mk * mk * S[0] + rcov * k0 * k0
  def predict(self, dt):
    """Advance state and covariance by dt (constant-velocity model)."""
    # State update
    x = self.state
    x[0] += dt * x[1]
    # P = J*P*J' + Q
    S = self.covar
    S[0] += dt * (2 * S[2] + dt * S[1]) + self.dtQ0
    S[2] += dt * S[1]
    S[1] += self.dtQ1
    # Clip covariance to avoid explosions.
    # BUG FIX: the old code did `S = max(-1e10, min(S, 1e10))`, which only
    # rebound the local name S (comparing the whole list, not its entries),
    # so self.covar was never actually clipped. Clip each entry instead.
    self.covar = [min(max(v, -1e10), 1e10) for v in S]
  def calc_transfer_fun(self, dt):
    # Constant-velocity transfer matrix; the system is linear, so the
    # jacobian equals the transfer matrix itself.
    tf = np.identity(2)
    tf[0, 1] = dt
    tfj = tf
    return tf, tfj
| |
from pandas.compat import range
import re
import operator
import pytest
import warnings
from numpy import nan
import numpy as np
import pandas as pd
from pandas.core.sparse.api import SparseArray, SparseSeries, SparseDtype
from pandas._libs.sparse import IntIndex
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=["integer", "block"])
def kind(request):
    # Sparse index kind used to parametrize the round-trip tests below.
    return request.param
class TestSparseArray(object):
    def setup_method(self, method):
        # Shared fixtures: a NaN-holed dense array, its SparseArray view,
        # and a zero-filled SparseArray variant.
        self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
        self.arr = SparseArray(self.arr_data)
        self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
    def test_constructor_dtype(self):
        # Float data containing NaN: fill_value defaults to NaN.
        arr = SparseArray([np.nan, 1, 2, np.nan])
        assert arr.dtype == SparseDtype(np.float64, np.nan)
        assert arr.dtype.subtype == np.float64
        assert np.isnan(arr.fill_value)
        # An explicit fill_value overrides the NaN default.
        arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
        assert arr.dtype == SparseDtype(np.float64, 0)
        assert arr.fill_value == 0
        # dtype=float without NaN in the data still defaults fill_value to NaN.
        arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
        assert arr.dtype == SparseDtype(np.float64, np.nan)
        assert np.isnan(arr.fill_value)
        # Integer dtypes default fill_value to 0.
        arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
        assert arr.dtype == SparseDtype(np.int64, 0)
        assert arr.fill_value == 0
        arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
        assert arr.dtype == SparseDtype(np.int64, 0)
        assert arr.fill_value == 0
        # dtype=None infers int64 from the data.
        arr = SparseArray([0, 1, 2, 4], dtype=None)
        assert arr.dtype == SparseDtype(np.int64, 0)
        assert arr.fill_value == 0
        arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
        assert arr.dtype == SparseDtype(np.int64, 0)
        assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
    def test_constructor_object_dtype(self):
        # GH 11856: object dtype defaults to a NaN fill_value.
        arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
        assert arr.dtype == SparseDtype(np.object)
        assert np.isnan(arr.fill_value)
        # An explicit object fill_value is preserved as-is.
        arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
                          fill_value='A')
        assert arr.dtype == SparseDtype(np.object, 'A')
        assert arr.fill_value == 'A'
        # GH 17574: a falsy non-None fill_value (False) must be respected.
        data = [False, 0, 100.0, 0.0]
        arr = SparseArray(data, dtype=np.object, fill_value=False)
        assert arr.dtype == SparseDtype(np.object, False)
        assert arr.fill_value is False
        # Round-trip preserves both the type and value of each element.
        arr_expected = np.array(data, dtype=np.object)
        it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
        assert np.fromiter(it, dtype=np.bool).all()
    @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
    def test_constructor_na_dtype(self, dtype):
        # NaN cannot be stored in an int-backed sparse array.
        with tm.assert_raises_regex(ValueError, "Cannot convert"):
            SparseArray([0, 1, np.nan], dtype=dtype)
    def test_constructor_spindex_dtype(self):
        # Data plus an explicit IntIndex: positions absent from the index
        # are filled with the inferred fill_value.
        arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
        # XXX: Behavior change: specifying SparseIndex no longer changes the
        # fill_value
        expected = SparseArray([0, 1, 2, 0], kind='integer')
        tm.assert_sp_array_equal(arr, expected)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
        arr = SparseArray(data=[1, 2, 3],
                          sparse_index=IntIndex(4, [1, 2, 3]),
                          dtype=np.int64, fill_value=0)
        exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
        tm.assert_sp_array_equal(arr, exp)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
        # fill_value given as a keyword with an explicit dtype.
        arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
                          fill_value=0, dtype=np.int64)
        exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
        tm.assert_sp_array_equal(arr, exp)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
        # dtype=None still infers int64.
        arr = SparseArray(data=[1, 2, 3],
                          sparse_index=IntIndex(4, [1, 2, 3]),
                          dtype=None, fill_value=0)
        exp = SparseArray([0, 1, 2, 3], dtype=None)
        tm.assert_sp_array_equal(arr, exp)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
    @pytest.mark.parametrize("sparse_index", [
        None, IntIndex(1, [0]),
    ])
    def test_constructor_spindex_dtype_scalar(self, sparse_index):
        # scalar input
        arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
        exp = SparseArray([1], dtype=None)
        tm.assert_sp_array_equal(arr, exp)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
        # Same check with the sparse index spelled out explicitly.
        arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
        exp = SparseArray([1], dtype=None)
        tm.assert_sp_array_equal(arr, exp)
        assert arr.dtype == SparseDtype(np.int64)
        assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
    @pytest.mark.parametrize("fill", [1, np.nan, 0])
    def test_sparse_series_round_trip(self, kind, fill):
        # see gh-13999: SparseArray survives a trip through SparseSeries.
        arr = SparseArray([np.nan, 1, np.nan, 2, 3],
                          kind=kind, fill_value=fill)
        res = SparseArray(SparseSeries(arr))
        tm.assert_sp_array_equal(arr, res)
        # Also with int data, both with and without an explicit dtype.
        arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
                          kind=kind, fill_value=fill)
        res = SparseArray(SparseSeries(arr), dtype=np.int64)
        tm.assert_sp_array_equal(arr, res)
        res = SparseArray(SparseSeries(arr))
        tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[11])
tm.assert_raises_regex(IndexError, errmsg, lambda: self.arr[-11])
assert self.arr[-1] == self.arr[len(self.arr) - 1]
    def test_take_scalar_raises(self):
        # take() requires an array of indices, not a bare scalar.
        msg = "'indices' must be an array, not a scalar '2'."
        with tm.assert_raises_regex(ValueError, msg):
            self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
    def test_bad_take(self):
        # Out-of-bounds take indices raise IndexError.
        tm.assert_raises_regex(
            IndexError, "bounds", lambda: self.arr.take([11]))
    def test_take_filling(self):
        # similar tests as GH 12631
        sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
        # Without allow_fill, -1 means "last element".
        result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([np.nan, np.nan, 4])
        tm.assert_sp_array_equal(result, expected)
        # XXX: test change: fill_value=True -> allow_fill=True
        # With allow_fill, -1 means "fill with the NA value".
        result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
        expected = SparseArray([np.nan, np.nan, np.nan])
        tm.assert_sp_array_equal(result, expected)
        # allow_fill=False restores positional meaning for -1.
        result = sparse.take(np.array([1, 0, -1]),
                             allow_fill=False, fill_value=True)
        expected = SparseArray([np.nan, np.nan, 4])
        tm.assert_sp_array_equal(result, expected)
        # With allow_fill=True, only -1 is a legal negative index.
        msg = ("Invalid value in 'indices'")
        with tm.assert_raises_regex(ValueError, msg):
            sparse.take(np.array([1, 0, -2]), allow_fill=True)
        with tm.assert_raises_regex(ValueError, msg):
            sparse.take(np.array([1, 0, -5]), allow_fill=True)
        # Out-of-bounds indices raise IndexError in every mode.
        with pytest.raises(IndexError):
            sparse.take(np.array([1, -6]))
        with pytest.raises(IndexError):
            sparse.take(np.array([1, 5]))
        with pytest.raises(IndexError):
            sparse.take(np.array([1, 5]), allow_fill=True)
    def test_take_filling_fill_value(self):
        # same tests as GH 12631
        sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
        # Without allow_fill, -1 means "last element".
        result = sparse.take(np.array([1, 0, -1]))
        expected = SparseArray([0, np.nan, 4], fill_value=0)
        tm.assert_sp_array_equal(result, expected)
        # fill_value
        result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
        # XXX: behavior change.
        # the old way of filling self.fill_value doesn't follow EA rules.
        # It's supposed to be self.dtype.na_value (nan in this case)
        expected = SparseArray([0, np.nan, np.nan], fill_value=0)
        tm.assert_sp_array_equal(result, expected)
        # allow_fill=False
        result = sparse.take(np.array([1, 0, -1]),
                             allow_fill=False, fill_value=True)
        expected = SparseArray([0, np.nan, 4], fill_value=0)
        tm.assert_sp_array_equal(result, expected)
        # With allow_fill=True, only -1 is a legal negative index.
        msg = ("Invalid value in 'indices'.")
        with tm.assert_raises_regex(ValueError, msg):
            sparse.take(np.array([1, 0, -2]), allow_fill=True)
        with tm.assert_raises_regex(ValueError, msg):
            sparse.take(np.array([1, 0, -5]), allow_fill=True)
        # Out-of-bounds indices raise IndexError in every mode.
        with pytest.raises(IndexError):
            sparse.take(np.array([1, -6]))
        with pytest.raises(IndexError):
            sparse.take(np.array([1, 5]))
        with pytest.raises(IndexError):
            sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
    """take() on an all-NaN array returns NaN regardless of fill options."""
    sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
    # XXX: did the default kind from take change?
    result = sparse.take(np.array([1, 0, -1]))
    expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
    tm.assert_sp_array_equal(result, expected)

    result = sparse.take(np.array([1, 0, -1]), fill_value=True)
    expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
    tm.assert_sp_array_equal(result, expected)

    # out-of-bounds indices still raise
    with pytest.raises(IndexError):
        sparse.take(np.array([1, -6]))
    with pytest.raises(IndexError):
        sparse.take(np.array([1, 5]))
    with pytest.raises(IndexError):
        sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
    """SparseArray is immutable: item and slice assignment raise TypeError."""
    def setitem():
        self.arr[5] = 3

    def setslice():
        self.arr[1:5] = 2

    tm.assert_raises_regex(TypeError, "item assignment", setitem)
    tm.assert_raises_regex(TypeError, "item assignment", setslice)
def test_constructor_from_too_large_array(self):
    """Constructing from a 2-D ndarray must raise (only 1-D is supported)."""
    tm.assert_raises_regex(TypeError, "expected dimension <= 1 data",
                           SparseArray, np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
    """Construction from another SparseArray keeps fill_value and sp_values."""
    res = SparseArray(self.zarr)
    assert res.fill_value == 0
    assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
    """copy=True detaches sp_values; the default shares the buffer."""
    cp = SparseArray(self.arr, copy=True)
    cp.sp_values[:3] = 0
    # mutation of the copy must not leak back into self.arr
    assert not (self.arr.sp_values[:3] == 0).any()

    not_copy = SparseArray(self.arr)
    not_copy.sp_values[:3] = 0
    # without copy=True the underlying buffer is shared
    assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
    """Boolean data with fill_value=False stores only the True positions."""
    # GH 10648
    data = np.array([False, False, True, True, False, False])
    arr = SparseArray(data, fill_value=False, dtype=bool)

    assert arr.dtype == SparseDtype(bool)
    tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
    # Behavior change: np.asarray densifies.
    # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
    tm.assert_numpy_array_equal(arr.sp_index.indices,
                                np.array([2, 3], np.int32))

    for dense in [arr.to_dense(), arr.values]:
        assert dense.dtype == bool
        tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
    """dtype and fill_value inference for boolean input."""
    arr = SparseArray([True, False, True], dtype=None)
    assert arr.dtype == SparseDtype(np.bool)
    assert not arr.fill_value

    arr = SparseArray([True, False, True], dtype=np.bool)
    assert arr.dtype == SparseDtype(np.bool)
    assert not arr.fill_value

    arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
    assert arr.dtype == SparseDtype(np.bool, True)
    assert arr.fill_value
def test_constructor_float32(self):
    """float32 dtype is preserved through construction and densification."""
    # GH 10648
    data = np.array([1., np.nan, 3], dtype=np.float32)
    arr = SparseArray(data, dtype=np.float32)

    assert arr.dtype == SparseDtype(np.float32)
    tm.assert_numpy_array_equal(arr.sp_values,
                                np.array([1, 3], dtype=np.float32))
    # Behavior change: np.asarray densifies.
    # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
    tm.assert_numpy_array_equal(arr.sp_index.indices,
                                np.array([0, 2], dtype=np.int32))

    for dense in [arr.to_dense(), arr.values]:
        assert dense.dtype == np.float32
        tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
    """astype between sparse dtypes; NA -> int conversion must raise."""
    # float -> float
    arr = SparseArray([None, None, 0, 2])
    result = arr.astype("Sparse[float32]")
    expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
    tm.assert_sp_array_equal(result, expected)

    dtype = SparseDtype("float64", fill_value=0)
    result = arr.astype(dtype)
    # _simple_new pins the exact sparse layout expected after the cast
    expected = SparseArray._simple_new(np.array([0., 2.],
                                                dtype=dtype.subtype),
                                       IntIndex(4, [2, 3]),
                                       dtype)
    tm.assert_sp_array_equal(result, expected)

    dtype = SparseDtype("int64", 0)
    result = arr.astype(dtype)
    expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
                                       IntIndex(4, [2, 3]),
                                       dtype)
    tm.assert_sp_array_equal(result, expected)

    # NaN cannot be represented in an int64 sparse array
    arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
    with tm.assert_raises_regex(ValueError, 'NA'):
        arr.astype('Sparse[i8]')
def test_astype_bool(self):
    """astype(bool) keeps the sparse layout and converts fill_value too."""
    a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
    result = a.astype(bool)
    expected = SparseArray([True, 0, 0, True],
                           dtype=SparseDtype(bool, 0))
    tm.assert_sp_array_equal(result, expected)

    # update fill value
    result = a.astype(SparseDtype(bool, False))
    expected = SparseArray([True, False, False, True],
                           dtype=SparseDtype(bool, False))
    tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
    """astype to every real numpy dtype preserves values and fill_value."""
    vals = np.array([1, 2, 3])
    arr = SparseArray(vals, fill_value=1)
    typ = np.dtype(any_real_dtype)
    res = arr.astype(typ)
    assert res.dtype == SparseDtype(typ, 1)
    assert res.sp_values.dtype == typ
    tm.assert_numpy_array_equal(np.asarray(res.values),
                                vals.astype(typ))
def test_set_fill_value(self):
    """fill_value is assignable after construction, without dtype coercion."""
    arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
    arr.fill_value = 2
    assert arr.fill_value == 2

    arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
    arr.fill_value = 2
    assert arr.fill_value == 2

    # XXX: this seems fine? You can construct an integer
    # sparsearray with NaN fill value, why not update one?
    # coerces to int
    # msg = "unable to set fill_value 3\\.1 to int64 dtype"
    # with tm.assert_raises_regex(ValueError, msg):
    arr.fill_value = 3.1
    assert arr.fill_value == 3.1

    # msg = "unable to set fill_value nan to int64 dtype"
    # with tm.assert_raises_regex(ValueError, msg):
    arr.fill_value = np.nan
    assert np.isnan(arr.fill_value)

    arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
    arr.fill_value = True
    assert arr.fill_value

    # coerces to bool
    # msg = "unable to set fill_value 0 to bool dtype"
    # with tm.assert_raises_regex(ValueError, msg):
    arr.fill_value = 0
    assert arr.fill_value == 0

    # msg = "unable to set fill_value nan to bool dtype"
    # with tm.assert_raises_regex(ValueError, msg):
    arr.fill_value = np.nan
    assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
    """Assigning a non-scalar fill_value raises ValueError."""
    arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
    msg = "fill_value must be a scalar"

    with tm.assert_raises_regex(ValueError, msg):
        arr.fill_value = val
def test_copy_shallow(self):
    """copy(deep=False) shares both sp_values and sp_index."""
    arr2 = self.arr.copy(deep=False)
    assert arr2.sp_values is self.arr.sp_values
    assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
    """values and to_dense() both reproduce the original dense data."""
    assert_almost_equal(self.arr.values, self.arr_data)
    assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
    ([0, 0, 0, 0, 0], (5,), None),
    ([], (0,), None),
    ([0], (1,), None),
    (['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
    """shape matches the input length, including empty and object input."""
    # GH 21126
    out = SparseArray(data, dtype=dtype)
    assert out.shape == shape
def test_to_dense(self):
    """to_dense() reproduces input for nan/zero fills; `fill=` arg warns."""
    vals = np.array([1, np.nan, np.nan, 3, np.nan])
    res = SparseArray(vals).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    res = SparseArray(vals, fill_value=0).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    vals = np.array([1, np.nan, 0, 3, 0])
    res = SparseArray(vals).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    res = SparseArray(vals, fill_value=0).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
    res = SparseArray(vals).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    res = SparseArray(vals, fill_value=0).to_dense()
    tm.assert_numpy_array_equal(res, vals)

    # see gh-14647: the `fill` argument is deprecated
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        SparseArray(vals).to_dense(fill=2)
def test_getitem(self):
    """Scalar indexing (positive and negative) matches dense indexing."""
    def _checkit(i):
        assert_almost_equal(self.arr[i], self.arr.values[i])

    for i in range(len(self.arr)):
        _checkit(i)
        _checkit(-i)
def test_getitem_arraylike_mask(self):
    """Boolean-mask indexing selects only the True positions."""
    arr = SparseArray([0, 1, 2])
    result = arr[[True, False, True]]
    expected = SparseArray([0, 2])
    tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
    """Slicing matches slicing the dense values, incl. out-of-range bounds."""
    result = self.arr[:-3]
    exp = SparseArray(self.arr.values[:-3])
    tm.assert_sp_array_equal(result, exp)

    result = self.arr[-4:]
    exp = SparseArray(self.arr.values[-4:])
    tm.assert_sp_array_equal(result, exp)

    # two corner cases from Series
    result = self.arr[-12:]
    exp = SparseArray(self.arr)
    tm.assert_sp_array_equal(result, exp)

    result = self.arr[:-12]
    exp = SparseArray(self.arr.values[:0])
    tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
    """1-tuple slices behave like plain slices; 2-D slicing raises."""
    dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])

    sparse = SparseArray(dense)
    res = sparse[4:, ]
    exp = SparseArray(dense[4:, ])
    tm.assert_sp_array_equal(res, exp)

    sparse = SparseArray(dense, fill_value=0)
    res = sparse[4:, ]
    exp = SparseArray(dense[4:, ], fill_value=0)
    tm.assert_sp_array_equal(res, exp)

    with pytest.raises(IndexError):
        sparse[4:, :]

    with pytest.raises(IndexError):
        # check numpy compat
        dense[4:, :]
def test_boolean_slice_empty(self):
    """An all-False boolean mask returns an empty array, dtype unchanged."""
    arr = pd.SparseArray([0, 1, 2])
    res = arr[[False, False, False]]
    assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
                                "truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
    """Arithmetic works array-vs-array, array-vs-ndarray and vs-scalar,
    and propagates the fill_value of the left operand."""
    op = getattr(operator, op)
    data1 = np.random.randn(20)
    data2 = np.random.randn(20)

    data1[::2] = np.nan
    data2[::3] = np.nan

    arr1 = SparseArray(data1)
    arr2 = SparseArray(data2)

    # re-use the same buffers with a concrete fill value
    data1[::2] = 3
    data2[::3] = 3

    farr1 = SparseArray(data1, fill_value=3)
    farr2 = SparseArray(data2, fill_value=3)

    def _check_op(op, first, second):
        res = op(first, second)
        exp = SparseArray(op(first.values, second.values),
                          fill_value=first.fill_value)
        assert isinstance(res, SparseArray)
        assert_almost_equal(res.values, exp.values)

        res2 = op(first, second.values)
        assert isinstance(res2, SparseArray)
        tm.assert_sp_array_equal(res, res2)

        res3 = op(first.values, second)
        assert isinstance(res3, SparseArray)
        tm.assert_sp_array_equal(res, res3)

        res4 = op(first, 4)
        assert isinstance(res4, SparseArray)

        # Ignore this if the actual op raises (e.g. pow).
        try:
            exp = op(first.values, 4)
            exp_fv = op(first.fill_value, 4)
        except ValueError:
            pass
        else:
            assert_almost_equal(res4.fill_value, exp_fv)
            assert_almost_equal(res4.values, exp)

    with np.errstate(all="ignore"):
        for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
            _check_op(op, first_arr, second_arr)
def test_pickle(self):
    """Pickle round-trip preserves both nan-fill and zero-fill arrays."""
    def _check_roundtrip(obj):
        unpickled = tm.round_trip_pickle(obj)
        tm.assert_sp_array_equal(unpickled, obj)

    _check_roundtrip(self.arr)
    _check_roundtrip(self.zarr)
def test_generator_warnings(self):
    """Iterating a SparseArray must not emit any deprecation warnings."""
    sp_arr = SparseArray([1, 2, 3])
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings(action='always',
                                category=DeprecationWarning)
        warnings.filterwarnings(action='always',
                                category=PendingDeprecationWarning)
        for _ in sp_arr:
            pass
        assert len(w) == 0
def test_fillna(self):
    """fillna replaces NaN entries; the resulting fill_value depends on
    whether an explicit one was set at construction time."""
    s = SparseArray([1, np.nan, np.nan, 3, np.nan])
    res = s.fillna(-1)
    exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
    res = s.fillna(-1)
    exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    s = SparseArray([1, np.nan, 0, 3, 0])
    res = s.fillna(-1)
    exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
    res = s.fillna(-1)
    exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    s = SparseArray([np.nan, np.nan, np.nan, np.nan])
    res = s.fillna(-1)
    exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
    res = s.fillna(-1)
    exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)

    # float dtype's fill_value is np.nan, replaced by -1
    s = SparseArray([0., 0., 0., 0.])
    res = s.fillna(-1)
    exp = SparseArray([0., 0., 0., 0.], fill_value=-1)
    tm.assert_sp_array_equal(res, exp)

    # int dtype shouldn't have missing. No changes.
    s = SparseArray([0, 0, 0, 0])
    assert s.dtype == SparseDtype(np.int64)
    assert s.fill_value == 0
    res = s.fillna(-1)
    tm.assert_sp_array_equal(res, s)

    s = SparseArray([0, 0, 0, 0], fill_value=0)
    assert s.dtype == SparseDtype(np.int64)
    assert s.fill_value == 0
    res = s.fillna(-1)
    exp = SparseArray([0, 0, 0, 0], fill_value=0)
    tm.assert_sp_array_equal(res, exp)

    # fill_value can be nan if there is no missing hole.
    # only fill_value will be changed
    s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
    assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
    assert np.isnan(s.fill_value)
    res = s.fillna(-1)
    exp = SparseArray([0, 0, 0, 0], fill_value=-1)
    tm.assert_sp_array_equal(res, exp)
def test_fillna_overlap(self):
    """fillna with a value already present keeps the stored sp_values."""
    s = SparseArray([1, np.nan, np.nan, 3, np.nan])
    # filling with existing value doesn't replace existing value with
    # fill_value, i.e. existing 3 remains in sp_values
    res = s.fillna(3)
    exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
    tm.assert_numpy_array_equal(res.to_dense(), exp)

    s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
    res = s.fillna(3)
    exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
    tm.assert_sp_array_equal(res, exp)
class TestSparseArrayAnalytics(object):
    """Reductions (all/any/sum/cumsum/mean), their numpy-dispatch
    counterparts, ufunc application, and memory accounting."""

    @pytest.mark.parametrize('data,pos,neg', [
        ([True, True, True], True, False),
        ([1, 2, 1], 1, 0),
        ([1.0, 2.0, 1.0], 1.0, 0.0)
    ])
    def test_all(self, data, pos, neg):
        """all() is True until a falsy element is injected."""
        # GH 17570
        out = SparseArray(data).all()
        assert out

        out = SparseArray(data, fill_value=pos).all()
        assert out

        data[1] = neg
        out = SparseArray(data).all()
        assert not out

        out = SparseArray(data, fill_value=pos).all()
        assert not out

    @pytest.mark.parametrize('data,pos,neg', [
        ([True, True, True], True, False),
        ([1, 2, 1], 1, 0),
        ([1.0, 2.0, 1.0], 1.0, 0.0)
    ])
    @td.skip_if_np_lt_115  # prior didn't dispatch
    def test_numpy_all(self, data, pos, neg):
        """np.all dispatches to SparseArray.all; `out=` is rejected."""
        # GH 17570
        out = np.all(SparseArray(data))
        assert out

        out = np.all(SparseArray(data, fill_value=pos))
        assert out

        data[1] = neg
        out = np.all(SparseArray(data))
        assert not out

        out = np.all(SparseArray(data, fill_value=pos))
        assert not out

        # raises with a different message on py2.
        msg = "the \'out\' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.all,
                               SparseArray(data), out=np.array([]))

    @pytest.mark.parametrize('data,pos,neg', [
        ([False, True, False], True, False),
        ([0, 2, 0], 2, 0),
        ([0.0, 2.0, 0.0], 2.0, 0.0)
    ])
    def test_any(self, data, pos, neg):
        """any() is True while a truthy element remains."""
        # GH 17570
        out = SparseArray(data).any()
        assert out

        out = SparseArray(data, fill_value=pos).any()
        assert out

        data[1] = neg
        out = SparseArray(data).any()
        assert not out

        out = SparseArray(data, fill_value=pos).any()
        assert not out

    @pytest.mark.parametrize('data,pos,neg', [
        ([False, True, False], True, False),
        ([0, 2, 0], 2, 0),
        ([0.0, 2.0, 0.0], 2.0, 0.0)
    ])
    @td.skip_if_np_lt_115  # prior didn't dispatch
    def test_numpy_any(self, data, pos, neg):
        """np.any dispatches to SparseArray.any; `out=` is rejected."""
        # GH 17570
        out = np.any(SparseArray(data))
        assert out

        out = np.any(SparseArray(data, fill_value=pos))
        assert out

        data[1] = neg
        out = np.any(SparseArray(data))
        assert not out

        out = np.any(SparseArray(data, fill_value=pos))
        assert not out

        msg = "the \'out\' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.any,
                               SparseArray(data), out=out)

    def test_sum(self):
        """sum() skips NaN regardless of the configured fill_value."""
        data = np.arange(10).astype(float)
        out = SparseArray(data).sum()
        assert out == 45.0

        data[5] = np.nan
        out = SparseArray(data, fill_value=2).sum()
        assert out == 40.0

        out = SparseArray(data, fill_value=np.nan).sum()
        assert out == 40.0

    def test_numpy_sum(self):
        """np.sum dispatches; `dtype=` and `out=` are rejected."""
        data = np.arange(10).astype(float)
        out = np.sum(SparseArray(data))
        assert out == 45.0

        data[5] = np.nan
        out = np.sum(SparseArray(data, fill_value=2))
        assert out == 40.0

        out = np.sum(SparseArray(data, fill_value=np.nan))
        assert out == 40.0

        msg = "the 'dtype' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.sum,
                               SparseArray(data), dtype=np.int64)

        msg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.sum,
                               SparseArray(data), out=out)

    @pytest.mark.parametrize("data,expected", [
        (np.array([1, 2, 3, 4, 5], dtype=float),  # non-null data
         SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0]))),
        (np.array([1, 2, np.nan, 4, 5], dtype=float),  # null data
         SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])))
    ])
    @pytest.mark.parametrize("numpy", [True, False])
    def test_cumsum(self, data, expected, numpy):
        """cumsum via method and np.cumsum; NaN propagates; axis > 0 raises."""
        cumsum = np.cumsum if numpy else lambda s: s.cumsum()

        out = cumsum(SparseArray(data))
        tm.assert_sp_array_equal(out, expected)

        out = cumsum(SparseArray(data, fill_value=np.nan))
        tm.assert_sp_array_equal(out, expected)

        out = cumsum(SparseArray(data, fill_value=2))
        tm.assert_sp_array_equal(out, expected)

        if numpy:  # numpy compatibility checks.
            msg = "the 'dtype' parameter is not supported"
            tm.assert_raises_regex(ValueError, msg, np.cumsum,
                                   SparseArray(data), dtype=np.int64)

            msg = "the 'out' parameter is not supported"
            tm.assert_raises_regex(ValueError, msg, np.cumsum,
                                   SparseArray(data), out=out)
        else:
            axis = 1  # SparseArray currently 1-D, so only axis = 0 is valid.
            msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
            with tm.assert_raises_regex(ValueError, msg):
                SparseArray(data).cumsum(axis=axis)

    def test_mean(self):
        """mean() skips NaN entries."""
        data = np.arange(10).astype(float)
        out = SparseArray(data).mean()
        assert out == 4.5

        data[5] = np.nan
        out = SparseArray(data).mean()
        assert out == 40.0 / 9

    def test_numpy_mean(self):
        """np.mean dispatches; `dtype=` and `out=` are rejected."""
        data = np.arange(10).astype(float)
        out = np.mean(SparseArray(data))
        assert out == 4.5

        data[5] = np.nan
        out = np.mean(SparseArray(data))
        assert out == 40.0 / 9

        msg = "the 'dtype' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.mean,
                               SparseArray(data), dtype=np.int64)

        msg = "the 'out' parameter is not supported"
        tm.assert_raises_regex(ValueError, msg, np.mean,
                               SparseArray(data), out=out)

    def test_ufunc(self):
        """Unary ufuncs apply to sp_values AND to the fill_value."""
        # GH 13853 make sure ufunc is applied to fill_value
        sparse = SparseArray([1, np.nan, 2, np.nan, -2])
        result = SparseArray([1, np.nan, 2, np.nan, 2])
        tm.assert_sp_array_equal(abs(sparse), result)
        tm.assert_sp_array_equal(np.abs(sparse), result)

        sparse = SparseArray([1, -1, 2, -2], fill_value=1)
        result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
                             fill_value=1)
        tm.assert_sp_array_equal(abs(sparse), result)
        tm.assert_sp_array_equal(np.abs(sparse), result)

        sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
        result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,
                             fill_value=1)
        tm.assert_sp_array_equal(abs(sparse), result)
        tm.assert_sp_array_equal(np.abs(sparse), result)

        sparse = SparseArray([1, np.nan, 2, np.nan, -2])
        result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
        tm.assert_sp_array_equal(np.sin(sparse), result)

        sparse = SparseArray([1, -1, 2, -2], fill_value=1)
        result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
        tm.assert_sp_array_equal(np.sin(sparse), result)

        sparse = SparseArray([1, -1, 0, -2], fill_value=0)
        result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
        tm.assert_sp_array_equal(np.sin(sparse), result)

    def test_ufunc_args(self):
        """Binary ufuncs with extra args also transform the fill_value."""
        # GH 13853 make sure ufunc is applied to fill_value, including its arg
        sparse = SparseArray([1, np.nan, 2, np.nan, -2])
        result = SparseArray([2, np.nan, 3, np.nan, -1])
        tm.assert_sp_array_equal(np.add(sparse, 1), result)

        sparse = SparseArray([1, -1, 2, -2], fill_value=1)
        result = SparseArray([2, 0, 3, -1], fill_value=2)
        tm.assert_sp_array_equal(np.add(sparse, 1), result)

        sparse = SparseArray([1, -1, 0, -2], fill_value=0)
        result = SparseArray([2, 0, 1, -1], fill_value=1)
        tm.assert_sp_array_equal(np.add(sparse, 1), result)

    def test_nbytes_integer(self):
        """nbytes for integer kind: sp_values plus one int32 per index."""
        arr = SparseArray([1, 0, 0, 0, 2], kind='integer')
        result = arr.nbytes
        # (2 * 8) + 2 * 4
        assert result == 24

    def test_nbytes_block(self):
        """nbytes for block kind: sp_values plus block locs and lengths."""
        arr = SparseArray([1, 2, 0, 0, 0], kind='block')
        result = arr.nbytes
        # (2 * 8) + 4 + 4
        # sp_values, blocs, blenghts
        assert result == 24

    def test_asarray_datetime64(self):
        """np.asarray on a datetime64 sparse array must not raise."""
        s = pd.SparseArray(
            pd.to_datetime(['2012', None, None, '2013'])
        )
        np.asarray(s)
def test_setting_fill_value_fillna_still_works():
    """isna() stays correct after mutating fill_value to NaN in place."""
    # This is why letting users update fill_value / dtype is bad
    # astype has the same problem.
    arr = SparseArray([1., np.nan, 1.0], fill_value=0.0)
    arr.fill_value = np.nan
    result = arr.isna()
    # Can't do direct comparison, since the sp_index will be different
    # So let's convert to ndarray and check there.
    result = np.asarray(result)

    expected = np.array([False, True, False])
    tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
    """Changing fill_value re-interprets which positions are 'stored'."""
    arr = SparseArray([0.0, np.nan], fill_value=0)
    arr.fill_value = np.nan
    # use private constructor to get the index right
    # otherwise both nans would be un-stored.
    expected = SparseArray._simple_new(
        sparse_array=np.array([np.nan]),
        sparse_index=IntIndex(2, [1]),
        dtype=SparseDtype(float, np.nan),
    )
    tm.assert_sp_array_equal(arr, expected)
@pytest.mark.parametrize("arr, loc", [
    ([None, 1, 2], 0),
    ([0, None, 2], 1),
    ([0, 1, None], 2),
    ([0, 1, 1, None, None], 3),
    ([1, 1, 1, 2], -1),
    ([], -1),
])
def test_first_fill_value_loc(arr, loc):
    """_first_fill_value_loc returns the first NA position, or -1 if none."""
    result = SparseArray(arr)._first_fill_value_loc()
    assert result == loc
@pytest.mark.parametrize('arr', [
    [1, 2, np.nan, np.nan],
    [1, np.nan, 2, np.nan],
    [1, 2, np.nan],
])
@pytest.mark.parametrize("fill_value", [
    np.nan, 0, 1
])
def test_unique_na_fill(arr, fill_value):
    """unique() matches dense Series.unique regardless of fill_value."""
    a = pd.SparseArray(arr, fill_value=fill_value).unique()
    b = pd.Series(arr).unique()
    assert isinstance(a, SparseArray)
    a = np.asarray(a)
    tm.assert_numpy_array_equal(a, b)
def test_map():
    """map() accepts a dict, a Series, and a plain callable mapper."""
    arr = SparseArray([0, 1, 2])
    expected = SparseArray([10, 11, 12], fill_value=10)

    # dict
    result = arr.map({0: 10, 1: 11, 2: 12})
    tm.assert_sp_array_equal(result, expected)

    # series
    result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
    tm.assert_sp_array_equal(result, expected)

    # function
    # BUG FIX: this branch previously repeated the Series call, so the
    # callable code path was never exercised; use an actual function.
    result = arr.map(lambda x: x + 10)
    expected = SparseArray([10, 11, 12], fill_value=10)
    tm.assert_sp_array_equal(result, expected)
def test_map_missing():
    """Keys absent from the mapper become NaN in the mapped result."""
    arr = SparseArray([0, 1, 2])
    expected = SparseArray([10, 11, None], fill_value=10)

    result = arr.map({0: 10, 1: 11})
    tm.assert_sp_array_equal(result, expected)
| |
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder as sklearnOneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.validation import check_X_y, check_array, check_random_state
from sklearn.utils import column_or_1d
from sklearn.utils.validation import check_is_fitted
class CountEncoder(BaseEstimator, TransformerMixin):
    """Encode labels with their count plus uniform random noise in (-1, 1).

    The per-class noise keeps counts of equally frequent classes distinct,
    which ``inverse_transform`` relies on to map encodings back to classes.
    """

    def __init__(self, seed=0):
        # Store only the seed so sklearn clone()/get_params() work.
        self.seed = seed

    def _fit(self, y):
        """Validate *y*, learn classes_/cardinalities_, return validated y."""
        self.random_state = check_random_state(self.seed)
        if pd.Series(y).isnull().any():
            raise ValueError("Input contains NaN")
        y = column_or_1d(y, warn=True)
        unique, counts = np.unique(y, return_counts=True)
        # one uniform draw in (-1, 1) per distinct class
        noise = (self.random_state.rand(unique.shape[0]) - 0.5) * 2
        self.classes_, self.cardinalities_ = unique, counts + noise
        return y

    def _encode(self, y):
        """Map each label in *y* to its learned noisy count."""
        result = np.empty(y.shape)
        for value, card in zip(self.classes_, self.cardinalities_):
            result[y == value] = card
        return result

    def fit(self, y):
        """Fit count encoder.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        self._fit(y)
        return self

    def fit_transform(self, y, **kwargs):
        """Fit count encoder and return encoded labels.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        y : array-like of shape [n_samples]
        """
        # previously duplicated fit(); now shares a single code path
        return self._encode(self._fit(y))

    def transform(self, y):
        """Transform labels to count encoding.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values

        Returns
        -------
        y : array-like of shape [n_samples]

        Raises
        ------
        ValueError
            If *y* contains NaN or labels unseen during fit.
        """
        check_is_fitted(self, ['classes_', 'cardinalities_'])
        if pd.Series(y).isnull().any():
            raise ValueError("Input contains NaN")
        y = column_or_1d(y, warn=True)
        classes = np.unique(y)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            raise ValueError("y contains new labels: %s" % str(diff))
        return self._encode(y)

    def inverse_transform(self, y):
        """Transform encoded counts back to the original labels.

        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        check_is_fitted(self, ['classes_', 'cardinalities_'])
        diff = np.setdiff1d(y, np.array(self.cardinalities_))
        # BUG FIX: `if diff:` raised "truth value of an array is ambiguous"
        # whenever more than one unknown encoding was present.
        if diff.size:
            raise ValueError("y contains new labels: %s" % str(diff))
        indexes = np.hstack([np.where(self.cardinalities_ == value)[0]
                             for value in y])
        return self.classes_[indexes]
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features using a one-hot aka one-of-K scheme.

    This is a wrapper for scikit-learn OneHotEncoder with
    categorical_features = "all", fronted by a LabelEncoder so arbitrary
    label types are accepted.  Names of new columns are available by
    attribute ``self.names``.  It can be used straightforward in sklearn
    pipelines.
    """

    def __init__(self, sparse=False, handle_unknown='error'):
        # NOTE(review): `categorical_features` is deprecated/removed in
        # newer scikit-learn releases — confirm the pinned sklearn version.
        self.ohe = sklearnOneHotEncoder(categorical_features='all',
                                        sparse=sparse,
                                        handle_unknown=handle_unknown)
        self.le = LabelEncoder()
        self.names = []

    def fit(self, X):
        """Fit One-Hot Encoder.

        Names of new columns are available by attribute 'names' of Encoder.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Input array of any type

        Returns
        -------
        self : returns an instance of self.
        """
        if pd.Series(X).isnull().any():
            raise ValueError("Input contains NaN")
        X = column_or_1d(X, warn=True)
        # LabelEncoder turns arbitrary labels into 0..n-1 integer codes
        X = self.le.fit_transform(X.flatten())
        self.ohe = self.ohe.fit(X.reshape(-1, 1))
        self.names = list(self.le.classes_)
        return self

    def fit_transform(self, X, **kwargs):
        """Fit One-Hot Encoder to X, then transform X.

        Equivalent to self.fit(X).transform(X).
        Names of new columns are available by attribute 'names' of Encoder.

        Parameters
        ----------
        X : array-like of shape [n_samples]
            Input array of any type

        Returns
        -------
        y : array-like
        """
        if pd.Series(X).isnull().any():
            raise ValueError("Input contains NaN")
        X = column_or_1d(X, warn=True)
        X = self.le.fit_transform(X).reshape(-1, 1)
        y = self.ohe.fit_transform(X)
        self.names = list(self.le.classes_)
        return y

    def transform(self, X):
        """Transform X by fitted One-Hot encoder.

        Names of new columns are available by attribute 'names' of Encoder.

        Parameters
        ----------
        X : array-like

        Returns
        -------
        y : array-like

        Raises
        ------
        ValueError
            If *X* contains NaN or labels unseen during fit.
        """
        # BUG FIX: the validated/ravelled result was previously discarded;
        # keep it, matching fit() and fit_transform().
        X = column_or_1d(X, warn=True)
        if pd.Series(X).isnull().any():
            raise ValueError("Input contains NaN")
        classes = np.unique(X)
        check_is_fitted(self, 'names')
        check_is_fitted(self.le, 'classes_')
        if len(np.intersect1d(classes, self.le.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.le.classes_)
            raise ValueError("X contains new labels: %s" % str(diff))
        X = self.le.transform(X).reshape(-1, 1)
        y = self.ohe.transform(X)
        return y
class MeanEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features using means of a target feature."""

    def __init__(self):
        pass

    @staticmethod
    def _check_no_nan(values):
        """Raise ValueError when *values* contains NaN."""
        if pd.Series(values).isnull().any():
            raise ValueError("Input contains NaN")

    def _substitute(self, X, masks):
        """Write the learned means into *X* in place and return it.

        NOTE(review): if X carries an integer dtype the float means are
        truncated on assignment — presumably callers pass object or float
        arrays; confirm against callers.
        """
        for mask, mean in zip(masks, self.means_):
            X[mask] = mean
        return X

    def fit(self, X, y):
        """Fit MeanEncoder.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Input array of any type
        y : array-like of shape (n_samples,)
            Target values for means calculation

        Returns
        -------
        self : returns an instance of self.
        """
        self._check_no_nan(X)
        self._check_no_nan(y)
        X, y = check_X_y(X, y, ensure_2d=False, dtype=None, copy=True)
        self.classes_ = np.unique(X)
        masks = [(X == value) for value in self.classes_]
        self.means_ = [y[mask].mean() for mask in masks]
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit MeanEncoder to X, then transform X.

        Equivalent to self.fit(X, y).transform(X), but reuses the masks
        computed during fitting.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Input array of any type
        y : array-like of shape (n_samples,)
            Target values for means calculation

        Returns
        -------
        X* : array-like
        """
        self._check_no_nan(X)
        self._check_no_nan(y)
        X, y = check_X_y(X, y, ensure_2d=False, dtype=None, copy=True)
        self.classes_ = np.unique(X)
        masks = [(X == value) for value in self.classes_]
        self.means_ = [y[mask].mean() for mask in masks]
        return self._substitute(X, masks)

    def transform(self, X):
        """Transform X by fitted MeanEncoder.

        Parameters
        ----------
        X : array-like of shape (n_samples,)
            Input array of any type

        Returns
        -------
        X* : array-like

        Raises
        ------
        ValueError
            If *X* contains NaN or labels unseen during fit.
        """
        check_is_fitted(self, 'classes_')
        self._check_no_nan(X)
        X = check_array(X, ensure_2d=False, dtype=None, copy=True)
        classes = np.unique(X)
        if len(np.intersect1d(classes, self.classes_)) < len(classes):
            diff = np.setdiff1d(classes, self.classes_)
            raise ValueError("X contains new labels: %s" % str(diff))
        masks = [(X == value) for value in self.classes_]
        return self._substitute(X, masks)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import functools
import itertools
import logging as std_logging
import os
import random
import re
import uuid
from oslo_config import cfg
from oslo_log import log as logging
import redis
import six
import yaml
# Module-wide logger (oslo.log wrapper around stdlib logging).
LOG = logging.getLogger(__name__)
def env(*_vars, **kwargs):
    """Return the first non-empty environment variable among *_vars*.

    Falls back to the ``default`` keyword argument (or None) when none of
    the named variables holds a non-empty value.
    """
    for name in _vars:
        candidate = os.environ.get(name)
        if candidate:
            return candidate
    return kwargs.get('default', None)
def validate_required_opts(conf, opts):
    """Raise cfg.RequiredOptError for the first unset required option.

    All config parameters default to ENV values, which defeats the standard
    required-option check, so it is re-done here by hand.
    """
    missing = (opt for opt in opts if opt.required and not conf[opt.dest])
    for opt in missing:
        raise cfg.RequiredOptError(opt.name)
def init_config_and_logging(opts):
    """Register *opts* with oslo.config, parse CLI/config, set up logging.

    Exits the process with status 1 when a required option is missing.
    The oslo register/parse/setup call order below is significant.
    """
    conf = cfg.CONF
    conf.register_cli_opts(opts)
    conf.register_opts(opts)
    logging.register_options(conf)
    logging.set_defaults()

    try:
        conf(project='act')
        validate_required_opts(conf, opts)
    except cfg.RequiredOptError as e:
        print('Error: %s' % e)
        conf.print_usage()
        exit(1)

    logging.setup(conf, 'act')
    LOG.info('Logging enabled')
    conf.log_opt_values(LOG, std_logging.DEBUG)
def resolve_relative_path(file_name):
    """Resolve *file_name* relative to the act package root.

    Returns the normalized path when it exists, otherwise None (implicit).
    """
    package_root = os.path.dirname(__import__('act').__file__)
    candidate = os.path.normpath(os.path.join(package_root, '../', file_name))
    if os.path.exists(candidate):
        return candidate
def read_file(file_name, base_dir='', alias_mapper=None):
    """Read and return the contents of a file.

    Resolution order: optional alias (via *alias_mapper* and
    resolve_relative_path), then *base_dir*-relative path, then a path
    relative to act's package root.

    Raises IOError when the file cannot be located or read.
    """
    full_path = os.path.normpath(os.path.join(base_dir, file_name))
    if alias_mapper:  # interpret file_name as alias
        alias_path = resolve_relative_path(alias_mapper(file_name))
        if alias_path:
            full_path = alias_path
            LOG.info('Alias "%s" is resolved into file "%s"',
                     file_name, full_path)
    if not os.path.exists(full_path):
        # treat file_name as relative to act's package root
        full_path = os.path.normpath(os.path.join(
            os.path.dirname(__import__('act').__file__), '../', file_name))
        if not os.path.exists(full_path):
            msg = ('File %s not found by absolute nor by relative path' %
                   file_name)
            LOG.error(msg)
            raise IOError(msg)
    try:
        # with-statement guarantees the descriptor is closed (replaces
        # the manual fd=None / try / finally bookkeeping)
        with open(full_path) as fd:
            return fd.read()
    except IOError as e:
        LOG.error('Error reading file: %s', e)
        raise
def write_file(data, file_name, base_dir=''):
    """Write *data* to *file_name* under *base_dir*.

    Returns the result of file.write (the character count on Python 3).
    Raises IOError on failure, after logging it.
    """
    full_path = os.path.normpath(os.path.join(base_dir, file_name))
    try:
        # with-statement guarantees the descriptor is closed (replaces
        # the manual fd=None / try / finally bookkeeping)
        with open(full_path, 'w') as fd:
            return fd.write(data)
    except IOError as e:
        LOG.error('Error writing file: %s', e)
        raise
def read_yaml_file(file_name, base_dir='', alias_mapper=None):
    """Read a file and parse it as YAML.

    :return: the parsed document, or None when parsing fails
        (the error is logged; best-effort semantics are preserved)
    """
    raw = read_file(file_name, base_dir=base_dir, alias_mapper=alias_mapper)
    try:
        return yaml.safe_load(raw)
    except Exception as e:
        LOG.error('Failed to parse file %(file)s in YAML format: %(err)s',
                  dict(file=file_name, err=e))
def split_address(address):
    """Split an address of the form 'host:port' into its two parts.

    :param address: string expected to contain exactly one ':'
    :return: (host, port) tuple of strings
    :raises ValueError: when the address is not 'host:port'
    """
    try:
        host, port = address.split(':')
    except ValueError:
        # BUG FIX: the message must be %-formatted here; ValueError does
        # not interpolate extra positional arguments.
        raise ValueError('Invalid address: %s, "host:port" expected' %
                         address)
    return host, port
def read_uri(uri):
    """Fetch *uri* and return the raw response body.

    Best-effort: on any error the problem is logged and None is returned
    (original behavior preserved).

    :param uri: URI to open
    :return: raw response body, or None on error
    """
    try:
        req = six.moves.urllib.request.Request(url=uri)
        fd = six.moves.urllib.request.urlopen(req)
        try:
            return fd.read()
        finally:
            # close the handle even when read() raises, avoiding a leak
            fd.close()
    except Exception as e:
        LOG.warn('Error "%(error)s" while reading uri %(uri)s',
                 {'error': e, 'uri': uri})
def random_string(length=6):
    """Return *length* distinct characters sampled from a fixed alphabet."""
    alphabet = 'adefikmoprstuz'
    chosen = random.sample(alphabet, length)
    return ''.join(chosen)
def make_id():
    """Generate a unique string identifier (a random UUID4)."""
    return '%s' % uuid.uuid4()
def copy_dict_kv(source):
    """Return a shallow copy of *source* as a new dict."""
    return {key: value for key, value in source.items()}
def flatten_dict(d, prefix='', sep='.'):
    """Flatten a nested dict into a list of (joined_path, value) tuples.

    :param d: possibly nested dictionary
    :param prefix: path prefix prepended to every key
    :param sep: separator joining nested keys
    :return: list of (path, value) tuples in iteration order
    """
    res = []
    for k, v in d.items():
        path = prefix + k
        if isinstance(v, dict):
            # BUG FIX: propagate sep so custom separators apply at every
            # nesting level, not just the first one.
            res.extend(flatten_dict(v, path + sep, sep=sep))
        else:
            res.append((path, v))
    return res
def make_help_options(message, base, type_filter=None):
    """Return *message* %-formatted with available option names.

    Walks the directory tree under the resolved *base*, optionally
    filters file names with *type_filter*, and joins the sorted,
    extension-less relative paths into a quoted comma-separated list.
    """
    root_dir = resolve_relative_path(base)
    found = []
    for current, _dirs, names in os.walk(root_dir):
        found.extend(os.path.join(current, name) for name in names)
    if type_filter:
        found = [f for f in found if type_filter(f)]
    relative = (os.path.relpath(f, start=root_dir) for f in found)
    quoted = ('"%s"' % f.partition('.')[0] for f in sorted(relative))
    return message % ', '.join(quoted)
def algebraic_product(**kwargs):
    """Yield a dict for every combination of the supplied keyword values.

    A scalar value is wrapped into a one-element list, so each keyword
    contributes at least one choice to the Cartesian product.
    """
    keys = []
    value_lists = []
    total = 1
    for key, item in six.iteritems(kwargs):
        keys.append(key)
        as_list = item if type(item) == list else [item]
        value_lists.append(as_list)
        total *= len(as_list)
    LOG.debug('Total number of permutations is: %s', total)
    for combination in itertools.product(*value_lists):
        yield dict(zip(keys, combination))
def strict(s):
    """Normalize *s*: drop any '(...)' span, squash runs of non-word
    characters into '_', and lower-case the result."""
    without_parens = re.sub(r'\(.+\)', '', s)
    return re.sub(r'[^\w\d]+', '_', without_parens).lower()
def weighted_random_choice(items):
    """Pick one element of *items* at random, weighted by item.weight.

    Builds a cumulative weight table and binary-searches it with a
    uniformly random point in [0, total_weight).
    """
    cumulative = []
    total = 0
    for entry in items:
        total += entry.weight
        cumulative.append(total)
    point = random.random() * total
    return items[bisect.bisect_right(cumulative, point)]
def make_redis_connection(**kwargs):
    """Open a Redis connection, dropping keyword args with falsy values."""
    filtered = {key: value for key, value in kwargs.items() if value}
    return redis.Redis(**filtered)
| |
#!/usr/bin/env python3
from collections.abc import Mapping, Sequence
from collections import OrderedDict
from types import FunctionType
import logging
from tabulate import tabulate
from tqdm import tqdm
import textwrap
import psutil
import numpy
import sys
from os.path import basename
import utils
from pools.eventlet import EventletPool
from pools.gevent import GeventPool
from pools.multiprocessing import MultiprocessingProcessPool, \
MultiprocessingThreadPool
from pools.standard_library import StandardProcessPool, StandardThreadPool
def run_test(work_type: str, job_sets: Sequence, trials: int,
             pool_class: type, worker_count: int) -> Sequence:
    """Run the compute or network test on one pool implementation.

    Annotations fixed: work_type is a string tag (it is compared against
    'compute'/'network'), and the function returns a list of summaries,
    not a Mapping.

    :param work_type: 'compute' or 'network'
    :param job_sets: job counts to test, one test run per entry
    :param trials: repetitions per job count
    :param pool_class: pool implementation class to instantiate
    :param worker_count: concurrency level for the pool
    :return: list of summarized result mappings, one per job set
    :raises Exception: for an unrecognized work_type
    """
    # Validate work_type before creating the pool so an invalid value
    # does not leak a constructed-but-never-destroyed pool.
    if work_type == 'compute':
        test_attr = 'run_compute_test'
    elif work_type == 'network':
        test_attr = 'run_network_test'
    else:
        raise Exception("Invalid work type: {}".format(work_type))
    pool = pool_class(worker_count)
    test_func = getattr(pool, test_attr)
    results = map(
        lambda jobs: test_func(jobs, trials, show_progress=True),
        tqdm(job_sets, desc=pool_class.__name__),
    )
    summarized_results = list(map(summarize_test, results))
    pool.destroy_pool()
    return summarized_results
def summarize_test(test_output: Mapping) -> Mapping:
    """Collapse one test run's per-trial lists into their means.

    Keeps 'jobs' as-is and replaces 'time' and 'blocks' with the mean of
    the recorded trial values.
    """
    summary = {'jobs': test_output['jobs']}
    summary['time'] = numpy.mean(test_output['time'])
    summary['blocks'] = numpy.mean(test_output['blocks'])
    return summary
if __name__ == '__main__':
    import multiprocessing
    # Set up Multiprocessing start method
    # Some start methods depend on a clean process to fork from
    multiprocessing.set_start_method('spawn')
    import argparse
    # ---- command-line interface --------------------------------------------
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--work-type', '-w', default='compute',
                        choices=['compute', 'network'],
                        help='The kind of work to perform in each pool')
    parser.add_argument('--max-work', '-m', type=int, default=4,
                        help='The power of 2 for number of jobs to execute. '
                        'For example, a choice of 4 will yield a maximum '
                        'of 2^4 jobs to run.')
    parser.add_argument('--trials', '-r', type=int, default=1,
                        help='The total number of times to run a test with '
                        'the same parameters')
    parser.add_argument('--samples', '-s', type=int, default=10,
                        help='The total number of samples to compute. '
                        'For example, 4 samples with max-work of 4 will '
                        'run each pool with 4, 8, 12, and then 16 jobs.')
    parser.add_argument('--concurrent-threads', '-t', type=int, default=50,
                        help='The number of concurrent threads to use in '
                        'each thread pool.')
    parser.add_argument('--concurrent-processes', '-p', type=int,
                        default=multiprocessing.cpu_count() * 2 + 1,
                        help='The number of concurrent processes to use in '
                        'each process pool. The default is (number of'
                        'processors * 2) + 1.')
    parser.add_argument('--no-graph', action='store_true', default=False,
                        help='Disable showing the graph of the results at the '
                        'end of execution.')
    parser.add_argument('--graph-height', type=float, default=6,
                        help='Set the graph height (inches)')
    parser.add_argument('--graph-width', type=float, default=10,
                        help='Set the graph width (inches)')
    parser.add_argument('--graph-save',
                        help='If set, the graph that is created will be '
                        'saved to the provided file name. Be sure to '
                        'include a supported matplotlib file extension '
                        'like .png or .pdf')
    parser.add_argument('--save', help='If set, then the text output and '
                        'graph are saved in markdown and png formats, '
                        'respectively. Overrides --graph-save.')
    args = parser.parse_args()
    # ---- argument validation -----------------------------------------------
    if args.samples < 1:
        parser.error("Samples must be a positive integer")
    if args.trials < 1:
        parser.error("Trials must be a positive integer")
    if args.graph_height < 1:
        parser.error("Graph height must be a positive integer")
    if args.graph_width < 1:
        parser.error("Graph width must be a positive integer")
    if args.save is not None and basename(args.save) == '':
        parser.error("Save file's name must not be empty")
    # ---- logging: always to console; also to '<save>.md' when --save given -
    logger = logging.getLogger('pools')
    logger.setLevel(logging.DEBUG)
    stdout_handler = logging.StreamHandler()
    stdout_handler.setLevel(logging.DEBUG)
    logger.addHandler(stdout_handler)
    if args.save is not None:
        # Send to data dump file as well as console
        file_handler = logging.FileHandler(args.save + '.md', mode='w')
        file_handler.setLevel(logging.INFO)
        logger.addHandler(file_handler)
    # (pool implementation, concurrency level) pairs to benchmark
    pool_types = [
        (EventletPool, args.concurrent_threads),
        (GeventPool, args.concurrent_threads),
        (MultiprocessingProcessPool, args.concurrent_processes),
        (MultiprocessingThreadPool, args.concurrent_threads),
        (StandardProcessPool, args.concurrent_processes),
        (StandardThreadPool, args.concurrent_threads),
    ]
    # Job counts to test: 0..2^max_work in roughly `samples` even steps
    max_jobs = 2 ** args.max_work
    trials = args.trials
    samples = args.samples
    job_step = int(max_jobs / samples)
    if job_step == 0:
        # more samples requested than jobs available; step by one instead
        job_step = 1
    job_sets = range(0, max_jobs + 1, job_step)
    # Echo the run configuration (markdown formatted for the --save dump)
    logger.info(textwrap.dedent(
        """\
        ## Command input
        ```
        {argv}
        ```
        ## Machine configuration
        * CPU count: {cpu_count}
        * Memory size: {memory_size}
        ## Test configuration:
        * Maximum work: 2^{max_work} = {jobs} jobs
        * Concurrent processes: {concurrent_processes}
        * Concurrent threads: {concurrent_threads}
        * Number of samples: {samples}
        * Trials: {trials}
        """).format(
            argv=' \\\n    '.join(sys.argv),
            cpu_count=psutil.cpu_count(),
            memory_size=utils.bytes_for_humans(
                psutil.virtual_memory().available
            ),
            jobs=max_jobs,
            **vars(args)
        )
    )
    # Run every pool type across all job sets and collect the summaries
    all_results = list(tqdm(
        map(
            lambda pool_class_tuple: run_test(
                args.work_type,
                job_sets,
                trials,
                *pool_class_tuple
            ),
            pool_types
        ),
        desc='Pool Analysis',
        total=len(pool_types),
    ))
    # Pair each pool class name with its result list
    all_results_dict = zip(
        map(lambda cls_tuple: cls_tuple[0].__name__, pool_types),
        all_results
    )
    # Sort iteration order of mapping
    all_results_dict = OrderedDict(sorted(all_results_dict))
    logger.info("## Results\n")
    if args.save is not None:
        # NOTE(review): '\n'.format(name=...) ignores the 'name' kwarg and
        # logs a bare newline; a markdown reference using {name} was
        # presumably intended here -- confirm against the report template.
        logger.info('\n'.format(name=basename(args.save)))
    for class_name, result in all_results_dict.items():
        table = tabulate(result, headers='keys',
                         tablefmt='pipe')
        logger.info("### {}\n\n{}\n\n".format(class_name, table))
    if args.no_graph is True:
        exit(0)
    # ---- plotting (matplotlib imported lazily so --no-graph runs headless) -
    from matplotlib import pyplot as plt
    plt.figure(figsize=(args.graph_width, args.graph_height))
    plt.subplots_adjust(left=0.1, hspace=0.4)
    time_axes = plt.subplot(2, 1, 1)
    time_lines = utils.plot_tuple_array(
        time_axes, all_results_dict, 'jobs', 'time',
        custom_y_label='completion time (s)',
    )
    plt.title("run time vs job count")
    memory_axes = plt.subplot(2, 1, 2)
    memory_lines = utils.plot_tuple_array(
        memory_axes, all_results_dict, 'jobs', 'blocks',
        custom_y_label='memory allocated (blocks)',
        y_mapping=utils.lower_bound,
    )
    plt.title("memory allocated vs job count")
    # Scale graphs down and put legend on right
    horizontal_scaling = 0.7
    pos = time_axes.get_position()
    time_axes.set_position([pos.x0, pos.y0,
                            pos.width * horizontal_scaling, pos.height])
    pos = memory_axes.get_position()
    memory_axes.set_position([pos.x0, pos.y0,
                              pos.width * horizontal_scaling, pos.height])
    plt.figlegend(
        time_lines,
        labels=all_results_dict.keys(),
        loc='center left',
        bbox_to_anchor=(horizontal_scaling - 0.005, 0.5),
        fontsize='medium',
    )
    # --save wins over --graph-save; otherwise show the graph interactively
    if args.save is not None:
        plt.savefig(args.save + '.png')
    elif args.graph_save is not None:
        plt.savefig(args.graph_save)
    else:
        plt.show()
| |
# -*- coding: utf-8 -*-
"""
mudicom.base
~~~~~~~~~~~~
Primary functionality for reading DICOM files using the Dicom and
DataElement class definitions.
"""
import os
import json
import gdcm
from .validation import validate
from .image import Image
from .exceptions import InvalidDicom
def get_anon_tags():
    """Load the de-identification tag list bundled with mudicom.

    Reads json/deidentify.json located next to this module and returns
    the parsed JSON data.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    tag_file = os.path.join(base_dir, "json/deidentify.json")
    with open(tag_file, "r") as fp:
        return json.load(fp)
class DataElement(object):
    """ Object representation of a single DICOM Data Element.

    Wraps a GDCM SWIG DataElement and exposes its decoded pieces as
    plain Python attributes.

    * DataElement Properties:
        * name: Name of DICOM data element
        * value: Value of data element
        * VR: Value Representation of data element
        * VL: Value Length of data element
        * tag: Dictionary of data element tag information
            * group: Tag group of data element (hex string)
            * element: Tag element of data element (hex string)
            * str: String representation of data element tag """
    def __init__(self, swig_element, name, value):
        """ Capture the SWIG element plus its decoded name and value.
        :param swig_element: GDCMs DataElement SWIG object
        :param name: Name of DICOM Data Element
        :param value: Value of DICOM Data Element """
        self._swig_element = swig_element
        self.name = name
        self.value = value
        # VR/VL arrive as SWIG wrappers; stringify and trim padding
        self.VR = str(swig_element.GetVR()).strip()
        self.VL = str(swig_element.GetVL()).strip()
        element_tag = swig_element.GetTag()
        self.tag = {
            "group": hex(int(element_tag.GetGroup())),
            "element": hex(int(element_tag.GetElement())),
            "str": str(swig_element.GetTag()).strip(),
        }

    def __repr__(self):
        return "<DataElement {0} {1}>".format(self.name, self.tag['str'])

    def __str__(self):
        return str(self.name)
class Dicom(object):
    """ Primary class that loads the DICOM file into memory and has properties
    that allows for reading the DICOM elements, extracting images,
    anonymizing and validating the file.
    :param fname: Location and filename of DICOM file.
    :raises InvalidDicom: if GDCM cannot parse the file
    """
    def __init__(self, fname):
        self.fname = fname
        self.file_name, self.file_extension = os.path.splitext(fname)
        reader = gdcm.Reader()
        reader.SetFileName(fname)
        if not reader.Read():
            raise InvalidDicom("Not a valid DICOM file")
        self._file = reader.GetFile()
        self._header = self._file.GetHeader()
        self._dataset = self._file.GetDataSet()
        # StringFilter converts raw element values into printable pairs
        self._str_filter = gdcm.StringFilter()
        self._str_filter.SetFile(self._file)
        # lazily populated caches
        self._image = None        # Image helper, see the `image` property
        self._anon_obj = None     # gdcm.Anonymizer built by anonymize()
        self._anon_tags = None    # de-identification tag list
        self._errors = None       # cached validation errors
        self._warnings = None     # cached validation warnings

    def __repr__(self):
        return "<Dicom {0}>".format(self.fname)

    def __str__(self):
        return str(self.fname)

    def read(self):
        """ Returns array of DataElement objects for every element in the
        DICOM file (dataset first, then header) whose value is non-empty.
        """
        def ds(data_element):
            # ToStringPair yields (name, value); drop value-less elements
            value = self._str_filter.ToStringPair(data_element.GetTag())
            if value[1]:
                return DataElement(data_element, value[0].strip(), value[1].strip())
        results = [data for data in self.walk(ds) if data is not None]
        return results

    def walk(self, fn):
        """ Loops through all data elements and allows a function to interact
        with each data element. Uses a generator to improve iteration.
        :param fn: Function that interacts with each DICOM element
        :raises TypeError: if fn is not callable
        """
        if not hasattr(fn, "__call__"):
            raise TypeError("""walk_dataset requires a
                function as its parameter""")
        # walk the main dataset first ...
        dataset = self._dataset
        iterator = dataset.GetDES().begin()
        while (not iterator.equal(dataset.GetDES().end())):
            data_element = iterator.next()
            yield fn(data_element)
        # ... then the file meta header
        header = self._header
        iterator = header.GetDES().begin()
        while (not iterator.equal(header.GetDES().end())):
            data_element = iterator.next()
            yield fn(data_element)

    def find(self, group=None, element=None, name=None, VR=None):
        """ Searches for data elements in the DICOM file given the filters
        supplied to this method.
        :param group: Hex decimal for the group of a DICOM element e.g. 0x002
        :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010
        :param name: Name of the DICOM element, e.g. "Modality"; when given,
            all other filters are ignored
        :param VR: Value Representation of the DICOM element, e.g. "PN"
        :return: iterator over the matching DataElement objects
        """
        results = self.read()
        if name is not None:
            def find_name(data_element):
                return data_element.name.lower() == name.lower()
            return filter(find_name, results)
        if group is not None:
            def find_group(data_element):
                # match either the hex string form or the numeric value
                return (data_element.tag['group'] == group
                        or int(data_element.tag['group'], 16) == group)
            results = filter(find_group, results)
        if element is not None:
            def find_element(data_element):
                return (data_element.tag['element'] == element
                        or int(data_element.tag['element'], 16) == element)
            results = filter(find_element, results)
        if VR is not None:
            def find_VR(data_element):
                return data_element.VR.lower() == VR.lower()
            results = filter(find_VR, results)
        return results

    def anonymize(self):
        """ According to PS 3.15-2008, basic application level
        De-Identification of a DICOM file requires replacing the values of a
        set of data elements.
        :return: the configured gdcm.Anonymizer (cached for save_as)
        """
        self._anon_obj = gdcm.Anonymizer()
        self._anon_obj.SetFile(self._file)
        self._anon_obj.RemoveGroupLength()
        if self._anon_tags is None:
            self._anon_tags = get_anon_tags()
        for tag in self._anon_tags:
            # tags arrive as "(gggg,eeee)"; strip the parentheses
            cur_tag = tag['Tag'].replace("(", "")
            cur_tag = cur_tag.replace(")", "")
            # NOTE(review): encode() makes `name` bytes on Python 3, so
            # "Anon" + name would fail there -- confirm intended Python 2
            # support before changing.
            name = tag["Attribute Name"].replace(" ", "").encode("utf8")
            group, element = cur_tag.split(",", 1)
            # TODO expand this 50xx, 60xx, gggg, eeee
            if ("xx" not in group
                    and "gggg" not in group
                    and "eeee" not in group):
                group = int(group, 16)
                element = int(element, 16)
                if self.find(group=group, element=element):
                    self._anon_obj.Replace(
                        gdcm.Tag(group, element), "Anon" + name)
        return self._anon_obj

    def save_as(self, fname, obj=None):
        """ Save DICOM file given a GDCM DICOM object.
        Examples of a GDCM DICOM object:
            * gdcm.Writer()
            * gdcm.Reader()
            * gdcm.Anonymizer()
        :param fname: DICOM file name to be saved
        :param obj: DICOM object to be saved, if None, the Anonymizer
            created by anonymize() is used
        :raises ValueError: when obj is None and anonymize() was never run
        :raises IOError: when the file could not be written
        """
        writer = gdcm.Writer()
        writer.SetFileName(fname)
        if obj is None:
            # BUG FIX: an explicitly supplied obj used to fall into the
            # error branch; only raise when no object is available at all.
            if not self._anon_obj:
                raise ValueError(
                    "Need DICOM object, e.g. obj=gdcm.Anonymizer()")
            obj = self._anon_obj
        writer.SetFile(obj.GetFile())
        if not writer.Write():
            raise IOError("Could not save DICOM file")
        return True

    @property
    def image(self):
        """ Read the loaded DICOM image data (lazily constructed). """
        if self._image is None:
            self._image = Image(self.fname)
        return self._image

    def validate(self):
        """ Run DICOM validation and cache the errors/warnings.
        :return: dict with 'errors' and 'warnings' lists
        """
        validation = validate(self.fname)
        self._errors = validation['errors']
        self._warnings = validation['warnings']
        return validation

    @property
    def errors(self):
        # validate lazily on first access
        if self._errors is None:
            return self.validate()['errors']
        return self._errors

    @property
    def warnings(self):
        # validate lazily on first access
        if self._warnings is None:
            return self.validate()['warnings']
        return self._warnings
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_bridge
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bridge port on Cumulus Linux
deprecated: Deprecated in 2.3. Use M(nclu) instead.
description:
- Configures a bridge interface on Cumulus Linux To configure a bond port
use the cl_bond module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bridging found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com)
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
stp:
description:
- Enables spanning tree Protocol. As of Cumulus Linux 2.5 the default
bridging mode, only per vlan RSTP or 802.1d is supported. For the
vlan aware mode, only common instance STP is supported
default: 'yes'
choices: ['yes', 'no']
ports:
description:
- List of bridge members.
required: True
vlan_aware:
description:
- Enables vlan-aware mode.
choices: ['yes', 'no']
mstpctl_treeprio:
description:
- Set spanning tree root priority. Must be a multiple of 4096.
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
- As this module writes the interface directory location, ensure that
``/etc/network/interfaces`` has a 'source /etc/network/interfaces.d/\*' or
whatever path is mentioned in the ``location`` attribute.
  - For the config to be activated, i.e. installed in the kernel,
    "service networking reload" needs to be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bridge vlan aware bridge.
- cl_bridge:
name: br0
ports: 'swp1-12'
vlan_aware: 'yes'
notify: reload networking
# configure bridge interface to define a default set of vlans
- cl_bridge:
name: bridge
ports: 'swp1-12'
vlan_aware: 'yes'
vids: '1-100'
notify: reload networking
# define cl_bridge once in tasks file
# then write interface config in variables file
# with just the options you want.
- cl_bridge:
name: "{{ item.key }}"
ports: "{{ item.value.ports }}"
vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
with_dict: "{{ cl_bridges }}"
notify: reload networking
# In vars file
# ============
---
cl_bridge:
br0:
alias_name: 'vlan aware bridge'
ports: ['swp1', 'swp3']
vlan_aware: true
vids: ['1-100']
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
    """Run a system command, failing the module with a clear message.

    :param module: AnsibleModule instance (supplies run_command/fail_json)
    :param exec_path: full command line to execute,
        e.g. "/sbin/ip -o link show"
    :return: command stdout on success, or the string '[{}]' (an empty
        JSON-ish list) when ifquery reports a missing interface
    """
    (_rc, out, _err) = module.run_command(exec_path)
    if _rc <= 0:
        return out
    if re.search('cannot find interface', _err):
        # missing interface means "no current config", which callers
        # expect to parse as an empty list
        return '[{}]'
    failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
    module.fail_json(msg=failure_msg)
def current_iface_config(module):
    """Load the interface's current config into the module object.

    Stores the first ifquery JSON entry (or {} when the interface file
    does not exist) on module.custom_current_config. The existence check
    works around an ifquery bug and can be removed once that is fixed.
    """
    iface_name = module.params.get('name')
    config_dir = module.params.get('location')
    module.custom_current_config = {}
    if os.path.exists(config_dir + '/' + iface_name):
        _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
        module.custom_current_config = module.from_json(
            run_cmd(module, _cmd))[0]
def build_address(module):
    """Copy ipv4/ipv6 params into the desired config's 'address' key.

    Skipped entirely when addr_method is 'dhcp' because DHCP interfaces
    must not carry static addresses.
    """
    if module.params.get('addr_method') == 'dhcp':
        return
    addresses = []
    ipv4 = module.params.get('ipv4')
    ipv6 = module.params.get('ipv6')
    if ipv4:
        addresses += ipv4
    if ipv6:
        addresses += ipv6
    if addresses:
        module.custom_desired_config['config']['address'] = ' '.join(
            addresses)
def build_vids(module):
    """Set 'bridge-vids' in the desired config from the vids parameter."""
    vids = module.params.get('vids')
    if vids:
        module.custom_desired_config['config']['bridge-vids'] = ' '.join(vids)
def build_pvid(module):
    """Set 'bridge-pvid' (the untagged VLAN) in the desired config."""
    pvid = module.params.get('pvid')
    if pvid:
        module.custom_desired_config['config']['bridge-pvid'] = str(pvid)
def conv_bool_to_str(_value):
    """Translate booleans into ifupdown2's 'yes'/'no'; pass others through."""
    if not isinstance(_value, bool):
        return _value
    return 'yes' if _value else 'no'
def build_generic_attr(module, _attr):
    """Copy a generic module parameter into the desired config.

    Booleans become 'yes'/'no', underscores in the parameter name become
    dashes (ifupdown2 style), and falsy values are skipped.
    """
    value = conv_bool_to_str(module.params.get(_attr))
    if value:
        key = re.sub('_', '-', _attr)
        module.custom_desired_config['config'][key] = str(value)
def build_alias_name(module):
    """Set the interface description ('alias') in the desired config."""
    alias = module.params.get('alias_name')
    if alias:
        module.custom_desired_config['config']['alias'] = alias
def build_addr_method(module):
    """Record the address family/method (e.g. inet + dhcp) when requested."""
    method = module.params.get('addr_method')
    if method:
        module.custom_desired_config['addr_family'] = 'inet'
        module.custom_desired_config['addr_method'] = method
def build_vrr(module):
    """Set 'address-virtual' (VRR mac + ip) in the desired config.

    virtual_ip and virtual_mac are declared required_together in main(),
    so when virtual_ip is present the mac is present as well.
    """
    virtual_ip = module.params.get('virtual_ip')
    virtual_mac = module.params.get('virtual_mac')
    if virtual_ip:
        pair = [virtual_mac, virtual_ip]
        module.custom_desired_config.get('config')['address-virtual'] = \
            ' '.join(pair)
def add_glob_to_array(_bridgemems):
    """Prefix any ranged bridge member (one containing '-') with 'glob'.

    Lists are joined into one space-separated string; non-list values
    are returned unchanged.
    """
    if not isinstance(_bridgemems, list):
        return _bridgemems
    members = []
    for member in _bridgemems:
        if re.search('-', member):
            member = 'glob ' + member
        members.append(member)
    return ' '.join(members)
def build_bridge_attr(module, _attr):
    """Copy a bridge-specific parameter into the desired config.

    Booleans become 'yes'/'no', ranged member lists get 'glob' markers,
    and the key is 'bridge-' plus the parameter name with underscores
    turned into dashes. Falsy values are skipped.
    """
    value = add_glob_to_array(conv_bool_to_str(module.params.get(_attr)))
    if value:
        key = 'bridge-' + re.sub('_', '-', _attr)
        module.custom_desired_config['config'][key] = str(value)
def build_desired_iface_config(module):
    """Assemble the ifupdown2-compatible desired configuration hash.

    Initializes module.custom_desired_config and delegates each family
    of attributes to its dedicated builder.
    """
    module.custom_desired_config = {
        'addr_family': None,
        'auto': True,
        'config': {},
        'name': module.params.get('name')
    }
    for attr in ('vlan_aware', 'pvid', 'ports', 'stp'):
        build_bridge_attr(module, attr)
    build_addr_method(module)
    build_address(module)
    build_vids(module)
    build_alias_name(module)
    build_vrr(module)
    for attr in ('mtu', 'mstpctl_treeprio'):
        build_generic_attr(module, attr)
def config_dict_changed(module):
    """Return True when the desired 'config' dict differs from current."""
    return (module.custom_current_config.get('config') !=
            module.custom_desired_config.get('config'))
def config_changed(module):
    """Return True when desired config differs from the current one.

    Compares the nested 'config' dict first, then falls back to the
    address method.
    """
    desired_method = module.custom_desired_config.get('addr_method')
    current_method = module.custom_current_config.get('addr_method')
    return config_dict_changed(module) or desired_method != current_method
def replace_config(module):
    """Render the desired config through ifquery and install it.

    Writes the desired config hash as JSON into a temp file, asks
    ifquery to convert it to interfaces(5) text, and writes the result
    to <location>/<name> (by default /etc/network/interfaces.d/swp1).
    """
    desired_config = module.custom_desired_config
    final_location = module.params.get('location') + '/' + \
        module.params.get('name')
    final_text = ''
    # text mode so jsonify()'s str output can be written on Python 3 too
    with tempfile.NamedTemporaryFile(mode='w+') as temp:
        # make sure to put hash in array or else ifquery will fail
        temp.write(module.jsonify([desired_config]))
        # flush + seek so ifquery sees the full data on disk via temp.name
        temp.flush()
        temp.seek(0)
        _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
        final_text = run_cmd(module, _cmd)
    # open the target only after ifquery succeeded, so a failed render no
    # longer truncates the existing interface file
    with open(final_location, 'w') as final_file:
        final_file.write(final_text)
def main():
    """Ansible entry point: parse params, compare configs, apply changes."""
    module = AnsibleModule(
        argument_spec=dict(
            ports=dict(required=True, type='list'),
            name=dict(required=True, type='str'),
            ipv4=dict(type='list'),
            ipv6=dict(type='list'),
            alias_name=dict(type='str'),
            addr_method=dict(type='str',
                             choices=['', 'dhcp']),
            mtu=dict(type='str'),
            virtual_ip=dict(type='str'),
            virtual_mac=dict(type='str'),
            vids=dict(type='list'),
            pvid=dict(type='str'),
            mstpctl_treeprio=dict(type='str'),
            vlan_aware=dict(type='bool'),
            stp=dict(type='bool', default='yes'),
            location=dict(type='str',
                          default='/etc/network/interfaces.d')
        ),
        required_together=[
            ['virtual_ip', 'virtual_mac']
        ]
    )
    # if using the jinja default filter, this resolves to
    # create an list with an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list, get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
    for k, _param in module.params.items():
        if isinstance(_param, list):
            module.params[k] = [x for x in _param if x]
    _location = module.params.get('location')
    if not os.path.exists(_location):
        _msg = "%s does not exist." % (_location)
        module.fail_json(msg=_msg)
        # fail_json exits in production; the return lets unit tests pass
        return  # for testing purposes only
    ifacename = module.params.get('name')
    _changed = False
    _msg = "interface %s config not changed" % (ifacename)
    # compute current vs desired config, rewrite only when they differ
    current_iface_config(module)
    build_desired_iface_config(module)
    if config_changed(module):
        replace_config(module)
        _msg = "interface %s config updated" % (ifacename)
        _changed = True
    module.exit_json(changed=_changed, msg=_msg)
if __name__ == '__main__':
    main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for Mask-RCNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_util
import coco_metric
import dataloader
import mask_rcnn_model
import mask_rcnn_params
import mask_rcnn_runner
import runner_utils
from mlp_log import mlp_log
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu', default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_string(
'master',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers. You must specify either '
    'this flag or --tpu.')
# Model specific paramenters
flags.DEFINE_string('tpu_job_name', default=None, help='The tpu worker name.')
flags.DEFINE_string(
'eval_master', default='',
    help='GRPC URL of the eval master. Set to an appropriate value when running '
'on CPU/GPU')
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_multi_integer(
'input_partition_dims', None,
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('resnet_checkpoint', '',
'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string(
'validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file',
None,
'COCO validation JSON containing golden bounding boxes.')
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def main(argv):
    """Set up and run Mask R-CNN training and/or evaluation on TPU.

    Reads all configuration from FLAGS, builds the hparams/params dicts,
    constructs the matching runner (TrainEvalRunner / TrainRunner /
    EvalRunner) and dispatches on FLAGS.mode.
    """
    del argv  # Unused.
    # TODO(b/132208296): remove this workaround that uses control flow v2.
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
    tpu = FLAGS.tpu or FLAGS.master
    tpu_cluster_resolver = runner_utils.create_tpu_cluster_resolver(
        FLAGS.use_tpu, tpu, FLAGS.tpu_zone, FLAGS.gcp_project)
    if tpu_cluster_resolver:
        tpu_grpc_url = tpu_cluster_resolver.get_master()
        # Reset any stale session state on the TPU worker before starting.
        tf.Session.reset(tpu_grpc_url)
    # Check data path
    run_train = FLAGS.mode in ('train', 'train_and_eval')
    if run_train and FLAGS.training_file_pattern is None:
        raise RuntimeError('You must specify --training_file_pattern for training.')
    run_eval = FLAGS.mode in ('eval', 'train_and_eval') or (
        FLAGS.mode == 'train' and FLAGS.eval_after_training)
    if run_eval:
        if FLAGS.validation_file_pattern is None:
            raise RuntimeError('You must specify --validation_file_pattern '
                               'for evaluation.')
        if FLAGS.val_json_file is None:
            raise RuntimeError('You must specify --val_json_file for evaluation.')
    # Parse hparams
    hparams = mask_rcnn_params.default_hparams()
    hparams.parse(FLAGS.hparams)
    # The following is for spatial partitioning. `features` has one tensor while
    # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
    # partition is performed on `features` and all partitionable tensors of
    # `labels`, see the partition logic below.
    # Note: In the below code, TPUEstimator uses both `shard` and `replica` (with
    # the same meaning).
    # Note that spatial partition is part of the model-parallelism optimization.
    # See core_assignment_utils.py for more details about model parallelism.
    if FLAGS.input_partition_dims:
        labels_partition_dims = {
            'gt_boxes': None,
            'gt_classes': None,
            'cropped_gt_masks': None,
        }
        for level in range(hparams.get('min_level'), hparams.get('max_level') + 1):
            labels_partition_dims['box_targets_%d' % level] = None
            labels_partition_dims['score_targets_%d' % level] = None
        num_cores_per_replica = int(np.prod(FLAGS.input_partition_dims))
        # With transposed input the leading dims are permuted, so the image
        # partition dims must be permuted to match.
        image_partition_dims = [
            FLAGS.input_partition_dims[i] for i in [1, 0, 2]
        ] if hparams.get('transpose_input') else FLAGS.input_partition_dims
        features_partition_dims = {
            'images': image_partition_dims,
            'source_ids': None,
            'image_info': None,
        }
        input_partition_dims = [features_partition_dims, labels_partition_dims]
        num_shards = FLAGS.num_cores // num_cores_per_replica
    else:
        num_cores_per_replica = None
        input_partition_dims = None
        num_shards = FLAGS.num_cores
    params = dict(
        hparams.values(),
        num_shards=num_shards,
        num_cores_per_replica=num_cores_per_replica,
        use_tpu=FLAGS.use_tpu,
        resnet_checkpoint=FLAGS.resnet_checkpoint,
        val_json_file=FLAGS.val_json_file,
        model_dir=FLAGS.model_dir)
    tpu_config = tf.contrib.tpu.TPUConfig(
        params['iterations_per_loop'],
        num_shards=num_shards,
        num_cores_per_replica=params['num_cores_per_replica'],
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig
        .PER_HOST_V2,
        tpu_job_name=FLAGS.tpu_job_name,
    )
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=FLAGS.model_dir,
        log_step_count_steps=params['iterations_per_loop'],
        tpu_config=tpu_config,
        save_checkpoints_steps=params['iterations_per_loop'],
    )
    # Replicas per worker shrinks when each replica spans several cores
    # (spatial partitioning / model parallelism).
    train_replicas_per_worker = (
        params['cores_per_worker'] // params['num_cores_per_replica']
    ) if params['num_cores_per_replica'] else params['cores_per_worker']
    train_params = dict(
        params,
        replicas_per_worker=train_replicas_per_worker,
    )
    eval_params = dict(
        params,
        input_rand_hflip=False,
        resnet_checkpoint=None,
        is_training_bn=False,
    )
    # MLPerf logging.
    mlp_log.mlperf_print(key='init_start', value=None)
    mlp_log.mlperf_print(key='global_batch_size',
                         value=params['train_batch_size'])
    runner = None
    if run_train and run_eval:
        if params['train_use_tpu_estimator'] or params['eval_use_tpu_estimator']:
            raise RuntimeError('train_and_eval runner does not support TPUEstimator.')
        dist_eval_params = dict(
            eval_params,
            replicas_per_worker=train_replicas_per_worker,
        )
        runner = mask_rcnn_runner.TrainEvalRunner(
            model_fn=mask_rcnn_model.MaskRcnnModelFn(),
            input_fn=dataloader.InputReader(
                FLAGS.training_file_pattern,
                mode=tf.estimator.ModeKeys.TRAIN,
                use_fake_data=FLAGS.use_fake_data),
            eval_input_fn=dataloader.InputReader(
                FLAGS.validation_file_pattern, mode=tf.estimator.ModeKeys.PREDICT,
                distributed_eval=True),
            eval_metric=coco_metric.EvaluationMetric(
                FLAGS.val_json_file, use_cpp_extension=True),
            train_params=train_params,
            eval_params=dist_eval_params,
            run_config=run_config)
    elif run_train:
        # Check low-level train runner compatibility.
        if not params['train_use_tpu_estimator']:
            if FLAGS.mode == 'train_and_eval':
                raise RuntimeError('Low level train runner does not support mode '
                                   'train_and_eval yet.')
        train_params = dict(
            params,
            replicas_per_worker=train_replicas_per_worker,
        )
        runner = mask_rcnn_runner.TrainRunner(
            model_fn=mask_rcnn_model.MaskRcnnModelFn(),
            input_fn=dataloader.InputReader(
                FLAGS.training_file_pattern,
                mode=tf.estimator.ModeKeys.TRAIN,
                use_fake_data=FLAGS.use_fake_data),
            params=train_params,
            run_config=run_config,
            use_tpu_estimator=train_params['train_use_tpu_estimator'])
    else:
        sidecar_eval_params = dict(
            eval_params,
            # sidecar eval only uses one worker and does not use spatial partition.
            replicas_per_worker=FLAGS.num_cores,)
        runner = mask_rcnn_runner.EvalRunner(
            mask_rcnn_model.MaskRcnnModelFn(),
            dataloader.InputReader(
                FLAGS.validation_file_pattern,
                mode=tf.estimator.ModeKeys.PREDICT),
            coco_metric.EvaluationMetric(
                FLAGS.val_json_file,
                use_cpp_extension=True),
            sidecar_eval_params,
            run_config,
            use_tpu_estimator=sidecar_eval_params['eval_use_tpu_estimator'])
    if FLAGS.mode == 'train':
        runner.train()
    elif FLAGS.mode == 'eval':
        def terminate_eval():
            # Called by checkpoints_iterator when no new checkpoint appears
            # within eval_timeout; returning True stops the iteration.
            tf.logging.info('Terminating eval after %d seconds of no checkpoints' %
                            FLAGS.eval_timeout)
            return True
        run_success = False
        # Run evaluation when there's a new checkpoint
        for ckpt in tf.contrib.training.checkpoints_iterator(
                params['model_dir'],
                min_interval_secs=FLAGS.min_eval_interval,
                timeout=FLAGS.eval_timeout,
                timeout_fn=terminate_eval):
            tf.logging.info('Starting to evaluate.')
            try:
                eval_results = runner.evaluate(ckpt)
                current_step, _ = runner.get_step_and_epoch_number(ckpt)
                # Stop as soon as both COCO quality targets are reached.
                if (eval_results['AP'] >= mask_rcnn_params.BOX_EVAL_TARGET and
                        eval_results['mask_AP'] >= mask_rcnn_params.MASK_EVAL_TARGET):
                    mlp_log.mlperf_print(key='run_stop', metadata={'status': 'success'})
                    run_success = True
                    break
                if int(current_step) >= params['total_steps']:
                    tf.logging.info('Evaluation finished after training step %d' %
                                    current_step)
                    break
            except tf.errors.NotFoundError:
                # Since the coordinator is on a different job than the TPU worker,
                # sometimes the TPU worker does not finish initializing until long after
                # the CPU job tells it to start evaluating. In this case, the checkpoint
                # file could have been deleted already.
                tf.logging.info('Checkpoint %s no longer exists, skipping checkpoint' %
                                ckpt)
        if not run_success:
            mlp_log.mlperf_print(key='run_stop', metadata={'status': 'aborted'})
    elif FLAGS.mode == 'train_and_eval':
        runner.train_and_eval()
    else:
        tf.logging.info('Mode not found.')
if __name__ == '__main__':
    # Verbose logging, then hand flag parsing + main() to absl.
    tf.logging.set_verbosity(tf.logging.INFO)
    app.run(main)
| |
from flask import Flask, render_template, request,\
redirect, url_for, flash, jsonify, make_response
from sqlalchemy import create_engine, or_
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Author, Poem, User
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenCredentials
import httplib2
import json
import requests
app = Flask(__name__)
# OAuth client id for Google sign-in, read from the downloaded secrets file.
CLIENT_ID = json.loads(open('client_secrets.json', 'r').read())['web']['client_id']
# NOTE(review): the trailing '/' in the SQLite URL looks accidental -- confirm
# the intended database file name.
engine = create_engine('sqlite:///poetryandalcohol.db/', encoding='utf-8')
Base.metadata.bind = engine
# Module-level session shared by all request handlers.
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
    """Render the login page with a fresh anti-forgery state token."""
    alphabet = string.ascii_uppercase + string.digits
    state = ''.join(random.choice(alphabet) for _ in xrange(32))
    login_session['state'] = state
    # NOTE(review): '/etc/login.html' is an unusual template path -- confirm
    # the template loader actually resolves it.
    return render_template('/etc/login.html', STATE=state)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
    """Complete the Facebook OAuth sign-in flow.

    Exchanges the client-supplied token for a long-lived one, fetches the
    user's profile and picture, stores the identity in login_session and
    returns a small welcome-HTML snippet.
    """
    # Reject requests whose anti-forgery state token does not match.
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    access_token = request.data
    app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
        'web']['app_id']
    app_secret = json.loads(
        open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    # Exchange the short-lived token for a long-lived one.
    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
        app_id, app_secret, access_token)
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # Use token to get user info from API
    userinfo_url = "https://graph.facebook.com/v2.4/me"
    # strip expire tag from access token
    token = result.split("&")[0]
    url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # print "url sent for API access:%s"% url
    # print "API JSON result: %s" % result
    data = json.loads(result)
    login_session['provider'] = 'facebook'
    login_session['username'] = data["name"]
    login_session['email'] = data["email"]
    login_session['facebook_id'] = data["id"]
    # The token must be stored in the login_session in order to properly logout, let's strip out the information before the equals sign in our token
    stored_token = token.split("=")[1]
    login_session['access_token'] = stored_token
    # Get user picture
    url = 'https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&height=200&width=200' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    data = json.loads(result)
    login_session['picture'] = data["data"]["url"]
    # see if user exists
    user_id = getUserId(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the welcome snippet returned to the client-side login handler.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("Now logged in as %s" % login_session['username'])
    return output
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Complete the Google OAuth2 sign-in flow.

    Exchanges the one-time authorization code posted by the client for
    credentials, validates the resulting access token, stores the user's
    identity in login_session and returns a welcome-HTML snippet.
    """
    # Validate the anti-forgery state token.
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        # BUG FIX: the original fell through here and kept using the invalid
        # token; abort the request instead.
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    stored_credentials = login_session.get('credentials')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_credentials is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected.'),
                                 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['credentials'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['provider'] = 'google'
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # see if user exists, if it doesn't make a new one
    user_id = getUserId(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the welcome snippet returned to the client-side login handler.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    return output
# User Helper Functions
def createUser(login_session):
    """Insert a new User row from the session data and return its id."""
    user = User(name=login_session['username'],
                email=login_session['email'],
                picture=login_session['picture'])
    session.add(user)
    session.commit()
    created = session.query(User).filter_by(email=login_session['email']).one()
    return created.id
def getUserInfo(user_id):
    """Return the User row with the given primary key."""
    return session.query(User).filter_by(id=user_id).one()
def getUserId(email):
    """Return the id of the User with *email*, or None if not found."""
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # .one() raises NoResultFound when the email is unknown; treat any
        # lookup failure as "no such user" (the original used a bare except,
        # which also swallowed KeyboardInterrupt/SystemExit).
        return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/fbdisconnect')
def fbdisconnect():
    """Revoke the Facebook token and clear the Facebook login session."""
    facebook_id = login_session['facebook_id']
    # The access token must me included to successfully logout
    access_token = login_session['access_token']
    url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (facebook_id,access_token)
    h = httplib2.Http()
    result = h.request(url, 'DELETE')[1]
    # Drop every identity key this provider stored in the session.
    for key in ('facebook_id', 'username', 'email', 'picture', 'user_id',
                'provider'):
        del login_session[key]
    return "you have been logged out"
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the stored Google access token and clear Google session keys."""
    # Only disconnect a connected user.
    credentials = AccessTokenCredentials(login_session['credentials'],
                                         'user-agent-value')
    if credentials is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    access_token = credentials.access_token
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        # Reset the user's session.
        del login_session['credentials']
        del login_session['gplus_id']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        if 'admin' in login_session:
            del login_session['admin']
        response = make_response(json.dumps('Successfully disconnected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # For whatever reason, the given token was invalid.
        # BUG FIX: the 400 status was previously passed to json.dumps()
        # instead of make_response(), so failures returned HTTP 200.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
    """Log the user out via whichever provider they signed in with."""
    if 'provider' not in login_session:
        flash("You were not logged in")
        return redirect(url_for('authors'))
    if login_session['provider'] == 'google':
        gdisconnect()
    elif login_session['provider'] == 'facebook':
        fbdisconnect()
    # BUG FIX: an unknown provider previously fell through and returned None
    # (a 500 in Flask); now every provider path ends with a redirect.
    flash("You have successfully been logged out.")
    return redirect(url_for('authors'))
# redirects the user to the index page
@app.route('/back')
def back():
    """Redirect helper used as the target of most form posts.

    NOTE(review): this_url was always the empty string, so the original
    `if this_url is None` branch was dead code; the handler always issued
    redirect(''). Behaviour is preserved -- consider redirecting to
    url_for('authors') explicitly instead.
    """
    return redirect('')
# set up index route
@app.route('/')
def authors():
    """Index page: list all authors; pass user identity when logged in."""
    # Create a fresh anti-forgery state token for the login buttons.
    state = ''.join(random.choice(string.ascii_uppercase + string.digits)
                    for x in xrange(32))
    login_session['state'] = state
    authors = session.query(Author).all()
    if 'email' not in login_session:
        # Anonymous visitor: render without creator/user info.
        return render_template('index.html', authors=authors, STATE=state)
    # Logged in (the original's trailing else was unreachable: the two
    # branches over 'email' in login_session already cover every case).
    user_id = getUserId(login_session['email'])
    creator = login_session['email']
    return render_template('index.html', authors=authors, STATE=state,
                           creator=creator, user_id=user_id)
# returns logged in users id
@app.route('/get_current_user')
def get_current_user():
    """Return the logged-in user's id as JSON."""
    try:
        user_id = getUserId(login_session['email'])
        return jsonify(user_id=user_id)
    except KeyError:
        # No 'email' key means nobody is logged in; only that lookup can
        # raise here (getUserId swallows its own errors), so the original
        # bare except is narrowed to KeyError.
        return render_template('index.html', user_id=0)
# get and return search terms
@app.route('/get_search_term', methods=['GET'])
def get_search_term():
    """Search authors and poems by name and return matches as a JSON list."""
    term = request.args.get('q')
    pattern = '%' + str(term) + '%'
    # query the poems with the search term
    do_query_poems = session.query(Poem.name).filter(Poem.name.like(pattern))
    # query the authors with the search term
    do_query_authors = session.query(Author.name).filter(Author.name.like(pattern))
    # .all() always yields a list (possibly empty), so the original
    # `!= None or != ''` guards were tautological and have been dropped.
    # Poems first, then authors -- same ordering as before.
    list_results = [poem[0] for poem in do_query_poems.all()]
    list_results.extend(author[0] for author in do_query_authors.all())
    return jsonify(search_term=list_results)
# # get and return author name from search
# @app.route('/get_search_term_name', methods=['GET'])
# def get_search_term_name():
# term = request.args.get('q')
# # query the authors with the search term
# do_query_authors = session.query(Author.name).filter(Author.name.like('%' + str(term) + '%'))
# author_results = [author[0] for author in do_query_authors.all()]
# # return list of authors and poems if found in database
# return jsonify(auther_results=author_results)
# adds an author to the database
@app.route('/authors/new/', methods=['GET', 'POST'])
def add_author():
    """Create a new author unless one with the same name already exists."""
    if request.method == 'POST':
        new_author = request.form['name']
        # Uniqueness check by exact name match (removed the unused
        # query-all-authors and the four duplicate debug prints).
        exists = session.query(Author.name).filter_by(
            name=new_author).scalar() is not None
        if exists:
            return redirect(url_for('back'))
        author = Author(name=new_author,
                        user_id=getUserId(login_session['email']))
        session.add(author)
        session.commit()
        return redirect(url_for('back'))
    else:
        # this should return an error on the form
        return render_template('index.html')
# updates an authors name in the database
@app.route('/authors/update/', methods=['GET', 'POST'])
def update_author():
    """Rename an existing author (debug prints removed)."""
    if request.method == 'POST':
        new_name = request.form['name']
        author_id = request.form['id']
        editedAuthor = session.query(Author).filter_by(id=author_id).one()
        editedAuthor.name = new_name
        session.add(editedAuthor)
        session.commit()
        flash("author name updated")
        return redirect(url_for('back'))
    else:
        # this should return an error on the form
        return render_template('index.html')
# deletes an author and all poems from the database
@app.route('/authors/delete/', methods=['GET', 'POST'])
def delete_author():
    """Delete an author together with all of that author's poems."""
    if request.method == 'POST':
        author_id = request.form['id']
        author_to_delete = session.query(Author).filter_by(id=author_id).one()
        # BUG FIX: the original passed the whole poem *list* to
        # session.delete(), which always raised (silently, via a bare
        # except) and left orphaned poems behind. Delete them one by one,
        # and delete the poems before the author.
        for poem in session.query(Poem).filter_by(author_id=author_id).all():
            session.delete(poem)
        session.delete(author_to_delete)
        session.commit()
        flash("an author has been deleted")
        return redirect(url_for('back'))
    else:
        # this should return an error on the form
        return render_template('index.html')
# returns a list of the authors poems in json format
# list is updated via jquery getJSON
@app.route('/get_author_poems')
def get_author_poems():
    """Return all poems of one author as JSON (consumed by jQuery getJSON)."""
    author_id = request.args.get('author_id', 0, type=int)
    author = session.query(Author).filter_by(id=author_id).one()
    author_poems = session.query(Poem).filter_by(author_id=author.id).all()
    return jsonify(Poems=[poem.serialize for poem in author_poems])
# returns a single poem in json for jquery update
@app.route('/get_poem')
def get_poem():
    """Return a single poem as JSON for a jQuery update."""
    requested_id = request.args.get('poem_id', 0, type=int)
    found = session.query(Poem).filter_by(id=requested_id).one()
    return jsonify(Poem=found.serialize)
# adds an author to the database
@app.route('/poem/new/', methods=['GET', 'POST'])
def add_poem():
    """Create a poem attached to an existing author."""
    if request.method == 'POST':
        new_poem_name = request.form['poemTitle']
        new_poem_text = request.form['poemText']
        author_name = request.form['name']
        user_id = getUserId(login_session['email'])
        # Look the author up once with .first(); the original ran the same
        # .one() query twice inside a bare try/except, which also hid real
        # database errors behind "Author not found".
        author = session.query(Author).filter_by(name=author_name).first()
        if author is None:
            flash("Author not found")
            return redirect(url_for('back'))
        new_poem = Poem(name=new_poem_name,
                        the_poem=new_poem_text,
                        author_id=author.id,
                        user_id=user_id)
        session.add(new_poem)
        session.commit()
        return redirect(url_for('back'))
    else:
        # this should return an error on the form
        return render_template('index.html')
# updates a poem by a specific author
@app.route('/poem/update/', methods=['GET', 'POST'])
def update_poem():
    """Update a poem's title and text (duplicate debug prints removed)."""
    if request.method == 'POST':
        new_name = request.form['name']
        new_poem = request.form['the_poem']
        poem_id = request.form['id']
        editedPoem = session.query(Poem).filter_by(id=poem_id).one()
        editedPoem.name = new_name
        editedPoem.the_poem = new_poem
        session.add(editedPoem)
        session.commit()
        flash("poem name/entry updated")
        return redirect(url_for('back'))
    else:
        # this should return an error on the form
        return render_template('index.html')
# deletes an author and all poems from the database
@app.route('/poem/delete/', methods=['GET', 'POST'])
def delete_poem():
    """Delete a single poem by id."""
    if request.method != 'POST':
        # this should return an error on the form
        return render_template('index.html')
    doomed = session.query(Poem).filter_by(id=request.form['id']).one()
    session.delete(doomed)
    session.commit()
    flash("a poem has been deleted")
    return redirect(url_for('back'))
#########
#### API ENDPOINTS
#########
# queries all authors and returns JSON
@app.route('/authors/JSON')
def authors_JSON():
    """API endpoint: every author, serialized as JSON."""
    all_authors = session.query(Author).all()
    return jsonify(Authors=[author.serialize for author in all_authors])
@app.route('/authors/<int:author_id>/poems/JSON')
def authors_poems_JSON(author_id):
    """API endpoint: all poems belonging to one author."""
    author_poems = session.query(Poem).filter_by(author_id=author_id)
    # NOTE(review): key is 'Poem' here but 'Poems' in poem_JSON -- they look
    # swapped; kept byte-for-byte because API clients may depend on it.
    return jsonify(Poem=[poem.serialize for poem in author_poems])
@app.route('/authors/<int:author_id>/poems/<int:poem_id>/JSON')
def poem_JSON(author_id, poem_id):
    """API endpoint: one specific poem of one author."""
    matching = session.query(Poem).filter_by(author_id=author_id, id=poem_id)
    return jsonify(Poems=[poem.serialize for poem in matching])
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key, debug=True and binding to
    # 0.0.0.0 are development-only settings -- do not deploy as-is.
    app.secret_key = 'super secret key'
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
| |
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4
from numpy.distutils.compat import get_exception
# Register Fortran source extensions so distutils can dispatch on language.
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
    # Extend distutils' config command with a Fortran compiler selector.
    old_config.user_options += [
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ]
    def initialize_options(self):
        # None means: auto-detect the Fortran compiler in _check_compiler.
        self.fcompiler = None
        old_config.initialize_options(self)
def try_run(self, body, headers=None, include_dirs=None,
            libraries=None, library_dirs=None, lang="c"):
    """Deprecated: compile, link and *run* a test program.

    Emits a DeprecationWarning, then defers to distutils' implementation.
    """
    banner = "+++++++++++++++++++++++++++++++++++++++++++++++++\n"
    message = ("\n" + banner
               + "Usage of try_run is deprecated: please do not \n"
               + "use it anymore, and avoid configuration checks \n"
               + "involving running executable on the target machine.\n"
               + banner)
    warnings.warn(message, DeprecationWarning)
    return old_config.try_run(self, body, headers, include_dirs, libraries,
                              library_dirs, lang)
def _check_compiler (self):
    """Ensure both the C and the Fortran compiler instances are ready.

    Extends distutils' check with MSVC early-initialization (for a clearer
    error message) and lazy creation/customization of the Fortran compiler.
    """
    old_config._check_compiler(self)
    from numpy.distutils.fcompiler import FCompiler, new_fcompiler
    if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
        # XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
        # initialize call query_vcvarsall, which throws an IOError, and
        # causes an error along the way without much information. We try to
        # catch it here, hoping it is early enough, and print an helpful
        # message instead of Error: None.
        if not self.compiler.initialized:
            try:
                self.compiler.initialize()
            except IOError:
                e = get_exception()
                msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed ? If you are trying to build with mingw, please use python setup.py
build -c mingw32 instead ). If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for
2.5, etc...). Original exception was: %s, and the Compiler
class was %s
============================================================================""" \
                    % (e, self.compiler.__class__.__name__)
                # NOTE(review): only the separator line is printed here,
                # not msg itself -- msg goes into the raised error below.
                print ("""\
============================================================================""")
                raise distutils.errors.DistutilsPlatformError(msg)
    if not isinstance(self.fcompiler, FCompiler):
        # Build and customize the Fortran compiler on first use.
        self.fcompiler = new_fcompiler(compiler=self.fcompiler,
                                       dry_run=self.dry_run, force=1,
                                       c_compiler=self.compiler)
        if self.fcompiler is not None:
            self.fcompiler.customize(self.distribution)
            if self.fcompiler.get_version():
                self.fcompiler.customize_cmd(self)
                self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
    """Invoke unbound distutils method *mth* with *args*, temporarily
    swapping in the Fortran compiler for f77/f90, and always restoring
    the original compiler afterwards.

    Raises CompileError on failure (compile or exec errors).
    """
    from distutils.ccompiler import CompileError
    from distutils.errors import DistutilsExecError
    save_compiler = self.compiler
    if lang in ['f77', 'f90']:
        self.compiler = self.fcompiler
    try:
        ret = mth(*((self,) + args))
    except (DistutilsExecError, CompileError):
        # BUG FIX: the original computed msg and then raised a bare
        # CompileError, discarding the failure details.
        msg = str(get_exception())
        self.compiler = save_compiler
        raise CompileError(msg)
    self.compiler = save_compiler
    return ret
def _compile (self, body, headers, include_dirs, lang):
    """Compile *body*, dispatching to the Fortran compiler for f77/f90."""
    compile_args = (body, headers, include_dirs, lang)
    return self._wrap_method(old_config._compile, lang, compile_args)
def _link (self, body,
           headers, include_dirs,
           libraries, library_dirs, lang):
    """Link a test program, with extra plumbing for MSVC + Fortran.

    With MSVC, Fortran objects are linked by the system linker, Fortran
    runtime libraries/dirs are folded in, and g77-built static libs are
    copied to MSVC-style names so the linker can find them.
    """
    if self.compiler.compiler_type=='msvc':
        # Work on copies so the caller's lists are not mutated.
        libraries = (libraries or [])[:]
        library_dirs = (library_dirs or [])[:]
        if lang in ['f77', 'f90']:
            lang = 'c' # always use system linker when using MSVC compiler
            if self.fcompiler:
                for d in self.fcompiler.library_dirs or []:
                    # correct path when compiling in Cygwin but with
                    # normal Win Python
                    if d.startswith('/usr/lib'):
                        s, o = exec_command(['cygpath', '-w', d],
                                            use_tee=False)
                        if not s: d = o
                    library_dirs.append(d)
                for libname in self.fcompiler.libraries or []:
                    if libname not in libraries:
                        libraries.append(libname)
        for libname in libraries:
            if libname.startswith('msvc'): continue
            fileexists = False
            for libdir in library_dirs or []:
                libfile = os.path.join(libdir, '%s.lib' % (libname))
                if os.path.isfile(libfile):
                    fileexists = True
                    break
            if fileexists: continue
            # make g77-compiled static libs available to MSVC
            fileexists = False
            for libdir in library_dirs:
                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                if os.path.isfile(libfile):
                    # copy libname.a file to name.lib so that MSVC linker
                    # can find it
                    libfile2 = os.path.join(libdir, '%s.lib' % (libname))
                    copy_file(libfile, libfile2)
                    # Remember the copy so it is cleaned up afterwards.
                    self.temp_files.append(libfile2)
                    fileexists = True
                    break
            if fileexists: continue
            log.warn('could not find library %r in directories %s' \
                     % (libname, library_dirs))
    elif self.compiler.compiler_type == 'mingw32':
        generate_manifest(self)
    return self._wrap_method(old_config._link, lang,
                             (body, headers, include_dirs,
                              libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
    """Return True when *header* can be #included successfully."""
    self._check_compiler()
    dummy_body = "/* we need a dummy line to make distutils happy */"
    return self.try_compile(dummy_body, [header], include_dirs)
def check_decl(self, symbol,
               headers=None, include_dirs=None):
    """Return True if *symbol* is declared (or defined as a macro).

    If the symbol is a macro the #ifndef branch is skipped; otherwise the
    reference to it must compile, i.e. it must be declared in *headers*.
    """
    self._check_compiler()
    body = """
int main()
{
#ifndef %s
    (void) %s;
#endif
    ;
    return 0;
}""" % (symbol, symbol)
    return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
                     headers=None, include_dirs=None):
    """Return True if preprocessor expression *symbol* evaluates to true.

    Compilation fails via #error when the macro is false or undefined.
    """
    self._check_compiler()
    body = """
int main()
{
#if %s
#else
#error false or undefined macro
#endif
    ;
    return 0;
}""" % (symbol,)
    return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
               library_dirs=None):
    """Check type availability. Return True if the type can be compiled,
    False otherwise"""
    self._check_compiler()
    # The program only has to compile: referencing the type in a cast and
    # in sizeof() fails at compile time when the type is unknown.
    body = r"""
int main() {
  if ((%(name)s *) 0)
    return 0;
  if (sizeof (%(name)s))
    return 0;
}
""" % {'name': type_name}
    st = False
    try:
        try:
            # BUG FIX: body is already fully formatted above; the original
            # applied a second `% {'type': type_name}` to the finished
            # string, which is redundant and breaks when type_name
            # contains a '%'.
            self._compile(body, headers, include_dirs, 'c')
            st = True
        except distutils.errors.CompileError:
            st = False
    finally:
        self._clean()
    return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
    """Check size of a given type.

    Returns the size in bytes, determined entirely at *compile* time via
    negative-array-size tricks (no test program is executed). If
    *expected* (a sequence of candidate sizes) is given, those are tried
    first; otherwise an exponential scan finds an upper bound and a
    binary search pins down the exact size.
    """
    self._check_compiler()
    # First check the type can be compiled
    body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
    test_array [0] = 0
    ;
    return 0;
}
"""
    self._compile(body % {'type': type_name},
                  headers, include_dirs, 'c')
    self._clean()
    if expected:
        # Try each candidate: compiles only when sizeof(type) == size.
        body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
        for size in expected:
            try:
                self._compile(body % {'type': type_name, 'size': size},
                              headers, include_dirs, 'c')
                self._clean()
                return size
            except CompileError:
                pass
    # this fails to *compile* if size > sizeof(type)
    body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
    static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
    test_array [0] = 0
    ;
    return 0;
}
"""
    # The principle is simple: we first find low and high bounds of size
    # for the type, where low/high are looked up on a log scale. Then, we
    # do a binary search to find the exact size between low and high
    low = 0
    mid = 0
    while True:
        try:
            self._compile(body % {'type': type_name, 'size': mid},
                          headers, include_dirs, 'c')
            self._clean()
            break
        except CompileError:
            #log.info("failure to test for bound %d" % mid)
            low = mid + 1
            mid = 2 * mid + 1
    high = mid
    # Binary search:
    while low != high:
        mid = (high - low) // 2 + low
        try:
            self._compile(body % {'type': type_name, 'size': mid},
                          headers, include_dirs, 'c')
            self._clean()
            high = mid
        except CompileError:
            low = mid + 1
    return low
def check_func(self, func,
               headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               decl=False, call=False, call_args=None):
    """Return the result of trying to link a snippet referencing *func*.

    A small C program is generated that mentions (or, with ``call=True``,
    actually calls) the function, then handed to ``self.try_link``.
    """
    # clean up distutils's config a bit: add void to main(), and
    # return a value.
    self._check_compiler()
    lines = []
    if decl:
        lines.append("int %s (void);" % func)
    # Handle MSVC intrinsics: force MS compiler to make a function call.
    # Useful to test for some functions when built with optimization on, to
    # avoid build error because the intrinsic and our 'fake' test
    # declaration do not match.
    lines.extend(["#ifdef _MSC_VER",
                  "#pragma function(%s)" % func,
                  "#endif"])
    lines.append("int main (void) {")
    if call:
        args = '' if call_args is None else call_args
        lines.append(" %s(%s);" % (func, args))
    else:
        # Merely naming the symbol is enough to force the linker to
        # resolve it.
        lines.append(" %s;" % func)
    lines.extend([" return 0;", "}"])
    source = "\n".join(lines) + "\n"
    return self.try_link(source, headers, include_dirs,
                         libraries, library_dirs)
def check_funcs_once(self, funcs,
                     headers=None, include_dirs=None,
                     libraries=None, library_dirs=None,
                     decl=False, call=False, call_args=None):
    """Check a list of functions at once.

    This is useful to speed up things, since all the functions in the funcs
    list will be put in one compilation unit.

    Arguments
    ---------
    funcs : seq
        list of functions to test
    include_dirs : seq
        list of header paths
    libraries : seq
        list of libraries to link the code snippet to
    library_dirs : seq
        list of library paths
    decl : dict
        for every (key, value), the declaration in the value will be
        used for function in key. If a function is not in the
        dictionary, no declaration will be used.
    call : dict
        for every item (f, value), if the value is True, a call will be
        done to the function f.
    call_args : dict, optional
        maps a function name to the argument string used when calling it;
        functions absent from the mapping are called with no arguments.
    """
    self._check_compiler()
    body = []
    if decl:
        # Emit a prototype only for functions explicitly flagged in `decl`.
        for f, v in decl.items():
            if v:
                body.append("int %s (void);" % f)
    # Handle MS intrinsics. See check_func for more info.
    body.append("#ifdef _MSC_VER")
    for func in funcs:
        body.append("#pragma function(%s)" % func)
    body.append("#endif")
    body.append("int main (void) {")
    if call:
        for f in funcs:
            if f in call and call[f]:
                # Real call with the supplied argument string (if any).
                if not (call_args and f in call_args and call_args[f]):
                    args = ''
                else:
                    args = call_args[f]
                body.append(" %s(%s);" % (f, args))
            else:
                body.append(" %s;" % f)
    else:
        # Just referencing the symbols forces the linker to resolve them.
        for f in funcs:
            body.append(" %s;" % f)
    body.append(" return 0;")
    body.append("}")
    body = '\n'.join(body) + "\n"
    return self.try_link(body, headers, include_dirs,
                         libraries, library_dirs)
def check_inline(self):
    """Return the inline keyword recognized by the compiler, empty string
    otherwise."""
    # Delegates to the module-level helper of the same name, which probes
    # the compiler with candidate keywords.
    return check_inline(self)
def check_compiler_gcc4(self):
    """Return True if the C compiler is gcc >= 4."""
    # Delegates to the module-level helper of the same name.
    return check_compiler_gcc4(self)
def get_output(self, body, headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               lang="c"):
    """Try to compile, link to an executable, and run a program
    built from 'body' and 'headers'. Returns the exit status code
    of the program and its output.

    .. deprecated::
        Running probe executables on the build machine breaks
        cross-compilation; see the warning emitted below.
    """
    warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
                  "Usage of get_output is deprecated: please do not \n" \
                  "use it anymore, and avoid configuration checks \n" \
                  "involving running executable on the target machine.\n" \
                  "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                  DeprecationWarning)
    from distutils.ccompiler import CompileError, LinkError
    self._check_compiler()
    # 255 signals "did not run" unless overwritten below.
    exitcode, output = 255, ''
    try:
        # Capture everything the compiler/linker prints to stdout.
        grabber = GrabStdout()
        try:
            src, obj, exe = self._link(body, headers, include_dirs,
                                       libraries, library_dirs, lang)
            grabber.restore()
        except:
            # Keep whatever was printed before re-raising, and make sure
            # stdout is restored either way.
            output = grabber.data
            grabber.restore()
            raise
        exe = os.path.join('.', exe)
        exitstatus, output = exec_command(exe, execute_in='.')
        if hasattr(os, 'WEXITSTATUS'):
            # POSIX: decode the wait()-style status word.
            exitcode = os.WEXITSTATUS(exitstatus)
            if os.WIFSIGNALED(exitstatus):
                sig = os.WTERMSIG(exitstatus)
                log.error('subprocess exited with signal %d' % (sig,))
                if sig == signal.SIGINT:
                    # control-C
                    raise KeyboardInterrupt
        else:
            # Non-POSIX platforms report the raw status directly.
            exitcode = exitstatus
        log.info("success!")
    except (CompileError, LinkError):
        log.info("failure.")
    self._clean()
    return exitcode, output
class GrabStdout(object):
    """Tee for sys.stdout.

    On construction it installs itself as ``sys.stdout``; every write is
    forwarded to the real stream *and* accumulated in ``self.data``.
    Capture lasts until ``restore()`` puts the original stream back.
    """

    def __init__(self):
        # Remember the real stdout, then take its place.
        self.sys_stdout = sys.stdout
        self.data = ''
        sys.stdout = self

    def write(self, data):
        # Forward to the real stream and keep a copy.
        self.sys_stdout.write(data)
        self.data += data

    def flush(self):
        self.sys_stdout.flush()

    def restore(self):
        """Reinstall the original sys.stdout."""
        sys.stdout = self.sys_stdout
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from toscaparser.common import exception
from toscaparser import functions
from toscaparser.tests.base import TestCase
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.utils.gettextutils import _
class IntrinsicFunctionsTest(TestCase):
    """Tests for TOSCA intrinsic functions (get_property / get_input)
    against the single-instance wordpress sample template."""

    # Shared template, parsed once at class-definition time.
    tosca_tpl = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "data/tosca_single_instance_wordpress.yaml")
    params = {'db_name': 'my_wordpress', 'db_user': 'my_db_user',
              'db_root_pwd': '12345678'}
    tosca = ToscaTemplate(tosca_tpl, parsed_params=params)

    def _get_node(self, node_name, tosca=None):
        """Return the first node template named *node_name* (from *tosca*
        or, by default, the shared class-level template)."""
        if tosca is None:
            tosca = self.tosca
        return [
            node for node in tosca.nodetemplates
            if node.name == node_name][0]

    def _get_operation(self, interfaces, operation):
        """Return the first interface in *interfaces* named *operation*."""
        return [
            interface for interface in interfaces
            if interface.name == operation][0]

    def _get_property(self, node_template, property_name):
        """Return the value of *property_name* on *node_template*."""
        return [prop.value for prop in node_template.get_properties_objects()
                if prop.name == property_name][0]

    def _get_inputs_dict(self):
        """Map each template input name to its default value."""
        inputs = {}
        for input in self.tosca.inputs:
            inputs[input.name] = input.default
        return inputs

    def _get_input(self, name):
        """Return the default value of the input called *name*."""
        # BUG FIX: the original performed the lookup but never returned it,
        # so this helper always yielded None.
        return self._get_inputs_dict()[name]

    def test_get_property(self):
        """get_property resolves a plain capability property value."""
        wordpress = self._get_node('wordpress')
        operation = self._get_operation(wordpress.interfaces, 'configure')
        wp_db_password = operation.inputs['wp_db_password']
        self.assertIsInstance(wp_db_password, functions.GetProperty)
        result = wp_db_password.result()
        self.assertEqual('wp_pass', result)

    def test_get_property_with_input_param(self):
        """get_property resolves through a get_input-backed property."""
        wordpress = self._get_node('wordpress')
        operation = self._get_operation(wordpress.interfaces, 'configure')
        wp_db_user = operation.inputs['wp_db_user']
        self.assertIsInstance(wp_db_user, functions.GetProperty)
        result = wp_db_user.result()
        self.assertEqual('my_db_user', result)

    def test_unknown_capability_property(self):
        """Referencing an unknown capability property fails validation."""
        self.assertRaises(exception.ValidationError, self._load_template,
                          'functions/test_unknown_capability_property.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            KeyError,
            _('\'Property "unknown" was not found in capability '
              '"database_endpoint" of node template "database" referenced '
              'from node template "database".\''))

    def test_get_input_in_properties(self):
        """Every mysql_dbms property is a GetInput over a known input."""
        mysql_dbms = self._get_node('mysql_dbms')
        expected_inputs = ['db_root_pwd', 'db_port']
        props = mysql_dbms.get_properties()
        for key in props.keys():
            prop = props[key]
            self.assertIsInstance(prop.value, functions.GetInput)
            expected_inputs.remove(prop.value.input_name)
        # All expected inputs must have been consumed exactly once.
        self.assertListEqual(expected_inputs, [])

    def test_get_input_validation(self):
        """Bad get_input usages (unknown input, bad arity) are rejected."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_unknown_input_in_property.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownInputError,
            _('Unknown input "objectstore_name".'))
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_unknown_input_in_interface.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            exception.UnknownInputError,
            _('Unknown input "image_id".'))
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_invalid_function_signature.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Expected one argument for function "get_input" but received '
              '"[\'cpus\', \'cpus\']".'))

    def test_get_input_default_value_result(self):
        """Defaults apply when unset; parsed_params override defaults."""
        mysql_dbms = self._get_node('mysql_dbms')
        dbms_port = self._get_property(mysql_dbms, 'port')
        self.assertEqual(3306, dbms_port.result())
        dbms_root_password = self._get_property(mysql_dbms,
                                                'root_password')
        self.assertEqual(dbms_root_password.result(), '12345678')

    def test_get_property_with_host(self):
        """get_property with the HOST keyword walks the hosting chain."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/functions/test_get_property_with_host.yaml")
        mysql_database = self._get_node('mysql_database',
                                        ToscaTemplate(tosca_tpl,
                                                      parsed_params={
                                                          'db_root_pwd': '123'
                                                      }))
        operation = self._get_operation(mysql_database.interfaces, 'configure')
        db_port = operation.inputs['db_port']
        self.assertIsInstance(db_port, functions.GetProperty)
        result = db_port.result()
        self.assertEqual(3306, result)
        test = operation.inputs['test']
        self.assertIsInstance(test, functions.GetProperty)
        result = test.result()
        self.assertEqual(1, result)

    def test_get_property_with_nested_params(self):
        """get_property resolves nested property names and list indexes."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/functions/tosca_nested_property_names_indexes.yaml")
        webserver = self._get_node('wordpress',
                                   ToscaTemplate(tosca_tpl,
                                                 parsed_params={
                                                     'db_root_pwd': '1234'}))
        operation = self._get_operation(webserver.interfaces, 'configure')
        wp_endpoint_prot = operation.inputs['wp_endpoint_protocol']
        self.assertIsInstance(wp_endpoint_prot, functions.GetProperty)
        self.assertEqual('tcp', wp_endpoint_prot.result())
        wp_list_prop = operation.inputs['wp_list_prop']
        self.assertIsInstance(wp_list_prop, functions.GetProperty)
        self.assertEqual(3, wp_list_prop.result())

    def test_get_property_with_capabilties_inheritance(self):
        """Capability properties are found via type inheritance."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/functions/test_capabilties_inheritance.yaml")
        some_node = self._get_node('some_node',
                                   ToscaTemplate(tosca_tpl,
                                                 parsed_params={
                                                     'db_root_pwd': '1234'}))
        operation = self._get_operation(some_node.interfaces, 'configure')
        some_input = operation.inputs['some_input']
        self.assertIsInstance(some_input, functions.GetProperty)
        self.assertEqual('someval', some_input.result())

    def test_get_property_source_target_keywords(self):
        """get_property supports SOURCE/TARGET inside relationship ops."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/functions/test_get_property_source_target_keywords.yaml")
        tosca = ToscaTemplate(tosca_tpl,
                              parsed_params={'db_root_pwd': '1234'})
        # Grab the first relationship template found in the topology.
        for node in tosca.nodetemplates:
            for relationship, trgt in node.relationships.items():
                rel_template = trgt.get_relationship_template()[0]
                break
        operation = self._get_operation(rel_template.interfaces,
                                        'pre_configure_source')
        target_test = operation.inputs['target_test']
        self.assertIsInstance(target_test, functions.GetProperty)
        self.assertEqual(1, target_test.result())
        source_port = operation.inputs['source_port']
        self.assertIsInstance(source_port, functions.GetProperty)
        self.assertEqual(3306, source_port.result())
class GetAttributeTest(TestCase):
    """Tests for the TOSCA ``get_attribute`` intrinsic function: argument
    validation and the HOST/SOURCE/TARGET keyword semantics."""

    def _load_template(self, filename):
        """Parse *filename* (relative to this module's data dir)."""
        return ToscaTemplate(os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'data',
            filename),
            parsed_params={'db_root_pwd': '1234'})

    def _get_operation(self, interfaces, operation):
        """Return the first interface in *interfaces* named *operation*."""
        return [
            interface for interface in interfaces
            if interface.name == operation][0]

    def test_get_attribute_in_outputs(self):
        """get_attribute in outputs parses into a GetAttribute object."""
        tpl = self._load_template('tosca_single_instance_wordpress.yaml')
        website_url_output = [
            x for x in tpl.outputs if x.name == 'website_url'][0]
        self.assertIsInstance(website_url_output.value, functions.GetAttribute)
        self.assertEqual('server', website_url_output.value.node_template_name)
        self.assertEqual('private_address',
                         website_url_output.value.attribute_name)

    def test_get_attribute_invalid_args(self):
        """Zero or one arguments is rejected with a descriptive error."""
        expected_msg = _('Illegal arguments for function "get_attribute".'
                         ' Expected arguments: "node-template-name", '
                         '"req-or-cap"(optional), "property name"')
        err = self.assertRaises(ValueError,
                                functions.get_function, None, None,
                                {'get_attribute': []})
        self.assertIn(expected_msg, six.text_type(err))
        err = self.assertRaises(ValueError,
                                functions.get_function, None, None,
                                {'get_attribute': ['x']})
        self.assertIn(expected_msg, six.text_type(err))

    def test_get_attribute_unknown_node_template_name(self):
        """Referencing a nonexistent node template fails validation."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_get_attribute_unknown_node_template_name.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            KeyError,
            _('\'Node template "unknown_node_template" was not found.\''))

    def test_get_attribute_unknown_attribute(self):
        """Referencing a nonexistent attribute fails validation."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_get_attribute_unknown_attribute_name.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            KeyError,
            _('\'Attribute "unknown_attribute" was not found in node template '
              '"server".\''))

    def test_get_attribute_host_keyword(self):
        """HOST resolves to the hosting node for both dbms and database."""
        tpl = self._load_template(
            'functions/test_get_attribute_host_keyword.yaml')

        def assert_get_attribute_host_functionality(node_template_name):
            # Both nodes sit on 'server' in the hosting chain.
            node = [x for x in tpl.nodetemplates
                    if x.name == node_template_name][0]
            configure_op = [
                x for x in node.interfaces if x.name == 'configure'][0]
            ip_addr_input = configure_op.inputs['ip_address']
            self.assertIsInstance(ip_addr_input, functions.GetAttribute)
            self.assertEqual('server',
                             ip_addr_input.get_referenced_node_template().name)

        assert_get_attribute_host_functionality('dbms')
        assert_get_attribute_host_functionality('database')

    def test_get_attribute_host_not_found(self):
        """HOST with no HostedOn relationship in the chain is an error."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_get_attribute_host_not_found.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('"get_attribute: [ HOST, ... ]" was used in node template '
              '"server" but "tosca.relationships.HostedOn" was not found in '
              'the relationship chain.'))

    def test_get_attribute_illegal_host_in_outputs(self):
        """HOST is meaningless in the outputs section and is rejected."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_get_attribute_illegal_host_in_outputs.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('"get_attribute: [ HOST, ... ]" is not allowed in "outputs" '
              'section of the TOSCA template.'))

    def test_get_attribute_with_index(self):
        """Numeric indexes into list attributes parse cleanly."""
        self._load_template(
            'functions/test_get_attribute_with_index.yaml')

    def test_get_attribute_with_index_error(self):
        """A bare index with no attribute name is rejected."""
        self.assertRaises(
            exception.ValidationError, self._load_template,
            'functions/test_get_attribute_with_index_error.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Illegal arguments for function "get_attribute". '
              'Unexpected attribute/index value "0"'))

    def test_get_attribute_source_target_keywords(self):
        """SOURCE/TARGET resolve inside relationship operations."""
        tosca_tpl = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "data/functions/test_get_attribute_source_target_keywords.yaml")
        tosca = ToscaTemplate(tosca_tpl,
                              parsed_params={'db_root_pwd': '12345678'})
        # Grab the first relationship template found in the topology.
        for node in tosca.nodetemplates:
            for relationship, trgt in node.relationships.items():
                rel_template = trgt.get_relationship_template()[0]
                break
        operation = self._get_operation(rel_template.interfaces,
                                        'pre_configure_source')
        target_test = operation.inputs['target_test']
        self.assertIsInstance(target_test, functions.GetAttribute)
        source_port = operation.inputs['source_port']
        self.assertIsInstance(source_port, functions.GetAttribute)

    def test_get_attribute_with_nested_params(self):
        """Nested attribute paths parse cleanly."""
        self._load_template(
            'functions/test_get_attribute_with_nested_params.yaml')

    def test_implicit_attribute(self):
        """Attributes implicitly derived from properties are resolvable."""
        self.assertIsNotNone(self._load_template(
            'functions/test_get_implicit_attribute.yaml'))
class ConcatTest(TestCase):
    """Validation tests for the TOSCA ``concat`` intrinsic function."""

    def _load_template(self, filename):
        """Parse *filename*, resolved relative to this test module."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        return ToscaTemplate(os.path.join(base_dir, filename))

    def test_validate_concat(self):
        """A valid concat parses; a zero-argument concat is rejected."""
        tosca = self._load_template("data/functions/test_concat.yaml")
        url_outputs = [out for out in tosca.outputs if out.name == 'url']
        func = functions.get_function(self, tosca.outputs,
                                      url_outputs[0].value)
        self.assertIsInstance(func, functions.Concat)

        self.assertRaises(exception.ValidationError, self._load_template,
                          'data/functions/test_concat_invalid.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Invalid arguments for function "concat". Expected at least '
              'one arguments.'))
class TokenTest(TestCase):
    """Validation tests for the TOSCA ``token`` intrinsic function."""

    def _load_template(self, filename):
        """Parse *filename*, resolved relative to this test module."""
        base_dir = os.path.dirname(os.path.abspath(__file__))
        return ToscaTemplate(os.path.join(base_dir, filename))

    def test_validate_token(self):
        """A valid token parses; malformed argument lists are rejected."""
        tosca = self._load_template("data/functions/test_token.yaml")
        url_outputs = [out for out in tosca.outputs if out.name == 'url']
        func = functions.get_function(self, tosca.outputs,
                                      url_outputs[0].value)
        self.assertIsInstance(func, functions.Token)

        self.assertRaises(exception.ValidationError, self._load_template,
                          'data/functions/test_token_invalid.yaml')
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Invalid arguments for function "token". Expected at least '
              'three arguments.'))
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Invalid arguments for function "token". Expected '
              'integer value as third argument.'))
        exception.ExceptionCollector.assertExceptionMessage(
            ValueError,
            _('Invalid arguments for function "token". Expected '
              'single char value as second argument.'))
| |
#!/usr/bin/env python
"""
EVENNIA SERVER STARTUP SCRIPT
This is the start point for running Evennia.
Sets the appropriate environmental variables and launches the server
and portal through the runner. Run without arguments to get a
menu. Run the script with the -h flag to see usage information.

NOTE: this is a Python 2 script (print statements, raw_input).
"""
import os
import sys, signal
from optparse import OptionParser
from subprocess import Popen
# Set the Python path up so we can get to settings.py from here.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ['DJANGO_SETTINGS_MODULE'] = 'game.settings'
if not os.path.exists('settings.py'):
    # make sure we have a settings.py file.
    print " No settings.py file found. launching manage.py ..."
    # this triggers the settings file creation.
    import game.manage
    sys.exit()
# signal processing
# SIGINT is the signal used for all reload/stop requests to the
# running twistd processes.
SIG = signal.SIGINT
# Interactive help text shown from the menu's 'h' option; interpolated
# with the Evennia version string via HELPENTRY % EVENNIA_VERSION.
HELPENTRY = \
"""
(version %s)
This program launches Evennia with various options. You can access all
this functionality directly from the command line; for example option
five (restart server) would be "evennia.py restart server". Use
"evennia.py -h" for command line options.
Evennia consists of two separate programs that both must be running
for the game to work as it should:
Portal - the connection to the outside world (via telnet, web, ssh
etc). This is normally running as a daemon and don't need to
be reloaded unless you are debugging a new connection
protocol. As long as this is running, players won't loose
their connection to your game. Only one instance of Portal
will be started, more will be ignored.
Server - the game server itself. This will often need to be reloaded
as you develop your game. The Portal will auto-connect to the
Server whenever the Server activates. We will also make sure
to automatically restart this whenever it is shut down (from
here or from inside the game or via task manager etc). Only
one instance of Server will be started, more will be ignored.
In a production environment you will want to run with the default
option (1), which runs as much as possible as a background
process. When developing your game it is however convenient to
directly see tracebacks on standard output, so starting with options
2-4 may be a good bet. As you make changes to your code, reload the
server (option 5) to make it available to users.
Reload and stop is not well supported in Windows. If you have issues, log
into the game to stop or restart the server instead.
"""
# Main interactive menu printed by run_menu(); option numbers map to the
# branches handled there (1-4 start, 5-6 reload, 7-9 stop).
MENU = \
"""
+---------------------------------------------------------------------------+
| |
| Welcome to the Evennia launcher! |
| |
| Pick an option below. Use 'h' to get help. |
| |
+--- Starting (will not restart already running processes) -----------------+
| |
| 1) (default): Start Server and Portal. Portal starts in daemon mode.|
| All output is to logfiles. |
| 2) (game debug): Start Server and Portal. Portal starts in daemon mode.|
| Server outputs to stdout instead of logfile. |
| 3) (portal debug): Start Server and Portal. Portal starts in non-daemon |
| mode (can be reloaded) and logs to stdout. |
| 4) (full debug): Start Server and Portal. Portal starts in non-daemon |
| mode (can be reloaded). Both log to stdout. |
| |
+--- Restarting (must first be started) ------------------------------------+
| |
| 5) Reload the Server |
| 6) Reload the Portal (only works in non-daemon mode. If running |
| in daemon mode, Portal needs to be stopped/started manually. |
| |
+--- Stopping (must first be started) --------------------------------------+
| |
| 7) Stopping both Portal and Server. Server will not restart. |
| 8) Stopping only Server. Server will not restart. |
| 9) Stopping only Portal. |
| |
+---------------------------------------------------------------------------+
| h) Help |
| q) Quit |
+---------------------------------------------------------------------------+
"""
#
# System Configuration and setup
#
SERVER_PIDFILE = "server.pid"
PORTAL_PIDFILE = "portal.pid"
# Flag files written before signalling; the processes read them at
# shutdown to decide whether to restart.
SERVER_RESTART = "server.restart"
PORTAL_RESTART = "portal.restart"
# Get the settings
from django.conf import settings
from src.utils.utils import get_evennia_version
EVENNIA_VERSION = get_evennia_version()
# Setup access of the evennia server itself
SERVER_PY_FILE = os.path.join(settings.SRC_DIR, 'server/server.py')
PORTAL_PY_FILE = os.path.join(settings.SRC_DIR, 'server/portal.py')
# Get logfile names
SERVER_LOGFILE = settings.SERVER_LOG_FILE
PORTAL_LOGFILE = settings.PORTAL_LOG_FILE
# Check so a database exists and is accessible
from django.db import DatabaseError
from src.objects.models import ObjectDB
try:
    # Any successful query (even DoesNotExist) proves the DB is reachable.
    test = ObjectDB.objects.get(id=1)
except ObjectDB.DoesNotExist:
    pass # this is fine at this point
except DatabaseError,e:
    print """
Your database does not seem to be set up correctly.
(error was '%s')
Please run:
python manage.py syncdb (create an admin user when prompted)
python manage.py migrate
When you have a database set up, rerun evennia.py.
""" % e
    sys.exit()
# Add this to the environmental variable for the 'twistd' command.
currpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if 'PYTHONPATH' in os.environ:
    os.environ['PYTHONPATH'] += (":%s" % currpath)
else:
    os.environ['PYTHONPATH'] = currpath
TWISTED_BINARY = 'twistd'
if os.name == 'nt':
    # Windows needs more work to get the correct binary
    try:
        # Test for for win32api
        import win32api
    except ImportError:
        print """
ERROR: Unable to import win32api, which Twisted requires to run.
You may download it from:
http://sourceforge.net/projects/pywin32
or
http://starship.python.net/crew/mhammond/win32/Downloads.html"""
        sys.exit()
    if not os.path.exists('twistd.bat'):
        # Test for executable twisted batch file. This calls the twistd.py
        # executable that is usually not found on the path in Windows.
        # It's not enough to locate scripts.twistd, what we want is the
        # executable script C:\PythonXX/Scripts/twistd.py. Alas we cannot
        # hardcode this location since we don't know if user has Python
        # in a non-standard location, so we try to figure it out.
        from twisted.scripts import twistd
        twistd_path = os.path.abspath(
            os.path.join(os.path.dirname(twistd.__file__),
                         os.pardir, os.pardir, os.pardir, os.pardir,
                         'scripts', 'twistd.py'))
        bat_file = open('twistd.bat','w')
        bat_file.write("@\"%s\" \"%s\" %%*" % (sys.executable, twistd_path))
        bat_file.close()
        print """
INFO: Since you are running Windows, a file 'twistd.bat' was
created for you. This is a simple batch file that tries to call
the twisted executable. Evennia determined this to be:
%(twistd_path)s
If you run into errors at startup you might need to edit
twistd.bat to point to the actual location of the Twisted
executable (usually called twistd.py) on your machine.
This procedure is only done once. Run evennia.py again when you
are ready to start the server.
""" % {'twistd_path': twistd_path}
        sys.exit()
    TWISTED_BINARY = 'twistd.bat'
# Functions
def get_pid(pidfile):
    """
    Get the PID (Process ID) by trying to access
    an PID file.

    pidfile - path to the pid file.

    Returns the file's contents (the pid, as a string) or None when the
    file does not exist.
    """
    pid = None
    if os.path.exists(pidfile):
        # BUG FIX: the file handle was never closed; a context manager
        # releases it deterministically even if read() raises.
        with open(pidfile, 'r') as f:
            pid = f.read()
    return pid
def del_pid(pidfile):
    """
    Remove a leftover pid file from disk, if present.

    The pidfile should normally be removed after a process has finished,
    but when sending certain signals it remains behind, so it has to be
    cleaned up manually.
    """
    pidfile_present = os.path.exists(pidfile)
    if pidfile_present:
        os.remove(pidfile)
def kill(pidfile, signal=SIG, succmsg="", errmsg="", restart_file=SERVER_RESTART, restart="reload"):
    """
    Send a kill signal to a process based on PID. A customized success/error
    message is printed.

    pidfile      - path to the pid file identifying the process.
    signal       - signal to send (SIGINT by default).
    succmsg      - message printed when the signal was delivered.
    errmsg       - message printed when no pid file/process was found.
    restart_file - flag file the process reads to decide restart behaviour.
    restart      - value written to restart_file ("reload", "shutdown",
                   True or False).
    """
    pid = get_pid(pidfile)
    if pid:
        if os.name == 'nt':
            if sys.version < "2.7":
                print "Windows requires Python 2.7 or higher for this operation."
                return
            # On Windows removing the pid file stands in for signalling -
            # NOTE(review): presumably the runner polls for it; confirm.
            os.remove(pidfile)
        # set restart/norestart flag
        f = open(restart_file, 'w')
        f.write(str(restart))
        f.close()
        try:
            os.kill(int(pid), signal)
        except OSError:
            # Stale pid file: the process it names is no longer alive.
            print "Process %(pid)s could not be signalled. The PID file '%(pidfile)s' seems stale. Try removing it." % {'pid': pid, 'pidfile': pidfile}
            return
        print "Evennia:", succmsg
        return
    print "Evennia:", errmsg
def run_menu():
"""
This launches an interactive menu.
"""
cmdstr = [sys.executable, "runner.py"]
while True:
# menu loop
print MENU
inp = raw_input(" option > ")
# quitting and help
if inp.lower() == 'q':
sys.exit()
elif inp.lower() == 'h':
print HELPENTRY % EVENNIA_VERSION
raw_input("press <return> to continue ...")
continue
# options
try:
inp = int(inp)
except ValueError:
print "Not a valid option."
continue
errmsg = "The %s does not seem to be running."
if inp < 5:
if inp == 1:
pass # default operation
elif inp == 2:
cmdstr.extend(['--iserver'])
elif inp == 3:
cmdstr.extend(['--iportal'])
elif inp == 4:
cmdstr.extend(['--iserver', '--iportal'])
return cmdstr
elif inp < 10:
if inp == 5:
if os.name == 'nt':
print "This operation is not supported under Windows. Log into the game to restart/reload the server."
return
kill(SERVER_PIDFILE, SIG, "Server reloaded.", errmsg % "Server", restart="reload")
elif inp == 6:
if os.name == 'nt':
print "This operation is not supported under Windows."
return
kill(PORTAL_PIDFILE, SIG, "Portal reloaded (or stopped if in daemon mode).", errmsg % "Portal", restart=True)
elif inp == 7:
kill(SERVER_PIDFILE, SIG, "Stopped Portal.", errmsg % "Portal", PORTAL_RESTART, restart=False)
kill(PORTAL_PIDFILE, SIG, "Stopped Server.", errmsg % "Server", restart="shutdown")
elif inp == 8:
kill(PORTAL_PIDFILE, SIG, "Stopped Server.", errmsg % "Server", restart="shutdown")
elif inp == 9:
kill(SERVER_PIDFILE, SIG, "Stopped Portal.", errmsg % "Portal", PORTAL_RESTART, restart=False)
return
else:
print "Not a valid option."
return None
def handle_args(options, mode, service):
    """
    Handle argument options given on the command line.

    options - parsed object for command line
    mode - str; start/stop etc
    service - str; server, portal or all

    Returns a Popen-ready command list for the runner when starting,
    otherwise None (the kill/reload side effects happen directly here).
    """
    inter = options.interactive
    cmdstr = [sys.executable, "runner.py"]
    errmsg = "The %s does not seem to be running."
    if mode == 'start':
        # launch the error checker. Best to catch the errors already here.
        error_check_python_modules()
        # starting one or many services
        if service == 'server':
            if inter:
                cmdstr.append('--iserver')
            cmdstr.append('--noportal')
        elif service == 'portal':
            if inter:
                cmdstr.append('--iportal')
            cmdstr.append('--noserver')
        else: # all
            # for convenience we don't start logging of portal, only of server with this command.
            if inter:
                cmdstr.extend(['--iserver'])
        return cmdstr
    elif mode == 'reload':
        # restarting services
        if os.name == 'nt':
            print "Restarting from command line is not supported under Windows. Log into the game to restart."
            return
        if service == 'server':
            kill(SERVER_PIDFILE, SIG, "Server reloaded.", errmsg % 'Server', restart="reload")
        elif service == 'portal':
            print """
Note: Portal usually don't need to be reloaded unless you are debugging in interactive mode.
If Portal was running in default Daemon mode, it cannot be restarted. In that case you have
to restart it manually with 'evennia.py start portal'
"""
            kill(PORTAL_PIDFILE, SIG, "Portal reloaded (or stopped, if it was in daemon mode).", errmsg % 'Portal', PORTAL_RESTART)
        else: # all
            # default mode, only restart server
            kill(SERVER_PIDFILE, SIG, "Server reload.", errmsg % 'Server', restart="reload")
    elif mode == 'stop':
        # stop processes, avoiding reload
        if service == 'server':
            kill(SERVER_PIDFILE, SIG, "Server stopped.", errmsg % 'Server', restart="shutdown")
        elif service == 'portal':
            kill(PORTAL_PIDFILE, SIG, "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART, restart=False)
        else:
            # all: stop Portal first, then Server with the no-restart flag.
            kill(PORTAL_PIDFILE, SIG, "Portal stopped.", errmsg % 'Portal', PORTAL_RESTART, restart=False)
            kill(SERVER_PIDFILE, SIG, "Server stopped.", errmsg % 'Server', restart="shutdown")
    return None
def error_check_python_modules():
    """
    Import settings modules in settings. This will raise exceptions on
    pure python-syntax issues which are hard to catch gracefully
    with exceptions in the engine (since they are formatting errors in
    the python source files themselves). Best they fail already here
    before we get any further.
    """
    def imp(path, split=True):
        # Import the module named by `path`; when split=True the path is
        # "module.attr" and only the module part is imported (with the
        # attribute in the fromlist to force submodule resolution).
        mod, fromlist = path, "None"
        if split:
            mod, fromlist = path.rsplit('.', 1)
        __import__(mod, fromlist=[fromlist])
    # core modules
    imp(settings.COMMAND_PARSER)
    imp(settings.SEARCH_AT_RESULT)
    imp(settings.SEARCH_AT_MULTIMATCH_INPUT)
    imp(settings.CONNECTION_SCREEN_MODULE, split=False)
    #imp(settings.AT_INITIAL_SETUP_HOOK_MODULE, split=False)
    for path in settings.LOCK_FUNC_MODULES:
        imp(path, split=False)
    # cmdsets
    # Fail loudly on settings names that were renamed upstream.
    deprstring = "settings.%s should be renamed to %s. If defaults are used, their path/classname must be updated (see src/settings_default.py)."
    if hasattr(settings, "CMDSET_DEFAULT"): raise DeprecationWarning(deprstring % ("CMDSET_DEFAULT", "CMDSET_CHARACTER"))
    if hasattr(settings, "CMDSET_OOC"): raise DeprecationWarning(deprstring % ("CMDSET_OOC", "CMDSET_PLAYER"))
    from src.commands import cmdsethandler
    if not cmdsethandler.import_cmdset(settings.CMDSET_UNLOGGEDIN, None): print "Warning: CMDSET_UNLOGGED failed to load!"
    if not cmdsethandler.import_cmdset(settings.CMDSET_CHARACTER, None): print "Warning: CMDSET_CHARACTER failed to load"
    if not cmdsethandler.import_cmdset(settings.CMDSET_PLAYER, None): print "Warning: CMDSET_PLAYER failed to load"
    # typeclasses
    imp(settings.BASE_PLAYER_TYPECLASS)
    imp(settings.BASE_OBJECT_TYPECLASS)
    imp(settings.BASE_CHARACTER_TYPECLASS)
    imp(settings.BASE_ROOM_TYPECLASS)
    imp(settings.BASE_EXIT_TYPECLASS)
    imp(settings.BASE_SCRIPT_TYPECLASS)
def main():
    """
    This handles command line input.

    Parses the command line, validates the requested mode
    ('menu'/'start'/'reload'/'stop') and service
    ('server'/'portal'/'all'), then either runs the interactive menu or
    translates the arguments into a runner command line which is spawned
    as a separate process.
    """
    parser = OptionParser(usage="%prog [-i] [menu|start|reload|stop [server|portal|all]]",
                          description="""This is the main Evennia launcher. It handles the Portal and Server, the two services making up Evennia. Default is to operate on both services. Interactive mode sets the service to log to stdout, in the foreground. Note that when launching 'all' services with the \"--interactive\" flag, both services will be started, but only Server will actually be started in interactive mode, simply because this is the most commonly useful setup. To activate interactive mode also for Portal, use the menu or launch the two services explicitly as two separate calls to this program.""")
    parser.add_option('-i', '--interactive', action='store_true', dest='interactive', default=False, help="Start given processes in interactive mode.")
    options, args = parser.parse_args()
    # No positional arguments: default to the interactive menu on both services.
    if not args:
        mode = "menu"
        service = 'all'
    if args:
        # First argument is the mode; optional second argument is the service.
        mode = args[0]
        service = "all"
        if len(args) > 1:
            service = args[1]
    if mode not in ['menu', 'start', 'reload', 'stop']:
        print "mode should be none, 'menu', 'start', 'reload' or 'stop'."
        sys.exit()
    if service not in ['server', 'portal', 'all']:
        print "service should be none, 'server', 'portal' or 'all'."
        sys.exit()
    if mode == 'menu':
        # launch menu
        cmdstr = run_menu()
    else:
        # handle command-line arguments
        cmdstr = handle_args(options, mode, service)
    if cmdstr:
        # call the runner. cmdstr is a list of command arguments; 'start'
        # is appended as the runner's operation before spawning it.
        cmdstr.append('start')
        Popen(cmdstr)
if __name__ == '__main__':
    # start Evennia - but only if all required third-party dependencies
    # are present; check_evennia_dependencies() reports what is missing.
    from src.utils.utils import check_evennia_dependencies
    if check_evennia_dependencies():
        main()
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.tasks_v2.types import cloudtasks
from google.cloud.tasks_v2.types import queue
from google.cloud.tasks_v2.types import queue as gct_queue
from google.cloud.tasks_v2.types import task
from google.cloud.tasks_v2.types import task as gct_task
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import CloudTasksTransport, DEFAULT_CLIENT_INFO
from .grpc import CloudTasksGrpcTransport
class CloudTasksGrpcAsyncIOTransport(CloudTasksTransport):
    """gRPC AsyncIO backend transport for CloudTasks.
    Cloud Tasks allows developers to manage the execution of
    background work in their applications.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # The live gRPC AsyncIO channel used for all RPCs on this transport.
    _grpc_channel: aio.Channel
    # Cache of per-RPC stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "cloudtasks.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "cloudtasks.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                # Deprecated path: the mTLS endpoint overrides the host.
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Preferred mTLS path; explicit ssl_channel_credentials wins.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            # No channel was supplied; create one from the saved settings.
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def list_queues(
        self,
    ) -> Callable[
        [cloudtasks.ListQueuesRequest], Awaitable[cloudtasks.ListQueuesResponse]
    ]:
        r"""Return a callable for the list queues method over gRPC.
        Lists queues.
        Queues are returned in lexicographical order.
        Returns:
            Callable[[~.ListQueuesRequest],
                    Awaitable[~.ListQueuesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_queues" not in self._stubs:
            self._stubs["list_queues"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/ListQueues",
                request_serializer=cloudtasks.ListQueuesRequest.serialize,
                response_deserializer=cloudtasks.ListQueuesResponse.deserialize,
            )
        return self._stubs["list_queues"]
    @property
    def get_queue(
        self,
    ) -> Callable[[cloudtasks.GetQueueRequest], Awaitable[queue.Queue]]:
        r"""Return a callable for the get queue method over gRPC.
        Gets a queue.
        Returns:
            Callable[[~.GetQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_queue" not in self._stubs:
            self._stubs["get_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/GetQueue",
                request_serializer=cloudtasks.GetQueueRequest.serialize,
                response_deserializer=queue.Queue.deserialize,
            )
        return self._stubs["get_queue"]
    @property
    def create_queue(
        self,
    ) -> Callable[[cloudtasks.CreateQueueRequest], Awaitable[gct_queue.Queue]]:
        r"""Return a callable for the create queue method over gRPC.
        Creates a queue.
        Queues created with this method allow tasks to live for a
        maximum of 31 days. After a task is 31 days old, the task will
        be deleted regardless of whether it was dispatched or not.
        WARNING: Using this method may have unintended side effects if
        you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
        to manage your queues. Read `Overview of Queue Management and
        queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
        before using this method.
        Returns:
            Callable[[~.CreateQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_queue" not in self._stubs:
            self._stubs["create_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/CreateQueue",
                request_serializer=cloudtasks.CreateQueueRequest.serialize,
                response_deserializer=gct_queue.Queue.deserialize,
            )
        return self._stubs["create_queue"]
    @property
    def update_queue(
        self,
    ) -> Callable[[cloudtasks.UpdateQueueRequest], Awaitable[gct_queue.Queue]]:
        r"""Return a callable for the update queue method over gRPC.
        Updates a queue.
        This method creates the queue if it does not exist and updates
        the queue if it does exist.
        Queues created with this method allow tasks to live for a
        maximum of 31 days. After a task is 31 days old, the task will
        be deleted regardless of whether it was dispatched or not.
        WARNING: Using this method may have unintended side effects if
        you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
        to manage your queues. Read `Overview of Queue Management and
        queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
        before using this method.
        Returns:
            Callable[[~.UpdateQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_queue" not in self._stubs:
            self._stubs["update_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/UpdateQueue",
                request_serializer=cloudtasks.UpdateQueueRequest.serialize,
                response_deserializer=gct_queue.Queue.deserialize,
            )
        return self._stubs["update_queue"]
    @property
    def delete_queue(
        self,
    ) -> Callable[[cloudtasks.DeleteQueueRequest], Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete queue method over gRPC.
        Deletes a queue.
        This command will delete the queue even if it has tasks in it.
        Note: If you delete a queue, a queue with the same name can't be
        created for 7 days.
        WARNING: Using this method may have unintended side effects if
        you are using an App Engine ``queue.yaml`` or ``queue.xml`` file
        to manage your queues. Read `Overview of Queue Management and
        queue.yaml <https://cloud.google.com/tasks/docs/queue-yaml>`__
        before using this method.
        Returns:
            Callable[[~.DeleteQueueRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_queue" not in self._stubs:
            self._stubs["delete_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/DeleteQueue",
                request_serializer=cloudtasks.DeleteQueueRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_queue"]
    @property
    def purge_queue(
        self,
    ) -> Callable[[cloudtasks.PurgeQueueRequest], Awaitable[queue.Queue]]:
        r"""Return a callable for the purge queue method over gRPC.
        Purges a queue by deleting all of its tasks.
        All tasks created before this method is called are
        permanently deleted.
        Purge operations can take up to one minute to take
        effect. Tasks might be dispatched before the purge takes
        effect. A purge is irreversible.
        Returns:
            Callable[[~.PurgeQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "purge_queue" not in self._stubs:
            self._stubs["purge_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/PurgeQueue",
                request_serializer=cloudtasks.PurgeQueueRequest.serialize,
                response_deserializer=queue.Queue.deserialize,
            )
        return self._stubs["purge_queue"]
    @property
    def pause_queue(
        self,
    ) -> Callable[[cloudtasks.PauseQueueRequest], Awaitable[queue.Queue]]:
        r"""Return a callable for the pause queue method over gRPC.
        Pauses the queue.
        If a queue is paused then the system will stop dispatching tasks
        until the queue is resumed via
        [ResumeQueue][google.cloud.tasks.v2.CloudTasks.ResumeQueue].
        Tasks can still be added when the queue is paused. A queue is
        paused if its [state][google.cloud.tasks.v2.Queue.state] is
        [PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED].
        Returns:
            Callable[[~.PauseQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "pause_queue" not in self._stubs:
            self._stubs["pause_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/PauseQueue",
                request_serializer=cloudtasks.PauseQueueRequest.serialize,
                response_deserializer=queue.Queue.deserialize,
            )
        return self._stubs["pause_queue"]
    @property
    def resume_queue(
        self,
    ) -> Callable[[cloudtasks.ResumeQueueRequest], Awaitable[queue.Queue]]:
        r"""Return a callable for the resume queue method over gRPC.
        Resume a queue.
        This method resumes a queue after it has been
        [PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED] or
        [DISABLED][google.cloud.tasks.v2.Queue.State.DISABLED]. The
        state of a queue is stored in the queue's
        [state][google.cloud.tasks.v2.Queue.state]; after calling this
        method it will be set to
        [RUNNING][google.cloud.tasks.v2.Queue.State.RUNNING].
        WARNING: Resuming many high-QPS queues at the same time can lead
        to target overloading. If you are resuming high-QPS queues,
        follow the 500/50/5 pattern described in `Managing Cloud Tasks
        Scaling
        Risks <https://cloud.google.com/tasks/docs/manage-cloud-task-scaling>`__.
        Returns:
            Callable[[~.ResumeQueueRequest],
                    Awaitable[~.Queue]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "resume_queue" not in self._stubs:
            self._stubs["resume_queue"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/ResumeQueue",
                request_serializer=cloudtasks.ResumeQueueRequest.serialize,
                response_deserializer=queue.Queue.deserialize,
            )
        return self._stubs["resume_queue"]
    @property
    def get_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
        r"""Return a callable for the get iam policy method over gRPC.
        Gets the access control policy for a
        [Queue][google.cloud.tasks.v2.Queue]. Returns an empty policy if
        the resource exists and does not have a policy set.
        Authorization requires the following `Google
        IAM <https://cloud.google.com/iam>`__ permission on the
        specified resource parent:
        -  ``cloudtasks.queues.getIamPolicy``
        Returns:
            Callable[[~.GetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_iam_policy" not in self._stubs:
            self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/GetIamPolicy",
                request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["get_iam_policy"]
    @property
    def set_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
        r"""Return a callable for the set iam policy method over gRPC.
        Sets the access control policy for a
        [Queue][google.cloud.tasks.v2.Queue]. Replaces any existing
        policy.
        Note: The Cloud Console does not check queue-level IAM
        permissions yet. Project-level permissions are required to use
        the Cloud Console.
        Authorization requires the following `Google
        IAM <https://cloud.google.com/iam>`__ permission on the
        specified resource parent:
        -  ``cloudtasks.queues.setIamPolicy``
        Returns:
            Callable[[~.SetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "set_iam_policy" not in self._stubs:
            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/SetIamPolicy",
                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["set_iam_policy"]
    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
    ]:
        r"""Return a callable for the test iam permissions method over gRPC.
        Returns permissions that a caller has on a
        [Queue][google.cloud.tasks.v2.Queue]. If the resource does not
        exist, this will return an empty set of permissions, not a
        [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
        Note: This operation is designed to be used for building
        permission-aware UIs and command-line tools, not for
        authorization checking. This operation may "fail open" without
        warning.
        Returns:
            Callable[[~.TestIamPermissionsRequest],
                    Awaitable[~.TestIamPermissionsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "test_iam_permissions" not in self._stubs:
            self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/TestIamPermissions",
                request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
                response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
            )
        return self._stubs["test_iam_permissions"]
    @property
    def list_tasks(
        self,
    ) -> Callable[
        [cloudtasks.ListTasksRequest], Awaitable[cloudtasks.ListTasksResponse]
    ]:
        r"""Return a callable for the list tasks method over gRPC.
        Lists the tasks in a queue.
        By default, only the
        [BASIC][google.cloud.tasks.v2.Task.View.BASIC] view is retrieved
        due to performance considerations;
        [response_view][google.cloud.tasks.v2.ListTasksRequest.response_view]
        controls the subset of information which is returned.
        The tasks may be returned in any order. The ordering may change
        at any time.
        Returns:
            Callable[[~.ListTasksRequest],
                    Awaitable[~.ListTasksResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_tasks" not in self._stubs:
            self._stubs["list_tasks"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/ListTasks",
                request_serializer=cloudtasks.ListTasksRequest.serialize,
                response_deserializer=cloudtasks.ListTasksResponse.deserialize,
            )
        return self._stubs["list_tasks"]
    @property
    def get_task(self) -> Callable[[cloudtasks.GetTaskRequest], Awaitable[task.Task]]:
        r"""Return a callable for the get task method over gRPC.
        Gets a task.
        Returns:
            Callable[[~.GetTaskRequest],
                    Awaitable[~.Task]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_task" not in self._stubs:
            self._stubs["get_task"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/GetTask",
                request_serializer=cloudtasks.GetTaskRequest.serialize,
                response_deserializer=task.Task.deserialize,
            )
        return self._stubs["get_task"]
    @property
    def create_task(
        self,
    ) -> Callable[[cloudtasks.CreateTaskRequest], Awaitable[gct_task.Task]]:
        r"""Return a callable for the create task method over gRPC.
        Creates a task and adds it to a queue.
        Tasks cannot be updated after creation; there is no UpdateTask
        command.
        -  The maximum task size is 100KB.
        Returns:
            Callable[[~.CreateTaskRequest],
                    Awaitable[~.Task]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_task" not in self._stubs:
            self._stubs["create_task"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/CreateTask",
                request_serializer=cloudtasks.CreateTaskRequest.serialize,
                response_deserializer=gct_task.Task.deserialize,
            )
        return self._stubs["create_task"]
    @property
    def delete_task(
        self,
    ) -> Callable[[cloudtasks.DeleteTaskRequest], Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete task method over gRPC.
        Deletes a task.
        A task can be deleted if it is scheduled or dispatched.
        A task cannot be deleted if it has executed successfully
        or permanently failed.
        Returns:
            Callable[[~.DeleteTaskRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_task" not in self._stubs:
            self._stubs["delete_task"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/DeleteTask",
                request_serializer=cloudtasks.DeleteTaskRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_task"]
    @property
    def run_task(self) -> Callable[[cloudtasks.RunTaskRequest], Awaitable[task.Task]]:
        r"""Return a callable for the run task method over gRPC.
        Forces a task to run now.
        When this method is called, Cloud Tasks will dispatch the task,
        even if the task is already running, the queue has reached its
        [RateLimits][google.cloud.tasks.v2.RateLimits] or is
        [PAUSED][google.cloud.tasks.v2.Queue.State.PAUSED].
        This command is meant to be used for manual debugging. For
        example, [RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] can
        be used to retry a failed task after a fix has been made or to
        manually force a task to be dispatched now.
        The dispatched task is returned. That is, the task that is
        returned contains the [status][Task.status] after the task is
        dispatched but before the task is received by its target.
        If Cloud Tasks receives a successful response from the task's
        target, then the task will be deleted; otherwise the task's
        [schedule_time][google.cloud.tasks.v2.Task.schedule_time] will
        be reset to the time that
        [RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] was called
        plus the retry delay specified in the queue's
        [RetryConfig][google.cloud.tasks.v2.RetryConfig].
        [RunTask][google.cloud.tasks.v2.CloudTasks.RunTask] returns
        [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
        task that has already succeeded or permanently failed.
        Returns:
            Callable[[~.RunTaskRequest],
                    Awaitable[~.Task]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "run_task" not in self._stubs:
            self._stubs["run_task"] = self.grpc_channel.unary_unary(
                "/google.cloud.tasks.v2.CloudTasks/RunTask",
                request_serializer=cloudtasks.RunTaskRequest.serialize,
                response_deserializer=task.Task.deserialize,
            )
        return self._stubs["run_task"]
    def close(self):
        """Close the underlying gRPC channel.

        NOTE(review): on an AsyncIO channel, close() presumably returns an
        awaitable that callers must await - confirm against the aio.Channel
        API before relying on synchronous shutdown.
        """
        return self.grpc_channel.close()
# Public API of this module.
__all__ = ("CloudTasksGrpcAsyncIOTransport",)
| |
import re
from django.contrib.gis.db.models.fields import BaseSpatialField
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup, Transform
from django.db.models.sql.query import Query
class RasterBandTransform(Transform):
    """
    Transform that compiles to its wrapped lhs unchanged.

    GISLookup inspects instances of this class (via ``band_index``) to
    extract raster band indices; the SQL itself is untouched.
    """

    def as_sql(self, compiler, connection):
        # Compilation is delegated entirely to the lhs expression.
        lhs_sql, lhs_params = compiler.compile(self.lhs)
        return lhs_sql, lhs_params
class GISLookup(Lookup):
sql_template = None
transform_func = None
distance = False
band_rhs = None
band_lhs = None
def __init__(self, lhs, rhs):
rhs, *self.rhs_params = rhs if isinstance(rhs, (list, tuple)) else [rhs]
super().__init__(lhs, rhs)
self.template_params = {}
self.process_rhs_params()
def process_rhs_params(self):
if self.rhs_params:
# Check if a band index was passed in the query argument.
if len(self.rhs_params) == (2 if self.lookup_name == 'relate' else 1):
self.process_band_indices()
elif len(self.rhs_params) > 1:
raise ValueError('Tuple too long for lookup %s.' % self.lookup_name)
elif isinstance(self.lhs, RasterBandTransform):
self.process_band_indices(only_lhs=True)
def process_band_indices(self, only_lhs=False):
"""
Extract the lhs band index from the band transform class and the rhs
band index from the input tuple.
"""
# PostGIS band indices are 1-based, so the band index needs to be
# increased to be consistent with the GDALRaster band indices.
if only_lhs:
self.band_rhs = 1
self.band_lhs = self.lhs.band_index + 1
return
if isinstance(self.lhs, RasterBandTransform):
self.band_lhs = self.lhs.band_index + 1
else:
self.band_lhs = 1
self.band_rhs, *self.rhs_params = self.rhs_params
def get_db_prep_lookup(self, value, connection):
# get_db_prep_lookup is called by process_rhs from super class
return ('%s', [connection.ops.Adapter(value)] + (self.rhs_params or []))
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, Query):
# If rhs is some Query, don't touch it.
return super().process_rhs(compiler, connection)
geom = self.rhs
if isinstance(self.rhs, Col):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = self.rhs.output_field
if not hasattr(geo_fld, 'srid'):
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
sql, _ = compiler.compile(geom)
return connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler) % sql, []
elif isinstance(self.rhs, Expression):
raise ValueError('Complex expressions not supported for spatial fields.')
rhs, rhs_params = super().process_rhs(compiler, connection)
rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
return rhs, rhs_params
def get_rhs_op(self, connection, rhs):
# Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return
# an object (SpatialOperator) with an as_sql() method to allow for more
# complex computations (where the lhs part can be mixed in).
return connection.ops.gis_operators[self.lookup_name]
def as_sql(self, compiler, connection):
    """Delegate final SQL generation to the backend SpatialOperator."""
    lhs_sql, params = self.process_lhs(compiler, connection)
    rhs_sql, rhs_params = self.process_rhs(compiler, connection)
    params.extend(rhs_params)
    # Lookup-specific entries in self.template_params override the defaults.
    template_params = {'lhs': lhs_sql, 'rhs': rhs_sql, 'value': '%s', **self.template_params}
    rhs_op = self.get_rhs_op(connection, rhs_sql)
    return rhs_op.as_sql(connection, self, template_params, params)
# ------------------
# Geometry operators
# ------------------
@BaseSpatialField.register_lookup
class OverlapsLeftLookup(GISLookup):
    """
    The 'overlaps_left' operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
@BaseSpatialField.register_lookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
@BaseSpatialField.register_lookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'
@BaseSpatialField.register_lookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'
@BaseSpatialField.register_lookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'
@BaseSpatialField.register_lookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'
@BaseSpatialField.register_lookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'
@BaseSpatialField.register_lookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'
@BaseSpatialField.register_lookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'


# Also register the geometric-equality test under the standard 'exact'
# lookup name, so plain equality filters on spatial fields use "same as".
BaseSpatialField.register_lookup(SameAsLookup, 'exact')
@BaseSpatialField.register_lookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely contains
    B's bounding box.
    """
    lookup_name = 'bbcontains'
@BaseSpatialField.register_lookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'
@BaseSpatialField.register_lookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
# ------------------
# Geometry functions
# ------------------
@BaseSpatialField.register_lookup
class ContainsLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'contains' operation."""
    lookup_name = 'contains'
@BaseSpatialField.register_lookup
class ContainsProperlyLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'contains_properly' operation."""
    lookup_name = 'contains_properly'
@BaseSpatialField.register_lookup
class CoveredByLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'coveredby' operation."""
    lookup_name = 'coveredby'
@BaseSpatialField.register_lookup
class CoversLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'covers' operation."""
    lookup_name = 'covers'
@BaseSpatialField.register_lookup
class CrossesLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'crosses' operation."""
    lookup_name = 'crosses'
@BaseSpatialField.register_lookup
class DisjointLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'disjoint' operation."""
    lookup_name = 'disjoint'
@BaseSpatialField.register_lookup
class EqualsLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'equals' operation."""
    lookup_name = 'equals'
@BaseSpatialField.register_lookup
class IntersectsLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'intersects' operation."""
    lookup_name = 'intersects'
@BaseSpatialField.register_lookup
class OverlapsLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'overlaps' operation."""
    lookup_name = 'overlaps'
@BaseSpatialField.register_lookup
class RelateLookup(GISLookup):
    """
    DE-9IM 'relate' lookup. The lookup value is a (geometry, pattern)
    two-tuple, where the pattern is a 9-character intersection matrix.
    """
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def get_db_prep_lookup(self, value, connection):
        # rhs_params must hold exactly one extra element: the pattern.
        if len(self.rhs_params) != 1:
            raise ValueError('relate must be passed a two-tuple')
        # Validate the pattern argument.
        pattern = self.rhs_params[0]
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            # Some backends provide their own pattern validation.
            backend_op.check_relate_argument(pattern)
        elif not isinstance(pattern, str) or not self.pattern_regex.match(pattern):
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super().get_db_prep_lookup(value, connection)
@BaseSpatialField.register_lookup
class TouchesLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'touches' operation."""
    lookup_name = 'touches'
@BaseSpatialField.register_lookup
class WithinLookup(GISLookup):
    """Spatial lookup mapped to the backend's 'within' operation."""
    lookup_name = 'within'
class DistanceLookupBase(GISLookup):
    """Base class for the distance lookups (dwithin, distance_gt/gte/lt/lte)."""
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'

    def process_rhs_params(self):
        """
        Validate the extra lookup parameters (everything after the geometry):
        the distance value, optionally followed by a raster band index and/or
        the 'spheroid' directive.
        """
        if not 1 <= len(self.rhs_params) <= 3:
            raise ValueError("2, 3, or 4-element tuple required for '%s' lookup." % self.lookup_name)
        elif len(self.rhs_params) == 3 and self.rhs_params[2] != 'spheroid':
            raise ValueError("For 4-element tuples the last argument must be the 'spheroid' directive.")
        # Check if the second parameter is a band index.
        if len(self.rhs_params) > 1 and self.rhs_params[1] != 'spheroid':
            self.process_band_indices()

    def process_rhs(self, compiler, connection):
        """Compile the rhs geometry and distance into (sql, params)."""
        params = [connection.ops.Adapter(self.rhs)]
        # Getting the distance parameter in the units of the field.
        dist_param = self.rhs_params[0]
        if hasattr(dist_param, 'resolve_expression'):
            # Distance given as an expression: compile it and splice the SQL
            # into the template's 'value' slot instead of a literal param.
            dist_param = dist_param.resolve_expression(compiler.query)
            sql, expr_params = compiler.compile(dist_param)
            self.template_params['value'] = sql
            params.extend(expr_params)
        else:
            # Let the backend convert the distance to field units.
            params += connection.ops.get_distance(
                self.lhs.output_field, self.rhs_params,
                self.lookup_name,
            )
        rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, params[0], compiler)
        return (rhs, params)
@BaseSpatialField.register_lookup
class DWithinLookup(DistanceLookupBase):
    """'dwithin' lookup: distance passed directly as a function argument."""
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
@BaseSpatialField.register_lookup
class DistanceGTLookup(DistanceLookupBase):
    """Distance comparison lookup: distance strictly greater than the value."""
    lookup_name = 'distance_gt'
@BaseSpatialField.register_lookup
class DistanceGTELookup(DistanceLookupBase):
    """Distance comparison lookup: distance greater than or equal to the value."""
    lookup_name = 'distance_gte'
@BaseSpatialField.register_lookup
class DistanceLTLookup(DistanceLookupBase):
    """Distance comparison lookup: distance strictly less than the value."""
    lookup_name = 'distance_lt'
@BaseSpatialField.register_lookup
class DistanceLTELookup(DistanceLookupBase):
    """Distance comparison lookup: distance less than or equal to the value."""
    lookup_name = 'distance_lte'
| |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Create the timetable snapshot tables. Each table exists in paired A/B
    variants with identical fields (plus a Lock table with two flags),
    presumably for double-buffered bulk loads — TODO confirm against the
    code that reads the Lock model.

    NOTE(review): max_length on TextField is ignored by Django (only
    CharField enforces it), and serialize=False on non-primary-key fields
    has no schema effect. Left untouched: this is a historical migration
    and must not be edited in ways that change the generated schema.
    """

    dependencies = [
        ('timetable', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='LecturerA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('lecturerid', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=80)),
                ('category', models.TextField(max_length=10)),
                ('type', models.TextField(max_length=10)),
                ('status', models.TextField(max_length=10)),
                ('parttime', models.TextField(max_length=20)),
                ('cost', models.BigIntegerField()),
                ('costtype', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('owner', models.TextField(max_length=10)),
                ('displectid', models.TextField(max_length=10, serialize=False)),
                ('covprior', models.BigIntegerField()),
                ('covingprior', models.BigIntegerField()),
                ('excludecover', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='LecturerB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('lecturerid', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=80)),
                ('category', models.TextField(max_length=10)),
                ('type', models.TextField(max_length=10)),
                ('status', models.TextField(max_length=10)),
                ('parttime', models.TextField(max_length=20)),
                ('cost', models.BigIntegerField()),
                ('costtype', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('owner', models.TextField(max_length=10)),
                ('displectid', models.TextField(max_length=10, serialize=False)),
                ('covprior', models.BigIntegerField()),
                ('covingprior', models.BigIntegerField()),
                ('excludecover', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Lock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('a', models.BooleanField()),
                ('b', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='ModuleA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('moduleid', models.TextField(max_length=12, serialize=False)),
                ('owner', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=120)),
                ('category', models.TextField(max_length=10)),
                ('classif', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('csize', models.BigIntegerField()),
                ('minsize', models.BigIntegerField()),
                ('maxsize', models.BigIntegerField()),
                ('prefmaxsize', models.BigIntegerField()),
                ('lecturerid', models.TextField(max_length=10)),
                ('lectgroup', models.BigIntegerField()),
                ('dontfit', models.CharField(max_length=1)),
                ('unitvalue', models.TextField(max_length=10)),
                ('instid', models.BigIntegerField()),
                ('isactive', models.CharField(max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='ModuleB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('moduleid', models.TextField(max_length=12, serialize=False)),
                ('owner', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=120)),
                ('category', models.TextField(max_length=10)),
                ('classif', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('csize', models.BigIntegerField()),
                ('minsize', models.BigIntegerField()),
                ('maxsize', models.BigIntegerField()),
                ('prefmaxsize', models.BigIntegerField()),
                ('lecturerid', models.TextField(max_length=10)),
                ('lectgroup', models.BigIntegerField()),
                ('dontfit', models.CharField(max_length=1)),
                ('unitvalue', models.TextField(max_length=10)),
                ('instid', models.BigIntegerField()),
                ('isactive', models.CharField(max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='RoomsA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('roomid', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=80)),
                ('category', models.TextField(max_length=10)),
                ('type', models.TextField(max_length=10)),
                ('classification', models.TextField(max_length=10, serialize=False)),
                ('roomgrpcode', models.TextField(max_length=10)),
                ('zone', models.TextField(max_length=10)),
                ('capacity', models.BigIntegerField()),
                ('prefmin', models.BigIntegerField()),
                ('prefmax', models.BigIntegerField()),
                ('deptid', models.TextField(max_length=10)),
                ('roomarea', models.BigIntegerField()),
                ('dynafill', models.CharField(max_length=1)),
                ('setid', models.TextField(max_length=10)),
                ('uniquefield', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('campusid', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='RoomsB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('roomid', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('name', models.TextField(max_length=80)),
                ('category', models.TextField(max_length=10)),
                ('type', models.TextField(max_length=10)),
                ('classification', models.TextField(max_length=10, serialize=False)),
                ('roomgrpcode', models.TextField(max_length=10)),
                ('zone', models.TextField(max_length=10)),
                ('capacity', models.BigIntegerField()),
                ('prefmin', models.BigIntegerField()),
                ('prefmax', models.BigIntegerField()),
                ('deptid', models.TextField(max_length=10)),
                ('roomarea', models.BigIntegerField()),
                ('dynafill', models.CharField(max_length=1)),
                ('setid', models.TextField(max_length=10)),
                ('uniquefield', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=20)),
                ('campusid', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='SitesA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('sitename', models.TextField(max_length=80)),
                ('address1', models.TextField(max_length=80)),
                ('address2', models.TextField(max_length=80, serialize=False)),
                ('address3', models.TextField(max_length=80)),
                ('address4', models.TextField(max_length=80)),
                ('phone1', models.TextField(max_length=50)),
                ('phone2', models.TextField(max_length=50)),
                ('contact1', models.TextField(max_length=50)),
                ('contact2', models.TextField(max_length=50)),
                ('linkcode', models.TextField(max_length=20)),
                ('campusid', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='SitesB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('sitename', models.TextField(max_length=80)),
                ('address1', models.TextField(max_length=80)),
                ('address2', models.TextField(max_length=80, serialize=False)),
                ('address3', models.TextField(max_length=80)),
                ('address4', models.TextField(max_length=80)),
                ('phone1', models.TextField(max_length=50)),
                ('phone2', models.TextField(max_length=50)),
                ('contact1', models.TextField(max_length=50)),
                ('contact2', models.TextField(max_length=50)),
                ('linkcode', models.TextField(max_length=20)),
                ('campusid', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='TimetableA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slotid', models.BigIntegerField(serialize=False)),
                ('slotentry', models.BigIntegerField()),
                ('slottotal', models.BigIntegerField()),
                ('setid', models.TextField(max_length=10)),
                ('periodid', models.BigIntegerField()),
                ('weekday', models.BigIntegerField()),
                ('starttime', models.TextField(max_length=5)),
                ('duration', models.BigIntegerField()),
                ('finishtime', models.TextField(max_length=5)),
                ('weekid', models.BigIntegerField()),
                ('classgroupid', models.TextField(max_length=10)),
                ('courseid', models.TextField(max_length=12)),
                ('courseyear', models.BigIntegerField()),
                ('clsgrpcode', models.TextField(max_length=10)),
                ('lecturerid', models.TextField(max_length=10)),
                ('moduleid', models.TextField(max_length=12)),
                ('deptid', models.TextField(max_length=10)),
                ('moduletype', models.TextField(max_length=10)),
                ('modgrpcode', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('roomid', models.TextField(max_length=10)),
                ('roomgrpcode', models.TextField(max_length=10)),
                ('sourcesid', models.TextField(max_length=20)),
                ('capacity', models.BigIntegerField()),
                ('reqsiteid', models.TextField(max_length=10)),
                ('reqroomid', models.TextField(max_length=10)),
                ('reqtype', models.TextField(max_length=10)),
                ('reqcategory', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=10)),
                ('linkid', models.BigIntegerField()),
                ('chainid', models.BigIntegerField()),
                ('exclid', models.BigIntegerField()),
                ('associd', models.BigIntegerField()),
                ('specid', models.BigIntegerField()),
                ('locked', models.BigIntegerField()),
                ('status', models.BigIntegerField()),
                ('readlock', models.BigIntegerField()),
                ('classif', models.TextField(max_length=10)),
                ('owner', models.TextField(max_length=10)),
                ('drstatus', models.BigIntegerField()),
                ('lectgrp', models.BigIntegerField()),
                ('evpriority', models.BigIntegerField()),
                ('fixlect', models.CharField(max_length=1)),
                ('fixroom', models.CharField(max_length=1)),
                ('fixevent', models.CharField(max_length=1)),
                ('reqclass', models.TextField(max_length=10)),
                ('reqzone', models.TextField(max_length=10)),
                ('tweightid', models.BigIntegerField()),
                ('fixweight', models.BigIntegerField()),
                ('siteproximity', models.BigIntegerField()),
                ('zoneproximity', models.BigIntegerField()),
                ('maxrooms', models.BigIntegerField()),
                ('datechanged', models.TextField(max_length=12)),
                ('sizeused', models.BigIntegerField()),
                ('uniquefield', models.TextField(max_length=10)),
                ('equipid', models.TextField(max_length=10)),
                ('ecode', models.TextField(max_length=20)),
                ('einstalled', models.TextField(max_length=12)),
                ('eremoved', models.TextField(max_length=12)),
                ('ewhoinstalled', models.TextField(max_length=20)),
                ('ewhoremoved', models.TextField(max_length=20)),
                ('tobecopied', models.CharField(max_length=1)),
                ('copied', models.CharField(max_length=1)),
                ('excludefit', models.CharField(max_length=1)),
                ('gendatanum', models.BigIntegerField()),
                ('gendatastring', models.TextField(max_length=100)),
                ('regid', models.BigIntegerField()),
                ('sourcechange', models.BigIntegerField()),
                ('userchange', models.TextField(max_length=30)),
                ('mequipcat', models.TextField(max_length=10)),
                ('mequiptype', models.TextField(max_length=10)),
                ('mequipnotes', models.CharField(max_length=1)),
                ('triggerdate', models.TextField(max_length=10)),
                ('reqcampusid', models.TextField(max_length=10)),
                ('instid', models.BigIntegerField()),
                ('numperiods', models.BigIntegerField()),
                ('maxperiodgap', models.BigIntegerField()),
                ('groupid', models.BigIntegerField()),
                ('tobescheduled', models.BigIntegerField()),
                ('board', models.TextField(max_length=10)),
                ('series', models.TextField(max_length=10)),
                ('crsyear', models.TextField(max_length=4)),
                ('optcode', models.TextField(max_length=10)),
                ('compcode', models.TextField(max_length=20)),
                ('subcode', models.TextField(max_length=10)),
                ('compinstid', models.BigIntegerField()),
                ('roompoolid', models.BigIntegerField()),
                ('nonconid', models.BigIntegerField()),
                ('typeevent', models.BigIntegerField()),
                ('ncyear', models.TextField(max_length=3)),
                ('reasonforchange', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='TimetableB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slotid', models.BigIntegerField(serialize=False)),
                ('slotentry', models.BigIntegerField()),
                ('slottotal', models.BigIntegerField()),
                ('setid', models.TextField(max_length=10)),
                ('periodid', models.BigIntegerField()),
                ('weekday', models.BigIntegerField()),
                ('starttime', models.TextField(max_length=5)),
                ('duration', models.BigIntegerField()),
                ('finishtime', models.TextField(max_length=5)),
                ('weekid', models.BigIntegerField()),
                ('classgroupid', models.TextField(max_length=10)),
                ('courseid', models.TextField(max_length=12)),
                ('courseyear', models.BigIntegerField()),
                ('clsgrpcode', models.TextField(max_length=10)),
                ('lecturerid', models.TextField(max_length=10)),
                ('moduleid', models.TextField(max_length=12)),
                ('deptid', models.TextField(max_length=10)),
                ('moduletype', models.TextField(max_length=10)),
                ('modgrpcode', models.TextField(max_length=10)),
                ('siteid', models.TextField(max_length=10)),
                ('roomid', models.TextField(max_length=10)),
                ('roomgrpcode', models.TextField(max_length=10)),
                ('sourcesid', models.TextField(max_length=20)),
                ('capacity', models.BigIntegerField()),
                ('reqsiteid', models.TextField(max_length=10)),
                ('reqroomid', models.TextField(max_length=10)),
                ('reqtype', models.TextField(max_length=10)),
                ('reqcategory', models.TextField(max_length=10)),
                ('linkcode', models.TextField(max_length=10)),
                ('linkid', models.BigIntegerField()),
                ('chainid', models.BigIntegerField()),
                ('exclid', models.BigIntegerField()),
                ('associd', models.BigIntegerField()),
                ('specid', models.BigIntegerField()),
                ('locked', models.BigIntegerField()),
                ('status', models.BigIntegerField()),
                ('readlock', models.BigIntegerField()),
                ('classif', models.TextField(max_length=10)),
                ('owner', models.TextField(max_length=10)),
                ('drstatus', models.BigIntegerField()),
                ('lectgrp', models.BigIntegerField()),
                ('evpriority', models.BigIntegerField()),
                ('fixlect', models.CharField(max_length=1)),
                ('fixroom', models.CharField(max_length=1)),
                ('fixevent', models.CharField(max_length=1)),
                ('reqclass', models.TextField(max_length=10)),
                ('reqzone', models.TextField(max_length=10)),
                ('tweightid', models.BigIntegerField()),
                ('fixweight', models.BigIntegerField()),
                ('siteproximity', models.BigIntegerField()),
                ('zoneproximity', models.BigIntegerField()),
                ('maxrooms', models.BigIntegerField()),
                ('datechanged', models.TextField(max_length=12)),
                ('sizeused', models.BigIntegerField()),
                ('uniquefield', models.TextField(max_length=10)),
                ('equipid', models.TextField(max_length=10)),
                ('ecode', models.TextField(max_length=20)),
                ('einstalled', models.TextField(max_length=12)),
                ('eremoved', models.TextField(max_length=12)),
                ('ewhoinstalled', models.TextField(max_length=20)),
                ('ewhoremoved', models.TextField(max_length=20)),
                ('tobecopied', models.CharField(max_length=1)),
                ('copied', models.CharField(max_length=1)),
                ('excludefit', models.CharField(max_length=1)),
                ('gendatanum', models.BigIntegerField()),
                ('gendatastring', models.TextField(max_length=100)),
                ('regid', models.BigIntegerField()),
                ('sourcechange', models.BigIntegerField()),
                ('userchange', models.TextField(max_length=30)),
                ('mequipcat', models.TextField(max_length=10)),
                ('mequiptype', models.TextField(max_length=10)),
                ('mequipnotes', models.CharField(max_length=1)),
                ('triggerdate', models.TextField(max_length=10)),
                ('reqcampusid', models.TextField(max_length=10)),
                ('instid', models.BigIntegerField()),
                ('numperiods', models.BigIntegerField()),
                ('maxperiodgap', models.BigIntegerField()),
                ('groupid', models.BigIntegerField()),
                ('tobescheduled', models.BigIntegerField()),
                ('board', models.TextField(max_length=10)),
                ('series', models.TextField(max_length=10)),
                ('crsyear', models.TextField(max_length=4)),
                ('optcode', models.TextField(max_length=10)),
                ('compcode', models.TextField(max_length=20)),
                ('subcode', models.TextField(max_length=10)),
                ('compinstid', models.BigIntegerField()),
                ('roompoolid', models.BigIntegerField()),
                ('nonconid', models.BigIntegerField()),
                ('typeevent', models.BigIntegerField()),
                ('ncyear', models.TextField(max_length=3)),
                ('reasonforchange', models.TextField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='WeekmapnumericA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10, serialize=False)),
                ('weekid', models.BigIntegerField()),
                ('weeknumber', models.BigIntegerField()),
                ('drstatus', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='WeekmapnumericB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10, serialize=False)),
                ('weekid', models.BigIntegerField()),
                ('weeknumber', models.BigIntegerField()),
                ('drstatus', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='WeekmapstringA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('weekid', models.BigIntegerField()),
                ('name', models.TextField(max_length=50)),
                ('weeks', models.TextField(max_length=104)),
                ('numweeks', models.BigIntegerField()),
                ('statweeks', models.TextField(max_length=10, serialize=False)),
                ('drstatus', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='WeekmapstringB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('weekid', models.BigIntegerField()),
                ('name', models.TextField(max_length=50)),
                ('weeks', models.TextField(max_length=104)),
                ('numweeks', models.BigIntegerField()),
                ('statweeks', models.TextField(max_length=10, serialize=False)),
                ('drstatus', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='WeekstructureA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('weeknumber', models.BigIntegerField()),
                ('startdate', models.DateField()),
                ('description', models.TextField(max_length=80, serialize=False)),
                ('mappedto', models.BigIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='WeekstructureB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('setid', models.TextField(max_length=10)),
                ('weeknumber', models.BigIntegerField()),
                ('startdate', models.DateField()),
                ('description', models.TextField(max_length=80, serialize=False)),
                ('mappedto', models.BigIntegerField()),
            ],
        ),
    ]
| |
from __future__ import annotations
import collections
import glob
import itertools
import logging
import os
from dials.command_line.dials_import import ManualGeometryUpdater
from dials.util.options import geometry_phil_scope
from dxtbx.imageset import ImageSequence
from dxtbx.model.experiment_list import (
BeamComparison,
DetectorComparison,
ExperimentList,
ExperimentListFactory,
GoniometerComparison,
)
from dxtbx.sequence_filenames import locate_files_matching_template_string
from scitbx.array_family import flex
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Schema")
class _ImagesetCache(dict):
    """Process-wide cache used by load_imagesets(), keyed on template path."""
    pass


# Shared cache instance: maps full template path -> per-start-image imagesets.
imageset_cache = _ImagesetCache()
def longest_common_substring(s1, s2):
    """
    Return the longest contiguous substring common to s1 and s2.

    Classic O(len(s1) * len(s2)) dynamic programming; on ties the earliest
    match in s1 wins. Returns '' when there is no common character.
    """
    n1, n2 = len(s1), len(s2)
    table = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    best_len = 0
    best_end = 0  # index in s1 one past the end of the best match
    for i in range(1, n1 + 1):
        for j in range(1, n2 + 1):
            if s1[i - 1] == s2[j - 1]:
                run = table[i - 1][j - 1] + 1
                table[i][j] = run
                if run > best_len:
                    best_len = run
                    best_end = i
    return s1[best_end - best_len : best_end]
def load_imagesets(
    template,
    directory,
    id_image=None,
    image_range=None,
    use_cache=True,
    reversephi=False,
):
    """
    Load the imagesets matching ``template`` in ``directory``.

    Results are memoised in the module-level ``imageset_cache``, keyed first
    on the full template path and then on each imageset's first image number.

    Args:
        template: Image filename template (or HDF5 master file name).
        directory: Directory containing the images.
        id_image: If given, return only the cached imageset whose scan starts
            at this image number, as a one-element list.
        image_range: If given as (first, last), return the slice of the
            covering imageset restricted to that range.
        use_cache: Reuse previously loaded imagesets for this template.
        reversephi: Invert the goniometer rotation axis.

    Returns:
        A list of ImageSequence imagesets.
    """
    global imageset_cache
    from xia2.Applications.xia2setup import known_hdf5_extensions

    full_template_path = os.path.join(directory, template)
    if full_template_path not in imageset_cache or not use_cache:
        params = PhilIndex.params.xia2.settings
        # Tolerances deciding when frames may share a single model.
        compare_beam = BeamComparison(
            wavelength_tolerance=params.input.tolerance.beam.wavelength,
            direction_tolerance=params.input.tolerance.beam.direction,
            polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
            polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction,
        )
        compare_detector = DetectorComparison(
            fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
            slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
            origin_tolerance=params.input.tolerance.detector.origin,
        )
        compare_goniometer = GoniometerComparison(
            rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
            fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
            setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation,
        )
        scan_tolerance = params.input.tolerance.scan.oscillation

        # If diamond anvil cell data, always use dynamic shadowing
        high_pressure = PhilIndex.params.dials.high_pressure.correction
        format_kwargs = {
            "dynamic_shadowing": params.input.format.dynamic_shadowing or high_pressure,
            "multi_panel": params.input.format.multi_panel,
        }

        if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
            # if we are passed the correct file, use this, else look for a master
            # file (i.e. something_master.h5)
            if os.path.exists(full_template_path) and os.path.isfile(
                full_template_path
            ):
                master_file = full_template_path
            else:
                # Pick the *_master.h5 file sharing the longest common
                # substring with the requested template.
                g = glob.glob(os.path.join(directory, "*_master.h5"))
                master_file = None
                for p in g:
                    substr = longest_common_substring(template, p)
                    if substr:
                        if master_file is None or (
                            len(substr)
                            > len(longest_common_substring(template, master_file))
                        ):
                            master_file = p
            if master_file is None:
                raise RuntimeError("Can't find master file for %s" % full_template_path)

            unhandled = []
            experiments = ExperimentListFactory.from_filenames(
                [master_file],
                unhandled=unhandled,
                compare_beam=compare_beam,
                compare_detector=compare_detector,
                compare_goniometer=compare_goniometer,
                scan_tolerance=scan_tolerance,
                format_kwargs=format_kwargs,
            )
            assert len(unhandled) == 0, (
                "unhandled image files identified: %s" % unhandled
            )
        else:
            params = PhilIndex.get_python_object()
            read_all_image_headers = params.xia2.settings.read_all_image_headers
            if read_all_image_headers:
                # Slow path: inspect the header of every file on disk.
                paths = sorted(
                    locate_files_matching_template_string(full_template_path)
                )
                unhandled = []
                experiments = ExperimentListFactory.from_filenames(
                    paths,
                    unhandled=unhandled,
                    compare_beam=compare_beam,
                    compare_detector=compare_detector,
                    compare_goniometer=compare_goniometer,
                    scan_tolerance=scan_tolerance,
                    format_kwargs=format_kwargs,
                )
                assert len(unhandled) == 0, (
                    "unhandled image files identified: %s" % unhandled
                )
            else:
                # Fast path: trust the template and any start/end ranges
                # given on the command line.
                from xia2.Handlers.CommandLine import CommandLine

                experiments = ExperimentList()
                start_ends = CommandLine.get_start_ends(full_template_path)
                if not start_ends:
                    start_ends.append(None)
                for start_end in start_ends:
                    experiments.extend(
                        ExperimentList.from_templates(
                            [full_template_path],
                            format_kwargs=format_kwargs,
                            image_range=start_end,
                        )
                    )

        # Only rotation (sequence) data is of interest here; stills are dropped.
        imagesets = [
            iset for iset in experiments.imagesets() if isinstance(iset, ImageSequence)
        ]
        assert len(imagesets) > 0, "no imageset found"

        imageset_cache[full_template_path] = collections.OrderedDict()
        if reversephi:
            for imageset in imagesets:
                goniometer = imageset.get_goniometer()
                goniometer.set_rotation_axis(
                    tuple(-g for g in goniometer.get_rotation_axis())
                )
        reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
        if reference_geometry is not None and len(reference_geometry) > 0:
            update_with_reference_geometry(imagesets, reference_geometry)

        # Update the geometry
        params = PhilIndex.params.xia2.settings
        update_geometry = []
        # Then add manual geometry
        work_phil = geometry_phil_scope.format(params.input)
        diff_phil = geometry_phil_scope.fetch_diff(source=work_phil)
        # Only apply the manual updater if the user actually overrode
        # something (non-empty phil diff).
        if diff_phil.as_str() != "":
            update_geometry.append(ManualGeometryUpdater(params.input))
        imageset_list = []
        for imageset in imagesets:
            for updater in update_geometry:
                imageset = updater(imageset)
            imageset_list.append(imageset)
        imagesets = imageset_list

        for imageset in imagesets:
            scan = imageset.get_scan()
            exposure_times = scan.get_exposure_times()
            epochs = scan.get_epochs()
            # Repair missing exposure times: all-zero scans get 1s per frame;
            # a partially-zero list is padded with the first frame's value.
            if exposure_times.all_eq(0) or exposure_times[0] == 0:
                exposure_times = flex.double(exposure_times.size(), 1)
                scan.set_exposure_times(exposure_times)
            elif not exposure_times.all_gt(0):
                exposure_times = flex.double(exposure_times.size(), exposure_times[0])
                scan.set_exposure_times(exposure_times)
            # Repair missing epochs by accumulating exposure times.
            if epochs.size() > 1 and not epochs.all_gt(0):
                if epochs[0] == 0:
                    epochs[0] = 1
                for i in range(1, epochs.size()):
                    epochs[i] = epochs[i - 1] + exposure_times[i - 1]
                scan.set_epochs(epochs)
            _id_image = scan.get_image_range()[0]
            imageset_cache[full_template_path][_id_image] = imageset

    if id_image is not None:
        return [imageset_cache[full_template_path][id_image]]
    elif image_range is not None:
        for imageset in imageset_cache[full_template_path].values():
            scan = imageset.get_scan()
            scan_image_range = scan.get_image_range()
            if (
                image_range[0] >= scan_image_range[0]
                and image_range[1] <= scan_image_range[1]
            ):
                # Translate image numbers into 0-based slice indices,
                # honouring any batch offset on the scan.
                b0 = scan.get_batch_offset()
                i0 = image_range[0] - scan_image_range[0] + b0
                i1 = image_range[1] - scan_image_range[0] + b0
                imagesets = [imageset[i0 : i1 + 1]]
                assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, len(
                    imagesets[0]
                )
                return imagesets
    return list(imageset_cache[full_template_path].values())
def update_with_reference_geometry(imagesets, reference_geometry_list):
    """Overwrite each imageset's beam and detector with a matching reference.

    The reference instrument models are loaded from the given experiment-list
    files; each imageset is assigned the first reference whose detector is
    similar to its own. Raises if any imageset has no match.
    """
    assert reference_geometry_list is not None
    assert len(reference_geometry_list) >= 1
    components = load_reference_geometries(reference_geometry_list)
    for iset in imagesets:
        matched = find_relevant_reference_geometry(iset, components)
        iset.set_beam(matched["beam"])
        iset.set_detector(matched["detector"])
def load_reference_geometries(geometry_file_list):
    """Load (detector, beam) reference models from experiment-list files and
    drop geometries that duplicate an earlier one within tolerance.

    Returns a list of ``{"detector": ..., "beam": ...}`` dicts, one per
    unique reference geometry.
    """
    logger.debug("Collecting reference instrument models.")
    ref_geoms = {
        # Note that 'index' is the index of the experiment in the expt list file,
        # as per dials.show, rather than the UID string of the experiment.
        (expt.detector, expt.beam, f, index)
        for f in geometry_file_list
        for index, expt in enumerate(ExperimentList.from_file(f, check_format=False))
    }
    logger.debug("Removing duplicate reference geometries.")
    duplicates = set()
    # Pairwise-compare all geometries. The filter on duplicates.isdisjoint
    # skips any pair that already contains a known duplicate, so each
    # duplicate is culled exactly once (against the first geometry it matched)
    # and never used as a comparison basis itself.
    for a, b in filter(duplicates.isdisjoint, itertools.combinations(ref_geoms, 2)):
        if compare_geometries(a[0], b[0]):
            logger.debug(f"Experiment {b[3]} of {b[2]} is a duplicate.")
            duplicates.add(b)
    ref_geoms -= duplicates
    n = len(ref_geoms)
    logger.debug(f"Found {n} unique reference geometr{'ies' if n != 1 else 'y'}.")
    for geometry in ref_geoms:
        logger.debug(f"Experiment {geometry[3]} of {geometry[2]} is unique.")
    return [{"detector": geometry[0], "beam": geometry[1]} for geometry in ref_geoms]
def compare_geometries(detectorA, detectorB):
    """Return whether two detector models agree within loose tolerances.

    Delegates to ``detectorA.is_similar_to`` with fixed axis/origin
    tolerances, ignoring the trusted range.
    """
    tolerances = dict(
        fast_axis_tolerance=0.1,
        slow_axis_tolerance=0.1,
        origin_tolerance=10,
        ignore_trusted_range=True,
    )
    return detectorA.is_similar_to(detectorB, **tolerances)
def find_relevant_reference_geometry(imageset, geometry_list):
    """Return the first reference geometry whose detector matches imageset's.

    Raises Exception when none of the candidates is similar enough.
    """
    detector = imageset.get_detector()
    for candidate in geometry_list:
        if compare_geometries(candidate["detector"], detector):
            return candidate
    raise Exception("No appropriate reference geometry found")
| |
import os, sys
import logging
import traceback
import settings
from datetime import datetime
import time
# make things easier so people don't have to install pygments
try:
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
pygments_found=True
except ImportError:
pygments_found=False
from zipstream import ZipStream
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from hq.models import ExtUser, Domain
from hq.utils import build_url
from requestlogger.models import RequestLog
from xformmanager.models import FormDefModel
from xformmanager.manager import XFormManager
from buildmanager import xformvalidator
from buildmanager.jar import validate_jar, extract_xforms
from buildmanager.exceptions import BuildError
# Root directory under which all build artifacts (jars, jads, extracted
# xforms) are stored, taken from the buildmanager app's configuration.
BUILDFILES_PATH = settings.RAPIDSMS_APPS['buildmanager']['buildpath']
class Project (models.Model):
    """
    A project is a high level container for a given build project. A project
    can contain a history of builds
    """
    domain = models.ForeignKey(Domain)
    name = models.CharField(max_length=255)
    description = models.CharField(max_length=512, null=True, blank=True)
    # the optional project id in a different server (e.g. the build server)
    project_id = models.CharField(max_length=20, null=True, blank=True)

    @property
    def downloads(self):
        '''Get all the downloads associated with this project, across
        builds.'''
        return BuildDownload.objects.filter(build__project=self)

    def get_non_released_builds(self):
        '''Get all non-released builds for this project'''
        # Newest package first.
        return self.builds.exclude(status="release").order_by('-package_created')

    def get_released_builds(self):
        '''Get all released builds for a project'''
        # Most recently released first.
        return self.builds.filter(status="release").order_by('-released')

    def get_latest_released_build(self):
        '''Gets the latest released build for a project, based on the
        released date.'''
        # Returns None implicitly when there are no releases.
        releases = self.get_released_builds()
        if releases:
            return releases[0]

    def get_latest_jar_url(self):
        '''Get the URL for the latest released jar file, empty if no builds
        have been released'''
        build = self.get_latest_released_build()
        if build:
            return reverse('get_latest_buildfile',
                           args=(self.id,
                                 build.get_jar_filename()))
        return None

    def get_latest_jad_url(self):
        '''Get the URL for the latest released jad file, empty if no builds
        have been released'''
        build = self.get_latest_released_build()
        if build:
            return reverse('get_latest_buildfile',
                           args=(self.id,
                                 build.get_jad_filename()))
        return None

    def get_buildURL(self):
        """Hard coded build url for our build server"""
        return 'http://build.dimagi.com:250/viewType.html?buildTypeId=bt%s' % self.project_id

    def num_builds(self):
        '''Get the number of builds associated with this project'''
        return self.builds.all().count()

    def __unicode__(self):
        # Display a project by its human-readable name.
        return unicode(self.name)
# Sentinel IP recorded when a build's upload request log is unavailable.
UNKNOWN_IP = "0.0.0.0"
# Lifecycle states for a ProjectBuild: every build starts as a standard
# build and may later be promoted to a release.
BUILD_STATUS = (
    ('build', 'Standard Build'),
    ('release', 'Release'),
)
class ProjectBuild(models.Model):
    '''When a jad/jar is built, it should correspond to a unique ReleasePackage
    With all corresponding meta information on release info and build
    information such that it can be traced back to a url/build info in source
    control.'''
    project = models.ForeignKey(Project, related_name="builds")
    # we have it as a User instead of ExtUser here because we want our
    # build server User to be able to push to multiple domains
    uploaded_by = models.ForeignKey(User, related_name="builds_uploaded")
    status = models.CharField(max_length=64, choices=BUILD_STATUS, default="build")
    # the teamcity build number
    build_number = models.PositiveIntegerField()
    # the source control revision number
    revision_number = models.CharField(max_length=255, null=True, blank=True)
    # the "release" version. e.g. 2.0.1
    version = models.CharField(max_length=20, null=True, blank=True)
    package_created = models.DateTimeField()
    # Absolute paths (under BUILDFILES_PATH) to the built jar/jad artifacts.
    jar_file = models.FilePathField(_('JAR File Location'),
                                    match='.*\.jar$',
                                    recursive=True,
                                    path=BUILDFILES_PATH,
                                    max_length=255)
    jad_file = models.FilePathField(_('JAD File Location'),
                                    match='.*\.jad$',
                                    recursive=True,
                                    path=BUILDFILES_PATH,
                                    max_length=255)
    description = models.CharField(max_length=512, null=True, blank=True)
    # release info
    released = models.DateTimeField(null=True, blank=True)
    released_by = models.ForeignKey(User, null=True, blank=True, related_name="builds_released")

    def __unicode__(self):
        return "%s build: %s. jad: %s, jar: %s" %\
               (self.project, self.build_number, self.jad_file, self.jar_file)

    def __str__(self):
        # Python 2 str(): encode the unicode representation.
        return unicode(self).encode('utf-8')

    def get_display_string(self):
        '''Like calling str() but with a url attached'''
        return "%s\nurl on server: %s" % (str(self),
                                          build_url(reverse('show_build',
                                                            args=(self.id,))))

    def get_jar_download_count(self):
        # Number of recorded jar downloads for this build.
        return len(self.downloads.filter(type="jar"))

    def get_jad_download_count(self):
        # Number of recorded jad downloads for this build.
        return len(self.downloads.filter(type="jad"))

    @property
    def upload_information(self):
        '''Get the upload request information associated with this,
        if it is present.'''
        try:
            return BuildUpload.objects.get(build=self).log
        except BuildUpload.DoesNotExist:
            return None

    def save(self):
        """Override save to provide some simple enforcement of uniqueness to the build numbers
        generated by the submission of the build"""
        # NOTE(review): this override drops Django's save(*args, **kwargs)
        # signature, so callers cannot pass force_insert/using — confirm
        # nothing relies on those.
        # Only reject duplicates on first insert (self.id == None); updates
        # of an existing row with the same build number are allowed.
        if ProjectBuild.objects.filter(project=self.project).filter(build_number=self.build_number).count() > 0 and self.id == None:
            raise Exception ("Error, the build number must be unique for this project build: " + str(self.build_number) + " project: " + str(self.project.id))
        else:
            super(ProjectBuild, self).save()

    def get_jar_size(self):
        # Size of the jar artifact on disk, in bytes.
        return os.path.getsize(self.jar_file)

    def get_jad_size(self):
        # Size of the jad artifact on disk, in bytes.
        return os.path.getsize(self.jad_file)

    def get_jar_filename(self):
        '''Returns the name (no paths) of the jar file'''
        return os.path.basename(self.jar_file)

    def get_jad_filename(self):
        '''Returns the name (no paths) of the jad file'''
        return os.path.basename(self.jad_file)

    def get_zip_filename(self):
        '''Returns the name (no paths) of the zip file, which will include the version number infromation'''
        fname = os.path.basename(self.jar_file)
        basename = os.path.splitext(fname)[0]
        zipfilename = basename + "-build" + str(self.build_number) + ".zip"
        return zipfilename

    def get_jar_filestream(self):
        # Open the jar for reading; on failure log and return None.
        # NOTE(review): jars are binary but this opens in text mode 'r' —
        # 'rb' is probably intended; confirm before changing.
        try:
            fin = open(self.jar_file,'r')
            return fin
        except Exception, e:
            logging.error("Unable to open jarfile", extra={"exception": e,
                                                           "jar_file": self.jar_file,
                                                           "build_number": self.build_number,
                                                           "project_id": self.project.id})

    def get_jad_filestream(self, mode='r'):
        # Open the jad in the given mode ('r' to read, 'w' to rewrite);
        # on failure log and return None.
        try:
            fin = open(self.jad_file, mode)
            return fin
        except Exception, e:
            logging.error("Unable to open jadfile", extra={"exception": e,
                                                           "jad_file": self.jad_file,
                                                           "build_number": self.build_number,
                                                           "project_id": self.project.id})

    def get_zip_filestream(self):
        # Zip up the directory containing the jar and return the zip bytes
        # (despite the name, this returns the data, not a stream).
        try:
            zpath = str(os.path.dirname(self.jar_file) + "/")
            buf = StringIO()
            zp = ZipStream(zpath)
            for data in zp:
                buf.write(data)
                #print data
            buf.flush()
            buf.seek(0)
            return buf.read()
        except Exception, e:
            logging.error("Unable to open create ZipStream", extra={"exception": e,
                                                                    "build_number": self.build_number,
                                                                    "project_id": self.project.id})

    def get_jad_contents(self):
        '''Returns the contents of the jad as text.'''
        # Joined with <br> for direct display in HTML.
        file = self.get_jad_filestream()
        lines = []
        for line in file:
            lines.append(line.strip())
        return "<br>".join(lines)

    def get_jad_properties(self):
        '''Reads the properties of the jad file and returns a dict'''
        file = self.get_jad_filestream()
        sep = ': '
        proplines = [line.strip() for line in file.readlines() if line.strip()]
        jad_properties = {}
        for propln in proplines:
            i = propln.find(sep)
            if i == -1:
                pass #log error?
            # NOTE(review): when the separator is missing, i == -1 and the
            # slices below still run, producing a mangled key/value — the
            # 'pass' above does not skip the line; confirm intended.
            (propname, propvalue) = (propln[:i], propln[i+len(sep):])
            jad_properties[propname] = propvalue
        return jad_properties

    def write_jad(self, properties):
        '''Write a property dictionary back to the jad file'''
        # Required MIDlet properties go first, in this fixed order; any
        # remaining properties follow alphabetically.
        ordered = ['MIDlet-Name', 'MIDlet-Version', 'MIDlet-Vendor', 'MIDlet-Jar-URL',
                   'MIDlet-Jar-Size', 'MIDlet-Info-URL', 'MIDlet-1']
        for po in ordered:
            if po not in properties.keys():
                pass #log error -- required property is missing?
        unordered = [propname for propname in properties.keys() if propname not in ordered]
        ordered.extend(sorted(unordered))
        proplines = ['%s: %s\n' % (propname, properties[propname]) for propname in ordered]
        file = self.get_jad_filestream('w')
        file.write(''.join(proplines))
        file.close()

    def add_jad_properties(self, propdict):
        '''Add properties to the jad file'''
        # Read-modify-write: existing keys in propdict are overwritten.
        props = self.get_jad_properties()
        props.update(propdict)
        self.write_jad(props)

    def get_xform_html_summary(self):
        '''This is used by the view. It is pretty cool, but perhaps misplaced.'''
        to_return = []
        for form in self.xforms.all():
            try:
                to_return.append(form.get_link())
            except Exception, e:
                # we don't care about this
                pass
        if to_return:
            return "<br>".join(to_return)
        else:
            return "No X-Forms found"

    def get_zip_downloadurl(self):
        """do a reverse to get the urls for the given project/buildnumber for the direct zipfile download"""
        return reverse('get_buildfile',
                       args=(self.project.id,
                             self.build_number,
                             self.get_zip_filename()))

    def get_jar_downloadurl(self):
        """do a reverse to get the urls for the given project/buildnumber for the direct download"""
        return reverse('get_buildfile',
                       args=(self.project.id,
                             self.build_number,
                             os.path.basename(self.jar_file)))

    def get_jad_downloadurl(self):
        """do a reverse to get the urls for the given project/buildnumber for the direct download"""
        return reverse('get_buildfile',
                       args=(self.project.id,
                             self.build_number,
                             os.path.basename(self.jad_file)))

    def get_buildURL(self):
        """Hard coded build url for our build server"""
        return 'http://build.dimagi.com:250/viewLog.html?buildTypeId=bt%s&buildNumber=%s' % \
               (self.project.project_id, self.build_number)

    def set_jadfile(self, filename, filestream):
        """Simple utility function to save the uploaded file to the right location and set the property of the model"""
        # Jad is text, so written in mode 'w'. Model is not saved here.
        try:
            new_file_name = os.path.join(self._get_destination(), filename)
            fout = open(new_file_name, 'w')
            fout.write( filestream.read() )
            fout.close()
            self.jad_file = new_file_name
        except Exception, e:
            logging.error("Error, saving jadfile failed", extra={"exception":e, "jad_filename":filename})

    def set_jarfile(self, filename, filestream):
        """Simple utility function to save the uploaded file to the right location and set the property of the model"""
        # Jar is binary, so written in mode 'wb'. Model is not saved here.
        try:
            new_file_name = os.path.join(self._get_destination(), filename)
            fout = open(new_file_name, 'wb')
            fout.write( filestream.read() )
            fout.close()
            self.jar_file = new_file_name
        except Exception, e:
            logging.error("Error, saving jarfile failed", extra={"exception":e, "jar_filename":filename})

    def _get_destination(self):
        """The directory this build saves its data to. Defined in
        the config and then /xforms/<project_id>/<build_id>/ is
        appended. If it doesn't exist, the directory is
        created by this method."""
        destinationpath = os.path.join(BUILDFILES_PATH,
                                       str(self.project.id),
                                       str(self.build_number))
        if not os.path.exists(destinationpath):
            os.makedirs(destinationpath)
        return destinationpath

    def validate_jar(self, include_xforms=False):
        '''Validates this build's jar file. By default, does NOT validate
        the jar's xforms.'''
        validate_jar(self.jar_file, include_xforms)

    def validate_xforms(self):
        '''Validates this build's xforms.'''
        # Collect all validation failures and raise them together.
        errors = []
        for form in self.xforms.all():
            try:
                xformvalidator.validate(form.file_location)
            except Exception, e:
                errors.append(e)
        if errors:
            raise BuildError("Problem validating xforms for %s!" % self, errors)

    def check_and_release_xforms(self):
        '''Checks this build's xforms against the xformmanager and releases
        them, if they pass compatibility tests'''
        errors = []
        to_skip = []
        to_register = []
        for form in self.xforms.all():
            try:
                formdef = xformvalidator.validate(form.file_location)
                modelform = FormDefModel.get_model(formdef.target_namespace,
                                                   formdef.version)
                if modelform:
                    # if the model form exists we must ensure it is compatible
                    # with the version we are trying to release
                    existing_formdef = modelform.to_formdef()
                    differences = existing_formdef.get_differences(formdef)
                    if differences.is_empty():
                        # this is all good
                        to_skip.append(form)
                    else:
                        raise BuildError("""Schema %s is not compatible with %s.
                                         Because of the following differences:
                                         %s
                                         You must update your version number!"""
                                         % (existing_formdef, formdef, differences))
                else:
                    # this must be registered
                    to_register.append(form)
            except Exception, e:
                errors.append(e)
        if errors:
            raise BuildError("Problem validating xforms for %s!" % self, errors)
        # finally register
        manager = XFormManager()
        # TODO: we need transaction management
        for form in to_register:
            try:
                formdefmodel = manager.add_schema(form.get_file_name(),
                                                  form.as_filestream())
                # Attribute the registration to the uploading request/user
                # when known, otherwise fall back to the build's uploader.
                upload_info = self.upload_information
                if upload_info:
                    formdefmodel.submit_ip = upload_info.ip
                    user = upload_info.user
                else:
                    formdefmodel.submit_ip = UNKNOWN_IP
                    user = self.uploaded_by
                if user:
                    try:
                        extuser = ExtUser.objects.get(id=user.id)
                        formdefmodel.uploaded_by = extuser
                    except ExtUser.DoesNotExist:
                        # they must have just been a regular User
                        formdefmodel.uploaded_by = None
                formdefmodel.bytes_received = form.size
                formdefmodel.form_display_name = form.get_file_name()
                formdefmodel.domain = self.project.domain
                formdefmodel.save()
            except Exception, e:
                # log the error with the stack, otherwise this is hard to track down
                info = sys.exc_info()
                logging.error("Error registering form in build manager: %s\n%s" % \
                              (e, traceback.print_tb(info[2])))
                errors.append(e)
        if errors:
            raise BuildError("Problem registering xforms for %s!" % self, errors)

    def set_jad_released(self):
        '''Set the appropriate 'release' properties in the jad'''
        self.add_jad_properties({
            'Build-Number': '*' + str(self.get_release_number()), #remove * once we get a real build number
            'Released-on': time.strftime('%Y-%b-%d %H:%M', time.gmtime())
        })

    #FIXME!
    def get_release_number(self):
        '''return an incrementing build number per released build, unique across all builds for a given commcare project'''
        import random
        return random.randint(1000, 9999) #return a high random number until we get the incrementing plugged in

    def release(self, user):
        '''Release a build. This does a number of things:
        1. Validates the Jar. The specifics of this are still in flux but at the very
        least it should be extractable, and there should be at least one xform.
        2. Ensures all the xforms have valid xmlns, version, and uiversion attributes
        3. Checks if xforms with the same xmlns and version are registered already
        If so: ensures the current forms are compatible with the registered forms
        If not: registers the forms
        4. Updates the build status to be released, sets the released and
        released_by properties
        This method will raise an exception if, for any reason above, the build cannot
        be released.'''
        if self.status == "release":
            raise BuildError("Tried to release an already released build!")
        else:
            # TODO: we need transaction management. Any of these steps can raise exceptions
            self.validate_jar()
            self.validate_xforms()
            self.check_and_release_xforms()
            self.set_jad_released()
            self.status = "release"
            self.released = datetime.now()
            self.released_by = user
            self.save()
            # Logged at error level so release events reach the admins.
            logging.error("%s just released build %s! We just thought you might want to be keeping tabs..." %
                          (user, self.get_display_string()))
def extract_and_link_xforms(sender, instance, created, **kwargs):
    '''Extracts all xforms from this build's jar and creates
    references on disk and model objects for them.'''
    # post_save signal handler for ProjectBuild.
    # only do this the first time we save, not on updates
    if not created:
        return
    try:
        xforms = extract_xforms(instance.jar_file, instance._get_destination())
        for form in xforms:
            form_model = BuildForm.objects.create(build=instance, file_location=form)
        num_created = len(instance.xforms.all())
        if num_created == 0:
            logging.warn("Build %s didn't have any linked xforms! Why not?!" % instance)
    except Exception, e:
        # Extraction failures are logged but never block the save.
        logging.error("Problem extracting xforms for build: %s, the error is: %s" %\
                      (instance, e))
# Whenever a ProjectBuild row is first created, extract and link its xforms.
post_save.connect(extract_and_link_xforms, sender=ProjectBuild)
class BuildForm(models.Model):
    """Class representing the location of a single build's xform on
    the file system."""
    build = models.ForeignKey(ProjectBuild, related_name="xforms")
    file_location = models.FilePathField(_('Xform Location'),
                                         recursive=True,
                                         path=BUILDFILES_PATH,
                                         max_length=255)

    def get_file_name(self):
        '''Get a readable file name for this xform'''
        return os.path.basename(self.file_location)

    @property
    def size(self):
        # Size of the xform file on disk, in bytes.
        return os.path.getsize(self.file_location)

    def get_url(self):
        '''Get the url where you can view this form'''
        return reverse('get_build_xform', args=(self.id,))

    def as_filestream(self):
        '''Gets a raw handle to the form as a file stream'''
        # On failure, logs and returns None.
        try:
            fin = open(self.file_location,'r')
            return fin
        except Exception, e:
            logging.error("Unable to open xform: %s" % self,
                          extra={"exception": e })

    def get_text(self):
        '''Gets the body of the xform, as text'''
        # On failure, logs and returns None.
        try:
            file = self.as_filestream()
            text = file.read()
            file.close()
            return text
        except Exception, e:
            logging.error("Unable to open xform: %s" % self,
                          extra={"exception": e })

    def to_html(self):
        '''Gets the body of the xform, as pretty printed text'''
        # Syntax-highlight with pygments when it is installed.
        raw_body = self.get_text()
        if pygments_found:
            return highlight(raw_body, HtmlLexer(), HtmlFormatter())
        return raw_body

    def get_link(self):
        '''A clickable html displayable version of this for use in templates'''
        return '<a href=%s target=_blank>%s</a>' % (self.get_url(), self.get_file_name())

    def __unicode__(self):
        return "%s: %s" % (self.build, self.get_file_name())
# Kinds of downloadable build artifacts tracked by BuildDownload.
BUILD_FILE_TYPE = (
    ('jad', '.jad file'),
    ('jar', '.jar file'),
)
class BuildUpload(models.Model):
    """Represents an instance of the upload of a build."""
    # One upload record per build, tied to the HTTP request that carried it.
    build = models.ForeignKey(ProjectBuild, unique=True)
    log = models.ForeignKey(RequestLog, unique=True)
class BuildDownload(models.Model):
    """Represents an instance of a download of a build file. Included are the
    type of file, the build id, and the request log."""
    # 'jad' or 'jar', see BUILD_FILE_TYPE.
    type = models.CharField(max_length=3, choices=BUILD_FILE_TYPE)
    build = models.ForeignKey(ProjectBuild, related_name="downloads")
    log = models.ForeignKey(RequestLog, unique=True)

    def __unicode__(self):
        return "%s download for build %s. Request: %s" %\
               (self.type, self.build, self.log)
| |
import sys
import os
import random
import math
import statistics
from collections import namedtuple
import seaborn as sns
import numpy as np
from fasta import *
from fastq import *
from log_progress import *
# Lightweight record of one sequencing read: positional index, nucleotide
# sequence and per-base quality string.
Read = namedtuple('Read', 'index seq qual')
def replace_low_quality(data, q_threshold):
    """Mask low-confidence bases in a FASTQ record.

    Any base whose Phred score (quality char minus '!') is <= q_threshold is
    replaced by 'N' with quality '!'; all other bases pass through unchanged.
    Returns a new FASTQData with the same name/attr.
    """
    masked_seq = []
    masked_qual = []
    for base, qchar in zip(data.seq, data.qual):
        if ord(qchar) - ord('!') <= q_threshold:
            masked_seq.append('N')
            masked_qual.append('!')
        else:
            masked_seq.append(base)
            masked_qual.append(qchar)
    return FASTQData(seq = ''.join(masked_seq), qual = ''.join(masked_qual),
                     name = data.name, attr = data.attr)
def median(data):
    """Median Phred quality score (offset '!') of the record's quality string."""
    return statistics.median(ord(q) - ord('!') for q in data.qual)
def hamm(alpha, beta, max_err = 0):
    """Return True when alpha and beta differ in at most max_err positions.

    Positions where either sequence has an 'N' are ignored; comparison stops
    early (returning False) as soon as the error budget is exceeded.
    """
    mismatches = 0
    for x, y in zip(alpha, beta):
        if x != y and x != 'N' and y != 'N':
            mismatches += 1
            if mismatches > max_err:
                return False
    return True
def make_best_pos_consensus(orig_seq, orig_num, seq_list):
    """Build a per-position weighted-majority consensus sequence.

    orig_seq contributes weight orig_num at every position; each (seq, cnt)
    pair in seq_list contributes cnt. 'N' votes are discarded before picking
    the winner at each position.
    """
    consensus = []
    for pos in range(len(orig_seq)):
        votes = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0}
        votes[orig_seq[pos]] += orig_num
        for other_seq, weight in seq_list:
            votes[other_seq[pos]] += weight
        del votes['N']
        consensus.append(max(votes.items(), key = lambda kv: kv[1])[0])
    return ''.join(consensus)
def classify(fqdata, low_threshold, high_threshold):
    """Assign a quality class to a FASTQ record by its median Phred score.

    Returns 2 when the median is >= high_threshold, 1 when it lies strictly
    between the two thresholds, and 0 otherwise (low quality).
    """
    med = median(fqdata)
    if med >= high_threshold:
        return 2
    if med > low_threshold:
        return 1
    return 0
def cls_stats(cls_seq, which_class):
    """Print unique-sequence and read counts for one quality class.

    cls_seq: list of {sequence: read_count} dicts, one dict per class.
    which_class: index of the class to report.

    Each count is followed by its fraction of the corresponding total across
    all classes, rounded to 4 decimal places.
    """
    n_seq = len(cls_seq[which_class])
    total_seq = sum(map(len, cls_seq))
    n_reads = sum(cls_seq[which_class].values())
    total_reads = sum(sum(d.values()) for d in cls_seq)
    print("\t", which_class, " seq:\t", n_seq, sep = "", end = "")
    print("(", round(n_seq / total_seq, 4), ")", sep = "")
    print("\t", which_class, " rds:\t", n_reads, sep = "", end = "")
    # BUG FIX: the read fraction previously divided the unique-sequence count
    # (len) by the total read count; use the class's read count instead so
    # the printed fraction matches the printed read count.
    print("(", round(n_reads / total_reads, 4), ")", sep = "")
def classify_fastq_file(filepath, replace_threshold, low_threshold, high_threshold):
    """Read a FASTQ file and bucket its unique sequences into three quality
    classes (0 = low, 1 = medium, 2 = high) by median Phred score.

    low_threshold / high_threshold are PERCENTILES of the per-read median
    quality distribution, not raw quality values. Returns a list of three
    {sequence: read_count} dicts and plots the median-quality histogram.
    """
    print("Fastq records:\t", int(num_lines(filepath) / 4), sep = "")
    q_distr = []
    # median - sequences
    seqmed = {}
    cls_seq = [{}, {}, {}]
    # First pass: mask low-quality bases, then group sequences by their
    # median quality value.
    for fqdata in FASTQParser(filepath):
        fqdata = replace_low_quality(fqdata, replace_threshold)
        med = median(fqdata)
        q_distr.append(med)
        if med not in seqmed: seqmed[med] = []
        seqmed[med].append(fqdata.seq)
    # Convert the percentile thresholds to concrete median-quality cutoffs.
    lo, hi = np.trunc(np.percentile(q_distr, [low_threshold, high_threshold]))
    print("Lo / hi percentile median values:", (lo, hi), sep = "\t")
    kmers = {}
    # Second pass: assign each median-quality bucket to a class and count
    # read occurrences per unique sequence.
    for med in seqmed:
        cls = 2
        if med < lo:
            cls = 0
        elif med >= lo and med < hi:
            cls = 1
        for seq in seqmed[med]:
            cls_seq[cls][seq] = cls_seq[cls].get(seq, 0) + 1
            # For non-high-quality reads, also tally the fixed 10-mer at
            # positions 10..19 (diagnostic output only).
            if cls == 0 or cls == 1:
                kmers[seq[10:20]] = kmers.get(seq[10:20], 0) + 1
    print(sorted(kmers.items(), reverse = True, key = lambda x: x[1]))
    print("Classes:")
    cls_stats(cls_seq, 0)
    cls_stats(cls_seq, 1)
    cls_stats(cls_seq, 2)
    print()
    # sns.distplot(list(kmers.values()), bins = max(list(kmers.values())) - min(list(kmers.values())) + 1, axlabel = "Kmer distribution");
    sns.distplot(q_distr, bins = max(q_distr) - min(q_distr) + 1, axlabel = "Median quality distribution");
    return cls_seq
def X_clust(minors, majors, x_clust_hamm):
    """
    N-clust: merge bad sequences with all other sequences.
    H-clust: merge medium quality sequences with the high quality ones.
    """
    # minors/majors: {sequence: count} dicts. Each minor sequence within
    # x_clust_hamm mismatches of one or more majors has its count split
    # evenly among those majors; the surviving (unmatched) minors and the
    # updated majors (re-keyed by per-position consensus) are returned.
    def _get_keys(seq_dict):
        # Stable key list plus key -> index lookup for a dict.
        keys = list(seq_dict.keys())
        key_ind = dict([(keys[i], i) for i in range(len(keys))])
        return keys, key_ind
    cands = {}
    merged_seq = {x: set([]) for x in majors}
    minor_keys, minor_key_inds = _get_keys(minors)
    major_keys, major_keys_inds = _get_keys(majors)
    print("Computing distances...")
    # All-vs-all approximate matching: cands maps a minor's index to the
    # list of major indices it is close to.
    for i in range(len(minors)):
        for j in range(len(majors)):
            if hamm(minor_keys[i], major_keys[j], x_clust_hamm):
                if i not in cands: cands[i] = []
                cands[i].append(j)
    print("Merging sequences...")
    # Distribute each matched minor's count evenly across its target majors.
    for minor_key, targets in cands.items():
        for target in targets:
            merged_seq[major_keys[target]].add((minor_key, minors[minor_keys[minor_key]] / len(targets)))
    # Minors that matched nothing survive unchanged.
    cand_keys = set([minor_keys[k] for k in cands])
    new_minors = {x: minors[x] for x in minor_keys if x not in cand_keys}
    print("# candidates:", len(cands))
    print("# distants", len(new_minors))
    print("Making consensuses...")
    # sum_pre = sum(majors.values())
    n_merged = 0
    new_seq_dict = {}
    # Re-derive each major's sequence as a weighted consensus with its
    # absorbed minors; distinct majors may collapse onto one consensus,
    # in which case their counts accumulate (tracked by n_merged).
    for seq, seq_ls in merged_seq.items():
        new_seq = make_best_pos_consensus(seq, majors[seq], [(minor_keys[x[0]], x[1]) for x in seq_ls])
        if new_seq not in new_seq_dict:
            new_seq_dict[new_seq] = 0
        else:
            n_merged += 1
        new_seq_dict[new_seq] += majors[seq] + sum([x[1] for x in merged_seq[seq]])
    print("# merged:", n_merged)
    # sum_post = sum(new_seq_dict.values())
    # if sum_pre != sum_post:
    #     print("Sums are not equal!", sum_pre, "vs.", sum_post)
    for seq in new_seq_dict:
        new_seq_dict[seq] = round(new_seq_dict[seq], 3)
    return new_minors, new_seq_dict
def write_and_blast(seq_dict, f1, out_seq, out_blast, max_sequences):
    """Write the top max_sequences sequences (by count, descending) to a TSV
    file and a FASTA file, then BLAST the FASTA against the HLA database,
    saving tabular results to out_blast.
    """
    with open(out_seq, 'w') as file:
        i = 0
        for key, val in reversed(sorted(seq_dict.items(), key = lambda x: x[1])):
            # print(val, " (", round(100 * val / sum(final_seq.values()), 4), "%)", sep = '')
            print(val, key, sep = '\t', file = file)
            i += 1
            if i == max_sequences: break
    ls = []
    i = 0
    # Same top-N ordering, this time as FASTA records whose names encode the
    # rank, count and percentage of total reads.
    for key, val in reversed(sorted(seq_dict.items(), key = lambda x: x[1])):
        ls.append(faseq(name = "sequence" + str(i) + "_" + str(val) + "_(" + str(round(100 * val / sum(seq_dict.values()), 4)) + ")", seq = key, comm = ''))
        i += 1
        if i == max_sequences: break
    write_fasta(f1 + ".seq.fasta.txt", ls)
    # NOTE(review): shell command built by string concatenation — f1 and
    # out_blast reach the shell unquoted; safe only for trusted file names.
    os.system("blastn -query " + f1 + ".seq.fasta.txt" + " -db hlabase/hlabase.fasta -outfmt 6 -num_alignments 4 > " + out_blast)
def clusterise_sequences(f1, replace_threshold, low_threshold, high_threshold, n_clust_hamm, h_clust_hamm, max_sequences, out_seq_prefix = "tmp.topseq1", out_blast_prefix = "tmp.blast1"):
    """Run the full clustering pipeline on one FASTQ file.

    Steps: classify reads into low/medium/high quality classes, merge the
    low-quality class into the medium one (N-clusterisation), merge medium
    into high (H-clusterisation), fold the remaining medium sequences into
    the major class, then write the top sequences and BLAST them.

    f1: path to the input FASTQ file.
    replace_threshold: Phred cutoff below which bases are masked to 'N'.
    low_threshold / high_threshold: percentiles splitting the classes.
    n_clust_hamm / h_clust_hamm: mismatch budgets for the two merge passes.
    max_sequences: how many top sequences to write/BLAST.
    """
    # Removed an unused local ("prefix", derived from f1) that had no effect.
    print("*** Searching for unique sequences ***")
    cls_seq = classify_fastq_file(f1, replace_threshold, low_threshold, high_threshold)
    print("*** N-clusterisation ***")
    cls_seq[0], cls_seq[1] = X_clust(cls_seq[0], cls_seq[1], n_clust_hamm)
    print("Intermediate statistics by class:")
    cls_stats(cls_seq, 0)
    cls_stats(cls_seq, 1)
    print()
    print("*** H-clusterisation ***")
    cls_seq[1], cls_seq[2] = X_clust(cls_seq[1], cls_seq[2], h_clust_hamm)
    print("Final statistics by class:")
    cls_stats(cls_seq, 1)
    cls_stats(cls_seq, 2)
    print()
    # Whatever is left in the medium class joins the major class for output.
    cls_seq[2].update(cls_seq[1])
    print("Move minors to the major class.")
    print("Final number of sequences:", len(cls_seq[2]), sep = "\t")
    print("*** Writing results ***")
    # write_and_blast(cls_seq[1], f1 + ".minor", out_seq_prefix + ".minor.txt", out_blast_prefix + ".minor.txt", max_sequences)
    write_and_blast(cls_seq[2], f1 + ".major", out_seq_prefix + ".major.txt", out_blast_prefix + ".major.txt", max_sequences)
    print("\n*** DONE ***")
if __name__ == '__main__':
    # NOTE(review): aggregate_sequences is not defined anywhere in this
    # module — this entry point raises NameError as written. It looks like a
    # stale name for clusterise_sequences (whose signature also differs);
    # confirm and update before running as a script.
    aggregate_sequences(sys.argv[1], 50, 7, 5, "tmp.topseq1.txt", "tmp.blast1.txt")
    aggregate_sequences(sys.argv[2], 50, 7, 5, "tmp.topseq2.txt", "tmp.blast2.txt")
| |
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.http import HttpResponse
from . import db_views
import json
import time
def login_view(request):
    """Render the login page and handle login attempts.

    GET (or any non-POST): show the login form, or redirect straight to the
    dashboard when the user is already authenticated.
    POST: authenticate with the submitted email/password; on success log the
    user in and redirect to the dashboard, otherwise re-render the form with
    an error message.
    """
    if request.user.is_authenticated():
        return redirect("/dashboard")
    # If user is not posting, just return the page
    if not request.method == 'POST':
        return render(request, 'login.html')
    # Make a dict and add the username and password that was entered
    data = {}
    data['username'] = request.POST.get('email', False)
    data['password'] = request.POST.get('password', False)
    # Try to login
    user = authenticate(username=data['username'], password=data['password'])
    if user is not None:
        if user.is_active:
            # Approved account: start the session and go to the dashboard.
            login(request, user)
            return redirect("/dashboard")
        # Account exists but has not been approved yet.
        data['errors'] = "The requested account has not been approved a socs admin"
        return render(request, 'login.html', dictionary=data)
    # Failed login attempt: only show an error when a username was entered.
    if data['username'] != '':
        data['errors'] = "Username and password combination does not exist"
    # BUG FIX: previously a failed attempt with an empty username fell off
    # the end of the view and returned None (a server error in Django);
    # always return a rendered response.
    return render(request, 'login.html', dictionary=data)
def find_school_ajax(request):
    """AJAX endpoint: look up the school for the logged-in user's email."""
    payload = db_views.find_school({'email': request.user.username})
    return HttpResponse(json.dumps(payload), content_type="application/json")
@login_required(redirect_field_name='/login')
def delete_school_ajax(request):
    """AJAX endpoint: delete a school by name and address.

    Admin-only and POST-only; any other access returns None.
    """
    is_admin = 'Administrator' in request.user.groups.values_list('name', flat=True)
    if not is_admin:
        return None
    if request.method != 'POST':
        return None
    payload = {
        'school_name': request.POST.get('school_name'),
        'school_address': request.POST.get('school_address'),
    }
    result = db_views.delete_school(payload)
    return HttpResponse(json.dumps(result), content_type="application/json")
@login_required(redirect_field_name='/login')
def delete_student_ajax(request):
    """AJAX endpoint: delete a student record by email.

    Admin-only and POST-only; any other access returns None. Responds with
    an empty JSON-typed response on success.
    """
    if 'Administrator' not in request.user.groups.values_list('name', flat=True):
        return None
    if not request.method == 'POST':
        return None
    data = {"email": request.POST.get('email')}
    db_views.delete_a_student_from_database(data)
    # Removed an unused lookup of 'student_email' that had no effect.
    return HttpResponse(content_type="application/json")
@login_required(redirect_field_name='/login')
def dashboard_view(request):
    """Render the admin dashboard for administrators, otherwise the
    student dashboard."""
    if 'Administrator' in request.user.groups.values_list('name', flat=True):
        data = {}
        data['schools'] = db_views.get_all_schools()
        # Accounts awaiting approval.
        pending = User.objects.filter(is_active=False)
        data['users'] = db_views.get_people([user.username for user in pending])
        # Approved accounts; drop users with no matching people record.
        active_accounts = User.objects.filter(is_active=True)
        active_users = db_views.get_people(
            [acct.username for acct in active_accounts])
        # BUG FIX: filter() is a lazy iterator on Python 3 — materialize it
        # so the template receives a real list. Leftover debug prints removed.
        data['active_users'] = [person for person in active_users if person]
        return render(request, 'admin_dash.html', dictionary=data)
    return render(request, 'student_dash.html')
@login_required(redirect_field_name='/login')
def create_school_view(request):
    """ render the create school view. TODO: add validation """
    # Only administrators may create schools.
    group_names = request.user.groups.values_list('name', flat=True)
    if 'Administrator' not in group_names:
        return redirect("/dashboard")
    if request.method == 'GET':
        return render(request, 'create_school.html')
    if request.method == 'POST':
        # TODO: Actually implement this-This is a copy of create user
        if request.POST.get("save"):
            is_valid, data = validate_new_school(request)
            if is_valid:
                # Data is valid and let's store it in the db
                db_views.add_school_to_db(data)
                return redirect("/login")
            return render(request, 'create_user.html', dictionary=data)
        if request.POST.get("cancel"):
            return redirect("/login")
    # Fallback for a POST without save/cancel, or any other method.
    return render(request, 'create_user.html')
def validate_new_school(request):
    """Validate the school data submitted in *request*.

    Returns (is_valid, data) where data maps form field names to the
    values the user entered (plus err_* messages once validation is
    written).

    TODO: real validation is not implemented yet -- every submission is
    currently reported as valid.
    """
    data = create_school_data(request)
    return True, data
def create_school_data(request):
    """Collect the new-school form fields from request.POST into a dict.

    Numeric fields are converted with int() and will raise ValueError on
    non-numeric input.  'start_periods', 'end_periods' and 'days_active'
    are parallel whitespace-separated lists describing schedule blocks:
    start_periods[i]/end_periods[i] give the period range and
    days_active[i] is a comma-separated day list.
    """
    data = {}
    data['name'] = request.POST.get('name', False)
    data['address'] = request.POST.get('address', False)
    data['academicYear'] = request.POST.get('academicYear', False)
    data['daysInYear'] = int(request.POST.get('daysInYear', False))
    data['daysInASchedule'] = int(request.POST.get('daysInSch', False))
    data['semesterInYear'] = int(request.POST.get('semesterInYear', False))
    data['periodInDay'] = int(request.POST.get('periodInDay', False))
    # Parse the block information.  Default to '' (not False) so a
    # missing field yields an empty list instead of an AttributeError
    # on False.split().
    start_periods = request.POST.get('start_periods', '').split()
    end_periods = request.POST.get('end_periods', '').split()
    days_active = request.POST.get('days_active', '').split()
    data['block_info'] = [
        {'start': int(begin), 'end': int(stop), 'days_active': days.split(',')}
        for begin, stop, days in zip(start_periods, end_periods, days_active)]
    # Find all the semesters and lunches that were added
    data['semesters'] = []
    for i in range(int(data['semesterInYear'])):
        semester = request.POST.get('semester_' + str(i), False)
        if semester:
            data['semesters'].append(semester)
    data['lunches'] = []
    for i in range(int(data['periodInDay'])):
        lunch = request.POST.get('lunch_' + str(i), False)
        if lunch:
            # store the 1-indexed period number, not the form value
            data['lunches'].append(i + 1)
    return data
def send_friend_request_ajax(request):
    """AJAX endpoint: record a friend request from the logged-in user to
    the person named in the POST data, and echo the request as JSON."""
    sender_info = db_views.get_a_person(request.user.username)
    sender = json.loads(str(sender_info))
    data = {
        'email_of_sender': request.user.username,
        'first_name_emailer': sender['first_name'],
        'last_name': sender['last_name'],
        'email_of_sendee': request.POST.get('email_of_sendee', False),
        'first_name_emailee': request.POST.get('first_name_emailee', False),
        'last_name_emailee': request.POST.get('last_name_emailee', False),
    }
    db_views.send_a_friend_request(data)
    return HttpResponse(json.dumps(data), content_type="application/json")
def add_class_to_database_ajax(request):
    """AJAX endpoint: add a class to the logged-in user's schedule.

    The 'block' POST field is encoded as 'start-end:Day,Day,...'
    (e.g. '0-3:M,Tu,W') and is unpacked into start/end periods and the
    list of active days.  Debug printing has been removed.
    """
    data = {}
    data['username'] = request.user.username
    data['course_id'] = request.POST.get('course_id', False)
    data['course_name'] = request.POST.get('course_name', False)
    data['instructor'] = request.POST.get('instructor', False)
    # e.g. '0-3:M,Tu,W' -> periods '0-3', days ['M', 'Tu', 'W']
    block_text = request.POST.get('block', False).split(':')
    periods = block_text[0]
    data['days'] = block_text[1].split(',')
    start_end = periods.split('-')
    data['start_period'] = start_end[0]
    data['end_period'] = start_end[1]
    data['year'] = request.POST.get('year', False)
    data['semester'] = request.POST.get('semester', False)
    data['new_year_flag'] = False
    db_views.add_classes_to_database(data)
    return HttpResponse(json.dumps(data), content_type="application/json")
# def get_course_offering_ajax(request):
# # print("test")
# data= {}
# data['username']=request.user.username
# data['course_id'] = request.POST.get('course_id', False)
# data['course_name'] = request.POST.get('course_name', False)
# data['instructor'] = request.POST.get('instructor', False)
# # data['school'] = ''
# day = request.POST.get('days',False)
# day = day.split(" ")
# data['days'] = request.POST.get('days', False)
# data['start_period']= request.POST.get('start_period', False)
# data['end_period']= request.POST.get('end_period', False)
# data['year'] = request.POST.get('year', False)
# data['semester'] = request.POST.get('semester', False)
# data['new_year_flag']=False
# # data= {}
# # email = data['email']
# # year = data['year']
# data = {}
# data['email'] = request.user.username
# db_views.add_classes_to_database(data)
# return HttpResponse(json.dumps(data), content_type="application/json")
def get_assigned_schedule_ajax(request):
    """AJAX endpoint: return the logged-in user's assigned schedule as JSON."""
    schedule = db_views.get_assigned_schedule({'email': request.user.username})
    return HttpResponse(json.dumps(schedule), content_type="application/json")
def get_friend_ajax(request):
    """AJAX endpoint: return the logged-in user's friends list as JSON."""
    friends = db_views.get_friends_list({'email': request.user.username})
    return HttpResponse(json.dumps(friends), content_type="application/json")
def get_friends_schedules(request):
    """AJAX endpoint: for every friend, collect the courses that overlap
    with the logged-in user's schedule; return everything as JSON."""
    data = {'email': request.user.username}
    friends = db_views.get_friends_list(data)
    data['courses'] = [
        db_views.get_overlapping_friends_by_specific_course(
            data['email'], friend['email'])
        for friend in friends]
    return HttpResponse(json.dumps(data), content_type="application/json")
def get_friend_requests_ajax(request):
    """AJAX endpoint: return the pending friend requests sent to the
    logged-in user, stripped of internal fields, as JSON."""
    data = {}
    data['email_of_sendee'] = request.user.username
    info = db_views.get_a_person(request.user.username)
    name = json.loads(str(info))
    data['first_name_emailee'] = name['first_name']
    data['last_name_emailee'] = name['last_name']
    requests = db_views.get_friend_requests(data)
    # Strip the Mongo id and sendee-side bookkeeping before shipping the
    # records to the client.  pop() with a default is used so a record
    # that lacks one of these keys no longer raises KeyError.
    for person in requests:
        person.pop('_id', None)
        person.pop('last_name_emailee', None)
        person.pop('email_of_emailee', None)
        person.pop('first_of_emailee', None)
    return HttpResponse(json.dumps(requests), content_type="application/json")
def accept_friend_request_ajax(request):
    """AJAX endpoint: accept a pending friend request for the current user."""
    data = {
        'email_of_sendee': request.user.username,
        'email_of_requester': request.POST.get('email_of_requester', False),
    }
    db_views.accept_friend_request(data)
    return HttpResponse(json.dumps(data), content_type="application/json")
def delete_friend_request_ajax(request):
    """AJAX endpoint: deny (remove) a pending friend request."""
    data = {
        'email_of_sendee': request.user.username,
        'email_of_requester': request.POST.get('email_of_requester', False),
    }
    db_views.deny_friend_request(data)
    return HttpResponse(json.dumps(data), content_type="application/json")
def delete_friend_ajax(request):
    """AJAX endpoint: remove a friend from the current user's friends list."""
    data = {
        'email': request.user.username,
        'first_name': request.POST.get('first_name', False),
        'last_name': request.POST.get('last_name', False),
        'friend_email': request.POST.get('friend_email', False),
    }
    db_views.delete_friend_from_friends_list(data)
    return HttpResponse(json.dumps(data), content_type="application/json")
def create_user_view(request):
    """GET: render the create-new-user form.
    POST: validate the submitted data; on success create an inactive
    Django user, store the student record, and email a welcome message.
    """
    if request.method == 'GET':
        return render(request, 'create_user.html')
    elif request.method == 'POST':
        if request.POST.get("request"):
            is_valid, data = validate_new_user(request)
            if not is_valid:
                # Show the form again with the validation errors.
                return render(request, 'create_user.html', dictionary=data)
            # Accounts start out inactive and must be approved by an
            # administrator before the student can log in.
            user = User.objects.create_user(username=data['email'],
                                            password=data['pw'])
            user.is_active = False
            user.save()
            db_views.add_student_entry(data)
            data["message"] = 'Welcome to our website! Please wait until you have been approved'
            data["message_sub"] = 'Welcome to SOCS'
            db_views.send_email_to_student(data)
            return redirect("/login")
        elif request.POST.get("cancel"):
            return redirect("/login")
    return render(request, 'create_user.html')
def validate_new_user(request):
    """Validate the sign-up form data in *request*.

    Returns (is_valid, data): data holds the submitted values plus an
    err_* message for every failed check.
    """
    # Default missing text fields to '' (not False) so the .strip()/len()
    # checks below report "missing" instead of raising AttributeError.
    data = {}
    data['studName'] = request.POST.get('studName', '').strip().split()
    data['email'] = request.POST.get('email', '')
    data['school'] = request.POST.get('school', '')
    data['address'] = request.POST.get('address', '')
    data['pw'] = request.POST.get('password', '')
    data['conf_pw'] = request.POST.get('confirm', '')
    valid_data = True
    # If any data is invalid, set valid_data to False and record why.
    # The name must be exactly two words: first and last name.
    if len(data['studName']) != 2:
        valid_data = False
        data['err_studName'] = "Please enter a valid name"
    else:
        data['first_name'] = data['studName'][0]
        data['last_name'] = data['studName'][1]
    # NOTE: validate_email returns True when the address is NOT valid.
    if validate_email(data['email']):
        valid_data = False
        data['err_email'] = "Invalid email"
    if User.objects.filter(username=data['email']).count():
        valid_data = False
        data['err_email'] = "A user with that email already exists"
    if len(data['school'].strip()) == 0:
        valid_data = False
        data['err_school'] = "Please enter a school"
    if len(data['address'].strip()) == 0:
        valid_data = False
        data['err_address'] = "Please enter an address"
    if len(data['pw'].strip()) == 0:
        valid_data = False
        data['err_pw'] = "Please enter a password"
    if not data['pw'] == data['conf_pw']:
        valid_data = False
        data['err_conf_pw'] = "Passwords didn't match"
    return valid_data, data
def validate_email(email):
    """Return True when *email* is NOT a syntactically valid address.

    Note the inverted convention: callers treat a truthy return as an
    error ("Invalid email").
    """
    import re
    # Raw string so the backslash escaping the literal dot is explicit.
    pattern = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$")
    return pattern.match(email) is None
def logout_view(request):
    """Log the current user out via Django auth, then go to the login page."""
    logout(request)
    return redirect("/login")
def redirect_to_login(request):
    """Send visitors of the homepage straight to the login page."""
    return redirect("/login")
@login_required(redirect_field_name='/login')
def friend_ajax(request):
    """AJAX endpoint: return possible friends matching the POSTed
    first name as JSON.  (A dead initial assignment of `data` that was
    immediately overwritten has been removed.)
    """
    data = db_views.get_possible_friends(request.user.username,
                                         request.POST["first_name"])
    return HttpResponse(json.dumps(data), content_type="application/json")
@login_required(redirect_field_name='/login')
def accept_student_request_ajax(request):
    """AJAX endpoint (admin only): activate a pending student account
    and send the student an approval email."""
    if 'Administrator' not in request.user.groups.values_list('name',
                                                              flat=True):
        return redirect("/dashboard")
    # Look the pending account up by the posted email address.
    username = request.POST.get('student_email', False)
    user = User.objects.filter(username=username)[0]
    user.is_active = True
    notification = {
        "email": username,
        "message": 'Your account has been approved',
        "message_sub": 'Welcome to SOCS',
    }
    db_views.send_email_to_student(notification)
    user.save()
    return HttpResponse(content_type="application/json")
@login_required(redirect_field_name='/login')
def deny_student_request_ajax(request):
    """AJAX endpoint (admin only): reject a pending sign-up by deleting
    the corresponding Django user."""
    if 'Administrator' not in request.user.groups.values_list('name',
                                                              flat=True):
        return redirect("/dashboard")
    username = request.POST.get('student_email', False)
    User.objects.filter(username=username)[0].delete()
    return HttpResponse(content_type="application/json")
def get_course_offerings_ajax(request):
    """AJAX endpoint: return this year's course offerings for the
    logged-in user as JSON.  (Debug printing removed.)"""
    data = {}
    data['email'] = request.user.username
    # Current calendar year, e.g. "2024".
    data['year'] = time.strftime("%Y")
    courses = db_views.get_course_offerings(data)
    return HttpResponse(json.dumps(courses), content_type="application/json")
def remove_assigned_course_ajax(request):
    """AJAX endpoint: remove a course from the user's assigned schedule.

    'days_array' arrives space-separated, typically with a trailing
    space; empty entries are filtered out.  (The previous
    days.remove("") raised ValueError when no empty entry was present.)
    """
    data = {}
    data['email'] = request.user.username
    data['course_name'] = request.POST.get('course_name', False)
    data['start_period'] = request.POST.get('start_period', False)
    data['end_period'] = request.POST.get('end_period', False)
    data['course_id'] = request.POST.get('course_id', False)
    data['instructor'] = request.POST.get('instructor', False)
    days = request.POST.get('days_array', False)
    data['days_array'] = [day for day in days.split(" ") if day]
    db_views.remove_a_class_from_assigned(data)
    return HttpResponse(content_type="application/json")
def export_generated_ajax(request):
    """AJAX endpoint: export the generated schedule.

    NOTE(review): unlike the sibling endpoints this passes the raw
    request object to db_views rather than a data dict -- confirm that
    export_generated expects it.
    """
    db_views.export_generated(request)
    return HttpResponse(content_type="application/json")
def create_desired_schedule_ajax(request):
    """AJAX endpoint: build the user's desired schedule from POST data.

    NOTE(review): this returns the schedule object directly rather than
    an HttpResponse (see the commented-out line below); Django views
    are expected to return an HttpResponse -- confirm whether the JSON
    response should be restored.  (Debug printing removed.)
    """
    data = request.POST.get('data[]', False)
    email = request.user.username
    schedule = db_views.create_desired_sched(email, data)
    return schedule
    # return HttpResponse(json.dumps(schedule), content_type="application/json")
| |
import UserDict
import grokcore.component as grok
import logging
import lxml.etree
import uuid
import zeit.edit.block
import zeit.edit.interfaces
import zope.container.contained
import zope.interface
import zope.location.interfaces
import zope.proxy
import zope.security.proxy
log = logging.getLogger(__name__)
class Base(UserDict.DictMixin,
           zeit.edit.block.Element,
           zope.container.contained.Contained):
    """Abstract base for XML-backed, ordered, dict-like containers.

    Keys are block ids stored in the XML tree; values are IElement
    adapters looked up per XML child node.  Subclasses supply the XML
    access primitives (_find_item, _get_keys, _get_element_type, index).

    NOTE(review): zope.component and zope.event are used below but are
    not in this module's visible import block -- presumably available
    via the zope namespace package; confirm.  Python 2 code (unicode,
    UserDict.DictMixin, zope.interface.implements).
    """

    zope.interface.implements(zeit.edit.interfaces.IContainer)

    def __init__(self, context, xml):
        self.xml = xml
        # Set parent last so we don't trigger a write.
        self.__parent__ = context

    # Implemented in subclasses

    def _find_item(self, xml_node, name):
        # Return the XML node(s) below xml_node whose id is `name`.
        raise NotImplementedError

    def _get_keys(self, xml_node):
        # Return the ids of all relevant children of xml_node.
        raise NotImplementedError

    def index(self, value):
        # Return the position of `value` within this container.
        # A simple implementation would be self.values().index(value), but that
        # won't work in the general case since the abstract implementation of
        # self.values is "self[x] for x in self.keys()", which defeats the
        # purpose of this method.
        raise NotImplementedError

    def _get_element_type(self, xml_node):
        # Return the adapter name used to look up the node's element.
        raise NotImplementedError

    # Default implementation

    def __getitem__(self, key):
        """Look up by block id; an int-parsable key is treated as a position."""
        try:
            position = int(key)
        except ValueError:
            pass
        else:
            try:
                return self.values()[position]
            except IndexError:
                raise KeyError(key)
        node = self._find_item(self.xml, name=key)
        if node:
            node = node[0]
            element = self._get_element_for_node(node)
            if element is None:
                # No adapter registered for this tag: degrade to a
                # generic "unknown block" element instead of failing.
                log.warning(
                    'Unknown element tag=%s, id=%s',
                    node.tag, key)
                element = self._get_element_for_node(
                    node, zeit.edit.block.UnknownBlock.type)
            return zope.container.contained.contained(element, self, key)
        raise KeyError(key)

    def _get_element_for_node(self, node, element_type=None):
        # Adapt (container, node) to IElement under the element-type name.
        if element_type is None:
            element_type = self._get_element_type(node)
        return zope.component.queryMultiAdapter(
            (self, node),
            zeit.edit.interfaces.IElement,
            name=element_type)

    def __iter__(self):
        return (unicode(k) for k in self._get_keys(self.xml))

    def keys(self):
        return list(iter(self))

    def slice(self, start, end):
        """Return the items from key *start* through key *end*, inclusive."""
        result = []
        started = False
        for key in self.keys():
            if key == start:
                started = True
            if started:
                result.append(self[key])
            if key == end:
                break
        return result

    def insert(self, position, item):
        """Insert item at given position into container.

        We cannot use insert to modify the XML, since the XML has
        heterogeneous child types. This means that XML childs and container
        childs can be different! Therefore we have to use append and
        updateOrder, which only sorts the childs of the container. This is the
        reason why we use add + updateOrder instead of inserting directly into
        the correct position of the XML tree.

        Since we used updateOrder we have to send an IOrderUpdatedEvent, but
        after the item was added / moved. Therefore we suppress the event in
        updateOrder and create it explicitly at the end. (It's a subclass of
        IContainerModifiedEvent, thus we can replace the modified event with
        the updated event.)
        """
        is_new = item.__name__ is None
        keys = self.keys()
        self._add(item)
        keys_before_sort = self.keys()
        keys.insert(position, item.__name__)
        self.updateOrder(keys, send_event=False)
        self._p_changed = True
        # Fire the moved/added event first, then the order update.
        event = self._contained_event(item, is_new)
        if event is not None:
            zope.event.notify(event)
        zope.event.notify(
            zeit.edit.interfaces.OrderUpdatedEvent(
                self, *keys_before_sort))

    def add(self, item):
        """Append *item* to the container, firing containment events."""
        is_new = item.__name__ is None
        self._add(item)
        self._p_changed = True
        event = self._contained_event(item, is_new)
        if event is not None:
            zope.event.notify(event)
        zope.container.contained.notifyContainerModified(self)

    def _contained_event(self, item, is_new):
        """Re-implementation of zope.container.contained.containedEvent

        We cannot reuse it, since we already set __name__ and __parent__.
        Therefore containedEvent would assume that nothing changed and
        returns no event.
        """
        event = None
        if item.__parent__ != self:
            oldparent = item.__parent__
            item.__parent__ = self
            event = zope.container.contained.ObjectMovedEvent(
                item, oldparent, item.__name__, self, item.__name__)
        elif is_new:
            event = zope.container.contained.ObjectAddedEvent(
                item, self, item.__name__)
        return event

    def _add(self, item):
        # Give the item a unique name and append its (unproxied) XML node.
        item.__name__ = self._get_unique_name(item)
        self.xml.append(zope.proxy.removeAllProxies(item.xml))
        return item.__name__

    def _get_unique_name(self, item):
        # Keep a pre-existing name (rejecting duplicates); generate one
        # otherwise.
        name = item.__name__
        if name:
            if name in self:
                raise zope.container.interfaces.DuplicateIDError(name)
        else:
            name = self._generate_block_id()
        return name

    def create_item(self, type_, position=None):
        """Create a new element of *type_* via its IElementFactory adapter."""
        return zope.component.getAdapter(
            self, zeit.edit.interfaces.IElementFactory, name=type_)(position)

    def _generate_block_id(self):
        return 'id-' + str(uuid.uuid4())

    def updateOrder(self, order, send_event=True):
        """Reorder the children to match *order* (a permutation of keys).

        Implemented by removing and re-appending every child's XML node
        in the requested order.
        """
        old_order = self.keys()
        __traceback_info__ = (order, old_order)
        if not zope.security.proxy.isinstance(order, (tuple, list)):
            raise TypeError('order must be tuple or list, got %s.' %
                            type(order))
        if set(order) != set(old_order):
            raise ValueError('order must have the same keys.')
        objs = dict(self.items())
        for key in order:
            self._delete(key)
        for key in order:
            self._add(objs[key])
        self._p_changed = True
        if send_event:
            zope.event.notify(
                zeit.edit.interfaces.OrderUpdatedEvent(self, *old_order))

    def get_recursive(self, key, default=None):
        """Like get(), but also searches all IContainer descendants."""
        item = self.get(key, default)
        if item is not default:
            return item
        for child in self.values():
            if zeit.edit.interfaces.IContainer.providedBy(child):
                item = child.get_recursive(key, default)
                if item is not default:
                    return item
        return default

    def filter_values(self, *interfaces):
        """Iterate the children providing any of *interfaces*."""
        for child in self.values():
            if any([x.providedBy(child) for x in interfaces]):
                yield child

    def find_first(self, interface):
        """Return the first child providing *interface*, or None."""
        result = list(self.filter_values(interface))
        return result[0] if result else None

    def __delitem__(self, key):
        item = self._delete(key)
        self._p_changed = True
        # We cannot reuse zope.container.contained.uncontained, since it would
        # set __name__ and __parent__ to None, which cannot be persisted to XML
        zope.event.notify(
            zope.container.contained.ObjectRemovedEvent(item, self, key))
        zope.container.contained.notifyContainerModified(self)

    def _delete(self, key):
        # Detach the item's XML node from its parent and return the item.
        __traceback_info__ = (key,)
        item = self[key]
        item.xml.getparent().remove(item.xml)
        return item

    def __repr__(self):
        return zeit.edit.block.Element.__repr__(self)
class TypeOnAttributeContainer(Base):
    """Container whose children carry their id and element type as
    attributes in the cms namespace (http://namespaces.zeit.de/CMS/cp).
    """

    # XPath: child node whose cms:__name__ attribute equals $name.
    _find_item = lxml.etree.XPath(
        './*[@cms:__name__ = $name]',
        namespaces=dict(
            cms='http://namespaces.zeit.de/CMS/cp'))
    # XPath: the cms:__name__ attributes of all children (i.e. the keys).
    _get_keys = lxml.etree.XPath(
        './*/@cms:__name__',
        namespaces=dict(
            cms='http://namespaces.zeit.de/CMS/cp'))

    def _get_element_type(self, xml_node):
        # The element type is stored in the node's cms:type attribute.
        return xml_node.get('{http://namespaces.zeit.de/CMS/cp}type')
class Sublocations(grok.Adapter):
    """We don't want to dispatch events that happen to the content object
    (like being added to the Workingcopy) to their IElement children.
    """

    grok.context(zeit.edit.interfaces.IContainer)
    grok.implements(zope.location.interfaces.ISublocations)

    def sublocations(self):
        # Report no sublocations so object events are not propagated to
        # the container's children.
        return []
| |
#!/bin/env python
#
# Copyright (C) 2012 by Adam Ewing (adam.ewing@gmail.com)
#
# Released under the MIT license, see LICENSE.txt
#
import re, os, subprocess, tempfile, pysam, sys, argparse, peakparser
from string import maketrans
def checkfile(fname):
    """Exit the program (status 1) if *fname* cannot be opened for reading.

    The probe file handle is closed explicitly (the previous version
    leaked it), and sys.exit(1) signals failure to the shell (the
    previous bare sys.exit() exited with status 0).
    """
    try:
        f = open(fname)
        f.close()
    except IOError:
        print("can't find file: " + fname)
        sys.exit(1)
# parse TCGA filename
def getTypeFromTCGA(fname):
    """Classify a TCGA-style filename by its sample-type field.

    The 4th dash-separated field of the basename encodes the sample
    type: a leading '0' means tumor ('CANCER'), a leading '1' means
    'NORMAL'.  Returns None for anything else or for filenames without
    enough fields (the bare except has been narrowed to IndexError).
    """
    try:
        fields = os.path.basename(fname).split('-')
        sample = fields[3]
        if sample[0] == '0':
            return 'CANCER'
        if sample[0] == '1':
            return 'NORMAL'
        return None
    except IndexError:
        # too few fields / empty sample field: not a TCGA filename
        return None
# some people choose to use reference genomes with chromosome names that don't begin in 'chr'
# since we do use 'chr', we need to detect those cases
def chromNameUsesPrefix(bam):
    """Return True if any reference name in *bam* starts with 'chr'.

    Some reference genomes omit the 'chr' prefix on chromosome names;
    this detects which convention the BAM file uses.  (The per-name
    regex search was replaced with str.startswith / any().)
    """
    return any(ref.startswith("chr") for ref in bam.references)
# fix for the most common variation on chromosome names (leaving out the 'chr')
def fixChrName(name):
    """Prepend 'chr' to a chromosome name unless it already begins with
    'c' (the most common variation is simply leaving out the prefix)."""
    return name if name[0] == 'c' else "chr" + name
# average base quality over an interval
def avgQual(qstring, start, end, zeroChar):
    """Average base quality over qstring[start:end].

    *zeroChar* is the character encoding quality zero (its ordinal is
    subtracted from every quality character).  Out-of-range start/end
    are clamped to the string bounds.
    """
    if start < 0:
        start = 0
    if end > len(qstring):
        end = len(qstring)
    offset = ord(zeroChar)
    total = sum(ord(ch) - offset for ch in qstring[start:end])
    return total / (end - start)
# capitalize 1-indexed seq between start and end
def capSeq(seq, start, end):
    """Lower-case *seq* and capitalize the 1-indexed inclusive range
    [start, end] (used to highlight the aligned part of a read).

    The character-by-character append loop was replaced with list();
    the index arithmetic is unchanged.
    """
    chars = list(seq.lower())
    # start is 1-indexed; range(start-1, end) covers start..end inclusive
    for i in range(start - 1, end):
        chars[i] = chars[i].upper()
    return ''.join(chars)
# runs bwa stdsw to align two sequences
def bwastdsw(queryName,query,refName,ref,queryIsFile=False,refIsFile=False):
    """Align *query* against *ref* with `bwa stdsw` and return the best
    BwastdswAlignResult (longest alignment among those with >90%%
    identity), or None if bwa produced no alignments.

    query/ref are raw sequences unless queryIsFile/refIsFile says they
    are FASTA file paths; raw sequences are written to temporary FASTA
    files which are removed afterwards.

    NOTE(review): Python 2 code -- the temp files are written in
    (default binary) mode with str; under Python 3 this would need
    mode='w' or byte strings.
    """
    qfileName = ''
    rfileName = ''
    if queryIsFile:
        qfileName = query
    else:
        # wrap the raw query sequence in a one-record FASTA temp file
        qfile = tempfile.NamedTemporaryFile(delete=False)
        qfile.write(">%s\n%s\n" % (queryName,query))
        qfile.close()
        qfileName = qfile.name
    if refIsFile:
        rfileName = ref
    else:
        # same for the reference sequence
        rfile = tempfile.NamedTemporaryFile(delete=False)
        rfile.write(">%s\n%s\n" % (refName,ref))
        rfile.close()
        rfileName = rfile.name
    # can use the -f option to only consider forward strand since bwa prints aligned seqs
    # in the forward direction
    args = ['bwa', 'stdsw', rfileName, qfileName]
    p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)
    # bwa stdsw emits 4-line records: a tab-separated header line
    # (starting with '>'), the aligned target seq, the match string and
    # the aligned query seq; fnum tracks the position within a record.
    fnum = 0
    alignments = []
    for pline in p.stdout.readlines():
        if re.search("^>", pline):
            fnum = 0
            col = pline.strip().split("\t")
            alignments.append(BwastdswAlignResult(col))
            alignments[-1].queryLen = len(query) # FIXME make this work with filename input
            alignments[-1].targetLen = len(ref)
            fnum += 1
        elif fnum == 1:
            alignments[-1].targetSeq = pline.strip()
            fnum += 1
        elif fnum == 2:
            alignments[-1].matchString = pline.strip()
            fnum += 1
        elif fnum == 3:
            alignments[-1].querySeq = pline.strip()
            fnum += 1
    p.stdout.close()
    p.kill()
    p.wait()
    # remove only the temp files we created ourselves
    if not queryIsFile:
        os.unlink(qfileName)
    if not refIsFile:
        os.unlink(rfileName)
    if len(alignments) == 0:
        return None
    if len(alignments) == 1:
        return alignments[0]
    # more than 1 alignment, pick the 'best'
    bestaln = alignments[0]
    for align in alignments:
        if align.alnLength() > bestaln.alnLength() and align.pctID() > 90:
            bestaln = align
    return bestaln
def fetchRegion(bamFile,refGenome,maxReadLen,chr,start,end,zeroChar,minClipQual,usechr=False):
    """Collect soft-clipped reads around [start, end] on *chr* and return
    a ClippedCluster with breakpoints assigned.

    Reads with more than 10 soft-clipped bases whose clipped end has
    average quality >= minClipQual are realigned against the local
    reference sequence with bwa stdsw; alignments whose breakpoint falls
    within 10 bp of the region are added to the cluster.
    """
    maxReadLen = int(maxReadLen)
    start = int(start)
    end = int(end)
    if not usechr: # can fix to use pysam.Fastafile function to get sequence names when it's available
        # NOTE(review): lstrip('chr') strips any leading 'c'/'h'/'r'
        # characters, not the literal 'chr' prefix -- confirm no
        # reference name starts with those letters after the prefix.
        chr = chr.lstrip('chr')
    regionRefSeq = refGenome.fetch(reference=chr, start=start-maxReadLen, end=end+maxReadLen)
    cluster = ClippedCluster(chr,start,end,maxReadLen)
    # some .bam files are aligned against references that leave out the 'chr' prefix on the chromosome names
    if not chromNameUsesPrefix(bamFile):
        chr = chr.lstrip('chr')
    for read in bamFile.fetch(chr, start-maxReadLen, end+maxReadLen):
        if not read.is_unmapped and not read.is_duplicate:
            cliplen = read.rlen - read.qlen # minimum soft clipping: if rlen < qlen --> bases were soft-clipped
            if cliplen > 10:
                refseq = ''
                leftclip = 0
                rightclip = 0
                # Distribute the clipped bases to the left/right ends of
                # the read based on where the aligned part (qstart) begins.
                if (read.qstart == cliplen): # left-clipped
                    leftclip = cliplen
                if (read.qstart == 0): # right-clipped
                    rightclip = cliplen
                if (read.qstart > 0 and read.qstart < cliplen):
                    leftclip = cliplen - read.qstart
                    rightclip = cliplen - leftclip
                breakside = ''
                breakloc = 0
                clipqual = 0
                if (leftclip > rightclip-10): # 10 is arbitrary
                    breakside = 'L'
                    breakloc = read.pos
                    clipqual = avgQual(read.qual,0,leftclip,zeroChar)
                elif (rightclip > leftclip-10):
                    breakside = 'R'
                    breakloc = read.pos + (read.rlen - rightclip)
                    clipqual = avgQual(read.qual,rightclip,read.rlen,zeroChar)
                else:
                    breakside = 'A' # ambiguous
                if (clipqual >= int(minClipQual)):
                    # realign the whole read against the local reference
                    align = bwastdsw('query',read.seq,'target',regionRefSeq)
                    if align:
                        # translate alignment coords back to genome coords
                        breakLeft = start - maxReadLen + align.targetStart
                        breakRight = start - maxReadLen + align.targetEnd
                        # keep only breakpoints within 10 bp of the region
                        if (breakLeft >= start-10 and breakLeft <= end+10) or (breakRight >= start-10 and breakRight <= end+10):
                            cluster.aligns.append(align)
                            cluster.reads.append(read)
    cluster.assignBreaks()
    return cluster;
def mergeClusters(cl1, cl2, teList):
    """Merge two ClippedClusters covering the same genomic interval.

    If one cluster is empty, the other is TE-annotated and returned
    directly.  Otherwise a new cluster is built from the combined reads
    and alignments (note: cl1's lists are reused and extended in place,
    matching the original behavior), breakpoints are reassigned, TEs are
    mapped, and the types are concatenated.  Raises ValueError if the
    clusters describe different locations.
    """
    if not cl1.hasReads():
        cl2.mapTE(teList)
        return cl2
    if not cl2.hasReads():
        cl1.mapTE(teList)
        return cl1
    same_region = (cl1.chr == cl2.chr and cl1.start == cl2.start
                   and cl1.end == cl2.end and cl1.maxrdln == cl2.maxrdln)
    if not same_region:
        raise ValueError('cannot merge clusters that have different locations')
    merged = ClippedCluster(cl1.chr, cl1.start, cl1.end, cl1.maxrdln)
    merged.aligns = cl1.aligns
    merged.aligns.extend(cl2.aligns)
    merged.reads = cl1.reads
    merged.reads.extend(cl2.reads)
    merged.assignBreaks()
    merged.mapTE(teList)
    merged.type = cl1.type + "," + cl2.type
    return merged
class ClippedCluster:
    """A cluster of soft-clipped reads over one genomic interval.

    Reads and their local realignments are appended to parallel lists
    (reads[i] corresponds to aligns[i]); assignBreaks() then derives
    consensus breakpoints and mapTE() annotates each read with the
    transposable element it maps to.  Python 2 code (has_key/iteritems).
    """

    def __init__(self,chr,start,end,maxReadLen):
        self.chr = chr
        self.start = int(start)
        self.end = int(end)
        self.maxrdln = maxReadLen
        self.aligns = [] # BwastdswAlignResult objects
        self.reads = [] # pysam AlignedRead objects
        # assigned by functions:
        self.assign = False # has assignBreaks() been run?
        self.lgood = False # is left break good? (defined in bestBreakLeft())
        self.rgood = False # is right break good? (defined in bestBreakRight())
        self.lbest = 0 # best guess for left break
        self.rbest = 0 # best guess for right break
        self.lbreaks = [] # candidate left break positions
        self.rbreaks = [] # candidate right break positions
        self.type = ''
        self.teqlen = [] # TE query lengths
        self.tetype = [] # TE families (list of those detected)
        self.testart = [] # list of TE starts (positions in TEs)
        self.teend = [] # list of TE ends (positions in TEs)
        self.testr = [] # list of TE orientations
        self.minTEQueryLen = 15 # minimum length of TE seq for alignment to be valid

    def hasReads(self):
        # True once at least one clipped read has been collected.
        if len(self.reads) > 0:
            return True
        return False

    def mapTE(self,refFastaDir):
        """Annotate every read with the TE its clipped part maps to.

        Appends one entry per read to the parallel te* lists; reads
        whose TE alignment is missing or shorter than minTEQueryLen get
        'None' placeholders.
        """
        for i in range(len(self.reads)):
            teAlign = partialMapTE(self,refFastaDir,
                                   self.reads[i].seq,
                                   self.aligns[i].queryStart,
                                   self.aligns[i].queryEnd)
            if (teAlign == None):
                self.tetype.append('None')
                self.testart.append(0)
                self.teend.append(0)
                self.testr.append('.')
                self.teqlen.append(0)
            elif (teAlign.queryLen >= self.minTEQueryLen):
                self.tetype.append(teAlign.targetName)
                self.testart.append(teAlign.targetStart)
                self.teend.append(teAlign.targetEnd)
                self.testr.append(teAlign.queryStr)
                self.teqlen.append(teAlign.queryLen)
            else:
                # alignment exists but is too short to trust
                self.tetype.append('None')
                self.testart.append(0)
                self.teend.append(0)
                self.testr.append('.')
                self.teqlen.append(teAlign.queryLen)

    def assignBreaks(self):
        """Collect candidate left/right breakpoints from the alignments
        and pick the best one for each side."""
        for i in range(len(self.reads)):
            # margins = unaligned bases on each end of the read
            leftmargin = self.aligns[i].queryStart - 1
            rightmargin = self.reads[i].rlen - self.aligns[i].queryEnd
            if leftmargin > rightmargin and rightmargin <= 10:
                self.lbreaks.append(self.start - self.maxrdln + self.aligns[i].targetStart)
            if leftmargin < rightmargin and leftmargin <= 10:
                self.rbreaks.append(self.start - self.maxrdln + self.aligns[i].targetEnd)
        self.assign = True
        if len(self.lbreaks) > 0:
            self.bestBreakLeft()
        if len(self.rbreaks) > 0:
            self.bestBreakRight()
        self.retryBreaks()

    def bestBreakLeft(self):
        """Pick the modal left breakpoint; mark it good if a majority
        of reads agree."""
        if not self.assign:
            self.assignBreaks()
        # count unique positions
        posCountDict = {}
        for pos in self.lbreaks:
            if posCountDict.has_key(pos):
                posCountDict[pos] += 1
            else:
                posCountDict[pos] = 1
        # get modal position
        maxCount = 0
        maxCountPos = 0
        for pos,count in posCountDict.iteritems():
            if count > maxCount:
                maxCount = count
                maxCountPos = pos
        # if over half the reads have this break, it's good
        if float(maxCount)/float(len(self.lbreaks)) > 0.5:
            self.lgood = True
            self.lbest = int(maxCountPos)

    def bestBreakRight(self):
        """Pick the modal right breakpoint; mark it good if a majority
        of reads agree."""
        if not self.assign:
            self.assignBreaks()
        # count unique positions
        posCountDict = {}
        for pos in self.rbreaks:
            if posCountDict.has_key(pos):
                posCountDict[pos] += 1
            else:
                posCountDict[pos] = 1
        # get modal position
        maxCount = 0
        maxCountPos = 0
        for pos,count in posCountDict.iteritems():
            if count > maxCount:
                maxCount = count
                maxCountPos = pos
        # if over half the reads have this break, it's good
        if float(maxCount)/float(len(self.rbreaks)) > 0.5:
            self.rgood = True
            self.rbest = int(maxCountPos)

    def retryBreaks(self):
        """look for breaks that aren't majority but do indicate a TSD"""
        if self.lgood and not self.rgood:
            # look for TSD in rbreaks
            tsdopts = {} # TSD candidates, stores count
            for rpos in self.rbreaks:
                # NOTE(review): self.lgood is a boolean (True == 1), so
                # this arithmetic looks like it was meant to use
                # self.lbest (the break position) -- confirm.
                if (rpos - self.lgood) >= 2 and (rpos - self.lgood) <= 50:
                    if rpos in tsdopts:
                        tsdopts[rpos] += 1
                    else:
                        tsdopts[rpos] = 1
            maxCount = 0
            maxCountPos = 0
            numTiedForBest = 0
            # NOTE(review): the equality branch below also fires for the
            # element that just became the max, so numTiedForBest >= 1
            # whenever tsdopts is non-empty, making the acceptance test
            # (numTiedForBest == 0) unreachable -- confirm intent.
            for pos,count in tsdopts.iteritems():
                if count > maxCount:
                    maxCount = count
                    maxCountPos = pos
                if count == maxCount:
                    numTiedForBest += 1
            if maxCount > 0 and numTiedForBest == 0:
                self.rgood = True
                sys.stderr.write("better break found for " + self.chr + ":" + str(self.start) + "-" + str(self.end) + "\n")
                self.rbest = int(maxCountPos)
        if self.rgood and not self.lgood:
            # look for TSD in lbreaks
            tsdopts = {} # TSD candidates, stores count
            for lpos in self.lbreaks:
                # NOTE(review): same issue as above -- self.rgood is a
                # boolean; presumably self.rbest was intended.
                if (self.rgood - lpos) >= 2 and (self.rgood - lpos) <= 50:
                    if lpos in tsdopts:
                        tsdopts[lpos] += 1
                    else:
                        tsdopts[lpos] = 1
            maxCount = 0
            maxCountPos = 0
            numTiedForBest = 0
            for pos,count in tsdopts.iteritems():
                if count > maxCount:
                    maxCount = count
                    maxCountPos = pos
                if count == maxCount:
                    numTiedForBest += 1
            if maxCount > 0 and numTiedForBest == 0:
                self.lgood = True
                sys.stderr.write("better break found for " + self.chr + ":" + str(self.start) + "-" + str(self.end) + "\n")
                self.lbest = int(maxCountPos)

    def majorityTE(self):
        """returns the most frequently identified TE"""
        tecount = {}
        i = 0
        for te in self.tetype:
            # only count reads with long enough TE seq for alignment
            if te in tecount and self.teqlen[i] >= self.minTEQueryLen:
                tecount[te] += 1
            elif self.teqlen[i] >= self.minTEQueryLen:
                tecount[te] = 1
            i += 1
        if len(self.tetype) > 0:
            majTE = self.tetype[0]
            maxcount = 0
            for (te,count) in tecount.iteritems():
                if count > maxcount:
                    majTE = te
                    maxcount = count
            return majTE
        else:
            return 'None'

    def outstring(self):
        """One tab-separated summary line for this cluster."""
        output = "\t".join((str(self.lgood), str(self.rgood), str(self.lbest), str(self.rbest),
                            str(len(self.lbreaks)), str(len(self.rbreaks)), str(len(self.reads)),
                            self.type, self.majorityTE()))
        return output

    def infodump(self):
        """Verbose per-read debugging dump of alignments and breakpoints."""
        output = ''
        for i in range(len(self.reads)):
            rbreak = self.start - self.maxrdln + self.aligns[i].targetEnd
            lbreak = self.start - self.maxrdln + self.aligns[i].targetStart
            outseq = capSeq(self.reads[i].seq, self.aligns[i].queryStart, self.aligns[i].queryEnd)
            output += ("%s tr=%d tl=%d lr=%d ll=%d te=%s,%s,%s,%s"
                       % (outseq, rbreak, lbreak, self.aligns[i].queryStart, self.aligns[i].queryEnd,
                          self.tetype[i],str(self.testart[i]),str(self.teend[i]),self.testr[i]) + "\n")
        output += ("leftgood=%s rightgood=%s leftbreak=%d rightbreak=%d type=%s"
                   % (self.lgood, self.rgood, self.lbest, self.rbest, self.type) + "\n")
        return output
class BwastdswAlignResult:
    """One alignment record parsed from a 'bwa stdsw' output header line.

    The constructor consumes the tab-separated header columns; the
    aligned sequences and the match string are filled in afterwards by
    the caller (bwastdsw).
    """

    def __init__(self, col):
        self.queryName = col[3].lstrip('>')
        self.targetName = col[0].lstrip('>')
        self.queryStart = int(col[1])
        self.queryEnd = int(col[2])
        self.queryStr = col[4]
        self.targetStart = int(col[5])
        self.targetEnd = int(col[6])
        self.cigarString = col[8]
        # Filled in by the caller after construction:
        self.queryLen = 0
        self.targetLen = 0
        self.targetSeq = ''
        self.querySeq = ''
        self.matchString = ''

    def pctID(self):
        """Percent identity: matching columns ('|') over query length."""
        matches = self.matchString.count('|')
        return float(matches) / float(len(self.querySeq)) * 100

    def alnLength(self):
        """Length of the aligned target segment."""
        return len(self.targetSeq)

    def __str__(self):
        fields = (self.queryName, self.targetName, self.queryStart,
                  self.queryEnd, self.queryStr, self.targetStart,
                  self.targetEnd, self.pctID(), self.queryLen,
                  self.targetLen)
        return ("queryName=%s targetName=%s queryStart=%d queryEnd=%d queryStr=%s targetStart=%d targetEnd=%d pctID=%f queryLen=%d targetLen=%d"
                % fields)
def partialMapTE(self, refFastaDir, querySeq, excludeStart, excludeEnd):
    """
    Maps querySeq to a list of FASTA files (with names), returns best result (as BwastdswAlignResult)
    Only maps region outside of (excludeStart,excludeEnd)
    """
    excludeStart = int(excludeStart)
    excludeEnd = int(excludeEnd)
    results = []
    checkfile(refFastaDir + "/config.txt")
    f = open(refFastaDir + "/config.txt", 'r')
    for line in f:
        if not re.search("^#", line):
            (name, file) = line.strip().split()
            repfasta = refFastaDir + "/" + file
            checkfile(repfasta)
            leftQuery = querySeq[0:excludeStart]
            # NOTE(review): this slice drops the last base of querySeq; if the
            # whole right flank is wanted it should be querySeq[excludeEnd:]
            rightQuery = querySeq[excludeEnd:len(querySeq) - 1]
            leftAlign = bwastdsw('query', leftQuery, name, repfasta, refIsFile=True)
            rightAlign = bwastdsw('query', rightQuery, name, repfasta, refIsFile=True)
            # BUG FIX: pctID is a method -- the original compared the bound
            # method object itself to 90 (always False in Python 2), so no
            # alignment was ever collected.  Call it.
            if leftAlign is not None and leftAlign.pctID() > 90 and len(leftAlign.querySeq) >= 15:
                results.append(leftAlign)
            if rightAlign is not None and rightAlign.pctID() > 90 and len(rightAlign.querySeq) >= 15:
                results.append(rightAlign)
    f.close()
    # keep the alignment with the highest percent identity
    bestPctID = 0
    bestResult = None
    for result in results:
        if result is not None and result.pctID() > bestPctID:
            bestResult = result
            bestPctID = result.pctID()
    return bestResult
def main(args):
    """Re-examine every called region from discordant.py in both the cancer
    and normal BAMs and append break/cluster annotations.

    Reads the four call files (canceronly/normalonly/germline/other) from the
    run's output directory and writes a matching *breaks.tab.txt for each.
    """
    peakparser.checkOutDir(args.outBaseName, args.outDirName)
    configPath = args.outDirName + "/" + args.outBaseName + "/" + args.configFileName
    checkfile(configPath)
    configDict = peakparser.readConfig(configPath, args.outBaseName, args.outDirName)
    cancerBamFile = ''
    normalBamFile = ''
    refGenomeFile = args.refGenomeFile
    cancerCallsFile = args.outDirName + "/" + args.outBaseName + "/canceronly.tab.txt"
    normalCallsFile = args.outDirName + "/" + args.outBaseName + "/normalonly.tab.txt"
    germCallsFile = args.outDirName + "/" + args.outBaseName + "/germline.tab.txt"
    otherCallsFile = args.outDirName + "/" + args.outBaseName + "/other.tab.txt"
    # fix if unmerged -- 'in' instead of Python 2-only dict.has_key()
    if 'bamFileName1' not in configDict:
        configDict['bamFileName1'] = configDict['bamFileName']
        configDict['bamFileName2'] = configDict['bamFileName']
    bamType1 = getTypeFromTCGA(configDict['bamFileName1'])
    bamType2 = getTypeFromTCGA(configDict['bamFileName2'])
    print("bamfile1=%s bamFile2=%s bamType1=%s bamType2=%s"
          % (configDict['bamFileName1'], configDict['bamFileName2'], bamType1, bamType2))
    if bamType1 != bamType2 and bamType1 is not None and bamType2 is not None:
        if bamType1 == 'CANCER':
            if bamType2 != 'NORMAL':
                raise NameError('bam1 is cancer but bam2 is not normal')
            cancerBamFile = configDict['bamFileName1']
            normalBamFile = configDict['bamFileName2']
        if bamType2 == 'CANCER':
            if bamType1 != 'NORMAL':
                raise NameError('bam2 is cancer but bam1 is not normal')
            cancerBamFile = configDict['bamFileName2']
            normalBamFile = configDict['bamFileName1']
    else:
        # print() call instead of the Py2-only print statement
        print('cannot determine bamfile cancer/normal from filenames in config.txt, defaulting to normal.')
        normalBamFile = configDict['bamFileName1']
        cancerBamFile = configDict['bamFileName2']
    checkfile(cancerBamFile)
    checkfile(normalBamFile)
    checkfile(normalCallsFile)
    checkfile(cancerCallsFile)
    checkfile(germCallsFile)
    checkfile(otherCallsFile)
    checkfile(refGenomeFile)
    cancerBam = pysam.Samfile(cancerBamFile, 'rb')  # rb = read, binary
    normalBam = pysam.Samfile(normalBamFile, 'rb')  # rb = read, binary
    cancerCalls = open(cancerCallsFile, 'r')
    normalCalls = open(normalCallsFile, 'r')
    germCalls = open(germCallsFile, 'r')
    otherCalls = open(otherCallsFile, 'r')
    refGenome = pysam.Fastafile(refGenomeFile)
    cancerBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/cancerbreaks.tab.txt", 'w')
    normalBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/normalbreaks.tab.txt", 'w')
    germBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/germlinebreaks.tab.txt", 'w')
    otherBreaksOut = open(args.outDirName + "/" + args.outBaseName + "/otherbreaks.tab.txt", 'w')
    callSetListNames = ('cancer', 'normal', 'germ', 'other')
    callSetListInFiles = (cancerCalls, normalCalls, germCalls, otherCalls)
    callSetListOutFiles = (cancerBreaksOut, normalBreaksOut, germBreaksOut, otherBreaksOut)
    for i in range(len(callSetListNames)):
        for line in callSetListInFiles[i]:
            # call rows are BED-like: chrom, start, end, ...
            col = line.strip().split("\t")
            chr = col[0]
            start = int(col[1])
            end = int(col[2])
            cancerCluster = fetchRegion(cancerBam, refGenome, int(args.maxReadLen), chr, start, end, args.zeroChar, int(args.minClipQual), args.usechr)
            cancerCluster.type = 'CANCER'
            normalCluster = fetchRegion(normalBam, refGenome, int(args.maxReadLen), chr, start, end, args.zeroChar, int(args.minClipQual), args.usechr)
            normalCluster.type = 'NORMAL'
            mergeCluster = mergeClusters(cancerCluster, normalCluster, args.refFastaDir)
            clusterout = mergeCluster.outstring()
            infodumpout = mergeCluster.infodump()
            callSetListOutFiles[i].write(line.strip("\n") + "\t" + clusterout + "\n" + infodumpout + "\n")
        callSetListInFiles[i].close()
        callSetListOutFiles[i].close()
    # FIX: release the BAM handles (previously leaked)
    cancerBam.close()
    normalBam.close()
if __name__ == '__main__':
    # commandline args
    parser = argparse.ArgumentParser(description='parse the output of discordant.py')
    parser.add_argument('-c', '--config', dest='configFileName', default='config.txt',
                        help='config file left by discordant.py')
    parser.add_argument('-o', '--outbasename', dest='outBaseName', required=True,
                        help='basename for output files')
    parser.add_argument('-d', '--outdirname', dest='outDirName', default='output',
                        help='output directory')
    # FIX: short option was mistyped as '-e-'
    parser.add_argument('-e', '--eltfile', dest='eltFile', default='sumEltList.txt',
                        help='list of element families to include')
    parser.add_argument('-m', '--maxReadLen', dest='maxReadLen', default=100,
                        help='max read length in basepairs (default 100 bp)')
    parser.add_argument('-z', '--zerochar', dest='zeroChar', default='#',
                        help='for fastq quality scores, the character corresponding to zero (default #)')
    parser.add_argument('-g', '--refgenome', dest='refGenomeFile', required=True,
                        help='ref genome in fasta format, indexed with samtools faidx')
    parser.add_argument('-q', '--minclipqual', dest='minClipQual', default=30,
                        help='minimum avg. quality cutoff for trimmed region (default 30)')
    parser.add_argument('-r', '--repdir', dest='refFastaDir', required=True,
                        help='directory of FASTA files with TE reference sequences in them, plus a config.txt file with ref names')
    parser.add_argument('--usechr', action="store_true", default=False,
                        help='set if reference genome uses "chr" prefix (default=False)')
    args = parser.parse_args()
    main(args)
| |
#!/usr/bin/env python
#
# Copyright (c) 2017 Naoto Yokoyama
#
# Modifications applied to the original work.
#
#
# Original copyright notice:
#
# Copyright 2015 Robb Wagoner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Parse an SNS event message and send to a Slack Channel
'''
import os
import json
import base64
import re
import requests
from base64 import b64decode
__author__ = "Robb Wagoner (@robbwagoner)"
__copyright__ = "Copyright 2015 Robb Wagoner"
__credits__ = ["Robb Wagoner"]
__license__ = "Apache License, 2.0"
__version__ = "0.1.2"
__maintainer__ = "Robb Wagoner"
__email__ = "robb@pandastrike.com"
__status__ = "Production"

# Fallback Slack identity/destination; all overridable through Lambda
# environment variables of the same name.
DEFAULT_USERNAME = os.environ.get('DEFAULT_USERNAME', 'AWS Lambda')
DEFAULT_CHANNEL = os.environ.get('DEFAULT_CHANNEL', '#webhook-tests')
DEFAULT_EMOJI = os.environ.get('DEFAULT_EMOJI', ':information_source:')
# Optional prefix prepended to every mapped Slack username.
USERNAME_PREFIX = os.environ.get('USERNAME_PREFIX', '')
def get_slack_emoji(event_src, topic_name, event_cond='default'):
    '''Map an event source, severity, and condition to an emoji
    '''
    emoji_map = {
        'autoscaling': {
            'notices': {'default': ':scales:'}},
        'cloudwatch': {
            'notices': {
                'ok': ':ok:',
                'alarm': ':fire:',
                'insuffcient_data': ':question:'},
            'alerts': {
                'ok': ':ok:',
                'alarm': ':fire:',
                'insuffcient_data': ':question:'}},
        'codepipeline': {
            'notices': {
                'STARTED': ':ok:',
                'FAILED': ':fire:',
                'SUCCEEDED': ':ok:'}},
        'elasticache': {
            'notices': {'default': ':stopwatch:'}},
        'rds': {
            'notices': {'default': ':registered:'}}}
    # chained .get() probes replace the try/except KeyError lookup
    per_topic = emoji_map.get(event_src, {}).get(topic_name, {})
    if event_cond in per_topic:
        return per_topic[event_cond]
    # no specific mapping: alert topics always burn, everything else is default
    return ':fire:' if topic_name == 'alerts' else DEFAULT_EMOJI
def get_slack_username(event_src):
    '''Map event source to the Slack username
    '''
    username_map = {
        'cloudwatch': 'AWS CloudWatch',
        'autoscaling': 'AWS AutoScaling',
        'elasticache': 'AWS ElastiCache',
        'codepipeline': 'AWS CodePipeline',
        'rds': 'AWS RDS'}
    if event_src in username_map:
        return "{0}{1}".format(USERNAME_PREFIX, username_map[event_src])
    # unknown source: fall back to the environment-configured default
    return DEFAULT_USERNAME
def get_slack_channel(region, event_src, topic_name, channel_map):
    '''Map region and event type to Slack channel name
    '''
    # region/event_src are currently unused; routing is by topic name only
    if topic_name in channel_map:
        return channel_map[topic_name]
    return DEFAULT_CHANNEL
def autoscaling_capacity_change(cause):
    '''Pull the "capacity from X to Y" phrase out of an AutoScaling cause
    string; returns None when the phrase is absent.
    '''
    match = re.search(r'capacity from (\w+ to \w+)', cause)
    return match.group(0) if match else None
def lambda_handler(event, context):
    '''The Lambda function handler

    Parses the first SNS record in *event*, classifies the event source
    (CloudWatch alarm, AutoScaling, ElastiCache, RDS, CodePipeline, other),
    builds a Slack webhook payload with source-specific attachments, and
    POSTs it to the configured webhook.  Returns the HTTP status code of
    the Slack request.
    '''
    config = {
        # WEBHOOK_URL: Slack incoming-webhook endpoint (scheme optional).
        # CHANNEL_MAP: base64-encoded JSON object mapping topic name -> channel.
        'webhook_url': os.environ['WEBHOOK_URL'],
        'channel_map': json.loads(base64.b64decode(os.environ['CHANNEL_MAP']))
    }
    event_cond = 'default'
    sns = event['Records'][0]['Sns']
    print('DEBUG EVENT:', sns['Message'])
    try:
        json_msg = json.loads(sns['Message'])
    except ValueError as e:
        # plain-text (non-JSON) message: all the .get() probes below will miss
        json_msg = {}
    if sns['Subject']:
        message = sns['Subject']
    else:
        message = sns['Message']
    # https://api.slack.com/docs/attachments
    attachments = []
    if json_msg.get('AlarmName'):
        # CloudWatch alarm state-change notification
        event_src = 'cloudwatch'
        event_cond = json_msg['NewStateValue']
        color_map = {
            'OK': 'good',
            'INSUFFICIENT_DATA': 'warning',
            'ALARM': 'danger'
        }
        attachments = [{
            'fallback': json_msg,
            'message': json_msg,
            'color': color_map[event_cond],
            "fields": [{
                "title": "Alarm",
                "value": json_msg['AlarmName'],
                "short": True
            }, {
                "title": "Status",
                "value": json_msg['NewStateValue'],
                "short": True
            }, {
                "title": "Description",
                "value": json_msg['AlarmDescription'],
                "short": False
            }, {
                "title": "Reason",
                "value": json_msg['NewStateReason'],
                "short": False
            }]
        }]
    elif json_msg.get('Cause'):
        # AutoScaling activity notification
        event_src = 'autoscaling'
        attachments = [{
            "text": "Details",
            "fallback": message,
            "color": "good",
            "fields": [{
                "title": "Capacity Change",
                "value": autoscaling_capacity_change(json_msg['Cause']),
                "short": True
            }, {
                "title": "Event",
                "value": json_msg['Event'],
                "short": False
            }, {
                "title": "Cause",
                "value": json_msg['Cause'],
                "short": False
            }]
        }]
    elif json_msg.get('ElastiCache:SnapshotComplete'):
        # ElastiCache snapshot-complete notification
        event_src = 'elasticache'
        attachments = [{
            "text": "Details",
            "fallback": message,
            "color": "good",
            "fields": [{
                "title": "Event",
                "value": "ElastiCache Snapshot"
            }, {
                "title": "Message",
                "value": "Snapshot Complete"
            }]
        }]
    elif re.match("RDS", sns.get('Subject') or ''):
        # RDS events are recognized by the subject line prefix
        event_src = 'rds'
        attachments = [{
            "fields": [{
                "title": "Source",
                "value": "{0} '{1}'".format(json_msg['Event Source'], json_msg['Source ID'])
            }, {
                "title": "Message",
                "value": json_msg['Event Message']
            }]}]
        if json_msg.get('Identifier Link'):
            # 'Identifier Link' is either "url\nlabel" or a bare url
            title_arr = json_msg['Identifier Link'].split('\n')
            if len(title_arr) >= 2:
                title_str = title_arr[1]
                title_lnk_str = title_arr[0]
            else:
                title_str = title_lnk_str = title_arr[0]
            attachments[0]['fields'].append({
                "title": "Details",
                "value": "<{0}|{1}>".format(title_str, title_lnk_str)
            })
    elif json_msg.get('source') == 'aws.codepipeline':
        # CloudWatch Events notification for a pipeline state change
        event_src = 'codepipeline'
        message = json_msg.get('detail-type')
        event_cond = json_msg.get('detail').get('state')
        color_map = {
            'STARTED': 'good',
            'SUCCEEDED': 'good',
            'FAILED': 'danger'
        }
        attachments = [{
            'fallback': json_msg.get('detail-type'),
            'color': color_map[event_cond],
            "fields": [{
                "title": "Pipeline",
                "value": json_msg.get('detail').get('pipeline')
            }, {
                "title": "State",
                "value": json_msg.get('detail').get('state')
            }]
        }]
    else:
        event_src = 'other'
    # SNS Topic ARN: arn:aws:sns:<REGION>:<AWS_ACCOUNT_ID>:<TOPIC_NAME>
    #
    # SNS Topic Names => Slack Channels
    #  <env>-alerts => alerts-<region>
    #  <env>-notices => events-<region>
    #
    region = sns['TopicArn'].split(':')[3]
    topic_name = sns['TopicArn'].split(':')[-1]
    # event_env = topic_name.split('-')[0]
    # event_sev = topic_name.split('-')[1]
    # print('DEBUG:', topic_name, region, event_env, event_sev, event_src)
    channel_map = config['channel_map']
    payload = {
        'text': message,
        'channel': get_slack_channel(region, event_src, topic_name, channel_map),
        'username': get_slack_username(event_src),
        'icon_emoji': get_slack_emoji(event_src, topic_name, event_cond.lower())}
    if attachments:
        payload['attachments'] = attachments
    print('DEBUG PAYLOAD:', json.dumps(payload))
    # prepend https:// when the configured webhook URL lacks a scheme
    webhook_url = config['webhook_url'] if re.match('^https://', config['webhook_url']) else f"https://{config['webhook_url']}"
    r = requests.post(webhook_url, json=payload)
    return r.status_code
# Test locally
if __name__ == '__main__':
    # Minimal SNS envelope carrying a CloudWatch alarm OK transition; lets
    # lambda_handler() be exercised without AWS.  WEBHOOK_URL and CHANNEL_MAP
    # must be set in the environment for the handler to run.
    sns_event_template = json.loads(r"""
{
  "Records": [
    {
      "EventVersion": "1.0",
      "EventSubscriptionArn": "arn:aws:sns:EXAMPLE",
      "EventSource": "aws:sns",
      "Sns": {
        "SignatureVersion": "1",
        "Timestamp": "1970-01-01T00:00:00.000Z",
        "Signature": "EXAMPLE",
        "SigningCertUrl": "EXAMPLE",
        "MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
        "Message": "{\"AlarmName\":\"sns-slack-test-from-cloudwatch-total-cpu\",\"AlarmDescription\":null,\"AWSAccountId\":\"123456789012\",\"NewStateValue\":\"OK\",\"NewStateReason\":\"Threshold Crossed: 1 datapoint (7.9053535353535365) was not greater than or equal to the threshold (8.0).\",\"StateChangeTime\":\"2015-11-09T21:19:43.454+0000\",\"Region\":\"US - N. Virginia\",\"OldStateValue\":\"ALARM\",\"Trigger\":{\"MetricName\":\"CPUUtilization\",\"Namespace\":\"AWS/EC2\",\"Statistic\":\"AVERAGE\",\"Unit\":null,\"Dimensions\":[],\"Period\":300,\"EvaluationPeriods\":1,\"ComparisonOperator\":\"GreaterThanOrEqualToThreshold\",\"Threshold\":8.0}}",
        "MessageAttributes": {
          "Test": {
            "Type": "String",
            "Value": "TestString"
          },
          "TestBinary": {
            "Type": "Binary",
            "Value": "TestBinary"
          }
        },
        "Type": "Notification",
        "UnsubscribeUrl": "EXAMPLE",
        "TopicArn": "arn:aws:sns:us-east-1:123456789012:production-notices",
        "Subject": "OK: sns-slack-test-from-cloudwatch-total-cpu"
      }
    }
  ]
}""")
    print('running locally')
    print(lambda_handler(sns_event_template, None))
| |
#!/usr/bin/env python
import sys
PY3 = sys.version > '3'
import platform
import glob
import logging
import serial
import threading
import time
if PY3:
import queue
else:
import Queue
from struct import * #pack()
# --- logging / serial-link parameters ---
SENSEL_LOGGING_LEVEL = logging.WARNING #(DEBUG/INFO/WARNING/ERROR/CRITICAL)
SENSEL_BAUD = 115200
SENSEL_TIMEOUT = 1  # serial read timeout, seconds
SENSEL_NULL_LABEL = 255
# 6-byte probe string read from register 0x00 to recognize a Sensel device
if PY3:
    SENSEL_MAGIC = b'S3NS31'
else:
    SENSEL_MAGIC = 'S3NS31'
# --- frame content flags (bitmask in the frame header) ---
SENSEL_FRAME_PRESSURE_FLAG = 0x01
SENSEL_FRAME_LABELS_FLAG = 0x02
SENSEL_FRAME_CONTACTS_FLAG = 0x04
# --- packet types (first byte of every device response) ---
SENSEL_PT_RESERVED = 0
SENSEL_PT_FRAME = 1
SENSEL_PT_BUFFERED_FRAME = 2
SENSEL_PT_BUFFERED_FRAME_END = 3
SENSEL_PT_ASYNC_FRAME = 4
SENSEL_PT_FRAME_NACK = 5
SENSEL_PT_READ_ACK = 6
SENSEL_PT_READ_NACK = 7
SENSEL_PT_RVS_ACK = 8
SENSEL_PT_RVS_NACK = 9
SENSEL_PT_WRITE_ACK = 10
SENSEL_PT_WRITE_NACK = 11
SENSEL_PT_WVS_ACK = 12
SENSEL_PT_WVS_NACK = 13
# --- contact event types ---
SENSEL_EVENT_CONTACT_INVALID = 0
SENSEL_EVENT_CONTACT_START = 1
SENSEL_EVENT_CONTACT_MOVE = 2
SENSEL_EVENT_CONTACT_END = 3
# --- command headers: board address with the top bit set for reads ---
SENSEL_BOARD_ADDR = 0x01
SENSEL_READ_HEADER = (SENSEL_BOARD_ADDR | (1 << 7))
SENSEL_WRITE_HEADER = (SENSEL_BOARD_ADDR)
# --- register map ---
SENSEL_REG_MAGIC = 0x00
SENSEL_REG_FW_PROTOCOL_VERSION = 0x06
SENSEL_REG_FW_VERSION_MAJOR = 0x07
SENSEL_REG_FW_VERSION_MINOR = 0x08
SENSEL_REG_FW_VERSION_BUILD = 0x09
SENSEL_REG_FW_VERSION_RELEASE = 0x0B
SENSEL_REG_DEVICE_ID = 0x0C
SENSEL_REG_DEVICE_REVISION = 0x0E
SENSEL_REG_DEVICE_SERIAL_NUMBER = 0x0F
SENSEL_REG_SENSOR_COL_ACTIVE_COUNT = 0x10
SENSEL_REG_SENSOR_ROW_ACTIVE_COUNT = 0x11
SENSEL_REG_SENSOR_ACTIVE_AREA_WIDTH_UM = 0x14
SENSEL_REG_SENSOR_ACTIVE_AREA_HEIGHT_UM = 0x18
SENSEL_REG_SENSOR_CONSTRUCTION = 0x1C
SENSEL_REG_SCAN_FRAME_RATE = 0x20
SENSEL_REG_SCAN_BUFFER_CONTROL = 0x22
SENSEL_REG_SCAN_RESOLUTION_CONTROL = 0x23
SENSEL_REG_SCAN_CONTENT_CONTROL = 0x24
SENSEL_REG_SCAN_ENABLED = 0x25
SENSEL_REG_SCAN_READ_FRAME = 0x26
SENSEL_REG_PRESSURE_MAP_MAX_VALUE = 0x30
SENSEL_REG_CONTACTS_MAX_COUNT = 0x40
SENSEL_REG_CONTACTS_CALC_ELLIPSE = 0x41
SENSEL_REG_CONTACTS_MAX_BLOBS = 0x42
SENSEL_REG_CONTACTS_ENABLE_BLOB_MERGE = 0x43
SENSEL_REG_CONTACTS_AREA_THRESHOLD = 0x44
SENSEL_REG_CONTACTS_MIN_PEAK = 0x45
SENSEL_REG_CONTACTS_MIN_VALUE = 0x47
SENSEL_REG_CONTACTS_MIN_FORCE = 0x49
SENSEL_REG_CONTACTS_MAX_MOVE_DIST = 0x4B
SENSEL_REG_BASELINE_ENABLED = 0x50
SENSEL_REG_BASELINE_INCREASE_RATE = 0x51
SENSEL_REG_BASELINE_DECREASE_RATE = 0x53
SENSEL_REG_ACCEL_X = 0x60
SENSEL_REG_ACCEL_Y = 0x62
SENSEL_REG_ACCEL_Z = 0x64
SENSEL_REG_BATTERY_STATUS = 0x70
SENSEL_REG_BATTERY_PERCENTAGE = 0x71
SENSEL_REG_POWER_BUTTON_PRESSED = 0x72
SENSEL_REG_LED_BRIGHTNESS = 0x80
SENSEL_REG_SOFT_RESET = 0xE0
SENSEL_REG_ERROR_CODE = 0xEC
SENSEL_REG_RESERVED = 0xF0
SENSEL_REG_BATTERY_VOLTAGE_MV = 0xFE
# --- error codes reported by SENSEL_REG_ERROR_CODE ---
EC_OK = 0
EC_REG_INVALID_ADDRESS = 1
EC_REG_INVALID_VALUE = 2
EC_REG_INVALID_PERMISSIONS = 3
# --- module-level connection/scan state (one sensor per process) ---
sensel_serial = None
sensor_nrows = -1
sensor_ncols = -1
sensor_x_to_mm_factor = -1
sensor_y_to_mm_factor = -1
#TODO: Explicitly set this to False or read out actual value in sensor
_sthread = None                 # background scan thread
_scan_buffer = None             # queue of raw frame blobs filled by the thread
_scan_buffering_enabled = False
_scan_num_buffers = 0
_scan_thread_exit_requested = False
_scan_thread_pause_requested = False
_serial_lock = None             # guards all serial I/O
SENSEL_DEVICE_INFO_SIZE = 9
class SenselDeviceInfo():
    """Firmware/device identity decoded from a device-info register read.

    *data* is the raw SENSEL_DEVICE_INFO_SIZE-byte buffer read starting at
    SENSEL_REG_FW_PROTOCOL_VERSION; multi-byte fields are little-endian.
    """
    def __init__(self, data):
        self.fw_protocol_version = _convertBufToVal(data[0:1])
        self.fw_version_major = _convertBufToVal(data[1:2])
        self.fw_version_minor = _convertBufToVal(data[2:3])
        self.fw_version_build = _convertBufToVal(data[3:5])  # 16-bit
        self.fw_version_release = _convertBufToVal(data[5:6])
        self.device_id = _convertBufToVal(data[6:8])         # 16-bit
        self.device_revision = _convertBufToVal(data[8:9])
class SenselContact():
    """One contact parsed from a 30-byte record inside a contacts frame.

    Raw positions are converted to millimetres with the module-level scale
    factors populated by SenselDevice._populateDimensions().
    """
    data_size = 30  # serialized size, in bytes, of one contact record

    def __init__(self, data):
        if(len(data) != SenselContact.data_size):
            logging.error("Unable to create SenselContact. Data length (%d) != contact length (%d)" %
                          (len(data), SenselContact.data_size))
            raise Exception
        self.total_force = _convertBufToVal(data[0:4])
        self.uid = _convertBufToVal(data[4:8])
        self.area = _convertBufToVal(data[8:12])
        x_pos = _convertBufToVal(data[12:14])
        y_pos = _convertBufToVal(data[14:16])
        self.dx = _convertBufToVal(data[16:18])
        self.dy = _convertBufToVal(data[18:20])
        self.orientation = _convertBufToVal(data[20:22])
        self.major_axis = _convertBufToVal(data[22:24])
        self.minor_axis = _convertBufToVal(data[24:26])
        self.peak_x = _convertBufToVal(data[26:27])
        self.peak_y = _convertBufToVal(data[27:28])
        self.id = _convertBufToVal(data[28:29])
        self.type = _convertBufToVal(data[29:30])
        # BUG FIX: keep the raw sensor coordinates as attributes too.
        # __str__ reads self.x_pos/self.y_pos, which previously were never
        # assigned, so printing a contact raised AttributeError.
        self.x_pos = x_pos
        self.y_pos = y_pos
        self.x_pos_mm = x_pos * sensor_x_to_mm_factor
        self.y_pos_mm = y_pos * sensor_y_to_mm_factor

    def __str__(self):
        retstring = "Sensel Contact:\n"
        retstring += "total_force: %d\n" % self.total_force
        retstring += "uid: %d\n" % self.uid
        retstring += "area: %d\n" % self.area
        retstring += "x_pos: %d\n" % self.x_pos
        retstring += "y_pos: %d\n" % self.y_pos
        retstring += "dx: %d\n" % self.dx
        retstring += "dy: %d\n" % self.dy
        retstring += "orientation: %d\n" % self.orientation
        retstring += "major_axis: %d\n" % self.major_axis
        retstring += "minor_axis: %d\n" % self.minor_axis
        retstring += "peak_x: %d\n" % self.peak_x
        retstring += "peak_y: %d\n" % self.peak_y
        retstring += "id: %d\n" % self.id
        retstring += "type: %d\n" % self.type
        return retstring
class SenselDevice():
def __init__(self):
pass
def _openAndProbePort(self, port_name):
global sensel_serial
logging.info("Opening port " + str(port_name))
try:
sensel_serial.port=port_name
sensel_serial.open()
sensel_serial.flushInput()
resp = self.readReg(0x00, 6)
except SenselRegisterReadError:
logging.warning("Failed to read magic register")
sensel_serial.close()
return False
except Exception:
e = sys.exc_info()[1]
print(str(e))
logging.warning("Unable to open " + str(port_name))
return False
if(resp == SENSEL_MAGIC):
logging.info("Found sensel sensor at " + str(port_name))
return True
else:
logging.info("Probe didn't read out magic (%s)" % resp)
sensel_serial.close()
return False
def _openSensorWin(self):
logging.info("Opening device on WIN architecture")
for i in range(50):
if self._openAndProbePort(i):
logging.warning("Found sensor on port COM%s" % i)
return True
return False
def _openSensorMac(self):
logging.info("Opening device on MAC architecture")
port_name_list = glob.glob('/dev/tty.usbmodem*') + glob.glob('/dev/tty*') + glob.glob('/dev/cu*')
for port_name in port_name_list:
if self._openAndProbePort(port_name):
logging.warning("Found sensor on port %s" % port_name)
return True
return False
def _openSensorLinux(self):
logging.info("Opening device on LINUX architecture")
port_name_list = glob.glob('/dev/ttyACM*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyS*')
for port_name in port_name_list:
if self._openAndProbePort(port_name):
logging.warning("Found sensor on port %s" % port_name)
return True
return False
#TODO: make this static
def _initLogging(self):
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)s() ] %(message)s (%(levelname)s)"
logging.basicConfig(stream=sys.stderr, level=SENSEL_LOGGING_LEVEL, format=FORMAT)
def _serialRead(self, num_bytes):
resp = sensel_serial.read(num_bytes)
if(len(resp) != num_bytes):
raise SenselSerialReadError(len(resp), num_bytes)
return resp
def _serialWrite(self, data):
resp = sensel_serial.write(data)
if(resp != len(data)):
raise SenselSerialWriteError(resp, len(data))
return True;
def _readByteValFromBuf(self, buf, idx):
if PY3:
return buf[idx]
else:
return ord(buf[idx])
# TODO: Pass in None to do auto-detection
def openConnection(self, com_port=None):
global sensel_serial
global _serial_lock
self._initLogging()
platform_name = platform.system()
logging.info("Initializing Sensel on " + platform_name + " platform")
sensel_serial = serial.Serial(
baudrate=SENSEL_BAUD,\
parity=serial.PARITY_NONE,\
stopbits=serial.STOPBITS_ONE,\
bytesize=serial.EIGHTBITS,\
timeout=SENSEL_TIMEOUT)
_serial_lock = threading.RLock()
if(com_port != None):
if platform_name == "Windows": #Windows serial open takes an integer indicating COM port number, so we need to extract that.
if "COM" in com_port:
com_port = int(com_port[3:])
resp = self._openAndProbePort(com_port)
else: #Auto-detect sensor
if platform_name == "Windows":
resp = self._openSensorWin()
elif platform_name == "Darwin":
resp = self._openSensorMac()
else:
resp = self._openSensorLinux()
if resp == False:
logging.error("Failed to open Sensel sensor!")
return resp
def getDeviceInfo(self):
return SenselDeviceInfo(self.readReg(SENSEL_REG_FW_PROTOCOL_VERSION, SENSEL_DEVICE_INFO_SIZE))
def getSensorNumRowsCols(self):
global sensor_nrows
global sensor_ncols
if sensor_nrows == -1 or sensor_ncols == -1:
sensor_nrows = _convertBufToVal(self.readReg(SENSEL_REG_SENSOR_ROW_ACTIVE_COUNT, 1))
sensor_ncols = _convertBufToVal(self.readReg(SENSEL_REG_SENSOR_COL_ACTIVE_COUNT, 1))
return (sensor_nrows, sensor_ncols)
def getSensorActiveAreaDimensionsUM(self):
width = _convertBufToVal(self.readReg(SENSEL_REG_SENSOR_ACTIVE_AREA_WIDTH_UM, 4))
height = _convertBufToVal(self.readReg(SENSEL_REG_SENSOR_ACTIVE_AREA_HEIGHT_UM,4))
return (width, height)
def getMaxForce(self):
return _convertBufToVal(self.readReg(SENSEL_REG_PRESSURE_MAP_MAX_VALUE, 2))
def getMaxContacts(self):
return _convertBufToVal(self.readReg(SENSEL_REG_CONTACTS_MAX_COUNT, 1))
def getFrameRate(self):
return _convertBufToVal(self.readReg(SENSEL_REG_SCAN_FRAME_RATE, 1))
def getSerialNumber(self):
serial_num_str = self.readRegVSP(SENSEL_REG_DEVICE_SERIAL_NUMBER)
if PY3:
serial_num_list = [ x for x in serial_num_str ]
else:
serial_num_list = [ ord(x) for x in serial_num_str ]
serial_num_list.reverse()
return serial_num_list
def getBatteryVoltagemV(self):
return _convertBufToVal(self.readReg(SENSEL_REG_BATTERY_VOLTAGE_MV, 2))
def setFrameContentControl(self, content):
return self.writeReg(SENSEL_REG_SCAN_CONTENT_CONTROL, 1, bytearray([content]))
def setLEDBrightness(self, brightness_levels):
if len(brightness_levels) > 16:
logging.error("You cannot set %d brightness levels (16 max)" % brightness_levels)
return False
return self.writeReg(SENSEL_REG_LED_BRIGHTNESS, len(brightness_levels), bytearray(brightness_levels))
def resetSoft(self):
return self.writeReg(SENSEL_REG_SOFT_RESET, 1, bytearray([1]))
def _populateDimensions(self):
global sensor_nrows
global sensor_ncols
global sensor_x_to_mm_factor
global sensor_y_to_mm_factor
(sensor_nrows, sensor_ncols) = self.getSensorNumRowsCols()
sensor_max_x = 256 * (sensor_ncols - 1)
sensor_max_y = 256 * (sensor_nrows - 1)
(sensor_width_um, sensor_height_um) = self.getSensorActiveAreaDimensionsUM()
sensor_width_mm = sensor_width_um / 1000.0
sensor_height_mm = sensor_height_um / 1000.0
sensor_x_to_mm_factor = sensor_width_mm / sensor_max_x
sensor_y_to_mm_factor = sensor_height_mm / sensor_max_y
def _setBufferControl(self, num_buffers):
global _scan_buffering_enabled
global _scan_num_buffers
if num_buffers > 255:
logging.error("Invalid num buffers! (%d)" % num_buffers)
return
_scan_buffering_enabled = (num_buffers > 0)
_scan_num_buffers = num_buffers
#print ("BUFFERING ENABLED" if _scan_buffering_enabled else "BUFFERING DISABLED")
return self.writeReg(SENSEL_REG_SCAN_BUFFER_CONTROL, 1, bytearray([num_buffers]))
def startScanning(self, num_buffers):
global _scan_buffering_enabled
global _scan_thread_exit_requested
global _sthread
global _scan_buffer
#We need to assign the nrows/ncols if we haven't already
self._populateDimensions()
self._setBufferControl(num_buffers)
resp = self.writeReg(SENSEL_REG_SCAN_ENABLED, 1, bytearray([0x01]))
if (not _scan_buffering_enabled) or (resp):
return resp
#We get here if buffering is enabled and we've successfully started scanning
#kick off scanning thread
_scan_thread_exit_requested = False
if PY3:
_scan_buffer = queue.Queue()
else:
_scan_buffer = Queue.Queue()
_sthread = threading.Thread(target=self._scanThread, name="SCAN_THREAD", args=())
_sthread.start()
def stopScanning(self):
global _scan_buffering_enabled
global _scan_thread_exit_requested
global _sthread
if _scan_buffering_enabled: #stop scanning thread
_scan_thread_exit_requested = True
_sthread.join()
return self.writeReg(SENSEL_REG_SCAN_ENABLED, 1, bytearray([0x00]))
def _scanThread(self):
global _scan_thread_exit_requested
global _scan_thread_pause_requested
global _scan_buffer
global _serial_lock
logging.info("Scan thread start")
frame_rate = self.getFrameRate()
nominal_period = 1.0 / frame_rate
read_delay = 0.25 * nominal_period
logging.info("using read_delay: %f" % read_delay)
while not _scan_thread_exit_requested:
while _scan_thread_pause_requested:
pass
_serial_lock.acquire()
self._sendFrameReadReq()
#Read until we get a buffer end
while True:
frame_data = self._readFrameData()
if frame_data:
_scan_buffer.put(frame_data)
else:
break
_serial_lock.release()
time.sleep(read_delay)
logging.info("Scan thread exit")
#The user doesn't need to know that we're sending a write request
def readFrame(self):
global _scan_buffering_enabled
global _scan_buffer
global _serial_lock
if _scan_buffering_enabled:
if _scan_buffer.empty():
return None
else:
parsed_data = self._parseFrameData(_scan_buffer.get())
_scan_buffer.task_done()
return parsed_data
else:
#For non-buffered frame reads, we simply issue a synchronous read
_serial_lock.acquire()
self._sendFrameReadReq()
frame_data = self._readFrameData()
_serial_lock.release()
return self._parseFrameData(frame_data)
def pauseScanThread(self):
global _scan_thread_pause_requested
_scan_thread_pause_requested = True
def resumeScanThread(self):
global _scan_thread_pause_requested
_scan_thread_pause_requested = False
def _sendFrameReadReq(self):
#Send first read request
cmd = pack('BBB', SENSEL_READ_HEADER, SENSEL_REG_SCAN_READ_FRAME, 0)
return self._serialWrite(cmd)
#Reads frame data, and verifies checksum
def _readFrameData(self):
global _scan_buffering_enabled
ack = _convertBufToVal(self._serialRead(1))
if _scan_buffering_enabled and (ack == SENSEL_PT_BUFFERED_FRAME_END):
return None
if(ack != SENSEL_PT_FRAME and ack != SENSEL_PT_BUFFERED_FRAME):
logging.error("Failed to recieve ACK on force frame finish! (received %d)\n" % ack)
raise SenselSerialReadError(0, 1)
frame_size = _convertBufToVal(self._serialRead(2))
logging.info("reading frame of %d bytes" % frame_size)
frame_data = self._serialRead(frame_size)
resp_checksum = _convertBufToVal(self._serialRead(1))
if not self._verifyChecksum(frame_data, resp_checksum):
logging.error("Response checksum didn't match checksum (resp_checksum=%d, checksum=%d)" %
(resp_checksum, checksum))
raise SenselSerialReadError(1, 1)
return frame_data
def _parseFrameData(self, frame_data):
if len(frame_data) < 2:
logging.error("Frame data size is less than 2!")
raise SenselSerialReadError(2, 0)
#Pull off frame header info
content_bit_mask = _convertBufToVal(frame_data[0])
lost_frame_count = _convertBufToVal(frame_data[1])
frame_data = frame_data[2:]
logging.info("content mask: %d, lost frames: %d" % (content_bit_mask, lost_frame_count))
if content_bit_mask & SENSEL_FRAME_PRESSURE_FLAG:
logging.info("Received pressure map")
raise NotImplementedError
if content_bit_mask & SENSEL_FRAME_LABELS_FLAG:
logging.info("Received labels map")
raise NotImplementedError
if content_bit_mask & SENSEL_FRAME_CONTACTS_FLAG:
logging.info("Received contacts")
num_contacts = _convertBufToVal(frame_data[0])
frame_data = frame_data[1:]
contacts = []
for i in range(num_contacts):
contacts.append(SenselContact(frame_data[:SenselContact.data_size]))
frame_data = frame_data[SenselContact.data_size:]
else:
contacts = None
return (lost_frame_count, None, None, contacts)
def _verifyChecksum(self, data, checksum):
curr_sum = 0
for val in data:
if PY3:
curr_sum += val
else:
curr_sum += ord(val)
curr_sum = (curr_sum & 0xFF)
if(checksum != curr_sum):
logging.error("Checksum failed! (%d != %d)" % (checksum, curr_sum))
return False
else:
logging.debug("Checksum passed! (%d == %d)" % (checksum, curr_sum))
return True
def readContacts(self):
frame = self.readFrame()
if frame:
(rolling_frame_counter, force_image, label_image, contacts) = frame
return contacts
else:
return None
def readReg(self, reg, size):
global _serial_lock
cmd = pack('BBB', SENSEL_READ_HEADER, reg, size)
_serial_lock.acquire()
try:
self._serialWrite(cmd)
ack = _convertBufToVal(self._serialRead(1))
resp_size = _convertBufToVal(self._serialRead(2))
if(ack != SENSEL_PT_READ_ACK):
logging.error("Failed to receive ACK from reg read (received %d)" % ack)
raise SenselSerialReadError(1, 0)
if(resp_size != size):
logging.error("Response size didn't match request size (resp_size=%d, req_size=%d)" % (resp_size, size))
raise SenselSerialReadError(resp_size, size)
resp = self._serialRead(size)
resp_checksum = _convertBufToVal(self._serialRead(1))
except (SenselSerialWriteError, SenselSerialReadError):
raise SenselRegisterReadError(reg, size)
_serial_lock.release()
if not self._verifyChecksum(resp, resp_checksum):
logging.error("Response checksum didn't match checksum (resp_checksum=%d, checksum=%d)" % (resp_checksum, checksum))
raise SenselSerialReadError(1, 1)
return resp
def readRegVSP(self, reg):
global _serial_lock
cmd = pack('BBB', SENSEL_READ_HEADER, reg, 0) # 0 for RVS
_serial_lock.acquire()
try:
self._serialWrite(cmd)
ack = _convertBufToVal(self._serialRead(1))
if(ack != SENSEL_PT_RVS_ACK):
logging.error("Failed to receive ACK from vsp read (received %d)" % ack)
raise SenselSerialReadError
vsp_size = _convertBufToVal(self._serialRead(2))
resp = self._serialRead(vsp_size)
resp_checksum = _convertBufToVal(self._serialRead(1))
except SenselSerialReadError:
raise SenselRegisterReadVSPError(reg, vsp_size)
_serial_lock.release()
if not self._verifyChecksum(resp, resp_checksum):
raise SenselRegisterReadVSPError(reg, vsp_size)
return resp
def readErrorCode(self):
return _convertBufToVal(self.readReg(0xEC, 1))
def printErrorCode(self, error_code):
if error_code == 0:
print("Success!")
elif error_code == 1:
print("ERROR: Invalid write address")
elif error_code == 2:
print("ERROR: Invalid write value")
elif error_code == 3:
print("ERROR: Invalid register write permissions")
def writeReg(self, reg, size, data):
global _serial_lock
cmd = pack('BBB', SENSEL_WRITE_HEADER, reg, size)
checksum = 0
for d in data:
checksum += d
checksum &= 0xFF
_serial_lock.acquire()
try:
self._serialWrite(cmd)
self._serialWrite(data)
self._serialWrite(bytearray([checksum]))
resp = _convertBufToVal(self._serialRead(1)) #Read ACK
except (SenselSerialWriteError, SenselSerialReadError):
raise SenselRegisterWriteError(reg, size, data, False, 0)
if (resp != SENSEL_PT_WRITE_ACK):
raise SenselRegisterWriteError(reg, size, data, True, resp)
ec = self.readErrorCode()
_serial_lock.release() #We should hold the lock through the EC read
return ec
    def closeConnection(self):
        """Close the serial port.

        NOTE(review): this closes the module-level `sensel_serial` handle,
        not an instance attribute -- confirm that is intentional (it would
        close the port for every device instance sharing the module).
        """
        sensel_serial.close()
def _convertBufToVal(buf):
if PY3:
if type(buf) is int:
return buf
else:
buf = ["{:02d}".format(ord(c)) for c in buf]
final_val = 0
for i in range(len(buf)):
final_val |= (int(buf[i]) << (i * 8))
return final_val
class SenselError(Exception):
    """Root of the Sensel exception hierarchy; all module errors derive from it."""
class SenselSerialReadError(SenselError):
    """Raised when a serial read returns fewer bytes than requested."""

    def __init__(self, num_bytes_read, num_bytes_requested):
        # Keep both counts so callers can inspect how short the read was.
        self.num_bytes_read = num_bytes_read
        self.num_bytes_requested = num_bytes_requested
        logging.error("Requested %d bytes, received %d bytes",
                      num_bytes_requested, num_bytes_read)
class SenselFrameDecompressionError(SenselError):
    """Raised when frame decompression yields fewer bytes than expected.

    Fixes vs. the original: the docstring wrongly described a serial read
    failure, and the byte counts were logged but not stored; they are now
    kept as attributes, consistent with the other error classes.
    """
    def __init__(self, num_bytes_decompressed, num_bytes_expected):
        self.num_bytes_decompressed = num_bytes_decompressed
        self.num_bytes_expected = num_bytes_expected
        logging.error("Only decompressed %d of %d bytes" %
                      (num_bytes_decompressed, num_bytes_expected))
class SenselSerialWriteError(SenselError):
    """Exception raised when a serial write sends fewer bytes than requested"""
    def __init__(self, num_bytes_written, num_bytes_requested):
        # Number of bytes actually written vs. the number the caller asked for.
        self.num_bytes_written = num_bytes_written
        self.num_bytes_requested = num_bytes_requested
        logging.error("Requested %d bytes, wrote %d bytes" % (num_bytes_requested, num_bytes_written))
class SenselRegisterReadError(SenselError):
    """Raised when a fixed-size register read transaction fails."""

    def __init__(self, reg, size):
        # Register address and requested byte count, for caller inspection.
        self.reg = reg
        self.size = size
        logging.error("Failed to read register 0x%02X (size %d)", reg, size)
class SenselRegisterReadVSPError(SenselError):
    """Raised when a variable-size (VSP) register read transaction fails."""

    def __init__(self, reg, vsp_size):
        # Register address and the payload size reported (or 0 if unknown).
        self.reg = reg
        self.vsp_size = vsp_size
        logging.error("Failed to read VSP register 0x%02X (vsp size %d)",
                      reg, vsp_size)
class SenselRegisterWriteError(SenselError):
    """Raised when a register write times out or the device answers with a
    non-ACK value."""

    def __init__(self, reg, size, data, ack_received, response):
        # Full context of the failed write so callers can retry or report it.
        self.reg = reg
        self.size = size
        self.data = data
        self.ack_received = ack_received
        self.response = response
        logging.error(
            "Failed to write register 0x%02X (size=%d, ack_received=%d, response=%d)",
            reg, size, ack_received, response)
# ==== boundary: the following section originates from a separate module ====
# Written by Petru Paler and Ross Cohen
# see LICENSE.txt for license information
from types import IntType, LongType, StringType, ListType, TupleType, DictType
def decode_int(x, f):
    """Decode a bencoded integer 'i<digits>e' starting at x[f].

    Returns (value, index just past the trailing 'e'). Only canonical forms
    are accepted: no '-0' and no leading zeros.
    """
    start = f + 1
    end = x.index('e', start)
    text = x[start:end]
    try:
        n = int(text)
    except (OverflowError, ValueError):
        n = long(text)  # Python 2 big-integer fallback
    if text[0] == '-':
        if text[1] == '0':
            raise ValueError
    elif text[0] == '0' and end != start + 1:
        raise ValueError
    return (n, end + 1)
def decode_string(x, f):
    """Decode a bencoded string '<len>:<bytes>' starting at x[f].

    Returns (string, index just past its last character). A length with a
    leading zero (other than plain '0') is rejected.
    """
    sep = x.index(':', f)
    length_text = x[f:sep]
    try:
        n = int(length_text)
    except (OverflowError, ValueError):
        n = long(length_text)  # Python 2 big-integer fallback
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + n], start + n)
def decode_list(x, f):
    """Decode a bencoded list 'l...e' starting at x[f]; returns (list, next index)."""
    items = []
    f += 1
    while x[f] != 'e':
        # Dispatch on the first character of each element.
        value, f = decode_func[x[f]](x, f)
        items.append(value)
    return (items, f + 1)
def decode_dict(x, f):
    """Decode a bencoded dict 'd...e' starting at x[f].

    Keys must be strings and must appear in strictly increasing order
    (canonical bencoding); otherwise ValueError is raised.
    """
    result = {}
    f += 1
    prev_key = None
    while x[f] != 'e':
        key, f = decode_string(x, f)
        # Python 2 ordering: None compares less than any string, so the
        # first key always passes this strictly-increasing check.
        if prev_key >= key:
            raise ValueError
        prev_key = key
        result[key], f = decode_func[x[f]](x, f)
    return (result, f + 1)
# Dispatch table: the first character of an encoded value selects its decoder.
decode_func = {'l': decode_list, 'd': decode_dict, 'i': decode_int}
decode_func.update((digit, decode_string) for digit in '0123456789')
def bdecode(x):
    """Decode one complete bencoded value; ValueError on any malformed input."""
    try:
        value, consumed = decode_func[x[0]](x, 0)
    except (IndexError, KeyError):
        raise ValueError
    if consumed != len(x):
        raise ValueError  # trailing garbage after a valid value
    return value
def test_bdecode():
    """Exercise bdecode on malformed inputs (must raise ValueError) and on
    well-formed inputs (must round-trip to the expected Python value)."""
    bad_inputs = [
        '0:0:', 'ie', 'i341foo382e', 'i-0e', 'i123', '', 'i6easd',
        '35208734823ljdahflajhdf', '2:abfdjslhfld', '02:xy', 'l',
        'leanfdldjfh', 'relwjhrlewjh', 'd', 'defoobar', 'd3:fooe',
        'di1e0:e', 'd1:b0:1:a0:e', 'd1:a0:1:a0:e', 'i03e', 'l01:ae',
        '9999:x', 'l0:', 'd0:0:', 'd0:', '00:', 'l-3:e', 'i-03e',
    ]
    for bad in bad_inputs:
        try:
            bdecode(bad)
        except ValueError:
            pass
        else:
            assert 0
    assert bdecode('i4e') == 4
    assert bdecode('i0e') == 0
    assert bdecode('i123456789e') == 123456789
    assert bdecode('i-10e') == -10
    assert bdecode('0:') == ''
    assert bdecode('3:abc') == 'abc'
    assert bdecode('10:1234567890') == '1234567890'
    assert bdecode('le') == []
    assert bdecode('l0:0:0:e') == ['', '', '']
    assert bdecode('li1ei2ei3ee') == [1, 2, 3]
    assert bdecode('l3:asd2:xye') == ['asd', 'xy']
    assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
    assert bdecode('de') == {}
    assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
    assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
class Bencached(object):
    """Wrapper marking a value as already bencoded; bencode emits it verbatim."""
    __slots__ = ['bencoded']

    def __init__(self, s):
        # The pre-encoded payload, appended unchanged by bencode_cached().
        self.bencoded = s
def bencode_int(x, b):
    """Append the bencoding of integer x, i.e. 'i<x>e', to list b."""
    b.append('i')
    b.append(str(x))
    b.append('e')
def bencode_string(x, b):
    """Append the bencoding of string x, i.e. '<len>:<x>', to list b."""
    b += [str(len(x)), ':', x]
def bencode_list(x, b):
    """Append the bencoding of sequence x, i.e. 'l...e', to list b."""
    b.append('l')
    for item in x:
        # Dispatch on each element's Python type.
        encode_func[type(item)](item, b)
    b.append('e')
def bencode_dict(x, b):
    """Append the bencoding of dict x, i.e. 'd...e', to list b.

    Keys are emitted in sorted order (canonical form) and must be strings.
    """
    b.append('d')
    for key in sorted(x.keys()):
        assert type(key) is StringType  # bencode dict keys must be strings
        b.extend((str(len(key)), ':', key))
        encode_func[type(x[key])](x[key], b)
    b.append('e')
def bencode_cached(x, b):
    """Append a Bencached object's pre-encoded payload to b unchanged."""
    b.append(x.bencoded)
# Dispatch table: the Python type of a value selects its encoder.
encode_func = {
    IntType: bencode_int,
    LongType: bencode_int,
    StringType: bencode_string,
    ListType: bencode_list,
    TupleType: bencode_list,
    DictType: bencode_dict,
    Bencached: bencode_cached,
}
def bencode(x):
    """Return the bencoding of x; raise ValueError for unsupported types."""
    pieces = []
    try:
        encode_func[type(x)](x, pieces)
    except KeyError:
        raise ValueError
    return ''.join(pieces)
def test_bencode():
    """Exercise bencode over every supported type, then check that a
    non-string dict key is rejected (via the assert in bencode_dict)."""
    expected = [
        (4, 'i4e'),
        (0, 'i0e'),
        (-10, 'i-10e'),
        (12345678901234567890, 'i12345678901234567890e'),
        ('', '0:'),
        ('abc', '3:abc'),
        ('1234567890', '10:1234567890'),
        ([], 'le'),
        ([1, 2, 3], 'li1ei2ei3ee'),
        ([['Alice', 'Bob'], [2, 3]], 'll5:Alice3:Bobeli2ei3eee'),
        ({}, 'de'),
        ({'age': 25, 'eyes': 'blue'}, 'd3:agei25e4:eyes4:bluee'),
        ({'spam.mp3': {'author': 'Alice', 'length': 100000}},
         'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'),
    ]
    for value, encoding in expected:
        assert bencode(value) == encoding
    try:
        bencode({1: 'foo'})
    except AssertionError:
        return
    assert 0
# Optionally JIT-compile the hot entry points with psyco when available
# (Python 2 only); silently do nothing otherwise.
try:
    import psyco
except ImportError:
    pass
else:
    psyco.bind(bdecode)
    psyco.bind(bencode)
# ==== boundary: the following section originates from a separate module ====
# -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The library itself, the examples directory, and local build helpers must all
# be importable for autodoc to find them.
sys.path.insert(0, os.path.abspath("../../lib"))
sys.path.insert(0, os.path.abspath("../..")) # examples
sys.path.insert(0, os.path.abspath("."))
# -- General configuration --------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.5.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "zzzeeksphinx",
    "changelog",
    "sphinx_paramlinks",
]
# Minimum versions required of third-party Sphinx extensions.
needs_extensions = {"zzzeeksphinx": "1.2.1"}
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath("templates")]
# Do not fail the build on unresolved cross-references.
nitpicky = False
# The suffix of source filenames.
source_suffix = ".rst"
# section names used by the changelog extension.
changelog_sections = [
    "general",
    "platform",
    "orm",
    "orm declarative",
    "orm querying",
    "orm configuration",
    "examples",
    "engine",
    "sql",
    "schema",
    "extensions",
    "mypy",
    "asyncio",
    "postgresql",
    "mysql",
    "sqlite",
    "mssql",
    "oracle",
    "firebird",
]
# tags to sort on inside of sections
changelog_inner_tag_sort = [
    "feature",
    "usecase",
    "change",
    "changed",
    "performance",
    "bug",
    "deprecated",
    "removed",
    "renamed",
    "moved",
]
# how to render changelog links
changelog_render_ticket = "https://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
    "default": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
    "github": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
}
changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s"
# Source files/directories that Sphinx should never pick up directly.
exclude_patterns = ["build", "**/unreleased*/*", "*_include.rst"]
# zzzeeksphinx makes these conversions when it is rendering the
# docstrings classes, methods, and functions within the scope of
# Sphinx autodoc
autodocmods_convert_modname = {
    "sqlalchemy.sql.sqltypes": "sqlalchemy.types",
    "sqlalchemy.sql.type_api": "sqlalchemy.types",
    "sqlalchemy.sql.schema": "sqlalchemy.schema",
    "sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.ddl": "sqlalchemy.schema",
    "sqlalchemy.sql.base": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.operators": "sqlalchemy.sql.expression",
    "sqlalchemy.event.base": "sqlalchemy.event",
    "sqlalchemy.engine.base": "sqlalchemy.engine",
    "sqlalchemy.engine.url": "sqlalchemy.engine",
    "sqlalchemy.engine.row": "sqlalchemy.engine",
    "sqlalchemy.engine.cursor": "sqlalchemy.engine",
    "sqlalchemy.engine.result": "sqlalchemy.engine",
    "sqlalchemy.ext.asyncio.result": "sqlalchemy.ext.asyncio",
    "sqlalchemy.ext.asyncio.engine": "sqlalchemy.ext.asyncio",
    "sqlalchemy.ext.asyncio.session": "sqlalchemy.ext.asyncio",
    "sqlalchemy.util._collections": "sqlalchemy.util",
    "sqlalchemy.orm.attributes": "sqlalchemy.orm",
    "sqlalchemy.orm.relationships": "sqlalchemy.orm",
    "sqlalchemy.orm.interfaces": "sqlalchemy.orm",
    "sqlalchemy.orm.query": "sqlalchemy.orm",
    "sqlalchemy.orm.util": "sqlalchemy.orm",
}
# Same idea, but keyed on specific (module, classname) pairs.
autodocmods_convert_modname_w_class = {
    ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
    ("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# on the referencing side, a newer zzzeeksphinx extension
# applies shorthand symbols to references so that we can have short
# names that are still using absolute references.
# e.g. ":class:`_engine.Connection`" resolves to sqlalchemy.engine.Connection.
zzzeeksphinx_module_prefixes = {
    "_sa": "sqlalchemy",
    "_engine": "sqlalchemy.engine",
    "_url": "sqlalchemy.engine",
    "_result": "sqlalchemy.engine",
    "_row": "sqlalchemy.engine",
    "_schema": "sqlalchemy.schema",
    "_types": "sqlalchemy.types",
    "_asyncio": "sqlalchemy.ext.asyncio",
    "_expression": "sqlalchemy.sql.expression",
    "_sql": "sqlalchemy.sql.expression",
    "_dml": "sqlalchemy.sql.expression",
    "_ddl": "sqlalchemy.schema",
    "_functions": "sqlalchemy.sql.functions",
    "_pool": "sqlalchemy.pool",
    # base event API, like listen() etc.
    "_event": "sqlalchemy.event",
    # core events like PoolEvents, ConnectionEvents
    "_events": "sqlalchemy.events",
    # note Core events are linked as sqlalchemy.event.<cls>
    # ORM is sqlalchemy.orm.<cls>.
    "_ormevent": "sqlalchemy.orm",
    "_ormevents": "sqlalchemy.orm",
    "_exc": "sqlalchemy.exc",
    "_reflection": "sqlalchemy.engine.reflection",
    "_orm": "sqlalchemy.orm",
    "_query": "sqlalchemy.orm",
    "_ormexc": "sqlalchemy.orm.exc",
    "_roles": "sqlalchemy.sql.roles",
    "_baked": "sqlalchemy.ext.baked",
    "_horizontal": "sqlalchemy.ext.horizontal_shard",
    "_associationproxy": "sqlalchemy.ext.associationproxy",
    "_automap": "sqlalchemy.ext.automap",
    "_hybrid": "sqlalchemy.ext.hybrid",
    "_compilerext": "sqlalchemy.ext.compiler",
    "_mutable": "sqlalchemy.ext.mutable",
    "_declarative": "sqlalchemy.ext.declarative",
    "_future": "sqlalchemy.future",
    "_futureorm": "sqlalchemy.future.orm",
    "_postgresql": "sqlalchemy.dialects.postgresql",
    "_mysql": "sqlalchemy.dialects.mysql",
    "_mssql": "sqlalchemy.dialects.mssql",
    "_oracle": "sqlalchemy.dialects.oracle",
    "_sqlite": "sqlalchemy.dialects.sqlite",
    "_util": "sqlalchemy.util",
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = u"SQLAlchemy"
copyright = u"2007-2021, the SQLAlchemy authors and contributors" # noqa
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.4"
# The full version, including alpha/beta/rc tags.
release = "1.4.25"
# Human-readable release date, consumed by the site templates.
release_date = "September 22, 2021"
site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = "3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
# (i.e. one .pot per source document rather than one per directory)
gettext_compact = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "zzzeeksphinx"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = "default.css"
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%m/%d/%Y %H:%M:%S"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# Serves a custom 404 page.
html_additional_pages = {"notfound": "notfound.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "SQLAlchemydoc"
# autoclass_content = 'both'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "contents",
        "sqlalchemy_%s.tex" % release.replace(".", "_"),
        "SQLAlchemy Documentation",
        "Mike Bayer",
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 3 (the old comment said 2, contradicting the value below).
latex_preamble = r"\setcounter{tocdepth}{3}"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# latex_elements = {
# 'papersize': 'letterpaper',
# 'pointsize': '10pt',
# }
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        "index",
        "sqlalchemy",
        u"SQLAlchemy Documentation",
        [u"SQLAlchemy authors"],
        1,
    )
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u"SQLAlchemy"
epub_author = u"SQLAlchemy authors"
epub_publisher = u"SQLAlchemy authors"
# Fix: the year range was stale ("2007-2015") and inconsistent with the main
# `copyright` setting above; keep both ranges in sync.
epub_copyright = u"2007-2021, SQLAlchemy authors"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# ==== boundary: the following section originates from a separate module ====
# Prefer the Python 3 module name; fall back to the Python 2 spelling.
try:
    import tkinter as tk
except (ImportError) as e:
    print(e, "\nTrying as Tkinter")
    try:
        import Tkinter as tk
    except (ImportError) as e:
        print(e, "\nCheck to see if tkinter/Tkinter is imported")
import os
import subprocess
import SubnetCalculation
#import _thread
#import Converter
class myProg(tk.Tk):
    """Tk application window combining a subnet calculator and a base converter."""
    def __init__(self, master):
        """Create the root window and immediately build the full UI."""
        tk.Tk.__init__(self, master)
        self.converter_count = 0  # counter retained from earlier design; not read in this class
        self.master = master
        self.main_window()
    def main_window(self):
        """Build the UI: header, subnet pane, converter pane and both button
        bars. The converter pane and its button bar start hidden
        (pack_forget) and are toggled by new_converter()/new_subnet().

        NOTE(review): the frames are published as module-level globals so the
        toggle methods can reach them -- instance attributes would be
        cleaner; confirm nothing else depends on the globals.
        """
        f = tk.Frame(self, bg="black", width=1000, height=1000)
        f.pack()
        #
        headerArea= tk.Frame(f, bg="white")
        headerArea.pack(expand=True, fill=tk.BOTH)
        global textArea
        textArea = tk.Frame(f, bg="lightgreen")
        textArea.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        global convertArea
        convertArea = tk.Frame(f, bg="lightgreen")
        convertArea.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        #convertArea.pack_forget()
        global buttonArea_sub
        buttonArea_sub = tk.Frame(f, bg="lightgreen")
        buttonArea_sub.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH)
        global buttonArea_con
        buttonArea_con = tk.Frame(f, bg="lightgreen")
        buttonArea_con.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH)
        # Grid Details-------------------------------
        # self.grid()
        #HEADERAREA===================================================
        # Lable Detailed headerArea---------------------------
        self.mainHeader = tk.StringVar()
        head = tk.Label(headerArea, font=("Calibri Bold",20), textvariable=self.mainHeader, bg="turquoise")
        head.grid(column=0, row=0, ipadx=145)
        mainHead = "Networking Control Panel"
        self.mainHeader.set(mainHead)
        #if self.sub_count == False:
        #CONVERTAREA================================================
        # labels Deatiled convertArea-------------------------
        # Input entry (entryVariable) and result entry (entryReturned) used by
        # all the conversion handlers below.
        self.entryVariable = tk.StringVar()
        self.entry = tk.Entry(convertArea, font=("Calibri", 15), textvariable=self.entryVariable, width=20)
        self.entry.grid(column=1, row=0, columnspan=2, padx=10)
        self.entry.focus()
        self.entryReturned = tk.StringVar()
        # NOTE(review): self.entry is rebound here, so the first Entry widget
        # is no longer reachable through the attribute after this point.
        self.entry = tk.Entry(convertArea, font=("Calibri", 15) ,textvariable=self.entryReturned, width=20)
        self.entry.grid(column=1, row=1, columnspan=2, padx=10)
        self.convertLabel = tk.StringVar()
        ConvertLabel = tk.Label(convertArea, font=("Calibri", 15), textvariable=self.convertLabel, bg="lightgrey")
        ConvertLabel.grid(column=0, row=0)
        self.convertLabel.set(u"Enter Value To Convert")
        convertArea.pack_forget()
        #TEXTAREA=====================================================
        #Labels Detailed textArea----------------------------
        self.filler0 = tk.StringVar()
        label1 = tk.Label(textArea, textvariable=self.filler0, bg="lightgreen")
        label1.grid(column=0, row=0, ipadx=55, ipady=5)
        self.filler1 = tk.StringVar()
        label2 = tk.Label(textArea, textvariable=self.filler1, bg="lightgreen")
        label2.grid(column=2, row=0, ipadx=55, ipady=5)
        self.filler2 = tk.StringVar()
        label3 = tk.Label(textArea, textvariable=self.filler2, bg="lightgreen")
        label3.grid(column=0, row=6, columnspan=2)
        self.myLabel = tk.StringVar()
        label = tk.Label(textArea, font=("Calibri",15) ,textvariable=self.myLabel, bg="lightgreen")
        header = "Subnetted Output"
        self.myLabel.set(header)
        label.grid(column=1, row=0)
        self.label_startIP = tk.StringVar()
        lbl_startIP = tk.Label(textArea, font=("Calibri", 14), textvariable=self.label_startIP, bg="lightgrey")
        lbl_startIP.grid(column=0, row=1, ipadx=20, pady=5)
        self.label_startIP.set("Start IP")
        self.label_endIP = tk.StringVar()
        lbl_endIP = tk.Label(textArea, font=("Calibri", 14), textvariable=self.label_endIP, bg="lightgrey")
        lbl_endIP.grid(column=0, row=2, ipadx=25, pady=5)
        self.label_endIP.set("End IP")
        self.label_mask = tk.StringVar()
        lbl_mask = tk.Label(textArea, font=("Calibri", 14), textvariable=self.label_mask, bg="lightgrey")
        lbl_mask.grid(column=0, row=3, ipadx=28, pady=5)
        self.label_mask.set("Mask")
        self.label_numHosts = tk.StringVar()
        lbl_numHosts = tk.Label(textArea, font=("Calibri", 14), textvariable=self.label_numHosts, bg="lightgrey")
        lbl_numHosts.grid(column=0, row=4, ipadx=5, pady=5)
        self.label_numHosts.set("# of Hosts")
        # Entries Deatailed textArea---------------------------
        # These entry variables are read by Mask_button_click().
        self.entry_startIP = tk.StringVar()
        self.start_IP = tk.Entry(textArea, font=("Calibri", 14), textvariable=self.entry_startIP, bd=5, relief="ridge")
        self.start_IP.grid(column=1, row=1, columnspan=2)
        self.entry_endIP = tk.StringVar()
        self.end_IP = tk.Entry(textArea, font=("Calibri", 14), textvariable=self.entry_endIP, bd=5, relief="ridge")
        self.end_IP.grid(column=1, row=2, columnspan=2)
        self.entry_mask = tk.StringVar()
        self.ent_mask = tk.Entry(textArea, font=("Calibri", 14), textvariable=self.entry_mask, bd=5, relief="ridge")
        self.ent_mask.grid(column=1, row=3, columnspan=2)
        self.entry_hosts = tk.StringVar()
        self.ent_host = tk.Entry(textArea, font=("calibri", 14), textvariable=self.entry_hosts, width=7, bd=5, relief=("ridge"))
        self.ent_host.grid(column=1, row=4, padx=20)
        #BUTTONAREA SUB===================================================
        # Buttons Detailes buttonArea_sub---------------------------
        converter = tk.Button(buttonArea_sub, text="Converter", command=self.new_converter, bg="red", bd=5, relief="raised")
        converter.grid(column=1, row=2, padx=10)
        subnet = tk.Button(buttonArea_sub, text="Subnets", command=self.new_subnet, bg="red", bd=5, relief="raised")
        subnet.grid(column=2, row=2, padx=5)
        find_Mask = tk.Button(buttonArea_sub, text="Find Mask", command=self.Mask_button_click, bg="turquoise", bd=5, relief="raised")
        find_Mask.grid(column=1, row=3, pady=5, ipadx=2)
        find_Range = tk.Button(buttonArea_sub, text="Find Range", command=self.Calculate_Range, bg="turquoise", bd=5, relief="raised")
        find_Range.grid(column=1, row=4, pady=5)
        find_host = tk.Button(buttonArea_sub, text="Find Hosts", command=self.Calculate_Hosts, bg="turquoise", bd=5, relief="raised")
        find_host.grid(column=1, row=5, pady=5, ipadx=2)
        #BUTTONAREA CON====================================================
        # Buttons Detailed buttonArea_con----------------------------
        converter = tk.Button(buttonArea_con, text="Converter", command=self.new_converter, bg="red", bd=5, relief="raised")
        converter.grid(column=1, row=0, padx=10)
        subnet = tk.Button(buttonArea_con, text="Subnets", command=self.new_subnet, bg="red", bd=5, relief="raised")
        subnet.grid(column=2, row=0, padx=5)
        button_Hex_Bin = tk.Button(buttonArea_con, text=u"Hex 2 Bin", bg="turquoise", bd=5, relief="raised", command=self.hex_bin)
        button_Hex_Bin.grid(column=1, row=1, pady=5)
        button_Hex_Dec = tk.Button(buttonArea_con, text=u"Hex 2 Dec", bg="turquoise", bd=5, relief="raised", command=self.hex_dec)
        button_Hex_Dec.grid(column=2, row=1, pady=5)
        button_Bin_Dec = tk.Button(buttonArea_con, text=u"Bin 2 Dec", bg="turquoise", bd=5, relief="raised", command=self.bin_dec)
        button_Bin_Dec.grid(column=1, row=2, pady=5)
        button_Bin_Hex = tk.Button(buttonArea_con, text=u"Bin 2 Hex", bg="turquoise", bd=5, relief="raised", command=self.bin_hex)
        button_Bin_Hex.grid(column=2, row=2, pady=5, ipadx=2)
        button_Dec_Bin = tk.Button(buttonArea_con, text=u"Dec 2 Bin", bg="turquoise", bd=5, relief="raised", command=self.dec_bin)
        button_Dec_Bin.grid(column=1, row=3, pady=5)
        button_Dec_Hex = tk.Button(buttonArea_con, text=u"Dec 2 Hex", bg="turquoise", bd=5, relief="raised", command=self.dec_hex)
        button_Dec_Hex.grid(column=2, row=3, pady=5)
        buttonArea_con.pack_forget()
    def Mask_button_click(self):
        """Validate the start-IP, end-IP and host-count entries, then compute
        a subnet mask if all three are valid.

        Any invalid field gets an "invalid" note appended to its entry text
        and its slot in `order` is set to "1", which blocks the calculation.
        """
        order = ["start IP", "end IP", "hosts"]
        result = []
        # NOTE(review): verify_IP is called through the module attribute
        # SubnetCalculation.subnet_calculation without instantiating it --
        # confirm it is a static/class-level API.
        r = SubnetCalculation.subnet_calculation
        result = r.verify_IP(self.entry_startIP.get(), self.entry_endIP.get(), self.entry_hosts.get())
        print(result)
        for indx in range(0, 3):
            try:
                if result[indx] != 'valid' and indx == 0:
                    self.entry_startIP.set(self.entry_startIP.get() +" invalid IP")
                    order[indx] = "1"
                    break
                if result[indx] != 'valid' and indx == 1:
                    self.entry_endIP.set(self.entry_endIP.get() + " invalid ip")
                    order[indx] = "1"
                    break
                if result[indx] != 'valid' and indx == 2:
                    self.entry_hosts.set(self.entry_hosts.get() + " invalid host")
                    order[indx] = "1"
                    break
            except TypeError as e:
                # presumably hit when verify_IP returns a non-indexable
                # value -- TODO confirm against SubnetCalculation
                print(e)
        if "1" not in order:
            SubnetCalculation.calculateMask(self.entry_startIP.get(), self.entry_endIP.get(), self.entry_hosts.get())
def Calculate_Range():
pass
def Calculate_Hosts():
pass
def convert_to_binary(self, num):
#for i in range(0,100,2):
try:
i = int(num)
if i == 0:
pass
else:
i = str(bin(i))
i = i.split('b')
i = i[:2]
return i[1]
except (TypeError, ValueError) as e:
print()
print(e)
return "\nTry Again :("
    def hex_convert(self, x):
        """Convert one hex digit (string) to its 4-bit binary string.

        NOTE(review): '0' is deliberately absent from hexaList -- the caller
        (hex_bin) special-cases it. The `count` bookkeeping appears intended
        to stop after the first table match; confirm, the control flow is
        unusual.
        """
        x = x.upper()
        bi_total = ""
        binary = []
        decimal = ""
        count = 0
        try:
            # Lookup table of [hex digit, decimal value] pairs.
            hexaList = [['1', '1'],['2', '2'],['3','3'], ['4', '4'],['5', '5'],['6', '6'],['7', '7'],['8', '8'],['9', '9'],['A', '10'],['B', '11'],['C', '12'],['D', '13'],['E', '14'],['F', '15']]
            for i in hexaList:
                for j in i:
                    if x == j:
                        if count > 0:
                            break
                        else:
                            decimal = int(hexaList[hexaList.index(i)][1])
                            binary.append(self.convert_to_binary(int(decimal)))
                        if count >= 0:
                            for k in binary:
                                # Left-pad each nibble to 4 bits.
                                if len(k) < 4:
                                    k = "0"*(4-len(k)) + k
                                bi_total += k
                            count += 1
        except TypeError as e:
            print(e)
        return bi_total
def hex_bin(self):
    """Convert the hex string in the entry box to binary, 4 bits per digit."""
    hex_text = str(self.entryVariable.get())
    pieces = []
    for digit in hex_text:
        # '0' is not in hex_convert's table, so it is expanded here.
        pieces.append("0000" if digit == "0" else self.hex_convert(digit))
    self.entryReturned.set("".join(pieces).rstrip())
def hex_dec(self):
    """Interpret the entry text as base-16 and show its decimal value."""
    self.entryReturned.set(int(self.entryVariable.get(), 16))
def bin_hex(self):
    """Interpret the entry text as base-2 and show it as hexadecimal."""
    value = int(self.entryVariable.get(), 2)
    # hex() yields '0x...'; keep only the digits after the 'x'.
    hex_digits = hex(value).split('x')[1]
    self.entryReturned.set(hex_digits)
def bin_dec(self):
    """Interpret the entry text as base-2 and show its decimal value."""
    self.entryReturned.set(int(self.entryVariable.get(), 2))
def dec_hex(self):
    """Interpret the entry text as decimal and show it as hexadecimal.

    On a non-numeric entry the ValueError is surfaced in myText
    (note: dec_bin reports its errors via entryReturned instead).
    """
    try:
        digits = hex(int(self.entryVariable.get())).split('x')
        self.entryReturned.set(digits[1])
    except ValueError as e:
        self.myText.set(e)
        print(e)
def dec_bin(self):
    """Interpret the entry text as decimal and show it in binary.

    Zero is displayed as eight zero bits; a ValueError is shown in the
    result box itself.
    """
    try:
        value = int(self.entryVariable.get())
        if value == 0:
            self.entryReturned.set("00000000")
        else:
            self.entryReturned.set(bin(value).split('b')[1])
    except ValueError as e:
        self.entryReturned.set(e)
        print(e)
def new_converter(self):
    """Swap the UI to the base-converter panel (hide the subnet panel)."""
    try:
        textArea.pack_forget()
        convertArea.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        buttonArea_sub.pack_forget()
        buttonArea_con.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH)
    except (AttributeError, NameError) as e:
        # BUGFIX/consistency: was a bare `except: pass`, which silently
        # swallowed every error. Match the sibling new_subnet(): catch only
        # the expected failures (module-level widgets not built yet) and log.
        print(e)
def new_subnet(self):
    """Swap the UI to the subnet-calculator panel (hide the converter panel)."""
    try:
        convertArea.pack_forget()
        textArea.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)
        buttonArea_con.pack_forget()
        buttonArea_sub.pack(side=tk.RIGHT, expand=True, fill=tk.BOTH)
    except (AttributeError, NameError) as e:
        # The panels are module-level globals; if they have not been built
        # yet the switch is skipped, logging which lookup failed.
        print(e)
def Calculate_Mask(fip, lip, hst):
countnum = 0
countdot = 0
firstip = fip
fip_total = len(firstip)
lastip = lip
lip_total = len(lastip)
host = hst
digits = ["1","2","3","4","5","6","7","8","9"]
for i in firstip: # check that first ip is all numbers - 3 dots
if i in digits:
countnum += 1
if i == ".":
countdot += 1
if fip_total != (countnum + countdot) or countdot != 3 or countnum < 4 or countnum > 12: # redo to check if right instead of wrong. (...12345) passes this test
return "Not A Valid IP Address"
count = 0
for i in lastip:
for j in digits:
if i == str(j):
count += 1
if lip_total != count:
pass # needs to return error with last ip
if __name__ == "__main__":
    # Build and run the Tk application (myProg is defined above).
    prog = myProg(None)
    prog.title('Networking Control Panel')
    prog.configure(background="black", borderwidth=10)
    prog.mainloop()
| |
import pymongo
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from crits.core.mongo_tools import mongo_connector
class Command(BaseCommand):
    """
    Management command that creates (or removes) the MongoDB indexes.
    """

    option_list = BaseCommand.option_list + (
        make_option('--remove-indexes',
                    '-r',
                    action='store_true',
                    dest='remove',
                    default=False,
                    help='Remove all indexes. Does NOT create.'),
    )
    help = 'Creates indexes for MongoDB.'

    def handle(self, *args, **options):
        """
        Entry point: drop every index when --remove-indexes was passed,
        otherwise (re)create the default index set.
        """
        if options.get('remove'):
            remove_indexes()
        else:
            create_indexes()
def remove_indexes():
"""
Removes all indexes from all collections.
"""
coll_list = [settings.COL_BACKDOORS,
settings.COL_BUCKET_LISTS,
settings.COL_CAMPAIGNS,
settings.COL_COMMENTS,
settings.COL_DOMAINS,
settings.COL_EMAIL,
settings.COL_EVENTS,
settings.COL_EXPLOITS,
settings.COL_INDICATORS,
settings.COL_IPS,
settings.COL_NOTIFICATIONS,
'%s.files' % settings.COL_OBJECTS,
'%s.chunks' % settings.COL_OBJECTS,
settings.COL_PCAPS,
'%s.files' % settings.COL_PCAPS,
'%s.chunks' % settings.COL_PCAPS,
settings.COL_SAMPLES,
'%s.files' % settings.COL_SAMPLES,
'%s.chunks' % settings.COL_SAMPLES,
settings.COL_TARGETS,
]
for coll in coll_list:
print "Removing index for: %s" % coll
c = mongo_connector(coll)
c.drop_indexes()
def create_indexes():
    """
    Creates the default set of indexes for the system. Depending on your use
    cases, as well as quantity of data, admins may wish to tweak these indexes
    to best fit their requirements.
    """
    print "Creating indexes (duplicates will be ignored automatically)"
    # Every index is built with background=True so the collections stay
    # readable/writable while the build runs.
    analysis_results = mongo_connector(settings.COL_ANALYSIS_RESULTS)
    analysis_results.ensure_index("service_name", background=True)
    analysis_results.ensure_index("object_type", background=True)
    analysis_results.ensure_index("object_id", background=True)
    bucket_lists = mongo_connector(settings.COL_BUCKET_LISTS)
    bucket_lists.ensure_index("name", background=True)
    backdoors = mongo_connector(settings.COL_BACKDOORS)
    backdoors.ensure_index("name", background=True)
    campaigns = mongo_connector(settings.COL_CAMPAIGNS)
    campaigns.ensure_index("objects.value", background=True)
    campaigns.ensure_index("relationships.value", background=True)
    campaigns.ensure_index("bucket_list", background=True)
    comments = mongo_connector(settings.COL_COMMENTS)
    comments.ensure_index("obj_id", background=True)
    comments.ensure_index("users", background=True)
    comments.ensure_index("tags", background=True)
    comments.ensure_index("status", background=True)
    domains = mongo_connector(settings.COL_DOMAINS)
    domains.ensure_index("domain", background=True)
    domains.ensure_index("objects.value", background=True)
    domains.ensure_index("relationships.value", background=True)
    domains.ensure_index("campaign.name", background=True)
    domains.ensure_index("bucket_list", background=True)
    emails = mongo_connector(settings.COL_EMAIL)
    emails.ensure_index("objects.value", background=True)
    emails.ensure_index("relationships.value", background=True)
    emails.ensure_index("campaign.name", background=True)
    emails.ensure_index("bucket_list", background=True)
    events = mongo_connector(settings.COL_EVENTS)
    events.ensure_index("objects.value", background=True)
    events.ensure_index("relationships.value", background=True)
    events.ensure_index("campaign.name", background=True)
    events.ensure_index("bucket_list", background=True)
    exploits = mongo_connector(settings.COL_EXPLOITS)
    exploits.ensure_index("name", background=True)
    indicators = mongo_connector(settings.COL_INDICATORS)
    indicators.ensure_index("value", background=True)
    indicators.ensure_index("objects.value", background=True)
    indicators.ensure_index("relationships.value", background=True)
    indicators.ensure_index("campaign.name", background=True)
    indicators.ensure_index("bucket_list", background=True)
    ips = mongo_connector(settings.COL_IPS)
    ips.ensure_index("ip", background=True)
    ips.ensure_index("objects.value", background=True)
    ips.ensure_index("relationships.value", background=True)
    ips.ensure_index("campaign.name", background=True)
    ips.ensure_index("bucket_list", background=True)
    # GridFS .files/.chunks collections only exist when binaries are stored
    # in GridFS; (files_id, n) uniquely identifies a chunk within a file.
    if settings.FILE_DB == settings.GRIDFS:
        objects_files = mongo_connector('%s.files' % settings.COL_OBJECTS)
        objects_files.ensure_index("md5", background=True)
        objects_chunks = mongo_connector('%s.chunks' % settings.COL_OBJECTS)
        objects_chunks.ensure_index([("files_id",pymongo.ASCENDING),
                                     ("n", pymongo.ASCENDING)],
                                    unique=True)
    notifications = mongo_connector(settings.COL_NOTIFICATIONS)
    notifications.ensure_index("obj_id", background=True)
    # auto-expire notifications after 30 days (TTL index: 2592000 s)
    notifications.ensure_index("date", background=True,
                               expireAfterSeconds=2592000)
    notifications.ensure_index("users", background=True)
    pcaps = mongo_connector(settings.COL_PCAPS)
    pcaps.ensure_index("md5", background=True)
    pcaps.ensure_index("objects.value", background=True)
    pcaps.ensure_index("relationships.value", background=True)
    pcaps.ensure_index("campaign.name", background=True)
    pcaps.ensure_index("bucket_list", background=True)
    if settings.FILE_DB == settings.GRIDFS:
        pcaps_files = mongo_connector('%s.files' % settings.COL_PCAPS)
        pcaps_files.ensure_index("md5", background=True)
        pcaps_chunks = mongo_connector('%s.chunks' % settings.COL_PCAPS)
        pcaps_chunks.ensure_index([("files_id", pymongo.ASCENDING),
                                   ("n", pymongo.ASCENDING)],
                                  unique=True)
    raw_data = mongo_connector(settings.COL_RAW_DATA)
    raw_data.ensure_index("link_id", background=True)
    raw_data.ensure_index("md5", background=True)
    raw_data.ensure_index("objects.value", background=True)
    raw_data.ensure_index("relationships.value", background=True)
    raw_data.ensure_index("campaign.name", background=True)
    raw_data.ensure_index("bucket_list", background=True)
    samples = mongo_connector(settings.COL_SAMPLES)
    samples.ensure_index("source.name", background=True)
    samples.ensure_index("md5", background=True)
    samples.ensure_index("sha1", background=True)
    samples.ensure_index("sha256", background=True)
    samples.ensure_index("ssdeep", background=True)
    samples.ensure_index("mimetype", background=True)
    samples.ensure_index("filetype", background=True)
    samples.ensure_index("size", background=True)
    samples.ensure_index("filename", background=True)
    samples.ensure_index("objects.value", background=True)
    samples.ensure_index("relationships.value", background=True)
    samples.ensure_index("campaign.name", background=True)
    samples.ensure_index("analysis.results.result", background=True)
    samples.ensure_index("analysis.results.md5", background=True)
    samples.ensure_index("bucket_list", background=True)
    if settings.FILE_DB == settings.GRIDFS:
        samples_files = mongo_connector('%s.files' % settings.COL_SAMPLES)
        samples_files.ensure_index("md5", background=True)
        samples_chunks = mongo_connector('%s.chunks' % settings.COL_SAMPLES)
        samples_chunks.ensure_index([("files_id", pymongo.ASCENDING),
                                     ("n", pymongo.ASCENDING)],
                                    unique=True)
    screenshots = mongo_connector(settings.COL_SCREENSHOTS)
    screenshots.ensure_index("tags", background=True)
    targets = mongo_connector(settings.COL_TARGETS)
    targets.ensure_index("objects.value", background=True)
    targets.ensure_index("relationships.value", background=True)
    targets.ensure_index("campaign.name", background=True)
    targets.ensure_index("bucket_list", background=True)
| |
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# old_cwd = os.getcwd()
# os.chdir(os.path.dirname(__file__))
import gc
import numpy as np
import xarray as xr
import dask
import datacube
import warnings
# Command line tool imports
import argparse
import collections
from osgeo import gdal
from datetime import datetime
from . import dc_utilities as utilities
from .dc_utilities import create_default_clean_mask
# os.chdir(old_cwd)
# Author: KMF
# Creation date: 2016-06-13
def NDWI(data, normalize=False, band_pair=0):
    """
    Computes various versions of the Normalized Difference Water Index for an `xarray.Dataset`.
    Values should be in the range [-1,1] for valid LANDSAT data (the bands are positive).

    Parameters
    ----------
    data: xarray.Dataset or numpy.ndarray
        An `xarray.Dataset` containing the bands specified by `band_pair` or
        a 2D NumPy array with two columns - the band pair.
    normalize: bool
        Whether or not to normalize to the range [0,1].
    band_pair: int
        The band pair to use.
        Band pair 0 uses 'nir' and 'swir1': (nir - swir1)/(nir + swir1).
        Band pair 1 uses 'green' and 'nir': (green - nir)/(green + nir).

    Returns
    -------
    ndwi: xarray.DataArray or numpy.ndarray
        Same shape/coordinates as the input bands.
    """
    if band_pair == 0:
        bands = ['nir', 'swir1']
    elif band_pair == 1:
        bands = ['green', 'nir']
    else:
        raise AssertionError('The band_pair parameter must be in [0,1]')

    if isinstance(data, np.ndarray):
        # BUGFIX: the original NumPy branch computed only the band
        # difference, omitting the (b1 + b2) denominator of the normalized
        # difference index documented above.
        ndwi = (data[:, 0] - data[:, 1]) / (data[:, 0] + data[:, 1])
        if normalize:
            ndwi = (ndwi - np.nanmin(ndwi)) / (np.nanmax(ndwi) - np.nanmin(ndwi))
    else:
        # xarray.Dataset path: operate on the named band DataArrays.
        ndwi = (data[bands[0]] - data[bands[1]]) / (data[bands[0]] + data[bands[1]])
        if normalize:
            ndwi = (ndwi - ndwi.min()) / (ndwi.max() - ndwi.min())
    return ndwi
def wofs_classify(dataset_in, clean_mask=None, x_coord='longitude', y_coord='latitude',
                  time_coord='time', no_data=-9999, mosaic=False):
    """
    Performs WOfS (Water Observations from Space) algorithm on given dataset.
    The WOfS algorithm is defined for Landsat 5/7 Collection 1 Level 2 data.
    It can also be used on Landsat 8 Collection 1 data with good accuracy.

    References:
    - Mueller, et al. (2015) "Water observations from space: Mapping surface water from
      25 years of Landsat imagery across Australia." Remote Sensing of Environment.
    - https://github.com/GeoscienceAustralia/eo-tools/blob/stable/eotools/water_classifier.py

    Parameters
    ----------
    dataset_in: xarray.Dataset
        dataset retrieved from the Data Cube; should contain
        coordinates: time, latitude, longitude
        variables: blue, green, red, nir, swir1, swir2
    x_coord, y_coord, time_coord: str
        Names of DataArrays in `dataset_in` to use as x, y,
        and time coordinates.
    clean_mask: np.ndarray
        numpy array with dtype boolean - true for values user considers clean;
        if user does not provide a clean mask, all values will be considered clean
    no_data: numeric
        no data pixel value; default: -9999
    mosaic: bool
        flag to indicate if dataset_in is a mosaic. If mosaic = False, dataset_in
        should have a time coordinate and wofs will run over each time slice; otherwise, dataset_in
        should not have a time coordinate and wofs will run over the single mosaicked image

    Returns
    -------
    dataset_out: xarray.DataArray
        wofs water classification results: 0 - not water; 1 - water

    Raises
    ------
    ValueError
        if dataset_in is an empty xarray.Dataset.
    """

    def _band_ratio(a, b):
        """
        Calculates a normalized ratio index
        """
        return (a - b) / (a + b)

    def _run_regression(band1, band2, band3, band4, band5, band7):
        """
        Regression analysis based on Australia's training data.

        Walks the published WOfS decision tree: each numbered node assigns
        0 (not water) or 1 (water) to the pixels whose path terminates
        there. `_tmp`/`_tmp2` accumulate the boolean path conditions.

        TODO: Return type
        """

        # Compute normalized ratio indices
        ndi_52 = _band_ratio(band5, band2)
        ndi_43 = _band_ratio(band4, band3)
        ndi_72 = _band_ratio(band7, band2)

        # NOTE(review): `classified` is only created for NumPy or Dask
        # inputs; any other array type would raise NameError below.
        if isinstance(band1, np.ndarray):
            classified = np.full_like(band1, no_data, dtype='uint8')
        elif isinstance(band1, dask.array.core.Array):
            classified = dask.array.full_like(band1, no_data, dtype='uint8',
                                              chunks=band1.chunks)

        # Start with the tree's left branch, finishing nodes as needed

        # Left branch
        r1 = ndi_52 <= -0.01
        r2 = band1 <= 2083.5
        classified[r1 & ~r2] = 0  #Node 3

        r3 = band7 <= 323.5
        _tmp = r1 & r2
        _tmp2 = _tmp & r3
        _tmp &= ~r3

        r4 = ndi_43 <= 0.61
        classified[_tmp2 & r4] = 1  #Node 6
        classified[_tmp2 & ~r4] = 0  #Node 7

        r5 = band1 <= 1400.5
        _tmp2 = _tmp & ~r5

        r6 = ndi_43 <= -0.01
        classified[_tmp2 & r6] = 1  #Node 10
        classified[_tmp2 & ~r6] = 0  #Node 11

        _tmp &= r5

        r7 = ndi_72 <= -0.23
        _tmp2 = _tmp & ~r7

        r8 = band1 <= 379
        classified[_tmp2 & r8] = 1  #Node 14
        classified[_tmp2 & ~r8] = 0  #Node 15

        _tmp &= r7

        r9 = ndi_43 <= 0.22
        classified[_tmp & r9] = 1  #Node 17
        _tmp &= ~r9

        r10 = band1 <= 473
        classified[_tmp & r10] = 1  #Node 19
        classified[_tmp & ~r10] = 0  #Node 20

        # Left branch complete; cleanup
        del r2, r3, r4, r5, r6, r7, r8, r9, r10
        gc.collect()

        # Right branch of regression tree
        r1 = ~r1

        r11 = ndi_52 <= 0.23
        _tmp = r1 & r11

        r12 = band1 <= 334.5
        _tmp2 = _tmp & ~r12
        classified[_tmp2] = 0  #Node 23

        _tmp &= r12

        r13 = ndi_43 <= 0.54
        _tmp2 = _tmp & ~r13
        classified[_tmp2] = 0  #Node 25

        _tmp &= r13

        r14 = ndi_52 <= 0.12
        _tmp2 = _tmp & r14
        classified[_tmp2] = 1  #Node 27

        _tmp &= ~r14

        r15 = band3 <= 364.5
        _tmp2 = _tmp & r15

        r16 = band1 <= 129.5
        classified[_tmp2 & r16] = 1  #Node 31
        classified[_tmp2 & ~r16] = 0  #Node 32

        _tmp &= ~r15

        r17 = band1 <= 300.5
        _tmp2 = _tmp & ~r17
        _tmp &= r17
        classified[_tmp] = 1  #Node 33
        classified[_tmp2] = 0  #Node 34

        _tmp = r1 & ~r11

        r18 = ndi_52 <= 0.34
        classified[_tmp & ~r18] = 0  #Node 36
        _tmp &= r18

        r19 = band1 <= 249.5
        classified[_tmp & ~r19] = 0  #Node 38
        _tmp &= r19

        r20 = ndi_43 <= 0.45
        classified[_tmp & ~r20] = 0  #Node 40
        _tmp &= r20

        r21 = band3 <= 364.5
        classified[_tmp & ~r21] = 0  #Node 42
        _tmp &= r21

        r22 = band1 <= 129.5
        classified[_tmp & r22] = 1  #Node 44
        classified[_tmp & ~r22] = 0  #Node 45

        # Completed regression tree
        return classified

    # Default to masking nothing.
    if clean_mask is None:
        clean_mask = create_default_clean_mask(dataset_in)

    # Extract dataset bands needed for calculations
    blue = dataset_in.blue
    green = dataset_in.green
    red = dataset_in.red
    nir = dataset_in.nir
    swir1 = dataset_in.swir1
    swir2 = dataset_in.swir2

    # Ignore warnings about division by zero and NaNs.
    # NOTE(review): no warning filter is actually installed here; zero
    # denominators in _band_ratio will still emit RuntimeWarnings.
    classified = _run_regression(blue.data, green.data, red.data,
                                 nir.data, swir1.data, swir2.data)
    # classified_clean = classified - classified + no_data
    # Pixels outside the clean mask are overwritten with no_data.
    if isinstance(blue.data, np.ndarray):
        classified_clean = np.where(clean_mask, classified, no_data)
    elif isinstance(blue.data, dask.array.core.Array):
        classified_clean = dask.array.where(clean_mask, classified, no_data)

    # Create xarray of data
    x_coords = dataset_in[x_coord]
    y_coords = dataset_in[y_coord]

    time = None  # NOTE(review): unused; `time_coords` below carries the time axis
    coords = None
    dims = None

    if mosaic:
        coords = [y_coords, x_coords]
        dims = [y_coord, x_coord]
    else:
        time_coords = dataset_in[time_coord]
        coords = [time_coords, y_coords, x_coords]
        dims = [time_coord, y_coord, x_coord]

    data_array = xr.DataArray(classified_clean, coords=coords, dims=dims)

    if mosaic:
        dataset_out = xr.Dataset({'wofs': data_array},
                                 coords={y_coord: y_coords, x_coord: x_coords})
    else:
        dataset_out = xr.Dataset(
            {'wofs': data_array},
            coords={time_coord: time_coords, y_coord: y_coords, x_coord: x_coords})

    return dataset_out
def ledaps_classify(water_band, qa_bands, no_data=-9999):
    """Classify water from a LEDAPS water band, masked by its QA bands.

    :param water_band: array where the value 255 marks water pixels
    :param qa_bands: sequence of six QA arrays in the order
        (fill, cloud, cloud_shadow, adjacent_cloud, snow, ddv); a pixel is
        "clean" when fill/cloud/shadow/snow/ddv == 0 and adjacent_cloud == 255
    :param no_data: value written wherever a pixel is not clean
    :return: int array - 1 water, 0 not water, `no_data` where unusable
    """
    #TODO: refactor for input/output datasets
    fill_qa, cloud_qa, cloud_shadow_qa, adjacent_cloud_qa, snow_qa, ddv_qa = qa_bands[:6]

    def _equals(arr, val):
        # Boolean mask of pixels whose value equals `val`.
        return np.reshape(np.in1d(arr.reshape(-1), [val]), arr.shape)

    clean_mask = (_equals(fill_qa, 0) & _equals(cloud_qa, 0) &
                  _equals(cloud_shadow_qa, 0) & _equals(adjacent_cloud_qa, 255) &
                  _equals(snow_qa, 0) & _equals(ddv_qa, 0))
    water_mask = _equals(water_band, 255)  # Will be true if 255 -> water
    # BUGFIX: the original called astype(int) but discarded the returned
    # array (astype is not in-place).
    classified = water_mask.astype(int)
    classified_clean = np.full(classified.shape, no_data)
    classified_clean[clean_mask] = classified[clean_mask]
    return classified_clean
def cfmask_classify(cfmask, no_data=-9999):
    """Classify water from a cfmask band.

    cfmask codes: 1 = water; 2 (shadow), 3 (snow), 4 (cloud) and 255 (fill)
    are treated as unusable and mapped to `no_data`.

    :param cfmask: cfmask array
    :param no_data: value written wherever a pixel is not clean
    :return: int array - 1 water, 0 not water, `no_data` where unusable
    """
    #TODO: refactor for input/output datasets
    clean_mask = np.reshape(np.in1d(cfmask.reshape(-1), [2, 3, 4, 255], invert=True), cfmask.shape)
    water_mask = np.reshape(np.in1d(cfmask.reshape(-1), [1]), cfmask.shape)
    # BUGFIX: the original called astype(int) but discarded the returned
    # array (astype is not in-place).
    classified = water_mask.astype(int)
    classified_clean = np.full(classified.shape, no_data)
    classified_clean[clean_mask] = classified[clean_mask]
    return classified_clean
def main(classifier, platform, product_type, min_lon, max_lon, min_lat, max_lat, start_date, end_date, dc_config):
    """
    Description:
      Command-line water detection tool - creates a time-series from
      water analysis performed on data retrieved by the Data Cube,
      shows plots of the normalized water observations (total water
      observations / total clear observations), total water observations,
      and total clear observations, and saves a GeoTIFF of the results
    Assumptions:
      The command-line tool assumes there is a measurement called cf_mask
    Inputs:
      classifier (str)
      platform (str)
      product_type (str)
      min_lon (str)
      max_lon (str)
      min_lat (str)
      max_lat (str)
      start_date (str)
      end_date (str)
      dc_config (str)
    """
    # Initialize data cube object
    dc = datacube.Datacube(config=dc_config, app='dc-mosaicker')

    # Validate arguments
    if classifier not in ['cfmask', 'ledaps', 'wofs']:
        print('ERROR: Unknown water classifier. Classifier options: cfmask, ledaps, wofs')
        return

    products = dc.list_products()
    platform_names = set([product[6] for product in products.values])
    if platform not in platform_names:
        print('ERROR: Invalid platform.')
        print('Valid platforms are:')
        for name in platform_names:
            print(name)
        return

    product_names = [product[0] for product in products.values]
    if product_type not in product_names:
        print('ERROR: Invalid product type.')
        print('Valid product types are:')
        for name in product_names:
            print(name)
        return

    try:
        # BUGFIX: the original read the module-global `args.*` here, which
        # only works when main() is invoked from the CLI block below and
        # raises NameError when called as a library function.
        min_lon = float(min_lon)
        max_lon = float(max_lon)
        min_lat = float(min_lat)
        max_lat = float(max_lat)
    except (TypeError, ValueError):
        print('ERROR: Longitudes/Latitudes must be float values')
        return

    try:
        start_date_str = start_date
        end_date_str = end_date
        start_date = datetime.strptime(start_date, '%Y-%m-%d')
        end_date = datetime.strptime(end_date, '%Y-%m-%d')
    except (TypeError, ValueError):
        print('ERROR: Invalid date format. Date format: YYYY-MM-DD')
        return

    if not os.path.exists(dc_config):
        print('ERROR: Invalid file path for dc_config')
        return

    # Retrieve data from Data Cube
    dataset_in = dc.load(
        platform=platform,
        product=product_type,
        time=(start_date, end_date),
        lon=(min_lon, max_lon),
        lat=(min_lat, max_lat))

    # Get information needed for saving as GeoTIFF

    # Spatial ref
    crs = dataset_in.crs
    spatial_ref = utilities.get_spatial_ref(crs)

    # Upper left coordinates
    ul_lon = dataset_in.longitude.values[0]
    ul_lat = dataset_in.latitude.values[0]

    # Resolution - look up the product actually requested (the original
    # hard-coded 'ls7_ledaps' here).
    products = dc.list_products()
    resolution = products.resolution[products.name == product_type]
    lon_dist = resolution.values[0][1]
    lat_dist = resolution.values[0][0]

    # Rotation
    lon_rtn = 0
    lat_rtn = 0

    geotransform = (ul_lon, lon_dist, lon_rtn, ul_lat, lat_rtn, lat_dist)

    # Run desired classifier
    water_class = None
    if classifier == 'cfmask':  #TODO: implement when cfmask_classify is refactored
        return
    elif classifier == 'ledaps':  #TODO: implement when ledaps_classify is refactored
        return
    elif classifier == 'wofs':
        water_class = wofs_classify(dataset_in)

    dataset_out = utilities.perform_timeseries_analysis(water_class)
    print(dataset_out)
    out_file = (
        str(min_lon) + '_' + str(min_lat) + '_' + start_date_str + '_' + end_date_str + '_' + classifier + '_.tif')
    utilities.save_to_geotiff(out_file, gdal.GDT_Float32, dataset_out, geotransform, spatial_ref)
if __name__ == '__main__':
    start_time = datetime.now()

    # All nine positional arguments are plain strings; main() validates them.
    parser = argparse.ArgumentParser()
    for arg_name, arg_help in (
            ('classifier', 'Water classifier; options: cfmask, ledaps, wofs'),
            ('platform', 'Data platform; example: LANDSAT_7'),
            ('product', 'Product type; example: ls7_ledaps'),
            ('min_lon', 'Minimum longitude'),
            ('max_lon', 'Maximum longitude'),
            ('min_lat', 'Minimum latitude'),
            ('max_lat', 'Maximum latitude'),
            ('start_date', 'Start date; format: YYYY-MM-DD'),
            ('end_date', 'End date; format: YYYY-MM-DD')):
        parser.add_argument(arg_name, help=arg_help)
    parser.add_argument(
        'dc_config',
        nargs='?',
        default='~/.datacube.conf',
        help='Datacube configuration path; default: ~/.datacube.conf')
    args = parser.parse_args()

    main(args.classifier, args.platform, args.product, args.min_lon, args.max_lon, args.min_lat, args.max_lat,
         args.start_date, args.end_date, args.dc_config)

    end_time = datetime.now()
    print('Execution time: ' + str(end_time - start_time))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
"""
import fnmatch
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
class Store(object):
    """Process-wide singleton holder for the fake LDAP backing store."""

    def __init__(self):
        # Direct construction is forbidden once the singleton exists.
        if hasattr(self.__class__, '_instance'):
            raise Exception(_('Attempted to instantiate singleton'))

    @classmethod
    def instance(cls):
        """Return the shared _StorageDict, creating it on first use."""
        if not hasattr(cls, '_instance'):
            cls._instance = _StorageDict()
        return cls._instance
class _StorageDict(dict):
    """Dict with a minimal redis-like interface (keys/delete/hget/hset...)."""

    def keys(self, pat=None):
        """Return all keys, optionally filtered by glob pattern `pat`."""
        all_keys = super(_StorageDict, self).keys()
        return all_keys if pat is None else fnmatch.filter(all_keys, pat)

    def delete(self, key):
        """Remove `key` if present; missing keys are silently ignored."""
        self.pop(key, None)

    def flushdb(self):
        """Drop every stored key."""
        self.clear()

    def hgetall(self, key):
        """Return the hash stored at `key`.

        Creates the hash if the key doesn't exist.
        """
        return self.setdefault(key, {})

    def hget(self, key, field):
        """Return `field` of the hash at `key`, creating {} defaults as needed."""
        return self.hgetall(key).setdefault(field, {})

    def hset(self, key, field, val):
        """Set `field` of the hash at `key` to `val`."""
        self.hgetall(key)[field] = val

    def hmset(self, key, value_dict):
        """Merge every field of `value_dict` into the hash at `key`."""
        self.hgetall(key).update(value_dict)
# Search scope codes, mirroring the python-ldap module's constants.
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1  # Not implemented
SCOPE_SUBTREE = 2
# modify_s operation codes, mirroring the python-ldap module's constants.
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2


class NO_SUCH_OBJECT(Exception):  # pylint: disable=C0103
    """Duplicate exception class from real LDAP module."""
    pass


class OBJECT_CLASS_VIOLATION(Exception):  # pylint: disable=C0103
    """Duplicate exception class from real LDAP module."""
    pass


class SERVER_DOWN(Exception):  # pylint: disable=C0103
    """Duplicate exception class from real LDAP module."""
    pass
def initialize(_uri):
    """Opens a fake connection with an LDAP server (the URI is ignored)."""
    connection = FakeLDAP()
    return connection
def _match_query(query, attrs):
    """Match an ldap query to an attribute dictionary.

    The characters &, |, and ! are supported in the query. No syntax checking
    is performed, so malformed queries will not work correctly.
    """
    inner = query[1:-1]  # strip the outer parentheses
    op = inner[:1]
    if op == '&':
        # conjunction of exactly two parenthesized sub-queries
        left, right = _paren_groups(inner[1:])
        return _match_query(left, attrs) and _match_query(right, attrs)
    if op == '|':
        # disjunction of exactly two parenthesized sub-queries
        left, right = _paren_groups(inner[1:])
        return _match_query(left, attrs) or _match_query(right, attrs)
    if op == '!':
        # negate the nested query (drop the '!' and its parentheses)
        return not _match_query(query[2:-1], attrs)

    key, _sep, value = inner.partition('=')
    return _match(key, value, attrs)
def _paren_groups(source):
    """Split a string into top-level parenthesized groups.

    E.g. '(a)(b(c))' -> ['(a)', '(b(c))']. Nested parentheses stay inside
    their enclosing group.
    """
    depth = 0
    start = 0
    result = []
    # IDIOM: enumerate instead of the original `for pos in xrange(len(...))`
    # index loop (xrange is also Python-2-only).
    for pos, ch in enumerate(source):
        if ch == '(':
            if depth == 0:
                start = pos
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                result.append(source[start:pos + 1])
    return result
def _match(key, value, attrs):
    """Match a given key and value against an attribute list."""
    if key not in attrs:
        return False
    if value == "*":
        # Wild card search. Implemented as all or nothing for now.
        return True
    if key != "objectclass":
        return value in attrs[key]
    # objectclass checks also accept any known subclass of the value
    return any(v in attrs[key] for v in _subs(value))
def _subs(value):
    """Returns a list of subclass strings.

    The strings represent the ldap object class plus any subclasses that
    inherit from it. Fakeldap doesn't know about the ldap object structure,
    so subclasses need to be defined manually in the dictionary below.
    """
    known_subclasses = {'groupOfNames': ['novaProject']}
    return [value] + known_subclasses.get(value, [])
def _from_json(encoded):
    """Convert attribute values from json representation.

    Args:
    encoded -- a json encoded string

    Returns a list of strings
    """
    decoded = jsonutils.loads(encoded)
    return [str(item) for item in decoded]
def _to_json(unencoded):
    """Convert attribute values into json representation.

    Args:
    unencoded -- an unencoded string or list of strings. If it
                 is a single string, it will be converted into a list.

    Returns a json string
    """
    values = list(unencoded)
    return jsonutils.dumps(values)
# Module-level failure switch: tests set this to True to make every
# FakeLDAP call raise SERVER_DOWN.
server_fail = False
class FakeLDAP(object):
    """Fake LDAP connection.

    Entries live in the Store singleton under keys of the form
    'ldap:<dn>'; each attribute's value list is kept json-encoded.
    """

    def simple_bind_s(self, dn, password):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def unbind_s(self):
        """This method is ignored, but provided for compatibility."""
        if server_fail:
            raise SERVER_DOWN()
        pass

    def add_s(self, dn, attr):
        """Add an object with the specified attributes at dn.

        `attr` is a list of (attribute, value-list) pairs; values are
        stored json-encoded.
        """
        if server_fail:
            raise SERVER_DOWN()

        key = "%s%s" % (self.__prefix, dn)
        value_dict = dict([(k, _to_json(v)) for k, v in attr])
        Store.instance().hmset(key, value_dict)

    def delete_s(self, dn):
        """Remove the ldap object at specified dn."""
        if server_fail:
            raise SERVER_DOWN()

        Store.instance().delete("%s%s" % (self.__prefix, dn))

    def modify_s(self, dn, attrs):
        """Modify the object at dn using the attribute list.

        :param dn: a dn
        :param attrs: a list of tuples in the following form::

            ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
        """
        if server_fail:
            raise SERVER_DOWN()

        store = Store.instance()
        key = "%s%s" % (self.__prefix, dn)

        for cmd, k, v in attrs:
            values = _from_json(store.hget(key, k))
            if cmd == MOD_ADD:
                values.append(v)
            elif cmd == MOD_REPLACE:
                values = [v]
            else:
                values.remove(v)
            # NOTE(review): hset returns None, so this assignment is a
            # leftover; the store is already updated by the call itself.
            values = store.hset(key, k, _to_json(values))

    def modrdn_s(self, dn, newrdn):
        # Rename: copy every attribute of the old entry to the new dn,
        # then delete the old entry. Raises NO_SUCH_OBJECT if dn is absent.
        oldobj = self.search_s(dn, SCOPE_BASE)
        if not oldobj:
            raise NO_SUCH_OBJECT()
        newdn = "%s,%s" % (newrdn, dn.partition(',')[2])
        newattrs = oldobj[0][1]
        modlist = []
        for attrtype in newattrs.keys():
            modlist.append((attrtype, newattrs[attrtype]))
        self.add_s(newdn, modlist)
        self.delete_s(dn)

    def search_s(self, dn, scope, query=None, fields=None):
        """Search for all matching objects under dn using the query.

        Args:
        dn -- dn to search under
        scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
        query -- query to filter objects by
        fields -- fields to return. Returns all fields if not specified

        Returns a list of (dn, attribute-dict) pairs. Raises
        NO_SUCH_OBJECT when no key under dn exists at all.
        """
        if server_fail:
            raise SERVER_DOWN()

        if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
            raise NotImplementedError(str(scope))
        store = Store.instance()
        if scope == SCOPE_BASE:
            pattern = "%s%s" % (self.__prefix, dn)
            keys = store.keys(pattern)
        else:
            keys = store.keys("%s*%s" % (self.__prefix, dn))
        if not keys:
            raise NO_SUCH_OBJECT()
        objects = []
        for key in keys:
            # get the attributes from the store
            attrs = store.hgetall(key)
            # turn the values from the store into lists
            # pylint: disable=E1103
            attrs = dict([(k, _from_json(v))
                          for k, v in attrs.iteritems()])
            # filter the objects by query
            if not query or _match_query(query, attrs):
                # filter the attributes by fields
                attrs = dict([(k, v) for k, v in attrs.iteritems()
                              if not fields or k in fields])
                objects.append((key[len(self.__prefix):], attrs))
        return objects

    @property
    def __prefix(self):  # pylint: disable=R0201
        """Get the prefix to use for all keys."""
        return 'ldap:'
| |
import inflection
from tinymodel.internals import defaults
from tinymodel.internals.validation import (
match_field_values,
remove_calculated_values,
remove_has_many_values,
remove_float_values,
remove_datetime_values,
validate_order_by,
validate_fuzzy_fields,
)
def render_to_response(cls, response, return_type='json', *alien_params):
    """
    Translates the given response into one or more TinyModel instances
    based on the expected type of response.

    :param tinymodel.TinyModel cls: The base class to translate the response to.
    :param [json|tinymodel.TinyModel|foreign_model] response: The response to translate
    :param string return_type: The expected type of the response. Must be one of [tinymodel|foreign_model|json]
    :param list alien_params: A list of params that need to be carried from the initial response to the final response.
        These are meant to be ignored by this function and returned as is.

    :rtype [tinymodel.TinyModel|list(tinymodel.TinyModel)]: The translated response.
    """
    from tinymodel.service import Service
    if return_type not in Service.ALLOWED_RETURN_TYPES:
        # BUGFIX: the original interpolated only ALLOWED_RETURN_TYPES into a
        # format string with two placeholders, so raising this ValueError
        # itself raised a TypeError.
        raise ValueError('"%r" is not a valid return type for services. '
                         'Allowed types are: %s' % (return_type, Service.ALLOWED_RETURN_TYPES))
    is_list = True
    if return_type == 'tinymodel':
        if not isinstance(response, (list, tuple, set)):
            is_list = False
            response = [response]
        for o in response:
            if not isinstance(o, cls):
                raise TypeError('%r does not match the expected response type "tinymodel"' % o)
    elif return_type == 'foreign_model':
        if not isinstance(response, (list, tuple, set)):
            is_list = False
            response = [response]
        for o in response:
            # Reject anything that is plainly a builtin or one of our own models.
            if type(o) in (defaults.SUPPORTED_BUILTINS.keys() + list(defaults.COLLECTION_TYPES)):
                raise TypeError('Response is not a foreign model, it is of built-in type %r' % type(o))
            elif issubclass(type(o), cls.__bases__[0]):
                raise TypeError('Response is not a foreign model, it is of type %r' % cls.__bases__[0])
        response = [cls(from_foreign_model=o) for o in response]
    elif return_type == 'json':
        if isinstance(response, (list, tuple, set)):
            response = [cls(from_json=o) for o in response]
        else:
            is_list = False
            response = [cls(from_json=response)]
    # A list response is nested one level deeper so the alien params appended
    # below remain distinguishable from the translated models.
    response = [response] if is_list else response
    response.extend(alien_params)
    return response
def __get_resp_with_alien_params(response):
    """
    Separates trailing alien params from a list-like service response.
    The first element defines the payload type; every later element of that
    same type stays in the response, anything else is returned separately.
    Non-sequence (or empty) responses pass through untouched.
    """
    alien_params = []
    if response and isinstance(response, (list, tuple, set)):
        items = list(response)
        head, tail = items[:1], items[1:]
        if tail:
            payload_type = type(head[0])
            head.extend(o for o in tail if isinstance(o, payload_type))
            alien_params = [o for o in tail if not isinstance(o, payload_type)]
        response = head
    return response, alien_params
def __call_api_method(cls, service, method_name, endpoint_name=None,
                      set_model_defaults=False, return_fields=None, **kwargs):
    """
    Calls a generic method from the given class using the given params.
    :param tinymodel.TinyModel cls: The class needed to perform class-level operations.
    :param tinymodel.service.Service service: An initialized Service containing the service-specific methods meant to use.
    :param str method_name: The exact name of the method to call.
    :param str endpoint_name: The name of endpoint to communicate with storage.
    :param boolean set_model_defaults: True and kwargs can contain calculated values.
    :param list(str) return_fields: List of fields used in aggregation (only forwarded for 'sum').
    :param dict kwargs: The params to validate and send to the service-specific method.
    :rtype [tinymodel.TinyModel|list(tinymodel.TinyModel)]: The translated response.
    """
    # FIX: None replaces the old mutable [] default (lists as defaults are
    # shared between calls); normalize to a fresh list here instead.
    if return_fields is None:
        return_fields = []
    # Pull out params that are not model fields so that the model round-trip
    # and validation below do not reject them.
    extra_params = {}
    if method_name == 'find':
        # find() always supplies these keys, so pop() without a default is safe.
        extra_params['limit'] = kwargs.pop('limit')
        extra_params['offset'] = kwargs.pop('offset')
        extra_params['order_by'] = kwargs.pop('order_by')
        extra_params['expand_related'] = kwargs.pop('expand_related')
    if method_name in ('find', 'get_or_create'):
        if kwargs.get('fuzzy'):
            validate_fuzzy_fields(cls, kwargs.get('fuzzy'))
        if 'fuzzy' in kwargs:
            extra_params['fuzzy'] = kwargs.pop('fuzzy')
        if 'fuzzy_match_exclude' in kwargs:
            extra_params['fuzzy_match_exclude'] = kwargs.pop('fuzzy_match_exclude')
    # Round-trip through the model to normalize field values, then validate.
    kwargs = cls(set_defaults=set_model_defaults, **kwargs).to_json(return_raw=True)
    kwargs = remove_calculated_values(cls, **kwargs)
    match_field_values(cls, **kwargs)
    if not hasattr(service, method_name):
        raise AttributeError('The given service need a "%s" method!' % method_name)
    if endpoint_name is None:
        # Default the endpoint to the snake_cased model class name.
        endpoint_name = inflection.underscore(cls.__name__)
    kwargs.update(extra_params)
    if method_name == 'sum':
        response = getattr(service, method_name)(endpoint_name=endpoint_name, return_fields=return_fields, **kwargs)
    else:
        response = getattr(service, method_name)(endpoint_name=endpoint_name, **kwargs)
    response, alien_params = __get_resp_with_alien_params(response)
    return render_to_response(cls, response, service.return_type, *alien_params)
def find(cls, service, endpoint_name=None, limit=None, offset=None, order_by=None,
         fuzzy=None, fuzzy_match_exclude=None, expand_related=False, **kwargs):
    """
    Performs a search operation given the passed arguments.
    Strips has-many and float fields from the filter criteria, validates the
    ordering spec, and delegates to the service's 'find' method.
    """
    # FIX: the old defaults ({} and []) were mutable objects shared across
    # calls; use None and normalize here. Explicit callers are unaffected.
    order_by = {} if order_by is None else order_by
    fuzzy = [] if fuzzy is None else fuzzy
    fuzzy_match_exclude = [] if fuzzy_match_exclude is None else fuzzy_match_exclude
    kwargs = remove_has_many_values(cls, **kwargs)
    kwargs = remove_float_values(cls, **kwargs)
    validate_order_by(cls, order_by)
    kwargs.update({
        'offset': offset,
        'limit': limit,
        'order_by': order_by,
        'fuzzy': fuzzy,
        'fuzzy_match_exclude': fuzzy_match_exclude,
        'expand_related': expand_related,
    })
    return __call_api_method(cls, service, 'find', endpoint_name, False, **kwargs)[0]
def create(cls, service, endpoint_name=None, **kwargs):
    """Creates an object via the service, with model defaults applied."""
    created = __call_api_method(cls, service, 'create', endpoint_name, True, **kwargs)
    return created[0]
def delete(cls, service, endpoint_name=None, **kwargs):
    """Deletes objects matching the given arguments, ignoring default values."""
    # Strip field kinds the delete filter cannot match against.
    for strip in (remove_has_many_values, remove_datetime_values, remove_float_values):
        kwargs = strip(cls, **kwargs)
    return __call_api_method(cls, service, 'delete', endpoint_name, **kwargs)[0]
def get_or_create(cls, service, endpoint_name=None, **kwargs):
    """
    Performs a <get_or_create> operation built from <find> and <create>:
    returns (object, created) where created is False for an existing match
    and True when a new object was made.
    """
    matches = find(cls, service, endpoint_name, **kwargs)
    if matches:
        return matches[0], False
    return create(cls, service, endpoint_name, **kwargs), True
def update(cls, service, endpoint_name=None, **kwargs):
    """Updates objects matching the given arguments via the service."""
    updated = __call_api_method(cls, service, 'update', endpoint_name, False, **kwargs)
    return updated[0]
def create_or_update_by(cls, service, by=(), endpoint_name=None, **kwargs):
    """
    Updates the first object matching the key fields listed in *by*, or
    creates a new one when no match exists.
    Returns a (object, created) tuple; created is True for a fresh object.
    :raises ValueError: If kwargs contains no values for the *by* fields.
    """
    # FIX: replaced the mutable [] default with an immutable tuple, and the
    # Python-2-only `filter(lambda (k, v): ...)` tuple-parameter lambdas with
    # portable list comprehensions (same results on Python 2 and 3).
    kwargs_find = [(k, v) for k, v in kwargs.items() if k in by]
    if not kwargs_find:
        raise ValueError("Missing values for 'by' parameter.")
    found_objects = find(cls=cls, service=service, endpoint_name=endpoint_name, **dict(kwargs_find))
    if found_objects:
        kwargs_update = [(k, v) for k, v in kwargs.items() if k not in by]
        # Target the first match by id so the service updates in place.
        kwargs_update.append(('id', found_objects[0].id))
        return update(cls, service, endpoint_name, **dict(kwargs_update)), False
    return create(cls, service, endpoint_name, **kwargs), True
def sum(cls, service, endpoint_name=None, return_fields=None, **kwargs):
    """
    Performs a sum aggregation over return_fields matching the given arguments.
    NOTE: intentionally shadows the builtin ``sum`` since this name is part of
    the module's public API.
    :param list(str) return_fields: Non-empty list of fields to aggregate.
    :raises ValueError: If return_fields is empty or None.
    """
    # FIX: None replaces the old mutable [] default; an empty value was
    # rejected below anyway, so behavior is unchanged for all callers.
    if not return_fields:
        raise ValueError("Missing values for 'return_fields' parameter.")
    kwargs = remove_has_many_values(cls, **kwargs)
    kwargs = remove_datetime_values(cls, **kwargs)
    kwargs = remove_float_values(cls, **kwargs)
    return __call_api_method(cls, service, 'sum', endpoint_name,
                             return_fields=return_fields, **kwargs)[0]
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Per-channel VGG dataset means, rescaled from the usual 0-255 range.
# NOTE(review): dividing by 255 assumes pixel values are in [0, 1] when the
# mean subtraction happens -- confirm against the input pipeline.
_R_MEAN = 123.68 / 255
_G_MEAN = 116.78 / 255
_B_MEAN = 103.94 / 255
# Bounds for the smallest image side used by aspect-preserving resizing;
# training samples a side in [min, max], evaluation uses min only.
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.
  The input rank must be 3. The spatial size is only known when the graph
  runs, so the checks are tf.Assert ops wired in via control dependencies.
  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.
  Returns:
    the cropped (and resized) image.
  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  input_shape = tf.shape(image)
  check_rank = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([check_rank]):
    out_shape = tf.stack([crop_height, crop_width, input_shape[2]])
  check_size = tf.Assert(
      tf.logical_and(
          tf.greater_equal(input_shape[0], crop_height),
          tf.greater_equal(input_shape[1], crop_width)),
      ['Crop size greater than the image size.'])
  begin = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
  # tf.slice, unlike crop_to_bounding_box, accepts tensor-valued sizes.
  with tf.control_dependencies([check_size]):
    image = tf.slice(image, begin, out_shape)
  return tf.reshape(image, out_shape)
def _random_crop(image_list, crop_height, crop_width):
  """Applies one identical random crop to every image in the list.
  Useful when several aligned inputs of the same spatial size must be cropped
  at the same location:
    image, depths, normals = _random_crop([image, depths, normals], 120, 150)
  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.
  Returns:
    the image_list with cropped images.
  Raises:
    ValueError: if there are multiple image inputs provided with different size
      or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')
  # Build a rank-3 assertion for every input tensor.
  rank_checks = []
  for img in image_list:
    img_rank = tf.rank(img)
    rank_checks.append(tf.Assert(
        tf.equal(img_rank, 3),
        ['Wrong rank for tensor %s [expected] [actual]',
         img.name, 3, img_rank]))
  with tf.control_dependencies([rank_checks[0]]):
    first_shape = tf.shape(image_list[0])
  first_height = first_shape[0]
  first_width = first_shape[1]
  crop_size_check = tf.Assert(
      tf.logical_and(
          tf.greater_equal(first_height, crop_height),
          tf.greater_equal(first_width, crop_width)),
      ['Crop size greater than the image size.'])
  checks = [rank_checks[0], crop_size_check]
  # Every remaining image must match the first image's spatial size.
  for idx in range(1, len(image_list)):
    img = image_list[idx]
    checks.append(rank_checks[idx])
    with tf.control_dependencies([rank_checks[idx]]):
      cur_shape = tf.shape(img)
    cur_height = cur_shape[0]
    cur_width = cur_shape[1]
    checks.append(tf.Assert(
        tf.equal(cur_height, first_height),
        ['Wrong height for tensor %s [expected][actual]',
         img.name, cur_height, first_height]))
    checks.append(tf.Assert(
        tf.equal(cur_width, first_width),
        ['Wrong width for tensor %s [expected][actual]',
         img.name, cur_width, first_width]))
  # Use tf.random_uniform (not numpy) so a fresh offset is sampled at every
  # graph evaluation rather than once at graph definition time.
  with tf.control_dependencies(checks):
    max_offset_height = tf.reshape(first_height - crop_height + 1, [])
  with tf.control_dependencies(checks):
    max_offset_width = tf.reshape(first_width - crop_width + 1, [])
  offset_height = tf.random_uniform(
      [], maxval=max_offset_height, dtype=tf.int32)
  offset_width = tf.random_uniform(
      [], maxval=max_offset_width, dtype=tf.int32)
  return [_crop(img, offset_height, offset_width,
                crop_height, crop_width) for img in image_list]
def _central_crop(image_list, crop_height, crop_width):
  """Performs central crops of the given image list.
  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the height of the image following the crop.
    crop_width: the width of the image following the crop.
  Returns:
    the list of cropped images.
  """
  cropped = []
  for img in image_list:
    # Center the crop window; _crop casts the (possibly float) offsets to
    # int32 before slicing.
    top = (tf.shape(img)[0] - crop_height) / 2
    left = (tf.shape(img)[1] - crop_width) / 2
    cropped.append(_crop(img, top, left, crop_height, crop_width))
  return cropped
def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.
  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)
  Note that the rank of `image` must be known.
  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
  Returns:
    the centered image.
  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')
  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  # Shift each channel by its mean, then stitch the channels back together.
  centered = [channel - mean for channel, mean in zip(channels, means)]
  return tf.concat(axis=2, values=centered)
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.
  The original aspect ratio is preserved.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  height_f = tf.to_float(height)
  width_f = tf.to_float(width)
  side_f = tf.to_float(side)
  # Scale relative to whichever side is smaller so that side becomes side_f.
  scale = tf.cond(tf.greater(height_f, width_f),
                  lambda: side_f / width_f,
                  lambda: side_f / height_f)
  new_height = tf.to_int32(height_f * scale)
  new_width = tf.to_int32(width_f * scale)
  return new_height, new_width
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.
  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  input_shape = tf.shape(image)
  new_height, new_width = _smallest_size_at_least(
      input_shape[0], input_shape[1], smallest_side)
  # resize_bilinear expects a batch dimension; add it and strip it again.
  batched = tf.expand_dims(image, 0)
  resized = tf.image.resize_bilinear(batched, [new_height, new_width],
                                     align_corners=False)
  resized = tf.squeeze(resized)
  resized.set_shape([None, None, 3])
  return resized
def preprocess_for_train(image,
                         output_height,
                         output_width,
                         resize_side_min=_RESIZE_SIDE_MIN,
                         resize_side_max=_RESIZE_SIDE_MAX):
  """Preprocesses the given image for training.
  The actual resizing scale is sampled uniformly from
  [`resize_side_min`, `resize_side_max`].
  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    resize_side_min: The lower bound for the smallest side of the image for
      aspect-preserving resizing.
    resize_side_max: The upper bound for the smallest side of the image for
      aspect-preserving resizing.
  Returns:
    A preprocessed image.
  """
  # Sample a fresh resize side per evaluation (maxval is exclusive).
  side = tf.random_uniform(
      [], minval=resize_side_min, maxval=resize_side_max + 1, dtype=tf.int32)
  resized = _aspect_preserving_resize(image, side)
  cropped = _random_crop([resized], output_height, output_width)[0]
  cropped.set_shape([output_height, output_width, 3])
  flipped = tf.image.random_flip_left_right(tf.to_float(cropped))
  return _mean_image_subtraction(flipped, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_for_eval(image, output_height, output_width, resize_side):
  """Preprocesses the given image for evaluation.
  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    resize_side: The smallest side of the image for aspect-preserving resizing.
  Returns:
    A preprocessed image.
  """
  resized = _aspect_preserving_resize(image, resize_side)
  cropped = _central_crop([resized], output_height, output_width)[0]
  cropped.set_shape([output_height, output_width, 3])
  return _mean_image_subtraction(tf.to_float(cropped),
                                 [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_image(image, output_height, output_width, is_training=False,
                     resize_side_min=_RESIZE_SIDE_MIN,
                     resize_side_max=_RESIZE_SIDE_MAX):
  """Preprocesses the given image.
  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.
    resize_side_min: The lower bound for the smallest side of the image for
      aspect-preserving resizing. If `is_training` is `False`, then this value
      is used for rescaling.
    resize_side_max: The upper bound for the smallest side of the image for
      aspect-preserving resizing. If `is_training` is `False`, this value is
      ignored. Otherwise, the resize side is sampled from
      [resize_side_min, resize_side_max].
  Returns:
    A preprocessed image.
  """
  if not is_training:
    # Evaluation always rescales with the fixed lower bound.
    return preprocess_for_eval(image, output_height, output_width,
                               resize_side_min)
  return preprocess_for_train(image, output_height, output_width,
                              resize_side_min, resize_side_max)
| |
"""
Shared resources supporting priorities and preemption.
These resources can be used to limit the number of processes using them
concurrently. A process needs to *request* the usage right to a resource. Once
the usage right is not needed anymore it has to be *released*. A gas station
can be modelled as a resource with a limited amount of fuel-pumps. Vehicles
arrive at the gas station and request to use a fuel-pump. If all fuel-pumps are
in use, the vehicle needs to wait until one of the users has finished refueling
and releases its fuel-pump.
These resources can be used by a limited number of processes at a time.
Processes *request* these resources to become a user and have to *release* them
once they are done. For example, a gas station with a limited number of fuel
pumps can be modeled with a `Resource`. Arriving vehicles request a fuel-pump.
Once one is available they refuel. When they are done, they release the
fuel-pump and leave the gas station.
Requesting a resource is modelled as "putting a process' token into the
resources" and releasing a resources correspondingly as "getting a process'
token out of the resource". Thus, calling ``request()``/``release()`` is
equivalent to calling ``put()``/``get()``. Note, that releasing a resource will
always succeed immediately, no matter if a process is actually using a resource
or not.
Besides :class:`Resource`, there is a :class:`PriorityResource`, where
processes can define a request priority, and a :class:`PreemptiveResource`
whose resource users can be preempted by requests with a higher priority.
"""
from simpy.core import BoundClass
from simpy.resources import base
class Preempted(object):
    """Cause of a preemption :class:`~simpy.events.Interrupt` containing
    information about the preemption.
    """
    def __init__(self, by, usage_since, resource):
        self.by = by
        """The preempting :class:`simpy.events.Process`."""
        self.usage_since = usage_since
        """The simulation time at which the preempted process started to use
        the resource."""
        self.resource = resource
        """The resource which was lost, i.e., caused the preemption."""
class Request(base.Put):
    """Request usage of the *resource*. The event is triggered once access is
    granted. Subclass of :class:`simpy.resources.base.Put`.
    If the maximum capacity of users has not yet been reached, the request is
    triggered immediately; otherwise it is triggered once an earlier usage
    request on the resource is released.
    When created inside a :keyword:`with` statement the request is released
    automatically on exit.
    """
    def __exit__(self, exc_type, value, traceback):
        super(Request, self).__exit__(exc_type, value, traceback)
        if exc_type is GeneratorExit:
            # Skip the release during generator cleanup. Releasing there seems
            # to create unclaimable circular references.
            return
        self.resource.release(self)
class Release(base.Get):
    """Releases the usage of *resource* granted by *request*. This event is
    triggered immediately. Subclass of :class:`simpy.resources.base.Get`.
    """
    def __init__(self, resource, request):
        # Assigned before the base constructor runs: the resource's _do_get
        # reads event.request, which may happen while the base initializes.
        self.request = request
        """The request (:class:`Request`) that is to be released."""
        super(Release, self).__init__(resource)
class PriorityRequest(Request):
    """Request the usage of *resource* with a given *priority*. If the
    *resource* supports preemption and *preempt* is ``True``, other usage
    requests of the *resource* may be preempted (see
    :class:`PreemptiveResource` for details).
    Inherits :class:`Request` and adds the attributes needed by
    :class:`PriorityResource` and :class:`PreemptiveResource`.
    """
    def __init__(self, resource, priority=0, preempt=True):
        self.priority = priority
        """The priority of this request. A smaller number means higher
        priority."""
        self.preempt = preempt
        """Indicates whether the request should preempt a resource user or not
        (:class:`PriorityResource` ignores this flag)."""
        self.time = resource._env.now
        """The time at which the request was made."""
        self.key = (priority, self.time, not preempt)
        """Key for sorting events: priority first (lower is more important),
        then request time (earlier wins), then the inverted preemption flag
        (preempting requests win ties)."""
        # The base constructor runs last so every attribute above is already
        # set when the resource processes the request.
        super(PriorityRequest, self).__init__(resource)
class SortedQueue(list):
    """List-based queue that keeps its items ordered by their
    :attr:`~PriorityRequest.key` attribute.
    """
    def __init__(self, maxlen=None):
        super(SortedQueue, self).__init__()
        self.maxlen = maxlen
        """Maximum length of the queue."""
    def append(self, item):
        """Insert *item* and re-sort the queue by ``key``.
        Raise a :exc:`RuntimeError` if the queue is full.
        """
        if self.maxlen is not None and len(self) >= self.maxlen:
            raise RuntimeError('Cannot append event. Queue is full.')
        super(SortedQueue, self).append(item)
        super(SortedQueue, self).sort(key=lambda event: event.key)
class Resource(base.BaseResource):
    """Resource with *capacity* usage slots that can be requested by
    processes.
    When every slot is taken, incoming requests are enqueued; releasing a
    slot triggers a pending request.
    The *env* parameter is the :class:`~simpy.core.Environment` instance the
    resource is bound to.
    """
    def __init__(self, env, capacity=1):
        if capacity <= 0:
            raise ValueError('"capacity" must be > 0.')
        super(Resource, self).__init__(env, capacity)
        self.users = []
        """List of :class:`Request` events for the processes that are currently
        using the resource."""
        self.queue = self.put_queue
        """Queue of pending :class:`Request` events. Alias of
        :attr:`~simpy.resources.base.BaseResource.put_queue`.
        """
    @property
    def count(self):
        """Number of users currently using the resource."""
        return len(self.users)
    request = BoundClass(Request)
    """Request a usage slot."""
    release = BoundClass(Release)
    """Release a usage slot."""
    def _do_put(self, event):
        # Grant a slot only while capacity remains; otherwise leave the event
        # untriggered so it stays queued.
        if len(self.users) >= self.capacity:
            return
        self.users.append(event)
        event.succeed()
    def _do_get(self, event):
        # Releasing always succeeds, even for a request that never held a slot.
        try:
            self.users.remove(event.request)
        except ValueError:
            pass
        event.succeed()
class PriorityResource(Resource):
    """A :class:`~simpy.resources.resource.Resource` supporting prioritized
    requests.
    Pending requests in the :attr:`~Resource.queue` are kept in ascending
    *priority* order (lower values are served first).
    """
    PutQueue = SortedQueue
    """Type of the put queue. See
    :attr:`~simpy.resources.base.BaseResource.put_queue` for details."""
    GetQueue = list
    """Type of the get queue. See
    :attr:`~simpy.resources.base.BaseResource.get_queue` for details."""
    request = BoundClass(PriorityRequest)
    """Request a usage slot with the given *priority*."""
    release = BoundClass(Release)
    """Release a usage slot."""
    def __init__(self, env, capacity=1):
        super(PriorityResource, self).__init__(env, capacity)
class PreemptiveResource(PriorityResource):
    """A :class:`~simpy.resources.resource.PriorityResource` with preemption.
    A preempted request's process receives an
    :class:`~simpy.events.Interrupt` whose cause is a :class:`Preempted`
    instance.
    """
    def _do_put(self, event):
        if event.preempt and len(self.users) >= self.capacity:
            # The resource is full: evict the least important current user if
            # it is strictly less important than the incoming request.
            # (sorted()[-1] rather than max() so ties keep the original pick.)
            victim = sorted(self.users, key=lambda req: req.key)[-1]
            if victim.key > event.key:
                self.users.remove(victim)
                victim.proc.interrupt(Preempted(by=event.proc,
                                                usage_since=victim.time,
                                                resource=self))
        return super(PreemptiveResource, self)._do_put(event)
| |
import os
import time
import typing
from datetime import datetime
from cauldron import environ
from cauldron import templating
from cauldron.cli import threads
from cauldron.render import stack as render_stack
from cauldron.runner.python_file import UserAbortError
from cauldron.session import projects
from cauldron.session import report
from cauldron.session.caching import SharedCache
class ExposedProject(object):
    """
    A simplified, user-facing wrapper around a Cauldron project. A single
    exposed project is created when the Cauldron library is first imported
    and is accessible from the cauldron root module.
    """
    def __init__(self):
        # The wrapped project; None until load() connects one.
        self._project = None  # type: typing.Optional[projects.Project]
    @property
    def internal_project(self) -> typing.Union[projects.Project, None]:
        """
        The current Cauldron project that is represented by this object, or
        None if no project has been loaded.
        """
        return self._project
    @property
    def id(self) -> typing.Union[str, None]:
        """Identifier for the project."""
        if not self._project:
            return None
        return self._project.id
    @property
    def display(self) -> typing.Union[None, report.Report]:
        """The display report for the current project's current step."""
        project = self._project
        if project and project.current_step:
            return project.current_step.report
        return None
    @property
    def shared(self) -> typing.Union[None, SharedCache]:
        """The shared display object associated with this project."""
        if not self._project:
            return None
        return self._project.shared
    @property
    def settings(self) -> typing.Union[None, SharedCache]:
        """The settings associated with this project."""
        if not self._project:
            return None
        return self._project.settings
    @property
    def title(self) -> typing.Union[None, str]:
        """The title of this project."""
        if not self._project:
            return None
        return self._project.title
    @title.setter
    def title(self, value: typing.Union[None, str]):
        """
        Modifies the title of the project, which is initially loaded from the
        `cauldron.json` file.
        """
        if not self._project:
            raise RuntimeError('Failed to assign title to an unloaded project')
        self._project.title = value
    def load(self, project: typing.Union[projects.Project, None]):
        """Connects this object to the specified source project."""
        self._project = project
    def unload(self):
        """Disconnects this object from the specified source project."""
        self._project = None
    def path(self, *args: typing.List[str]) -> typing.Union[None, str]:
        """
        Creates an absolute path in the project source directory from the
        relative path components.
        :param args:
            Relative components for creating a path within the project source
            directory
        :return:
            An absolute path to the specified file or directory within the
            project source directory, or None when no project is loaded.
        """
        if not self._project:
            return None
        pieces = [self._project.source_directory] + list(args)
        return environ.paths.clean(os.path.join(*pieces))
    def stop(self, message: str = None, silent: bool = False):
        """
        Stops the execution of the project at the current step immediately
        without raising an error. Use this to abort running the project in
        situations where some critical branching action should prevent the
        project from continuing to run.
        :param message:
            A custom display message to include in the display for the stop
            action. This message is ignored if silent is set to True.
        :param silent:
            When True nothing will be shown in the notebook display when the
            step is stopped. When False, the notebook display will include
            information relating to the stopped action.
        """
        project = self.get_internal_project()
        if not project or not project.current_step:
            return
        if not silent:
            render_stop_display(project.current_step, message)
        raise UserAbortError(halt=True)
    def get_internal_project(
            self,
            timeout: float = 1
    ) -> typing.Union['projects.Project', None]:
        """
        Attempts to return the internally loaded project, polling until it is
        available or *timeout* is reached. This guards against race conditions
        when projects are loaded on other threads.
        :param timeout:
            Maximum number of seconds to wait before giving up and returning
            None.
        """
        project = self.internal_project
        if project:
            return project
        attempts = int(timeout / 0.1)
        for _ in range(attempts):
            project = self.internal_project
            if project:
                return project
            time.sleep(0.1)
        return self.internal_project
class ExposedStep(object):
    """
    A simplified interface to a ProjectStep intended for use by Cauldron
    users within their notebooks.
    """

    @property
    def _step(self) -> typing.Union[None, 'projects.ProjectStep']:
        """
        Internal accessor for the backing ProjectStep. Should not be used
        outside of Cauldron development.

        :return:
            The ProjectStep instance this ExposedStep represents, or None
            when no internal project/step is currently available.
        """
        import cauldron
        try:
            internal_project = cauldron.project.get_internal_project()
            return internal_project.current_step
        except Exception:
            return None

    @property
    def start_time(self) -> typing.Union[datetime, None]:
        """
        The time at which the step started running, or None if the step
        has never run.
        """
        return self._step.start_time

    @property
    def end_time(self) -> typing.Union[datetime, None]:
        """
        The time at which the step stopped running, or None if the step
        has never run or is currently running.
        """
        return self._step.end_time

    @property
    def elapsed_time(self) -> float:
        """
        Seconds elapsed since the step started running if it is still
        running, or the duration of the most recent completed run.
        """
        return self._step.elapsed_time

    @property
    def visible(self) -> bool:
        """
        Whether this step remains visible in the display after it finishes
        running. Steps are always visible while running or after a failed
        run; when this is False a successfully completed step is hidden
        from the display.
        """
        return self._step.is_visible

    @visible.setter
    def visible(self, value: bool):
        """Setter for the visible property."""
        self._step.is_visible = value

    def stop(
            self,
            message: str = None,
            silent: bool = False,
            halt: bool = False
    ):
        """
        Stops the execution of the current step immediately without raising
        an error. Use this to abort the step running process early.

        :param message:
            Custom display message for the stop action. Ignored when silent
            is True.
        :param silent:
            When True nothing is shown in the notebook display for the stop.
            When False the display includes information about the stop.
        :param halt:
            When True the entire current run command is aborted; when False
            (the default) any queued subsequent steps continue running.
        """
        current = self._step
        if not current:
            return

        if not silent:
            render_stop_display(current, message)

        raise UserAbortError(halt=halt)

    def breathe(self):
        """
        Checks the current execution state for the running step and responds
        to any changes in that state. Particularly useful for detecting a
        user-initiated abort during long-running executions.
        """
        if self._step:
            threads.abort_thread()

    def write_to_console(self, message: str):
        """
        Writes the specified message to the console stdout without adding
        it to the notebook display.
        """
        if not self._step:
            raise ValueError(
                'Cannot write to the console stdout on an uninitialized step'
            )

        self._step.report.stdout_interceptor.write_source(
            '{}'.format(message)
        )

    def render_to_console(self, message: str, **kwargs):
        """
        Renders the specified message to the console using Jinja2 template
        rendering with the kwargs as render variables. The message is
        dedented prior to rendering, as with other Cauldron templating.

        :param message:
            Template string to be rendered.
        :param kwargs:
            Variables to be used in rendering the template.
        """
        return self.write_to_console(templating.render(message, **kwargs))
def render_stop_display(step: 'projects.ProjectStep', message: str):
    """Renders a stop action to the Cauldron display."""
    frames = render_stack.get_formatted_stack_frame(
        project=step.project,
        error_stack=False
    )

    # Locate the frame of the caller that triggered the stop: the entry
    # just before this module's own frame in the formatted stack.
    try:
        filenames = [entry['filename'] for entry in frames]
        position = filenames.index(os.path.realpath(__file__))
        frame = frames[position - 1]
    except Exception:
        frame = {}

    if message:
        stop_message = '{}'.format(message)
    else:
        stop_message = 'This step was explicitly stopped prior to its completion'

    step.report.append_body(templating.render_template(
        'step-stop.html',
        message=stop_message,
        frame=frame
    ))
| |
#!/usr/bin/env python
from paddle.trainer_config_helpers import *
# Input image geometry and the number of ImageNet classes.
height = 224
width = 224
num_class = 1000
# Runtime-tunable configuration arguments.
batch_size = get_config_arg('batch_size', int, 64)
layer_num = get_config_arg("layer_num", int, 50)  # ResNet depth: 50/101/152
is_infer = get_config_arg("is_infer", bool, False)
num_samples = get_config_arg('num_samples', int, 2560)
# Arguments forwarded to the python data provider (provider.process).
args = {
    'height': height,
    'width': width,
    'color': True,
    'num_class': num_class,
    'is_infer': is_infer,
    'num_samples': num_samples
}
# Train from train.list, or read test.list when running inference.
define_py_data_sources2(
    "train.list" if not is_infer else None,
    "test.list" if is_infer else None,
    module="provider",
    obj="process",
    args=args)
# Standard ResNet hyper-parameters; learning rate and L2 regularization
# are scaled by the batch size.
settings(
    batch_size=batch_size,
    learning_rate=0.01 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))
#######################Network Configuration #############
def conv_bn_layer(name,
                  input,
                  filter_size,
                  num_filters,
                  stride,
                  padding,
                  channels=None,
                  active_type=ReluActivation()):
    """
    Convolution followed by batch normalization.

    The convolution itself is linear (no activation); the requested
    activation is applied by the batch-norm layer. During inference the
    batch-norm layer uses its accumulated global statistics.
    """
    conv = img_conv_layer(
        name="%s_conv" % name,
        input=input,
        filter_size=filter_size,
        num_channels=channels,
        num_filters=num_filters,
        stride=stride,
        padding=padding,
        act=LinearActivation(),
        bias_attr=False)
    return batch_norm_layer(
        name="%s_bn" % name,
        input=conv,
        act=active_type,
        use_global_stats=is_infer)
def bottleneck_block(name, input, num_filters1, num_filters2):
    """
    Bottleneck building block for ResNet (identity shortcut).

    The last conv_bn_layer is linear; the element-wise addition applies
    the ReLU activation.
    """
    path = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=1,
        padding=0)
    path = conv_bn_layer(
        name=name + '_branch2b',
        input=path,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    path = conv_bn_layer(
        name=name + '_branch2c',
        input=path,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[input, path], act=ReluActivation())
def mid_projection(name, input, num_filters1, num_filters2, stride=2):
    """
    Projection block used at the start of each ResNet stage.

    branch1 is a 1x1 projection shortcut (linear) that matches the new
    dimensions; branch2a/2b/2c form the bottleneck path. The addto layer
    applies ReLU to the sum of both branches.
    """
    shortcut = conv_bn_layer(
        name=name + '_branch1',
        input=input,
        filter_size=1,
        num_filters=num_filters2,
        stride=stride,
        padding=0,
        active_type=LinearActivation())
    path = conv_bn_layer(
        name=name + '_branch2a',
        input=input,
        filter_size=1,
        num_filters=num_filters1,
        stride=stride,
        padding=0)
    path = conv_bn_layer(
        name=name + '_branch2b',
        input=path,
        filter_size=3,
        num_filters=num_filters1,
        stride=1,
        padding=1)
    path = conv_bn_layer(
        name=name + '_branch2c',
        input=path,
        filter_size=1,
        num_filters=num_filters2,
        stride=1,
        padding=0,
        active_type=LinearActivation())
    return addto_layer(
        name=name + "_addto", input=[shortcut, path], act=ReluActivation())
# Network input: flattened RGB image of height * width * 3 values.
img = data_layer(name='image', size=height * width * 3)
def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3):
    """
    Build a deep ResNet (50/101/152 layers) for ImageNet.

    res2_num..res5_num give the number of bottleneck blocks stacked in
    stages conv2_x..conv5_x respectively.
    """
    # conv1: 112x112 output.
    tmp = conv_bn_layer(
        "conv1",
        input=img,
        filter_size=7,
        channels=3,
        num_filters=64,
        stride=2,
        padding=3)
    tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2)

    # (prefix, block count, bottleneck filters, output filters, first stride)
    # Spatial sizes: conv2_x 56x56, conv3_x 28x28, conv4_x 14x14, conv5_x 7x7.
    stage_specs = [
        ('res2', res2_num, 64, 256, 1),
        ('res3', res3_num, 128, 512, 2),
        ('res4', res4_num, 256, 1024, 2),
        ('res5', res5_num, 512, 2048, 2),
    ]
    for prefix, count, filters1, filters2, first_stride in stage_specs:
        # Each stage opens with a projection block, then identity blocks.
        tmp = mid_projection(
            name=prefix + '_1',
            input=tmp,
            num_filters1=filters1,
            num_filters2=filters2,
            stride=first_stride)
        for block_index in xrange(2, count + 1):
            tmp = bottleneck_block(
                name=prefix + '_' + str(block_index),
                input=tmp,
                num_filters1=filters1,
                num_filters2=filters2)

    tmp = img_pool_layer(
        name='avgpool',
        input=tmp,
        pool_size=7,
        stride=1,
        pool_type=AvgPooling())
    return fc_layer(input=tmp, size=num_class, act=SoftmaxActivation())
# Map the requested depth to the block counts of the standard ResNet
# architectures (conv2_x .. conv5_x).
if layer_num == 50:
    resnet = deep_res_net(3, 4, 6, 3)
elif layer_num == 101:
    resnet = deep_res_net(3, 4, 23, 3)
elif layer_num == 152:
    resnet = deep_res_net(3, 8, 36, 3)
else:
    # Fail fast with a clear error: previously an unsupported depth only
    # printed a warning and then crashed below with a NameError because
    # 'resnet' was never assigned.
    raise ValueError("Wrong layer number: %s" % layer_num)
if is_infer:
    outputs(resnet)
else:
    # Training topology: softmax output against the ground-truth label.
    lbl = data_layer(name="label", size=num_class)
    loss = cross_entropy(name='loss', input=resnet, label=lbl)
    outputs(loss)
| |
#! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose) Verbose. Print informative msgs; else no output.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory
path, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
The backup file is a copy of the one that is being reindented. The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable that
you'd prefer. You can always use the --nobackup option to prevent this.
"""
__version__ = "1"
import tokenize
import os, shutil
import sys
# Global option flags; set from the command line in main().
verbose = 0        # print informative messages
recurse = 0        # descend into subdirectories
dryrun = 0         # analyze only; do not rewrite files
makebackup = True  # write a ".bak" copy before overwriting a file
def usage(msg=None):
    """Print an optional message, then the module docstring, to stderr."""
    if msg is not None:
        sys.stderr.write("%s\n" % (msg,))
    sys.stderr.write("%s\n" % (__doc__,))
def errprint(*args):
    """Write the arguments to stderr, space-separated, newline-terminated."""
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
def main():
    """Parse command-line options, then reindent the given paths or stdin."""
    import getopt
    global verbose, recurse, dryrun, makebackup
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                                   ["dryrun", "recurse", "nobackup",
                                    "verbose", "help"])
    except getopt.error as msg:
        usage(msg)
        return
    for opt, _ in opts:
        if opt in ('-d', '--dryrun'):
            dryrun += 1
        elif opt in ('-r', '--recurse'):
            recurse += 1
        elif opt in ('-n', '--nobackup'):
            makebackup = False
        elif opt in ('-v', '--verbose'):
            verbose += 1
        elif opt in ('-h', '--help'):
            usage()
            return
    if not args:
        # Filter mode: read one source from stdin, write result to stdout.
        reindenter = Reindenter(sys.stdin)
        reindenter.run()
        reindenter.write(sys.stdout)
        return
    for path in args:
        check(path)
def check(file):
    # Reindent a single file, or walk a directory looking for .py files
    # (recursing into subdirectories when the -r flag was given). Regular
    # files are rewritten in place, honoring the dryrun/makebackup globals.
    # Returns True if the file changed, False if unchanged, None for
    # directories and I/O errors.
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "listing directory", file
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # Recurse into non-hidden, non-symlink subdirectories when
            # requested; always examine .py files.
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return
    if verbose:
        print "checking", file, "...",
    try:
        f = open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return
    r = Reindenter(f)
    f.close()
    if r.run():
        if verbose:
            print "changed."
            if dryrun:
                print "But this is a dry run, so leaving it alone."
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                # Keep a backup copy of the original before overwriting.
                shutil.copyfile(file, bak)
                if verbose:
                    print "backed up", file, "to", bak
            f = open(file, "w")
            r.write(f)
            f.close()
            if verbose:
                print "wrote new", file
        return True
    else:
        if verbose:
            print "unchanged."
        return False
def _rstrip(line, JUNK='\n \t'):
"""Return line stripped of trailing spaces, tabs, newlines.
Note that line.rstrip() instead also strips sundry control characters,
but at least one known Emacs user expects to keep junk like that, not
mentioning Barry by name or anything <wink>.
"""
i = len(line)
while i > 0 and line[i-1] in JUNK:
i -= 1
return line[:i]
class Reindenter:
    # Reindents one Python source to canonical 4-space indentation.
    # Construct with an open file-like object, call run() to compute the
    # transformed lines, then write() to emit them.
    def __init__(self, f):
        self.find_stmt = 1  # next token begins a fresh stmt?
        self.level = 0  # current indent level
        # Raw file lines.
        self.raw = f.readlines()
        # File lines, rstripped & tab-expanded.  Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line
        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line.  indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []
    def run(self):
        # Tokenize the input (populating self.stats via tokeneater), then
        # rebuild every statement at indent level * 4 spaces, dragging
        # comment lines along with their neighboring statements.
        # Returns True if the transformed text differs from the original.
        tokenize.tokenize(self.getline, self.tokeneater)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats)-1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i+1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line.  If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in xrange(i+1, len(stats)-1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:  # Maybe it's a hanging
                        # comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in xrange(i-1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + getlspace(after[jline-1]) - \
                                       getlspace(lines[jline])
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        # Shift left, but never strip more than the line's
                        # own leading whitespace.
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after
    def write(self, f):
        # Emit the transformed program computed by run().
        f.writelines(self.after)
    # Line-getter for tokenize.
    def getline(self):
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line
    # Line-eater for tokenize.
    def tokeneater(self, type, token, (sline, scol), end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):
        # Tracks indentation level and records (lineno, level) for each
        # statement, and (lineno, -1) for comment lines.
        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1
        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1
        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1
        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((sline, -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone
        elif type == NL:
            pass
        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line:   # not endmarker
                self.stats.append((sline, self.level))
# Count number of leading blanks.
def getlspace(line):
    """Return the number of leading space characters in line."""
    return len(line) - len(line.lstrip(" "))
# Allow use both as a command-line script and as an importable module.
if __name__ == '__main__':
    main()
| |
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which 01-codes a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        Parameter for rbf kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    n_neighbors : integer > 0
        Parameter for knn kernel

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
        self.max_iter = max_iter
        self.tol = tol
        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors
        # clamping factor
        self.alpha = alpha
        self.n_jobs = n_jobs

    def _get_kernel(self, X, y=None):
        # With y=None, returns the full affinity between all rows of X;
        # with y given, returns the kernel between X and y (rbf) or the
        # neighbor indices of y within the fitted X (knn).
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            # Fit the nearest-neighbors index lazily on first use.
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # knn returns neighbor indices; sum the label distributions
            # of each sample's neighbors.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # Normalize each row into a categorical distribution.
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)

        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])  # -1 marks unlabeled samples
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        # Labeled samples keep full weight; unlabeled samples are weighted
        # by the clamping factor alpha.
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha

        # initialize distributions as one-hot rows for labeled samples
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            # Soft clamping: retain (1 - alpha) of the original labels.
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # Iterate propagate -> clamp until convergence or iteration budget.
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        Parameter for rbf kernel

    n_neighbors : integer > 0
        Parameter for knn kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """

    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Column sums used to normalize the affinity matrix.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): np.diag of the (1, n) matrix `normalizer` yields
            # only its first element, so all sparse entries are divided by a
            # single scalar rather than per-column as in the dense branch
            # below -- confirm this is intended.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        parameter for rbf kernel

    n_neighbors : integer > 0
        parameter for knn kernel

    alpha : float
        clamping factor

    max_iter : float
        maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3, n_jobs=1):
        # this one has different base parameters (alpha defaults to soft
        # clamping at 0.2 rather than the hard clamp of 1)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol,
                                             n_jobs=n_jobs)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # Propagation uses the negated laplacian with a zeroed diagonal so a
        # sample does not reinforce its own label.
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
import sys
import types
import uuid
import eventlet.greenpool
import yaql.expressions
from murano.common import utils
import murano.dsl.murano_object
import murano.dsl.yaql_expression as yaql_expression
def serialize(value, memo=None):
    """Recursively convert value to plain dicts/lists/scalars.

    A MuranoObject is expanded into its dictionary form the first time it
    is encountered; later occurrences are replaced by their object_id to
    break reference cycles. `memo` holds the object ids already expanded.
    """
    if memo is None:
        memo = set()
    if isinstance(value, types.DictionaryType):
        return dict(
            (d_key, serialize(d_value, memo))
            for d_key, d_value in value.iteritems())
    if isinstance(value, murano.dsl.murano_object.MuranoObject):
        if value.object_id in memo:
            return value.object_id
        memo.add(value.object_id)
        return serialize(value.to_dictionary(), memo)
    if isinstance(value, types.ListType):
        return [serialize(item, memo) for item in value]
    return value
def execute_instruction(instruction, action, context):
    """Run action() with ?currentInstruction temporarily set in context."""
    previous = context.get_data('$?currentInstruction')
    context.set_data(instruction, '?currentInstruction')
    outcome = action()
    context.set_data(previous, '?currentInstruction')
    return outcome
def evaluate(value, context, max_depth=sys.maxint):
    # Recursively evaluate YAQL expressions embedded in value against the
    # given context. max_depth bounds the recursion: once it reaches zero,
    # an expression is returned as a lazy callable instead of being
    # evaluated immediately.
    if isinstance(value, yaql.expressions.Expression):
        # Normalize raw yaql expressions into the DSL wrapper type.
        value = yaql_expression.YaqlExpression(value)
    if isinstance(value, yaql_expression.YaqlExpression):
        func = lambda: evaluate(value.evaluate(context), context, 1)
        if max_depth <= 0:
            return func
        else:
            # Record the expression as the current instruction while it runs.
            return execute_instruction(value, func, context)
    elif isinstance(value, types.DictionaryType):
        result = {}
        for d_key, d_value in value.iteritems():
            result[evaluate(d_key, context, max_depth - 1)] = \
                evaluate(d_value, context, max_depth - 1)
        return result
    elif isinstance(value, types.ListType):
        return [evaluate(t, context, max_depth - 1) for t in value]
    elif isinstance(value, types.TupleType):
        return tuple(evaluate(list(value), context, max_depth - 1))
    elif callable(value):
        # A deferred evaluation produced by an earlier depth-limited call.
        return value()
    elif isinstance(value, types.StringTypes):
        return value
    elif isinstance(value, collections.Iterable):
        # Materialize other iterables (e.g. generators) into lists.
        return list(value)
    else:
        return value
def needs_evaluation(value):
    """Return True when value contains a YAQL expression to evaluate."""
    if isinstance(value, (yaql_expression.YaqlExpression,
                          yaql.expressions.Expression)):
        return True
    if isinstance(value, types.DictionaryType):
        return any(
            needs_evaluation(d_value) or needs_evaluation(d_key)
            for d_key, d_value in value.iteritems())
    if isinstance(value, types.StringTypes):
        # Strings are iterable but never contain expressions themselves.
        return False
    if isinstance(value, collections.Iterable):
        return any(needs_evaluation(item) for item in value)
    return False
def exclude_lists(list1, list2):
    """Return the items of list1 that have no equivalent item in list2."""
    remaining = []
    for candidate in list1:
        matched = any(
            not utils.is_different(candidate, existing)
            for existing in list2)
        if not matched:
            remaining.append(candidate)
    return remaining
def check_dicts(dict):
    """Check whether a (nested) diff-dict contains meaningful content.

    NOTE(review): shadows the builtin name ``dict``. The dict branch
    returns the verdict of the FIRST nested dict encountered, and the
    list branch only returns for non-empty lists (an empty list falls
    through to the next key). Presumably tailored to the result shape of
    ``exclude_dicts`` -- confirm before using on general inputs.
    """
    for key,value in dict.items():
        if isinstance(value, types.DictionaryType):
            # Recurse into the first nested dict and return its verdict.
            return check_dicts(value)
        elif isinstance(value, types.ListType):
            if len(value) > 0:
                return True
        else:
            # Any scalar value counts as content.
            return True
    return False
def exclude_dicts(dict1, dict2, max_levels=0):
    """Return the parts of *dict1* that differ from *dict2* (recursive diff).

    Python 2 only (`types.*Type` constants).

    Args:
        dict1: the "new" dictionary.
        dict2: the "old" dictionary to diff against.
        max_levels: recursion bound; 0 means unlimited.

    Raises:
        TypeError: when a shared key maps to values of different types,
            unless both are string types.
    """
    result = {}
    for key, value1 in dict1.items():
        if key in dict2:
            value2 = dict2[key]
            if type(value2) != type(value1):
                if (isinstance(value1, types.StringTypes) and
                        isinstance(value2, types.StringTypes)):
                    # str vs unicode mismatch is tolerated but not diffed.
                    continue
                raise TypeError()
            if max_levels != 1 and isinstance(value2, types.DictionaryType):
                res = exclude_dicts(
                    value1, value2,
                    0 if max_levels == 0 else max_levels - 1)
                if len(res) > 0:
                    # Keep the nested diff only if it has real content.
                    if check_dicts(res):
                        result[key] = res
            elif max_levels != 1 and isinstance(value2, types.ListType):
                res = exclude_lists(value1, value2)
                if len(res) > 0:
                    result[key] = res
        else:
            # Key only present in dict1 -- keep it verbatim.
            result[key] = value1
    return result
def merge_lists(list1, list2):
    """Concatenate two lists, dropping items equivalent to earlier ones.

    Equivalence is decided by ``utils.is_different``; the relative order
    of first occurrences is preserved.
    """
    merged = []
    for item in list1 + list2:
        for old_item in merged:
            if not utils.is_different(item, old_item):
                # Already represented in the merged output.
                break
        else:
            merged.append(item)
    return merged
def merge_dicts(dict1, dict2, max_levels=0):
    """Recursively merge *dict2* into *dict1*, *dict2* winning on scalars.

    Python 2 only (`types.*Type` constants).

    Args:
        dict1: base dictionary.
        dict2: overriding dictionary.
        max_levels: recursion bound; 0 means unlimited.

    Raises:
        TypeError: when a shared key maps to values of different types,
            unless both are string types (then dict1's value is kept).
    """
    result = {}
    for key, value1 in dict1.items():
        result[key] = value1
        if key in dict2:
            value2 = dict2[key]
            if type(value2) != type(value1):
                if (isinstance(value1, types.StringTypes) and
                        isinstance(value2, types.StringTypes)):
                    continue
                raise TypeError()
            if max_levels != 1 and isinstance(value2, types.DictionaryType):
                result[key] = merge_dicts(
                    value1, value2,
                    0 if max_levels == 0 else max_levels - 1)
            elif max_levels != 1 and isinstance(value2, types.ListType):
                result[key] = merge_lists(value1, value2)
            else:
                # Scalars: dict2 overrides dict1.
                result[key] = value2
    for key, value1 in dict2.items():
        # Keys only present in dict2 are copied over verbatim.
        if key not in result:
            result[key] = value1
    return result
def generate_id():
    """Return a random 32-character lowercase hex id (a dash-less UUID4)."""
    return '{0:032x}'.format(uuid.uuid4().int)
def parallel_select(collection, func):
    """Apply *func* to each element of *collection* in parallel greenlets.

    Python 2 only (three-argument ``raise``). Results are returned in
    input order; if any call raised, the first exception (in input order)
    is re-raised with its original traceback.
    """
    # workaround for eventlet issue 232
    # https://github.com/eventlet/eventlet/issues/232
    def wrapper(element):
        # Capture (result-or-exception, failed?, traceback) per element so
        # exceptions do not abort the whole imap.
        try:
            return func(element), False, None
        except Exception as e:
            return e, True, sys.exc_info()[2]
    gpool = eventlet.greenpool.GreenPool()
    result = list(gpool.imap(wrapper, collection))
    try:
        exception = next(t for t in result if t[1])
    except StopIteration:
        # No failures: strip the bookkeeping tuples.
        return map(lambda t: t[0], result)
    else:
        # Re-raise the first failure with its original traceback (py2 syntax).
        raise exception[0], None, exception[2]
def to_python_codestyle(name):
    """Convert a CamelCase identifier to snake_case.

    Handles acronym runs: ``getHTTPResponse`` -> ``get_http_response``.
    """
    # First pass: break before an uppercase letter that starts a word.
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Second pass: break between a lowercase/digit and an uppercase letter.
    snake = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial)
    return snake.lower()
def enum(**enums):
    """Create an ad-hoc enumeration type whose attributes are the kwargs."""
    return type('Enum', (), dict(enums))
# Convenience accessors for well-known values stored in the YAQL context.
# Each returns whatever `context.get_data` yields for the given key
# (presumably None when the key has not been set -- depends on the
# context implementation).
def get_executor(context):
    return context.get_data('$?executor')
def get_class_loader(context):
    return context.get_data('$?classLoader')
def get_type(context):
    return context.get_data('$?type')
def get_environment(context):
    return context.get_data('$?environment')
def get_object_store(context):
    return context.get_data('$?objectStore')
def get_this(context):
    return context.get_data('$?this')
def get_caller_context(context):
    return context.get_data('$?callerContext')
def get_attribute_store(context):
    return context.get_data('$?attributeStore')
def get_current_instruction(context):
    return context.get_data('$?currentInstruction')
def get_current_method(context):
    return context.get_data('$?currentMethod')
def get_current_exception(context):
    return context.get_data('$?currentException')
| |
"""Fava's main WSGI application.
When using Fava's WSGI app, make sure to set ``app.config['BEANCOUNT_FILES']``.
To start a simple server::
from fava.application import app
app.config['BEANCOUNT_FILES'] = ['/path/to/file.beancount']
app.run('localhost', 5000)
Attributes:
app: An instance of :class:`flask.Flask`, this is Fava's WSGI application.
"""
from __future__ import annotations
import datetime
import functools
import threading
from io import BytesIO
from typing import Any
import flask
import markdown2 # type: ignore
import werkzeug.urls
from beancount import __version__ as beancount_version
from beancount.core.account import ACCOUNT_RE
from beancount.utils.text_utils import replace_numbers
from flask import abort
from flask import Flask
from flask import redirect
from flask import render_template
from flask import render_template_string
from flask import request
from flask import send_file
from flask.wrappers import Response
from flask_babel import Babel # type: ignore
from flask_babel import get_translations
from werkzeug.utils import secure_filename
from fava import __version__ as fava_version
from fava import LANGUAGES
from fava import template_filters
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import FavaJSONEncoder
from fava.core.documents import is_document_or_import_file
from fava.help import HELP_PAGES
from fava.helpers import FavaAPIException
from fava.json_api import json_api
from fava.serialisation import serialise
from fava.util import next_key
from fava.util import resource_path
from fava.util import send_file_inline
from fava.util import setup_logging
from fava.util import slugify
from fava.util.date import Interval
from fava.util.excel import HAVE_EXCEL
# Module-level setup: logging, the Flask app and its Jinja environment are
# configured at import time so a WSGI server can import `app` directly.
STATIC_FOLDER = resource_path("static")
setup_logging()
app = Flask( # pylint: disable=invalid-name
    __name__,
    template_folder=str(resource_path("templates")),
    static_folder=str(STATIC_FOLDER),
)
# The JSON API lives on its own blueprint under each ledger's slug.
app.register_blueprint(json_api, url_prefix="/<bfile>/api")
app.json_encoder = FavaJSONEncoder  # type: ignore
jinja_extensions = app.jinja_options.setdefault("extensions", [])
jinja_extensions.append("jinja2.ext.do")
jinja_extensions.append("jinja2.ext.loopcontrols")
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config["HAVE_EXCEL"] = HAVE_EXCEL
app.config["ACCOUNT_RE"] = ACCOUNT_RE
# Names of the report pages served by the generic `report` endpoint below.
REPORTS = [
    "balance_sheet",
    "commodities",
    "documents",
    "events",
    "editor",
    "errors",
    "holdings",
    "import",
    "income_statement",
    "journal",
    "options",
    "query",
    "statistics",
    "trial_balance",
]
# Guards the lazy, first-request loading of the Beancount files.
LOAD_FILE_LOCK = threading.Lock()
def ledger_slug(ledger: FavaLedger) -> str:
    """Generate URL slug for a ledger, falling back to its file path."""
    return slugify(ledger.options["title"]) or slugify(
        ledger.beancount_file_path
    )
def update_ledger_slugs(ledgers: list[FavaLedger]) -> None:
    """Update the dictionary mapping URL slugs to ledgers."""
    mapping: dict[str, FavaLedger] = {}
    for current in ledgers:
        # next_key de-duplicates colliding slugs by appending a counter.
        mapping[next_key(ledger_slug(current), mapping)] = current
    app.config["LEDGERS"] = mapping
def _load_file() -> None:
    """Load the configured Beancount files.

    This is run automatically on the first request.
    """
    update_ledger_slugs(
        [FavaLedger(path) for path in app.config["BEANCOUNT_FILES"]]
    )
def get_locale() -> str | None:
    """Get the locale Babel should use.

    Returns the language configured as a Fava option if set; otherwise the
    best match against the browser's accepted languages.
    """
    configured = g.ledger.fava_options.language
    if configured is None:
        return request.accept_languages.best_match(["en"] + LANGUAGES)
    return configured
# Wire up flask-babel with our locale selector and register Fava's
# template filters on the app.
BABEL = Babel(app)
BABEL.localeselector(get_locale)
for function in template_filters.FILTERS:
    app.add_template_filter(function)  # type: ignore
app.add_template_filter(serialise)
@app.url_defaults
def _inject_filters(endpoint: str, values: dict[str, str | None]) -> None:
    """Carry the ledger slug and the filter parameters over to built URLs."""
    expects_bfile = app.url_map.is_endpoint_expecting(endpoint, "bfile")
    if expects_bfile and "bfile" not in values:
        values["bfile"] = g.beancount_file_slug
    if endpoint in ("static", "index"):
        # These endpoints never take filter parameters.
        return
    for param in ("conversion", "interval", "account", "filter", "time"):
        values.setdefault(param, request.args.get(param))
def static_url(filename: str) -> str:
    """Return a static url with an mtime query string for cache busting."""
    try:
        mtime = int((STATIC_FOLDER / filename).stat().st_mtime)
    except FileNotFoundError:
        # Unknown file: still produce a URL, just without a real mtime.
        mtime = 0
    return url_for("static", filename=filename, mtime=mtime)
# URL generation is called very often with identical arguments, so the
# flask helper is wrapped in an LRU cache.
CACHED_URL_FOR = functools.lru_cache(2048)(flask.url_for)
def url_for(endpoint: str, **values: str | int) -> str:
    """A wrapper around flask.url_for that uses a cache."""
    # Inject the global filter parameters *before* the cache lookup so
    # identical pages with identical filters share a cache entry.
    _inject_filters(endpoint, values)
    return CACHED_URL_FOR(endpoint, **values)
def url_for_source(**kwargs: str) -> str:
    """URL to source file (possibly link to external editor)."""
    if not g.ledger.fava_options.use_external_editor:
        return url_for("report", report_name="editor", **kwargs)
    # The beancount:// scheme is handled by an external editor integration.
    path = kwargs.get("file_path")
    line = kwargs.get("line", 1)
    return f"beancount://{path}?lineno={line}"
def translations() -> Any:
    """Get translations catalog."""
    # pylint: disable=protected-access
    # flask-babel exposes no public API for the raw message catalog, so we
    # read the private attribute of the current translations object.
    return get_translations()._catalog
# Expose helper functions to all Jinja templates.
app.add_template_global(static_url, "static_url")
app.add_template_global(datetime.date.today, "today")
app.add_template_global(url_for, "url_for")
app.add_template_global(url_for_source, "url_for_source")
app.add_template_global(translations, "translations")
@app.context_processor
def template_context() -> dict[str, FavaLedger]:
    """Inject the current ledger into the template context."""
    return {"ledger": g.ledger}
@app.before_request
def _perform_global_filters() -> None:
    """Reload the file if changed and apply the global filters per request."""
    ledger = getattr(g, "ledger", None)
    if not ledger:
        return
    # check (and possibly reload) source file
    if request.blueprint != "json_api":
        ledger.changed()
    ledger.filter(
        account=request.args.get("account"),
        filter=request.args.get("filter"),
        time=request.args.get("time"),
    )
@app.after_request
def _incognito(response: Response) -> Response:
    """Replace all numbers with 'X' when running in incognito mode."""
    serves_html = response.content_type.startswith("text/html")
    if app.config.get("INCOGNITO") and serves_html:
        view_args = request.view_args
        # The editor must keep real numbers, otherwise saving would
        # write the obfuscated text back to the file.
        in_editor = (
            request.endpoint == "report"
            and view_args is not None
            and view_args["report_name"] == "editor"
        )
        if not in_editor:
            body = response.get_data(as_text=True)
            response.set_data(replace_numbers(body))
    return response
@app.url_value_preprocessor
def _pull_beancount_file(_: str | None, values: dict[str, str] | None) -> None:
    """Resolve the ledger for the URL slug and set per-request globals."""
    g.beancount_file_slug = values.pop("bfile", None) if values else None
    # Lazily load the files on first use; the lock prevents two concurrent
    # first requests from loading them twice.
    with LOAD_FILE_LOCK:
        if not app.config.get("LEDGERS"):
            _load_file()
    if g.beancount_file_slug:
        if g.beancount_file_slug not in app.config["LEDGERS"]:
            # The slug might be stale (e.g. a ledger title changed).
            # 404 only if no loaded ledger currently maps to it.
            if not any(
                g.beancount_file_slug == ledger_slug(ledger)
                for ledger in app.config["LEDGERS"].values()
            ):
                abort(404)
            # one of the file slugs changed, update the mapping
            update_ledger_slugs(app.config["LEDGERS"].values())
        g.ledger = app.config["LEDGERS"][g.beancount_file_slug]
        g.conversion = request.args.get("conversion", "at_cost")
        g.interval = Interval.get(request.args.get("interval", "month"))
@app.errorhandler(FavaAPIException)
def fava_api_exception(error: FavaAPIException) -> str:
    """Render API errors as a plain error page."""
    return render_template(
        "_layout.html", content=error.message, page_title="Error"
    )
@app.route("/")
@app.route("/<bfile>/")
def index() -> werkzeug.wrappers.response.Response:
    """Redirect to the Income Statement (of the given or first file)."""
    if not g.beancount_file_slug:
        # No slug in the URL: default to the first loaded ledger.
        g.beancount_file_slug = next(iter(app.config["LEDGERS"]))
    index_url = url_for("index")
    ledger = app.config["LEDGERS"][g.beancount_file_slug]
    return redirect(f"{index_url}{ledger.fava_options.default_page}")
@app.route("/<bfile>/account/<name>/")
@app.route("/<bfile>/account/<name>/<subreport>/")
def account(name: str, subreport: str = "journal") -> str:
    """The account report."""
    if subreport not in ("journal", "balances", "changes"):
        return abort(404)
    return render_template(
        "account.html", account_name=name, subreport=subreport
    )
@app.route("/<bfile>/document/", methods=["GET"])
def document() -> Response:
    """Download a document."""
    filename = request.args.get("filename")
    # Only serve files that are known documents or import files of the
    # current ledger -- never arbitrary paths.
    if filename is not None and is_document_or_import_file(filename, g.ledger):
        return send_file_inline(filename)
    return abort(404)
@app.route("/<bfile>/statement/", methods=["GET"])
def statement() -> Response:
    """Download a statement file referenced in an entry's metadata."""
    document_path = g.ledger.statement_path(
        request.args.get("entry_hash", ""), request.args.get("key", "")
    )
    return send_file_inline(document_path)
@app.route("/<bfile>/holdings/by_<aggregation_key>/")
def holdings_by(aggregation_key: str) -> str:
    """The holdings report, aggregated by the given key."""
    if aggregation_key not in ("account", "currency", "cost_currency"):
        return abort(404)
    return render_template(
        "_layout.html",
        active_page="holdings",
        aggregation_key=aggregation_key,
    )
@app.route("/<bfile>/<report_name>/")
def report(report_name: str) -> str:
    """Endpoint for most reports."""
    if report_name not in REPORTS:
        return abort(404)
    return render_template("_layout.html", active_page=report_name)
@app.route("/<bfile>/extension/<report_name>/")
def extension_report(report_name: str) -> str:
    """Endpoint for extension reports."""
    try:
        template, extension = g.ledger.extensions.template_and_extension(
            report_name
        )
        rendered = render_template_string(template, extension=extension)
        return render_template(
            "_layout.html", content=rendered, page_title=extension.report_title
        )
    except LookupError:
        # Unknown extension report name.
        return abort(404)
@app.route("/<bfile>/download-query/query_result.<result_format>")
def download_query(result_format: str) -> Any:
    """Download a query result."""
    query = request.args.get("query_string", "")
    name, data = g.ledger.query_shell.query_to_file(query, result_format)
    attachment_name = f"{secure_filename(name.strip())}.{result_format}"
    return send_file(data, as_attachment=True, download_name=attachment_name)
@app.route("/<bfile>/download-journal/")
def download_journal() -> Any:
    """Download the full ledger rendered as one Beancount file."""
    timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
    contents = BytesIO(render_template("beancount_file").encode("utf8"))
    return send_file(
        contents,
        as_attachment=True,
        download_name=f"journal_{timestamp}.beancount",
    )
@app.route("/<bfile>/help/", defaults={"page_slug": "_index"})
@app.route("/<bfile>/help/<string:page_slug>")
def help_page(page_slug: str) -> str:
    """Fava's included documentation."""
    if page_slug not in HELP_PAGES:
        abort(404)
    markdown_html = markdown2.markdown_path(
        (resource_path("help") / (page_slug + ".md")),
        extras=["fenced-code-blocks", "tables", "header-ids"],
    )
    # The markdown output is itself a template so it can interpolate the
    # version numbers.
    rendered_help = render_template_string(
        markdown_html,
        beancount_version=beancount_version,
        fava_version=fava_version,
    )
    return render_template(
        "_layout.html",
        active_page="help",
        page_slug=page_slug,
        help_html=rendered_help,
        HELP_PAGES=HELP_PAGES,
    )
@app.route("/jump")
def jump() -> werkzeug.wrappers.response.Response:
    """Redirect back to the referer, replacing some parameters.

    This is useful for sidebar links, e.g. a link ``/jump?time=year``
    would set the time filter to `year` on the current page.

    When accessing ``/jump?param1=abc`` from
    ``/example/page?param1=123&param2=456``, this view should redirect to
    ``/example/page?param1=abc&param2=456``.
    """
    parsed = werkzeug.urls.url_parse(request.referrer)
    params = parsed.decode_query()
    for key, new_values in request.args.lists():
        if new_values == [""]:
            # An empty value means: drop this parameter entirely.
            params.pop(key, None)
        else:
            params.setlist(key, new_values)
    target = parsed.replace(query=werkzeug.urls.url_encode(params, sort=True))
    return redirect(werkzeug.urls.url_unparse(target))
| |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
import mock
from os_xenapi.client import exception as xenapi_exception
from os_xenapi.client import host_glance
from os_xenapi.client import XenAPI
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.image import glance as common_glance
from nova.tests.unit.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
    """Tests for the XenAPI glance image store (download/upload plugin calls)."""
    def setUp(self):
        """Build a fake XenAPI session and a minimal instance dict."""
        super(TestGlanceStore, self).setUp()
        self.store = glance.GlanceStore()
        self.flags(api_servers=['http://localhost:9292'], group='glance')
        self.flags(connection_url='http://localhost',
                   connection_password='test_pass',
                   group='xenserver')
        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')
        fake.reset()
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.stub_out('nova.virt.xenapi.vm_utils.get_sr_path',
                      lambda *a, **kw: '/fake/sr/path')
        self.instance = {'uuid': 'blah',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}
    def _get_params(self):
        """Common kwargs expected by the glance plugin calls."""
        return {'image_id': 'fake_image_uuid',
                'endpoint': 'http://localhost:9292',
                'sr_path': '/fake/sr/path',
                'api_version': 2,
                'extra_headers': {'X-Auth-Token': 'foobar',
                                  'X-Roles': '',
                                  'X-Tenant-Id': 'project',
                                  'X-User-Id': 'user',
                                  'X-Identity-Status': 'Confirmed'}}
    def _get_download_params(self):
        """Params for download calls (adds the VDI uuid stack)."""
        params = self._get_params()
        params['uuid_stack'] = ['uuid1']
        return params
    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    def test_download_image(self, mock_make_uuid_stack):
        """Happy path: download calls the glance plugin once."""
        params = self._get_download_params()
        with mock.patch.object(self.session, 'call_plugin_serialized'
                               ) as mock_call_plugin:
            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')
            mock_call_plugin.assert_called_once_with('glance.py',
                                                     'download_vhd2',
                                                     **params)
            mock_make_uuid_stack.assert_called_once_with()
    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    @mock.patch.object(random, 'shuffle')
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_download_image_retry(self, mock_fault, mock_sleep,
                                  mock_shuffle, mock_make_uuid_stack):
        """A RetryableError failure retries against the next glance server."""
        params = self._get_download_params()
        self.flags(num_retries=2, group='glance')
        # endpoint differs per attempt, so it is matched per-call below
        params.pop("endpoint")
        calls = [mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.1.1:9292',
                           **params),
                 mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.0.1:9293',
                           **params)]
        glance_api_servers = ['http://10.0.1.1:9292',
                              'http://10.0.0.1:9293']
        self.flags(api_servers=glance_api_servers, group='glance')
        with (mock.patch.object(self.session, 'call_plugin_serialized')
              ) as mock_call_plugin_serialized:
            error_details = ["", "", "RetryableError", ""]
            error = self.session.XenAPI.Failure(details=error_details)
            # first attempt fails with a retryable error, second succeeds
            mock_call_plugin_serialized.side_effect = [error, "success"]
            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')
            mock_call_plugin_serialized.assert_has_calls(calls)
            self.assertEqual(1, mock_fault.call_count)
    def _get_upload_params(self, auto_disk_config=True,
                           expected_os_type='default'):
        """Params for upload calls (VDI uuids + image properties)."""
        params = {}
        params['vdi_uuids'] = ['fake_vdi_uuid']
        params['properties'] = {'auto_disk_config': auto_disk_config,
                                'os_type': expected_os_type}
        return params
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image(self, mock_upload, mock_sr_path, mock_extra_header,
                          mock_disk_config):
        """Happy path: upload forwards SR path, headers and properties."""
        params = self._get_upload_params()
        mock_upload.return_value = 'fake_upload'
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_disk_config.return_value = 'true'
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_None_os_type(self, mock_upload, mock_sr_path,
                                       mock_extra_header, mock_disk_config):
        """os_type of None defaults to 'linux' in the image properties."""
        self.instance['os_type'] = None
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'
        mock_disk_config.return_value = 'true'
        params = self._get_upload_params(True, 'linux')
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
        mock_disk_config.assert_called_once_with(self.instance)
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_no_os_type(self, mock_upload, mock_sr_path,
                                     mock_extra_header, mock_disk_config):
        """A missing os_type key also defaults to 'linux'."""
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'
        del self.instance['os_type']
        params = self._get_upload_params(True, 'linux')
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
        mock_disk_config.assert_called_once_with(self.instance)
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_auto_config_disk_disabled(
            self, mock_upload, mock_sr_path, mock_extra_header):
        """image_auto_disk_config metadata 'Disabled' is passed through."""
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'
        sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
        self.instance["system_metadata"] = sys_meta
        params = self._get_upload_params("disabled")
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_raises_exception(self, mock_upload, mock_sr_path,
                                           mock_extra_header):
        """Unexpected plugin errors propagate to the caller."""
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.side_effect = RuntimeError
        params = self._get_upload_params()
        self.assertRaises(RuntimeError, self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_upload_image_retries_then_raises_exception(self,
                                                        mock_add_inst,
                                                        mock_time_sleep):
        """Retryable failures back off, then raise CouldNotUploadImage."""
        self.flags(num_retries=2, group='glance')
        params = self._get_params()
        params.update(self._get_upload_params())
        error_details = ["", "", "RetryableError", ""]
        error = XenAPI.Failure(details=error_details)
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               side_effect=error) as mock_call_plugin:
            self.assertRaises(exception.CouldNotUploadImage,
                              self.store.upload_image,
                              self.context, self.session, self.instance,
                              'fake_image_uuid', ['fake_vdi_uuid'])
            # exponential-ish backoff between the three attempts
            time_sleep_args = [mock.call(0.5), mock.call(1)]
            call_plugin_args = [
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params)]
            add_inst_args = [
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY))]
            mock_time_sleep.assert_has_calls(time_sleep_args)
            mock_call_plugin.assert_has_calls(call_plugin_args)
            mock_add_inst.assert_has_calls(add_inst_args)
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_upload_image_retries_on_signal_exception(self,
                                                      mock_add_inst,
                                                      mock_time_sleep):
        """'task signaled' failures are retried and eventually succeed."""
        self.flags(num_retries=2, group='glance')
        params = self._get_params()
        params.update(self._get_upload_params())
        error_details = ["", "task signaled", "", ""]
        error = XenAPI.Failure(details=error_details)
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details_v61 = ["", "signal: SIGTERM", "", ""]
        error_v61 = self.session.XenAPI.Failure(details=error_details_v61)
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               side_effect=[error, error_v61, None]
                               ) as mock_call_plugin:
            self.store.upload_image(self.context, self.session, self.instance,
                                    'fake_image_uuid', ['fake_vdi_uuid'])
            time_sleep_args = [mock.call(0.5), mock.call(1)]
            call_plugin_args = [
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params)]
            add_inst_args = [
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error_v61,
                          (XenAPI.Failure, error_v61, mock.ANY))]
            mock_time_sleep.assert_has_calls(time_sleep_args)
            mock_call_plugin.assert_has_calls(call_plugin_args)
            mock_add_inst.assert_has_calls(add_inst_args)
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_raises_exception_image_not_found(self,
                                                           mock_upload,
                                                           mock_sr_path,
                                                           mock_extra_header,
                                                           mock_disk_config):
        """PluginImageNotFound is translated to nova's ImageNotFound."""
        params = self._get_upload_params()
        mock_upload.return_value = 'fake_upload'
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_disk_config.return_value = 'true'
        image_id = 'fake_image_id'
        mock_upload.side_effect = xenapi_exception.PluginImageNotFound(
            image_id=image_id
        )
        self.assertRaises(exception.ImageNotFound, self.store.upload_image,
                          self.context, self.session, self.instance,
                          'fake_image_uuid', ['fake_vdi_uuid'])
        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 0, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
| |
from collections import OrderedDict
import errno
import glob
from hashlib import md5
from io import StringIO
import json
import os
from pathlib import Path
import re
from typing import Any, Dict, List, Optional, Text, Type, Union
import warnings
import random
import string
from ruamel import yaml as yaml
from ruamel.yaml import RoundTripRepresenter, YAMLError
from ruamel.yaml.constructor import DuplicateKeyError, BaseConstructor, ScalarNode
from rasa.shared.constants import (
DEFAULT_LOG_LEVEL,
ENV_LOG_LEVEL,
NEXT_MAJOR_VERSION_FOR_DEPRECATIONS,
CONFIG_SCHEMA_FILE,
MODEL_CONFIG_SCHEMA_FILE,
)
from rasa.shared.exceptions import (
FileIOException,
FileNotFoundException,
YamlSyntaxException,
RasaException,
)
import rasa.shared.utils.validation
DEFAULT_ENCODING = "utf-8"
YAML_VERSION = (1, 2)
class bcolors:
    """ANSI terminal escape codes used to colorize console output."""
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # resets all styling
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def wrap_with_color(*args: Any, color: Text) -> Text:
    """Join *args* with spaces and wrap the result in the given ANSI color."""
    joined = " ".join(str(arg) for arg in args)
    return f"{color}{joined}{bcolors.ENDC}"
def raise_warning(
    message: Text,
    category: Optional[Type[Warning]] = None,
    docs: Optional[Text] = None,
    **kwargs: Any,
) -> None:
    """Emit a `warnings.warn` with sensible defaults and a colored warning msg.

    Args:
        message: The warning message.
        category: The warning category (e.g. ``DeprecationWarning``).
        docs: Optional URL with more information, shown instead of the
            source line for user-facing warnings.
        **kwargs: Forwarded to ``warnings.warn`` (e.g. ``stacklevel``).
    """
    original_formatter = warnings.formatwarning

    def should_show_source_line() -> bool:
        # For warnings aimed at users (rather than at library code) the
        # source line is noise, unless the caller pinned a stacklevel.
        if "stacklevel" not in kwargs:
            if category == UserWarning or category is None:
                return False
            if category == FutureWarning:
                return False
        return True

    def formatwarning(
        message: Text,
        category: Optional[Type[Warning]],
        filename: Text,
        lineno: Optional[int],
        line: Optional[Text] = None,
    ) -> Text:
        """Function to format a warning the standard way."""
        if not should_show_source_line():
            if docs:
                line = f"More info at {docs}"
            else:
                line = ""
        formatted_message = original_formatter(
            message, category, filename, lineno, line
        )
        return wrap_with_color(formatted_message, color=bcolors.WARNING)

    if "stacklevel" not in kwargs:
        # try to set useful defaults for the most common warning categories
        if category == DeprecationWarning:
            kwargs["stacklevel"] = 3
        elif category in (UserWarning, FutureWarning):
            kwargs["stacklevel"] = 2

    # Temporarily swap in the colored formatter. Restore it even if
    # `warnings.warn` raises (e.g. when a filter escalates the warning to
    # an error), so the global formatter is never left monkey-patched.
    warnings.formatwarning = formatwarning
    try:
        warnings.warn(message, category=category, **kwargs)
    finally:
        warnings.formatwarning = original_formatter
def write_text_file(
    content: Text,
    file_path: Union[Text, Path],
    encoding: Text = DEFAULT_ENCODING,
    append: bool = False,
) -> None:
    """Writes text to a file.

    Args:
        content: The content to write.
        file_path: The path to which the content should be written.
        encoding: The encoding which should be used.
        append: Whether to append to the file or to truncate the file.
    """
    with open(file_path, "a" if append else "w", encoding=encoding) as handle:
        handle.write(content)
def read_file(filename: Union[Text, Path], encoding: Text = DEFAULT_ENCODING) -> Any:
    """Read text from a file, wrapping I/O errors in Rasa exceptions."""
    try:
        with open(filename, encoding=encoding) as handle:
            return handle.read()
    except FileNotFoundError:
        raise FileNotFoundException(
            f"Failed to read file, " f"'{os.path.abspath(filename)}' does not exist."
        )
    except UnicodeDecodeError:
        # The file exists but is not valid text in the requested encoding.
        raise FileIOException(
            f"Failed to read file '{os.path.abspath(filename)}', "
            f"could not read the file using {encoding} to decode "
            f"it. Please make sure the file is stored with this "
            f"encoding."
        )
def read_json_file(filename: Union[Text, Path]) -> Any:
    """Read json from a file."""
    content = read_file(filename)
    try:
        return json.loads(content)
    except ValueError as e:
        # json.JSONDecodeError is a subclass of ValueError.
        raise FileIOException(
            f"Failed to read json from '{os.path.abspath(filename)}'. Error: {e}"
        )
def list_directory(path: Text) -> List[Text]:
    """Returns all files and folders excluding hidden files.

    If the path points to a file, returns the file. This is a recursive
    implementation returning files in any depth of the path.
    """
    if not isinstance(path, str):
        raise ValueError(
            f"`resource_name` must be a string type. Got `{type(path)}` instead"
        )
    if os.path.isfile(path):
        return [path]
    if not os.path.isdir(path):
        raise ValueError(f"Could not locate the resource '{os.path.abspath(path)}'.")
    entries = []
    for base, dirs, files in os.walk(path, followlinks=True):
        # sort files for same order across runs
        for name in sorted(files, key=_filename_without_prefix):
            if not name.startswith("."):
                entries.append(os.path.join(base, name))
        # add not hidden directories (walk order is kept for these)
        for name in dirs:
            if not name.startswith("."):
                entries.append(os.path.join(base, name))
    return entries
def list_files(path: Text) -> List[Text]:
    """Returns all files excluding hidden files.

    If the path points to a file, returns the file."""
    return [entry for entry in list_directory(path) if os.path.isfile(entry)]
def _filename_without_prefix(file: Text) -> Text:
    """Drop everything up to and including the first ``_`` of a filename."""
    # str.partition returns "" as the remainder when no "_" is present,
    # matching the old split/join behavior.
    _, _, remainder = file.partition("_")
    return remainder
def list_subdirectories(path: Text) -> List[Text]:
    """Return all non-hidden folders directly below `path`.

    If `path` points to a file, an empty list is returned.
    """
    # glob with "*" already skips dot-prefixed (hidden) entries.
    entries = glob.glob(os.path.join(path, "*"))
    return [entry for entry in entries if os.path.isdir(entry)]
def deep_container_fingerprint(
    obj: Union[List[Any], Dict[Any, Any], Any], encoding: Text = DEFAULT_ENCODING
) -> Text:
    """Calculate a hash which is stable, independent of a container's key order.

    Works for lists and dictionaries; other objects either provide their own
    `fingerprint()` method or are hashed via their string representation.
    Keep in mind that a list with the same elements in a different order will
    create the same hash!

    Args:
        obj: dictionary or list to be hashed.
        encoding: encoding used for dumping objects as strings

    Returns:
        hash of the container.
    """
    if isinstance(obj, dict):
        return get_dictionary_fingerprint(obj, encoding)
    if isinstance(obj, list):
        return get_list_fingerprint(obj, encoding)

    fingerprint_method = getattr(obj, "fingerprint", None)
    if callable(fingerprint_method):
        return fingerprint_method()

    return get_text_hash(str(obj), encoding)
def get_dictionary_fingerprint(
    dictionary: Dict[Any, Any], encoding: Text = DEFAULT_ENCODING
) -> Text:
    """Calculate the fingerprint for a dictionary.

    Keys and values may themselves be dicts, lists or anything dumpable as a
    string; each is fingerprinted recursively before the result is hashed.

    Args:
        dictionary: dictionary to be hashed
        encoding: encoding used for dumping objects as strings

    Returns:
        The hash of the dictionary
    """
    hashed_items = {
        deep_container_fingerprint(key, encoding): deep_container_fingerprint(
            value, encoding
        )
        for key, value in dictionary.items()
    }
    # sort_keys makes the serialization (and thus the hash) key-order independent
    return get_text_hash(json.dumps(hashed_items, sort_keys=True), encoding)
def get_list_fingerprint(
    elements: List[Any], encoding: Text = DEFAULT_ENCODING
) -> Text:
    """Calculate a fingerprint for an unordered list.

    Args:
        elements: unordered list
        encoding: encoding used for dumping objects as strings

    Returns:
        the fingerprint of the list
    """
    element_hashes = [
        deep_container_fingerprint(element, encoding) for element in elements
    ]
    return get_text_hash(json.dumps(element_hashes), encoding)
def get_text_hash(text: Text, encoding: Text = DEFAULT_ENCODING) -> Text:
    """Return the hex md5 digest of `text` encoded with `encoding`."""
    digest = md5(text.encode(encoding))  # nosec - fingerprinting, not security
    return digest.hexdigest()
def json_to_string(obj: Any, **kwargs: Any) -> Text:
    """Serialize a JSON-serializable object to a string.

    Args:
        obj: JSON-serializable object.
        kwargs: serialization options passed on to `json.dumps`. By default
            two-space indentation is used and non-ASCII characters are left
            unescaped; both defaults can be overridden.

    Returns:
        The object serialized to JSON, as a string.
    """
    options = {"indent": 2, "ensure_ascii": False}
    options.update(kwargs)
    return json.dumps(obj, **options)
def fix_yaml_loader() -> None:
    """Ensure that any string read by yaml is represented as unicode."""

    def _construct_unicode(self: BaseConstructor, node: ScalarNode) -> Any:
        # Override the default string handling function to always return
        # plain (unicode) string objects.
        return self.construct_scalar(node)

    for loader_class in (yaml.Loader, yaml.SafeLoader):
        loader_class.add_constructor("tag:yaml.org,2002:str", _construct_unicode)

    yaml.allow_duplicate_keys = False
def replace_environment_variables() -> None:
    """Enable yaml loader to process the environment variables in the yaml."""
    # Matches scalars containing e.g. ${USER_NAME} or ${PASSWORD}.
    env_var_pattern = re.compile(r"^(.*)\$\{(.*)\}(.*)$")
    yaml.Resolver.add_implicit_resolver("!env_var", env_var_pattern, None)

    def _expand_env_vars(loader: BaseConstructor, node: ScalarNode) -> Text:
        """Substitute environment variables found in a YAML scalar."""
        raw_value = loader.construct_scalar(node)
        expanded = os.path.expandvars(raw_value)
        # Any "$..." token that survived expansion and was present in the
        # original scalar points at an unset environment variable.
        unresolved = [
            token
            for token in expanded.split()
            if token.startswith("$") and token in raw_value
        ]
        if unresolved:
            raise RasaException(
                f"Error when trying to expand the "
                f"environment variables in '{raw_value}'. "
                f"Please make sure to also set these "
                f"environment variables: '{unresolved}'."
            )
        return expanded

    yaml.SafeConstructor.add_constructor("!env_var", _expand_env_vars)
# Patch the yaml module once at import time so every later read of YAML in
# this module gets unicode strings and `${VAR}` environment-variable expansion.
fix_yaml_loader()
replace_environment_variables()
def read_yaml(content: Text, reader_type: Union[Text, List[Text]] = "safe") -> Any:
    """Parses yaml from a text.

    Args:
        content: A text containing yaml content.
        reader_type: Reader type to use. By default "safe" will be used.

    Returns:
        The parsed content (an empty dict for an empty document).

    Raises:
        ruamel.yaml.parser.ParserError: If there was an error when parsing the YAML.
    """
    if _is_ascii(content):
        # Required to make sure emojis are correctly parsed: ASCII-only input
        # may still carry escape sequences (e.g. "\U0001F600"); decoding with
        # raw_unicode_escape turns those escapes into the actual characters,
        # and the utf-16/surrogatepass round-trip repairs surrogate pairs.
        content = (
            content.encode("utf-8")
            .decode("raw_unicode_escape")
            .encode("utf-16", "surrogatepass")
            .decode("utf-16")
        )
    yaml_parser = yaml.YAML(typ=reader_type)
    yaml_parser.version = YAML_VERSION
    # Keep quoting style intact so round-tripping does not alter documents.
    yaml_parser.preserve_quotes = True
    return yaml_parser.load(content) or {}
def _is_ascii(text: Text) -> bool:
    """Return `True` if `text` contains only 7-bit ASCII characters."""
    try:
        text.encode("ascii")
    except UnicodeEncodeError:
        return False
    return True
def read_yaml_file(filename: Union[Text, Path]) -> Union[List[Any], Dict[Text, Any]]:
    """Parses a yaml file.

    Args:
        filename: The path to the file which should be read.

    Returns:
        Parsed content of the file.

    Raises:
        YamlSyntaxException: If the content cannot be parsed as YAML.
    """
    file_content = read_file(filename, DEFAULT_ENCODING)
    try:
        return read_yaml(file_content)
    except (YAMLError, DuplicateKeyError) as error:
        raise YamlSyntaxException(filename, error)
def write_yaml(
    data: Any,
    target: Union[Text, Path, StringIO],
    should_preserve_key_order: bool = False,
) -> None:
    """Write `data` as YAML to a file path or an in-memory stream.

    Args:
        data: The data to write.
        target: The path to the file which should be written or a stream object
        should_preserve_key_order: Whether to force preserve key order in `data`.
    """
    _enable_ordered_dict_yaml_dumping()

    if should_preserve_key_order:
        data = convert_to_ordered_dict(data)

    writer = yaml.YAML()
    # Avoid wrapping long lines.
    writer.width = YAML_LINE_MAX_WIDTH
    # Serialize `None` explicitly as `null`.
    writer.representer.add_representer(
        type(None),
        lambda representer, _: representer.represent_scalar(
            "tag:yaml.org,2002:null", "null"
        ),
    )

    if isinstance(target, StringIO):
        writer.dump(data, target)
        return

    with Path(target).open("w", encoding=DEFAULT_ENCODING) as output_file:
        writer.dump(data, output_file)
# Maximum line width before the YAML dumper would wrap long lines (used by
# `write_yaml` above; defined after it, which is fine since the value is only
# looked up at call time).
YAML_LINE_MAX_WIDTH = 4096
def is_key_in_yaml(file_path: Union[Text, Path], *keys: Text) -> bool:
    """Checks if any of the keys is contained in the root object of the yaml file.

    This is a cheap textual scan (`key:` at the start of a stripped line), not
    a full YAML parse.

    Arguments:
        file_path: path to the yaml file
        keys: keys to look for

    Returns:
        `True` if at least one of the keys is found, `False` otherwise.

    Raises:
        FileNotFoundException: if the file cannot be found.
    """
    prefixes = tuple(f"{key}:" for key in keys)
    try:
        with open(file_path, encoding=DEFAULT_ENCODING) as file:
            # str.startswith accepts a tuple, so one pass per line suffices.
            return any(line.lstrip().startswith(prefixes) for line in file)
    except FileNotFoundError:
        raise FileNotFoundException(
            f"Failed to read file, '{os.path.abspath(file_path)}' does not exist."
        )
def convert_to_ordered_dict(obj: Any) -> Any:
    """Recursively convert dictionaries in `obj` to `OrderedDict`s.

    Args:
        obj: Object to convert.

    Returns:
        An `OrderedDict` with all nested dictionaries converted if `obj` is a
        dictionary, otherwise the object itself.
    """
    # An existing OrderedDict is returned untouched (no recursion into it).
    if isinstance(obj, OrderedDict):
        return obj

    if isinstance(obj, list):
        return [convert_to_ordered_dict(item) for item in obj]

    if isinstance(obj, dict):
        return OrderedDict(
            (key, convert_to_ordered_dict(value)) for key, value in obj.items()
        )

    return obj
def _enable_ordered_dict_yaml_dumping() -> None:
    """Ensure that `OrderedDict`s are dumped so that the order of keys is respected."""
    # Register `OrderedDict` with the round-trip representer so it serializes
    # like a plain mapping, preserving insertion order.
    yaml.add_representer(
        OrderedDict,
        RoundTripRepresenter.represent_dict,
        representer=RoundTripRepresenter,
    )
def is_logging_disabled() -> bool:
    """Returns `True` if log level is set to WARNING or ERROR, `False` otherwise."""
    configured_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
    return configured_level in {"ERROR", "WARNING"}
def create_directory_for_file(file_path: Union[Text, Path]) -> None:
    """Creates any missing parent directories of this file path."""
    parent_directory = os.path.dirname(file_path)
    create_directory(parent_directory)
def dump_obj_as_json_to_file(filename: Union[Text, Path], obj: Any) -> None:
    """Serialize `obj` as pretty-printed JSON and write it to `filename`."""
    serialized = json.dumps(obj, ensure_ascii=False, indent=2)
    write_text_file(serialized, filename)
def dump_obj_as_yaml_to_string(
    obj: Any, should_preserve_key_order: bool = False
) -> Text:
    """Writes data (python dict) to a yaml string.

    Args:
        obj: The object to dump. Has to be serializable.
        should_preserve_key_order: Whether to force preserve key order in `data`.

    Returns:
        The object converted to a YAML string.
    """
    stream = StringIO()
    write_yaml(obj, stream, should_preserve_key_order=should_preserve_key_order)
    return stream.getvalue()
def create_directory(directory_path: Text) -> None:
    """Creates a directory and its super paths.

    Succeeds even if the path already exists as a directory. A
    `FileExistsError` is raised if the path exists but is not a directory.
    """
    # `exist_ok=True` replaces the old manual errno.EEXIST dance and is
    # race-free between the existence check and the creation.
    os.makedirs(directory_path, exist_ok=True)
def raise_deprecation_warning(
    message: Text,
    warn_until_version: Text = NEXT_MAJOR_VERSION_FOR_DEPRECATIONS,
    docs: Optional[Text] = None,
    **kwargs: Any,
) -> None:
    """
    Thin wrapper around `raise_warning()` to raise a deprecation warning. It requires
    a version until which we'll warn, and after which the support for the feature will
    be removed.
    """
    # Append the removal version unless the caller already mentioned it.
    if warn_until_version not in message:
        message = f"{message} (will be removed in {warn_until_version})"
    # need the correct stacklevel now: point the warning at the caller of the
    # deprecated API, not at this helper or at `raise_warning` itself
    kwargs.setdefault("stacklevel", 3)
    # we're raising a `FutureWarning` instead of a `DeprecationWarning` because
    # we want these warnings to be visible in the terminal of our users
    # https://docs.python.org/3/library/warnings.html#warning-categories
    raise_warning(message, FutureWarning, docs, **kwargs)
def read_validated_yaml(filename: Union[Text, Path], schema: Text) -> Any:
    """Validates YAML file content and returns parsed content.

    Args:
        filename: The path to the file which should be read.
        schema: The path to the schema file which should be used for validating the
            file content.

    Returns:
        The parsed file content.

    Raises:
        YamlValidationException: In case the model configuration doesn't match the
            expected schema.
    """
    raw_content = read_file(filename)
    # Validation raises before any attempt to parse invalid content.
    rasa.shared.utils.validation.validate_yaml_schema(raw_content, schema)
    return read_yaml(raw_content)
def read_config_file(filename: Union[Path, Text]) -> Dict[Text, Any]:
    """Parses a yaml configuration file. Content needs to be a dictionary.

    Args:
        filename: The path to the file which should be read.

    Returns:
        Parsed config file.

    Raises:
        YamlValidationException: In case file content is not a `Dict`.
    """
    return read_validated_yaml(filename, CONFIG_SCHEMA_FILE)
def read_model_configuration(filename: Union[Path, Text]) -> Dict[Text, Any]:
    """Parses a model configuration file.

    Args:
        filename: The path to the file which should be read.

    Returns:
        Parsed config file.

    Raises:
        YamlValidationException: In case the model configuration doesn't match the
            expected schema.
    """
    return read_validated_yaml(filename, MODEL_CONFIG_SCHEMA_FILE)
def is_subdirectory(path: Text, potential_parent_directory: Text) -> bool:
    """Checks if `path` is a subdirectory of `potential_parent_directory`.

    Args:
        path: Path to a file or directory.
        potential_parent_directory: Potential parent directory.

    Returns:
        `True` if `path` equals `potential_parent_directory` or lies anywhere
        below it, `False` otherwise.
    """
    if path is None or potential_parent_directory is None:
        return False

    path = os.path.abspath(path)
    potential_parent_directory = os.path.abspath(potential_parent_directory)

    # The previous substring test (`parent in path`) wrongly matched sibling
    # directories sharing a prefix ("/a/bc" under "/a/b") and occurrences of
    # the parent anywhere inside the path; compare on path-component
    # boundaries instead.
    try:
        common = os.path.commonpath([path, potential_parent_directory])
    except ValueError:
        # e.g. paths on different drives (Windows) share no common path.
        return False
    return common == potential_parent_directory
def random_string(length: int) -> Text:
    """Return a random string of uppercase letters and digits of the given length."""
    alphabet = string.ascii_uppercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
| |
# VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## @file connect.py
## @brief Connect to a VMOMI ServiceInstance.
##
## Detailed description (for Doxygen goes here)
"""
Connect to a VMOMI ServiceInstance.
Detailed description (for [e]pydoc goes here).
"""
from six import reraise
import sys
import re
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from xml.parsers.expat import ExpatError
import requests
from requests.auth import HTTPBasicAuth
from pyVmomi import vim, vmodl, SoapStubAdapter, SessionOrientedStub
from pyVmomi.VmomiSupport import nsMap, versionIdMap, versionMap, IsChildVersion
from pyVmomi.VmomiSupport import GetServiceVersions
"""
Global regular expression for parsing host and port connection
See http://www.ietf.org/rfc/rfc3986.txt sec 3.2.2
"""
_rx = re.compile(r"(^\[.+\]|[^:]+)(:\d+)?$")
_si = None
"""
Global (thread-shared) ServiceInstance
@todo: Get rid of me?
"""
class closing(object):
    """
    Context-manager helper that closes the wrapped object on exit from a
    'with' block, similar to the one provided by contextlib.
    """
    def __init__(self, obj):
        self.obj = obj
    def __enter__(self):
        # Hand the wrapped object to the `as` target of the `with` block.
        return self.obj
    def __exit__(self, *exc_info):
        # Always close, whether or not the block raised.
        self.obj.close()
class VimSessionOrientedStub(SessionOrientedStub):
    '''A vim-specific SessionOrientedStub. See the SessionOrientedStub class
    in pyVmomi/SoapAdapter.py for more information.

    The static `make*LoginMethod` factories below each build a closure that
    logs in through a given SOAP stub; the closures are intended to be passed
    as the "loginMethod" to a SessionOrientedStub constructor.'''
    # The set of exceptions that should trigger a relogin by the session stub.
    SESSION_EXCEPTIONS = (
        vim.fault.NotAuthenticated,
    )
    @staticmethod
    def makeUserLoginMethod(username, password, locale=None):
        '''Return a function that will call the vim.SessionManager.Login() method
        with the given parameters. The result of this function can be passed as
        the "loginMethod" to a SessionOrientedStub constructor.'''
        def _doLogin(soapStub):
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            # Only log in when there is no live session on this stub yet.
            if not sm.currentSession:
                si.content.sessionManager.Login(username, password, locale)
        return _doLogin
    @staticmethod
    def makeExtensionLoginMethod(extensionKey):
        '''Return a function that will log in via
        vim.SessionManager.LoginExtensionByCertificate() for the given
        extension key. The result of this function can be passed as the
        "loginMethod" to a SessionOrientedStub constructor.'''
        def _doLogin(soapStub):
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            # Only log in when there is no live session on this stub yet.
            if not sm.currentSession:
                si.content.sessionManager.LoginExtensionByCertificate(extensionKey)
        return _doLogin
    @staticmethod
    def makeCertHokTokenLoginMethod(stsUrl, stsCert=None):
        '''Return a function that will call the vim.SessionManager.LoginByToken()
        after obtaining a HoK SAML token from the STS. The result of this function
        can be passed as the "loginMethod" to a SessionOrientedStub constructor.

        @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
        @param stsCert: public key of the STS service.
        '''
        assert(stsUrl)
        def _doLogin(soapStub):
            import sso
            # Client certificate/key come from the stub's own SSL settings.
            cert = soapStub.schemeArgs['cert_file']
            key = soapStub.schemeArgs['key_file']
            authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
                                                 sts_cert=stsCert)
            samlAssertion = authenticator.get_hok_saml_assertion(cert,key)
            def _requestModifier(request):
                # Embed the SAML assertion into each outgoing request.
                return sso.add_saml_context(request, samlAssertion, key)
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            if not sm.currentSession:
                with soapStub.requestModifier(_requestModifier):
                    try:
                        # The token must be attached to the stub for the login
                        # call and cleared afterwards, even on failure.
                        soapStub.samlToken = samlAssertion
                        si.content.sessionManager.LoginByToken()
                    finally:
                        soapStub.samlToken = None
        return _doLogin
    @staticmethod
    def makeCredBearerTokenLoginMethod(username,
                                       password,
                                       stsUrl,
                                       stsCert=None):
        '''Return a function that will call the vim.SessionManager.LoginByToken()
        after obtaining a Bearer token from the STS. The result of this function
        can be passed as the "loginMethod" to a SessionOrientedStub constructor.

        @param username: username of the user/service registered with STS.
        @param password: password of the user/service registered with STS.
        @param stsUrl: URL of the SAML Token issueing service. (i.e. SSO server).
        @param stsCert: public key of the STS service.
        '''
        assert(username)
        assert(password)
        assert(stsUrl)
        def _doLogin(soapStub):
            import sso
            cert = soapStub.schemeArgs['cert_file']
            key = soapStub.schemeArgs['key_file']
            authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
                                                 sts_cert=stsCert)
            samlAssertion = authenticator.get_bearer_saml_assertion(username,
                                                                    password,
                                                                    cert,
                                                                    key)
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            if not sm.currentSession:
                try:
                    # Attach the token for the login call; always clear it.
                    soapStub.samlToken = samlAssertion
                    si.content.sessionManager.LoginByToken()
                finally:
                    soapStub.samlToken = None
        return _doLogin
def Connect(host='localhost', port=443, user='root', pwd='',
            service="hostd", adapter="SOAP", namespace=None, path="/sdk",
            version=None, keyFile=None, certFile=None):
    """
    Connect to the specified server, login and return the service
    instance object. The instance is also cached in this module (see SetSi)
    for easy access, and any exception is propagated to the caller.

    Clients should modify the service parameter only when connecting to
    a VMOMI server other than hostd/vpxd. For both of the latter, the
    default value is fine.

    @param host: Which host to connect to (may include ":port" or be a
                 bracketed IPv6 literal).
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param adapter: Adapter
    @type  adapter: string
    @param namespace: Namespace *** Deprecated: Use version instead ***
    @type  namespace: string
    @param path: Path
    @type  path: string
    @param version: Version
    @type  version: string
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    """
    # Split an optional ":port" suffix off the host, honoring bracketed
    # IPv6 literals like "[::1]:443".
    try:
        match = _rx.match(host)
        if match is not None:
            host = match.group(1)
            if host[0] == '[':
                # Strip the brackets from an IPv6 literal.
                host = host[1:-1]
            if match.group(2) is not None:
                port = int(match.group(2)[1:])
    except ValueError:
        pass

    if namespace:
        # `namespace` is deprecated; it is mutually exclusive with `version`.
        assert(version is None)
        version = versionMap[namespace]
    elif not version:
        version = "vim.version.version6"

    si, stub = __Login(host, port, user, pwd, service, adapter, version, path,
                       keyFile, certFile)
    SetSi(si)
    return si
def Disconnect(si):
    """
    Disconnect (logout) service instance

    @param si: Service instance (returned from Connect)
    """
    __Logout(si)
    # Drop the module-level cached instance as well.
    SetSi(None)
## Method that gets a local ticket for the specified user
def GetLocalTicket(si, user):
    """Acquire a local ticket for `user` and return its credentials.

    Returns:
        Tuple of (ticket user name, one-time password read from the ticket
        file on the local host).

    Raises:
        vim.fault.HostConnectFault: If the session manager cannot be queried.
    """
    try:
        sessionManager = si.content.sessionManager
    except Exception as e:
        if type(e).__name__ == 'ExpatError':
            msg = 'Malformed response while querying for local ticket: "%s"' % e
            raise vim.fault.HostConnectFault(msg=msg)
        else:
            msg = 'Failed to query for local ticket: "%s"' % e
            raise vim.fault.HostConnectFault(msg=msg)
    localTicket = sessionManager.AcquireLocalTicket(userName=user)
    # The original code used the Python 2-only builtin `file(...)`; use
    # `open` with a context manager so this works on Python 3 and the
    # handle is closed promptly.
    with open(localTicket.passwordFilePath) as passwordFile:
        password = passwordFile.read()
    return (localTicket.userName, password)
## Private method that performs the actual Connect and returns a
## connected service instance object.
def __Login(host, port, user, pwd, service, adapter, version, path,
            keyFile, certFile):
    """
    Private method that performs the actual Connect and returns a
    connected service instance object.

    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param adapter: Adapter
    @type  adapter: string
    @param version: Version
    @type  version: string
    @param path: Path
    @type  path: string
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    """
    # XXX remove the adapter and service arguments once dependent code is fixed
    if adapter != "SOAP":
        raise ValueError(adapter)

    # Create the SOAP stub adapter
    stub = SoapStubAdapter(host, port, version=version, path=path,
                           certKeyFile=keyFile, certFile=certFile)

    # Get Service instance
    si = vim.ServiceInstance("ServiceInstance", stub)
    try:
        content = si.RetrieveContent()
    except vmodl.MethodFault:
        raise
    except Exception as e:
        # NOTE (hartsock): preserve the traceback for diagnostics
        # pulling and preserving the traceback makes diagnosing connection
        # failures easier since the fault will also include where inside the
        # library the fault occurred. Without the traceback we have no idea
        # why the connection failed beyond the message string.
        # (renamed locals: the old code shadowed the builtins `type`,
        # `value` and `traceback` here)
        excType, excValue, excTraceback = sys.exc_info()
        if excTraceback:
            fault = vim.fault.HostConnectFault(msg=str(e))
            reraise(vim.fault.HostConnectFault, fault, excTraceback)
        else:
            raise vim.fault.HostConnectFault(msg=str(e))

    # Get a ticket if we're connecting to localhost and password is not specified
    if host == 'localhost' and not pwd:
        try:
            (user, pwd) = GetLocalTicket(si, user)
        except Exception:
            pass  # This is not supported against vCenter, and connecting
                  # with an empty password is fine in debug builds

    # Login; any fault (including vim.fault.InvalidLogin) propagates to the
    # caller unchanged, so the previous re-raise-only try/except was removed.
    content.sessionManager.Login(user, pwd, None)
    return si, stub
## Private method that performs the actual Disconnect
def __Logout(si):
    """
    Best-effort logout of the given service instance.

    @param si: Service instance (returned from Connect)
    """
    try:
        if si:
            si.RetrieveContent().sessionManager.Logout()
    except Exception:
        # Best effort: a dead session or connection is not an error on logout.
        pass
## Get the saved service instance.
def GetSi():
    """ Get the saved service instance. """
    # Returns the module-level cache set by SetSi() (None if never connected).
    return _si
## Set the saved service instance.
def SetSi(si):
    """ Set the saved service instance. """
    # Overwrite the module-level cache shared by Connect()/Disconnect().
    global _si
    _si = si
## Get the global saved stub
def GetStub():
    """Return the stub of the saved service instance, or None."""
    si = GetSi()
    return si._GetStub() if si else None
## RAII-style class for managing connections
class Connection(object):
    """Context manager that Connect()s on entry and Disconnect()s on exit."""
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded verbatim to Connect() on entry.
        self.args = args
        self.kwargs = kwargs
        self.si = None
    def __enter__(self):
        self.si = Connect(*self.args, **self.kwargs)
        return self.si
    def __exit__(self, *exc_info):
        if self.si:
            Disconnect(self.si)
            self.si = None
class SmartConnection(object):
    """Context manager that SmartConnect()s on entry and Disconnect()s on exit."""
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded verbatim to SmartConnect() on entry.
        self.args = args
        self.kwargs = kwargs
        self.si = None
    def __enter__(self):
        self.si = SmartConnect(*self.args, **self.kwargs)
        return self.si
    def __exit__(self, *exc_info):
        if self.si:
            Disconnect(self.si)
            self.si = None
## Private method that returns an ElementTree describing the API versions
## supported by the specified server. The result will be vimServiceVersions.xml
## if it exists, otherwise vimService.wsdl if it exists, otherwise None.
def __GetServiceVersionDescription(protocol, server, port, path):
    """
    Fetch a version-description document from the server and return its
    parsed ElementTree root. vimServiceVersions.xml is preferred;
    vimService.wsdl is the fallback. Returns None if neither document can be
    retrieved and parsed.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param server: Which server to connect to.
    @type  server: string
    @param port: Port
    @type  port: int
    @param path: Path
    @type  path: string
    """
    for document in ("vimServiceVersions.xml", "vimService.wsdl"):
        url = "%s://%s:%s/%s/%s" % (protocol, server, port, path, document)
        try:
            response = requests.get(url, verify=False)
            if response.status_code == 200:
                return ElementTree.fromstring(response.content)
        except ExpatError:
            # Document exists but is not well-formed XML; try the fallback.
            pass
    return None
## Private method that returns true if the service version description document
## indicates that the desired version is supported
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
    """
    Return True if the service version description document indicates that
    the desired version is supported.

    @param desiredVersion: The version we want to see if the server supports
                           (eg. vim.version.version2.
    @type  desiredVersion: string
    @param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
                                      or vimService.wsdl.
    @type  serviceVersionDescription: root ElementTree
    """
    root = serviceVersionDescription
    if root.tag == 'namespaces':
        # serviceVersionDescription appears to be a vimServiceVersions.xml document
        if root.get('version') != '1.0':
            raise RuntimeError('vimServiceVersions.xml has version %s,' \
                               ' which is not understood' % (root.get('version')))
        desiredVersionId = versionIdMap[desiredVersion]
        # A namespace supports the desired version either directly or through
        # one of its listed prior versions. (An unused `supportedVersion`
        # local was removed from this branch.)
        for namespace in root.findall('namespace'):
            versionId = namespace.findtext('version')
            if versionId == desiredVersionId:
                return True
            for priorVersion in namespace.findall('priorVersions/version'):
                if priorVersion.text == desiredVersionId:
                    return True
    else:
        # serviceVersionDescription must be a vimService.wsdl document
        wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
        importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
        # Strip the "urn:" prefix of the import namespace to look up the
        # server's advertised version.
        supportedVersion = versionMap[importElement.get('namespace')[4:]]
        if IsChildVersion(supportedVersion, desiredVersion):
            return True
    return False
## Private method that returns the most preferred API version supported by the
## specified server,
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions):
    """
    Return the most preferred API version supported by the specified server,
    or None if the server's version document cannot be fetched or no
    preferred version is supported.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param server: Which server to connect to.
    @type  server: string
    @param port: Port
    @type  port: int
    @param path: Path
    @type  path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.
    @type  preferredApiVersions: string or string list
    """
    description = __GetServiceVersionDescription(protocol, server, port, path)
    if description is None:
        return None

    # Normalize a single version string into a one-element list.
    if not isinstance(preferredApiVersions, list):
        preferredApiVersions = [preferredApiVersions]

    for candidate in preferredApiVersions:
        if __VersionIsSupported(candidate, description):
            return candidate
    return None
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
                 service="hostd", path="/sdk",
                 preferredApiVersions=None):
    """
    Determine the most preferred API version supported by the specified server,
    then connect to the specified server using that API version, login and return
    the service instance object.

    Throws any exception back to caller. The service instance object is
    also saved in the library for easy access.

    Clients should modify the service parameter only when connecting to
    a VMOMI server other than hostd/vpxd. For both of the latter, the
    default value is fine.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param path: Path
    @type  path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred. If None is
                                 specified, the list of versions support by pyVmomi will
                                 be used.
    @type  preferredApiVersions: string or string list
    """
    if preferredApiVersions is None:
        preferredApiVersions = GetServiceVersions('vim25')

    supportedVersion = __FindSupportedVersion(protocol,
                                              host,
                                              port,
                                              path,
                                              preferredApiVersions)
    if supportedVersion is None:
        raise Exception("%s:%s is not a VIM server" % (host, port))

    # A negative port number tells the SOAP stub to use plain HTTP instead of
    # HTTPS; the old `cond and a or b` idiom is replaced by a real conditional.
    portNumber = -int(port) if protocol == "http" else int(port)

    return Connect(host=host,
                   port=portNumber,
                   user=user,
                   pwd=pwd,
                   service=service,
                   adapter='SOAP',
                   version=supportedVersion,
                   path=path)
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
    """
    Open the specified URL, using HTTP basic authentication to provide
    the specified credentials to the server as part of the request.
    Returns the response as a file-like object.
    """
    credentials = HTTPBasicAuth(user, pwd)
    # NOTE: certificate verification is disabled, matching the rest of this
    # module's requests usage.
    return requests.get(url, auth=credentials, verify=False)
def OpenPathWithStub(path, stub):
    """
    Open the specified path using HTTP, using the host/port/protocol
    associated with the specified stub. If the stub has a session cookie,
    it is included with the HTTP request. Returns the response as a
    file-like object.

    Raises:
        vmodl.fault.NotSupported: If the stub has no recognized HTTP scheme.
    """
    # `httplib` was renamed to `http.client` in Python 3; support both so the
    # scheme comparison below works on either interpreter.
    try:
        import httplib
    except ImportError:
        import http.client as httplib
    if not hasattr(stub, 'scheme'):
        raise vmodl.fault.NotSupported()
    elif stub.scheme == httplib.HTTPConnection:
        protocol = 'http'
    elif stub.scheme == httplib.HTTPSConnection:
        protocol = 'https'
    else:
        raise vmodl.fault.NotSupported()
    hostPort = stub.host
    url = '%s://%s%s' % (protocol, hostPort, path)
    headers = {}
    if stub.cookie:
        # Reuse the stub's authenticated session for this plain HTTP request.
        headers["Cookie"] = stub.cookie
    return requests.get(url, headers=headers, verify=False)
| |
from unittest.mock import MagicMock
from unittest.mock import patch
from dateutil.parser import parse
from django.contrib.auth import get_user_model
from orchestra.core.errors import AssignmentPolicyError
from orchestra.core.errors import CreationPolicyError
from orchestra.core.errors import IllegalTaskSubmission
from orchestra.core.errors import ModelSaveError
from orchestra.core.errors import ReviewPolicyError
from orchestra.core.errors import TaskAssignmentError
from orchestra.core.errors import TaskStatusError
from orchestra.core.errors import WorkerCertificationError
from orchestra.models import Iteration
from orchestra.models import Project
from orchestra.models import Task
from orchestra.models import TaskAssignment
from orchestra.models import Worker
from orchestra.models import WorkerCertification
from orchestra.tests.helpers import OrchestraTransactionTestCase
from orchestra.tests.helpers.fixtures import StepFactory
from orchestra.tests.helpers.fixtures import TaskAssignmentFactory
from orchestra.tests.helpers.fixtures import TaskFactory
from orchestra.tests.helpers.fixtures import TodoFactory
from orchestra.tests.helpers.fixtures import ProjectFactory
from orchestra.tests.helpers.fixtures import setup_models
from orchestra.tests.helpers.fixtures import WorkflowVersionFactory
from orchestra.utils.task_lifecycle import AssignmentPolicyType
from orchestra.utils.task_lifecycle import assert_new_task_status_valid
from orchestra.utils.task_lifecycle import assign_task
from orchestra.utils.task_lifecycle import create_subsequent_tasks
from orchestra.utils.task_lifecycle import get_next_task_status
from orchestra.utils.task_lifecycle import get_task_overview_for_worker
from orchestra.utils.task_lifecycle import is_worker_certified_for_task
from orchestra.utils.task_lifecycle import role_counter_required_for_new_task
from orchestra.utils.task_lifecycle import submit_task
from orchestra.utils.task_lifecycle import tasks_assigned_to_worker
from orchestra.utils.task_lifecycle import worker_assigned_to_rejected_task
from orchestra.utils.task_lifecycle import worker_has_reviewer_status
from orchestra.utils.task_lifecycle import end_project
from orchestra.utils.task_properties import current_assignment
from orchestra.workflow.defaults import get_default_creation_policy
# Fixed ISO-8601 timestamps used by the deadline-related tests in this module.
MOCK_CURRENT = '2018-01-17T00:00:00Z'
DEADLINE1_DATETIME = '2018-01-18T00:00:00Z'
DEADLINE2_DATETIME = '2018-01-19T00:00:00Z'
class BasicTaskLifeCycleTestCase(OrchestraTransactionTestCase):
"""
Test modular functions in the task_lifecycle
"""
    def setUp(self):
        """Create the shared fixtures plus a fresh workflow version,
        step and project for each test."""
        super().setUp()
        # Populates self.workers, self.tasks, etc. used by the tests below.
        setup_models(self)
        self.workflow_version = WorkflowVersionFactory()
        self.step = StepFactory(
            slug='step-slug',
            workflow_version=self.workflow_version)
        self.project = ProjectFactory(
            workflow_version=self.workflow_version)
def test_is_worker_certified_for_task(self):
task = Task.objects.filter(status=Task.Status.AWAITING_PROCESSING)[0]
for worker_certification in (
WorkerCertification.objects.filter(worker=self.workers[0])):
worker_certification.staffbot_enabled = False
worker_certification.save()
# workers[0] has a certification
self.assertTrue(
is_worker_certified_for_task(self.workers[0],
task,
WorkerCertification.Role.ENTRY_LEVEL))
self.assertFalse(
is_worker_certified_for_task(self.workers[0],
task,
WorkerCertification.Role.ENTRY_LEVEL,
require_staffbot_enabled=True))
# workers[2] has no certification
self.assertFalse(
is_worker_certified_for_task(self.workers[2],
task,
WorkerCertification.Role.ENTRY_LEVEL))
def test_not_allowed_new_assignment(self):
assert_new_task_status_valid(Task.Status.AWAITING_PROCESSING)
assert_new_task_status_valid(Task.Status.PENDING_REVIEW)
invalid_statuses = [Task.Status.PROCESSING,
Task.Status.REVIEWING,
Task.Status.POST_REVIEW_PROCESSING,
Task.Status.COMPLETE,
Task.Status.ABORTED]
for status in invalid_statuses:
with self.assertRaises(TaskStatusError):
assert_new_task_status_valid(status)
def test_is_worker_assigned(self):
task = self.tasks['review_task']
# worker is not related to any task
self.assertFalse(task.is_worker_assigned(self.workers[2]))
# worker is assigned to a task.
self.assertTrue(task.is_worker_assigned(self.workers[0]))
# TODO(jrbotros): write this test when per-user max tasks logic created
def test_worker_assigned_to_max_tasks(self):
pass
def test_worker_assigned_to_rejected_task(self):
assignments = TaskAssignment.objects.filter(
worker=self.workers[4],
status=TaskAssignment.Status.PROCESSING,
task__status=Task.Status.POST_REVIEW_PROCESSING)
self.assertTrue(assignments.exists())
self.assertTrue(worker_assigned_to_rejected_task(self.workers[4]))
def test_worker_has_reviewer_status(self):
self.assertFalse(worker_has_reviewer_status(self.workers[0]))
self.assertTrue(worker_has_reviewer_status(self.workers[1]))
self.assertFalse(worker_has_reviewer_status(self.workers[2]))
self.assertFalse(worker_has_reviewer_status(self.workers[4]))
self.assertTrue(worker_has_reviewer_status(self.workers[5]))
self.assertTrue(worker_has_reviewer_status(self.workers[6]))
def test_role_counter_required_for_new_task(self):
task = TaskFactory(status=Task.Status.COMPLETE)
with self.assertRaises(TaskAssignmentError):
role_counter_required_for_new_task(task)
project = self.projects['assignment_policy']
# Create first task in test project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 0
task = project.tasks.first()
counter = role_counter_required_for_new_task(task)
self.assertEqual(counter, 0)
initial_task = assign_task(self.workers[0].id,
task.id)
# Submit task; next task should be created
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=True):
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[0])
counter = role_counter_required_for_new_task(initial_task)
self.assertEqual(counter, 1)
initial_task = assign_task(self.workers[1].id,
task.id)
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[1])
counter = role_counter_required_for_new_task(initial_task)
self.assertEqual(counter, 2)
def test_assign_task(self):
entry_task = TaskFactory(
project=self.projects['base_test_project'],
status=Task.Status.AWAITING_PROCESSING,
step=self.test_step)
# No iterations should be present for task
self.assertEqual(
Iteration.objects.filter(assignment__task=entry_task).count(), 0)
# Assign entry-level task to entry-level worker
entry_task = assign_task(self.workers[0].id, entry_task.id)
self.assertTrue(entry_task.is_worker_assigned(self.workers[0]))
self.assertEqual(entry_task.status, Task.Status.PROCESSING)
self.assertEqual(entry_task.assignments.count(), 1)
entry_assignment = entry_task.assignments.first()
# A single iteration was created for the assignment
self.assertEqual(entry_assignment.iterations.count(), 1)
self.assertEqual(
Iteration.objects.filter(assignment__task=entry_task).count(), 1)
self.assertEqual(
entry_assignment.iterations.first().start_datetime,
entry_assignment.start_datetime)
# Attempt to assign task which isn't awaiting a new assignment
invalid = (Task.Status.PROCESSING, Task.Status.ABORTED,
Task.Status.REVIEWING, Task.Status.COMPLETE,
Task.Status.POST_REVIEW_PROCESSING)
for status in invalid:
invalid_status_task = Task.objects.create(
project=self.projects['base_test_project'],
status=status,
step=self.test_step)
with self.assertRaises(TaskAssignmentError):
invalid_status_task = assign_task(
self.workers[0].id, invalid_status_task.id)
# Attempt to assign review task to worker already in review hierarchy
review_task = Task.objects.create(
project=self.projects['base_test_project'],
status=Task.Status.PENDING_REVIEW,
step=self.test_step)
test_data = {'test_assign': True}
TaskAssignmentFactory(
worker=self.workers[1],
task=review_task,
status=TaskAssignment.Status.SUBMITTED,
in_progress_task_data=test_data)
with self.assertRaises(TaskAssignmentError):
assign_task(self.workers[1].id, review_task.id)
self.assertEqual(
current_assignment(review_task).in_progress_task_data, test_data)
# Attempt to assign review task to worker not certified for task
with self.assertRaises(WorkerCertificationError):
assign_task(self.workers[2].id, review_task.id)
self.assertEqual(
current_assignment(review_task).in_progress_task_data, test_data)
# Assign review task to review worker
self.assertEqual(review_task.assignments.count(), 1)
review_task = assign_task(self.workers[3].id, review_task.id)
self.assertEqual(review_task.assignments.count(), 2)
reviewer_assignment = current_assignment(review_task)
self.assertEqual(
reviewer_assignment.worker, self.workers[3])
self.assertEqual(
reviewer_assignment.in_progress_task_data, test_data)
self.assertEqual(
reviewer_assignment.iterations.count(), 1)
self.assertEqual(
reviewer_assignment.iterations.first().start_datetime,
reviewer_assignment.start_datetime)
self.assertEqual(
review_task.status, Task.Status.REVIEWING)
def test_get_task_overview_for_worker(self):
task = self.tasks['review_task']
with self.assertRaises(TaskAssignmentError):
get_task_overview_for_worker(task.id, self.workers[2])
data = get_task_overview_for_worker(task.id, self.workers[0])
expected = {
'project': {'details': task.project.short_description,
'id': task.project.id,
'project_data': {},
'status': 'Active',
'scratchpad_url': None},
'workflow': {'slug': 'w1',
'name': 'Workflow One'},
'workflow_version': {'slug': 'test_workflow',
'name': 'The workflow'},
'prerequisites': {},
'step': {'slug': 'step1', 'name': 'The first step'},
'status': 'Submitted',
'task': {'data': {'test_key': 'test_value'},
'status': 'Pending Review'},
'task_id': task.id,
'assignment_id': task.assignments.get(worker=self.workers[0]).id,
'is_reviewer': False,
'is_read_only': True,
'is_project_admin': False,
'worker': {
'username': self.workers[0].user.username,
'first_name': self.workers[0].user.first_name,
'last_name': self.workers[0].user.last_name,
}
}
self.assertEqual(data, expected)
# Superuser should have access to latest task data
superuser = get_user_model().objects.create_superuser(
'superuser', 'superuser@b12.io', 'test-password')
superworker = Worker.objects.create(user=superuser)
data = get_task_overview_for_worker(task.id, superworker)
expected['is_project_admin'] = True
self.assertEqual(data, expected)
def test_task_assignment_saving(self):
"""
Ensure that workers are required for human tasks,
and no workers are required for machine tasks.
"""
workflow_version = self.workflow_versions['test_workflow_2']
simple_machine = self.workflow_steps[
workflow_version.slug]['simple_machine']
project = Project.objects.create(workflow_version=workflow_version,
short_description='',
priority=0,
task_class=0)
task = Task.objects.create(project=project,
status=Task.Status.PROCESSING,
step=simple_machine)
# We expect an error because a worker
# is being saved on a machine task.
with self.assertRaises(ModelSaveError):
TaskAssignment.objects.create(worker=self.workers[0],
task=task,
status=0,
in_progress_task_data={})
human_step = self.workflow_steps[workflow_version.slug]['step4']
task = Task.objects.create(project=project,
status=Task.Status.PROCESSING,
step=human_step)
# We expect an error because no worker
# is being saved on a human task
with self.assertRaises(ModelSaveError):
TaskAssignment.objects.create(task=task,
status=0,
in_progress_task_data={})
def test_illegal_get_next_task_status(self):
task = self.tasks['awaiting_processing']
illegal_statuses = [
Task.Status.AWAITING_PROCESSING,
Task.Status.PENDING_REVIEW,
Task.Status.COMPLETE
]
iteration_statuses = [
Iteration.Status.REQUESTED_REVIEW,
Iteration.Status.PROVIDED_REVIEW
]
for status in illegal_statuses:
for iteration_status in iteration_statuses:
with self.assertRaises(IllegalTaskSubmission):
task.status = status
get_next_task_status(task, iteration_status)
# Entry level-related statuses cannot be rejected
with self.assertRaises(IllegalTaskSubmission):
task.status = Task.Status.PROCESSING
get_next_task_status(task, Iteration.Status.PROVIDED_REVIEW)
with self.assertRaises(IllegalTaskSubmission):
task.status = Task.Status.POST_REVIEW_PROCESSING
get_next_task_status(task, Iteration.Status.PROVIDED_REVIEW)
def test_sampled_get_next_task_status(self):
task = self.tasks['awaiting_processing']
step = task.step
step.review_policy = {'policy': 'sampled_review',
'rate': 0.5,
'max_reviews': 1}
step.save()
task.status = Task.Status.PROCESSING
complete_count = 0
for i in range(0, 1000):
next_status = get_next_task_status(
task, Iteration.Status.REQUESTED_REVIEW)
complete_count += next_status == Task.Status.COMPLETE
self.assertTrue(complete_count > 400)
self.assertTrue(complete_count < 600)
def test_legal_get_next_task_status(self):
task = self.tasks['awaiting_processing']
step = task.step
task.status = Task.Status.PROCESSING
step.review_policy = {}
step.save()
with self.assertRaises(ReviewPolicyError):
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW)
step.review_policy = {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 1}
step.save()
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.PENDING_REVIEW)
step.review_policy = {'policy': 'sampled_review',
'rate': 0,
'max_reviews': 1}
step.save()
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.COMPLETE)
task.status = Task.Status.POST_REVIEW_PROCESSING
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.REVIEWING)
task = self.tasks['review_task']
task.status = Task.Status.REVIEWING
step.review_policy = {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 0}
step.save()
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.COMPLETE)
step.review_policy = {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 2}
step.save()
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.PENDING_REVIEW)
# after max reviews done a task goes to state complete
TaskAssignment.objects.create(
worker=self.workers[1],
task=task,
status=TaskAssignment.Status.SUBMITTED,
assignment_counter=1,
in_progress_task_data={})
task.save()
step.review_policy = {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 1}
step.save()
self.assertEqual(
get_next_task_status(task,
Iteration.Status.REQUESTED_REVIEW),
Task.Status.COMPLETE)
def test_preassign_workers(self):
project = self.projects['assignment_policy']
# Create first task in test project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 0
initial_task = assign_task(self.workers[0].id,
project.tasks.first().id)
# Submit task; next task should be created
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=False):
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[0])
self.assertEqual(project.tasks.count(), 2)
related_task = project.tasks.exclude(id=initial_task.id).first()
# Worker 0 not certified for related tasks, so should not have been
# auto-assigned
self.assertEqual(related_task.assignments.count(), 0)
self.assertEqual(related_task.status, Task.Status.AWAITING_PROCESSING)
# Reset project
project.tasks.all().delete()
# Create first task in test project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 0
initial_task = assign_task(self.workers[0].id,
project.tasks.first().id)
# Submit task; verify we use the reviewer assignment policy
mock_preassign_workers = MagicMock(return_value=initial_task)
patch_path = 'orchestra.utils.task_lifecycle._preassign_workers'
with patch(patch_path, new=mock_preassign_workers):
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[0])
mock_preassign_workers.assert_called_once_with(
initial_task, AssignmentPolicyType.REVIEWER)
# Reset project
project.tasks.all().delete()
# Create first task in test project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 4
initial_task = assign_task(self.workers[4].id,
project.tasks.first().id)
# Submit task; next task should be created
with patch('orchestra.utils.task_lifecycle._is_review_needed',
return_value=False):
initial_task = submit_task(initial_task.id, {},
Iteration.Status.REQUESTED_REVIEW,
self.workers[4])
self.assertEqual(project.tasks.count(), 2)
related_task = project.tasks.exclude(id=initial_task.id).first()
# Worker 4 is certified for related task and should have been assigned
self.assertEqual(related_task.assignments.count(), 1)
self.assertEqual(related_task.status, Task.Status.PROCESSING)
self.assertTrue(
related_task.is_worker_assigned(self.workers[4]))
def test_todolist_templates_to_apply(self):
project = self.projects['assignment_policy']
mock = MagicMock(return_value=True)
with patch('orchestra.utils.task_lifecycle.add_todolist_template',
new=mock):
# Create first task in test project
create_subsequent_tasks(project)
assert mock.called_once
assert mock.call_args[0][0] == 'project-checklist'
def test_malformed_assignment_policy(self):
project = self.projects['assignment_policy']
workflow_version = project.workflow_version
first_step = self.workflow_steps[workflow_version.slug]['step_0']
# Create an invalid machine step with an assignment policy
malformed_step = StepFactory(
workflow_version=workflow_version,
slug='machine_step',
is_human=False,
assignment_policy={
'policy_function': {
'entry_level': {
'path': ('orchestra.assignment_policies.'
'previously_completed_steps'),
'kwargs': {
'related_steps': ['step_0']
},
}
}
},
creation_policy=get_default_creation_policy(),
)
malformed_step.creation_depends_on.add(first_step)
# Create first task in project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 0 and mark as complete
initial_task = assign_task(self.workers[4].id,
project.tasks.first().id)
initial_task.status = Task.Status.COMPLETE
initial_task.save()
# Cannot preassign machine task
with self.assertRaises(AssignmentPolicyError):
create_subsequent_tasks(project)
# Reset project
project.tasks.all().delete()
# Machine should not be member of assignment policy
first_step.assignment_policy = {
'policy_function': {
'entry_level': {
'path': ('orchestra.assignment_policies.'
'previously_completed_steps'),
'kwargs': {
'related_steps': ['machine_step']
},
},
}
}
first_step.save()
with self.assertRaises(AssignmentPolicyError):
create_subsequent_tasks(project)
def test_malformed_creation_policy(self):
project = self.projects['creation_policy']
workflow_version = project.workflow_version
first_step = self.workflow_steps[
workflow_version.slug]['creation_policy_step_0']
# Create an invalid machine step with an assignment policy
malformed_step = StepFactory(
workflow_version=workflow_version,
slug='machine_step',
is_human=False,
creation_policy={},
)
malformed_step.creation_depends_on.add(first_step)
# Create first task in project
create_subsequent_tasks(project)
self.assertEqual(project.tasks.count(), 1)
# Assign initial task to worker 0 and mark as complete
initial_task = assign_task(self.workers[4].id,
project.tasks.first().id)
initial_task.status = Task.Status.COMPLETE
initial_task.save()
# Cannot have an invalid blob for the creation_policy
with self.assertRaises(CreationPolicyError):
create_subsequent_tasks(project)
@patch('orchestra.utils.task_lifecycle.schedule_machine_tasks')
def test_schedule_machine_tasks(self, mock_schedule):
project = self.projects['test_human_and_machine']
# Create first task in project
create_subsequent_tasks(project)
# Assign initial task to worker 0 and mark as complete
initial_task = assign_task(self.workers[0].id,
project.tasks.first().id)
initial_task.status = Task.Status.COMPLETE
initial_task.save()
create_subsequent_tasks(project)
self.assertEqual(mock_schedule.call_count, 1)
self.assertEqual(mock_schedule.call_args[0][0], project)
steps = list(project.workflow_version.steps.filter(is_human=False))
self.assertEqual(mock_schedule.call_args[0][1], steps)
@patch('orchestra.utils.task_lifecycle._preassign_workers')
@patch('orchestra.utils.task_lifecycle.schedule_machine_tasks')
def test_schedule_machine_tasks_failed(self, mock_schedule,
mock_preassign):
project = self.projects['test_human_and_machine']
# Create first task in project
create_subsequent_tasks(project)
# Assign initial task to worker 0 and mark as complete
initial_task = assign_task(self.workers[0].id,
project.tasks.first().id)
initial_task.status = Task.Status.COMPLETE
initial_task.save()
mock_preassign.side_effect = Exception
with self.assertRaises(Exception):
create_subsequent_tasks(project)
mock_schedule.assert_not_called()
def test_next_todo_with_earlier_due_time(self):
task = self.tasks['next_todo_task']
task.step = self.step
task.project = self.project
task.save()
# create todos with different due date times and one without
TodoFactory(
step=self.step,
project=self.project,
title='todo1',
due_datetime=parse(DEADLINE2_DATETIME))
TodoFactory(
step=self.step,
project=self.project,
title='todo2')
tasks_assigned = tasks_assigned_to_worker(self.workers[5])
for t in tasks_assigned:
if t['id'] == task.id:
next_todo_due = t['next_todo_dict'].get('due_datetime', None)
# should select the todo with a deadline
self.assertEqual(next_todo_due, DEADLINE2_DATETIME)
self.assertEqual(t['should_be_active'], True)
TodoFactory(
step=self.step,
project=self.project,
title='todo3',
due_datetime=parse(DEADLINE1_DATETIME))
tasks_assigned = tasks_assigned_to_worker(self.workers[5])
for t in tasks_assigned:
if t['id'] == task.id:
next_todo_due = t['next_todo_dict'].get('due_datetime', None)
# should select the todo with earliest deadline
self.assertEqual(next_todo_due, DEADLINE1_DATETIME)
self.assertEqual(t['should_be_active'], True)
@patch('orchestra.utils.task_lifecycle.timezone')
def test_next_todo_with_earlier_start_time(self, mock_timezone):
mock_timezone.now = MagicMock(return_value=parse(
MOCK_CURRENT))
task = self.tasks['next_todo_task']
task.step = self.step
task.project = self.project
task.save()
# create todos with different due date times and one without
TodoFactory(
step=self.step,
project=self.project,
title='todo1',
start_by_datetime=parse(DEADLINE2_DATETIME))
TodoFactory(
step=self.step,
project=self.project,
title='todo2',
start_by_datetime=parse(DEADLINE1_DATETIME))
tasks_assigned = tasks_assigned_to_worker(self.workers[5])
for t in tasks_assigned:
if t['id'] == task.id:
next_todo_start = t['next_todo_dict'].get(
'start_by_datetime', None)
# should select the todo with a deadline
self.assertEqual(next_todo_start, DEADLINE1_DATETIME)
self.assertEqual(t['should_be_active'], False)
class EndProjectTestCase(OrchestraTransactionTestCase):
    """Tests for ending a project via end_project()."""
    def setUp(self):
        """Build the shared fixture models."""
        super().setUp()
        setup_models(self)
    @patch('orchestra.utils.task_lifecycle.archive_project_slack_group')
    @patch('orchestra.tests.helpers.workflow.abort_cleanup_function')
    def test_end_project_calls_abort_completion_function(
            self, mock_abort_cleanup_fn, mock_archive_project_slack_group):
        """Ending a project invokes the workflow's abort-completion hook
        with the kwargs configured on the workflow version."""
        fn_path = 'orchestra.tests.helpers.workflow.abort_cleanup_function'
        cleanup_spec = {
            'path': fn_path,
            'kwargs': {'some_key': 'some_value'},
        }
        project = self.projects['test_human_and_machine']
        version = project.workflow_version
        version.abort_completion_function = cleanup_spec
        version.save()
        end_project(project.id)
        mock_abort_cleanup_fn.assert_called_with(
            project, **cleanup_spec['kwargs'])
| |
# -*- coding: latin-1 -*-
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing tests for keyboard module"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os
import unittest
import subprocess
import time
sys.path.append(".")
if sys.platform == 'win32':
from pywinauto.keyboard import send_keys, KeySequenceError
from pywinauto.keyboard import KeyAction, VirtualKeyAction, PauseAction
from pywinauto.sysinfo import is_x64_Python, is_x64_OS
from pywinauto.application import Application
else:
from pywinauto import mouse
from pywinauto.linux.keyboard import send_keys, KeySequenceError, KeyAction
from pywinauto.linux import clipboard
def mfc_samples():
    """Return the folder containing the MFC sample executables.

    On a 64-bit Python interpreter the 'x64' subfolder is used so the
    test binaries match the interpreter's bitness.
    """
    folder = os.path.join(
        os.path.dirname(__file__), r"..\..\apps\MFC_samples")
    return os.path.join(folder, 'x64') if is_x64_Python() else folder
def _notepad_exe():
    """Return the path of the Notepad binary matching the Python bitness.

    A 32-bit Python on a 64-bit OS must use the SysWOW64 copy; every
    other combination uses the System32 copy.
    """
    if is_x64_Python() or not is_x64_OS():
        return r"C:\Windows\System32\notepad.exe"
    return r"C:\Windows\SysWOW64\notepad.exe"
def _test_app():
test_folder = os.path.join(os.path.dirname
(os.path.dirname
(os.path.dirname
(os.path.abspath(__file__)))),
r"apps/SendKeysTester")
return os.path.join(test_folder, r"send_keys_test_app")
class SendKeysTests(unittest.TestCase):
    """Unit tests for the send_keys module (cross-platform).

    On win32 the keys are typed into Notepad; on Linux they are typed
    into a small test app and read back through the clipboard.
    """
    def setUp(self):
        """Start the application set some data and ensure the application is in the state we want it."""
        if sys.platform == 'win32':
            self.app = Application()
            self.app.start(_notepad_exe())
            self.dlg = self.app.UntitledNotepad
            self.ctrl = self.dlg.Edit
        else:
            self.app = subprocess.Popen("exec " + _test_app(), shell=True)
            time.sleep(0.1)
            # focus the test app window before typing
            mouse.click(coords=(300, 300))
            time.sleep(0.1)
    def tearDown(self):
        """Close the application after tests"""
        if sys.platform == 'win32':
            try:
                self.dlg.close(0.1)
            except Exception:  # TimeoutError:
                pass
            try:
                # dismiss the "save changes?" prompt if Notepad shows one
                if self.app.Notepad["Do&n't Save"].exists():
                    self.app.Notepad["Do&n't Save"].click()
                    self.app.Notepad["Do&n't Save"].wait_not('visible')
            except Exception:  # TimeoutError:
                pass
            finally:
                if self.dlg.exists(timeout=0.1):
                    self.app.kill()
        else:
            # call Popen.kill() on Linux since Application.kill() is not implemented yet
            self.app.kill()
    def receive_text(self):
        """Receive data from text field"""
        received = ' '
        if sys.platform == 'win32':
            received = self.ctrl.text_block()
        else:
            # select-all, copy, then read the text back from the clipboard
            time.sleep(0.2)
            send_keys('^a')
            time.sleep(0.2)
            send_keys('^c')
            send_keys('{RIGHT}')
            received = clipboard.get_data()
        return received
    def __run_NormalCharacters_with_options(self, **args):
        """Type every printable ASCII character and verify the echo."""
        #unused var: missed = []
        for i in range(32, 127):
            # skip characters that must be escaped
            if chr(i) in '~!@#$%^&*()_+{}|:"<>? ':
                continue
            send_keys(chr(i), pause = .001, **args)
            received = self.receive_text()[-1]
            self.assertEqual(i, ord(received))
    # Space tests
    def testNormalWithSpaces(self):
        """Make sure that with_spaces option works"""
        self.__run_NormalCharacters_with_options(with_spaces = True)
    def testNormalWithoutSpaces(self):
        """Make sure that with_spaces option works"""
        self.__run_NormalCharacters_with_options(with_spaces = False)
    def testSpaceWithSpaces(self):
        """Make sure that with_spaces option works"""
        send_keys(" \t \t ", pause = .001, with_spaces = True)
        received = self.receive_text()
        self.assertEqual("   ", received)
    def testSpaceWithoutSpaces(self):
        """Make sure that with_spaces option works"""
        send_keys(" \t \t ", pause = .001, with_spaces = False)
        received = self.receive_text()
        self.assertEqual("", received)
    # Tab tests
    def testNormalWithTabs(self):
        """Make sure that with_tabs option works"""
        self.__run_NormalCharacters_with_options(with_tabs = True)
    def testNormalWithoutTabs(self):
        """Make sure that with_tabs option works"""
        self.__run_NormalCharacters_with_options(with_tabs = False)
    def testTabWithTabs(self):
        """Make sure that with_tabs option works"""
        send_keys("\t \t \t", pause = .1, with_tabs = True)
        received = self.receive_text()
        self.assertEqual("\t\t\t", received)
    def testTabWithoutTabs(self):
        """Make sure that with_tabs option works"""
        send_keys("\t a\t b\t", pause = .1, with_tabs = False)
        received = self.receive_text()
        self.assertEqual("ab", received)
    def testTab(self):
        """Make sure that the {TAB} key code produces a tab character"""
        send_keys("{TAB}  {TAB} ", pause = .3)
        received = self.receive_text()
        self.assertEqual("\t\t", received)
    # Newline tests
    def testNormalWithNewlines(self):
        """Make sure that with_newlines option works"""
        self.__run_NormalCharacters_with_options(with_newlines = True)
    def testNormalWithoutNewlines(self):
        """Make sure that with_newlines option works"""
        self.__run_NormalCharacters_with_options(with_newlines = False)
    def testNewlinesWithNewlines(self):
        """Make sure that with_newlines option works"""
        send_keys("\t \t \t a~\tb\nc", pause = .5, with_newlines = True)
        received = self.receive_text()
        if sys.platform == 'win32':
            # Notepad stores line ends as CRLF
            self.assertEqual("a\r\nb\r\nc", received)
        else:
            self.assertEqual("a\nb\nc", received)
    def testNewlinesWithoutNewlines(self):
        """Make sure that with_newlines option works"""
        send_keys("\t \t \t\na", pause = .01, with_newlines = False)
        received = self.receive_text()
        self.assertEqual("a", received)
    #def testANSIExtendedCharacters(self):
    #    "Make sure that sending any character in range "
    #    #self.cmd = Application()
    #    #self.cmd.start("cmd.exe", create_new_console=True, wait_for_idle=False)
    #    ActionLogger().log('Preferred encoding: ' + locale.getpreferredencoding())
    #
    #    #os.system("chcp 850")
    #    matched = 0
    #    extended_chars = b"\x81\x82\x83\xa1\xe1\xff"
    #    for char in extended_chars:
    #        if six.PY3:
    #            c = str(char)
    #        else:
    #            c = char.decode(locale.getpreferredencoding()) #'cp850')
    #        send_keys(c, pause = .01)
    #        received = self.receive_text()[-1]
    #        if c == received:
    #            matched += 1
    #        else:
    #            print("expected %s, received %s"% (
    #                repr(c), repr(received)))
    #    self.assertEqual(matched, len(extended_chars))
    def testCharsThatMustBeEscaped(self):
        """Make sure that escaping characters works"""
        send_keys("{%}{^}{+}{(}{)}{{}{}}{~}")
        received = self.receive_text()
        self.assertEqual("%^+(){}~", received)
    def testIncorrectCases(self):
        """Make sure that incorrect key sequences raise an exception"""
        self.assertRaises(KeySequenceError, send_keys, "{ENTER")
        self.assertRaises(KeySequenceError, send_keys, "ENTER)")
        self.assertRaises(RuntimeError, send_keys, "%{Enterius}")
        self.assertRaises(KeySequenceError, send_keys, "{PAUSE small}")
        try:
            send_keys("{ENTER five}")
        except KeySequenceError as exc:
            self.assertEqual("invalid repetition count five", str(exc))
        try:
            send_keys("ENTER}")
        except KeySequenceError as exc:
            self.assertEqual("`}` should be preceeded by `{`", str(exc))
    def testKeyDescription(self):
        """Test the string representation of key action objects"""
        self.assertEqual("<X>", str(KeyAction("X")))
        self.assertEqual("<Y down>", str(KeyAction("Y", up=False)))
        self.assertEqual("<Y up>", str(KeyAction("Y", down=False)))
        #self.assertEqual("<ENTER>", str(VirtualKeyAction(13))) # == "<VK_RETURN>" in Python 2.7 (TODO)
        if sys.platform == 'win32':
            self.assertEqual("<PAUSE 1.00>", str(PauseAction(1.0)))
    def testRepetition(self):
        """Make sure that repeated action works"""
        send_keys("{TAB 3}{PAUSE 0.5}{F 3}", pause = .3)
        received = self.receive_text()
        self.assertEqual("\t\t\tFFF", received)
    def testShiftModifier(self):
        """Make sure that Shift modifier works"""
        send_keys("+(a)")
        received = self.receive_text()
        self.assertEqual("A", received)
    if sys.platform != 'win32':
        def testAltModifier(self):
            """Make sure that alt modifier works"""
            clipboard.set_data('abc')
            # check alt via opening edit menu and paste text from clipboard
            time.sleep(0.3)
            send_keys('%(e)')
            time.sleep(0.3)
            send_keys('{ENTER}')
            received = self.receive_text()
            self.assertEqual('abc', received)
# Windows-only: Application and the MFC sample binaries are only
# available on win32 (see the conditional imports at the top of the file).
if sys.platform == 'win32':
    class SendKeysModifiersTests(unittest.TestCase):
        """Unit tests for the Sendkeys module (modifiers)"""
        def setUp(self):
            """Start the application and ensure it's in the state we want"""
            self.app = Application().start(os.path.join(mfc_samples(), u"CtrlTest.exe"))
            self.dlg = self.app.Control_Test_App
        def tearDown(self):
            """Close the application after tests"""
            try:
                self.dlg.close(0.5)
            except Exception:
                pass
            finally:
                self.app.kill()
        def testModifiersForFewChars(self):
            """Make sure that repeated action works"""
            # Alt+S, Alt+C opens the "C++ Derived Class" dialog
            send_keys("%(SC)", pause = .3)
            dlg = self.app.window(title='Using C++ Derived Class')
            dlg.wait('ready')
            dlg.Done.close_click()
            dlg.wait_not('visible')
            # Alt+H plus arrow-key navigation opens the spin-control dialog
            send_keys("%(H{LEFT}{UP}{ENTER})", pause = .3)
            dlg = self.app.window(title='Sample Dialog with spin controls')
            dlg.wait('ready')
            dlg.Done.close_click()
            dlg.wait_not('visible')
#====================================================================
# Run the test suite when the module is executed directly.
if __name__ == "__main__":
    unittest.main()
    #import doctest
    #doctest.testmod()
| |
import contextlib
import os
from testtools import TestCase
from mock import Mock
from troveclient import base
from troveclient import exceptions
"""
Unit tests for base.py
"""
def obj_class(self, res, loaded=True):
    """Identity stand-in for a resource class: returns ``res`` unchanged.

    ``loaded`` is accepted only for signature compatibility and ignored.
    """
    return res
class BaseTest(TestCase):
    """Tests for the module-level helpers in troveclient.base."""
    def test_getid(self):
        """getid() passes plain values through and unwraps `.id` attrs."""
        plain = "test"
        self.assertEqual(plain, base.getid(plain))
        expected_id = "test_id"
        resource = Mock()
        resource.id = expected_id
        self.assertEqual(expected_id, base.getid(resource))
class ManagerTest(TestCase):
    """Tests for base.Manager.

    setUp() replaces Manager.__init__ with a no-op Mock so instances can
    be built without a real API client; tearDown() restores the original.
    """

    def setUp(self):
        super(ManagerTest, self).setUp()
        # Keep the originals so tearDown can undo the monkey-patching.
        self.orig__init = base.Manager.__init__
        base.Manager.__init__ = Mock(return_value=None)
        self.orig_os_makedirs = os.makedirs

    def tearDown(self):
        super(ManagerTest, self).tearDown()
        base.Manager.__init__ = self.orig__init
        os.makedirs = self.orig_os_makedirs

    def test___init__(self):
        """The real constructor stores the api handle on the manager."""
        api = Mock()
        # Temporarily restore the real __init__ just for this test.
        base.Manager.__init__ = self.orig__init
        manager = base.Manager(api)
        self.assertEqual(api, manager.api)

    def test_completion_cache(self):
        """completion_cache() must not propagate cache-directory errors."""
        manager = base.Manager()
        # handling exceptions
        mode = "w"
        cache_type = "unittest"
        obj_class = Mock
        with manager.completion_cache(cache_type, obj_class, mode):
            pass
        # Even when the cache directory cannot be created, the context
        # manager should swallow the OSError.
        os.makedirs = Mock(side_effect=OSError)
        with manager.completion_cache(cache_type, obj_class, mode):
            pass

    def test_write_to_completion_cache(self):
        """Writes go to the matching "_<type>_cache" attribute if present."""
        manager = base.Manager()
        # no cache object, nothing should happen
        manager.write_to_completion_cache("non-exist", "val")

        def side_effect_func(val):
            return val

        manager._mock_cache = Mock()
        manager._mock_cache.write = Mock(return_value=None)
        manager.write_to_completion_cache("mock", "val")
        self.assertEqual(1, manager._mock_cache.write.call_count)

    def _get_mock(self):
        """Build a Manager whose resource_class echoes the body back."""
        manager = base.Manager()
        manager.api = Mock()
        manager.api.client = Mock()

        def side_effect_func(self, body, loaded=True):
            return body

        manager.resource_class = Mock(side_effect=side_effect_func)
        return manager

    def test__get_with_response_key_none(self):
        """With no response_key, _get returns the whole response body."""
        manager = self._get_mock()
        url_ = "test-url"
        body_ = "test-body"
        resp_ = "test-resp"
        manager.api.client.get = Mock(return_value=(resp_, body_))
        r = manager._get(url=url_, response_key=None)
        self.assertEqual(body_, r)

    def test__get_with_response_key(self):
        """With a response_key, _get unwraps that entry from the body."""
        manager = self._get_mock()
        response_key = "response_key"
        body_ = {response_key: "test-resp-key-body"}
        url_ = "test_url_get"
        manager.api.client.get = Mock(return_value=(url_, body_))
        r = manager._get(url=url_, response_key=response_key)
        self.assertEqual(body_[response_key], r)

    def test__create(self):
        """_create returns the raw entry or a resource_class-wrapped one."""
        manager = base.Manager()
        manager.api = Mock()
        manager.api.client = Mock()
        response_key = "response_key"
        data_ = "test-data"
        body_ = {response_key: data_}
        url_ = "test_url_post"
        manager.api.client.post = Mock(return_value=(url_, body_))

        # return_raw=True: the body entry comes back untouched.
        return_raw = True
        r = manager._create(url_, body_, response_key, return_raw)
        self.assertEqual(data_, r)

        # return_raw=False: the entry is wrapped by resource_class inside
        # the (mocked-out) completion cache context.
        return_raw = False

        @contextlib.contextmanager
        def completion_cache_mock(*arg, **kwargs):
            yield

        mock = Mock()
        mock.side_effect = completion_cache_mock
        manager.completion_cache = mock
        manager.resource_class = Mock(return_value="test-class")
        r = manager._create(url_, body_, response_key, return_raw)
        self.assertEqual("test-class", r)

    def get_mock_mng_api_client(self):
        """Return a Manager with a mocked api.client attached."""
        manager = base.Manager()
        manager.api = Mock()
        manager.api.client = Mock()
        return manager

    def test__delete(self):
        resp_ = "test-resp"
        body_ = "test-body"
        manager = self.get_mock_mng_api_client()
        manager.api.client.delete = Mock(return_value=(resp_, body_))
        # _delete just calls api.client.delete, and does nothing
        # the correctness should be tested in api class
        manager._delete("test-url")
        pass

    def test__update(self):
        """_update PUTs the body and returns the response body."""
        resp_ = "test-resp"
        body_ = "test-body"
        manager = self.get_mock_mng_api_client()
        manager.api.client.put = Mock(return_value=(resp_, body_))
        body = manager._update("test-url", body_)
        self.assertEqual(body_, body)
class ManagerListTest(ManagerTest):
    """Tests for base.Manager._list over both GET and POST transports."""

    def setUp(self):
        super(ManagerListTest, self).setUp()

        @contextlib.contextmanager
        def completion_cache_mock(*arg, **kwargs):
            yield

        self.manager = base.Manager()
        self.manager.api = Mock()
        self.manager.api.client = Mock()
        self.response_key = "response_key"
        # Canned POST payload (used when a request body is supplied).
        self.data_p = ["p1", "p2"]
        self.body_p = {self.response_key: self.data_p}
        self.url_p = "test_url_post"
        self.manager.api.client.post = Mock(return_value=(self.url_p,
                                                          self.body_p))
        # Canned GET payload (used when no request body is supplied).
        self.data_g = ["g1", "g2", "g3"]
        self.body_g = {self.response_key: self.data_g}
        self.url_g = "test_url_get"
        self.manager.api.client.get = Mock(return_value=(self.url_g,
                                                         self.body_g))
        mock = Mock()
        mock.side_effect = completion_cache_mock
        self.manager.completion_cache = mock

    def tearDown(self):
        super(ManagerListTest, self).tearDown()

    def obj_class(self, res, loaded=True):
        # Pass-through resource factory (mirrors the module-level helper).
        return res

    def test_list_with_body_none(self):
        """A missing body routes the request through api.client.get."""
        body = None
        l = self.manager._list("url", self.response_key, obj_class, body)
        self.assertEqual(len(self.data_g), len(l))
        for i in range(0, len(l)):
            self.assertEqual(self.data_g[i], l[i])

    def test_list_body_not_none(self):
        """A provided body routes the request through api.client.post."""
        body = "something"
        l = self.manager._list("url", self.response_key, obj_class, body)
        self.assertEqual(len(self.data_p), len(l))
        for i in range(0, len(l)):
            self.assertEqual(self.data_p[i], l[i])

    def test_list_key_mapping(self):
        # When the response data is a dict with a "values" key, that inner
        # list is what gets returned.
        data_ = {"values": ["p1", "p2"]}
        body_ = {self.response_key: data_}
        url_ = "test_url_post"
        self.manager.api.client.post = Mock(return_value=(url_, body_))
        l = self.manager._list("url", self.response_key,
                               obj_class, "something")
        data = data_["values"]
        self.assertEqual(len(data), len(l))
        for i in range(0, len(l)):
            self.assertEqual(data[i], l[i])

    def test_list_without_key_mapping(self):
        # A dict without a "values" key is iterated as-is.
        data_ = {"v1": "1", "v2": "2"}
        body_ = {self.response_key: data_}
        url_ = "test_url_post"
        self.manager.api.client.post = Mock(return_value=(url_, body_))
        l = self.manager._list("url", self.response_key,
                               obj_class, "something")
        self.assertEqual(len(data_), len(l))
class ManagerWithFind(TestCase):
    """Tests for base.ManagerWithFind.find()/findall()."""

    def setUp(self):
        super(ManagerWithFind, self).setUp()
        # Bypass the real constructor; the tests mock list() directly.
        self.orig__init = base.ManagerWithFind.__init__
        base.ManagerWithFind.__init__ = Mock(return_value=None)
        self.manager = base.ManagerWithFind()

    def tearDown(self):
        super(ManagerWithFind, self).tearDown()
        base.ManagerWithFind.__init__ = self.orig__init

    def test_find(self):
        """find() returns the single match; raises on zero or many matches."""
        obj1 = Mock()
        obj1.attr1 = "v1"
        obj1.attr2 = "v2"
        obj1.attr3 = "v3"
        obj2 = Mock()
        obj2.attr1 = "v1"
        obj2.attr2 = "v2"
        self.manager.list = Mock(return_value=[obj1, obj2])
        self.manager.resource_class = Mock

        # exactly one match case
        found = self.manager.find(attr1="v1", attr2="v2", attr3="v3")
        self.assertEqual(obj1, found)

        # no match case
        self.assertRaises(exceptions.NotFound, self.manager.find,
                          attr1="v2", attr2="v2", attr3="v3")

        # multiple matches case
        obj2.attr3 = "v3"
        self.assertRaises(exceptions.NoUniqueMatch, self.manager.find,
                          attr1="v1", attr2="v2", attr3="v3")

    def test_findall(self):
        """findall() returns every object matching all given attributes."""
        obj1 = Mock()
        obj1.attr1 = "v1"
        obj1.attr2 = "v2"
        obj1.attr3 = "v3"
        obj2 = Mock()
        obj2.attr1 = "v1"
        obj2.attr2 = "v2"
        self.manager.list = Mock(return_value=[obj1, obj2])

        found = self.manager.findall(attr1="v1", attr2="v2", attr3="v3")
        self.assertEqual(1, len(found))
        self.assertEqual(obj1, found[0])

        found = self.manager.findall(attr1="v2", attr2="v2", attr3="v3")
        self.assertEqual(0, len(found))

        # attr7 is auto-created by Mock, but its value won't equal "v1".
        found = self.manager.findall(attr7="v1", attr2="v2")
        self.assertEqual(0, len(found))

    def test_list(self):
        # this method is not yet implemented, exception expected
        self.assertRaises(NotImplementedError, self.manager.list)
class ResourceTest(TestCase):
    """Tests for base.Resource."""

    def setUp(self):
        super(ResourceTest, self).setUp()
        # Remember the real initializer; some tests replace it with a Mock.
        self.orig___init__ = base.Resource.__init__

    def tearDown(self):
        super(ResourceTest, self).tearDown()
        base.Resource.__init__ = self.orig___init__

    def test___init__(self):
        """Only 36-char (UUID-length) ids are written to the completion cache."""
        manager = Mock()
        manager.write_to_completion_cache = Mock(return_value=None)
        info_ = {}
        robj = base.Resource(manager, info_)
        self.assertEqual(0, manager.write_to_completion_cache.call_count)

        info_ = {"id": "id-with-less-than-36-char"}
        robj = base.Resource(manager, info_)
        self.assertEqual(info_["id"], robj.id)
        self.assertEqual(0, manager.write_to_completion_cache.call_count)

        # Pad the id out to exactly 36 characters - now it gets cached.
        id_ = "id-with-36-char-"
        for i in range(36 - len(id_)):
            id_ = id_ + "-"
        info_ = {"id": id_}
        robj = base.Resource(manager, info_)
        self.assertEqual(info_["id"], robj.id)
        self.assertEqual(1, manager.write_to_completion_cache.call_count)

        info_["name"] = "test-human-id"
        # Resource.HUMAN_ID is False
        robj = base.Resource(manager, info_)
        self.assertEqual(info_["id"], robj.id)
        self.assertEqual(None, robj.human_id)
        self.assertEqual(2, manager.write_to_completion_cache.call_count)

        # base.Resource.HUMAN_ID = True
        info_["HUMAN_ID"] = True
        robj = base.Resource(manager, info_)
        self.assertEqual(info_["id"], robj.id)
        self.assertEqual(info_["name"], robj.human_id)
        self.assertEqual(4, manager.write_to_completion_cache.call_count)

    def test_human_id(self):
        """human_id is None unless HUMAN_ID is set, and tracks the name."""
        manager = Mock()
        manager.write_to_completion_cache = Mock(return_value=None)
        info_ = {"name": "test-human-id"}
        robj = base.Resource(manager, info_)
        self.assertEqual(None, robj.human_id)
        info_["HUMAN_ID"] = True
        robj = base.Resource(manager, info_)
        self.assertEqual(info_["name"], robj.human_id)
        robj.name = "new-human-id"
        self.assertEqual("new-human-id", robj.human_id)

    def get_mock_resource_obj(self):
        """Return a Resource built without running the real __init__."""
        base.Resource.__init__ = Mock(return_value=None)
        robj = base.Resource()
        return robj

    def test__add_details(self):
        """_add_details() copies every info entry onto the instance."""
        robj = self.get_mock_resource_obj()
        info_ = {"name": "test-human-id", "test_attr": 5}
        robj._add_details(info_)
        self.assertEqual(info_["name"], robj.name)
        self.assertEqual(info_["test_attr"], robj.test_attr)

    def test___getattr__(self):
        robj = self.get_mock_resource_obj()
        info_ = {"name": "test-human-id", "test_attr": 5}
        robj._add_details(info_)
        self.assertEqual(info_["test_attr"], robj.__getattr__("test_attr"))
        # TODO: looks like causing infinite recursive calls
        #robj.__getattr__("test_non_exist_attr")

    def test___repr__(self):
        robj = self.get_mock_resource_obj()
        info_ = {"name": "test-human-id", "test_attr": 5}
        robj._add_details(info_)
        expected = "<Resource name=test-human-id, test_attr=5>"
        self.assertEqual(expected, robj.__repr__())

    def test_get(self):
        robj = self.get_mock_resource_obj()

        # A manager without a callable get(): get() is a silent no-op.
        manager = Mock()
        manager.get = None
        robj.manager = object()
        robj.get()

        # A working manager.get() refreshes the resource's details.
        manager = Mock()
        robj.manager = Mock()
        robj.id = "id"
        new = Mock()
        new._info = {"name": "test-human-id", "test_attr": 5}
        robj.manager.get = Mock(return_value=new)
        robj.get()
        self.assertEqual("test-human-id", robj.name)
        self.assertEqual(5, robj.test_attr)

    def test___eq__(self):
        # NOTE(review): this test was previously named "tes___eq__" (a typo),
        # so it never ran.  It also used the nonexistent assertNotTrue()
        # helper and reassigned robj._info where other._info was clearly
        # intended.  All three defects are fixed here.
        robj = self.get_mock_resource_obj()
        other = base.Resource()
        info_ = {"name": "test-human-id", "test_attr": 5}
        robj._info = info_
        other._info = {}
        # Without ids, equality falls back to comparing the _info dicts.
        self.assertFalse(robj == other)
        other._info = info_
        self.assertTrue(robj == other)
        # With ids present on both sides, only the ids are compared.
        robj.id = "rid"
        other.id = "oid"
        self.assertFalse(robj == other)
        other.id = "rid"
        self.assertTrue(robj == other)
        # not instance of the same class
        other = Mock()
        self.assertFalse(robj == other)

    def test_is_loaded(self):
        robj = self.get_mock_resource_obj()
        robj._loaded = True
        self.assertTrue(robj.is_loaded())
        robj._loaded = False
        self.assertFalse(robj.is_loaded())

    def test_set_loaded(self):
        robj = self.get_mock_resource_obj()
        robj.set_loaded(True)
        self.assertTrue(robj._loaded)
        robj.set_loaded(False)
        self.assertFalse(robj._loaded)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import copy
import functools
import hashlib
from oslo_log import log as logging
import six
from stevedore import extension
from heat.common import exception
from heat.common.i18n import _
from heat.engine import environment
from heat.objects import raw_template as template_object
LOG = logging.getLogger(__name__)
__all__ = ['Template']
_template_classes = None
def get_version(template_data, available_versions):
    """Return the (key, value) pair identifying a template's version.

    :param template_data: the parsed template mapping
    :param available_versions: iterable of (version_key, version) pairs for
        all registered template classes
    :raises exception.InvalidTemplateVersion: if zero or more than one
        recognised version key is present in the template
    """
    known_keys = set(key for key, _version in available_versions)
    # Version headers always map to a string value, so only consider those.
    string_keys = set(k for k, v in six.iteritems(template_data)
                      if isinstance(v, six.string_types))
    matches = known_keys & string_keys

    if len(matches) > 1:
        explanation = _('Ambiguous versions (%s)') % ', '.join(matches)
        raise exception.InvalidTemplateVersion(explanation=explanation)
    if not matches:
        explanation = _('Template version was not provided')
        raise exception.InvalidTemplateVersion(explanation=explanation)

    version_key = matches.pop()
    return version_key, template_data[version_key]
def _get_template_extension_manager():
    """Load all template-version plugins from the 'heat.templates' namespace.

    Plugins are not invoked on load; a load failure is converted into
    TemplatePluginNotRegistered by the callback.
    """
    return extension.ExtensionManager(
        namespace='heat.templates',
        invoke_on_load=False,
        on_load_failure_callback=raise_extension_exception)
def raise_extension_exception(extmanager, ep, err):
    """stevedore on_load_failure_callback: surface plugin load errors.

    :param extmanager: the ExtensionManager doing the loading (unused)
    :param ep: the entry point that failed to load
    :param err: the original exception raised during loading
    :raises TemplatePluginNotRegistered: always
    """
    raise TemplatePluginNotRegistered(name=ep.name, error=six.text_type(err))
# Raised when a template-version plugin fails to load
# (see raise_extension_exception()).
class TemplatePluginNotRegistered(exception.HeatException):
    msg_fmt = _("Could not load %(name)s: %(error)s")
def get_template_class(template_data):
    """Return the Template subclass registered for the template's version.

    :param template_data: the parsed template mapping
    :raises exception.InvalidTemplateVersion: when the version named in the
        template is not registered as a plugin
    """
    known = list(six.iterkeys(_template_classes))
    version = get_version(template_data, known)
    version_type = version[0]

    if version in _template_classes:
        return _template_classes[version]

    # Unknown version: build a helpful message listing the registered
    # versions for this version key.
    candidates = sorted(v for k, v in known if k == version_type)
    msg_data = {'version': ': '.join(version),
                'version_type': version_type,
                'available': ', '.join(v for v in candidates)}
    if len(candidates) > 1:
        explanation = _('"%(version)s". "%(version_type)s" '
                        'should be one of: %(available)s') % msg_data
    else:
        explanation = _('"%(version)s". "%(version_type)s" '
                        'should be: %(available)s') % msg_data
    raise exception.InvalidTemplateVersion(explanation=explanation)
class Template(collections.Mapping):
    '''A stack template.'''

    def __new__(cls, template, *args, **kwargs):
        '''Create a new Template of the appropriate class.'''
        global _template_classes

        # Lazily build the version -> plugin-class map on first use.
        if _template_classes is None:
            mgr = _get_template_extension_manager()
            _template_classes = dict((tuple(name.split('.')), mgr[name].plugin)
                                     for name in mgr.names())

        if cls != Template:
            # A concrete subclass was requested explicitly.
            TemplateClass = cls
        else:
            # Dispatch to the subclass registered for the template version.
            TemplateClass = get_template_class(template)

        return super(Template, cls).__new__(TemplateClass)

    def __init__(self, template, template_id=None, files=None, env=None):
        '''
        Initialise the template with a JSON object and a set of Parameters
        '''
        self.id = template_id
        self.t = template
        self.files = files or {}
        self.maps = self[self.MAPPINGS]
        self.env = env or environment.Environment({})
        self.version = get_version(self.t,
                                   list(six.iterkeys(_template_classes)))
        # Digest of the last successfully validated template body; used by
        # validate() to skip re-validation of an unchanged template.
        self.t_digest = None

    def __deepcopy__(self, memo):
        # NOTE(review): files and env are shared with (not copied into)
        # the clone; only the template body itself is deep-copied.
        return Template(copy.deepcopy(self.t, memo), files=self.files,
                        env=self.env)

    @classmethod
    def load(cls, context, template_id, t=None):
        '''Retrieve a Template with the given ID from the database.'''
        if t is None:
            t = template_object.RawTemplate.get_by_id(context, template_id)
        env = environment.Environment(t.environment)
        return cls(t.template, template_id=template_id, files=t.files, env=env)

    def store(self, context=None):
        '''Store the Template in the database and return its ID.'''
        rt = {
            'template': self.t,
            'files': self.files,
            'environment': self.env.user_env_as_dict()
        }
        if self.id is None:
            new_rt = template_object.RawTemplate.create(context, rt)
            self.id = new_rt.id
        else:
            template_object.RawTemplate.update_by_id(context, self.id, rt)
        return self.id

    def __iter__(self):
        '''Return an iterator over the section names.'''
        return (s for s in self.SECTIONS
                if s not in self.SECTIONS_NO_DIRECT_ACCESS)

    def __len__(self):
        '''Return the number of sections.'''
        return len(self.SECTIONS) - len(self.SECTIONS_NO_DIRECT_ACCESS)

    @abc.abstractmethod
    def param_schemata(self, param_defaults=None):
        '''Return a dict of parameters.Schema objects for the parameters.'''
        pass

    @abc.abstractmethod
    def get_section_name(self, section):
        """Return a correct section name."""
        pass

    @abc.abstractmethod
    def parameters(self, stack_identifier, user_params, param_defaults=None):
        '''Return a parameters.Parameters object for the stack.'''
        pass

    @classmethod
    def validate_resource_key_type(cls, key, valid_types, typename,
                                   allowed_keys, rsrc_name, rsrc_data):
        """Validation type of the specific resource key.

        Used in validate_resource_definition and check correctness of
        key's type.

        :returns: True if key is present in rsrc_data, False otherwise
        :raises ValueError: if key is not an allowed keyword
        :raises TypeError: if the key's value is not of a valid type
        """
        if key not in allowed_keys:
            raise ValueError(_('"%s" is not a valid '
                               'keyword inside a resource '
                               'definition') % key)
        if key in rsrc_data:
            if not isinstance(rsrc_data.get(key), valid_types):
                args = {'name': rsrc_name, 'key': key,
                        'typename': typename}
                message = _('Resource %(name)s %(key)s type '
                            'must be %(typename)s') % args
                raise TypeError(message)
            return True
        else:
            return False

    @abc.abstractmethod
    def validate_resource_definitions(self, stack):
        """Check section's type of ResourceDefinitions."""
        pass

    @abc.abstractmethod
    def resource_definitions(self, stack):
        '''Return a dictionary of ResourceDefinition objects.'''
        pass

    @abc.abstractmethod
    def add_resource(self, definition, name=None):
        '''Add a resource to the template.

        The resource is passed as a ResourceDefinition object. If no name is
        specified, the name from the ResourceDefinition should be used.
        '''
        pass

    def remove_resource(self, name):
        '''Remove a resource from the template.'''
        self.t.get(self.RESOURCES, {}).pop(name)

    def parse(self, stack, snippet):
        # Resolve intrinsic functions in the snippet using this template
        # version's function set (see the module-level parse()).
        return parse(self.functions, stack, snippet)

    def validate(self):
        '''Validate the template.

        Validates the top-level sections of the template as well as syntax
        inside select sections. Some sections are not checked here but in
        code parts that are responsible for working with the respective
        sections (e.g. parameters are check by parameters schema class).
        '''
        t_digest = hashlib.sha256(
            six.text_type(self.t).encode('utf-8')).hexdigest()

        # TODO(kanagaraj-manickam) currently t_digest is stored in self. which
        # is used to check whether already template is validated or not.
        # But it needs to be loaded from dogpile cache backend once its
        # available in heat (http://specs.openstack.org/openstack/heat-specs/
        # specs/liberty/constraint-validation-cache.html). This is required
        # as multiple heat-engines may process the same template at least
        # in case of instance_group. And it fixes partially bug 1444316
        if t_digest == self.t_digest:
            return

        # check top-level sections
        for k in six.iterkeys(self.t):
            if k not in self.SECTIONS:
                raise exception.InvalidTemplateSection(section=k)

        # check resources
        for res in six.itervalues(self[self.RESOURCES]):
            try:
                if not res or not res.get('Type'):
                    message = _('Each Resource must contain '
                                'a Type key.')
                    raise exception.StackValidationFailed(message=message)
            except AttributeError:
                # res was not a mapping at all
                message = _('Resources must contain Resource. '
                            'Found a [%s] instead') % type(res)
                raise exception.StackValidationFailed(message=message)
        # Only remember the digest once validation has fully succeeded.
        self.t_digest = t_digest

    @classmethod
    def create_empty_template(cls,
                              version=('heat_template_version', '2015-04-30')):
        '''Creates an empty template.

        Creates a new empty template with given version. If version is
        not provided, a new empty HOT template of version "2015-04-30"
        is returned.

        :param version: A tuple containing version header of the
        template: version key and value. E.g. ("heat_template_version",
        "2015-04-30")
        :returns: A new empty template.
        '''
        tmpl = {version[0]: version[1]}
        return cls(tmpl)
def parse(functions, stack, snippet):
    """Recursively resolve intrinsic functions within a parsed snippet.

    :param functions: mapping of function name -> function class
    :param stack: the stack the snippet belongs to
    :param snippet: any JSON-like structure from the template
    """
    recurse = functools.partial(parse, functions, stack)

    if isinstance(snippet, collections.Mapping):
        # A single-key mapping may be an intrinsic function call.
        if len(snippet) == 1:
            fn_name, args = next(six.iteritems(snippet))
            Func = functions.get(fn_name)
            if Func is not None:
                return Func(stack, fn_name, recurse(args))
        # Otherwise parse each value and rebuild the mapping.
        return dict((k, recurse(v)) for k, v in six.iteritems(snippet))

    # Strings are iterable but must be treated as scalars.
    if isinstance(snippet, six.string_types):
        return snippet

    if isinstance(snippet, collections.Iterable):
        return [recurse(v) for v in snippet]

    return snippet
| |
import base64
import json
import logging
import os
import time
from pathlib import Path
import click
import requests
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.Hash import SHA512
from Crypto.Protocol.KDF import PBKDF2
from Crypto.PublicKey import RSA
from Crypto.Util.Padding import pad
from requests import HTTPError
from tenacity import retry, stop_after_delay, wait_fixed
from n26.config import Config, MFA_TYPE_SMS
from n26.const import DAILY_WITHDRAWAL_LIMIT, DAILY_PAYMENT_LIMIT
from n26.util import create_request_url
LOGGER = logging.getLogger(__name__)
BASE_URL_DE = 'https://api.tech26.de'
BASE_URL_GLOBAL = 'https://api.tech26.global'
BASIC_AUTH_HEADERS = {"Authorization": "Basic bmF0aXZld2ViOg=="}
USER_AGENT = ("Mozilla/5.0 (X11; Linux x86_64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/59.0.3071.86 Safari/537.36")
GET = "get"
POST = "post"
EXPIRATION_TIME_KEY = "expiration_time"
ACCESS_TOKEN_KEY = "access_token"
REFRESH_TOKEN_KEY = "refresh_token"
GRANT_TYPE_PASSWORD = "password"
GRANT_TYPE_REFRESH_TOKEN = "refresh_token"
class Api(object):
"""
Api class can be imported as a library in order to use it within applications
"""
    def __init__(self, cfg: Config = None):
        """
        Constructor accepting None to maintain backward compatibility

        :param cfg: configuration object; a default Config() is created
            when None (or any falsy value) is passed
        """
        if not cfg:
            cfg = Config()
        self.config = cfg
        # In-memory token store, used when no on-disk store is configured.
        self._token_data = {}
        # NOTE(review): this mutates the module-level BASIC_AUTH_HEADERS
        # dict, so the device token of the most recently constructed Api
        # instance wins for every instance in the process.
        BASIC_AUTH_HEADERS["device-token"] = self.config.DEVICE_TOKEN.value
    @property
    def token_data(self) -> dict:
        """OAuth token data, read from memory or from the configured store file."""
        if self.config.LOGIN_DATA_STORE_PATH.value is None:
            return self._token_data
        else:
            return self._read_token_file(self.config.LOGIN_DATA_STORE_PATH.value)

    @token_data.setter
    def token_data(self, data: dict):
        # Persist to disk when a store path is configured; otherwise the
        # token only lives for the lifetime of this object.
        if self.config.LOGIN_DATA_STORE_PATH.value is None:
            self._token_data = data
        else:
            self._write_token_file(data, self.config.LOGIN_DATA_STORE_PATH.value)
@staticmethod
def _read_token_file(path: str) -> dict:
"""
:return: the stored token data or an empty dict
"""
LOGGER.debug("Reading token data from {}".format(path))
path = Path(path).expanduser().resolve()
if not path.exists():
return {}
if not path.is_file():
raise IsADirectoryError("File path exists and is not a file: {}".format(path))
if path.stat().st_size <= 0:
# file is empty
return {}
with open(path, "r") as file:
return json.loads(file.read())
    @staticmethod
    def _write_token_file(token_data: dict, path: str):
        """
        Persist token data as JSON to ``path`` with 0600 permissions.

        :param token_data: the token dict to store
        :param path: target file path (user-expanded and resolved)
        """
        LOGGER.debug("Writing token data to {}".format(path))
        path = Path(path).expanduser().resolve()

        # delete existing file if permissions don't match or file size is abnormally small
        if path.exists() and (path.stat().st_mode != 0o100600 or path.stat().st_size < 10):
            path.unlink()

        path.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
        # os.open creates the file with restrictive permissions up front
        # instead of chmod-ing it after the fact.
        with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as file:
            file.seek(0)
            file.write(json.dumps(token_data, indent=2))
            file.truncate()
    # IDEA: @get_token decorator

    def get_account_info(self) -> dict:
        """
        Retrieves basic account information
        """
        return self._do_request(GET, BASE_URL_DE + '/api/me')

    def get_account_statuses(self) -> dict:
        """
        Retrieves additional account information
        """
        return self._do_request(GET, BASE_URL_DE + '/api/me/statuses')

    def get_addresses(self) -> dict:
        """
        Retrieves a list of addresses of the account owner
        """
        return self._do_request(GET, BASE_URL_DE + '/api/addresses')

    def get_balance(self) -> dict:
        """
        Retrieves the current balance
        """
        return self._do_request(GET, BASE_URL_DE + '/api/accounts')

    def get_spaces(self) -> dict:
        """
        Retrieves a list of all spaces
        """
        return self._do_request(GET, BASE_URL_DE + '/api/spaces')

    def barzahlen_check(self) -> dict:
        """
        Queries the '/api/barzahlen/check' endpoint
        (CASH26 availability - inferred from the endpoint name)
        """
        return self._do_request(GET, BASE_URL_DE + '/api/barzahlen/check')

    def get_cards(self):
        """
        Retrieves a list of all cards
        """
        return self._do_request(GET, BASE_URL_DE + '/api/v2/cards')
    def get_account_limits(self) -> list:
        """
        Retrieves a list of all active account limits
        """
        return self._do_request(GET, BASE_URL_DE + '/api/settings/account/limits')

    def set_account_limits(self, daily_withdrawal_limit: int = None, daily_payment_limit: int = None) -> None:
        """
        Sets account limits. A limit passed as None is left unchanged.

        :param daily_withdrawal_limit: daily withdrawal limit
        :param daily_payment_limit: daily payment limit
        """
        if daily_withdrawal_limit is not None:
            self._do_request(POST, BASE_URL_DE + '/api/settings/account/limits', json={
                "limit": DAILY_WITHDRAWAL_LIMIT,
                "amount": daily_withdrawal_limit
            })

        if daily_payment_limit is not None:
            self._do_request(POST, BASE_URL_DE + '/api/settings/account/limits', json={
                "limit": DAILY_PAYMENT_LIMIT,
                "amount": daily_payment_limit
            })
    def get_contacts(self):
        """
        Retrieves a list of all contacts
        """
        return self._do_request(GET, BASE_URL_DE + '/api/smrt/contacts')

    def get_standing_orders(self) -> dict:
        """
        Get a list of standing orders
        """
        return self._do_request(GET, BASE_URL_DE + '/api/transactions/so')
    def get_transactions(self, from_time: int = None, to_time: int = None, limit: int = 20, pending: bool = None,
                         categories: str = None, text_filter: str = None, last_id: str = None) -> dict:
        """
        Get a list of transactions.

        Note that some parameters can not be combined in a single request (like text_filter and pending) and
        will result in a bad request (400) error.

        :param from_time: earliest transaction time as a Timestamp > 0 - milliseconds since 1970 in CET
        :param to_time: latest transaction time as a Timestamp > 0 - milliseconds since 1970 in CET
        :param limit: Limit the number of transactions to return to the given amount - default 20 as the n26 API returns
                      only the last 20 transactions by default
        :param pending: show only pending transactions
        :param categories: Comma separated list of category IDs
        :param text_filter: Query string to search for
        :param last_id: ??
        :return: list of transactions
        """
        if pending and limit:
            # pending does not support limit, so drop it to avoid a 400
            limit = None

        # None-valued parameters are omitted from the query string by
        # create_request_url.
        return self._do_request(GET, BASE_URL_DE + '/api/smrt/transactions', {
            'from': from_time,
            'to': to_time,
            'limit': limit,
            'pending': pending,
            'categories': categories,
            'textFilter': text_filter,
            'lastId': last_id
        })
def get_transactions_limited(self, limit: int = 5) -> dict:
import warnings
warnings.warn(
"get_transactions_limited is deprecated, use get_transactions(limit=5) instead",
DeprecationWarning
)
return self.get_transactions(limit=limit)
    def get_balance_statement(self, statement_url: str):
        """
        Retrieves a balance statement as pdf content

        :param statement_url: Download URL of a balance statement document
            (a path that is appended to the API base URL)
        """
        return self._do_request(GET, BASE_URL_DE + statement_url)

    def get_statements(self) -> list:
        """
        Retrieves a list of all statements
        """
        return self._do_request(GET, BASE_URL_DE + '/api/statements')
    def block_card(self, card_id: str) -> dict:
        """
        Blocks a card.

        If the card is already blocked this will have no effect.

        :param card_id: the id of the card to block
        :return: some info about the card (not including it's blocked state... thanks n26!)
        """
        return self._do_request(POST, BASE_URL_DE + '/api/cards/%s/block' % card_id)

    def unblock_card(self, card_id: str) -> dict:
        """
        Unblocks a card.

        If the card is already unblocked this will have no effect.

        :param card_id: the id of the card to unblock
        :return: some info about the card (not including it's unblocked state... thanks n26!)
        """
        return self._do_request(POST, BASE_URL_DE + '/api/cards/%s/unblock' % card_id)

    def get_savings(self) -> dict:
        """
        Retrieves savings account data from the savings hub
        """
        return self._do_request(GET, BASE_URL_DE + '/api/hub/savings/accounts')
def get_statistics(self, from_time: int = 0, to_time: int = int(time.time()) * 1000) -> dict:
"""
Get statistics in a given time frame
:param from_time: Timestamp - milliseconds since 1970 in CET
:param to_time: Timestamp - milliseconds since 1970 in CET
"""
if not from_time:
from_time = 0
if not to_time:
to_time = int(time.time()) * 1000
return self._do_request(GET, BASE_URL_DE + '/api/smrt/statistics/categories/%s/%s' % (from_time, to_time))
    def get_available_categories(self) -> list:
        """
        Retrieves the list of known transaction categories
        """
        return self._do_request(GET, BASE_URL_DE + '/api/smrt/categories')

    def get_invitations(self) -> list:
        """
        Retrieves the list of invitations (the '/api/aff/invitations' endpoint)
        """
        return self._do_request(GET, BASE_URL_DE + '/api/aff/invitations')
def _do_request(self, method: str = GET, url: str = "/", params: dict = None,
json: dict = None, headers: dict = None) -> list or dict or None:
"""
Executes a http request based on the given parameters
:param method: the method to use (GET, POST)
:param url: the url to use
:param params: query parameters that will be appended to the url
:param json: request body
:param headers: custom headers
:return: the response parsed as a json
"""
access_token = self.get_token()
_headers = {'Authorization': 'Bearer {}'.format(access_token)}
if headers is not None:
_headers.update(headers)
url = create_request_url(url, params)
if method is GET:
response = requests.get(url, headers=_headers, json=json)
elif method is POST:
response = requests.post(url, headers=_headers, json=json)
else:
raise ValueError("Unsupported method: {}".format(method))
response.raise_for_status()
# some responses do not return data so we just ignore the body in that case
if len(response.content) > 0:
if "application/json" in response.headers.get("Content-Type", ""):
return response.json()
else:
return response.content
    def get_encryption_key(self, public_key: str = None) -> dict:
        """
        Receive public encryption key for the JSON String containing the PIN encryption key

        :param public_key: optional public key passed as a query parameter
        :return: dict containing the base64 encoded RSA 'publicKey'
            (consumed by encrypt_user_pin())
        """
        return self._do_request(GET, BASE_URL_DE + '/api/encryption/key', params={
            'publicKey': public_key
        })
    def encrypt_user_pin(self, pin: str):
        """
        Encrypts user PIN and prepares it in a format required for a transaction order

        :param pin: the plaintext user PIN
        :return: encrypted and base64 encoded PIN as well as an
                 encrypted and base64 encoded JSON containing the PIN encryption key
        """
        # generate AES256 key and IV
        random_password = Random.get_random_bytes(32)
        salt = Random.get_random_bytes(16)
        # noinspection PyTypeChecker
        key = PBKDF2(random_password, salt, 32, count=1000000, hmac_hash_module=SHA512)
        iv = Random.new().read(AES.block_size)
        key64 = base64.b64encode(key).decode('utf-8')
        iv64 = base64.b64encode(iv).decode('utf-8')
        # encode the key and iv as a json string
        aes_secret = {
            'secretKey': key64,
            'iv': iv64
        }
        # json string has to be represented in byte form for encryption
        unencrypted_aes_secret = bytes(json.dumps(aes_secret), 'utf-8')
        # Encrypt the secret JSON with RSA using the public key provided by the server
        public_key = self.get_encryption_key()
        public_key_non64 = base64.b64decode(public_key['publicKey'])
        public_key_object = RSA.importKey(public_key_non64)
        public_key_cipher = PKCS1_v1_5.new(public_key_object)
        encrypted_secret = public_key_cipher.encrypt(unencrypted_aes_secret)
        encrypted_secret64 = base64.b64encode(encrypted_secret)
        # Encrypt user's pin with the AES key in CBC mode
        private_key_cipher = AES.new(key=key, mode=AES.MODE_CBC, iv=iv)
        # the pin has to be padded and transformed into bytes for a correct encryption format
        encrypted_pin = private_key_cipher.encrypt(pad(bytes(pin, 'utf-8'), 16))
        encrypted_pin64 = base64.b64encode(encrypted_pin)
        return encrypted_secret64, encrypted_pin64
    def create_transaction(self, iban: str, bic: str, name: str, reference: str, amount: float, pin: str):
        """
        Creates a bank transfer order

        :param iban: recipient IBAN
        :param bic: recipient BIC
        :param name: recipient name
        :param reference: transaction reference
        :param amount: money amount
        :param pin: user PIN required for the transaction approval
        """
        # The encrypted PIN material travels in custom request headers.
        encrypted_secret, encrypted_pin = self.encrypt_user_pin(pin)
        pin_headers = {
            'encrypted-secret': encrypted_secret,
            'encrypted-pin': encrypted_pin
        }
        # Request body describing the transfer ("DT" transaction type).
        data = {
            "transaction": {
                "amount": amount,
                "partnerBic": bic,
                "partnerIban": iban,
                "partnerName": name,
                "referenceText": reference,
                "type": "DT"
            }
        }
        return self._do_request(POST, BASE_URL_DE + '/api/transactions', json=data, headers=pin_headers)
    def is_authenticated(self) -> bool:
        """
        Check whether we currently hold usable (non-expired) token data.

        :return: whether valid token data exists
        """
        return self._validate_token(self.token_data)
    def authenticate(self):
        """
        Starts a new authentication flow with the N26 servers.

        This method requires user interaction to approve a 2FA request.
        Therefore you should make sure if you can bypass this
        by refreshing or reusing an existing token by calling is_authenticated()
        and refresh_authentication() respectively.

        :raises PermissionError: if the token is invalid even after the refresh
        """
        LOGGER.debug("Requesting token for username: {}".format(self.config.USERNAME.value))
        token_data = self._request_token(self.config.USERNAME.value, self.config.PASSWORD.value)

        # add expiration time to expiration in _validate_token()
        token_data[EXPIRATION_TIME_KEY] = time.time() + token_data["expires_in"]

        # if it's still not valid, raise an exception
        if not self._validate_token(token_data):
            raise PermissionError("Unable to request authentication token")

        # save token data (setter may persist it to disk)
        self.token_data = token_data
def refresh_authentication(self):
    """
    Refreshes an existing authentication using a (possibly expired) token.

    :raises AssertionError: if no existing token data was found
    :raises PermissionError: if the token is invalid even after the refresh
    """
    stored = self.token_data
    # Guard clause: without a refresh token there is nothing to refresh.
    if REFRESH_TOKEN_KEY not in stored:
        raise AssertionError("Cant refresh token since no existing token data was found. "
                             "Please initiate a new authentication instead.")
    LOGGER.debug("Trying to refresh existing token")
    refreshed = self._refresh_token(stored[REFRESH_TOKEN_KEY])
    # Record the absolute expiry timestamp; _validate_token() relies on it.
    refreshed[EXPIRATION_TIME_KEY] = time.time() + refreshed["expires_in"]
    if not self._validate_token(refreshed):
        raise PermissionError("Unable to refresh authentication token")
    # Only persist token data that passed validation.
    self.token_data = refreshed
def get_token(self):
    """
    Returns the access token to use for api authentication.

    A still-valid previous token is reused as-is; an expired one is
    refreshed; and when neither works a completely new authentication
    flow is started.

    :return: the access token
    """
    need_fresh_auth = False
    if not self._validate_token(self.token_data):
        try:
            self.refresh_authentication()
        except AssertionError:
            # no previous token data at all -> authenticate from scratch
            need_fresh_auth = True
        except HTTPError as http_error:
            # only a 401 (rejected refresh) falls back to a new flow
            if http_error.response.status_code != 401:
                raise http_error
            need_fresh_auth = True
    if need_fresh_auth:
        self.authenticate()
    return self.token_data[ACCESS_TOKEN_KEY]
def _request_token(self, username: str, password: str) -> dict:
    """
    Request an authentication token from the server.

    Runs the three-step MFA flow: initiate, wait for 2FA approval,
    then complete.

    :return: the token or None if the response did not contain a token
    """
    mfa = self._initiate_authentication_flow(username, password)
    self._request_mfa_approval(mfa)
    return self._complete_authentication_flow(mfa)
@staticmethod
def _initiate_authentication_flow(username: str, password: str) -> str:
    """Start the OAuth flow; returns the MFA token the server hands back."""
    LOGGER.debug("Requesting authentication flow for user {}".format(username))
    credentials = {
        "grant_type": GRANT_TYPE_PASSWORD,
        "username": username,
        "password": password
    }
    # TODO: Seems like the user-agent is not necessary but might be a good idea anyway
    response = requests.post(BASE_URL_GLOBAL + "/oauth2/token", data=credentials, headers=BASIC_AUTH_HEADERS)
    # The server is expected to *reject* the password-only request with 403
    # and demand MFA; any other status is unexpected.
    if response.status_code != 403:
        raise ValueError("Unexpected response for initial auth request: {}".format(response.text))
    body = response.json()
    if body.get("error", "") != "mfa_required":
        raise ValueError("Unexpected response data")
    return body["mfaToken"]
@staticmethod
def _refresh_token(refresh_token: str):
    """
    Refreshes an authentication token.

    :param refresh_token: the refresh token issued by the server when requesting a token
    :return: the refreshed token data
    """
    LOGGER.debug("Requesting token refresh using refresh_token {}".format(refresh_token))
    form = {
        'grant_type': GRANT_TYPE_REFRESH_TOKEN,
        'refresh_token': refresh_token,
    }
    resp = requests.post(BASE_URL_GLOBAL + '/oauth2/token', data=form, headers=BASIC_AUTH_HEADERS)
    resp.raise_for_status()
    return resp.json()
def _request_mfa_approval(self, mfa_token: str):
    """Ask the server to issue a 2FA challenge (SMS OTP or app approval)."""
    LOGGER.debug("Requesting MFA approval using mfa_token {}".format(mfa_token))
    # "otp" = SMS code challenge, "oob" = out-of-band (app) approval
    challenge = "otp" if self.config.MFA_TYPE.value == MFA_TYPE_SMS else "oob"
    headers = {
        **BASIC_AUTH_HEADERS,
        "User-Agent": USER_AGENT,
        "Content-Type": "application/json"
    }
    response = requests.post(
        BASE_URL_DE + "/api/mfa/challenge",
        json={"mfaToken": mfa_token, "challengeType": challenge},
        headers=headers)
    response.raise_for_status()
@retry(wait=wait_fixed(5), stop=stop_after_delay(60))
def _complete_authentication_flow(self, mfa_token: str) -> dict:
    """Poll the token endpoint until the 2FA challenge has been answered."""
    LOGGER.debug("Completing authentication flow for mfa_token {}".format(mfa_token))
    if self.config.MFA_TYPE.value == MFA_TYPE_SMS:
        prompt_text = click.style("Enter the 6 digit SMS OTP code", fg="yellow")
        body = {
            "mfaToken": mfa_token,
            "grant_type": "mfa_otp",
            # prompt as str because the code can have significant leading zeros
            "otp": click.prompt(prompt_text, type=str),
        }
    else:
        body = {
            "mfaToken": mfa_token,
            "grant_type": "mfa_oob",
        }
    response = requests.post(BASE_URL_DE + "/oauth2/token", data=body, headers=BASIC_AUTH_HEADERS)
    response.raise_for_status()
    return response.json()
@staticmethod
def _validate_token(token_data: dict):
    """
    Checks if a token is valid.

    :param token_data: the token data to check
    :return: truthy if valid, falsy otherwise
    """
    # NOTE: deliberately test membership (not .get) so a stored-but-None
    # expiry keeps the original comparison semantics.
    if EXPIRATION_TIME_KEY not in token_data:
        # the expiration_time property was never recorded
        return False
    if time.time() >= token_data[EXPIRATION_TIME_KEY]:
        # token has expired
        return False
    # final check: an access token must be present and non-empty
    return ACCESS_TOKEN_KEY in token_data and token_data[ACCESS_TOKEN_KEY]
| |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import os
import unittest
import numpy as np
from systemds.context import SystemDSContext
from systemds.examples.tutorials.adult import DataManager
from systemds.operator import Frame, Matrix, OperationNode
from systemds.operator.algorithm import (confusionMatrix, kmeans, l2svm,
multiLogReg, multiLogRegPredict,
scale, scaleApply, split, winsorize)
from systemds.script_building import DMLScript
class Test_DMLScript(unittest.TestCase):
    """
    Test class for adult dml script tutorial code.
    """

    # SystemDS context shared by all tests; created once in setUpClass.
    sds: SystemDSContext = None
    # Loader for the "adult" census-income dataset used by the tutorial.
    d: DataManager = None
    # DML source files used by the (currently commented-out) neural-net tests.
    neural_net_src_path: str = "tests/examples/tutorials/neural_net_source.dml"
    preprocess_src_path: str = "tests/examples/tutorials/preprocess.dml"
    # Raw dataset resources (CSV + metadata) and the transform specification.
    dataset_path_train: str = "../../test/resources/datasets/adult/train_data.csv"
    dataset_path_train_mtd: str = "../../test/resources/datasets/adult/train_data.csv.mtd"
    dataset_path_test: str = "../../test/resources/datasets/adult/test_data.csv"
    dataset_path_test_mtd: str = "../../test/resources/datasets/adult/test_data.csv.mtd"
    dataset_jspec: str = "../../test/resources/datasets/adult/jspec.json"

    @classmethod
    def setUpClass(cls):
        # One shared context/data-manager pair keeps the suite fast;
        # the context is closed in tearDownClass.
        cls.sds = SystemDSContext()
        cls.d = DataManager()

    @classmethod
    def tearDownClass(cls):
        cls.sds.close()

    def test_train_data(self):
        """Training features: 32561 rows x 14 columns."""
        x = self.d.get_train_data_pandas()
        self.assertEqual((32561, 14), x.shape)

    def test_train_labels(self):
        """Training labels: one label per training row."""
        y = self.d.get_train_labels_pandas()
        self.assertEqual((32561,), y.shape)

    def test_test_data(self):
        """Test features: 16281 rows x 14 columns."""
        x_l = self.d.get_test_data_pandas()
        self.assertEqual((16281, 14), x_l.shape)

    def test_test_labels(self):
        """Test labels: one label per test row."""
        y_l = self.d.get_test_labels_pandas()
        self.assertEqual((16281,), y_l.shape)

    def test_train_data_pandas_vs_systemds(self):
        """The pandas and SystemDS loaders must agree on columns and shape."""
        pandas = self.d.get_train_data_pandas()
        systemds = self.d.get_train_data(self.sds).compute()
        self.assertTrue(len(pandas.columns.difference(systemds.columns)) == 0)
        self.assertEqual(pandas.shape, systemds.shape)

    def test_train_labels_pandas_vs_systemds(self):
        """Training label values must match element-wise between loaders."""
        # Pandas does not strip the parsed values.. so i have to do it here.
        pandas = np.array(
            [x.strip() for x in self.d.get_train_labels_pandas().to_numpy().flatten()])
        systemds = self.d.get_train_labels(
            self.sds).compute().to_numpy().flatten()
        comp = pandas == systemds
        self.assertTrue(comp.all())

    def test_test_labels_pandas_vs_systemds(self):
        """Same element-wise label comparison for the test split."""
        # Pandas does not strip the parsed values.. so i have to do it here.
        pandas = np.array(
            [x.strip() for x in self.d.get_test_labels_pandas().to_numpy().flatten()])
        systemds = self.d.get_test_labels(
            self.sds).compute().to_numpy().flatten()
        comp = pandas == systemds
        self.assertTrue(comp.all())

    def test_transform_encode_train_data(self):
        """Encoding expands the 14 raw columns to 107 (per the jspec;
        presumably via recoding/dummy-coding — confirm against jspec.json)."""
        jspec = self.d.get_jspec(self.sds)
        train_x, M1 = self.d.get_train_data(self.sds).transform_encode(spec=jspec)
        train_x_numpy = train_x.compute()
        self.assertEqual((32561, 107), train_x_numpy.shape)

    def test_transform_encode_apply_test_data(self):
        """Applying the training metadata to the test split keeps 107 columns."""
        jspec = self.d.get_jspec(self.sds)
        train_x, M1 = self.d.get_train_data(self.sds).transform_encode(spec=jspec)
        test_x = self.d.get_test_data(self.sds).transform_apply(spec=jspec, meta=M1)
        test_x_numpy = test_x.compute()
        self.assertEqual((16281, 107), test_x_numpy.shape)

    def test_transform_encode_train_labels(self):
        """Recoding the income label yields a single numeric column."""
        jspec_dict = {"recode":["income"]}
        jspec = self.sds.scalar(f'"{jspec_dict}"')
        train_y, M1 = self.d.get_train_labels(self.sds).transform_encode(spec=jspec)
        train_y_numpy = train_y.compute()
        self.assertEqual((32561, 1), train_y_numpy.shape)

    def test_transform_encode_test_labels(self):
        """Label-recode metadata from train applies cleanly to test labels."""
        jspec_dict = {"recode":["income"]}
        jspec = self.sds.scalar(f'"{jspec_dict}"')
        train_y, M1 = self.d.get_train_labels(self.sds).transform_encode(spec=jspec)
        test_y = self.d.get_test_labels(self.sds).transform_apply(spec=jspec, meta=M1)
        test_y_numpy = test_y.compute()
        self.assertEqual((16281, 1), test_y_numpy.shape)

    def test_multi_log_reg(self):
        """End-to-end: encode, train multiLogReg, predict, and sanity-check
        the averaged confusion matrix."""
        # Reduced because we want the tests to finish a bit faster.
        train_count = 10000
        test_count = 500
        jspec_data = self.d.get_jspec(self.sds)
        train_x_frame = self.d.get_train_data(self.sds)[0:train_count]
        train_x, M1 = train_x_frame.transform_encode(spec=jspec_data)
        test_x_frame = self.d.get_test_data(self.sds)[0:test_count]
        test_x = test_x_frame.transform_apply(spec=jspec_data, meta=M1)
        jspec_dict = {"recode": ["income"]}
        jspec_labels = self.sds.scalar(f'"{jspec_dict}"')
        train_y_frame = self.d.get_train_labels(self.sds)[0:train_count]
        train_y, M2 = train_y_frame.transform_encode(spec=jspec_labels)
        test_y_frame = self.d.get_test_labels(self.sds)[0:test_count]
        test_y = test_y_frame.transform_apply(spec=jspec_labels, meta=M2)
        betas = multiLogReg(train_x, train_y)
        [_, y_pred, acc] = multiLogRegPredict(test_x, betas, test_y)
        [_, conf_avg] = confusionMatrix(y_pred, test_y)
        confusion_numpy = conf_avg.compute()
        # Loose bounds rather than exact values: training is not seeded here.
        self.assertTrue(confusion_numpy[0][0] > 0.8)
        self.assertTrue(confusion_numpy[0][1] < 0.5)
        self.assertTrue(confusion_numpy[1][1] > 0.5)
        self.assertTrue(confusion_numpy[1][0] < 0.2)
# def test_neural_net(self):
# # Reduced because we want the tests to finish a bit faster.
# train_count = 15000
# test_count = 5000
# train_data, train_labels, test_data, test_labels = self.d.get_preprocessed_dataset(interpolate=True, standardize=True, dimred=0.1)
# # Train data
# X = self.sds.from_numpy( train_data[:train_count])
# Y = self.sds.from_numpy( train_labels[:train_count])
# # Test data
# Xt = self.sds.from_numpy(test_data[:test_count])
# Yt = self.sds.from_numpy(test_labels[:test_count])
# FFN_package = self.sds.source(self.neural_net_src_path, "fnn", print_imported_methods=True)
# network = FFN_package.train(X, Y, 1, 16, 0.01, 1)
# self.assertTrue(type(network) is not None) # sourcing and training seems to works
# FFN_package.save_model(network, '"model/python_FFN/"').compute(verbose=True)
# # TODO This does not work yet, not sure what the problem is
# #probs = FFN_package.predict(Xt, network).compute(True)
# # FFN_package.eval(Yt, Yt).compute()
# def test_level1(self):
# # Reduced because we want the tests to finish a bit faster.
# train_count = 15000
# test_count = 5000
# train_data, train_labels, test_data, test_labels = self.d.get_preprocessed_dataset(interpolate=True,
# standardize=True, dimred=0.1)
# # Train data
# X = self.sds.from_numpy(train_data[:train_count])
# Y = self.sds.from_numpy(train_labels[:train_count])
# Y = Y + 1.0
# # Test data
# Xt = self.sds.from_numpy(test_data[:test_count])
# Yt = self.sds.from_numpy(test_labels[:test_count])
# Yt = Yt + 1.0
# betas = multiLogReg(X, Y)
# [_, y_pred, acc] = multiLogRegPredict(Xt, betas, Yt).compute()
# self.assertGreater(acc, 80) #Todo remove?
# # todo add text how high acc should be with this config
# confusion_matrix_abs, _ = confusionMatrix(self.sds.from_numpy(y_pred), Yt).compute()
# # todo print confusion matrix? Explain cm?
# self.assertTrue(
# np.allclose(
# confusion_matrix_abs,
# np.array([[3583, 502],
# [245, 670]])
# )
# )
# def test_level2(self):
# train_count = 32561
# test_count = 16281
# SCHEMA = '"DOUBLE,STRING,DOUBLE,STRING,DOUBLE,STRING,STRING,STRING,STRING,STRING,DOUBLE,DOUBLE,DOUBLE,STRING,STRING"'
# F1 = self.sds.read(
# self.dataset_path_train,
# schema=SCHEMA
# )
# F2 = self.sds.read(
# self.dataset_path_test,
# schema=SCHEMA
# )
# jspec = self.sds.read(self.dataset_jspec, data_type="scalar", value_type="string")
# PREPROCESS_package = self.sds.source(self.preprocess_src_path, "preprocess", print_imported_methods=True)
# X1 = F1.rbind(F2)
# X1, M1 = X1.transform_encode(spec=jspec)
# X = PREPROCESS_package.get_X(X1, 1, train_count)
# Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# Xt = PREPROCESS_package.get_X(X1, train_count, train_count+test_count)
# Yt = PREPROCESS_package.get_Y(X1, train_count, train_count+test_count)
# Yt = PREPROCESS_package.replace_value(Yt, 3.0, 1.0)
# Yt = PREPROCESS_package.replace_value(Yt, 4.0, 2.0)
# # better alternative for encoding. This was intended, but it does not work
# #F2 = F2.replace("<=50K.", "<=50K")
# #F2 = F2.replace(">50K.", ">50K")
# #X1, M = F1.transform_encode(spec=jspec)
# #X2 = F2.transform_apply(spec=jspec, meta=M)
# #X = PREPROCESS_package.get_X(X1, 1, train_count)
# #Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# #Xt = PREPROCESS_package.get_X(X2, 1, test_count)
# #Yt = PREPROCESS_package.get_Y(X2, 1, test_count)
# # TODO somehow throws error at predict with this included
# #X, mean, sigma = scale(X, True, True)
# #Xt = scaleApply(Xt, mean, sigma)
# betas = multiLogReg(X, Y)
# [_, y_pred, acc] = multiLogRegPredict(Xt, betas, Yt)
# confusion_matrix_abs, _ = confusionMatrix(y_pred, Yt).compute()
# print(confusion_matrix_abs)
# self.assertTrue(
# np.allclose(
# confusion_matrix_abs,
# np.array([[11593., 1545.],
# [842., 2302.]])
# )
# )
# def test_level3(self):
# train_count = 32561
# test_count = 16281
# SCHEMA = '"DOUBLE,STRING,DOUBLE,STRING,DOUBLE,STRING,STRING,STRING,STRING,STRING,DOUBLE,DOUBLE,DOUBLE,STRING,STRING"'
# F1 = self.sds.read(
# self.dataset_path_train,
# schema=SCHEMA
# )
# F2 = self.sds.read(
# self.dataset_path_test,
# schema=SCHEMA
# )
# jspec = self.sds.read(self.dataset_jspec, data_type="scalar", value_type="string")
# PREPROCESS_package = self.sds.source(self.preprocess_src_path, "preprocess", print_imported_methods=True)
# X1 = F1.rbind(F2)
# X1, M1 = X1.transform_encode(spec=jspec)
# X = PREPROCESS_package.get_X(X1, 1, train_count)
# Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# Xt = PREPROCESS_package.get_X(X1, train_count, train_count + test_count)
# Yt = PREPROCESS_package.get_Y(X1, train_count, train_count + test_count)
# Yt = PREPROCESS_package.replace_value(Yt, 3.0, 1.0)
# Yt = PREPROCESS_package.replace_value(Yt, 4.0, 2.0)
# # better alternative for encoding
# # F2 = F2.replace("<=50K.", "<=50K")
# # F2 = F2.replace(">50K.", ">50K")
# # X1, M = F1.transform_encode(spec=jspec)
# # X2 = F2.transform_apply(spec=jspec, meta=M)
# # X = PREPROCESS_package.get_X(X1, 1, train_count)
# # Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# # Xt = PREPROCESS_package.get_X(X2, 1, test_count)
# # Yt = PREPROCESS_package.get_Y(X2, 1, test_count)
# # TODO somehow throws error at predict with this included
# # X, mean, sigma = scale(X, True, True)
# # Xt = scaleApply(Xt, mean, sigma)
# FFN_package = self.sds.source(self.neural_net_src_path, "fnn", print_imported_methods=True)
# epochs = 1
# batch_size = 16
# learning_rate = 0.01
# seed = 42
# network = FFN_package.train(X, Y, epochs, batch_size, learning_rate, seed)
# """
# If more ressources are available, one can also choose to train the model using a parameter server.
# Here we use the same parameters as before, however we need to specifiy a few more.
# """
# ################################################################################################################
# # workers = 1
# # utype = '"BSP"'
# # freq = '"EPOCH"'
# # mode = '"LOCAL"'
# # network = FFN_package.train_paramserv(X, Y, epochs,
# # batch_size, learning_rate, workers, utype, freq, mode,
# # seed)
# ################################################################################################################
# FFN_package.save_model(network, '"model/python_FFN/"').compute(verbose=True)
# """
# Next we evaluate our network on the test set which was not used for training.
# The predict function with the test features and our trained network returns a matrix of class probabilities.
# This matrix contains for each test sample the probabilities for each class.
# For predicting the most likely class of a sample, we choose the class with the highest probability.
# """
# ################################################################################################################
# #probs = FFN_package.predict(Xt, network)
# ################################################################################################################
# """
# To evaluate how well our model performed on the test set, we can use the probability matrix from the predict call and the real test labels
# and compute the log-cosh loss.
# """
# ################################################################################################################
# #FFN_package.eval(Xt, Yt).compute(True)
# ################################################################################################################
# Allow running this test module directly; exit=False prevents unittest
# from calling sys.exit() after the run.
if __name__ == "__main__":
    unittest.main(exit=False)
| |
import unittest
from distance import DefaultClasses
from distance.base import (
ObjectFragment,
Fragment,
)
from distance._impl.level_objects.objects import (
SubTeleporter,
WinLogic,
GoldenSimple,
WorldText,
)
from distance._impl.fragments.levelfragments import (
CarScreenTextDecodeTriggerFragment,
AnimatorFragment,
)
from distance._impl.fragments.npfragments import (
NamedPropertiesFragment,
ByteNamedProperty,
OldCarScreenTextDecodeTriggerFragment,
)
from distance.printing import PrintContext
from distance.constants import ForceType
from construct import Container
from distance._common import (
ModesMapperProperty,
MedalTimesMapperProperty,
MedalScoresMapperProperty,
)
from . import common
from .common import check_exceptions, write_read
PROBER = DefaultClasses.level_objects
def TagFragment(name, tag, **kw):
    """Test helper: build a Fragment subclass carrying the given class_tag."""
    attrs = dict(kw, class_tag=tag)
    return type(name, (Fragment,), attrs)
class InfoDisplayBoxTest(unittest.TestCase):
    """Reading and printing of InfoDisplayBox custom object fixtures."""

    def test_read(self):
        obj = PROBER.read("tests/in/customobject/infodisplaybox 1.bytes")
        self.assertEqual(obj.texts, ["Text0", "Text1", "Text2", "", "Text4"])

    def test_read_2(self):
        obj = PROBER.read("tests/in/customobject/infodisplaybox 2.bytes")
        self.assertEqual(obj.texts, ["Test_2", "", "", "", ""])

    def test_ver_0(self):
        # fixture stored in the version-0 layout
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/infodisplaybox ver_0 1.bytes")
        self.assertEqual(obj.texts[0], "Flight ability\ncorrupted")
        p.print_object(obj)

    def test_ver_0_2(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/infodisplaybox ver_0 2.bytes")
        self.assertEqual(obj.texts[0], "Synchronizing with\r\nold <color=#00ff77>checkpoint</color>\r\nnetwork")
        self.assertAlmostEqual(obj.per_char_speed, 0.02)
        p.print_object(obj)

    def test_print(self):
        p = PrintContext.for_test()
        p.print_object(PROBER.read("tests/in/customobject/infodisplaybox 1.bytes"))

    def test_quarantinetrigger(self):
        # quarantinetrigger fixture embeds an empty InfoDisplayLogic fragment
        obj = PROBER.read("tests/in/customobject/quarantinetrigger empty infodisplaylogic.bytes")
        check_exceptions(obj)
        PrintContext.for_test().print_object(obj)
class InfoDisplayBoxV2WriteReadTest(common.WriteReadTest):
    """Write/read round-trip for the InfoDisplayBox fixture."""

    # fixture file and reader used by the common.WriteReadTest machinery
    filename = "tests/in/customobject/infodisplaybox 1.bytes"
    read_obj = PROBER.read

    def verify_obj(self, obj):
        # hook presumably invoked by common.WriteReadTest on the re-read
        # object — confirm against tests/common.py
        self.assertEqual(obj.texts[0], "Text0")
class WorldTextTest(unittest.TestCase):
    """Reading, printing, and creating WorldText custom objects."""

    def test_read(self):
        obj = PROBER.read("tests/in/customobject/worldtext 1.bytes")
        self.assertEqual(obj.text, "Test text")

    def test_read_default_helloworld(self):
        obj = PROBER.read("tests/in/customobject/worldtext helloworld.bytes")
        # Fixed: the original `assertIsNone(obj.text, None)` passed None as
        # the failure *message* argument of assertIsNone, not as an expected
        # value — a misleading leftover (likely from assertEqual).
        self.assertIsNone(obj.text)

    def test_read_3(self):
        obj = PROBER.read("tests/in/customobject/worldtext weird.bytes")
        self.assertEqual(obj.text, "Zero-G")

    def test_print(self):
        p = PrintContext.for_test()
        p.print_object(PROBER.read("tests/in/customobject/worldtext helloworld.bytes"))

    def test_create(self):
        # round-trip a freshly constructed object through write/read
        obj = WorldText(text="test")
        res, rdb = write_read(obj)
        self.assertEqual(res.text, "test")
class TeleporterTest(unittest.TestCase):
    """Tests for the 'Teleporter' sub-object of teleporter fixtures."""

    @staticmethod
    def _read_teleporter(filename):
        # Return (container object, its first child of type 'Teleporter').
        container = PROBER.read(filename)
        sub = next(c for c in container.children if c.type == 'Teleporter')
        return container, sub

    def test_link_id(self):
        _, tele = self._read_teleporter("tests/in/customobject/tele exit checkpoint.bytes")
        self.assertIsInstance(tele, SubTeleporter)
        self.assertEqual(tele.link_id, 334)

    def test_with_checkpoint(self):
        _, tele = self._read_teleporter("tests/in/customobject/tele exit checkpoint.bytes")
        self.assertIsInstance(tele, SubTeleporter)
        self.assertEqual(tele.trigger_checkpoint, 1)

    def test_without_checkpoint(self):
        _, tele = self._read_teleporter("tests/in/customobject/tele exit nocheckpoint.bytes")
        self.assertIsInstance(tele, SubTeleporter)
        self.assertEqual(tele.trigger_checkpoint, 0)

    def test_print_data(self):
        ctx = PrintContext.for_test()
        ctx.print_object(PROBER.read("tests/in/customobject/tele exit nocheckpoint.bytes"))

    def test_virusspiritspawner(self):
        ctx = PrintContext.for_test()
        container, tele = self._read_teleporter("tests/in/customobject/virusspiritspawner.bytes")
        self.assertIsInstance(tele, SubTeleporter)
        self.assertEqual(tele.destination, 6666)
        ctx.print_object(container)

    def test_build_6641(self):
        ctx = PrintContext.for_test()
        container, tele = self._read_teleporter("tests/in/customobject/tele build 6641.bytes")
        self.assertEqual(tele.destination, 3)
        ctx.print_object(container)
class OldTeleporterTest(unittest.TestCase):
    """The old (v0) teleporter format still yields a SubTeleporter child."""

    def test_read(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/tele v0.bytes")
        p.print_object(obj)
        tele = next(obj for obj in obj.children if obj.type == 'Teleporter')
        self.assertIsInstance(tele, SubTeleporter)
        # link id and destination are both 0 for this v0 fixture
        self.assertEqual(0, tele.link_id)
        self.assertEqual(0, tele.destination)
class SoccerGoalTest(unittest.TestCase):
    """Smoke test: the soccer goal fixture parses and prints without errors."""

    def test_read(self):
        ctx = PrintContext.for_test()
        goal = PROBER.read("tests/in/customobject/soccergoal.bytes")
        check_exceptions(goal)
        ctx.print_object(goal)
class BatteryBuildingTest(unittest.TestCase):
    """Smoke test for the battery building fixture."""

    # has PulseMaterial, which does NOT contain named properties
    def test_read(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/batterybuilding.bytes")
        check_exceptions(obj)
        p.print_object(obj)
class RotatingSpotLightTest(unittest.TestCase):
    """Smoke test: the rotating spotlight fixture parses and prints cleanly."""

    def test_read(self):
        ctx = PrintContext.for_test()
        light = PROBER.read("tests/in/customobject/rotatingspotlight.bytes")
        check_exceptions(light)
        ctx.print_object(light)
class GravityTriggerTest(unittest.TestCase):
    """Field values of gravity trigger fixtures (default, changed, old)."""

    def test_default(self):
        obj = PROBER.read("tests/in/customobject/gravtrigger default.bytes")
        self.assertEqual(obj.disable_gravity, True)
        self.assertEqual(obj.music_id, 19)
        self.assertEqual(obj.one_time_trigger, True)
        self.assertEqual(obj.disable_music_trigger, False)

    def test_changed(self):
        # same fields as test_default, with every value flipped/altered
        obj = PROBER.read("tests/in/customobject/gravtrigger changed.bytes")
        self.assertEqual(obj.disable_gravity, False)
        self.assertEqual(obj.music_id, 3)
        self.assertEqual(obj.one_time_trigger, False)
        self.assertEqual(obj.disable_music_trigger, True)

    def test_old(self):
        # only verify we don't error here
        PROBER.read("tests/in/customobject/gravtrigger old.bytes")
        # TODO update when we read old format

    def test_print_default(self):
        p = PrintContext.for_test()
        p.print_object(PROBER.read("tests/in/customobject/gravtrigger default.bytes"))

    def test_print_changed(self):
        p = PrintContext.for_test()
        p.print_object(PROBER.read("tests/in/customobject/gravtrigger changed.bytes"))
class ForceZoneBoxTest(unittest.TestCase):
    """Force zone fixtures: wind vs gravity variants."""

    # fixture name suffixes iterated by test_print
    files = ("default", "changed wind", "changed gravity")

    def test_default(self):
        obj = PROBER.read("tests/in/customobject/forcezone default.bytes")
        self.assertEqual(obj.force_type, ForceType.WIND)
        self.assertEqual(obj.drag_multiplier, 1.0)

    def test_gravity(self):
        obj = PROBER.read("tests/in/customobject/forcezone changed gravity.bytes")
        self.assertEqual(obj.force_type, ForceType.GRAVITY)
        self.assertEqual(obj.disable_global_gravity, 1)
        self.assertEqual("Custom Zone", obj.custom_name)

    def test_print(self):
        # printing must succeed for every fixture variant
        for fname in self.files:
            with self.subTest(fname=fname):
                p = PrintContext.for_test()
                filename = f"tests/in/customobject/forcezone {fname}.bytes"
                p.print_object(PROBER.read(filename))
class EnableAbilitiesBoxTest(unittest.TestCase):
    """Ability flags of enable-abilities-box fixtures.

    Each test checks both the raw `abilities` mapping and the derived
    per-ability attributes.
    """

    def test_default(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/enableabilitiesbox default.bytes")
        # default fixture has no abilities enabled; key may be absent entirely
        self.assertEqual(obj.abilities.get('EnableFlying', 0), 0)
        self.assertEqual(obj.enable_boosting, 0)
        self.assertEqual(obj.enable_jumping, 0)
        self.assertEqual(obj.enable_jets, 0)
        self.assertEqual(obj.enable_flying, 0)
        self.assertEqual(obj.bloom_out, 1)
        p.print_object(obj)

    def test_flyboost(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/enableabilitiesbox flyboost.bytes")
        self.assertEqual(obj.abilities['EnableFlying'], 1)
        self.assertEqual(obj.abilities['EnableBoosting'], 1)
        self.assertEqual(obj.enable_boosting, 1)
        self.assertEqual(obj.enable_jumping, 0)
        self.assertEqual(obj.enable_jets, 0)
        self.assertEqual(obj.enable_flying, 1)
        self.assertEqual(obj.bloom_out, 1)
        p.print_object(obj)

    def test_all_off(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/enableabilitiesbox all off.bytes")
        self.assertEqual(obj.abilities['EnableFlying'], 0)
        self.assertEqual(obj.abilities['EnableBoosting'], 0)
        self.assertEqual(obj.enable_boosting, 0)
        self.assertEqual(obj.enable_jumping, 0)
        self.assertEqual(obj.enable_jets, 0)
        self.assertEqual(obj.enable_flying, 0)
        self.assertEqual(obj.bloom_out, 0)
        p.print_object(obj)
class S5OffsetTest(unittest.TestCase):
    """Fixtures using the 's5 offset' layout report the right child count."""

    def _check_child_count(self, filename, expected):
        # shared body: read, count children, and print
        ctx = PrintContext.for_test()
        obj = PROBER.read(filename)
        self.assertEqual(len(obj.children), expected)
        ctx.print_object(obj)

    def test_glasssplineroadstraight(self):
        self._check_child_count(
            "tests/in/customobject/glasssplineroadstraight s5 offset.bytes", 2)

    def test_jumpbarrierlowhi(self):
        self._check_child_count(
            "tests/in/customobject/jumpbarrierlowhi s5 offset.bytes", 3)
class EmpireEndZoneTest(unittest.TestCase):
    """End zone fixtures and their WinLogic child."""

    def test_normal(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/endzone.bytes")
        self.assertEqual(len(obj.children), 9)
        win_logic = next(obj for obj in obj.children if obj.type == 'WinLogic')
        self.assertEqual(WinLogic, type(win_logic))
        # unset in this fixture
        self.assertIsNone(win_logic.delay_before_broadcast)
        p.print_object(obj)

    def test_delay_before_broadcast(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/endzone delay.bytes")
        self.assertEqual(len(obj.children), 9)
        win_logic = next(obj for obj in obj.children if obj.type == 'WinLogic')
        self.assertEqual(WinLogic, type(win_logic))
        self.assertAlmostEqual(3.0, win_logic.delay_before_broadcast)
        p.print_object(obj)

    def test_weird_textmesh(self):
        # regression fixture: only verify it parses and prints without errors
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/endzone weird textmesh.bytes")
        check_exceptions(obj)
        p.print_object(obj)
class CarScreenTextDecodeTriggerTest(unittest.TestCase):
    """New vs old (v0) decode trigger fragments resolve to different classes."""

    def test_trigger(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/decodetrigger.bytes")
        frag = obj['CarScreenTextDecodeTrigger']
        # current format resolves to the non-named-properties fragment class
        self.assertEqual(CarScreenTextDecodeTriggerFragment, type(frag))
        p.print_object(obj)
        self.assertEqual(obj.text, "Please, help us.")
        self.assertEqual(obj.time_text, "")

    def test_ver0(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/decodetrigger v0.bytes")
        frag = obj['CarScreenTextDecodeTrigger']
        # v0 format resolves to the old named-properties fragment class
        self.assertEqual(OldCarScreenTextDecodeTriggerFragment, type(frag))
        p.print_object(frag)
        self.assertEqual(obj.text, "INPUT(666\u2020):Extract();")
        self.assertAlmostEqual(obj.per_char_speed, 0.02)
        self.assertEqual(obj.clear_on_finish, True)
        self.assertEqual(obj.destroy_on_trigger_exit, False)
        self.assertEqual(obj.time_text, "Download")
class SplineRoadTest(unittest.TestCase):
    """TrackNode fragments of a spline road's children."""

    def test_tracknodes(self):
        p = PrintContext.for_test()
        obj = PROBER.read("tests/in/customobject/splineroad.bytes")
        node0 = obj.children[0]['TrackNode']
        node1 = obj.children[1]['TrackNode']
        # both nodes share parent 79 but snap to different ids
        self.assertEqual(79, node0.parent_id)
        self.assertEqual(59, node0.snap_id)
        self.assertEqual(79, node1.parent_id)
        self.assertEqual(100, node1.snap_id)
        p.print_object(obj)
class CubeGsTest(unittest.TestCase):
    """Probing a fixture containing two golden-simple cubes."""

    def test_probe(self):
        group = PROBER.read("tests/in/customobject/2cubes.bytes")
        # both children must be probed as GoldenSimple
        for child in group.children[:2]:
            self.assertEqual(GoldenSimple, type(child))
class ConstructorTest(unittest.TestCase):
    """Keyword-argument construction of level objects."""

    def test_invalid_attr(self):
        # unknown keyword arguments must be rejected
        with self.assertRaises(AttributeError):
            GoldenSimple(test_attr=2)

    def test_attr(self):
        gs = GoldenSimple(emit_index=50)
        self.assertEqual(gs.emit_index, 50)
class TestFragments(unittest.TestCase):
    """Fragment lookup behavior after reassigning an object's fragments."""

    def test_getbytype_after_assign(self):
        old_anim = AnimatorFragment()
        obj = DefaultClasses.level_objects.create('Group')
        obj.fragments = [ObjectFragment(), old_anim]
        # Look up once before reassigning — presumably to populate an
        # internal by-type cache so the test proves the cache is invalidated.
        obj['Animator']
        new_anim = AnimatorFragment()
        obj.fragments = [ObjectFragment(), new_anim]
        res = obj['Animator']
        # lookup must return the newly assigned fragment, not a stale one
        self.assertIs(new_anim, res)
class PropertyTest(unittest.TestCase):
    """Mapper-property descriptors construct/expose their backing fields."""

    class TestFragment(Fragment):
        # default backing medal list; constructor keywords below replace
        # individual time/score fields while keeping the others
        medals_list = [
            Container(time=40, score=200),
            Container(time=30, score=400),
            Container(time=20, score=700),
            Container(time=10, score=1000),
        ]
        # descriptors mapping friendly names onto the backing list attributes
        modes = ModesMapperProperty('modes_list')
        times = MedalTimesMapperProperty('medals_list')
        scores = MedalScoresMapperProperty('medals_list')

    class NamedPropFragment(NamedPropertiesFragment):
        # named property keyed by the 'testValue' entry
        test = ByteNamedProperty('testValue')

    def test_create_modes(self):
        frag = self.TestFragment(modes={})
        self.assertEqual(list(frag.modes_list), [])

    def test_create_times(self):
        # times keyword rewrites only the 'time' field of each medal
        frag = self.TestFragment(times=(50, 49, 48, 47))
        self.assertEqual(frag.medals_list, [
            Container(time=50, score=200),
            Container(time=49, score=400),
            Container(time=48, score=700),
            Container(time=47, score=1000),
        ])

    def test_create_scores(self):
        # scores keyword rewrites only the 'score' field of each medal
        frag = self.TestFragment(scores=(100, 101, 102, 103))
        self.assertEqual(frag.medals_list, [
            Container(time=40, score=100),
            Container(time=30, score=101),
            Container(time=20, score=102),
            Container(time=10, score=103),
        ])

    def test_named_property(self):
        frag = self.NamedPropFragment(test=34)
        self.assertEqual(frag.test, 34)
class SetAbilitiesTriggerDefaultsTest(common.WriteReadTest):
    """Round-trip of a build-6641 SetAbilitiesTrigger with default values."""

    filename = "tests/in/customobject/SetAbilitiesTrigger 6641 default with cube.bytes"
    read_obj = PROBER.read

    def verify_obj(self, obj):
        # the trigger fragment lives on the second child in this fixture
        frag = obj.children[1]['SetAbilitiesTrigger']
        self.assertEqual(frag.enable_flying, 1)
        self.assertEqual(frag.enable_jumping, 1)
        self.assertEqual(frag.enable_boosting, 1)
        self.assertEqual(frag.enable_jet_rotating, 1)
        self.assertEqual(frag.infinite_cooldown, 0)
        self.assertAlmostEqual(frag.delay, 0.0)
        self.assertEqual(frag.show_ability_alert, 1)
        self.assertEqual(frag.bloom_out, 0)
        self.assertEqual(frag.play_sound, 1)
        self.assertEqual(frag.show_car_screen_image, 1)
        self.assertEqual(frag.timer_text, "downloading")
        self.assertEqual(frag.ignore_in_arcade, 0)
        self.assertEqual(frag.use_slow_mo, 0)
        self.assertAlmostEqual(frag.delay_before_slow_mo, 0.0)
        self.assertAlmostEqual(frag.slow_mo_time_scale, 0.25)
        self.assertAlmostEqual(frag.slow_mo_duration, 2.0)
        self.assertAlmostEqual(frag.glitch_duration_after, 0.66)
        self.assertEqual(frag.visuals_only, 0)
class SetAbilitiesTriggerChangedTest(common.WriteReadTest):

    filename = "tests/in/customobject/SetAbilitiesTrigger 6641 changed with cube.bytes"

    read_obj = PROBER.read

    def verify_obj(self, obj):
        """Check the edited SetAbilitiesTrigger fragment on child #0."""
        frag = obj.children[0]['SetAbilitiesTrigger']
        # Exact-valued attributes (flags and text).
        for attr, expected in [
                ('enable_flying', 0),
                ('enable_jumping', 1),
                ('enable_boosting', 1),
                ('enable_jet_rotating', 0),
                ('infinite_cooldown', 0),
                ('show_ability_alert', 1),
                ('bloom_out', 0),
                ('play_sound', 1),
                ('show_car_screen_image', 1),
                ('timer_text', "testing"),
                ('ignore_in_arcade', 0),
                ('use_slow_mo', 1),
                ('play_slow_mo_audio', 1),
                ('visuals_only', 0)]:
            self.assertEqual(getattr(frag, attr), expected)
        # Float attributes, compared approximately.
        for attr, expected in [
                ('delay', 0.0),
                ('delay_before_slow_mo', 0.5),
                ('slow_mo_time_scale', 0.8),
                ('slow_mo_duration', 10.0),
                ('glitch_duration_after', 0.66)]:
            self.assertAlmostEqual(getattr(frag, attr), expected)
class WarpAnchorDefaultTest(common.WriteReadTest):

    filename = "tests/in/customobject/WarpAnchor 6641 default with sphere.bytes"

    read_obj = PROBER.read

    def verify_obj(self, obj):
        """Check the default WarpAnchor fragment on child #0."""
        frag = obj.children[0]['WarpAnchor']
        # many omitted
        # Enum-like attributes expose both a name and an integer value.
        for attr, name, number in [
                ('trigger_type', 'sphere', 0),
                ('type_of_warp', 'there_and_back', 0),
                ('transition_effect', 'none', 0)]:
            value = getattr(frag, attr)
            self.assertEqual(value, name)
            self.assertEqual(int(value), number)
        self.assertEqual(frag.my_id, 0)
        self.assertEqual(frag.other_id, 0)
        self.assertAlmostEqual(frag.glitch_intensity, 1.1)
        self.assertEqual(frag.countdown_time_scale, 1.0)
        self.assertEqual(frag.disable_countdown, 0)
class WarpAnchorChangedTest(common.WriteReadTest):

    filename = "tests/in/customobject/WarpAnchor 6641 changed with cube.bytes"

    read_obj = PROBER.read

    def verify_obj(self, obj):
        """Check the edited WarpAnchor fragment on child #1."""
        frag = obj.children[1]['WarpAnchor']
        # many omitted
        # Enum-like attributes expose both a name and an integer value.
        for attr, name, number in [
                ('trigger_type', 'box', 1),
                ('type_of_warp', 'there_and_back', 0),
                ('transition_effect', 'teleport_virus', 4)]:
            value = getattr(frag, attr)
            self.assertEqual(value, name)
            self.assertEqual(int(value), number)
        self.assertEqual(frag.my_id, 15)
        self.assertEqual(frag.other_id, 20)
        self.assertAlmostEqual(frag.glitch_intensity, 4.0)
        self.assertEqual(frag.countdown_time_scale, 2.0)
        self.assertEqual(frag.disable_countdown, 0)
# vim:set sw=4 ts=8 sts=4 et:
| |
#!/usr/bin/env python
"""Generate c++ and LCM definitions for the LCM Vector concept.
"""
import argparse
import os
import subprocess
def put(fileobj, text, newlines_after=0):
    """Write *text* with surrounding newlines stripped, then append
    *newlines_after* newline characters."""
    trimmed = text.strip('\n')
    fileobj.write(trimmed + '\n' * newlines_after)
# C++ template: opens the struct of row-index constants for one vector type.
INDICES_BEGIN = """
/// Describes the row indices of a %(camel)s.
struct DRAKECARS_EXPORT %(indices)s {
/// The total number of rows (coordinates).
static const int kNumCoordinates = %(nfields)d;
// The index of each individual coordinate.
"""

# One index constant per field: in-class declaration and the matching
# out-of-line storage definition for the .cc file.
INDICES_FIELD = """static const int %(kname)s = %(k)d;"""
INDICES_FIELD_STORAGE = """const int %(indices)s::%(kname)s;"""

# Closes the indices struct.
INDICES_END = """
};
"""
def to_kname(field):
    """Convert a snake_case field name into its kCamelCase constant name."""
    return 'k' + ''.join(word.capitalize() for word in field.split('_'))
def generate_indices(context, fields):
    """Write the index-constants struct declaration to the header file.

    context -- typically a caller's locals(); must supply 'header', 'camel'
    and 'indices'.  fields -- ordered field names.
    """
    # pylint: disable=unused-variable
    # NOTE: local names below are load-bearing -- the templates are rendered
    # with `% locals()`, so camel/indices/nfields/kname/k must keep exactly
    # these names.
    header = context["header"]
    camel = context["camel"]
    indices = context["indices"]
    nfields = len(fields)
    kname = "kNumCoordinates"
    put(header, INDICES_BEGIN % locals(), 1)
    for k, field in enumerate(fields):
        kname = to_kname(field)
        put(header, INDICES_FIELD % locals(), 1)
    put(header, INDICES_END % locals(), 2)
def generate_indices_storage(context, fields):
    """Write the out-of-line storage definitions for the index constants
    (kNumCoordinates first, then one per field) to the .cc file.

    context -- typically a caller's locals(); must supply 'cc' and 'indices'.
    """
    # pylint: disable=unused-variable
    # NOTE: rendered with `% locals()` -- local names must stay as-is.
    cc = context["cc"]
    camel = context["camel"]
    indices = context["indices"]
    nfields = len(fields)
    kname = "kNumCoordinates"
    put(cc, INDICES_FIELD_STORAGE % locals(), 1)
    for k, field in enumerate(fields):
        kname = to_kname(field)
        put(cc, INDICES_FIELD_STORAGE % locals(), 1)
    put(cc, '', 1)
# C++ template: zero-initializing default constructor for the vector class.
DEFAULT_CTOR = """
/// Default constructor. Sets all rows to zero.
%(camel)s() : systems::BasicStateAndOutputVector<T>(K::kNumCoordinates) {
this->SetFromVector(VectorX<T>::Zero(K::kNumCoordinates));
}
"""


def generate_default_ctor(context, _):
    """Write the default constructor to the header file; fields are unused."""
    header = context["header"]
    put(header, DEFAULT_CTOR % context, 2)
# C++ templates: doxygen group of one getter/setter pair per field.
ACCESSOR_BEGIN = """
/// @name Getters and Setters
//@{
"""
ACCESSOR = """
const T %(field)s() const { return this->GetAtIndex(K::%(kname)s); }
void set_%(field)s(const T& %(field)s) {
this->SetAtIndex(K::%(kname)s, %(field)s);
}
"""
ACCESSOR_END = """
//@}
"""


def generate_accessors(context, fields):
    """Write a getter and setter for every field to the header file."""
    # pylint: disable=unused-variable
    # NOTE: rendered with `% locals()` -- field/kname must keep these names.
    header = context["header"]
    indices = context["indices"]
    put(header, ACCESSOR_BEGIN % locals(), 1)
    for field in fields:
        kname = to_kname(field)
        put(header, ACCESSOR % locals(), 1)
    put(header, ACCESSOR_END % locals(), 2)
# C++ templates: encode() copies the wrapper's fields into the LCM message.
ENCODE_BEGIN = """
template <typename ScalarType>
bool encode(const double& t, const %(camel)s<ScalarType>& wrap,
// NOLINTNEXTLINE(runtime/references)
drake::lcmt_%(snake)s_t& msg) {
msg.timestamp = static_cast<int64_t>(t * 1000);
"""
ENCODE_FIELD = """ msg.%(field)s = wrap.%(field)s();"""
ENCODE_END = """
return true;
}
"""


def generate_encode(context, fields):
    """Write the LCM encode() function to the header file."""
    header = context["header"]
    put(header, ENCODE_BEGIN % context, 1)
    # pylint: disable=unused-variable
    # NOTE: ENCODE_FIELD is rendered with `% locals()`; 'field' must keep
    # exactly this name.
    for k, field in enumerate(fields):
        put(header, ENCODE_FIELD % locals(), 1)
    put(header, ENCODE_END % context, 2)
# C++ templates: decode() copies the LCM message back into the wrapper.
DECODE_BEGIN = """
template <typename ScalarType>
bool decode(const drake::lcmt_%(snake)s_t& msg,
// NOLINTNEXTLINE(runtime/references)
double& t,
// NOLINTNEXTLINE(runtime/references)
%(camel)s<ScalarType>& wrap) {
t = static_cast<double>(msg.timestamp) / 1000.0;
"""
DECODE_FIELD = """ wrap.set_%(field)s(msg.%(field)s);"""
DECODE_END = """
return true;
}
"""


def generate_decode(context, fields):
    """Write the LCM decode() function to the header file."""
    header = context["header"]
    put(header, DECODE_BEGIN % context, 1)
    # pylint: disable=unused-variable
    # NOTE: DECODE_FIELD is rendered with `% locals()`; 'field' must keep
    # exactly this name.
    for k, field in enumerate(fields):
        put(header, DECODE_FIELD % locals(), 1)
    put(header, DECODE_END % context, 2)
# Top of the generated header: pragma, includes, namespace open.
HEADER_PREAMBLE = """
#pragma once
// This file is generated by a script. Do not edit!
// See %(generator)s.
#include <stdexcept>
#include <string>
#include <Eigen/Core>
#include "lcmtypes/drake/lcmt_%(snake)s_t.hpp"
#include "drake/drakeCars_export.h"
#include "drake/systems/framework/basic_state_and_output_vector.h"
namespace drake {
"""

# Opens the generated vector class.
CLASS_BEGIN = """
/// Specializes BasicStateAndOutputVector with specific getters and setters.
template <typename T>
class %(camel)s : public systems::BasicStateAndOutputVector<T> {
public:
// An abbreviation for our row index constants.
typedef %(indices)s K;
"""

# Closes the class, adding the LCMVector-concept typedef and channel name.
CLASS_END = """
/// @name Implement the LCMVector concept
//@{
typedef drake::lcmt_%(snake)s_t LCMMessageType;
static std::string channel() { return "%(screaming_snake)s"; }
//@}
};
"""

HEADER_POSTAMBLE = """
} // namespace drake
"""

# Top and bottom of the generated .cc file.
CC_PREAMBLE = """
#include "drake/examples/Cars/gen/%(snake)s.h"
// This file is generated by a script. Do not edit!
// See %(generator)s.
namespace drake {
"""

CC_POSTAMBLE = """
} // namespace drake
"""

# Top and bottom of the generated LCM type definition.
LCMTYPE_PREAMBLE = """
// This file is generated by %(generator)s. Do not edit.
package drake;
struct lcmt_%(snake)s_t
{
int64_t timestamp;
"""

LCMTYPE_POSTAMBLE = """
}
"""
def generate_code(args):
    """Generate the header, .cc, and LCM type files described by *args*.

    args -- argparse namespace with title, fields, header_dir, lcmtype_dir.
    Writes <snake>.h and <snake>.cc into header_dir and lcmt_<snake>_t.lcm
    into lcmtype_dir.
    """
    # pylint: disable=unused-variable
    # NOTE: many local names below (generator, camel, indices, snake,
    # screaming_snake, header, cc, lcmtype) are load-bearing: the templates
    # are rendered with `% locals()`.
    # check_output returns bytes on Python 3; decode so the str path join
    # and replace below work.
    drake_dist_dir = subprocess.check_output(
        "git rev-parse --show-toplevel".split()).strip().decode('utf-8')
    generator = os.path.abspath(__file__).replace(
        os.path.join(drake_dist_dir, ''), '')

    title_phrase = args.title.split()
    camel = ''.join([x.capitalize() for x in title_phrase])
    indices = camel + 'Indices'
    snake = '_'.join([x.lower() for x in title_phrase])
    screaming_snake = '_'.join([x.upper() for x in title_phrase])

    header_file = os.path.abspath(
        os.path.join(args.header_dir, "%s.h" % snake))
    cc_file = os.path.abspath(
        os.path.join(args.header_dir, "%s.cc" % snake))
    lcm_file = os.path.abspath(
        os.path.join(args.lcmtype_dir, "lcmt_%s_t.lcm" % snake))

    # Context managers ensure the generated files are flushed and closed
    # even if a template fails to render (the originals were never closed).
    with open(header_file, 'w') as header, \
            open(cc_file, 'w') as cc, \
            open(lcm_file, 'w') as lcmtype:
        put(header, HEADER_PREAMBLE % locals(), 2)
        generate_indices(locals(), args.fields)
        put(header, CLASS_BEGIN % locals(), 2)
        generate_default_ctor(locals(), args.fields)
        generate_accessors(locals(), args.fields)
        put(header, CLASS_END % locals(), 2)
        generate_encode(locals(), args.fields)
        generate_decode(locals(), args.fields)
        put(header, HEADER_POSTAMBLE % locals(), 1)

        put(cc, CC_PREAMBLE % locals(), 2)
        generate_indices_storage(locals(), args.fields)
        put(cc, CC_POSTAMBLE % locals(), 1)

        put(lcmtype, LCMTYPE_PREAMBLE % locals(), 1)
        for field in args.fields:
            put(lcmtype, " double %s;" % field, 1)
        put(lcmtype, LCMTYPE_POSTAMBLE % locals(), 1)
def main():
    """Parse the command line and run the generator."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag(s), keyword options) for every supported argument, in help order.
    argument_spec = (
        (('--header-dir',),
         dict(help="output directory for header file", default=".")),
        (('--lcmtype-dir',),
         dict(help="output directory for lcm file", default=".")),
        (('--title',),
         dict(help="title phrase, from which type names will be made")),
        (('fields',),
         dict(metavar='FIELD', nargs='+', help="field names for vector")),
    )
    for flags, options in argument_spec:
        parser.add_argument(*flags, **options)
    generate_code(parser.parse_args())
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| |
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2010-2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
"""This is khmer; please see http://khmer.readthedocs.org/."""
from __future__ import print_function
from math import log
import json
from khmer._khmer import Countgraph as _Countgraph
from khmer._khmer import GraphLabels as _GraphLabels
from khmer._khmer import Nodegraph as _Nodegraph
from khmer._khmer import HLLCounter as _HLLCounter
from khmer._khmer import ReadAligner as _ReadAligner
from khmer._khmer import forward_hash
# tests/test_{functions,countgraph,counting_single}.py
from khmer._khmer import forward_hash_no_rc # tests/test_functions.py
from khmer._khmer import reverse_hash # tests/test_functions.py
# tests/counting_single.py
from khmer._khmer import hash_murmur3 # tests/test_functions.py
from khmer._khmer import hash_no_rc_murmur3 # tests/test_functions.py
from khmer._khmer import get_version_cpp as __version_cpp__
# tests/test_version.py
from khmer._khmer import ReadParser # sandbox/to-casava-1.8-fastq.py
# tests/test_read_parsers.py,scripts/{filter-abund-single,load-graph}.py
# scripts/{abundance-dist-single,load-into-counting}.py
import sys
from struct import pack, unpack
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def load_nodegraph(filename):
    """Load a nodegraph object from the given filename and return it.

    Keyword argument:
    filename -- the name of the nodegraph file
    """
    # The size arguments are placeholders; load() restores the real layout.
    graph = _Nodegraph(1, [1])
    graph.load(filename)
    return graph
def load_countgraph(filename):
    """Load a countgraph object from the given filename and return it.

    Keyword argument:
    filename -- the name of the countgraph file
    """
    # The size arguments are placeholders; load() restores the real layout.
    graph = _Countgraph(1, [1])
    graph.load(filename)
    return graph
def extract_nodegraph_info(filename):
    """Open the given nodegraph file and return a tuple of information.

    Returns: the k-mer size, the table size (rounded to hundreds), the
    number of tables, the version of the table format, the type of table
    flag, and the occupied count.

    Raises ValueError if the file cannot be read or carries the wrong
    signature.

    Keyword argument:
    filename -- the name of the nodegraph file to inspect
    """
    # file-level import only provides pack/unpack; struct.error is needed
    # to detect truncated headers.
    import struct

    uint_size = len(pack('I', 0))
    uchar_size = len(pack('B', 0))
    ulonglong_size = len(pack('Q', 0))
    try:
        with open(filename, 'rb') as nodegraph:
            signature, = unpack('4s', nodegraph.read(4))
            version, = unpack('B', nodegraph.read(1))
            ht_type, = unpack('B', nodegraph.read(1))
            ksize, = unpack('I', nodegraph.read(uint_size))
            n_tables, = unpack('B', nodegraph.read(uchar_size))
            occupied, = unpack('Q', nodegraph.read(ulonglong_size))
            table_size, = unpack('Q', nodegraph.read(ulonglong_size))
    except (OSError, struct.error) as exc:
        # BUGFIX: the previous bare `except:` also swallowed the signature
        # ValueError below, replacing its specific message with this one.
        raise ValueError(
            "Presence table '{}' is corrupt ".format(filename)) from exc
    if signature != b"OXLI":
        raise ValueError("Node graph '{}' is missing file type "
                         "signature".format(filename) + str(signature))
    return ksize, round(table_size, -2), n_tables, version, ht_type, occupied
def extract_countgraph_info(filename):
    """Open the given countgraph file and return a tuple of information.

    Return: the k-mer size, the table size (rounded to hundreds), the number
    of tables, the bigcount flag, the version of the table format, the type
    of table flag, and the occupied count.

    Raises ValueError if the file cannot be read or carries the wrong
    signature.

    Keyword argument:
    filename -- the name of the countgraph file to inspect
    """
    # file-level import only provides pack/unpack; struct.error is needed
    # to detect truncated headers.
    import struct

    uint_size = len(pack('I', 0))
    ulonglong_size = len(pack('Q', 0))
    try:
        with open(filename, 'rb') as countgraph:
            signature, = unpack('4s', countgraph.read(4))
            version, = unpack('B', countgraph.read(1))
            ht_type, = unpack('B', countgraph.read(1))
            use_bigcount, = unpack('B', countgraph.read(1))
            ksize, = unpack('I', countgraph.read(uint_size))
            n_tables, = unpack('B', countgraph.read(1))
            occupied, = unpack('Q', countgraph.read(ulonglong_size))
            table_size, = unpack('Q', countgraph.read(ulonglong_size))
    except (OSError, struct.error) as exc:
        # BUGFIX: the previous bare `except:` also swallowed the signature
        # ValueError below, replacing its specific message with this one.
        raise ValueError(
            "Count graph file '{}' is corrupt ".format(filename)) from exc
    if signature != b'OXLI':
        raise ValueError("Count graph file '{}' is missing file type "
                         "signature. ".format(filename) + str(signature))
    return ksize, round(table_size, -2), n_tables, use_bigcount, version, \
        ht_type, occupied
def calc_expected_collisions(graph, force=False, max_false_pos=.2):
    """Do a quick & dirty expected collision rate calculation on a graph

    Also check to see that collision rate is within threshold; if it is not,
    an error banner is printed to stderr and (unless force is true) the
    process exits.

    Keyword argument:
    graph: the countgraph or nodegraph object to inspect
    """
    table_sizes = graph.hashsizes()
    n_tables = float(len(table_sizes))
    occupancy = float(graph.n_occupied())
    # Per-table false-positive rate, raised to the number of tables.
    fp_all = (occupancy / min(table_sizes)) ** n_tables
    if fp_all <= max_false_pos:
        return fp_all
    err = sys.stderr
    print("**", file=err)
    print("** ERROR: the graph structure is too small for ", file=err)
    print("** this data set. Increase data structure size", file=err)
    print("** with --max_memory_usage/-M.", file=err)
    print("**", file=err)
    print("** Do not use these results!!", file=err)
    print("**", file=err)
    print("** (estimated false positive rate of %.3f;" % fp_all,
          file=err, end=' ')
    print("max recommended %.3f)" % max_false_pos, file=err)
    print("**", file=err)
    if not force:
        sys.exit(1)
    return fp_all
def is_prime(number):
    """Check if a number is prime.

    Trial division by odd candidates up to the integer square root.
    """
    # math.isqrt is exact; the previous int(number ** 0.5) can be off by
    # one for large integers because of float rounding.
    from math import isqrt

    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    return all(number % candidate
               for candidate in range(3, isqrt(number) + 1, 2))
def get_n_primes_near_x(number, target):
    """Backward-find primes smaller than target.

    Step backwards until a number of primes (other than 2) have been
    found that are smaller than the target and return them.

    Keyword arguments:
    number -- the number of primes to find
    target -- the number to step backwards from
    """
    if target == 1 and number == 1:
        return [1]
    found = []
    # Start at the largest odd value below target and step down by two.
    candidate = target - 1 if target % 2 == 0 else target - 2
    while len(found) != number and candidate > 0:
        if is_prime(candidate):
            found.append(candidate)
        candidate -= 2
    if len(found) != number:
        raise RuntimeError("unable to find %d prime numbers < %d" % (number,
                                                                     target))
    return found
# Expose the cpython objects with __new__ implementations.
# These constructors add the functionality provided by the existing
# factory methods to the constructors defined over in cpython land.
# Additional functionality can be added to these classes as appropriate.
class Countgraph(_Countgraph):
    # Wrapper converting (k, starting_size, n_tables) into the list of prime
    # table sizes the underlying C implementation expects.

    def __new__(cls, k, starting_size, n_tables):
        primes = get_n_primes_near_x(n_tables, starting_size)
        c = _Countgraph.__new__(cls, k, primes)
        # Remember the chosen sizes for later inspection.
        c.primes = primes
        return c
class GraphLabels(_GraphLabels):
    # Labeled graph backed by a (presence-only) Nodegraph.

    def __new__(cls, k, starting_size, n_tables):
        hb = Nodegraph(k, starting_size, n_tables)
        c = _GraphLabels.__new__(cls, hb)
        # Keep the backing graph alive and reachable.
        c.graph = hb
        return c
class CountingGraphLabels(_GraphLabels):
    # Labeled graph backed by a Countgraph (k-mer counts, not just presence).

    def __new__(cls, k, starting_size, n_tables):
        primes = get_n_primes_near_x(n_tables, starting_size)
        hb = _Countgraph(k, primes)
        c = _GraphLabels.__new__(cls, hb)
        # Keep the backing graph alive and reachable.
        c.graph = hb
        return c
class Nodegraph(_Nodegraph):
    # Wrapper converting (k, starting_size, n_tables) into the list of prime
    # table sizes the underlying C implementation expects.

    def __new__(cls, k, starting_size, n_tables):
        primes = get_n_primes_near_x(n_tables, starting_size)
        c = _Nodegraph.__new__(cls, k, primes)
        # Remember the chosen sizes for later inspection.
        c.primes = primes
        return c
class HLLCounter(_HLLCounter):

    """HyperLogLog counter.

    A HyperLogLog counter is a probabilistic data structure specialized on
    cardinality estimation.
    There is a precision/memory consumption trade-off: error rate determines
    how much memory is consumed.

    # Creating a new HLLCounter:
    >>> khmer.HLLCounter(error_rate, ksize)
    where the default values are:
    - error_rate: 0.01
    - ksize: 20
    """

    def __len__(self):
        # len() of the counter is its estimated cardinality.
        return self.estimate_cardinality()
class ReadAligner(_ReadAligner):

    """Sequence to graph aligner.

    ReadAligner uses a Countgraph (the counts of k-mers in the target DNA
    sequences) as an implicit De Bruijn graph. Input DNA sequences are aligned
    to this graph via a paired Hidden Markov Model.
    The HMM is configured upon class instantiation; default parameters for the
    HMM are provided in 'defaultTransitionProbabilities' and
    'defaultScoringMatrix'.
    The main method is 'align'.
    """

    # Sparse transition matrix in log2 space; row order is
    # M_, Ir_, Ig_, Mu_, Iru_, Igu_ and the columns of each row are noted
    # in the trailing comments.
    defaultTransitionProbabilities = (  # _M, _Ir, _Ig, _Mu, _Iru, _Igu
        (log(0.9848843, 2), log(0.0000735, 2), log(0.0000334, 2),
         log(0.0150068, 2), log(0.0000017, 2), log(0.0000003, 2)),  # M_
        (log(0.5196194, 2), log(0.4647955, 2), log(0.0059060, 2),
         log(0.0096792, 2)),  # Ir_
        (log(0.7611255, 2), log(0.2294619, 2), log(0.0072673, 2),
         log(0.0021453, 2)),  # Ig_
        (log(0.0799009, 2), log(0.0000262, 2), log(0.0001836, 2),
         log(0.9161349, 2), log(0.0033370, 2), log(0.0004173, 2)),  # Mu_
        (log(0.1434529, 2), log(0.0036995, 2), log(0.2642928, 2),
         log(0.5885548, 2)),  # Iru_
        (log(0.1384551, 2), log(0.0431328, 2), log(0.6362921, 2),
         log(0.1821200, 2))  # Igu_
    )

    # log2 scores: trusted match, trusted mismatch, untrusted match,
    # untrusted mismatch.
    defaultScoringMatrix = [
        log(0.955, 2), log(0.04, 2), log(0.004, 2), log(0.001, 2)]

    def __new__(cls, count_graph, trusted_cov_cutoff, bits_theta,
                **kwargs):
        # An explicit 'filename' kwarg wins over individually supplied
        # matrices; otherwise fall back to the class defaults.
        if 'filename' in kwargs:
            with open(kwargs.pop('filename')) as paramfile:
                params = json.load(paramfile)
            scoring_matrix = params['scoring_matrix']
            transition_probabilities = params['transition_probabilities']
        else:
            if 'scoring_matrix' in kwargs:
                scoring_matrix = kwargs.pop('scoring_matrix')
            else:
                scoring_matrix = ReadAligner.defaultScoringMatrix
            if 'transition_probabilities' in kwargs:
                transition_probabilities = kwargs.pop(
                    'transition_probabilities')
            else:
                transition_probabilities = \
                    ReadAligner.defaultTransitionProbabilities
        r = _ReadAligner.__new__(cls, count_graph, trusted_cov_cutoff,
                                 bits_theta, scoring_matrix,
                                 transition_probabilities)
        # Keep the backing graph alive and reachable.
        r.graph = count_graph
        return r

    def __init__(self, *args, **kwargs):
        """
        ReadAligner initialization.

        HMM state notation abbreviations:
        M_t - trusted match; M_u - untrusted match
        Ir_t - trusted read insert; Ir_u - untrusted read insert
        Ig_t - trusted graph insert; Ig_u - untrusted graph insert

        Keyword arguments:
        filename - a path to a JSON encoded file providing the scoring matrix
        for the HMM in an entry named 'scoring_matrix' and the transition
        probabilities for the HMM in an entry named
        'transition_probabilities'. If provided the remaining keyword
        arguments are ignored. (default: None)
        scoring_matrix - a list of floats: trusted match, trusted mismatch,
        untrusted match, untrusted mismatch. (default:
        ReadAligner.defaultScoringMatrix)
        transition_probabilities - A sparse matrix as a tuple of six tuples.
        The inner tuples contain 6, 4, 4, 6, 4, and 4 floats respectively.
        Transition are notated as 'StartState-NextState':
        (
        ( M_t-M_t, M_t-Ir_t, M_t-Ig_t, M_t-M_u, M_t-Ir_u, M_t-Ig_u),
        (Ir_t-M_t, Ir_t-Ir_t, Ir_t-M_u, Ir_t-Ir_u ),
        (Ig_t-M_t, , Ig_t-Ig_t, Ig_t-M_u, Ig_t-Ig_u),
        ( M_u-M_t, M_u-Ir_t, M_u-Ig_t, M_u-M_u, M_u-Ir_u, M_u-Ig_u),
        (Ir_u-M_t, Ir_u-Ir_t, Ir_u-M_u, Ir_u-Ir_u ),
        (Ig_u-M_t, , Ig_u-Ig_t, Ig_u-M_u, Ig_u-Ig_u)
        )
        (default: ReadAligner.defaultTransitionProbabilities)

        Note: the underlying CPython implementation creates the ReadAligner
        during the __new__ process and so the class initialization actually
        occurs there. Instantiation is documented here in __init__ as this is
        the traditional way.
        """
        _ReadAligner.__init__(self)
| |
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import base64
import ipaddress
import threading
import sys
import signal
import time
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
import simplejson as json
from keylime.db.registrar_db import RegistrarMain
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import cloud_verifier_common
from keylime import config
from keylime import crypto
from keylime.tpm import tpm2_objects
from keylime import keylime_logging
from keylime.tpm.tpm_main import tpm
from keylime import api_version as keylime_api_version
# Module-level logger shared by all handlers in this service.
logger = keylime_logging.init_logging('registrar')

try:
    # Create the database engine once at import time; every request handler
    # opens sessions against it via SessionManager.
    engine = DBEngineManager().make_engine('registrar')
except SQLAlchemyError as err:
    # Without a database the registrar cannot run at all.
    logger.error('Error creating SQL engine: %s', err)
    sys.exit(1)
class ProtectedHandler(BaseHTTPRequestHandler, SessionManager):
    """Request handlers for the TLS-protected registrar interface.

    Supports GET (agent status / uuid listing) and DELETE (agent removal)
    on /agents; every other verb is rejected with a 405 response.
    """

    def do_HEAD(self):
        """HEAD not supported"""
        config.echo_json_response(self, 405, "HEAD not supported")

    def do_PATCH(self):
        """PATCH not supported"""
        config.echo_json_response(self, 405, "PATCH not supported")

    def do_GET(self):
        """This method handles the GET requests to retrieve status on agents from the Registrar Server.

        Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
        will return errors. agents requests require a single agent_id parameter which identifies the
        agent to be returned. If the agent_id is not found, a 404 response is returned.
        """
        session = SessionManager().make_session(engine)
        rest_params = config.get_restful_params(self.path)
        if rest_params is None:
            config.echo_json_response(
                self, 405, "Not Implemented: Use /agents/ interface")
            return

        if not rest_params["api_version"]:
            config.echo_json_response(self, 400, "API Version not supported")
            return

        if "agents" not in rest_params:
            config.echo_json_response(self, 400, "uri not supported")
            logger.warning('GET returning 400 response. uri not supported: %s', self.path)
            return

        agent_id = rest_params["agents"]

        if agent_id is not None:
            try:
                agent = session.query(RegistrarMain).filter_by(
                    agent_id=agent_id).first()
            except SQLAlchemyError as e:
                # BUGFIX: the error was previously only logged, leaving
                # 'agent' unbound and crashing with NameError on the next
                # statement; answer with a 500 instead.
                logger.error('SQLAlchemy Error: %s', e)
                config.echo_json_response(self, 500, "Database error")
                return

            if agent is None:
                config.echo_json_response(self, 404, "agent_id not found")
                logger.warning('GET returning 404 response. agent_id %s not found.', agent_id)
                return

            if not agent.active:
                config.echo_json_response(self, 404, "agent_id not yet active")
                logger.warning('GET returning 404 response. agent_id %s not yet active.', agent_id)
                return

            response = {
                'aik_tpm': agent.aik_tpm,
                'ek_tpm': agent.ek_tpm,
                'ekcert': agent.ekcert,
                'ip': agent.ip,
                'port': agent.port,
                'regcount': agent.regcount,
            }

            if agent.virtual:
                response['provider_keys'] = agent.provider_keys

            config.echo_json_response(self, 200, "Success", response)
            logger.info('GET returning 200 response for agent_id: %s', agent_id)
        else:
            # No agent_id given: return the available registered uuids
            # from the DB.
            json_response = session.query(RegistrarMain.agent_id).all()
            return_response = [item[0] for item in json_response]
            config.echo_json_response(self, 200, "Success", {
                'uuids': return_response})
            logger.info('GET returning 200 response for agent_id list')
        return

    def do_POST(self):
        """POST not supported"""
        config.echo_json_response(
            self, 405, "POST not supported via TLS interface")

    def do_PUT(self):
        """PUT not supported"""
        config.echo_json_response(
            self, 405, "PUT not supported via TLS interface")

    def do_DELETE(self):
        """This method handles the DELETE requests to remove agents from the Registrar Server.

        Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
        agents requests require a single agent_id parameter which identifies the agent to be deleted.
        """
        session = SessionManager().make_session(engine)
        rest_params = config.get_restful_params(self.path)
        if rest_params is None:
            config.echo_json_response(
                self, 405, "Not Implemented: Use /agents/ interface")
            return

        if not rest_params["api_version"]:
            config.echo_json_response(self, 400, "API Version not supported")
            return

        if "agents" not in rest_params:
            config.echo_json_response(self, 400, "URI not supported")
            logger.warning('DELETE agent returning 400 response. uri not supported: %s', self.path)
            return

        agent_id = rest_params["agents"]

        if agent_id is not None:
            if session.query(RegistrarMain).filter_by(agent_id=agent_id).delete():
                try:
                    session.commit()
                except SQLAlchemyError as e:
                    # BUGFIX: a failed commit used to fall through and still
                    # report 200 "Success"; report a server error instead.
                    logger.error('SQLAlchemy Error: %s', e)
                    config.echo_json_response(self, 500, "Database error")
                    return
                # send response
                config.echo_json_response(self, 200, "Success")
                return
            # nothing was deleted: unknown agent_id
            config.echo_json_response(self, 404)
            return

        config.echo_json_response(self, 404)

    # pylint: disable=W0622
    def log_message(self, format, *args):
        # Silence the default per-request stderr logging from
        # BaseHTTPRequestHandler; the module logger is used instead.
        return
class UnprotectedHandler(BaseHTTPRequestHandler, SessionManager):
def do_HEAD(self):
    """HEAD not supported"""
    # The unprotected registrar port serves nothing via HEAD.
    config.echo_json_response(self, 405, "HEAD not supported")
def do_PATCH(self):
    """PATCH not supported"""
    # The unprotected registrar port serves nothing via PATCH.
    config.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
    """This method handles the GET requests to the unprotected side of the Registrar Server

    Currently the only supported path is /versions which shows the supported API versions
    """
    params = config.get_restful_params(self.path)
    if params is None:
        config.echo_json_response(
            self, 405, "Not Implemented: Use /version/ interface")
        return
    if "version" not in params:
        config.echo_json_response(self, 400, "URI not supported")
        logger.warning('GET agent returning 400 response. URI not supported: %s', self.path)
        return
    # Report both the current and all supported API versions.
    config.echo_json_response(self, 200, "Success", {
        "current_version": keylime_api_version.current_version(),
        "supported_versions": keylime_api_version.all_versions(),
    })
def do_POST(self):
    """This method handles the POST requests to add agents to the Registrar Server.

    Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's
    will return errors. POST requests require an agent_id identifying the agent to add, and a json
    block sent in the body with 2 entries: ek and aik.
    """
    session = SessionManager().make_session(engine)
    rest_params = config.get_restful_params(self.path)
    if rest_params is None:
        config.echo_json_response(
            self, 405, "Not Implemented: Use /agents/ interface")
        return

    if not rest_params["api_version"]:
        config.echo_json_response(self, 400, "API Version not supported")
        return

    if "agents" not in rest_params:
        config.echo_json_response(self, 400, "uri not supported")
        logger.warning('POST agent returning 400 response. uri not supported: %s', self.path)
        return

    agent_id = rest_params["agents"]
    if agent_id is None:
        config.echo_json_response(self, 400, "agent id not found in uri")
        logger.warning('POST agent returning 400 response. agent id not found in uri %s', self.path)
        return

    try:
        content_length = int(self.headers.get('Content-Length', 0))
        if content_length == 0:
            config.echo_json_response(
                self, 400, "Expected non zero content length")
            logger.warning('POST for %s returning 400 response. Expected non zero content length.', agent_id)
            return

        post_body = self.rfile.read(content_length)
        json_body = json.loads(post_body)

        ekcert = json_body['ekcert']
        aik_tpm = json_body['aik_tpm']

        initialize_tpm = tpm()

        if ekcert is None or ekcert == 'emulator':
            # lazy %-args instead of eager string interpolation
            logger.warning('Agent %s did not submit an ekcert', agent_id)
            ek_tpm = json_body['ek_tpm']
        else:
            if 'ek_tpm' in json_body:
                # This would mean the agent submitted both a non-None ekcert, *and*
                # an ek_tpm... We can deal with it by just ignoring the ek_tpm they sent
                logger.warning('Overriding ek_tpm for agent %s from ekcert', agent_id)

            # If there's an EKCert, we just overwrite their ek_tpm
            # Note, we don't validate the EKCert here, other than the implicit
            # "is it a valid x509 cert" check. So it's still untrusted.
            # This will be validated by the tenant.
            ek509 = load_der_x509_certificate(
                base64.b64decode(ekcert),
                backend=default_backend(),
            )
            ek_tpm = base64.b64encode(
                tpm2_objects.ek_low_tpm2b_public_from_pubkey(
                    ek509.public_key(),
                )
            )

        aik_attrs = tpm2_objects.get_tpm2b_public_object_attributes(
            base64.b64decode(aik_tpm),
        )
        if aik_attrs != tpm2_objects.AK_EXPECTED_ATTRS:
            config.echo_json_response(
                self, 400, "Invalid AK attributes")
            logger.warning(
                "Agent %s submitted AIK with invalid attributes! %s (provided) != %s (expected)",
                agent_id,
                tpm2_objects.object_attributes_description(aik_attrs),
                tpm2_objects.object_attributes_description(tpm2_objects.AK_EXPECTED_ATTRS),
            )
            return

        # try to encrypt the AIK
        (blob, key) = initialize_tpm.encryptAIK(
            agent_id,
            base64.b64decode(ek_tpm),
            base64.b64decode(aik_tpm),
        )

        # special behavior if we've registered this uuid before
        regcount = 1
        try:
            agent = session.query(RegistrarMain).filter_by(
                agent_id=agent_id).first()
        except NoResultFound:
            agent = None
        except SQLAlchemyError as e:
            logger.error('SQLAlchemy Error: %s', e)
            raise

        if agent is not None:
            # keep track of how many ek-ekcerts have registered on this uuid
            regcount = agent.regcount
            if agent.ek_tpm != ek_tpm or agent.ekcert != ekcert:
                logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
                regcount += 1

            # force overwrite
            logger.info('Overwriting previous registration for this UUID.')
            try:
                session.query(RegistrarMain).filter_by(
                    agent_id=agent_id).delete()
                session.commit()
            except SQLAlchemyError as e:
                logger.error('SQLAlchemy Error: %s', e)
                raise

        # Check for ip and port
        contact_ip = json_body.get('ip', None)
        contact_port = json_body.get('port', None)

        # Validate ip and port
        if contact_ip is not None:
            try:
                # Use parser from the standard library instead of implementing our own
                ipaddress.ip_address(contact_ip)
            except ValueError:
                logger.warning(f"Contact ip for agent {agent_id} is not a valid ip got: {contact_ip}.")
                contact_ip = None
        if contact_port is not None:
            try:
                contact_port = int(contact_port)
                if contact_port < 1 or contact_port > 65535:
                    # BUGFIX: message previously read "between 1 and got:",
                    # dropping the upper bound.
                    logger.warning(f"Contact port for agent {agent_id} is not a number between 1 and 65535 got: {contact_port}.")
                    contact_port = None
            except ValueError:
                logger.warning(f"Contact port for agent {agent_id} is not a valid number got: {contact_port}.")
                contact_port = None

        # Add values to database
        d = {}
        d['agent_id'] = agent_id
        d['ek_tpm'] = ek_tpm
        d['aik_tpm'] = aik_tpm
        d['ekcert'] = ekcert
        d['ip'] = contact_ip
        d['port'] = contact_port
        # NOTE(review): 'virtual' compares against 'virtual' while the
        # no-ekcert branch above accepts 'emulator' -- confirm intended.
        d['virtual'] = int(ekcert == 'virtual')
        d['active'] = int(False)
        d['key'] = key
        d['provider_keys'] = {}
        d['regcount'] = regcount

        try:
            session.add(RegistrarMain(**d))
            session.commit()
        except SQLAlchemyError as e:
            logger.error('SQLAlchemy Error: %s', e)
            raise

        response = {
            'blob': blob,
        }
        config.echo_json_response(self, 200, "Success", response)

        logger.info('POST returning key blob for agent_id: %s', agent_id)
    except Exception as e:
        config.echo_json_response(self, 400, "Error: %s" % e)
        logger.warning("POST for %s returning 400 response. Error: %s", agent_id, e)
        logger.exception(e)
def do_PUT(self):
"""This method handles the PUT requests to add agents to the Registrar Server.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's
will return errors.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('PUT agent returning 400 response. agent id not found in uri %s', self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('PUT for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
auth_tag = json_body['auth_tag']
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound as e:
raise Exception(
"attempting to activate agent before requesting "
"registrar for %s" % agent_id) from e
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if config.STUB_TPM:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
# TODO(kaifeng) Special handling should be removed
if engine.dialect.name == "mysql":
agent.key = agent.key.encode('utf-8')
ex_mac = crypto.do_hmac(agent.key, agent_id)
if ex_mac == auth_tag:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
raise Exception(
f"Auth tag {auth_tag} does not match expected value {ex_mac}")
config.echo_json_response(self, 200, "Success")
logger.info('PUT activated: %s', agent_id)
except Exception as e:
config.echo_json_response(self, 400, "Error: %s" % e)
logger.warning("PUT for %s returning 400 response. Error: %s", agent_id, e)
logger.exception(e)
return
    def do_DELETE(self):
        """DELETE is not supported by the registrar; always answer 405."""
        config.echo_json_response(self, 405, "DELETE not supported")
    # pylint: disable=W0622
    def log_message(self, format, *args):
        # Silence BaseHTTPRequestHandler's default per-request stderr
        # logging; the registrar uses its own logger instead.
        return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class RegistrarServer(ThreadingMixIn, HTTPServer):
    """Threaded HTTP server: every request is handled on its own thread."""

    def __init__(self, server_address, RequestHandlerClass):
        """Constructor overridden to provide ability to read file."""
        # ThreadingMixIn defines no __init__, so super() resolves to
        # HTTPServer, exactly as the explicit call did.
        super().__init__(server_address, RequestHandlerClass)

    def shutdown(self):
        """Stop the serve_forever() loop."""
        super().shutdown()
def do_shutdown(servers):
    """Ask every server in *servers* to stop serving, in order."""
    for srv in servers:
        srv.shutdown()
def start(host, tlsport, port):
    """Main method of the Registrar Server. This method is encapsulated in a function for packaging to allow it to be
    called as a function by an external program.

    Starts the mTLS-protected registrar on *tlsport* and the unprotected
    registrar on *port*, then blocks until both server threads stop or a
    termination signal arrives.
    """
    threads = []
    servers = []
    serveraddr = (host, tlsport)
    RegistrarMain.metadata.create_all(engine, checkfirst=True)
    session = SessionManager().make_session(engine)
    try:
        count = session.query(RegistrarMain.agent_id).count()
    except SQLAlchemyError as e:
        logger.error('SQLAlchemy Error: %s', e)
        # Re-raise instead of falling through: the previous code continued
        # with 'count' unbound, which turned into a confusing NameError.
        raise
    if count > 0:
        logger.info("Loaded %d public keys from database", count)
    # Protected (mTLS) registrar endpoint.
    server = RegistrarServer(serveraddr, ProtectedHandler)
    context = cloud_verifier_common.init_mtls(section='registrar',
                                              generatedir='reg_ca')
    if context is not None:
        server.socket = context.wrap_socket(server.socket, server_side=True)
    thread = threading.Thread(target=server.serve_forever)
    threads.append(thread)
    # start up the unprotected registrar server
    serveraddr2 = (host, port)
    server2 = RegistrarServer(serveraddr2, UnprotectedHandler)
    thread2 = threading.Thread(target=server2.serve_forever)
    threads.append(thread2)
    servers.append(server)
    servers.append(server2)
    logger.info('Starting Cloud Registrar Server on ports %s and %s (TLS) use <Ctrl-C> to stop', port, tlsport)
    keylime_api_version.log_api_versions(logger)
    for thread in threads:
        thread.start()

    def signal_handler(signum, frame):
        del signum, frame
        do_shutdown(servers)
        sys.exit(0)

    # Catch these signals. Note that a SIGKILL cannot be caught, so
    # killing this process with "kill -9" may result in improper shutdown
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGQUIT, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    # keep the main thread active, so it can process the signals and gracefully shutdown
    while True:
        if not any(thread.is_alive() for thread in threads):
            # All threads have stopped
            break
        # Some threads are still going
        time.sleep(1)
    for thread in threads:
        thread.join()
| |
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant import auth, config_entries, core as ha
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers,
permissions as auth_permissions)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, SERVER_PORT, STATE_ON, STATE_OFF)
from homeassistant.helpers import (
area_registry, device_registry, entity, entity_platform, entity_registry,
intent, restore_state, storage)
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT  # bumped by get_test_instance_port() per test instance
_LOGGER = logging.getLogger(__name__)
# Live HomeAssistant instances created by async_test_home_assistant();
# entries are removed again when EVENT_HOMEASSISTANT_CLOSE fires.
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
    """Wrap a callback so it can be invoked safely from any thread.

    The callback must take ``hass`` as its first argument; the call is
    marshalled onto ``hass.loop`` and its result returned synchronously.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule *func* on the event loop and wait for the result."""
        hass = args[0]
        future = run_callback_threadsafe(
            hass.loop, ft.partial(func, *args, **kwargs))
        return future.result()

    return wrapper
def threadsafe_coroutine_factory(func):
    """Wrap a coroutine function so it can be called from any thread.

    The coroutine must take ``hass`` as its first argument; it is run on
    ``hass.loop`` and the wrapper blocks until the result is available.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the coroutine on the event loop and wait for the result."""
        hass = args[0]
        future = run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()

    return wrapper
def get_test_config_dir(*add_path):
    """Return the path to the test configuration directory."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory."""
    if sys.platform == "win32":
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    stop_event = threading.Event()
    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        # Signal stop_hass() that the loop has actually exited.
        stop_event.set()
    orig_stop = hass.stop
    def start_hass(*mocks):
        """Start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
    def stop_hass():
        """Stop hass."""
        orig_stop()
        # Wait for run_loop() to finish before closing the loop.
        stop_event.wait()
        loop.close()
    hass.start = start_hass
    hass.stop = stop_hass
    # Drive the event loop from a dedicated thread so synchronous test
    # code can interact with hass from the main thread.
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir."""
    hass = ha.HomeAssistant(loop)
    # Avoid touching the real configuration on disk.
    hass.config.async_load = Mock()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)
    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task
    def async_add_job(target, *args):
        """Add job."""
        # Mocks are not awaitable; call them and wrap the result.
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)
    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)
    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)
    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    # Fixed, test-friendly location and unit configuration.
    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True
    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running
    # Mock async_start
    orig_start = hass.async_start
    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            await orig_start()
    hass.async_start = mock_async_start
    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
    return hass
def get_test_instance_port():
    """Return unused port for running test instance.
    The socket that holds the default port does not get released when we stop
    HA in a different test case. Until I have figured out what is going on,
    let's run each test on a different port.
    """
    global _TEST_INSTANCE_PORT
    port = _TEST_INSTANCE_PORT + 1
    _TEST_INSTANCE_PORT = port
    return port
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
    """Register a fake service and return the list of recorded calls."""
    recorded = []

    @ha.callback
    def _record_call(call):  # pylint: disable=unnecessary-lambda
        """Append the incoming service call to the log."""
        recorded.append(call)

    hass.services.async_register(
        domain, service, _record_call, schema=schema)
    return recorded


mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Register a fake intent handler and return the handled intents."""
    handled = []

    class MockIntentHandler(intent.IntentHandler):
        """Records every intent it is asked to handle."""
        intent_type = intent_typ

        @asyncio.coroutine
        def async_handle(self, intent):
            """Store the intent and answer with a stock response."""
            handled.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())
    return handled
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Inject an incoming MQTT message into Home Assistant."""
    if isinstance(payload, str):
        payload = payload.encode('utf-8')
    message = mqtt.Message(topic, payload, qos, retain)
    hass.async_run_job(
        hass.data['mqtt']._mqtt_on_message, None, None, message)


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire a time-changed event carrying *time* as UTC."""
    payload = {'now': date_util.as_utc(time)}
    hass.bus.async_fire(EVENT_TIME_CHANGED, payload)


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event for *service* with *info*."""
    event_data = {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, event_data)
def load_fixture(filename):
    """Return the contents of a file from the fixtures directory."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), 'fixtures', filename)
    with open(fixture_path, encoding='utf-8') as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a state-changed event for *new_state* on the bus."""
    event_data = {'entity_id': new_state.entity_id, 'new_state': new_state}
    if old_state:
        event_data['old_state'] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component.

    Sets up the real mqtt component against a patched paho client, then
    replaces hass.data['mqtt'] with a spying MagicMock wrapper so tests
    can assert on its calls.
    """
    if config is None:
        config = {mqtt.CONF_BROKER: 'mock-broker'}
    with patch('paho.mqtt.client.Client') as mock_client:
        # Pretend every client operation succeeds.
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().unsubscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)
        result = yield from async_setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: config
        })
        assert result
        # Wrap the live mqtt object; wraps= forwards real behavior while
        # recording calls.
        hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
                                      wraps=hass.data['mqtt'])
        return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mock a component is setup.

    Raises AssertionError if the component was already set up: a test
    double-mocking a component is almost certainly a bug.
    """
    if component in hass.config.components:
        # The previous code built the AssertionError but never raised it,
        # silently allowing duplicate setups.
        raise AssertionError(
            "Component {} is already setup".format(component))
    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Install a mocked Entity Registry on *hass* and return it."""
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries or OrderedDict()

    async def _provide_registry():
        return registry

    hass.data[entity_registry.DATA_REGISTRY] = hass.loop.create_task(
        _provide_registry())
    return registry
def mock_area_registry(hass, mock_entries=None):
    """Install a mocked Area Registry on *hass* and return it."""
    registry = area_registry.AreaRegistry(hass)
    registry.areas = mock_entries or OrderedDict()

    async def _provide_registry():
        return registry

    hass.data[area_registry.DATA_REGISTRY] = hass.loop.create_task(
        _provide_registry())
    return registry
def mock_device_registry(hass, mock_entries=None):
    """Install a mocked Device Registry on *hass* and return it."""
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries or OrderedDict()

    async def _provide_registry():
        return registry

    hass.data[device_registry.DATA_REGISTRY] = hass.loop.create_task(
        _provide_registry())
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name='Mock Group',
                 policy=system_policies.ADMIN_POLICY):
        """Create a group with test-friendly defaults."""
        kwargs = dict(name=name, policy=policy)
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(self, id=None, is_owner=False, is_active=True,
                 name='Mock User', system_generated=False, groups=None):
        """Create a user with test-friendly defaults."""
        kwargs = dict(
            is_owner=is_owner,
            is_active=is_active,
            name=name,
            system_generated=system_generated,
            groups=groups or [],
            perm_lookup=None,
        )
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Install *policy* as this user's permission set."""
        perms = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
        self._permissions = perms
async def register_auth_provider(hass, config):
    """Create an auth provider from *config* and register it on hass."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config)
    assert provider is not None, 'Invalid config specified'
    registry = hass.auth._providers
    provider_key = (provider.type, provider.id)
    if provider_key in registry:
        raise ValueError('Provider already registered')
    registry[provider_key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    auth_store_obj = auth_mgr._store
    if auth_store_obj._users is None:
        auth_store_obj._set_defaults()
class MockModule:
    """Representation of a fake module."""
    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 platform_schema_base=None, async_setup=None,
                 async_setup_entry=None, async_unload_entry=None,
                 async_migrate_entry=None, async_remove_entry=None):
        """Initialize the mock module.

        Optional attributes are only created when the matching argument
        is supplied, mirroring how a real integration module may or may
        not define each hook.
        """
        self.__name__ = 'homeassistant.components.{}'.format(domain)
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base
        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)
        if async_setup is not None:
            self.async_setup = async_setup
        if setup is None and async_setup is None:
            # No setup supplied: default to a setup that reports success.
            self.async_setup = mock_coro_func(True)
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry
        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry
        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry
class MockPlatform:
    """Provide a fake platform."""
    __name__ = "homeassistant.components.light.bla"
    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None, async_setup_platform=None,
                 async_setup_entry=None, scan_interval=None):
        """Initialize the platform.

        Optional hooks are only attached when supplied, mirroring how a
        real platform module may or may not define them.
        """
        self.DEPENDENCIES = dependencies or []
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval
        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)
        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if setup_platform is None and async_setup_platform is None:
            # Default to a successful no-op async setup.
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""
    def __init__(
        self, hass,
        logger=None,
        domain='test_domain',
        platform_name='test_platform',
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
        async_entities_added_callback=lambda: None
    ):
        """Initialize a mock entity platform with test-friendly defaults."""
        if logger is None:
            logger = logging.getLogger('homeassistant.helpers.entity_platform')
        # Otherwise the constructor will blow up.
        if (isinstance(platform, Mock) and
                isinstance(platform.PARALLEL_UPDATES, Mock)):
            platform.PARALLEL_UPDATES = 0
        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=async_entities_added_callback,
        )
class MockToggleDevice(entity.ToggleEntity):
    """Provide a mock toggle device that records every access."""

    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the device name, recording the access."""
        self.calls.append(('name', {}))
        return self._name

    @property
    def state(self):
        """Return the device state, recording the access."""
        self.calls.append(('state', {}))
        return self._state

    @property
    def is_on(self):
        """Return True if the device is on, recording the access."""
        self.calls.append(('is_on', {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.calls.append(('turn_on', kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.calls.append(('turn_off', kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the most recent call, optionally filtered by method."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        matching = (call for call in reversed(self.calls)
                    if call[0] == method)
        return next(matching, None)
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(self, *, domain='test', data=None, version=1, entry_id=None,
                 source=config_entries.SOURCE_USER, title='Mock Title',
                 state=None, options=None,
                 connection_class=config_entries.CONN_CLASS_UNKNOWN):
        """Initialize a mock config entry.

        ``options`` previously used a shared mutable default (``{}``),
        so entries created in different tests could leak option changes
        into each other; a fresh dict is now created per instance.
        """
        kwargs = {
            'entry_id': entry_id or 'mock-id',
            'domain': domain,
            'data': data or {},
            'options': {} if options is None else options,
            'version': version,
            'title': title,
            'connection_class': connection_class,
        }
        if source is not None:
            kwargs['source'] = source
        if state is not None:
            kwargs['state'] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    Keys of *files_dict* are file names (matched exactly first, then by
    suffix when *endswith* is true) and values are the file contents.
    """
    # Match using endswith; search the LONGEST pattern first so a more
    # specific suffix wins over one that is merely its tail.  The old
    # sorted(..., key=len) was ascending (shortest first), contradicting
    # the stated intent.
    matchlist = sorted(files_dict.keys(), key=len, reverse=True) \
        if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, 'name', fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding='utf-8')

        # Not found
        raise FileNotFoundError("File not found: {}".format(fname))

    return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coro that returns a value or raise an exception."""
    # Build a coroutine function and call it once to obtain the coroutine.
    return mock_coro_func(return_value, exception)()
def mock_coro_func(return_value=None, exception=None):
    """Build a coroutine function that yields *return_value* or raises."""
    @asyncio.coroutine
    def _fake_coro(*args, **kwargs):
        """Fake coroutine that ignores its arguments."""
        if exception:
            raise exception
        return return_value

    return _fake_coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component:
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}
    @ha.callback
    def mock_psc(hass, config_input, domain):
        """Mock the prepare_setup_component to capture config."""
        res = async_process_component_config(
            hass, config_input, domain)
        # None means validation failed; otherwise keep the domain slice.
        config[domain] = None if res is None else res.get(domain)
        _LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
                      domain, config[domain], config_input.get(domain))
        return res
    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config
    # After the block ran, verify exactly `count` platforms validated.
    if domain is None:
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]
    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Set up the recorder component against an in-memory database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # in-memory DB
    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(
            hass, recorder.DOMAIN, {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
        _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()
    # Index the provided states by entity_id as freshly stored states.
    data.last_states = {
        state.entity_id: restore_state.StoredState(state, now)
        for state in states}
    _LOGGER.debug('Restore cache: %s', data.last_states)
    # A length mismatch means two input states shared an entity_id.
    assert len(data.last_states) == len(states), \
        "Duplicate entity_id? {}".format(states)
    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data
    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
    """Decorator / context manager to mock install a dependency."""

    def __init__(self, root, *args):
        """Initialize decorator.

        root: top-level module name to mock.
        *args: dotted submodule paths (relative to root) to mock as well.
        """
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Start mocking: insert MagicMocks into sys.modules."""
        def resolve(mock, path):
            """Walk a dotted path down the mock's attribute chain."""
            if not path:
                return mock
            return resolve(getattr(mock, path[0]), path[1:])

        base = MagicMock()
        to_mock = {
            "{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
            for tom in self.submodules
        }
        to_mock[self.root] = base

        self.patcher = patch.dict('sys.modules', to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Stop mocking and restore sys.modules."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Apply decorator: call *func* with the base mock appended."""
        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                args = list(args) + [base]
                # Propagate the wrapped function's return value; the
                # original implementation silently discarded it.
                return func(*args, **kwargs)
        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class whose attributes come from keyword overrides."""

    def __init__(self, **values):
        """Store the overrides; honor an explicit entity_id if given."""
        self._values = values
        if 'entity_id' in values:
            self.entity_id = values['entity_id']

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle('name')

    @property
    def should_poll(self):
        """Return the polling setting."""
        return self._handle('should_poll')

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle('unique_id')

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle('available')

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle('device_info')

    def _handle(self, attr):
        """Return the override for *attr*, else the parent's value."""
        try:
            return self._values[attr]
        except KeyError:
            return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}
    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}
    orig_load = storage.Store._async_load
    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None
            mock_data = data.get(store.key)
            if 'data' not in mock_data or 'version' not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')
            store._data = mock_data
        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info('Loading data for %s: %s', store.key, loaded)
        return loaded
    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(
            data_to_write, cls=store._encoder))
    # autospec=True makes the mocked methods receive `store` as first arg.
    with patch('homeassistant.helpers.storage.Store._async_load',
               side_effect=mock_async_load, autospec=True), \
        patch('homeassistant.helpers.storage.Store._write_data',
              side_effect=mock_write_data, autospec=True):
        yield data
async def flush_store(store):
    """Force any pending delayed write of *store* to complete."""
    if store._data is not None:
        await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system-health info registered for *domain*."""
    info_callback = hass.data['system_health']['info'][domain]
    return await info_callback(hass)
| |
import os
import string
import sys
splunk_home = os.environ.get("SPLUNK_HOME")
# NOTE(review): splunk_home is None when SPLUNK_HOME is unset, which makes
# the join below raise TypeError — presumably this only runs inside Splunk.
egg_dir = os.path.join(splunk_home, "etc", "apps", "snmpmod", "bin")
# directory of the custom MIB eggs
mib_egg_dir = os.path.join(egg_dir, "mibs")
def load_eggs():
    """Make bundled .egg archives importable.

    Adds, in order: each egg under the app's bin directory, the MIB egg
    directory itself, then each egg inside the MIB directory.
    """
    for entry in os.listdir(egg_dir):
        if entry.endswith(".egg"):
            sys.path.append(os.path.join(egg_dir, entry))

    sys.path.append(mib_egg_dir)

    for entry in os.listdir(mib_egg_dir):
        if entry.endswith(".egg"):
            sys.path.append(os.path.join(mib_egg_dir, entry))
load_eggs()  # must run before the pysnmp imports below can resolve
from pysnmp.entity.rfc3413.oneliner import cmdgen
# noinspection PyUnresolvedReferences
from pysnmp.proto.rfc1905 import NoSuchInstance
import logging
# Shared log-record format for the SNMP modular inputs.
logging_format_string = '%(levelname)s file="%(filename)s" line=%(lineno)d %(message)s'
def get_cmd_gen():
    """Return a fresh pysnmp one-liner CommandGenerator."""
    return cmdgen.CommandGenerator()
class SnmpException(Exception):
    """Raised when an SNMP request fails.

    error_type distinguishes engine-level failures ('snmp_engine') from
    PDU-level error-status responses ('pdu').
    """

    def __init__(self, msg, error_type):
        # Pass msg to Exception so str(e) and e.args behave normally; the
        # original skipped the base __init__, leaving str(e) empty.
        # (Exception.__init__ rather than super() keeps Python 2 compat.)
        Exception.__init__(self, msg)
        self.error_type = error_type
        self.msg = msg
def walk_oids(cmd_gen, security_object, transport, oids):
    """Walk each OID against the configured target and collect the rows.

    nextCmd misbehaves with a list of OIDs (it returns leaves outside the
    requested subtree) and does not cope with large lists, so each OID is
    walked individually and the resulting tables are concatenated.

    :param cmd_gen: SNMP cmd_gen
    :param security_object: SNMP security object
    :param transport: SNMP transport
    :param oids: oids to poll
    :raises SnmpException: engine errors ('snmp_engine') or PDU
        error-status responses ('pdu')
    :returns: concatenated var-binds tables for all OIDs
    """
    rows = []
    for single_oid in oids:
        e_indication, e_status, e_index, table = cmd_gen.nextCmd(
            security_object, transport, single_oid)
        if e_indication:
            raise SnmpException(e_indication, 'snmp_engine')
        if e_status:
            culprit = e_index and table[int(e_index) - 1][0] or '?'
            raise SnmpException(
                '%s at %s' % (e_status.prettyPrint(), culprit), 'pdu')
        rows.extend(table)
    return rows
def query_oids(cmd_gen, security_object, transport, oids):
    """GET a list of exact OIDs in chunks and collect the results.

    Unlike walk_oids this expects exact OIDs (e.g. .4.5.1.0, not the
    .4.5.1 subtree).  getCmd does not cope with very large lists, so the
    OIDs are queried in fixed-size chunks.

    :param cmd_gen: SNMP cmd_gen
    :param security_object: SNMP security object
    :param transport: SNMP transport
    :param oids: oids to poll
    :raises SnmpException: engine errors ('snmp_engine') or PDU
        error-status responses ('pdu')
    :returns: concatenated var-binds tables for all chunks
    """
    chunk_size = 30
    chunks = [oids[i:i + chunk_size]
              for i in range(0, len(oids), chunk_size)]
    collected = []
    for chunk in chunks:
        e_indication, e_status, e_index, table = cmd_gen.getCmd(
            security_object, transport, *chunk)
        if e_indication:
            logging.debug('error_indication=%s error_status=%s error_index=%s',
                          e_indication, e_status, e_index)
            raise SnmpException(e_indication, 'snmp_engine')
        if e_status:
            culprit = e_index and table[int(e_index) - 1][0] or '?'
            raise SnmpException(
                '%s at %s' % (e_status.prettyPrint(), culprit), 'pdu')
        collected.extend(table)
    return collected
def query_ekinops_card(cmd_gen, security_object, transport, oids):
    """
    Issue a single SNMP GET for the given oids and return the variable
    bindings, raising SnmpException on any reported failure.
    :param cmd_gen: SNMP cmd_gen
    :param security_object: SNMP security object
    :param transport: SNMP transport
    :param oids: oids to poll
    """
    e_indication, e_status, e_index, var_binds = cmd_gen.getCmd(
        security_object, transport, *oids)
    if e_indication:
        logging.debug('error_indication=%s error_status=%s error_index=%s',
                      e_indication, e_status, e_index)
        raise SnmpException(e_indication, 'snmp_engine')
    if e_status:
        # Identify the offending oid when an error index was reported.
        offender = e_index and var_binds[int(e_index) - 1][0] or '?'
        raise SnmpException('%s at %s' % (e_status.prettyPrint(), offender), 'pdu')
    return var_binds
def get_v3_auth_protocol(v3_auth_protocol_str):
    # Map a protocol name from the config to the corresponding cmdgen
    # constant; unknown names fall back to "no authentication".
    known = {
        'usmHMACMD5AuthProtocol': cmdgen.usmHMACMD5AuthProtocol,
        'usmHMACSHAAuthProtocol': cmdgen.usmHMACSHAAuthProtocol,
        'usmNoAuthProtocol': cmdgen.usmNoAuthProtocol,
    }
    return known.get(v3_auth_protocol_str, cmdgen.usmNoAuthProtocol)
def get_v3_priv_protocol(v3_priv_protocol_str):
    # Map a privacy protocol name from the config to the corresponding
    # cmdgen constant; unknown names fall back to "no privacy".
    known = {
        'usmDESPrivProtocol': cmdgen.usmDESPrivProtocol,
        'usm3DESEDEPrivProtocol': cmdgen.usm3DESEDEPrivProtocol,
        'usmAesCfb128Protocol': cmdgen.usmAesCfb128Protocol,
        'usmAesCfb192Protocol': cmdgen.usmAesCfb192Protocol,
        'usmAesCfb256Protocol': cmdgen.usmAesCfb256Protocol,
        'usmNoPrivProtocol': cmdgen.usmNoPrivProtocol,
    }
    return known.get(v3_priv_protocol_str, cmdgen.usmNoPrivProtocol)
def get_transport(conf):
    """
    Build the SNMP transport, honouring the ipv4/ipv6 choice in the config.
    :param conf: configuration dict
    :return: SNMP transport
    """
    destination = conf.get("destination")
    port = int(conf.get("port", 161))
    if int(conf.get("ipv6", 0)):
        return cmdgen.Udp6TransportTarget((destination, port))
    return cmdgen.UdpTransportTarget((destination, port))
def get_security_object(conf):
    """
    Build the SNMP security object from the configuration, taking the
    configured SNMP version into account.
    :param conf: Configuration
    :return: security object
    """
    snmp_version = conf.get("snmp_version", "2C")
    if snmp_version == "3":
        # SNMPv3: user-based security with optional auth/privacy.
        return cmdgen.UsmUserData(
            conf.get("v3_securityName", ""),
            authKey=conf.get("v3_authKey", None),
            privKey=conf.get("v3_privKey", None),
            authProtocol=get_v3_auth_protocol(
                conf.get("v3_authProtocol", "usmHMACMD5AuthProtocol")),
            privProtocol=get_v3_priv_protocol(
                conf.get("v3_privProtocol", "usmDESPrivProtocol")))
    # SNMP v1/v2C use a community string; mpModel 0 selects v1.
    communitystring = conf.get("communitystring", "public")
    mp_model_val = 0 if snmp_version == "1" else 1
    return cmdgen.CommunityData(communitystring, mpModel=mp_model_val)
def print_validation_error(s):
    """
    Print validation error data to be consumed by Splunk.
    :param s: error message text
    """
    # print() with a single argument behaves identically under Python 2
    # (parenthesized expression) and Python 3, unlike the old print statement.
    print("<error><message>%s</message></error>" % encode_xml_text(s))
def splunk_escape(data):
    """
    Sanitise a value for Splunk key=value output: apostrophes are stripped
    and the result is double-quoted when it contains any non-word character.

    :param data: value to escape (any type; converted with str())
    :returns: escaped string; empty string for None or empty input
    """
    import re
    # Bug fix: the original converted with str() before its None/empty
    # guard, so the guard could never trigger (str(None) == 'None' and
    # str() never returns None). Check the raw input first instead.
    if data is None:
        return ""
    # str.replace instead of the Python-2-only `string.replace` function.
    s = str(data).replace("'", "")
    if s == "":
        return ""
    if re.search(r"\W+", s):
        return "\"%s\"" % s
    return s
# prints XML stream
def print_xml_single_instance_mode(server, event):
    """Print a single-instance-mode event stream for Splunk ingestion."""
    # print() form works identically under Python 2 and 3 for one argument.
    print("<stream><event><data>%s</data><host>%s</host></event></stream>" % (
        encode_xml_text(event), server))
def encode_xml_text(text):
    """
    Escape the XML special characters in ``text`` and strip newlines so the
    value can be embedded safely in a Splunk XML stream.

    :param text: raw text
    :returns: XML-safe text
    """
    # Bug fix: every replace() previously substituted a character with
    # itself (the entity references had been lost), making the function a
    # no-op apart from newline stripping. '&' must be escaped first so the
    # later entities are not double-escaped.
    text = text.replace("&", "&amp;")
    text = text.replace("\"", "&quot;")
    text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace("\n", "")
    return text
def set_logger_format(name):
    """Tag all root StreamHandler output with the given stanza name."""
    # noinspection PyBroadException
    try:
        # Build one formatter carrying the stanza, then install it on every
        # stream handler attached to the root logger.
        template = ('%(levelname)s file="%(filename)s" line=%(lineno)d '
                    'stanza="{0}" %(message)s').format(name)
        new_formatter = logging.Formatter(template)
        for handler in logging.root.handlers:
            if isinstance(handler, logging.StreamHandler):
                handler.setFormatter(new_formatter)
    except Exception:
        logging.exception("Couldn't update logging templates")
# prints XML stream
def print_xml_multi_instance_mode(server, event, stanza):
    """Print a multi-instance-mode event stream for Splunk ingestion."""
    # Bug fix: the original used VB-style doubled quotes (""%s"") which
    # Python treats as adjacent string literals, so the stanza attribute
    # value was emitted WITHOUT surrounding quotes (stanza=foo). Escape
    # the quotes properly instead.
    print("<stream><event stanza=\"%s\"><data>%s</data><host>%s</host></event></stream>" % (
        stanza, encode_xml_text(event), server))
# prints simple stream
def print_simple(s):
    """Print a plain value followed by a blank line (simple stream mode)."""
    # print() form works identically under Python 2 and 3 for one argument.
    print("%s\n" % s)
# HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Print an unbroken, done-marked event stream for Splunk ingestion."""
    # print() form works identically under Python 2 and 3 for one argument.
    print("<stream><event unbroken=\"1\"><data>%s</data><done/></event></stream>" % encode_xml_text(s))
| |
import os
from typing import List, Union, Iterable, Iterator
import pandas as pd
from kipoiseq.dataclasses import Variant, Interval
from kipoiseq.variant_source import VariantFetcher
try:
from pyranges import PyRanges
except ImportError:
from typing import Any
PyRanges = Any
__all__ = [
'variants_to_pyranges',
'BaseVariantMatcher',
'SingleVariantMatcher',
'MultiVariantsMatcher',
]
def variants_to_pyranges(variants: List[Variant]) -> PyRanges:
    """
    Build a PyRanges object from a list of variants.

    Args:
        variants: variant objects exposing chrom, start, end.

    Returns:
        pyranges.PyRanges with a `variant` metadata column holding the
        original objects.
    """
    import pyranges
    records = [(v.chrom, v.start, v.end, v) for v in variants]
    frame = pd.DataFrame(
        records, columns=['Chromosome', 'Start', 'End', 'variant'])
    return pyranges.PyRanges(frame)
def pyranges_to_intervals(pr: PyRanges, interval_attrs: List[str] = None):
    """
    Lazily convert a pyranges object into Interval objects.

    Args:
        pr: pyranges.PyRanges
        interval_attrs: names of dataframe columns to copy into each
            interval's attrs dict.

    Yields:
        Interval: one interval per row of `pr`.
    """
    attr_names = interval_attrs or []
    for _, frame in pr:
        for _, record in frame.iterrows():
            yield Interval(
                record.Chromosome, record.Start, record.End,
                strand=record.get('Strand', '.'),
                attrs={name: record[name] for name in attr_names})
def intervals_to_pyranges(intervals):
    """
    Convert a list of intervals into a pyranges object.

    Args:
        intervals List[Interval]: list of intervals

    Returns:
        pyranges.PyRanges: one range per interval.
    """
    import pyranges
    columns = [(i.chrom, i.start, i.end, i.strand) for i in intervals]
    chroms, starts, ends, strands = zip(*columns)
    return pyranges.PyRanges(
        chromosomes=chroms,
        strands=strands,
        starts=starts,
        ends=ends,
    )
class PyrangesVariantFetcher(VariantFetcher):
    """VariantFetcher backed by an in-memory list of variants, matched
    against query intervals via pyranges overlap joins."""

    def __init__(self, variants: List[Variant]):
        self.variants = variants
        # Built lazily on first access (see `variants_pr`).
        self._variants_pr = None

    @property
    def variants_pr(self):
        # Convert to PyRanges only when first needed.
        if self._variants_pr is None:
            self._variants_pr = variants_to_pyranges(self.variants)
        return self._variants_pr

    def fetch_variants(self, interval: Union[Interval, Iterable[Interval]]) -> Iterator[Variant]:
        # Accept a single interval or any iterable of intervals.
        intervals = [interval] if isinstance(interval, Interval) else interval
        query_pr: PyRanges = intervals_to_pyranges(intervals)
        # Overlap-join the query interval(s) with the variant ranges.
        joined = query_pr.join(self.variants_pr, suffix='_variant')
        yield from joined.df["variant"]

    def __iter__(self) -> Iterator[Variant]:
        yield from self.variants
class BaseVariantMatcher:
    """
    Base class for matching variants against genomic intervals.

    The variant source is resolved by `_read_variants` (vcf file, list of
    variants, or a VariantFetcher); the interval source by `_read_intervals`
    (gtf file, bed file, pyranges object, or list of intervals).
    """

    def __init__(
            self,
            vcf_file: str = None,
            variants: List[Variant] = None,
            variant_fetcher: VariantFetcher = None,
            gtf_path: str = None,
            bed_path: str = None,
            pranges: PyRanges = None,
            intervals: List[Interval] = None,
            interval_attrs: List[str] = None,
            vcf_lazy: bool = True,
            variant_batch_size: int = 10000
    ):
        """
        Args:
            vcf_file: (optional) path of vcf file
            variants: (optional) readily processed variants
            variant_fetcher: (optional) custom VariantFetcher instance
            gtf_path: (optional) path of gtf file contains features
            bed_path: (optional) path of bed file
            pranges: (optional) pyranges object
            intervals: (optional) list of intervals
            interval_attrs: attrs of intervals which should be read from
                files or the pyranges object. Not valid with `intervals`.
            vcf_lazy: parse vcf records lazily (passed to MultiSampleVCF)
            variant_batch_size: stored for subclasses; not used by the
                base class itself
        """
        self.variant_fetcher = self._read_variants(
            vcf_file, variants, variant_fetcher, vcf_lazy)
        self.interval_attrs = interval_attrs
        self.pr = self._read_intervals(gtf_path, bed_path, pranges,
                                       intervals, interval_attrs, duplicate_attr=True)
        self.variant_batch_size = variant_batch_size

    @staticmethod
    def _read_variants(
            vcf_file=None,
            variants=None,
            variant_fetcher=None,
            vcf_lazy: bool = True,
    ) -> VariantFetcher:
        # Resolve the variant source, in this order of precedence.
        if vcf_file is not None:
            from kipoiseq.extractors import MultiSampleVCF
            return MultiSampleVCF(vcf_file, lazy=vcf_lazy)
        elif variant_fetcher is not None:
            assert isinstance(variant_fetcher, VariantFetcher), \
                "Wrong type of variant fetcher: %s" % type(variant_fetcher)
            return variant_fetcher
        elif variants is not None:
            return PyrangesVariantFetcher(variants)
        else:
            raise ValueError("No source of variants was specified!")

    @staticmethod
    def _read_intervals(gtf_path=None, bed_path=None, pranges=None,
                        intervals=None, interval_attrs=None, duplicate_attr=False):
        # Exactly one interval source must be supplied.
        alternatives = [bed_path, pranges, intervals, gtf_path]
        if sum(i is not None for i in alternatives) != 1:
            # Fixed the typo (`gth_path`) and grammar of the old message.
            raise ValueError('exactly one of `gtf_path`, `bed_path`, `pranges`'
                             ' or `intervals` should be given as input.')

        if gtf_path:
            import pyranges
            pranges = pyranges.read_gtf(
                gtf_path, duplicate_attr=duplicate_attr)
        elif bed_path:
            import pyranges
            pranges = pyranges.read_bed(bed_path)
        elif intervals:
            if interval_attrs is not None:
                raise ValueError(
                    '`interval_attrs` is not valid with `intervals`')
            pranges = intervals_to_pyranges(intervals)

        return pranges

    def __iter__(self):
        raise NotImplementedError()
class SingleVariantMatcher(BaseVariantMatcher):
    """
    Match and iterate variants with intervals.

    Each yielded pair is one (interval, variant) overlap; an interval that
    overlaps several variants is yielded once per variant.
    """
    def __init__(self, *args, **kwargs):
        """
        Args:
            vcf_file: path of vcf file
            gtf_path: (optional) path of gtf file contains features
            bed_path: (optional) path of bed file
            pranges: (optional) pyranges object
            intervals: (optional) list of intervals
            interval_attrs: attr of intervals should read from files or
                pyranges object. This argument is not valid with intervals.
        """
        super().__init__(*args, **kwargs)
    def _read_vcf_pyranges(self, batch_size: int = 10000):
        """
        Reads vcf and returns batch of pyranges objects.
        Args:
            batch_size: size of each batch.
        """
        for batch in self.variant_fetcher.batch_iter(batch_size):
            yield variants_to_pyranges(batch)
    def iter_pyranges(self) -> PyRanges:
        """
        Iter matched variants with intervals as pyranges.

        Yields one joined PyRanges per variant batch; the matched Interval
        objects are stored on `intervals` (exposed per-row when iterating
        the joined object).
        """
        for pr_variants in self._read_vcf_pyranges():
            # Overlap-join the configured intervals with this variant batch.
            pr_join = self.pr.join(pr_variants, suffix='_variant')
            pr_join.intervals = list(pyranges_to_intervals(
                pr_join, interval_attrs=self.interval_attrs))
            yield pr_join
    def iter_rows(self):
        """
        Iter matched variants with intervals as pandas series.
        """
        for pr in self.iter_pyranges():
            for _, df in pr:
                # Deterministic order: variant position first, then interval start.
                df = df.sort_values(['Start_variant', 'Start'])
                for _, row in df.iterrows():
                    yield row
    def __iter__(self) -> Iterator:
        """
        Iterate (interval, variant) pairs, one per overlap.
        """
        for row in self.iter_rows():
            yield row['intervals'], row['variant']
class MultiVariantsMatcher(BaseVariantMatcher):
    """Yield each configured interval together with all variants overlapping it."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reuse intervals already materialised on the pyranges object,
        # otherwise derive them (lazily) from its rows.
        self.intervals = (self.pr.intervals
                          if hasattr(self.pr, 'intervals')
                          else pyranges_to_intervals(self.pr))

    def __iter__(self):
        for region in self.intervals:
            yield region, self.variant_fetcher.fetch_variants(region)
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Common enumerations to be used together with |Enum| property.
This module provides many pre-defined enumerations, as well as functions
for creating new enumerations.
New enumerations can be created using the |enumeration| function:
.. code-block:: python
#: Specify a nautically named side, port or starboard
MyEnum = enumeration("port", "starboard")
Typically, enumerations are used to define |Enum| properties:
.. code-block:: python
from bokeh.model import Model
from bokeh.core.properties import Enum
class MyModel(Model):
location = Enum(MyEnum, help="""
Whether the thing should be a port or starboard.
""")
Enumerations have a defined order and support iteration:
.. code-block:: python
>>> for loc in MyEnum:
... print(loc)
...
port
starboard
as well as containment tests:
.. code-block:: python
>>> "port" in MyEnum
True
Enumerations can be easily documented in Sphinx documentation with the
:ref:`bokeh.sphinxext.bokeh_enum` Sphinx extension.
----
.. autofunction:: bokeh.core.enums.enumeration
----
.. |Enum| replace:: :class:`~bokeh.core.properties.Enum`
.. |enumeration| replace:: :func:`~bokeh.core.enums.enumeration`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from six import string_types
# Bokeh imports
from .. import colors, palettes
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Align',
'Anchor',
'AngleUnits',
'ButtonType',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'enumeration',
'FontStyle',
'HatchPattern',
'HatchPatternAbbreviation',
'HoldPolicy',
'HorizontalLocation',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'RenderLevel',
'RenderMode',
'ResetPolicy',
'RoundingFunction',
'SizingMode',
'SizingPolicy',
'SliderCallbackPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TextureRepetition',
'TickLabelOrientation',
'TooltipAttachment',
'TooltipFieldFormatter',
'TrackPolicy',
'VerticalAlign',
'VerticalLocation',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Enumeration(object):
    ''' Represent an enumerated collection of values.

    .. note::
        Instances of ``Enumeration`` typically should not be constructed
        directly. Instead, use the |enumeration| function.

    '''
    __slots__ = ()

    def __iter__(self):
        return iter(self._values)

    def __contains__(self, value):
        # Case-insensitive enumerations compare on the lower-cased query.
        needle = value.lower() if not self._case_sensitive else value
        return needle in self._values

    def __str__(self):
        if self._quote:
            body = ", ".join(repr(item) for item in self._values)
        else:
            body = ", ".join(self._values)
        return "Enumeration(%s)" % body

    def __len__(self):
        return len(self._values)

    __repr__ = __str__
def enumeration(*values, **kwargs):
    ''' Create an |Enumeration| object from a sequence of values.

    Call ``enumeration`` with a sequence of (unique) strings to create an
    Enumeration object:

    .. code-block:: python

        #: Specify the horizontal alignment for rendering text
        TextAlign = enumeration("left", "right", "center")

    Args:
        values (str) : string enumeration values, passed as positional arguments
            The order of arguments is the order of the enumeration, and the
            first element will be considered the default value when used
            to create |Enum| properties.

    Keyword Args:
        case_sensitive (bool, optional) :
            Whether validation should consider case or not (default: True)
        quote (bool, optional):
            Whether values should be quoted in the string representations
            (default: False)

    Raises:
        ValueError if values empty, if any value is not a string or not unique

    Returns:
        Enumeration
    '''
    if not (values and all(isinstance(value, string_types) and value for value in values)):
        raise ValueError("expected a non-empty sequence of strings, got %s" % values)
    if len(values) != len(set(values)):
        raise ValueError("enumeration items must be unique, got %s" % values)
    # Each value doubles as a class attribute, so e.g. MyEnum.foo == "foo".
    attrs = {value: value for value in values}
    attrs.update({
        "_values": list(values),
        "_default": values[0],
        "_case_sensitive": kwargs.get("case_sensitive", True),
        "_quote": kwargs.get("quote", False),
    })
    return type(str("Enumeration"), (Enumeration,), attrs)()
#: Alignment (vertical or horizontal) of a child item
Align = enumeration("start", "center", "end")
#: Specify an anchor position on a box/frame
Anchor = enumeration(
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right")
#: Specify the units for an angle value
AngleUnits = enumeration("deg", "rad")
#: Specify a style for button widgets
ButtonType = enumeration("default", "primary", "success", "warning", "danger")
#: Specify a named dash pattern for stroking lines
DashPattern = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify a format for printing dates
DateFormat = enumeration("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
#: Specify a date/time scale
DatetimeUnits = enumeration("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
#: Specify a vertical/horizontal dimension
Dimension = enumeration("width", "height")
#: Specify vertical/horizontal dimensions
Dimensions = enumeration("width", "height", "both")
#: Specify a stroke direction for circles, wedges, etc.
Direction = enumeration("clock", "anticlock")
#: Specify the font style for rendering text
FontStyle = enumeration("normal", "italic", "bold", "bold italic")
_hatch_patterns = (
(" ", "blank"),
(".", "dot"),
("o", "ring"),
("-", "horizontal_line"),
("|", "vertical_line"),
("+", "cross"),
('"', "horizontal_dash"),
(":", "vertical_dash"),
("@", "spiral"),
("/", "right_diagonal_line"),
("\\", "left_diagonal_line"),
("x", "diagonal_cross"),
(",", "right_diagonal_dash"),
("`", "left_diagonal_dash"),
("v", "horizontal_wave"),
(">", "vertical_wave"),
("*", "criss_cross"),
)
#: Specify one of the built-in patterns for hatching fills
HatchPattern = enumeration(*list(zip(*_hatch_patterns))[1])
#: Specify one of the built-in patterns for hatching fills with a one-letter abbreviation
#:
#: The abbreviations are mapped as follows:
#:
#: .. code-block:: none
#:
#: " " : blank
#: "." : dot
#: "o" : ring
#: "-" : horizontal_line
#: "|" : vertical_line
#: "+" : cross
#: '"' : horizontal_dash
#: ":" : vertical_dash
#: "@" : spiral
#: "/" : right_diagonal_line
#: "\\" : left_diagonal_line
#: "x" : diagonal_cross
#: "," : right_diagonal_dash
#: "`" : left_diagonal_dash
#: "v" : horizontal_wave
#: ">" : vertical_wave
#: "*" : criss_cross
HatchPatternAbbreviation = enumeration(*list(zip(*_hatch_patterns))[0], quote=True)
#: Specify whether events should be combined or collected as-is when a Document hold is in effect
HoldPolicy = enumeration("combine", "collect")
#: Specify a horizontal location in plot layouts
HorizontalLocation = enumeration("left", "right")
#: Specify a distribution to use for the Jitter class
JitterRandomDistribution = enumeration("uniform", "normal")
#: Specify whether a dimension or coordinate is latitude or longitude
LatLon = enumeration("lat", "lon")
#: Specify how a legend should respond to click events
LegendClickPolicy = enumeration("none", "hide", "mute")
#: Specify a fixed location for a Bokeh legend
LegendLocation = Anchor
#: Specify how stroked lines should be terminated
LineCap = enumeration("butt", "round", "square")
#: Specify a named dash pattern for stroking lines
LineDash = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify how stroked lines should be joined together
LineJoin = enumeration("miter", "round", "bevel")
#: Specify a location in plot layouts
Location = enumeration("above", "below", "left", "right")
#: Specify a style for a Google map
MapType = enumeration("satellite", "roadmap", "terrain", "hybrid")
#: Specify one of the built-in marker types
MarkerType = enumeration("asterisk", "circle", "circle_cross", "circle_x", "cross",
"dash", "diamond", "diamond_cross", "hex", "inverted_triangle",
"square", "square_cross", "square_x", "triangle", "x")
#: Specify one of the 147 named CSS colors
NamedColor = enumeration(*colors.named.__all__, case_sensitive=False)
#: Specify a locale for printing numeric values
NumeralLanguage = enumeration("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
#: Specify a vertical/horizontal orientation for something
Orientation = enumeration("horizontal", "vertical")
#: Specify an output backend to render a plot area onto
OutputBackend = enumeration("canvas", "svg", "webgl")
#: Whether range padding should be interpreted as a percentage or an absolute quantity
PaddingUnits = enumeration("percent", "absolute")
#: Specify the name of a palette from :ref:`bokeh.palettes`
Palette = enumeration(*palettes.__palettes__)
#: Specify a position in the render order for a Bokeh renderer
RenderLevel = enumeration("image", "underlay", "glyph", "annotation", "overlay")
#: Specify a render mode for renderers that support both Canvas or CSS rendering
RenderMode = enumeration("canvas", "css")
#: What reset actions should occur on a Plot reset
ResetPolicy = enumeration("standard", "event_only")
#: Specify a policy for how numbers should be rounded
RoundingFunction = enumeration("round", "nearest", "floor", "rounddown", "ceil", "roundup")
#: Sizing mode policies
SizingMode = enumeration("stretch_width", "stretch_height", "stretch_both",
"scale_width", "scale_height", "scale_both",
"fixed")
#: Individual sizing mode policies
SizingPolicy = enumeration("fixed", "fit", "min", "max")
#: Specify different callback policies for the slider widget
SliderCallbackPolicy = enumeration("continuous", "throttle", "mouseup")
#: Specify sorting directions
SortDirection = enumeration("ascending", "descending")
#: Specify units for mapping values
SpatialUnits = enumeration("screen", "data")
#: Specify a start/end value
StartEnd = enumeration("start", "end")
#: Specify a mode for stepwise interpolation
StepMode = enumeration("before", "after", "center")
#: Specify the horizontal alignment for rendering text
TextAlign = enumeration("left", "right", "center")
#: Specify the baseline location for rendering text
TextBaseline = enumeration("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
#: Specify how textures used as canvas patterns should repeat
TextureRepetition = enumeration("repeat", "repeat_x", "repeat_y", "no_repeat")
#: Specify how axis tick labels are oriented with respect to the axis
TickLabelOrientation = enumeration("horizontal", "vertical", "parallel", "normal")
#: Specify an attachment for tooltips
TooltipAttachment = enumeration("horizontal", "vertical", "left", "right", "above", "below")
#: Specify how a format string for a tooltip field should be interpreted
TooltipFieldFormatter = enumeration("numeral", "datetime", "printf")
#: Grid track (row/column) sizing policies
TrackPolicy = enumeration("auto", "min", "max", "flex", "fixed")
#: Specify the vertical alignment for rendering text
VerticalAlign = enumeration("top", "middle", "bottom")
#: Specify a vertical location in plot layouts
VerticalLocation = enumeration("above", "below")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
import pytest
from sitemessage.messengers.base import MessengerBase
from sitemessage.models import Subscription, DispatchError
from sitemessage.toolbox import recipients, schedule_messages, send_scheduled_messages
from sitemessage.utils import get_registered_messenger_objects
from .testapp.sitemessages import (
WONDERLAND_DOMAIN, MessagePlainForTest, MessengerForTest, BuggyMessenger,
messenger_fb,
messenger_smtp,
messenger_telegram,
messenger_twitter,
messenger_vk,
messenger_xmpp,
)
def test_init_params():
    """Credentials given at registration are kept on the messenger instance."""
    registered = get_registered_messenger_objects()
    messenger = registered['test_messenger']
    assert (messenger.login, messenger.password) == ('mylogin', 'mypassword')
def test_alias():
    """get_alias() prefers an explicit `alias`, falling back to the class name."""
    with_alias = type('MyMessenger', (MessengerBase,), {'alias': 'myalias'})
    assert with_alias.get_alias() == 'myalias'
    without_alias = type('MyMessenger', (MessengerBase,), {})
    assert without_alias.get_alias() == 'MyMessenger'
def test_get_recipients_data(user_create):
    """Plain strings and User objects both resolve to messenger addresses."""
    user = user_create(attributes=dict(username='myuser'))
    structured = MessengerForTest.structure_recipients_data(['gogi', 'givi', user])
    assert len(structured) == 3
    for item, base in zip(structured, ['gogi', 'givi', 'user_myuser']):
        assert item.address == f'{base}{WONDERLAND_DOMAIN}'
        assert item.messenger == 'test_messenger'
def test_recipients():
    """recipients() wraps a single address into a one-element list."""
    result = MessagePlainForTest.recipients('smtp', 'someone')
    assert len(result) == 1
    assert result[0].address == 'someone'
def test_send():
    """send() records its arguments; a buggy messenger propagates exceptions."""
    messenger = MessengerForTest('l', 'p')
    messenger.send('message_cls', 'message_model', 'dispatch_models')
    for key in ('message_cls', 'message_model', 'dispatch_models'):
        assert messenger.last_send[key] == key
    buggy = BuggyMessenger()
    targets = recipients('test_messenger', ['a', 'b', 'c', 'd'])
    with pytest.raises(Exception):
        buggy.send('a buggy message', '', targets)
def test_subscription(user_create):
    """Inactive users are excluded when active_only is requested."""
    active_user = user_create(attributes=dict(username='first'))
    inactive_user = user_create(attributes=dict(username='second'))
    inactive_user.is_active = False
    inactive_user.save()
    for user in (active_user, inactive_user):
        Subscription.create(user.id, MessagePlainForTest, MessengerForTest)
    assert len(MessagePlainForTest.get_subscribers(active_only=False)) == 2
    assert len(MessagePlainForTest.get_subscribers(active_only=True)) == 1
def assert_called_n(func, n=1):
    """Assert `func` (a mock) was called exactly `n` times, then reset its counter."""
    observed = func.call_count
    assert observed == n
    func.call_count = 0
def test_exception_propagation(monkeypatch):
    """A failure in before_send is recorded as a DispatchError per dispatch."""
    schedule_messages('text', recipients('telegram', ''))
    schedule_messages('text', recipients('telegram', ''))

    def broken_before_send(*args, **kwargs):
        raise Exception('telegram beforesend failed')

    monkeypatch.setattr(messenger_telegram, 'before_send', broken_before_send)
    send_scheduled_messages()
    errors = list(DispatchError.objects.all())
    assert len(errors) == 2
    assert all(e.error_log == 'telegram beforesend failed' for e in errors)
class TestSMTPMessenger:

    def setup_method(self, method):
        # Each test starts with a clean sendmail call counter.
        messenger_smtp.smtp.sendmail.call_count = 0

    def test_get_address(self):
        opaque = object()
        assert messenger_smtp.get_address(opaque) == opaque
        with_email = type('r', (object,), dict(email='somewhere'))
        assert messenger_smtp.get_address(with_email) == 'somewhere'

    def test_send(self):
        schedule_messages('text', recipients('smtp', 'someone'))
        send_scheduled_messages()
        assert_called_n(messenger_smtp.smtp.sendmail)

    def test_send_fail(self):
        schedule_messages('text', recipients('smtp', 'someone'))

        def failing_sendmail(*args, **kwargs):
            raise Exception('smtp failed')

        original = messenger_smtp.smtp.sendmail
        messenger_smtp.smtp.sendmail = failing_sendmail
        try:
            send_scheduled_messages()
            errors = DispatchError.objects.all()
            assert len(errors) == 1
            assert errors[0].error_log == 'smtp failed'
            assert errors[0].dispatch.address == 'someone'
        finally:
            # Always restore the patched method for later tests.
            messenger_smtp.smtp.sendmail = original

    def test_send_test_message(self):
        messenger_smtp.send_test_message('someone', 'sometext')
        assert_called_n(messenger_smtp.smtp.sendmail)
class TestTwitterMessenger:

    def test_get_address(self):
        opaque = object()
        assert messenger_twitter.get_address(opaque) == opaque
        with_twitter = type('r', (object,), dict(twitter='somewhere'))
        assert messenger_twitter.get_address(with_twitter) == 'somewhere'

    def test_send(self):
        schedule_messages('text', recipients('twitter', 'someone'))
        send_scheduled_messages()
        messenger_twitter.api.statuses.update.assert_called_with(status='@someone text')

    def test_send_test_message(self):
        messenger_twitter.send_test_message('someone', 'sometext')
        messenger_twitter.api.statuses.update.assert_called_with(status='@someone sometext')
        # An empty recipient produces a plain status without an @mention.
        messenger_twitter.send_test_message('', 'sometext')
        messenger_twitter.api.statuses.update.assert_called_with(status='sometext')

    def test_send_fail(self):
        schedule_messages('text', recipients('twitter', 'someone'))

        def failing_update(*args, **kwargs):
            raise Exception('tweet failed')

        original = messenger_twitter.api.statuses.update
        messenger_twitter.api.statuses.update = failing_update
        try:
            send_scheduled_messages()
            errors = DispatchError.objects.all()
            assert len(errors) == 1
            assert errors[0].error_log == 'tweet failed'
            assert errors[0].dispatch.address == 'someone'
        finally:
            messenger_twitter.api.statuses.update = original
class TestXMPPSleekMessenger:

    def test_get_address(self):
        opaque = object()
        assert messenger_xmpp.get_address(opaque) == opaque
        with_jabber = type('r', (object,), dict(jabber='somewhere'))
        assert messenger_xmpp.get_address(with_jabber) == 'somewhere'

    def test_send(self):
        schedule_messages('text', recipients('xmppsleek', 'someone'))
        send_scheduled_messages()
        messenger_xmpp.xmpp.send_message.assert_called_once_with(
            mtype='chat', mbody='text', mfrom='somjid', mto='someone'
        )

    def test_send_test_message(self):
        messenger_xmpp.send_test_message('someone', 'sometext')
        messenger_xmpp.xmpp.send_message.assert_called_with(
            mtype='chat', mbody='sometext', mfrom='somjid', mto='someone'
        )

    def test_send_fail(self):
        schedule_messages('text', recipients('xmppsleek', 'someone'))

        def failing_send(*args, **kwargs):
            raise Exception('xmppsleek failed')

        original = messenger_xmpp.xmpp.send_message
        messenger_xmpp.xmpp.send_message = failing_send
        # Mark the session as started so a send is actually attempted.
        messenger_xmpp._session_started = True
        try:
            send_scheduled_messages()
            errors = DispatchError.objects.all()
            assert len(errors) == 1
            assert errors[0].error_log == 'xmppsleek failed'
            assert errors[0].dispatch.address == 'someone'
        finally:
            messenger_xmpp.xmpp.send_message = original
class TestTelegramMessenger:

    def setup_method(self, method):
        messenger_telegram._verify_bot()
        # Reset the HTTP mock counter after the verification call above.
        messenger_telegram.lib.post.call_count = 0

    def test_get_address(self):
        opaque = object()
        assert messenger_telegram.get_address(opaque) == opaque
        with_telegram = type('r', (object,), dict(telegram='chat_id'))
        assert messenger_telegram.get_address(with_telegram) == 'chat_id'

    def test_send(self):
        schedule_messages('text', recipients('telegram', '1234567'))
        send_scheduled_messages()
        # Two HTTP posts are expected for a single dispatch.
        assert_called_n(messenger_telegram.lib.post, 2)
        assert messenger_telegram.lib.post.call_args[1]['proxies'] == {'https': 'socks5://user:pass@host:port'}

    def test_send_test_message(self):
        for address in ('someone', ''):
            messenger_telegram.send_test_message(address, 'sometext')
            assert_called_n(messenger_telegram.lib.post)

    def test_get_chat_ids(self):
        assert messenger_telegram.get_chat_ids() == []
        assert_called_n(messenger_telegram.lib.post)

    def test_send_fail(self):
        schedule_messages('text', recipients('telegram', 'someone'))

        def failing_post(*args, **kwargs):
            raise Exception('telegram failed')

        original = messenger_telegram.lib.post
        messenger_telegram.lib.post = failing_post
        try:
            send_scheduled_messages()
            errors = DispatchError.objects.all()
            assert len(errors) == 1
            assert errors[0].error_log == 'telegram failed'
            assert errors[0].dispatch.address == 'someone'
        finally:
            messenger_telegram.lib.post = original
class TestFacebookMessenger:
    """Tests for the Facebook messenger backend (the `lib` HTTP client is mocked)."""
    def setup_method(self, method):
        # Reset the mocked HTTP call counters so each test counts from zero.
        messenger_fb.lib.post.call_count = 0
        messenger_fb.lib.get.call_count = 0
    def test_send(self):
        # One scheduled message should produce one POST that carries the
        # configured proxies.
        schedule_messages('text', recipients('fb', ''))
        send_scheduled_messages()
        assert_called_n(messenger_fb.lib.post)
        assert messenger_fb.lib.post.call_args[1]['proxies'] == {'https': '0.0.0.0'}
    def test_send_test_message(self):
        # Each test message triggers a POST; an empty address is accepted.
        messenger_fb.send_test_message('', 'sometext')
        assert_called_n(messenger_fb.lib.post)
        messenger_fb.send_test_message('', 'sometext')
        assert_called_n(messenger_fb.lib.post)
    def test_get_page_access_token(self):
        # The token exchange performs two GETs; with the mocked lib it yields {}.
        assert messenger_fb.get_page_access_token('app_id', 'app_secret', 'user_token') == {}
        assert_called_n(messenger_fb.lib.get, 2)
    def test_send_fail(self):
        # A backend exception must be recorded as a DispatchError, and the
        # patched `post` must be restored even when an assertion fails.
        schedule_messages('text', recipients('fb', ''))
        def new_method(*args, **kwargs):
            raise Exception('fb failed')
        old_method = messenger_fb.lib.post
        messenger_fb.lib.post = new_method
        try:
            send_scheduled_messages()
            errors = DispatchError.objects.all()
            assert len(errors) == 1
            assert errors[0].error_log == 'fb failed'
            assert errors[0].dispatch.address == ''
        finally:
            messenger_fb.lib.post = old_method
class TestVKontakteMessenger:
    """Tests for the VKontakte messenger backend (the `lib` HTTP client is mocked)."""

    def setup_method(self, method):
        # Start every test with pristine mocked call counters.
        messenger_vk.lib.post.call_count = 0
        messenger_vk.lib.get.call_count = 0

    def test_send(self):
        schedule_messages('text', recipients('vk', '12345'))
        send_scheduled_messages()
        assert_called_n(messenger_vk.lib.post)
        posted = messenger_vk.lib.post.call_args[1]['data']
        assert posted['owner_id'] == '12345'

    def test_get_access_token(self, monkeypatch):
        # Keep the test headless: swallow the browser-opening side effect.
        monkeypatch.setattr('webbrowser.open', lambda *args: None)
        token_url = messenger_vk.get_access_token(app_id='00000')
        assert '00000&scope=wall,' in token_url

    def test_send_test_message(self):
        # Sending twice must trigger a POST each time.
        for _ in range(2):
            messenger_vk.send_test_message('12345', 'sometext')
            assert_called_n(messenger_vk.lib.post)

    def test_send_fail(self):
        schedule_messages('text', recipients('vk', '12345'))

        def failing_post(*args, **kwargs):
            raise Exception('vk failed')

        original_post = messenger_vk.lib.post
        messenger_vk.lib.post = failing_post
        try:
            send_scheduled_messages()
            logged = DispatchError.objects.all()
            assert len(logged) == 1
            assert logged[0].error_log == 'vk failed'
            assert logged[0].dispatch.address == '12345'
        finally:
            messenger_vk.lib.post = original_post
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import common, datetime, json, os, threading, time, urllib, urllib2
# These tests can be run with pytest: http://doc.pytest.org/en/latest/index.html
# Resolve the emulator's address from the environment; fail fast with an
# actionable message when the host variable is not set.
try:
    server_address = 'http://' + os.environ[common.host_environment_variable]
except KeyError:
    raise Exception('%s must be set' % common.host_environment_variable)
# URL templates: read requests go through the v1 metadata root, writes go
# through the emulator-only update endpoint.
request_template = server_address + '/computeMetadata/v1/{}'
update_template = server_address + '/emulator/v1/update/{}'
def check_header(headers, key, value):
    """Assert that *headers* (a dict keyed by lower-cased names) maps *key* to *value*."""
    actual = headers.get(key.lower())
    assert actual == value
def expect_content_absolute(url, expected, expected_content_type, expect_metadata_header=True):
    """GET *url* and assert body, content type and standard emulator headers.

    Returns the response's ETag header value (or None when absent).
    Python 2 only: uses urllib2 and httplib's `.info().dict` header dict.
    """
    request = urllib2.Request(url, headers={common.metadata_flavor : common.metadata_flavor_google})
    try:
        response = urllib2.urlopen(request)
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
    # `.dict` exposes headers with lower-cased keys (Python 2 httplib behavior).
    headers = response.info().dict
    if expect_metadata_header:
        check_header(headers, common.metadata_flavor, common.metadata_flavor_google)
    else:
        # e.g. token responses deliberately omit the Metadata-Flavor header.
        assert common.metadata_flavor.lower() not in headers
    check_header(headers, common.server, common.server_value)
    check_header(headers, common.content_type, expected_content_type)
    contents = response.read()
    assert expected == contents
    return headers.get(common.etag.lower())
def expect_content(path, expected, expected_content_type, expect_metadata_header=True):
    """Fetch *path* relative to the v1 metadata root and verify the response."""
    url = request_template.format(path)
    return expect_content_absolute(url, expected, expected_content_type,
                                   expect_metadata_header)
def expect_error(path, expected_code, metadata_flavor_value=common.metadata_flavor_google):
    """GET *path* and assert the server fails with HTTP status *expected_code*.

    Passing a falsy *metadata_flavor_value* sends no Metadata-Flavor header at
    all, which is how the 403 cases are exercised.
    """
    url = request_template.format(path)
    request = urllib2.Request(url,
                              headers={common.metadata_flavor :
                                       metadata_flavor_value} if metadata_flavor_value else {})
    try:
        response = urllib2.urlopen(request)
        # This will always fail but I'm asserting so the response appears in the test results
        assert response.read() is None
    except urllib2.HTTPError as e:
        # Error responses must still carry the standard emulator headers.
        check_header(e.headers, common.metadata_flavor, common.metadata_flavor_google)
        check_header(e.headers, common.server, common.server_value)
        check_header(e.headers, common.content_type, common.content_type_html)
        assert e.code == expected_code
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
def expect_error_deleting_content(path, expected_code):
    """DELETE *path* via the update endpoint and assert it fails with *expected_code*."""
    request = urllib2.Request(url = update_template.format(path),
                              headers={common.metadata_flavor: common.metadata_flavor_google})
    # urllib2 has no native DELETE support; override the method hook.
    request.get_method = lambda: "DELETE"
    try:
        response = urllib2.urlopen(request)
        # This will always fail but I'm asserting so the response appears in the test results
        assert response.read() is None
    except urllib2.HTTPError as e:
        check_header(e.headers, common.metadata_flavor, common.metadata_flavor_google)
        check_header(e.headers, common.server, common.server_value)
        check_header(e.headers, common.content_type, common.content_type_html)
        assert e.code == expected_code
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
def expect_error_updating_content(path, new_data, expected_code):
    """POST *new_data* to the update endpoint and assert it fails with *expected_code*.

    Non-string payloads are JSON-encoded first; the body is form-quoted just
    like in update_content (the happy-path twin of this helper).
    """
    if not isinstance(new_data, str):
        new_data = json.dumps(new_data, separators=(',', ':'))
    data = urllib.quote_plus(new_data)
    request = urllib2.Request(update_template.format(path),
                              data,
                              headers={common.metadata_flavor: common.metadata_flavor_google,
                                       common.content_type: common.content_type_text})
    try:
        response = urllib2.urlopen(request)
        # This will always fail but I'm asserting so the response appears in the test results
        assert response.read() is None
    except urllib2.HTTPError as e:
        check_header(e.headers, common.metadata_flavor, common.metadata_flavor_google)
        check_header(e.headers, common.server, common.server_value)
        check_header(e.headers, common.content_type, common.content_type_html)
        assert e.code == expected_code
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
def check_path(path, expected, default='text'):
    """Check a leaf value in its default, ?alt=text and ?alt=json renderings.

    *default* says which rendering the bare path (no ?alt=) should produce.
    All three responses must share the same 16-character ETag.
    Python 2 only: relies on the `unicode` builtin.
    """
    expected_json = json.dumps(expected, separators=(',', ':'))
    if isinstance(expected, list):
        # Lists render as one item per line in the text representation.
        expected_text = ''.join(s + '\n' for s in expected)
    else:
        expected_text = unicode(expected)
    etag1 = expect_content(path,
                           expected_text if default == 'text' else expected_json,
                           common.content_type_text if default == 'text' else common.content_type_json)
    etag2 = expect_content(path + "?alt=text", expected_text, common.content_type_text)
    etag3 = expect_content(path + "?alt=json", expected_json, common.content_type_json)
    assert len(etag1) == 16
    assert etag1 == etag2
    assert etag1 == etag3
def check_dir(path, expected):
    """Verify a directory listing both with and without a trailing slash."""
    bare_etag = expect_content(path, expected, common.content_type_text)
    assert len(bare_etag) == 16
    if path:
        # The empty (root) path has no distinct trailing-slash form to probe.
        slash_etag = expect_content(path + '/', expected, common.content_type_text)
        assert bare_etag == slash_etag
def check_dir_absolute(url, expected):
    """Verify a directory listing at an absolute *url*, with and without '/'."""
    first_etag = expect_content_absolute(url, expected, common.content_type_text)
    second_etag = expect_content_absolute(url + '/', expected, common.content_type_text)
    assert len(first_etag) == 16
    assert first_etag == second_etag
def check_dir_recursive(path, expected, test_trailing_slash=True, use_recursive_field=True, expect_metadata_header=True):
    """Check a recursive listing in JSON and text forms; all ETags must match.

    *expected* is the canonical JSON string. The text rendering is derived
    from it locally (see convertToText) rather than hard-coded by callers.
    Six request variants are probed: bare/slash x default/alt=text/alt=json.
    Python 2 only: relies on the `unicode` builtin.
    """
    query = '?recursive=true' if use_recursive_field else ''
    etag1 = expect_content(path + query, expected, common.content_type_json, expect_metadata_header)
    if test_trailing_slash:
        etag2 = expect_content(path + '/' + query, expected, common.content_type_json, expect_metadata_header)
    else:
        etag2 = etag1
    # Preserve key order so the derived text matches the server's ordering.
    data = json.loads(expected, object_pairs_hook=OrderedDict)
    def convertToText(prefix, o):
        # Flatten JSON into the metadata server's "path value\n" text format.
        text = ''
        if isinstance(o, list):
            if len(o) != 0:
                if isinstance(o[0], OrderedDict):
                    # Lists of objects are indexed: prefix/0, prefix/1, ...
                    for i in range(len(o)):
                        text += convertToText(prefix + ('/' if len(prefix) != 0 else '') + unicode(i), o[i])
                else:
                    # Lists of scalars repeat the same prefix per element.
                    for i in range(len(o)):
                        text += convertToText(prefix, o[i])
        elif isinstance(o, OrderedDict):
            for key in o:
                # camelCase JSON keys become dashed path components.
                text += convertToText(prefix + ('/' if len(prefix) != 0 else '') + common.convertToDashed(key),
                                      o[key])
        else:
            text += prefix + ' ' + unicode(o) + '\n'
        return text
    expected_text = convertToText('', data)
    query = '?recursive=true&alt=text' if use_recursive_field else '?alt=text'
    etag3 = expect_content(path + query, expected_text, common.content_type_text, expect_metadata_header)
    if test_trailing_slash:
        etag4 = expect_content(path + '/' + query, expected_text, common.content_type_text, expect_metadata_header)
    else:
        etag4 = etag3
    query = '?recursive=true&alt=json' if use_recursive_field else '?alt=json'
    etag5 = expect_content(path + query, expected, common.content_type_json, expect_metadata_header)
    if test_trailing_slash:
        etag6 = expect_content(path + '/' + query, expected, common.content_type_json, expect_metadata_header)
    else:
        etag6 = etag5
    # Every variant of the same content must share one 16-character ETag.
    assert len(etag1) == 16
    assert etag1 == etag2
    assert etag1 == etag3
    assert etag1 == etag4
    assert etag1 == etag5
    assert etag1 == etag6
def delete_content(path):
    """DELETE *path* through the emulator's update endpoint (expected to succeed)."""
    request = urllib2.Request(update_template.format(path),
                              headers={common.metadata_flavor: common.metadata_flavor_google})
    # urllib2 has no native DELETE support; override the method hook.
    request.get_method = lambda: "DELETE"
    try:
        urllib2.urlopen(request)
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
def update_content(path, new_data):
    """POST *new_data* to *path* via the update endpoint (expected to succeed).

    Non-string payloads are JSON-encoded with compact separators first; the
    body is form-quoted before sending.
    """
    if not isinstance(new_data, str):
        new_data = json.dumps(new_data, separators=(',', ':'))
    data = urllib.quote_plus(new_data)
    request = urllib2.Request(update_template.format(path),
                              data,
                              headers={common.metadata_flavor: common.metadata_flavor_google,
                                       common.content_type: common.content_type_text})
    try:
        urllib2.urlopen(request)
    except urllib2.URLError as e:
        raise Exception('Error connecting to emulator at %s - %s' % (server_address, e))
def test_root():
check_dir_absolute(server_address, '0.1/\ncomputeMetadata/\n')
def test_root_computeMetadata():
check_dir_absolute(server_address + '/computeMetadata', 'v1/\nv1beta1/\n')
def test_root_computeMetadata_v1():
check_dir('', 'instance/\nproject/\n')
def test_instance():
check_dir('instance', 'attributes/\ncpu-platform\ndescription\ndisks/\nhostname\nid\nimage\nlicenses/\nmachine-type\n'
'maintenance-event\nnetwork-interfaces/\nscheduling/\nservice-accounts/\ntags\nvirtual-clock/\nzone\n')
def test_instance_attributes():
check_dir('instance/attributes', 'my_instance_key1\n')
def test_instance_attributes_myinstancekey1():
check_path('instance/attributes/my_instance_key1', 'my_instance_value1')
def test_instance_cpuplatform():
check_path('instance/cpu-platform', 'Intel Haswell')
def test_instance_description():
check_path('instance/description', 'Fake instance for metadata server emulator')
def test_instance_disks():
check_dir('instance/disks', '0/\n')
def test_instance_disks_0():
check_dir('instance/disks/0', 'device-name\nindex\nmode\ntype\n')
def test_instance_disks_0_devicename():
check_path('instance/disks/0/device-name', 'boot')
def test_instance_disks_0_index():
check_path('instance/disks/0/index', 0)
def test_instance_disks_0_mode():
check_path('instance/disks/0/mode', 'READ_WRITE')
def test_instance_disks_0_type():
check_path('instance/disks/0/type', 'PERSISTENT')
def test_instance_hostname():
check_path('instance/hostname', 'name.project.google.com.internal')
def test_instance_id():
check_path('instance/id', 13512546227574112017)
def test_instance_image():
check_path('instance/image', '')
def test_instance_licenses():
check_dir('instance/licenses', '0/\n1/\n2/\n')
def test_instance_licenses_0():
check_dir('instance/licenses/0', 'id\n')
def test_instance_licenses_0_id():
check_path('instance/licenses/0/id', '0')
def test_instance_machinetype():
check_path('instance/machine-type', 'projects/12345/machineTypes/n1-standard-1')
def test_instance_maintenanceevent():
check_path('instance/maintenance-event', 'NONE')
def test_instance_networkinterfaces():
check_dir('instance/network-interfaces', '0/\n')
def test_instance_networkinterfaces_0():
check_dir('instance/network-interfaces/0', 'access-configs/\nforwarded-ips/\nip\nip-aliases/\nmac\nnetwork\n')
def test_instance_networkinterfaces_0_accessconfigs():
check_dir('instance/network-interfaces/0/access-configs', '0/\n')
def test_instance_networkinterfaces_0_accessconfigs_0():
check_dir('instance/network-interfaces/0/access-configs/0', 'external-ip\ntype\n')
def test_instance_networkinterfaces_0_accessconfigs_0_externalip():
check_path('instance/network-interfaces/0/access-configs/0/external-ip', '0.0.0.0')
def test_instance_networkinterfaces_0_accessconfigs_0_type():
check_path('instance/network-interfaces/0/access-configs/0/type', 'ONE_TO_ONE_NAT')
def test_instance_networkinterfaces_0_forwardedips():
check_dir('instance/network-interfaces/0/forwarded-ips', '')
def test_instance_networkinterfaces_0_ip():
check_path('instance/network-interfaces/0/ip', '0.0.0.0')
def test_instance_networkinterfaces_0_ipaliases():
check_dir('instance/network-interfaces/0/ip-aliases', '')
def test_instance_networkinterfaces_0_mac():
check_path('instance/network-interfaces/0/mac', '00:00:00:00:00:00')
def test_instance_networkinterfaces_0_network():
check_path('instance/network-interfaces/0/network', 'projects/12345/networks/default')
def test_instance_scheduling():
check_dir('instance/scheduling', 'automatic-restart\non-host-maintenance\npreemptible\n')
def test_instance_scheduling_automaticrestart():
check_path('instance/scheduling/automatic-restart', 'TRUE')
def test_instance_scheduling_onhostmaintenance():
check_path('instance/scheduling/on-host-maintenance', 'MIGRATE')
def test_instance_scheduling_preemptible():
check_path('instance/scheduling/preemptible', 'FALSE')
def test_instance_serviceaccounts():
check_dir('instance/service-accounts', '12345-compute@developer.gserviceaccount.com/\ndefault/\n')
def test_instance_serviceaccounts_0():
check_dir('instance/service-accounts/12345-compute@developer.gserviceaccount.com', 'aliases\nemail\nscopes\ntoken\n')
def test_instance_serviceaccounts_0_aliases():
check_path('instance/service-accounts/12345-compute@developer.gserviceaccount.com/aliases', ['default'])
def test_instance_serviceaccounts_0_email():
check_path('instance/service-accounts/12345-compute@developer.gserviceaccount.com/email',
'12345-compute@developer.gserviceaccount.com')
def test_instance_serviceaccounts_0_scopes():
check_path('instance/service-accounts/12345-compute@developer.gserviceaccount.com/scopes',
['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/userinfo.email'])
def test_instance_serviceaccounts_default():
check_dir('instance/service-accounts/default', 'aliases\nemail\nscopes\ntoken\n')
def test_instance_serviceaccounts_default_aliases():
check_path('instance/service-accounts/default/aliases', ['default'])
def test_instance_serviceaccounts_default_email():
check_path('instance/service-accounts/default/email', '12345-compute@developer.gserviceaccount.com')
def test_instance_serviceaccounts_default_scopes():
check_path('instance/service-accounts/default/scopes',
['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/userinfo.email'])
def test_instance_tags():
check_path('instance/tags', ['a', 'b', 'c'], default='json')
def test_instance_virtualclock():
check_dir('instance/virtual-clock', 'drift-token\n')
def test_instance_virtualclock_drifttoken():
check_path('instance/virtual-clock/drift-token', '0')
def test_instance_zone():
check_path('instance/zone', 'projects/12345/zones/us-central1-f')
def test_project():
check_dir('project', 'attributes/\nnumeric-project-id\nproject-id\n')
def test_project_attributes():
check_dir('project/attributes', 'my_project_key1\n')
def test_project_attributes_myprojectkey1():
check_path('project/attributes/my_project_key1', 'my_project_value1')
def test_project_numericprojectid():
check_path('project/numeric-project-id', 12345)
def test_project_projectid():
check_path('project/project-id', 'fake-project')
def test_double_slash():
check_path('project//project-id', 'fake-project')
def test_recursive_token_1():
check_dir_recursive('instance/service-accounts/12345-compute@developer.gserviceaccount.com/token',
'{"access_token":"0123456789ABCDEF","expires_in":3000,"token_type":"Bearer"}',
test_trailing_slash=False,
use_recursive_field=False,
expect_metadata_header=False)
def test_recursive_token_2():
check_dir_recursive('instance/service-accounts/12345-compute@developer.gserviceaccount.com/token',
'{"access_token":"0123456789ABCDEF","expires_in":3000,"token_type":"Bearer"}',
test_trailing_slash=False,
use_recursive_field=True,
expect_metadata_header=False)
def test_recursive_token_3():
check_dir_recursive('instance/service-accounts/default/token',
'{"access_token":"0123456789ABCDEF","expires_in":3000,"token_type":"Bearer"}',
test_trailing_slash=False,
use_recursive_field=False,
expect_metadata_header=False)
def test_recursive_token_4():
check_dir_recursive('instance/service-accounts/default/token',
'{"access_token":"0123456789ABCDEF","expires_in":3000,"token_type":"Bearer"}',
test_trailing_slash=False,
use_recursive_field=True,
expect_metadata_header=False)
def test_recursive_instance_attributes():
check_dir_recursive('instance/attributes', '{"my_instance_key1":"my_instance_value1"}')
def test_recursive_instance_disks():
check_dir_recursive('instance/disks', '[{"deviceName":"boot","index":0,"mode":"READ_WRITE","type":"PERSISTENT"}]')
def test_recursive_instance_disks_0():
check_dir_recursive('instance/disks/0', '{"deviceName":"boot","index":0,"mode":"READ_WRITE","type":"PERSISTENT"}')
def test_recursive_instance_licenses():
check_dir_recursive('instance/licenses', '[{"id":"0"},{"id":"1"},{"id":"2"}]')
def test_recursive_instance_licenses_0():
check_dir_recursive('instance/licenses/0', '{"id":"0"}')
def test_recursive_instance_networkinterfaces():
check_dir_recursive('instance/network-interfaces',
'[{"accessConfigs":[{"externalIp":"0.0.0.0","type":"ONE_TO_ONE_NAT"}],"forwardedIps":[],"ip":"0.0.0.0",'
'"ipAliases":[],"mac":"00:00:00:00:00:00","network":"projects/12345/networks/default"}]')
def test_recursive_instance_networkinterfaces_0():
check_dir_recursive('instance/network-interfaces/0',
'{"accessConfigs":[{"externalIp":"0.0.0.0","type":"ONE_TO_ONE_NAT"}],"forwardedIps":[],"ip":"0.0.0.0",'
'"ipAliases":[],"mac":"00:00:00:00:00:00","network":"projects/12345/networks/default"}')
def test_recursive_instance_networkinterfaces_0_accessconfigs():
check_dir_recursive('instance/network-interfaces/0/access-configs', '[{"externalIp":"0.0.0.0","type":"ONE_TO_ONE_NAT"}]')
def test_recursive_instance_networkinterfaces_0_forwardedips():
check_dir_recursive('instance/network-interfaces/0/forwarded-ips', '[]')
def test_recursive_instance_networkinterfaces_0_ipaliases():
check_dir_recursive('instance/network-interfaces/0/ip-aliases', '[]')
def test_recursive_instance_scheduling():
check_dir_recursive('instance/scheduling', '{"automaticRestart":"TRUE","onHostMaintenance":"MIGRATE","preemptible":"FALSE"}')
def test_recursive_instance_serviceaccounts():
check_dir_recursive('instance/service-accounts',
'{"12345-compute@developer.gserviceaccount.com":{"aliases":["default"],'
'"email":"12345-compute@developer.gserviceaccount.com",'
'"scopes":["https://www.googleapis.com/auth/cloud-platform","https://www.googleapis.com/auth/userinfo.email"]},'
'"default":{"aliases":["default"],"email":"12345-compute@developer.gserviceaccount.com",'
'"scopes":["https://www.googleapis.com/auth/cloud-platform","https://www.googleapis.com/auth/userinfo.email"]}}')
def test_recursive_instance_serviceaccounts_1():
check_dir_recursive('instance/service-accounts/12345-compute@developer.gserviceaccount.com',
'{"aliases":["default"],"email":"12345-compute@developer.gserviceaccount.com",'
'"scopes":["https://www.googleapis.com/auth/cloud-platform","https://www.googleapis.com/auth/userinfo.email"]}')
def test_recursive_instance_serviceaccounts_2():
check_dir_recursive('instance/service-accounts/default',
'{"aliases":["default"],"email":"12345-compute@developer.gserviceaccount.com",'
'"scopes":["https://www.googleapis.com/auth/cloud-platform","https://www.googleapis.com/auth/userinfo.email"]}')
def test_recursive_instance_virtualclock():
check_dir_recursive('instance/virtual-clock', '{"driftToken":"0"}')
def test_recursive_project():
check_dir_recursive('project',
'{"attributes":{"my_project_key1":"my_project_value1"},"numericProjectId":12345,"projectId":"fake-project"}')
def test_recursive_project_attributes():
check_dir_recursive('project/attributes', '{"my_project_key1":"my_project_value1"}')
def test_recursive_value():
expect_content('project/numeric-project-id?recursive=true', '12345', common.content_type_text)
def test_recursive_value_alt_1():
expect_content('project/numeric-project-id?recursive=true&alt=text', '12345', common.content_type_text)
def test_recursive_value_alt_2():
expect_content('project/numeric-project-id?recursive=true&alt=json', '12345', common.content_type_json)
def test_ignore_nonsense_parameters():
expect_content('instance/scheduling/automatic-restart?last_etag=abc&timeout_sec=5&foo=blah', 'TRUE', common.content_type_text)
def test_ignore_nonsense_values():
expect_content('instance/network-interfaces?recursive=xyz', '0/\n', common.content_type_text)
def test_error_no_metadata_flavor_1():
expect_error('', 403, metadata_flavor_value='')
def test_error_wrong_metadata_flavor_1():
expect_error('', 403, metadata_flavor_value='Something-Else')
def test_error_no_metadata_flavor_2():
expect_error('instance', 403, metadata_flavor_value='')
def test_error_wrong_metadata_flavor_2():
expect_error('instance', 403, metadata_flavor_value='Something-Else')
def test_error_bad_index_1():
expect_error('instance/disks/x', 404)
def test_error_bad_index_2():
expect_error('instance/disks/99', 404)
def test_error_slash_on_value():
expect_error('instance/virtual-clock/drift-token/', 404)
def test_error_bad_key_1():
expect_error('foo', 404)
def test_error_bad_key_2():
expect_error('project/bar', 404)
def test_error_alt_on_dir_1():
expect_error('project?alt=json', 400)
def test_error_alt_on_dir_2():
expect_error('project?alt=text', 400)
def test_error_wait_on_dir_without_recursive():
expect_error('project?wait_for_change=true', 400)
def test_error_slash_on_token():
expect_error('instance/service-accounts/default/token/', 404)
def test_error_index_into_array_value():
expect_error('instance/service-accounts/12345-compute@developer.gserviceaccount.com/scopes/0', 404)
def test_timeout_sec():
    """wait_for_change with timeout_sec=5 must block for the full timeout."""
    start = datetime.datetime.now()
    expect_content('instance/scheduling/automatic-restart?wait_for_change=true&timeout_sec=5', 'TRUE', common.content_type_text)
    end = datetime.datetime.now()
    elapsed = end - start
    # Fix: `elapsed.seconds > 5` is flaky -- a wait of e.g. 5.2s gives
    # timedelta.seconds == 5 and `5 > 5` is False, failing the test even
    # though the server honored the timeout. Compare fractional seconds
    # against the timeout inclusively instead.
    assert elapsed.total_seconds() >= 5
def test_update_string():
path = 'instance/cpu-platform'
original = 'Intel Haswell'
changed = 'My Platform'
check_path(path, original)
update_content(path, changed)
try:
check_path(path, changed)
finally:
update_content(path, original)
def test_update_int():
path = 'instance/id'
original = 13512546227574112017
changed = 43985743985
check_path(path, original)
update_content(path, changed)
try:
check_path(path, changed)
finally:
update_content(path, original)
def test_update_indexed_list():
path = 'instance/licenses'
original = [
{
"id": "0"
},
{
"id": "1"
},
{
"id": "2"
}
]
changed = [
{
"id": "42"
},
{
"id": "43587"
}
]
check_dir_recursive(path, json.dumps(original, separators=(',', ':')))
update_content(path, changed)
try:
check_dir_recursive(path, json.dumps(changed, separators=(',', ':')))
finally:
update_content(path, original)
def test_update_list():
path = 'instance/tags'
original = ['a', 'b', 'c']
changed = ['tag1', 'tag2']
check_path(path, original, default='json')
update_content(path, changed)
try:
check_path(path, changed, default='json')
finally:
update_content(path, original)
def test_update_directory():
path = 'instance/attributes'
original = {'my_instance_key1':
'my_instance_value1'}
changed = {'abc': 'value1',
'xyz': 'value2'}
check_dir_recursive(path, json.dumps(original, separators=(',', ':')))
update_content(path, changed)
try:
check_dir_recursive(path, json.dumps(changed, separators=(',', ':')))
finally:
update_content(path, original)
def test_update_invalid_value():
path = 'instance/attributes'
changed = {'abc': 'value1',
'complex' : { 'foo': 'bar' },
'xyz': 'value2'}
expect_error_updating_content(path, changed, 400)
def test_etags_differ_based_on_recursive():
etag1 = expect_content('instance/disks?recursive=true',
'[{"deviceName":"boot","index":0,"mode":"READ_WRITE","type":"PERSISTENT"}]',
common.content_type_json)
etag2 = expect_content('instance/disks',
'0/\n',
common.content_type_text)
assert etag1 != etag2
def test_etag_reverts_when_original_value_restored():
path = 'instance/cpu-platform'
original = 'Intel Haswell'
changed = 'My Platform'
etag1 = expect_content(path, original, common.content_type_text)
update_content(path, changed)
try:
etag2 = expect_content(path, changed, common.content_type_text)
assert etag1 != etag2
finally:
update_content(path, original)
etag3 = expect_content(path, original, common.content_type_text)
assert etag1 == etag3
def test_wait_for_change():
path = 'instance/cpu-platform'
original = 'Intel Haswell'
changed = 'My Platform'
def changeData():
time.sleep(1)
update_content(path, changed)
update_content(path, original)
thread = threading.Thread(target=changeData)
thread.start()
expect_content(path + '?wait_for_change=true&timeout_sec=5', changed, common.content_type_text)
thread.join()
def test_wait_for_change_on_descendant():
path = 'instance/disks'
descendant_path = 'instance/disks/0/device-name'
original = 'boot'
changed = 'My Custom Disk'
def changeData():
time.sleep(1)
update_content(descendant_path, changed)
update_content(descendant_path, original)
thread = threading.Thread(target=changeData)
thread.start()
expect_content(path + '?recursive=True&wait_for_change=true&timeout_sec=5',
'[{"deviceName":"My Custom Disk","index":0,"mode":"READ_WRITE","type":"PERSISTENT"}]',
common.content_type_json)
thread.join()
def test_wait_for_change_on_delete():
path = 'instance/attributes'
original = {'my_instance_key1':
'my_instance_value1'}
def changeData():
time.sleep(1)
delete_content(path + '/my_instance_key1')
update_content(path, original)
thread = threading.Thread(target=changeData)
thread.start()
expect_content(path + '?recursive=True&wait_for_change=true&timeout_sec=5', '{}', common.content_type_json)
thread.join()
def test_last_etag_match():
path = 'instance/cpu-platform'
original = 'Intel Haswell'
changed = 'My Platform'
etag1 = expect_content(path, original, common.content_type_text)
def changeData():
time.sleep(1)
update_content(path, changed)
update_content(path, original)
thread = threading.Thread(target=changeData)
thread.start()
etag2 = expect_content(path + '?wait_for_change=true&last_etag=%s&timeout_sec=5' % etag1,
changed,
common.content_type_text)
assert etag1 != etag2
thread.join()
def test_last_etag_non_match():
path = 'instance/cpu-platform'
original = 'Intel Haswell'
changed = 'My Platform'
etag1 = expect_content(path, original, common.content_type_text)
update_content(path, changed)
try:
start = datetime.datetime.now()
etag2 = expect_content(path + '?wait_for_change=true&last_etag=%s&timeout_sec=5' % etag1,
changed,
common.content_type_text)
end = datetime.datetime.now()
elapsed = end - start
assert elapsed.seconds < 5
assert etag1 != etag2
finally:
update_content(path, original)
def test_add_value():
update_content('instance/attributes/foo', 'bar')
try:
check_dir_recursive('instance/attributes', '{"my_instance_key1":"my_instance_value1","foo":"bar"}')
finally:
update_content('instance/attributes', '{"my_instance_key1":"my_instance_value1"}')
def test_add_key_with_dashes():
update_content('instance/attributes/foo-bar', 'baz')
try:
check_dir_recursive('instance/attributes', '{"my_instance_key1":"my_instance_value1","foo-bar":"baz"}')
finally:
update_content('instance/attributes', '{"my_instance_key1":"my_instance_value1"}')
def test_add_value_invalid():
expect_error_updating_content('instance/foo', 'bar', 400)
def test_delete_content():
delete_content('instance/attributes/my_instance_key1')
try:
check_dir_recursive('instance/attributes', '{}')
finally:
update_content('instance/attributes', '{"my_instance_key1":"my_instance_value1"}')
def test_delete_list_item():
path = 'instance/licenses'
original = [
{
"id": "0"
},
{
"id": "1"
},
{
"id": "2"
}
]
changed = [
{
"id": "0"
},
{
"id": "2"
}
]
check_dir_recursive(path, json.dumps(original, separators=(',', ':')))
delete_content(path + '/1')
try:
check_dir_recursive(path, json.dumps(changed, separators=(',', ':')))
finally:
update_content(path, original)
def test_delete_value_invalid():
expect_error_deleting_content('instance/attributes', 400)
| |
import pickle
import time
from unittest import skipIf, TestCase
from nose.tools import istest, raises
import redis
from pycket.driver import MemcachedDriver, RedisDriver
from pycket.session import ConfigurationError, SessionManager, SessionMixin
skip_slow_tests = False
class SessionMixinTest(TestCase):
    """Driver selection and configuration-error behavior of SessionMixin."""
    @istest
    def starts_handler_with_session_manager(self):
        # A handler with valid pycket settings lazily exposes a SessionManager.
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'engine': 'redis',
                }
            }
        self.assertIsInstance(StubHandler().session, SessionManager)
    @istest
    @raises(ConfigurationError)
    def cannot_start_driver_without_pycket_settings(self):
        # No 'pycket' key at all -> ConfigurationError on first use.
        class StubHandler(SessionMixin):
            settings = {}
        StubHandler().session.get('something')
    @istest
    @raises(ConfigurationError)
    def cannot_start_driver_without_pycket_engine(self):
        # 'pycket' present but missing the required 'engine' key.
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'not-an-engine': 'something-useless',
                }
            }
        StubHandler().session.get('something')
    @istest
    def creates_session_for_redis(self):
        # engine='redis' selects the RedisDriver.
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'engine': 'redis',
                }
            }
        self.assertIsInstance(StubHandler().session.driver, RedisDriver)
    @istest
    def creates_session_for_memcached(self):
        # engine='memcached' selects the MemcachedDriver.
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'engine': 'memcached',
                }
            }
        self.assertIsInstance(StubHandler().session.driver, MemcachedDriver)
class RedisTestCase(TestCase):
    """Base test case providing a freshly flushed Redis client per test."""

    client = None  # created lazily on first setUp of each instance

    def setUp(self):
        if self.client is None:
            sessions_db = RedisDriver.DEFAULT_STORAGE_IDENTIFIERS['db_sessions']
            self.client = redis.Redis(db=sessions_db)
        # Start every test from an empty store.
        self.client.flushall()
class SessionManagerTest(RedisTestCase):
    @istest
    def sets_session_id_on_cookies(self):
        """First write through the manager must read and then set the PYCKET_ID cookie."""
        test_case = self
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'engine': 'redis',
                }
            }
            def get_secure_cookie(self, name):
                # NOTE(review): the flag names look swapped -- the *get* hook
                # records `cookie_set` and the *set* hook records
                # `cookie_retrieved`; both are asserted below, so behavior is
                # unaffected, but the naming is misleading. Worth renaming.
                test_case.assertEqual(name, 'PYCKET_ID')
                self.cookie_set = True
                return None
            def set_secure_cookie(self, name, value, expires_days, expires):
                test_case.assertEqual(name, 'PYCKET_ID')
                test_case.assertIsInstance(value, str)
                test_case.assertGreater(len(value), 0)
                self.cookie_retrieved = True
        handler = StubHandler()
        session_manager = SessionManager(handler)
        session_manager.set('some-object', 'Some object')
        # Both hooks must have fired during the set().
        self.assertTrue(handler.cookie_retrieved)
        self.assertTrue(handler.cookie_set)
    @istest
    def does_not_set_session_id_if_already_exists(self):
        """An existing PYCKET_ID cookie is reused, not overwritten."""
        class StubHandler(SessionMixin):
            settings = {
                'pycket': {
                    'engine': 'redis',
                }
            }
            def get_secure_cookie(self, name):
                # Returning a non-None id means the cookie already exists;
                # no set_secure_cookie hook is defined, so any attempt to set
                # one would raise AttributeError and fail the test.
                self.cookie_retrieved = True
                return 'some-id'
        handler = StubHandler()
        manager = SessionManager(handler)
        manager.set('some-object', 'Some object')
        self.assertTrue(handler.cookie_retrieved)
@istest
def saves_session_object_on_redis_with_same_session_id_as_cookie(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
raw_session = self.client.get(handler.session_id)
session = pickle.loads(raw_session)
self.assertEqual(session['some-object']['foo'], 'bar')
@istest
def retrieves_session_with_same_data_as_saved(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
self.assertEqual(manager.get('some-object')['foo'], 'bar')
@istest
def keeps_previous_items_when_setting_new_ones(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
manager.set('some-object2', {'foo2': 'bar2'})
self.assertEqual(manager.get('some-object')['foo'], 'bar')
self.assertEqual(manager.get('some-object2')['foo2'], 'bar2')
@istest
def retrieves_none_if_session_object_not_previously_set(self):
handler = StubHandler()
manager = SessionManager(handler)
self.assertIsNone(manager.get('unexistant-object'))
@istest
def deletes_objects_from_session(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
manager.set('some-object2', {'foo2': 'bar2'})
manager.delete('some-object')
raw_session = self.client.get(handler.session_id)
session = pickle.loads(raw_session)
self.assertEqual(list(session.keys()), ['some-object2'])
@istest
@skipIf(skip_slow_tests, 'This test is too slow')
def still_retrieves_object_if_not_passed_from_expiration(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('foo', 'bar')
time.sleep(1)
self.assertEqual(manager.get('foo'), 'bar')
@istest
@skipIf(skip_slow_tests, 'This test is too slow')
def cannot_retrieve_object_if_passed_from_expiration(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.driver.EXPIRE_SECONDS = 1
manager.set('foo', 'bar')
time.sleep(manager.driver.EXPIRE_SECONDS + 1)
self.assertIsNone(manager.get('foo'))
@istest
def retrieves_object_with_dict_key(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('foo', 'bar')
self.assertEqual(manager['foo'], 'bar')
@istest
@raises(KeyError)
def raises_key_error_if_object_doesnt_exist(self):
handler = StubHandler()
manager = SessionManager(handler)
manager['foo']
@istest
def sets_object_with_dict_key(self):
handler = StubHandler()
manager = SessionManager(handler)
manager['foo'] = 'bar'
self.assertEqual(manager['foo'], 'bar')
@istest
def gets_default_value_if_provided_and_not_in_client(self):
handler = StubHandler()
manager = SessionManager(handler)
value = manager.get('foo', 'Default')
self.assertEqual(value, 'Default')
@istest
def sets_session_id_to_last_a_browser_session_as_default(self):
test_case = self
class StubHandler(SessionMixin):
settings = {
'pycket': {
'engine': 'redis',
}
}
def get_secure_cookie(self, name):
return None
def set_secure_cookie(self, name, value, expires_days, expires):
test_case.assertIsNone(expires_days)
test_case.assertIsNone(expires)
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', 'Some object')
@istest
def repasses_cookies_options(self):
test_case = self
class StubHandler(SessionMixin):
settings = {
'pycket': {
'engine': 'redis',
'cookies': {
'foo': 'bar',
}
},
}
def get_secure_cookie(self, name):
return None
def set_secure_cookie(self, *args, **kwargs):
test_case.assertEqual(kwargs['foo'], 'bar')
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', 'Some object')
@istest
def uses_custom_expires_if_provided(self):
test_case = self
class StubHandler(SessionMixin):
settings = {
'pycket': {
'engine': 'redis',
'cookies': {
'expires': 'St. Neversday',
}
},
}
def get_secure_cookie(self, name):
return None
def set_secure_cookie(self, *args, **kwargs):
test_case.assertEqual(kwargs['expires'], 'St. Neversday')
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', 'Some object')
@istest
def uses_custom_expires_days_if_provided(self):
test_case = self
class StubHandler(SessionMixin):
settings = {
'pycket': {
'engine': 'redis',
'cookies': {
'expires_days': 'St. Neversday',
}
},
}
def get_secure_cookie(self, name):
return None
def set_secure_cookie(self, *args, **kwargs):
test_case.assertEqual(kwargs['expires_days'], 'St. Neversday')
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', 'Some object')
@istest
def uses_custom_sessions_database_if_provided(self):
handler = StubHandler()
handler.settings = {
'pycket': {
'engine': 'redis',
'storage': {
'db_sessions': 10,
'db_notifications': 11,
}
},
}
manager = SessionManager(handler)
manager.set('foo', 'bar')
self.assertEqual(manager.driver.client.connection_pool._available_connections[0].db, 10)
@istest
def deletes_multiple_session_objects_at_once(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
manager.set('some-object2', {'foo2': 'bar2'})
manager.delete('some-object', 'some-object2')
raw_session = self.client.get(handler.session_id)
session = pickle.loads(raw_session)
self.assertEqual(list(session.keys()), [])
@istest
def deletes_item_using_command(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('some-object', {'foo': 'bar'})
del manager['some-object']
self.assertIsNone(manager.get('some-object'))
@istest
def verifies_if_a_session_exist(self):
handler = StubHandler()
manager = SessionManager(handler)
self.assertFalse('foo' in manager)
manager['foo'] = 'bar'
self.assertTrue('foo' in manager)
@istest
def gets_all_available_keys_from_session(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('foo', 'FOO')
manager.set('bar', 'BAR')
self.assertListEqual(sorted(manager.keys()), sorted(['foo', 'bar']))
@istest
def iterates_with_method_over_keys(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('foo', 'FOO')
manager.set('bar', 'BAR')
iterations = 0
for key in manager.iterkeys():
self.assertTrue(key in manager)
iterations += 1
self.assertEqual(iterations, 2)
@istest
def iterates_without_method_over_keys(self):
handler = StubHandler()
manager = SessionManager(handler)
manager.set('foo', 'FOO')
manager.set('bar', 'BAR')
iterations = 0
for key in manager:
self.assertTrue(key in manager)
iterations += 1
self.assertEqual(iterations, 2)
class StubHandler(object):
    """Minimal tornado-handler stand-in used by the tests above.

    It always reports the same secure-cookie value ('session-id') and
    defaults to a plain Redis engine configuration.
    """

    session_id = 'session-id'

    def __init__(self, settings=None):
        # Fall back to the default redis configuration when the caller does
        # not supply explicit settings.
        if settings is None:
            settings = {'pycket': {'engine': 'redis'}}
        self.settings = settings

    def get_secure_cookie(self, name):
        # Behave as if the PYCKET_ID cookie were already present.
        return self.session_id
| |
#!/usr/local/lib/mailinabox/env/bin/python
# This script performs a backup of all user data:
# 1) System services are stopped.
# 2) STORAGE_ROOT/backup/before-backup is executed if it exists.
# 3) An incremental encrypted backup is made using duplicity.
# 4) The stopped services are restarted.
# 5) STORAGE_ROOT/backup/after-backup is executed if it exists.
import os, os.path, shutil, glob, re, datetime, sys
import dateutil.parser, dateutil.relativedelta, dateutil.tz
import rtyaml
from exclusiveprocess import Lock
from utils import load_environment, shell, wait_for_service, fix_boto
# Extra duplicity arguments used for rsync:// targets: authenticate with the
# Mail-in-a-Box SSH identity and disable interactive host-key prompts.
rsync_ssh_options = [
    "--ssh-options='-i /root/.ssh/id_rsa_miab'",
    "--rsync-options=-e \"/usr/bin/ssh -oStrictHostKeyChecking=no -oBatchMode=yes -p 22 -i /root/.ssh/id_rsa_miab\"",
]
def backup_status(env):
    """Return the status of all duplicity backups at the configured target.

    Returns {"backups": [...]} sorted reverse chronologically, where each
    entry carries the backup's date, size, full/incremental flag and an
    estimate of when it will be deleted. Returns {} when backups are
    disabled. Raises Exception if duplicity cannot read the target.
    """
    # Root folder
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    # What is the current status of backups?
    # Query duplicity to get a list of all backups.
    # Use the number of volumes to estimate the size.
    config = get_backup_config(env)
    now = datetime.datetime.now(dateutil.tz.tzlocal())
    # Are backups disabled?
    if config["target"] == "off":
        return { }
    backups = { }
    backup_cache_dir = os.path.join(backup_root, 'cache')
    def reldate(date, ref, clip):
        # Human-readable difference between two datetimes; returns `clip`
        # when `date` lies in the future relative to `ref`.
        if ref < date: return clip
        rd = dateutil.relativedelta.relativedelta(ref, date)
        if rd.years > 1: return "%d years, %d months" % (rd.years, rd.months)
        if rd.years == 1: return "%d year, %d months" % (rd.years, rd.months)
        if rd.months > 1: return "%d months, %d days" % (rd.months, rd.days)
        if rd.months == 1: return "%d month, %d days" % (rd.months, rd.days)
        if rd.days >= 7: return "%d days" % rd.days
        if rd.days > 1: return "%d days, %d hours" % (rd.days, rd.hours)
        if rd.days == 1: return "%d day, %d hours" % (rd.days, rd.hours)
        return "%d hours, %d minutes" % (rd.hours, rd.minutes)
    # Get duplicity collection status and parse for a list of backups.
    def parse_line(line):
        # Parse one " full ..."/" inc ..." line of `collection-status` output.
        keys = line.strip().split()
        date = dateutil.parser.parse(keys[1]).astimezone(dateutil.tz.tzlocal())
        return {
            "date": keys[1],
            "date_str": date.strftime("%x %X") + " " + now.tzname(),
            "date_delta": reldate(date, now, "the future?"),
            "full": keys[0] == "full",
            "size": 0, # collection-status doesn't give us the size
            "volumes": keys[2], # number of archive volumes for this backup (not really helpful)
        }
    code, collection_status = shell('check_output', [
        "/usr/bin/duplicity",
        "collection-status",
        "--archive-dir", backup_cache_dir,
        "--gpg-options", "--cipher-algo=AES256",
        "--log-fd", "1",
        config["target"],
        ] + rsync_ssh_options,
        get_env(env),
        trap=True)
    if code != 0:
        # Command failed. This is likely due to an improperly configured remote
        # destination for the backups or the last backup job terminated unexpectedly.
        raise Exception("Something is wrong with the backup: " + collection_status)
    for line in collection_status.split('\n'):
        if line.startswith(" full") or line.startswith(" inc"):
            backup = parse_line(line)
            backups[backup["date"]] = backup
    # Look at the target to get the sizes of each of the backups. There is more than one file per backup.
    for fn, size in list_target_files(config):
        m = re.match(r"duplicity-(full|full-signatures|(inc|new-signatures)\.(?P<incbase>\d+T\d+Z)\.to)\.(?P<date>\d+T\d+Z)\.", fn)
        if not m: continue # not a part of a current backup chain
        key = m.group("date")
        backups[key]["size"] += size
    # Ensure the rows are sorted reverse chronologically.
    # This is relied on by should_force_full() and the next step.
    backups = sorted(backups.values(), key = lambda b : b["date"], reverse=True)
    # Get the average size of incremental backups, the size of the
    # most recent full backup, and the date of the most recent
    # backup and the most recent full backup.
    incremental_count = 0
    incremental_size = 0
    first_date = None
    first_full_size = None
    first_full_date = None
    for bak in backups:
        if first_date is None:
            first_date = dateutil.parser.parse(bak["date"])
        if bak["full"]:
            first_full_size = bak["size"]
            first_full_date = dateutil.parser.parse(bak["date"])
            break
        incremental_count += 1
        incremental_size += bak["size"]
    # When will the most recent backup be deleted? It won't be deleted if the next
    # backup is incremental, because the increments rely on all past increments.
    # So first guess how many more incremental backups will occur until the next
    # full backup. That full backup frees up this one to be deleted. But, the backup
    # must also be at least min_age_in_days old too.
    deleted_in = None
    if incremental_count > 0 and incremental_size > 0 and first_full_size is not None:
        # How many days until the next incremental backup? First, the part of
        # the algorithm based on increment sizes:
        est_days_to_next_full = (.5 * first_full_size - incremental_size) / (incremental_size/incremental_count)
        est_time_of_next_full = first_date + datetime.timedelta(days=est_days_to_next_full)
        # ...And then the part of the algorithm based on full backup age:
        est_time_of_next_full = min(est_time_of_next_full, first_full_date + datetime.timedelta(days=config["min_age_in_days"]*10+1))
        # It still can't be deleted until it's old enough.
        est_deleted_on = max(est_time_of_next_full, first_date + datetime.timedelta(days=config["min_age_in_days"]))
        deleted_in = "approx. %d days" % round((est_deleted_on-now).total_seconds()/60/60/24 + .5)
    # When will a backup be deleted? Set the deleted_in field of each backup.
    saw_full = False
    for bak in backups:
        if deleted_in:
            # The most recent increment in a chain and all of the previous backups
            # it relies on are deleted at the same time.
            bak["deleted_in"] = deleted_in
        if bak["full"]:
            # Reset when we get to a full backup. A new chain start *next*.
            saw_full = True
            deleted_in = None
        elif saw_full and not deleted_in:
            # We're now on backups prior to the most recent full backup. These are
            # free to be deleted as soon as they are min_age_in_days old.
            deleted_in = reldate(now, dateutil.parser.parse(bak["date"]) + datetime.timedelta(days=config["min_age_in_days"]), "on next daily backup")
            bak["deleted_in"] = deleted_in
    return {
        "backups": backups,
    }
def should_force_full(config, env):
    """Decide whether the next backup run must be a full backup.

    Walks the reverse-chronological backup list, summing incremental sizes
    until the most recent full backup is reached. Forces a full backup when
    the accumulated increments exceed half the full backup's size, when the
    full backup is older than min_age_in_days*10+1 days, or when no full
    backup exists at all.
    """
    total_incremental = 0
    for backup in backup_status(env)["backups"]:
        if not backup["full"]:
            # Accumulate increment sizes made since the last full backup.
            total_incremental += backup["size"]
            continue
        # Reached the most recent full backup: compare against it.
        if total_incremental > .5 * backup["size"]:
            return True
        stale_after = dateutil.parser.parse(backup["date"]) + datetime.timedelta(days=config["min_age_in_days"] * 10 + 1)
        return stale_after < datetime.datetime.now(dateutil.tz.tzlocal())
    # No full backup exists yet, so the first one must be full.
    return True
def get_passphrase(env):
    """Return the backup encryption passphrase from secret_key.txt.

    gpg only consumes the first line of the file, so that line is
    sanity-checked to be at least 43 base64 characters -- enough entropy to
    match AES256's 32-byte key length. (The file itself is 2048 random bits
    base64-encoded with line breaks every 65 characters.)
    """
    key_path = os.path.join(env["STORAGE_ROOT"], 'backup', 'secret_key.txt')
    with open(key_path) as keyfile:
        passphrase = keyfile.readline().strip()
    if len(passphrase) < 43:
        raise Exception("secret_key.txt's first line is too short!")
    return passphrase
def get_env(env):
    """Build the environment variables passed to duplicity: the encryption
    passphrase, plus AWS credentials when the target is S3."""
    config = get_backup_config(env)
    duplicity_env = {"PASSPHRASE": get_passphrase(env)}
    if get_target_type(config) == 's3':
        duplicity_env["AWS_ACCESS_KEY_ID"] = config["target_user"]
        duplicity_env["AWS_SECRET_ACCESS_KEY"] = config["target_pass"]
    return duplicity_env
def get_target_type(config):
    """Return the URL scheme of the backup target, e.g. 'file', 'rsync',
    's3' -- or the whole string (e.g. 'off') when there is no colon."""
    return config["target"].partition(":")[0]
def perform_backup(full_backup):
    """Make an encrypted duplicity backup of STORAGE_ROOT.

    Stops the mail services while data is copied, runs the optional
    before-backup/after-backup hook scripts, prunes backups older than the
    configured minimum age, and restarts services even if duplicity fails.
    `full_backup` forces a full (rather than incremental) backup.
    """
    env = load_environment()
    # Create a global exclusive lock so that the backup script
    # cannot be run more than once concurrently.
    Lock(die=True).forever()
    config = get_backup_config(env)
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    backup_cache_dir = os.path.join(backup_root, 'cache')
    backup_dir = os.path.join(backup_root, 'encrypted')
    # Are backups disabled?
    if config["target"] == "off":
        return
    # In an older version of this script, duplicity was called
    # such that it did not encrypt the backups it created (in
    # backup/duplicity), and instead openssl was called separately
    # after each backup run, creating AES256 encrypted copies of
    # each file created by duplicity in backup/encrypted.
    #
    # We detect the transition by the presence of backup/duplicity
    # and handle it by 'dupliception': we move all the old *un*encrypted
    # duplicity files up out of the backup/duplicity directory (as
    # backup/ is excluded from duplicity runs) in order that it is
    # included in the next run, and we delete backup/encrypted (which
    # duplicity will output files directly to, post-transition).
    old_backup_dir = os.path.join(backup_root, 'duplicity')
    migrated_unencrypted_backup_dir = os.path.join(env["STORAGE_ROOT"], "migrated_unencrypted_backup")
    if os.path.isdir(old_backup_dir):
        # Move the old unencrypted files to a new location outside of
        # the backup root so they get included in the next (new) backup.
        # Then we'll delete them. Also so that they do not get in the
        # way of duplicity doing a full backup on the first run after
        # we take care of this.
        shutil.move(old_backup_dir, migrated_unencrypted_backup_dir)
        # The backup_dir (backup/encrypted) now has a new purpose.
        # Clear it out.
        shutil.rmtree(backup_dir)
    # On the first run, always do a full backup. Incremental
    # will fail. Otherwise do a full backup when the size of
    # the increments since the most recent full backup are
    # large.
    try:
        full_backup = full_backup or should_force_full(config, env)
    except Exception as e:
        # This was the first call to duplicity, and there might
        # be an error already.
        print(e)
        sys.exit(1)
    # Stop services.
    def service_command(service, command, quit=None):
        # Execute silently, but if there is an error then display the output & exit.
        code, ret = shell('check_output', ["/usr/sbin/service", service, command], capture_stderr=True, trap=True)
        if code != 0:
            print(ret)
            if quit:
                sys.exit(code)
    service_command("php7.0-fpm", "stop", quit=True)
    service_command("postfix", "stop", quit=True)
    service_command("dovecot", "stop", quit=True)
    # Execute a pre-backup script that copies files outside the homedir.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    pre_script = os.path.join(backup_root, 'before-backup')
    if os.path.exists(pre_script):
        shell('check_call',
            ['su', env['STORAGE_USER'], '-c', pre_script, config["target"]],
            env=env)
    # Run a backup of STORAGE_ROOT (but excluding the backups themselves!).
    # --allow-source-mismatch is needed in case the box's hostname is changed
    # after the first backup. See #396.
    try:
        shell('check_call', [
            "/usr/bin/duplicity",
            "full" if full_backup else "incr",
            "--verbosity", "warning", "--no-print-statistics",
            "--archive-dir", backup_cache_dir,
            "--exclude", backup_root,
            "--volsize", "250",
            "--gpg-options", "--cipher-algo=AES256",
            env["STORAGE_ROOT"],
            config["target"],
            "--allow-source-mismatch"
            ] + rsync_ssh_options,
            get_env(env))
    finally:
        # Start services again.
        service_command("dovecot", "start", quit=False)
        service_command("postfix", "start", quit=False)
        service_command("php7.0-fpm", "start", quit=False)
    # Once the migrated backup is included in a new backup, it can be deleted.
    if os.path.isdir(migrated_unencrypted_backup_dir):
        shutil.rmtree(migrated_unencrypted_backup_dir)
    # Remove old backups. This deletes all backup data no longer needed
    # from more than 3 days ago.
    shell('check_call', [
        "/usr/bin/duplicity",
        "remove-older-than",
        "%dD" % config["min_age_in_days"],
        "--verbosity", "error",
        "--archive-dir", backup_cache_dir,
        "--force",
        config["target"]
        ] + rsync_ssh_options,
        get_env(env))
    # From duplicity's manual:
    # "This should only be necessary after a duplicity session fails or is
    # aborted prematurely."
    # That may be unlikely here but we may as well ensure we tidy up if
    # that does happen - it might just have been a poorly timed reboot.
    shell('check_call', [
        "/usr/bin/duplicity",
        "cleanup",
        "--verbosity", "error",
        "--archive-dir", backup_cache_dir,
        "--force",
        config["target"]
        ] + rsync_ssh_options,
        get_env(env))
    # Change ownership of backups to the user-data user, so that the after-backup
    # script can access them.
    if get_target_type(config) == 'file':
        shell('check_call', ["/bin/chown", "-R", env["STORAGE_USER"], backup_dir])
    # Execute a post-backup script that does the copying to a remote server.
    # Run as the STORAGE_USER user, not as root. Pass our settings in
    # environment variables so the script has access to STORAGE_ROOT.
    post_script = os.path.join(backup_root, 'after-backup')
    if os.path.exists(post_script):
        shell('check_call',
            ['su', env['STORAGE_USER'], '-c', post_script, config["target"]],
            env=env)
    # Our nightly cron job executes system status checks immediately after this
    # backup. Since it checks that dovecot and postfix are running, block for a
    # bit (maximum of 10 seconds each) to give each a chance to finish restarting
    # before the status checks might catch them down. See #381.
    wait_for_service(25, True, env, 10)
    wait_for_service(993, True, env, 10)
def run_duplicity_verification():
    """Run `duplicity verify` against the configured target, comparing the
    backed-up data byte-for-byte with what is currently in STORAGE_ROOT."""
    env = load_environment()
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    config = get_backup_config(env)
    cache_dir = os.path.join(backup_root, 'cache')
    command = [
        "/usr/bin/duplicity",
        "--verbosity", "info",
        "verify",
        "--compare-data",
        "--archive-dir", cache_dir,
        "--exclude", backup_root,
        config["target"],
        env["STORAGE_ROOT"],
    ]
    shell('check_call', command + rsync_ssh_options, get_env(env))
def run_duplicity_restore(args):
    """Run `duplicity restore` against the configured target, forwarding any
    extra command-line arguments (e.g. the restore path, --file-to-restore)."""
    env = load_environment()
    config = get_backup_config(env)
    cache_dir = os.path.join(env["STORAGE_ROOT"], 'backup', 'cache')
    command = [
        "/usr/bin/duplicity",
        "restore",
        "--archive-dir", cache_dir,
        config["target"],
    ]
    shell('check_call', command + rsync_ssh_options + args, get_env(env))
def list_target_files(config):
    """List the files stored at the backup target.

    Returns a list of (filename, size-in-bytes) tuples for file://, rsync://
    and s3:// targets. Raises ValueError with a human-readable reason when
    the target cannot be reached or is misconfigured.

    NOTE(review): on an unparseable target URL this returns the string
    "invalid target" instead of raising; callers appear to depend on that,
    so it is preserved.
    """
    import urllib.parse
    try:
        target = urllib.parse.urlparse(config["target"])
    except ValueError:
        return "invalid target"
    if target.scheme == "file":
        return [(fn, os.path.getsize(os.path.join(target.path, fn))) for fn in os.listdir(target.path)]
    elif target.scheme == "rsync":
        rsync_fn_size_re = re.compile(r'.* ([^ ]*) [^ ]* [^ ]* (.*)')
        rsync_target = '{host}:{path}'
        # Normalize to a relative path with a trailing slash for rsync.
        target_path = target.path
        if not target_path.endswith('/'):
            target_path = target_path + '/'
        if target_path.startswith('/'):
            target_path = target_path[1:]
        rsync_command = [ 'rsync',
                '-e',
                '/usr/bin/ssh -i /root/.ssh/id_rsa_miab -oStrictHostKeyChecking=no -oBatchMode=yes',
                '--list-only',
                '-r',
                rsync_target.format(
                    host=target.netloc,
                    path=target_path)
                ]
        code, listing = shell('check_output', rsync_command, trap=True, capture_stderr=True)
        if code == 0:
            ret = []
            for l in listing.split('\n'):
                match = rsync_fn_size_re.match(l)
                if match:
                    ret.append( (match.groups()[1], int(match.groups()[0].replace(',',''))) )
            return ret
        else:
            # Map common rsync/ssh failure messages to actionable advice.
            if 'Permission denied (publickey).' in listing:
                reason = "Invalid user or check you correctly copied the SSH key."
            elif 'No such file or directory' in listing:
                reason = "Provided path {} is invalid.".format(target_path)
            elif 'Network is unreachable' in listing:
                reason = "The IP address {} is unreachable.".format(target.hostname)
            elif 'Could not resolve hostname' in listing:
                # BUGFIX: this condition previously tested the bare string
                # (always true), which shadowed the fallback branch below.
                reason = "The hostname {} cannot be resolved.".format(target.hostname)
            else:
                # BUGFIX: the message parts previously ran together without
                # spaces ("...error.Please...--verify'from mailinabox...").
                reason = "Unknown error. " \
                        "Please check running 'python management/backup.py --verify' " \
                        "from mailinabox sources to debug the issue."
            raise ValueError("Connection to rsync host failed: {}".format(reason))
    elif target.scheme == "s3":
        # match to a Region
        fix_boto() # must call prior to importing boto
        import boto.s3
        from boto.exception import BotoServerError
        for region in boto.s3.regions():
            if region.endpoint == target.hostname:
                break
        else:
            raise ValueError("Invalid S3 region/host.")
        bucket = target.path[1:].split('/')[0]
        path = '/'.join(target.path[1:].split('/')[1:]) + '/'
        # If no prefix is specified, set the path to '', otherwise boto won't list the files
        if path == '/':
            path = ''
        if bucket == "":
            raise ValueError("Enter an S3 bucket name.")
        # connect to the region & bucket
        try:
            conn = region.connect(aws_access_key_id=config["target_user"], aws_secret_access_key=config["target_pass"])
            bucket = conn.get_bucket(bucket)
        except BotoServerError as e:
            if e.status == 403:
                raise ValueError("Invalid S3 access key or secret access key.")
            elif e.status == 404:
                raise ValueError("Invalid S3 bucket name.")
            elif e.status == 301:
                raise ValueError("Incorrect region for this bucket.")
            raise ValueError(e.reason)
        return [(key.name[len(path):], key.size) for key in bucket.list(prefix=path)]
    else:
        raise ValueError(config["target"])
def backup_set_custom(env, target, target_user, target_pass, min_age):
    """Validate and persist a new backup configuration.

    Returns "OK" on success, or an error string describing why the target is
    not usable.
    """
    config = get_backup_config(env, for_save=True)
    # min_age arrives from the UI as a string; store it as an int.
    if isinstance(min_age, str):
        min_age = int(min_age)
    config.update({
        "target": target,
        "target_user": target_user,
        "target_pass": target_pass,
        "min_age_in_days": min_age,
    })
    # Validate remote targets by listing them. "off" and "local" are stored
    # verbatim (not as full URLs) when saving, so they skip validation.
    if config["target"] not in ("off", "local"):
        try:
            list_target_files(config)
        except ValueError as e:
            return str(e)
    write_backup_config(env, config)
    return "OK"
def get_backup_config(env, for_save=False, for_ui=False):
    """Load the backup configuration from STORAGE_ROOT/backup/custom.yaml,
    merged over built-in defaults.

    With for_save=True the raw config is returned (target stays "local"/"off"
    rather than being expanded to a URL). With for_ui=True the credentials
    are stripped and helper fields for the admin panel are added.
    """
    backup_root = os.path.join(env["STORAGE_ROOT"], 'backup')
    # Defaults.
    config = {
        "min_age_in_days": 3,
        "target": "local",
    }
    # Merge in anything written to custom.yaml. A missing or malformed file
    # is ignored on purpose: the defaults above are always valid.
    try:
        # BUGFIX: open via a context manager so the file handle is not
        # leaked (it was previously passed to rtyaml.load and never closed).
        with open(os.path.join(backup_root, 'custom.yaml')) as f:
            custom_config = rtyaml.load(f)
        if not isinstance(custom_config, dict): raise ValueError() # caught below
        config.update(custom_config)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any load/parse error still falls back to defaults.
        pass
    # When updating config.yaml, don't do any further processing on what we find.
    if for_save:
        return config
    # When passing this back to the admin to show the current settings, do not include
    # authentication details. The user will have to re-enter it.
    if for_ui:
        for field in ("target_user", "target_pass"):
            if field in config:
                del config[field]
    # helper fields for the admin
    config["file_target_directory"] = os.path.join(backup_root, 'encrypted')
    config["enc_pw_file"] = os.path.join(backup_root, 'secret_key.txt')
    if config["target"] == "local":
        # Expand to the full URL.
        config["target"] = "file://" + config["file_target_directory"]
    ssh_pub_key = os.path.join('/root', '.ssh', 'id_rsa_miab.pub')
    if os.path.exists(ssh_pub_key):
        config["ssh_pub_key"] = open(ssh_pub_key, 'r').read()
    return config
def write_backup_config(env, newconfig):
    """Serialize the backup configuration to STORAGE_ROOT/backup/custom.yaml."""
    config_path = os.path.join(env["STORAGE_ROOT"], 'backup', 'custom.yaml')
    with open(config_path, "w") as f:
        f.write(rtyaml.dump(newconfig))
if __name__ == "__main__":
    import sys
    if sys.argv[-1] == "--verify":
        # Run duplicity's verification command to check a) the backup files
        # are readable, and b) report if they are up to date.
        run_duplicity_verification()
    elif sys.argv[-1] == "--list":
        # List the files (and their sizes) stored at the backup target.
        for fn, size in list_target_files(get_backup_config(load_environment())):
            print("{}\t{}".format(fn, size))
    elif sys.argv[-1] == "--status":
        # Show backup status.
        ret = backup_status(load_environment())
        print(rtyaml.dump(ret["backups"]))
    elif len(sys.argv) >= 2 and sys.argv[1] == "--restore":
        # Run duplicity restore. Rest of command line passed as arguments
        # to duplicity. The restore path should be specified.
        run_duplicity_restore(sys.argv[2:])
    else:
        # Perform a backup. Add --full to force a full backup rather than
        # possibly performing an incremental backup.
        full_backup = "--full" in sys.argv
        perform_backup(full_backup)
| |
import collections
import os
import threading
import warnings
import numpy
from chainer import _version
from chainer import backends # NOQA
from chainer import dataset # NOQA
from chainer import datasets # NOQA
from chainer import distributions # NOQA
from chainer import function_hooks # NOQA
from chainer import functions # NOQA
from chainer import initializers # NOQA
from chainer import iterators # NOQA
from chainer import links # NOQA
from chainer import optimizers # NOQA
from chainer import serializers # NOQA
from chainer import training # NOQA
# import class and function
# These functions from backends.cuda are kept for backward compatibility
from chainer._runtime_info import print_runtime_info # NOQA
from chainer.backends.cuda import should_use_cudnn # NOQA
from chainer.backends.cuda import should_use_cudnn_tensor_core # NOQA
from chainer.configuration import config # NOQA
from chainer.configuration import global_config # NOQA
from chainer.configuration import using_config # NOQA
from chainer.distribution import cross_entropy # NOQA
from chainer.distribution import Distribution # NOQA
from chainer.distribution import kl_divergence # NOQA
from chainer.distribution import register_kl # NOQA
from chainer.function import force_backprop_mode # NOQA
from chainer.function import Function # NOQA
from chainer.function import FunctionAdapter # NOQA
from chainer.function import no_backprop_mode # NOQA
from chainer.function_hook import FunctionHook # NOQA
from chainer.function_node import FunctionNode # NOQA
from chainer.function_node import grad # NOQA
from chainer.functions import array # NOQA
from chainer.functions.math import basic_math # NOQA
from chainer.initializer import Initializer # NOQA
from chainer.link import Chain # NOQA
from chainer.link import ChainList # NOQA
from chainer.link import Link # NOQA
from chainer.optimizer import GradientMethod # NOQA
from chainer.optimizer import Optimizer # NOQA
from chainer.optimizer import UpdateRule # NOQA
from chainer.reporter import DictSummary # NOQA
from chainer.reporter import get_current_reporter # NOQA
from chainer.reporter import report # NOQA
from chainer.reporter import report_scope # NOQA
from chainer.reporter import Reporter # NOQA
from chainer.reporter import Summary # NOQA
from chainer.sequential import Sequential # NOQA
from chainer.serializer import AbstractSerializer # NOQA
from chainer.serializer import Deserializer # NOQA
from chainer.serializer import Serializer # NOQA
from chainer.variable import as_variable # NOQA
from chainer.variable import Parameter # NOQA
from chainer.variable import Variable # NOQA
# Alias for backward compatibility
from chainer import cuda # NOQA
from chainer import _environment_check
# Check environment conditions
_environment_check.check()
__version__ = _version.__version__
# Thread-local storage; used below for the per-thread function-hook registry.
_thread_local = threading.local()
# Caches for the tuples built by _load_array_types(); None until first use.
_array_types = None
_cpu_array_types = None
def get_function_hooks():
    """Return the thread-local ordered mapping of registered function hooks,
    creating an empty one on first access in the current thread."""
    try:
        return _thread_local.function_hooks
    except AttributeError:
        # First access in this thread: initialize the registry.
        hooks = collections.OrderedDict()
        _thread_local.function_hooks = hooks
        return hooks
def _load_array_types():
    """Populate the module-level array-type caches on first call.

    Note: this function may not be protected by GIL because of external
    calls.
    """
    global _array_types
    global _cpu_array_types
    if _array_types is not None:
        # Already initialized; nothing to do.
        return
    all_types = [numpy.ndarray]
    cpu_types = [numpy.ndarray]
    if backends.cuda.available:
        all_types.append(backends.cuda.ndarray)
    if backends.intel64.is_ideep_available():
        all_types.append(backends.intel64.mdarray)
        cpu_types.append(backends.intel64.mdarray)
    _array_types = tuple(all_types)
    _cpu_array_types = tuple(cpu_types)
def get_array_types():
    """Return the tuple of every array type Chainer can operate on
    (NumPy, plus CuPy/iDeep arrays when those backends are available)."""
    _load_array_types()
    return _array_types
def get_cpu_array_types():
    """Return the tuple of CPU-resident array types
    (NumPy, plus iDeep arrays when that backend is available)."""
    _load_array_types()
    return _cpu_array_types
def is_arrays_compatible(arrays):
    """Return True if all given arrays live on the same device class.

    ``None`` entries are ignored. If the first remaining array is a CuPy
    ndarray, every array must be a CuPy ndarray; otherwise every array must
    be one of the CPU array types (NumPy, and iDeep when available).
    An empty (or all-None) input is considered compatible.
    """
    arrays = [a for a in arrays if a is not None]
    if not arrays:
        return True
    if type(arrays[0]) is backends.cuda.ndarray:
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    # Generator (not a materialized list) lets all() short-circuit on the
    # first incompatible array.
    return all(isinstance(a, types) for a in arrays)
# Global default configuration flags; per-thread overrides are applied via
# chainer.using_config / chainer.config. Several defaults can be seeded from
# CHAINER_* environment variables.
global_config.debug = bool(int(os.environ.get('CHAINER_DEBUG', '0')))
global_config.cudnn_deterministic = False
global_config.enable_backprop = True
global_config.keep_graph_on_report = bool(int(
    os.environ.get('CHAINER_KEEP_GRAPH_ON_REPORT', '0')))
global_config.train = True
global_config.type_check = bool(int(os.environ.get('CHAINER_TYPE_CHECK', '1')))
global_config.use_cudnn = os.environ.get('CHAINER_USE_CUDNN', 'auto')
global_config.use_cudnn_tensor_core = 'auto'
global_config.autotune = False
global_config.use_ideep = os.environ.get('CHAINER_USE_IDEEP', 'never')
global_config.lazy_grad_sum = bool(int(
    os.environ.get('CHAINER_LAZY_GRAD_SUM', '0')))
# Default floating-point dtype; validated here so a typo in CHAINER_DTYPE
# fails fast at import time rather than deep inside a computation.
_chainer_dtype = os.environ.get('CHAINER_DTYPE', 'float32')
if _chainer_dtype not in ('float16', 'float32', 'float64'):
    raise TypeError('incorrect dtype name in CHAINER_DTYPE: "{}". '
                    'Only float16/32/64 are allowed.'.format(_chainer_dtype))
global_config.dtype = numpy.dtype(_chainer_dtype)
def is_debug():
    """Returns if the debug mode is enabled or not in the current thread.

    The flag is read from the thread-local ``chainer.config``.

    Returns:
        bool: ``True`` if the debug mode is enabled.
    """
    return bool(config.debug)
def set_debug(debug):
    """Enables or disables the debug mode in the current thread.

    .. note::

       ``chainer.set_debug(value)`` is equivalent to
       ``chainer.config.debug = value``.

    Args:
        debug (bool): New debug mode.
    """
    config.debug = debug
class DebugMode(object):
    """Context manager that temporarily switches the debug mode.

    Entering the context sets the thread-local debug flag to *debug*,
    remembering the previous value; leaving the context restores it.
    Internally this simply delegates to :func:`chainer.using_config`.

    .. deprecated:: v2.0.0
       Use :func:`chainer.using_config` instead. See :ref:`debug` for details.

    Args:
        debug (bool): Debug mode used in the context.
    """

    def __init__(self, debug):
        warnings.warn('chainer.DebugMode is deprecated. '
                      'Use chainer.using_config("debug", ...) instead.',
                      DeprecationWarning)
        self._config_context = using_config('debug', debug)

    def __enter__(self):
        self._config_context.__enter__()

    def __exit__(self, *exc_info):
        self._config_context.__exit__(*exc_info)
def get_dtype(dtype=None):
    """Resolves Chainer's default dtype.

    Returns:
        If ``dtype`` is not ``None``, it returns the dtype as is. Otherwise, it
        returns ``chainer.config.dtype`` (see :ref:`configuration`).
    """
    # Fall back to the configured default only when no explicit dtype given.
    return config.dtype if dtype is None else dtype
# Import-time registration hooks.  # presumably these install the arithmetic
# operators and __getitem__ support on Variable — TODO confirm against the
# basic_math / get_item modules.
basic_math.install_variable_arithmetics()
array.get_item.install_variable_get_item()
# Module-level switch; when True, experimental-feature warnings are not
# emitted.  # presumably consulted by chainer.utils.experimental — confirm
disable_experimental_feature_warning = False
| |
import logging
from Crypto.PublicKey import RSA
from django.contrib.auth.views import redirect_to_login, logout
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.decorators.http import require_http_methods
from django.views.generic import View
from hashlib import md5
from jwkest import long_to_base64
from oidc_provider.lib.endpoints.authorize import *
from oidc_provider.lib.endpoints.token import *
from oidc_provider.lib.endpoints.userinfo import *
from oidc_provider.lib.errors import *
from oidc_provider.lib.utils.common import get_issuer, get_rsa_key
from oidc_provider.lib.endpoints.register import RegisterEndpoint
# Module-level logger for the OIDC provider views.
logger = logging.getLogger(__name__)
class AuthorizeView(View):
    """OpenID Connect authorization endpoint.

    GET renders the consent page (or redirects straight back to the client
    when consent can be skipped); POST processes the user's allow/deny
    decision.
    """

    def get(self, request, *args, **kwargs):
        authorize = AuthorizeEndpoint(request)

        try:
            authorize.validate_params()

            # NOTE(review): ``is_authenticated()`` as a call targets
            # Django < 1.10; on newer Django this is a property — confirm
            # the supported Django versions.
            if request.user.is_authenticated():
                # Check if there's a hook setted.
                hook_resp = settings.get('OIDC_AFTER_USERLOGIN_HOOK')(
                    request=request, user=request.user,
                    client=authorize.client)
                if hook_resp:
                    return hook_resp

                if settings.get('OIDC_SKIP_CONSENT_ENABLE'):
                    # Check if user previously give consent.
                    if authorize.client_has_user_consent():
                        uri = authorize.create_response_uri()
                        return HttpResponseRedirect(uri)

                # Generate hidden inputs for the form.
                context = {
                    'params': authorize.params,
                }
                hidden_inputs = render_to_string(
                    'oidc_provider/hidden_inputs.html', context)

                # Remove `openid` from scope list
                # since we don't need to print it.
                authorize.params.scope.remove('openid')

                context = {
                    'client': authorize.client,
                    'hidden_inputs': hidden_inputs,
                    'params': authorize.params,
                }

                return render(request, 'oidc_provider/authorize.html', context)
            else:
                # Anonymous user: send through the login flow, returning here.
                path = request.get_full_path()
                return redirect_to_login(path)

        except (ClientIdError, RedirectUriError) as error:
            # Without a trusted client/redirect_uri we must NOT redirect —
            # render the error locally instead.
            context = {
                'error': error.error,
                'description': error.description,
            }

            return render(request, 'oidc_provider/error.html', context)

        except (AuthorizeError) as error:
            # Protocol-level errors are reported back to the client via the
            # (already validated) redirect_uri.
            uri = error.create_uri(
                authorize.params.redirect_uri,
                authorize.params.state)

            return HttpResponseRedirect(uri)

    def post(self, request, *args, **kwargs):
        authorize = AuthorizeEndpoint(request)

        # Presence of the 'allow' form field signals user approval.
        allow = True if request.POST.get('allow') else False

        try:
            authorize.validate_params()

            if not allow:
                # User denied the request: report ``access_denied``.
                raise AuthorizeError(authorize.params.redirect_uri,
                                     'access_denied',
                                     authorize.grant_type)

            # Save the user consent given to the client.
            authorize.set_client_user_consent()

            uri = authorize.create_response_uri()

            return HttpResponseRedirect(uri)

        except (AuthorizeError) as error:
            uri = error.create_uri(
                authorize.params.redirect_uri,
                authorize.params.state)

            return HttpResponseRedirect(uri)
class TokenView(View):
    """OpenID Connect token endpoint (accepts POST only)."""

    def post(self, request, *args, **kwargs):
        endpoint = TokenEndpoint(request)
        try:
            endpoint.validate_params()
            return TokenEndpoint.response(endpoint.create_response_dic())
        except TokenError as error:
            # Malformed/unauthorized token requests come back as HTTP 400.
            return TokenEndpoint.response(error.create_dict(), status=400)
@require_http_methods(['GET', 'POST'])
def userinfo(request):
    """OpenID Connect userinfo endpoint: return claims about the user."""
    endpoint = UserInfoEndpoint(request)
    try:
        endpoint.validate_params()
        return UserInfoEndpoint.response(endpoint.create_response_dic())
    except UserInfoError as error:
        return UserInfoEndpoint.error_response(
            error.code,
            error.description,
            error.status)
class ProviderInfoView(View):
    """Serves the OpenID Connect discovery document."""

    def get(self, request, *args, **kwargs):
        # Imported here to avoid import-time circular dependencies.
        from oidc_provider.models import Client

        issuer = get_issuer()
        site_url = settings.get('SITE_URL')

        dic = {
            'issuer': issuer,
            'authorization_endpoint': site_url + reverse('oidc_provider:authorize'),
            'token_endpoint': site_url + reverse('oidc_provider:token'),
            'userinfo_endpoint': site_url + reverse('oidc_provider:userinfo'),
            'end_session_endpoint': site_url + reverse('oidc_provider:logout'),
            'response_types_supported': [x[0] for x in Client.RESPONSE_TYPE_CHOICES],
            'jwks_uri': site_url + reverse('oidc_provider:jwks'),
            'id_token_signing_alg_values_supported': ['RS256'],
            # See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
            'subject_types_supported': ['public'],
            'token_endpoint_auth_methods_supported': ['client_secret_post',
                                                      'client_secret_basic'],
        }

        return JsonResponse(dic)
class JwksView(View):
    """Publishes the provider's RSA public key as a JWK Set."""

    def get(self, request, *args, **kwargs):
        key_bytes = get_rsa_key().encode('utf-8')
        public_key = RSA.importKey(key_bytes).publickey()

        jwk = {
            'kty': 'RSA',
            'alg': 'RS256',
            'use': 'sig',
            # Key id derived from the PEM bytes so it is stable per key.
            'kid': md5(key_bytes).hexdigest(),
            'n': long_to_base64(public_key.n),
            'e': long_to_base64(public_key.e),
        }

        return JsonResponse({'keys': [jwk]})
class LogoutView(View):
    """Ends the user's session (RP-initiated logout endpoint)."""

    def get(self, request, *args, **kwargs):
        # We should actually verify if the requested redirect URI is safe
        # SECURITY NOTE(review): ``post_logout_redirect_uri`` is taken
        # straight from the query string and handed to logout() unvalidated,
        # making this an open redirect.  It should be checked against the
        # client's registered post-logout redirect URIs before use.
        return logout(request, next_page=request.GET.get('post_logout_redirect_uri'))
class RegisterView(View):
    """Dynamic client registration endpoint (POST only)."""

    def post(self, request, *args, **kwargs):
        endpoint = RegisterEndpoint(request)
        try:
            endpoint.validate_params()
            return endpoint.response(endpoint.create_response_dic())
        except RegisterError as error:
            return RegisterEndpoint.error_response(
                error.code,
                error.description,
                error.status)
| |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
import base64
import netrc
import os
import platform
import re
import socket
import sys
import tempfile
import traceback
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
import ansible.module_utils.six.moves.urllib.request as urllib_request
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.basic import get_distribution
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler
except ImportError:
# python2
import urllib2 as urllib_request
from urllib2 import AbstractHTTPHandler
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
    # If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
    PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
    # Without SSLContext we cannot select protocols through the ssl module,
    # so probe the system libssl directly via ctypes.
    try:
        import ctypes
        import ctypes.util
    except ImportError:
        # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
        pass
    else:
        libssl_name = ctypes.util.find_library('ssl')
        libssl = ctypes.CDLL(libssl_name)
        # If the library exposes TLS 1.1/1.2 entry points, use SSLv23 which
        # lets OpenSSL negotiate the highest mutually supported version.
        for method in ('TLSv1_1_method', 'TLSv1_2_method'):
            try:
                libssl[method]
                # Found something - we'll let openssl autonegotiate and hope
                # the server has disabled sslv2 and 3. best we can do.
                PROTOCOL = ssl.PROTOCOL_SSLv23
                break
            except AttributeError:
                pass
        del libssl
# File paths whose certs have already been written to a "to add" bundle in
# this process (see SSLValidationHandler.get_ca_certs), so they are not
# re-collected on later requests.
LOADED_VERIFY_LOCATIONS = set()
HAS_MATCH_HOSTNAME = True
try:
from ssl import match_hostname, CertificateError
except ImportError:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
HAS_MATCH_HOSTNAME = False
if not HAS_MATCH_HOSTNAME:
    # Fallback for Pythons whose ssl module (and backports) lack
    # match_hostname.  Kept verbatim from CPython 3.4 — do not "improve".
    # The following block of code is under the terms and conditions of the
    # Python Software Foundation License
    """The match_hostname() function from Python 3.4, essential when using SSL."""

    class CertificateError(ValueError):
        pass

    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3

        http://tools.ietf.org/html/rfc6125#section-6.4.3
        """
        pats = []
        if not dn:
            return False

        # Ported from python3-syntax:
        # leftmost, *remainder = dn.split(r'.')
        parts = dn.split(r'.')
        leftmost = parts[0]
        remainder = parts[1:]

        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment. A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))

        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()

        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))

        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)

    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or subjectAltName fields were found")
    # End of Python Software Foundation Licensed code

    HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
# Note: this cert is never used as a trust anchor ("regardless of validity"
# above); any syntactically valid PEM certificate would serve.
b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
#
# Exceptions
#
# NOTE(review): this ConnectionError shadows the Python 3 builtin of the same
# name inside this module; callers must catch the class exported from here.

class ConnectionError(Exception):
    """Failed to connect to the server"""
    pass

class ProxyError(ConnectionError):
    """Failure to connect because of a proxy"""
    pass

class SSLValidationError(ConnectionError):
    """Failure to connect due to SSL validation failing"""
    pass

class NoSSLError(SSLValidationError):
    """Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
    pass
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = CustomHTTPSHandler = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
    class CustomHTTPSConnection(httplib.HTTPSConnection):
        """HTTPSConnection that prefers an SSLContext (stdlib or PyOpenSSL)
        and loads the configured client cert/key into it when given."""

        def __init__(self, *args, **kwargs):
            httplib.HTTPSConnection.__init__(self, *args, **kwargs)
            self.context = None
            # Pick the best available context implementation; self.context
            # stays None when neither is available (plain wrap_socket path).
            if HAS_SSLCONTEXT:
                self.context = create_default_context()
            elif HAS_URLLIB3_PYOPENSSLCONTEXT:
                self.context = PyOpenSSLContext(PROTOCOL)
            if self.context and self.cert_file:
                self.context.load_cert_chain(self.cert_file, self.key_file)

        def connect(self):
            "Connect to a host on a given (SSL) port."

            if hasattr(self, 'source_address'):
                sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
            else:
                sock = socket.create_connection((self.host, self.port), self.timeout)

            server_hostname = self.host
            # Note: self._tunnel_host is not available on py < 2.6 but this code
            # isn't used on py < 2.6 (lack of create_connection)
            if self._tunnel_host:
                # Going through a proxy: establish the CONNECT tunnel first,
                # then do TLS against the tunneled host (for SNI).
                self.sock = sock
                self._tunnel()
                server_hostname = self._tunnel_host

            if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
                self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
            elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
                                            server_hostname=server_hostname)
            else:
                # Last resort: no SNI support on this path.
                self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)

    class CustomHTTPSHandler(urllib_request.HTTPSHandler):
        """urllib handler that routes https:// opens through
        CustomHTTPSConnection above."""

        def https_open(self, req):
            return self.do_open(CustomHTTPSConnection, req)

        https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
    '''Handles client authentication via cert/key

    This is a fairly lightweight extension on HTTPSHandler, and can be used
    in place of HTTPSHandler
    '''

    def __init__(self, client_cert=None, client_key=None, **kwargs):
        super(HTTPSClientAuthHandler, self).__init__(**kwargs)
        self.client_cert = client_cert
        self.client_key = client_key

    def https_open(self, req):
        # Route the open through our connection factory so the client
        # cert/key are attached to every HTTPS connection.
        return self.do_open(self._build_https_connection, req)

    def _build_https_connection(self, host, **kwargs):
        kwargs['cert_file'] = self.client_cert
        kwargs['key_file'] = self.client_key
        # The parent handler may or may not have set ``_context`` depending
        # on the Python version; forward it only when present.
        if hasattr(self, '_context'):
            kwargs['context'] = self._context
        return httplib.HTTPSConnection(host, **kwargs)
def generic_urlparse(parts):
    '''
    Returns a dictionary of url parts as parsed by urlparse,
    but accounts for the fact that older versions of that
    library do not support named attributes (ie. .netloc)

    :arg parts: result of ``urlparse()`` — either a named tuple, or (on very
        old Pythons) a plain 6-tuple of
        (scheme, netloc, path, params, query, fragment)
    :returns: dict with the keys scheme, netloc, path, params, query,
        fragment, username, password, hostname and port
    '''
    generic_parts = dict()
    if hasattr(parts, 'netloc'):
        # urlparse is newer, just read the fields straight
        # from the parts object
        generic_parts['scheme'] = parts.scheme
        generic_parts['netloc'] = parts.netloc
        generic_parts['path'] = parts.path
        generic_parts['params'] = parts.params
        generic_parts['query'] = parts.query
        generic_parts['fragment'] = parts.fragment
        generic_parts['username'] = parts.username
        generic_parts['password'] = parts.password
        generic_parts['hostname'] = parts.hostname
        generic_parts['port'] = parts.port
    else:
        # we have to use indexes, and then parse out
        # the other parts not supported by indexing
        generic_parts['scheme'] = parts[0]
        generic_parts['netloc'] = parts[1]
        generic_parts['path'] = parts[2]
        generic_parts['params'] = parts[3]
        generic_parts['query'] = parts[4]
        generic_parts['fragment'] = parts[5]
        # get the username, password, etc.
        try:
            netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
            match = netloc_re.match(parts[1])
            auth, hostname, port = match.groups()
            if port:
                # the capture group for the port will include the ':',
                # so remove it and convert the port to an integer
                port = int(port[1:])
            if auth:
                # the capture group above includes the @, so remove it
                # and then split it up based on the first ':' found
                username, password = auth[:-1].split(':', 1)
            else:
                username = password = None
            generic_parts['username'] = username
            generic_parts['password'] = password
            generic_parts['hostname'] = hostname
            generic_parts['port'] = port
        except (AttributeError, TypeError, ValueError):
            # Previously a bare ``except`` — narrowed so unrelated failures
            # (e.g. KeyboardInterrupt) are not swallowed.  These cover: no
            # regex match (AttributeError on .groups()), a non-string netloc
            # (TypeError), and an auth section without a password
            # (ValueError on unpacking).
            generic_parts['username'] = None
            generic_parts['password'] = None
            generic_parts['hostname'] = parts[1]
            generic_parts['port'] = None
    return generic_parts
class RequestWithMethod(urllib_request.Request):
    '''
    Workaround for using DELETE/PUT/etc with urllib2

    Originally contained in library/net_infrastructure/dnsmadeeasy
    '''

    def __init__(self, url, method, data=None, headers=None):
        # Remember the forced verb (normalized to upper case) before
        # delegating to the stock Request constructor.
        self._method = method.upper()
        urllib_request.Request.__init__(
            self, url, data, {} if headers is None else headers)

    def get_method(self):
        # An empty/falsy stored method falls back to urllib's GET/POST logic.
        return self._method or urllib_request.Request.get_method(self)
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True):
    """This is a class factory that closes over the value of
    ``follow_redirects`` so that the RedirectHandler class has access to
    that value without having to use globals, and potentially cause problems
    where ``open_url`` or ``fetch_url`` are used multiple times in a module.
    """

    class RedirectHandler(urllib_request.HTTPRedirectHandler):
        """This is an implementation of a RedirectHandler to match the
        functionality provided by httplib2. It will utilize the value of
        ``follow_redirects`` that is passed into ``RedirectHandlerFactory``
        to determine how redirects should be handled in urllib2.
        """

        def redirect_request(self, req, fp, code, msg, hdrs, newurl):
            # Make sure the redirect target gets SSL validation as well.
            handler = maybe_add_ssl_handler(newurl, validate_certs)
            if handler:
                urllib_request._opener.add_handler(handler)

            # Delegate entirely to urllib's stock behavior when asked.
            if follow_redirects == 'urllib2':
                return urllib_request.HTTPRedirectHandler.redirect_request(
                    self, req, fp, code, msg, hdrs, newurl)

            if follow_redirects in ('no', 'none', False):
                raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)

            if follow_redirects in ('all', 'yes', True):
                do_redirect = 300 <= code < 400
            elif follow_redirects == 'safe':
                # Only idempotent methods are followed in 'safe' mode.
                do_redirect = 300 <= code < 400 and req.get_method() in ('GET', 'HEAD')
            else:
                do_redirect = False

            if not do_redirect:
                raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)

            # be conciliant with URIs containing a space
            newurl = newurl.replace(' ', '%20')
            newheaders = dict((k, v) for k, v in req.headers.items()
                              if k.lower() not in ("content-length", "content-type"))
            try:
                # Python 2-3.3
                origin_req_host = req.get_origin_req_host()
            except AttributeError:
                # Python 3.4+
                origin_req_host = req.origin_req_host
            return urllib_request.Request(newurl,
                                          headers=newheaders,
                                          origin_req_host=origin_req_host,
                                          unverifiable=True)

    return RedirectHandler
def build_ssl_validation_error(hostname, port, paths, exc=None):
    '''Intelligently build out the SSLValidationError based on what support
    you have installed

    :arg hostname: host we attempted to connect to
    :arg port: port we attempted to connect to
    :arg paths: iterable of filesystem paths that were checked for CA certs
    :kwarg exc: optional exception that triggered the failure; its message
        is appended to the error text
    :raises SSLValidationError: always
    '''
    msg = [
        ('Failed to validate the SSL certificate for %s:%s.'
         ' Make sure your managed systems have a valid CA'
         ' certificate installed.')
    ]
    if not HAS_SSLCONTEXT:
        msg.append('If the website serving the url uses SNI you need'
                   ' python >= 2.7.9 on your managed machine')
        msg.append(' (the python executable used (%s) is version: %s)' %
                   (sys.executable, ''.join(sys.version.splitlines())))
        if not HAS_URLLIB3_PYOPENSSLCONTEXT or not HAS_URLLIB3_SSL_WRAP_SOCKET:
            msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
                       ' `ndg-httpsclient`, and `pyasn1` python modules')

        msg.append('to perform SNI verification in python >= 2.6.')

    msg.append('You can use validate_certs=False if you do'
               ' not need to confirm the servers identity but this is'
               ' unsafe and not recommended.'
               ' Paths checked for this platform: %s.')

    # Bug fix: substitute the placeholders *before* appending the exception
    # text.  Previously the exception message was appended first, so any '%'
    # inside it broke the final %-formatting with a TypeError/ValueError.
    message = ' '.join(msg) % (hostname, port, ", ".join(paths))
    if exc:
        message += ' The exception msg was: %s.' % to_native(exc)

    raise SSLValidationError(message)
class SSLValidationHandler(urllib_request.BaseHandler):
    '''
    A custom handler class for SSL validation.

    Based on:
    http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
    http://techknack.net/python-urllib2-handlers/
    '''
    # Raw CONNECT request used when tunneling through an HTTP proxy;
    # formatted with (hostname, port).  Extra headers and the terminating
    # blank line are sent separately by http_request().
    CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"

    def __init__(self, hostname, port):
        # Target server whose certificate will be validated.
        self.hostname = hostname
        self.port = port
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
paths_checked = []
system = to_text(platform.system(), errors='surrogate_or_strict')
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
if system == u'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == u'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == u'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == u'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == u'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_fd, tmp_path = tempfile.mkstemp()
to_add_fd, to_add_path = tempfile.mkstemp()
to_add = False
# Write the dummy ca cert if we are running on Mac OS X
if system == u'Darwin':
os.write(tmp_fd, b_DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
try:
cert_file = open(full_path, 'rb')
cert = cert_file.read()
cert_file.close()
os.write(tmp_fd, cert)
os.write(tmp_fd, b'\n')
if full_path not in LOADED_VERIFY_LOCATIONS:
to_add = True
os.write(to_add_fd, cert)
os.write(to_add_fd, b'\n')
LOADED_VERIFY_LOCATIONS.add(full_path)
except (OSError, IOError):
pass
if not to_add:
to_add_path = None
return (tmp_path, to_add_path, paths_checked)
def validate_proxy_response(self, response, valid_codes=[200]):
'''
make sure we get back a valid code from the proxy
'''
try:
(http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
return True
    def _make_context(self, to_add_ca_cert_path):
        # Build an SSL context, preferring the PyOpenSSL shim only when the
        # stdlib SSLContext is unavailable (see module-level HAS_* flags).
        if HAS_URLLIB3_PYOPENSSLCONTEXT:
            context = PyOpenSSLContext(PROTOCOL)
        else:
            context = create_default_context()
        if to_add_ca_cert_path:
            # Trust the CA bundle collected by get_ca_certs().
            context.load_verify_locations(to_add_ca_cert_path)
        return context
def http_request(self, req):
    '''
    urllib2-style handler hook: before the real request is sent, open a
    throwaway TLS connection to the target host (tunnelled through
    ``https_proxy`` via HTTP CONNECT when configured and not excluded by
    ``no_proxy``), validate the server certificate chain and hostname,
    then close the probe connection.

    :arg req: the urllib2/urllib request being opened.
    :returns: ``req`` unchanged; this handler only validates, it never
        rewrites the request.
    :raises ProxyError: if the proxy CONNECT handshake fails or the proxy
        scheme is not plain http.
    :raises ConnectionError: if the TCP connection cannot be established.
    '''
    tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
    https_proxy = os.environ.get('https_proxy')
    context = None
    if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
        context = self._make_context(to_add_ca_cert_path)

    # Detect if 'no_proxy' environment variable is set and if our URL is included
    use_proxy = self.detect_no_proxy(req.get_full_url())

    if not use_proxy:
        # ignore proxy settings for this host request
        return req

    try:
        if https_proxy:
            proxy_parts = generic_urlparse(urlparse(https_proxy))
            port = proxy_parts.get('port') or 443
            s = socket.create_connection((proxy_parts.get('hostname'), port))
            if proxy_parts.get('scheme') == 'http':
                # Ask the proxy to open a raw tunnel to the real target
                s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
                if proxy_parts.get('username'):
                    credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
                    s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
                s.sendall(b'\r\n')

                # Read until the blank line ending the proxy's response headers
                connect_result = b""
                while connect_result.find(b"\r\n\r\n") <= 0:
                    connect_result += s.recv(4096)
                    # 128 kilobytes of headers should be enough for everyone.
                    if len(connect_result) > 131072:
                        raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')

                self.validate_proxy_response(connect_result)
                if context:
                    ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
                elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                    ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
                else:
                    # plain ssl.wrap_socket does not check the hostname itself,
                    # so match it against the peer certificate explicitly
                    ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                    match_hostname(ssl_s.getpeercert(), self.hostname)
            else:
                raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
        else:
            s = socket.create_connection((self.hostname, self.port))
            if context:
                ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
            elif HAS_URLLIB3_SSL_WRAP_SOCKET:
                ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
            else:
                # plain ssl.wrap_socket does not check the hostname itself,
                # so match it against the peer certificate explicitly
                ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
                match_hostname(ssl_s.getpeercert(), self.hostname)
        # close the ssl connection
        # ssl_s.unwrap()
        s.close()
    except (ssl.SSLError, CertificateError) as e:
        build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
    except socket.error as e:
        raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))

    try:
        # cleanup the temp file created, don't worry
        # if it fails for some reason
        os.remove(tmp_ca_cert_path)
    except:
        pass

    try:
        # cleanup the temp file created, don't worry
        # if it fails for some reason
        if to_add_ca_cert_path:
            os.remove(to_add_ca_cert_path)
    except:
        pass

    return req

# HTTPS requests go through the same validation probe as HTTP ones
https_request = http_request
def maybe_add_ssl_handler(url, validate_certs):
    '''
    Build an SSLValidationHandler for *url* when it is an https URL and
    certificate validation was requested; otherwise return None.

    :arg url: the URL that will be requested.
    :arg validate_certs: whether certificate validation is desired.
    :returns: an SSLValidationHandler instance, or None.
    :raises NoSSLError: when validation is requested but this python
        build has no SSL support.
    '''
    # FIXME: change the following to use the generic_urlparse function
    # to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'https' or not validate_certs:
        return None

    if not HAS_SSL:
        raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
                         ' however this is unsafe and not recommended')

    # do the cert validation
    netloc = parsed[1]
    # strip embedded credentials ("user:pass@host")
    if '@' in netloc:
        netloc = netloc.split('@', 1)[1]
    if ':' in netloc:
        hostname, port_text = netloc.split(':', 1)
        port = int(port_text)
    else:
        hostname, port = netloc, 443

    # create the SSL validation handler and
    # add it to the list of handlers
    return SSLValidationHandler(hostname, port)
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
             force=False, last_mod_time=None, timeout=10, validate_certs=True,
             url_username=None, url_password=None, http_agent=None,
             force_basic_auth=False, follow_redirects='urllib2',
             client_cert=None, client_key=None, cookies=None):
    '''
    Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)

    Does not require the module environment

    :arg url: URL to request; embedded ``user:pass@`` credentials are
        extracted and stripped before the request is issued.
    :kwarg data: request body (bytes passed through unchanged).
    :kwarg headers: dict of request headers; these override anything set
        automatically below.
    :kwarg method: explicit HTTP method name; defaults to urllib's choice
        (GET, or POST when ``data`` is given).
    :kwarg use_proxy: when False an empty ProxyHandler disables proxies.
    :kwarg force: when True send ``cache-control: no-cache``.
    :kwarg last_mod_time: datetime used for ``If-Modified-Since`` when
        ``force`` is not set.
    :kwarg timeout: socket timeout in seconds (python >= 2.6 only).
    :kwarg validate_certs: validate server certificates for https URLs.
    :kwarg url_username/url_password: explicit credentials; netrc is
        consulted when neither these nor URL credentials are present.
    :kwarg force_basic_auth: send the Authorization header up front
        instead of waiting for a 401 challenge.
    :kwarg follow_redirects: redirect policy handed to
        RedirectHandlerFactory.
    :kwarg client_cert/client_key: client TLS certificate/key paths.
    :kwarg cookies: a cookiejar to record cookies the server sets.
    :returns: the file-like response object from urllib.
    :raises ConnectionError: on an invalid HTTP method name.
    '''
    handlers = []
    ssl_handler = maybe_add_ssl_handler(url, validate_certs)
    if ssl_handler:
        handlers.append(ssl_handler)

    # FIXME: change the following to use the generic_urlparse function
    # to remove the indexed references for 'parsed'
    parsed = urlparse(url)
    if parsed[0] != 'ftp':
        username = url_username

        if headers is None:
            headers = {}

        if username:
            password = url_password
            netloc = parsed[1]
        elif '@' in parsed[1]:
            # credentials embedded in the URL take effect only when no
            # explicit username was supplied
            credentials, netloc = parsed[1].split('@', 1)
            if ':' in credentials:
                username, password = credentials.split(':', 1)
            else:
                username = credentials
                password = ''

            parsed = list(parsed)
            parsed[1] = netloc

            # reconstruct url without credentials
            url = urlunparse(parsed)

        if username and not force_basic_auth:
            passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()

            # this creates a password manager
            passman.add_password(None, netloc, username, password)

            # because we have put None at the start it will always
            # use this username/password combination for urls
            # for which `theurl` is a super-url
            authhandler = urllib_request.HTTPBasicAuthHandler(passman)
            digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)

            # create the AuthHandler
            handlers.append(authhandler)
            handlers.append(digest_authhandler)

        elif username and force_basic_auth:
            # pre-emptive basic auth: send the header without waiting
            # for a 401 challenge
            headers["Authorization"] = basic_auth_header(username, password)

        else:
            # no credentials given anywhere: fall back to ~/.netrc (or
            # the file named by the NETRC environment variable)
            try:
                rc = netrc.netrc(os.environ.get('NETRC'))
                login = rc.authenticators(parsed[1])
            except IOError:
                login = None

            if login:
                username, _, password = login
                if username and password:
                    headers["Authorization"] = basic_auth_header(username, password)

    if not use_proxy:
        # an empty mapping disables all proxy handling
        proxyhandler = urllib_request.ProxyHandler({})
        handlers.append(proxyhandler)

    if HAS_SSLCONTEXT and not validate_certs:
        # In 2.7.9, the default context validates certificates
        context = SSLContext(ssl.PROTOCOL_SSLv23)
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        context.verify_mode = ssl.CERT_NONE
        context.check_hostname = False
        handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
                                               client_key=client_key,
                                               context=context))
    elif client_cert:
        handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
                                               client_key=client_key))

    # pre-2.6 versions of python cannot use the custom https
    # handler, since the socket class is lacking create_connection.
    # Some python builds lack HTTPS support.
    if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
        handlers.append(CustomHTTPSHandler)

    handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))

    # add some nicer cookie handling
    if cookies is not None:
        handlers.append(urllib_request.HTTPCookieProcessor(cookies))

    opener = urllib_request.build_opener(*handlers)
    urllib_request.install_opener(opener)

    data = to_bytes(data, nonstring='passthru')
    if method:
        if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
            raise ConnectionError('invalid HTTP request method; %s' % method.upper())
        request = RequestWithMethod(url, method.upper(), data)
    else:
        request = urllib_request.Request(url, data)

    # add the custom agent header, to help prevent issues
    # with sites that block the default urllib agent string
    if http_agent:
        request.add_header('User-agent', http_agent)

    # Cache control
    # Either we directly force a cache refresh
    if force:
        request.add_header('cache-control', 'no-cache')
    # or we do it if the original is more recent than our copy
    elif last_mod_time:
        tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
        request.add_header('If-Modified-Since', tstamp)

    # user defined headers now, which may override things we've set above
    if headers:
        if not isinstance(headers, dict):
            raise ValueError("headers provided to fetch_url() must be a dict")
        for header in headers:
            request.add_header(header, headers[header])

    urlopen_args = [request, None]
    if sys.version_info >= (2, 6, 0):
        # urlopen in python prior to 2.6.0 did not
        # have a timeout parameter
        urlopen_args.append(timeout)

    r = urllib_request.urlopen(*urlopen_args)
    return r
#
# Module-related functions
#
def basic_auth_header(username, password):
    """Takes a username and password and returns a byte string suitable for
    using as value of an Authorization header to do basic auth.
    """
    credentials = to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict')
    encoded = base64.b64encode(credentials)
    return b"Basic %s" % encoded
def url_argument_spec():
    '''
    Return the common argument spec shared by every module that fetches
    content via urllib/urllib2, suitable for merging into a module's own
    argument_spec dict.
    '''
    return {
        'url': {},
        'force': {'default': 'no', 'aliases': ['thirsty'], 'type': 'bool'},
        'http_agent': {'default': 'ansible-httpget'},
        'use_proxy': {'default': 'yes', 'type': 'bool'},
        'validate_certs': {'default': 'yes', 'type': 'bool'},
        'url_username': {'required': False},
        'url_password': {'required': False, 'no_log': True},
        'force_basic_auth': {'required': False, 'type': 'bool', 'default': 'no'},
        'client_cert': {'required': False, 'type': 'path', 'default': None},
        'client_key': {'required': False, 'type': 'path', 'default': None},
    }
def fetch_url(module, url, data=None, headers=None, method=None,
              use_proxy=True, force=False, last_mod_time=None, timeout=10):
    """Sends a request via HTTP(S) or FTP (needs the module as parameter)

    :arg module: The AnsibleModule (used to get username, password etc.).
    :arg url: The url to use.

    :kwarg data: The data to be sent (in case of POST/PUT).
    :kwarg headers: A dict with the request headers.
    :kwarg method: "POST", "PUT", etc.
    :kwarg boolean use_proxy: Default: True
    :kwarg boolean force: If True: Do not get a cached copy (Default: False)
    :kwarg last_mod_time: Default: None
    :kwarg int timeout: Default: 10

    :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
        The **info** contains the 'status' and other meta data. When a HttpError (status >= 400)
        occurred then ``info['body']`` contains the error response data.

    Example::

        data={...}
        resp, info = fetch_url(module,
                               "http://example.com",
                               data=module.jsonify(data),
                               headers={'Content-type': 'application/json'},
                               method="POST")
        status_code = info["status"]
        body = resp.read()
        if status_code >= 400:
            body = info['body']
    """
    if not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')

    # Get validate_certs from the module params
    validate_certs = module.params.get('validate_certs', True)

    username = module.params.get('url_username', '')
    password = module.params.get('url_password', '')
    http_agent = module.params.get('http_agent', 'ansible-httpget')
    force_basic_auth = module.params.get('force_basic_auth', '')

    follow_redirects = module.params.get('follow_redirects', 'urllib2')

    client_cert = module.params.get('client_cert')
    client_key = module.params.get('client_key')

    # record any cookies the server sets so they can be returned in info
    cookies = cookiejar.LWPCookieJar()

    r = None
    info = dict(url=url)
    try:
        r = open_url(url, data=data, headers=headers, method=method,
                     use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
                     validate_certs=validate_certs, url_username=username,
                     url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
                     follow_redirects=follow_redirects, client_cert=client_cert,
                     client_key=client_key, cookies=cookies)
        info.update(r.info())

        # parse the cookies into a nice dictionary
        cookie_dict = dict()
        for cookie in cookies:
            cookie_dict[cookie.name] = cookie.value
        info['cookies'] = cookie_dict

        # finally update the result with a message about the fetch
        info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
    except NoSSLError as e:
        distribution = get_distribution()
        if distribution is not None and distribution.lower() == 'redhat':
            # EPEL carries a backported ssl module for old RHEL pythons
            module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e))
        else:
            module.fail_json(msg='%s' % to_native(e))
    except (ConnectionError, ValueError) as e:
        module.fail_json(msg=to_native(e))
    except urllib_error.HTTPError as e:
        # HTTP-level errors still produce a (None-free) info dict rather
        # than failing the module outright
        try:
            body = e.read()
        except AttributeError:
            body = ''

        # Try to add exception info to the output but don't fail if we can't
        try:
            info.update(dict(**e.info()))
        except:
            pass

        info.update({'msg': to_native(e), 'body': body, 'status': e.code})
    except urllib_error.URLError as e:
        code = int(getattr(e, 'code', -1))
        info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
    except socket.error as e:
        info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
    except Exception as e:
        info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
                    exception=traceback.format_exc())

    return r, info
| |
#!/usr/bin/python
import os
import sys
import unittest
import pymei
import tornado.web
import tornado.httpserver
from tornado.testing import AsyncHTTPTestCase
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from neonsrv import tornadoapi
class FakeReq():
    """Minimal stand-in that quacks enough like a tornado HTTPRequest to
    let the tests construct RequestHandler subclasses directly."""

    # RequestHandler implementations read ``request.headers`` during setup.
    headers = {}

    def supports_http_1_1(self):
        # Probed by tornado while building the handler; the tests never
        # care about the answer.
        pass
class DeleteNoteTest(unittest.TestCase):
    """Exercise DeleteNoteHandler's cascade when deleting notes, ncs and
    neumes from an MEI document."""

    def setUp(self):
        app = tornado.web.Application()
        req = FakeReq()
        path = os.path.join(os.path.dirname(__file__), "data", "0400_segment.mei")
        self.mei = pymei.XmlImport.documentFromFile(path)
        # Bug fix: the module is imported as ``tornadoapi`` (see the imports
        # at the top of this file); the previous ``api.DeleteNoteHandler``
        # reference raised a NameError before any assertion could run.
        self.deleter = tornadoapi.DeleteNoteHandler(app, req)
        self.deleter.mei = self.mei

    def testDeleteNote(self):
        """ Delete a note but leave other stuff in the nc """
        note = self.mei.getElementById("m-c33fda11-014e-4028-8c6d-d5b9bc927839")
        self.deleter.delete_note(note)
        self.assertEqual(None, self.mei.getElementById("m-c33fda11-014e-4028-8c6d-d5b9bc927839"))
        self.assertEqual(2, len(self.mei.getElementById("m-d60836bd-c4e9-47c9-bac8-c3cf1af87847").children))

    def testDeleteNoteAndSurroundingNc(self):
        """ Delete a note and its nc, but leave the neume because
        there's another nc """
        note = self.mei.getElementById("m-id3")
        self.deleter.delete_note(note)
        self.assertEqual(None, self.mei.getElementById("m-id2"))
        self.assertEqual(1, len(self.mei.getElementById("m-id1").children))
        self.assertEqual("nc", self.mei.getElementById("m-id6").name)

    def testDeleteNoteNcAndNeume(self):
        """ Delete the note, its nc, and its neume. Also remove the
        neume's zone """
        note = self.mei.getElementById("m-82af3519-e4a5-4992-a54d-4217759ff272")
        self.assertEqual("zone", self.mei.getElementById("m-df35aa9a-9155-4c89-a8b2-a05688156807").name)
        self.assertEqual(5, len(self.mei.getElementById("m-54a0bb9f-7aee-4417-bbe7-298e9149a8a2").children))
        self.deleter.delete_note(note)
        # neume should be gone
        self.assertEqual(None, self.mei.getElementById("m-afb0de04-7df2-4abd-b0af-6b03e91b5fe8"))
        self.assertEqual(4, len(self.mei.getElementById("m-54a0bb9f-7aee-4417-bbe7-298e9149a8a2").children))
        # Zone should be gone
        self.assertEqual(None, self.mei.getElementById("m-df35aa9a-9155-4c89-a8b2-a05688156807"))

    def testDeleteNeumeAndZone(self):
        """ Delete an id that is a neume, and remove its zone """
        neume = self.mei.getElementById("m-a4b60c3b-58ce-4918-8b97-38c960c50dab")
        self.deleter.delete_neume(neume)
        # Neume
        self.assertEqual(None, self.mei.getElementById("m-a4b60c3b-58ce-4918-8b97-38c960c50dab"))
        # Zone
        self.assertEqual(None, self.mei.getElementById("m-b06676a3-4aa1-430d-b1c8-3d3fcf606f0e"))

    def testDoDeleteNote(self):
        """ test do_delete with an id of a note """
        self.deleter.do_delete("m-c33fda11-014e-4028-8c6d-d5b9bc927839")
        self.assertEqual(2, len(self.mei.getElementById("m-d60836bd-c4e9-47c9-bac8-c3cf1af87847").children))

    def testDoDeleteNeume(self):
        """ test do_delete with an id of a neume """
        self.deleter.do_delete("m-afb0de04-7df2-4abd-b0af-6b03e91b5fe8")
        self.assertEqual(None, self.mei.getElementById("m-afb0de04-7df2-4abd-b0af-6b03e91b5fe8"))

    def testDeleteMultipleNotes(self):
        """ Delete multiple notes by passing in a comma-separated list of ids """
        self.deleter.do_delete("m-c33fda11-014e-4028-8c6d-d5b9bc927839,m-9569862a-2076-43e3-8926-d0da646a3ae0")
        self.assertEqual(1, len(self.mei.getElementById("m-d60836bd-c4e9-47c9-bac8-c3cf1af87847").children))
class ChangeNoteTest(unittest.TestCase):
    """Exercise ChangeNeumePitchHandler: zone updates, pitch arithmetic
    and moving whole neumes."""

    def setUp(self):
        app = tornado.web.Application()
        req = FakeReq()
        path = os.path.join(os.path.dirname(__file__), "data", "0400_segment.mei")
        self.mei = pymei.XmlImport.read(path)
        # Bug fix: the module is imported as ``tornadoapi`` (see the imports
        # at the top of this file); the previous ``api.ChangeNeumePitchHandler``
        # reference raised a NameError before any assertion could run.
        self.changer = tornadoapi.ChangeNeumePitchHandler(app, req)
        self.changer.mei = self.mei

    def testUpdateZone(self):
        """ Updating a zone rewrites its bounding-box attributes """
        neume = self.mei.getElementById("m-a4b60c3b-58ce-4918-8b97-38c960c50dab")
        self.changer.update_or_add_zone(neume, "1", "2", "3", "4")
        zone = self.mei.getElementById("m-b06676a3-4aa1-430d-b1c8-3d3fcf606f0e")
        self.assertEqual("1", zone.getAttribute("ulx").value)
        self.assertEqual("2", zone.getAttribute("uly").value)
        self.assertEqual("3", zone.getAttribute("lrx").value)
        self.assertEqual("4", zone.getAttribute("lry").value)

    def testPitchDifference(self):
        """ find_difference counts scale steps (octave-aware) between pitches """
        self.assertEqual(1, self.changer.find_difference("c", "4", "d", "4"))
        self.assertEqual(-1, self.changer.find_difference("e", "4", "d", "4"))
        self.assertEqual(3, self.changer.find_difference("f", "4", "b", "5"))
        self.assertEqual(9, self.changer.find_difference("d", "4", "f", "5"))
        self.assertEqual(7, self.changer.find_difference("e", "3", "e", "4"))
        self.assertEqual(5, self.changer.find_difference("d", "3", "b", "4"))
        self.assertEqual(-5, self.changer.find_difference("b", "4", "d", "3"))
        self.assertEqual(-11, self.changer.find_difference("e", "4", "a", "3"))

    def testNewNote(self):
        """ new_note transposes a pitch/octave pair by a step offset """
        self.assertEqual(("c", "4"), self.changer.new_note("a", "4", 2))
        self.assertEqual(("d", "4"), self.changer.new_note("e", "4", -1))
        self.assertEqual(("b", "5"), self.changer.new_note("f", "4", 3))
        self.assertEqual(("f", "5"), self.changer.new_note("d", "4", 9))
        self.assertEqual(("e", "4"), self.changer.new_note("e", "3", 7))
        self.assertEqual(("b", "4"), self.changer.new_note("d", "3", 5))
        self.assertEqual(("d", "3"), self.changer.new_note("b", "4", -5))
        self.assertEqual(("a", "3"), self.changer.new_note("e", "4", -11))

    def testMoveNeume(self):
        """ move_neume shifts every note in the neume by the same interval """
        neume = self.mei.getElementById("m-a4b60c3b-58ce-4918-8b97-38c960c50dab")
        self.changer.move_neume(neume, "g", "3")
        n1 = self.mei.getElementById("m-c33fda11-014e-4028-8c6d-d5b9bc927839")
        n2 = self.mei.getElementById("m-9569862a-2076-43e3-8926-d0da646a3ae0")
        n3 = self.mei.getElementById("m-962da020-362b-416f-bbcc-3c1f72de5798")
        self.assertEqual("g", n1.getAttribute("pname").value)
        self.assertEqual("3", n1.getAttribute("oct").value)
        self.assertEqual("f", n2.getAttribute("pname").value)
        self.assertEqual("4", n2.getAttribute("oct").value)
        self.assertEqual("g", n3.getAttribute("pname").value)
        self.assertEqual("3", n3.getAttribute("oct").value)
class InsertNoteTest(unittest.TestCase):
    """Exercise InsertNeumeHandler: creating neume/zone elements and
    inserting them into a layer and surface."""

    def setUp(self):
        app = tornado.web.Application()
        req = FakeReq()
        path = os.path.join(os.path.dirname(__file__), "data", "0400_segment.mei")
        self.mei = pymei.XmlImport.read(path)
        # Bug fix: the module is imported as ``tornadoapi`` (see the imports
        # at the top of this file); the previous ``api.InsertNeumeHandler``
        # reference raised a NameError before any assertion could run.
        self.inserter = tornadoapi.InsertNeumeHandler(app, req)
        self.inserter.mei = self.mei

    def testGetNeumeXml(self):
        """ get_new_neume builds a punctum neume/nc/note tree """
        r = self.inserter.get_new_neume("c", "4")
        self.assertEqual("neume", r.name)
        self.assertEqual("punctum", r.getAttribute("name").value)
        self.assertEqual("nc", r.children[0].name)
        self.assertEqual("note", r.children[0].children[0].name)
        note = r.children[0].children[0]
        self.assertEqual("c", note.getAttribute("pname").value)
        self.assertEqual("4", note.getAttribute("oct").value)

    def testGetZoneXml(self):
        """ get_new_zone builds a zone with the given bounding box """
        z = self.inserter.get_new_zone("1", "2", "3", "4")
        self.assertEqual("zone", z.name)
        self.assertEqual("1", z.getAttribute("ulx").value)
        self.assertEqual("2", z.getAttribute("uly").value)
        self.assertEqual("3", z.getAttribute("lrx").value)
        self.assertEqual("4", z.getAttribute("lry").value)

    def testDoInsertBefore(self):
        """ do_insert with a 'before' id places the neume at that position """
        newneume = self.inserter.get_new_neume("c", "4")
        newzone = self.inserter.get_new_zone("1", "2", "3", "4")
        layerId = "m-54a0bb9f-7aee-4417-bbe7-298e9149a8a2"
        before = "m-a4b60c3b-58ce-4918-8b97-38c960c50dab"
        self.inserter.do_insert(newneume, newzone, layerId, before)
        surface = self.mei.getElementById("m-4954b1c5-9c05-4963-accb-b6e351e3b6b4")
        self.assertEqual(6, len(surface.children))
        self.assertEqual(newzone.id, surface.children[-1].id)
        layer = self.mei.getElementById(layerId)
        self.assertEqual(6, len(layer.children))
        self.assertEqual(newneume.id, layer.children[3].id)

    def testDoInsertEnd(self):
        """ do_insert without a 'before' id appends the neume to the layer """
        newneume = self.inserter.get_new_neume("c", "4")
        newzone = self.inserter.get_new_zone("1", "2", "3", "4")
        layerId = "m-54a0bb9f-7aee-4417-bbe7-298e9149a8a2"
        self.inserter.do_insert(newneume, newzone, layerId)
        surface = self.mei.getElementById("m-4954b1c5-9c05-4963-accb-b6e351e3b6b4")
        self.assertEqual(6, len(surface.children))
        self.assertEqual(newzone.id, surface.children[-1].id)
        layer = self.mei.getElementById(layerId)
        self.assertEqual(6, len(layer.children))
        self.assertEqual(newneume.id, layer.children[-1].id)

    def testDoInsertNoZone(self):
        """ do_insert with no zone leaves the surface untouched """
        newneume = self.inserter.get_new_neume("c", "4")
        layerId = "m-54a0bb9f-7aee-4417-bbe7-298e9149a8a2"
        self.inserter.do_insert(newneume, None, layerId, None)
        surface = self.mei.getElementById("m-4954b1c5-9c05-4963-accb-b6e351e3b6b4")
        self.assertEqual(5, len(surface.children))
        layer = self.mei.getElementById(layerId)
        self.assertEqual(6, len(layer.children))
        self.assertEqual(newneume.id, layer.children[-1].id)
| |
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-10-11 17:51:43
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-29 17:23:15
from __future__ import print_function, division, absolute_import
import numpy as np
import astropy
import astropy.units as u
import marvin.tools
from marvin.tools.quantities.spectrum import Spectrum
from marvin.utils.general.general import get_drpall_table
from marvin.utils.plot.scatter import plot as scatplot
from marvin import log
from .base import VACMixIn, VACTarget
def choose_best_spectrum(par1, par2, conf_thresh=0.1):
    '''Select the better of two HI spectra of the same galaxy.

    Selection criteria, in order:
      (1) both detected and unconfused        -> highest SNR
      (2) both detected and both confused     -> lowest confusion probability
      (3) both detected and one confused      -> the unconfused one
      (4) one clean detection, one non-det    -> the detection
      (5) one confused detection, one non-det -> the non-detection
      (6) neither detected                    -> lowest rms

    Parameters
    ----------
    par1, par2 : dict
        Each with keys ``program`` ('gbt' or 'alfalfa'), ``snr``
        (integrated SNR; <= 0 means non-detection), ``rms`` (rms noise
        level) and ``conf_prob`` (confusion probability).
    conf_thresh : float
        Maximum confusion probability below which the object is treated
        as essentially unconfused.  Defaults to 0.1, following Stark+21.

    Returns
    -------
    str
        The ``program`` value of the chosen spectrum.
    '''
    detected1 = par1['snr'] > 0
    detected2 = par2['snr'] > 0

    if detected1 and detected2:
        clean1 = par1['conf_prob'] <= conf_thresh
        clean2 = par2['conf_prob'] <= conf_thresh
        if clean1 and clean2:
            # both clean: higher SNR wins (ties favour par1)
            pick = 0 if par1['snr'] >= par2['snr'] else 1
        elif clean1:
            pick = 0
        elif clean2:
            pick = 1
        else:
            # both confused: least-confused wins (ties favour par1)
            pick = 0 if par1['conf_prob'] <= par2['conf_prob'] else 1
    elif not detected1 and not detected2:
        # neither detected: quieter spectrum wins (ties favour par1)
        pick = 0 if par1['rms'] <= par2['rms'] else 1
    elif detected1:
        # only par1 detected: trust it unless it is likely confused
        pick = 0 if par1['conf_prob'] < conf_thresh else 1
    else:
        # only par2 detected: trust it unless it is likely confused
        pick = 1 if par2['conf_prob'] < conf_thresh else 0

    return (par1['program'], par2['program'])[pick]
class HIVAC(VACMixIn):
    """Provides access to the MaNGA-HI VAC.

    VAC name: HI

    URL: https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1

    Description: Returns HI summary data and spectra

    Authors: David Stark and Karen Masters
    """

    # Required parameters
    name = 'HI'
    description = 'Returns HI summary data and spectra'
    # maps a data release to the HI VAC file version for that release
    version = {'MPL-7': 'v1_0_1', 'DR15': 'v1_0_1', 'DR16': 'v1_0_2', 'DR17': 'v2_0_1', 'MPL-11': 'v2_0_1'}
    display_name = 'HI'
    url = 'https://www.sdss.org/dr17/data_access/value-added-catalogs/?vac_id=hi-manga-data-release-1'

    # optional Marvin Tools to attach your vac to
    include = (marvin.tools.cube.Cube, marvin.tools.maps.Maps, marvin.tools.modelcube.ModelCube)

    # optional methods to attach to your main VAC tool in ~marvin.tools.vacs.VACs
    add_methods = ['plot_mass_fraction']

    # Required method
    def set_summary_file(self, release):
        ''' Sets the path to the HI summary file

        :param release: release key (e.g. 'DR17') used to look up the
            VAC file version.
        '''
        # define the variables to build a unique path to your VAC file
        self.path_params = {'ver': self.version[release], 'type': 'all', 'program': 'GBT16A_095'}

        # get_path returns False if the files do not exist locally
        self.summary_file = self.get_path("mangahisum", path_params=self.path_params)

    def set_program(self,plateifu):
        ''' Pick the observing program ('gbt' or 'alfalfa') for a target.

        Looks up all summary-file rows for this plate-ifu; when both a GBT
        and an ALFALFA observation exist, keeps the better spectrum as
        decided by `choose_best_spectrum` and records the choice in
        ``self.path_params`` so the spectrum file path resolves correctly.
        '''
        # download the vac from the SAS if it does not already exist locally
        if not self.file_exists(self.summary_file):
            self.summary_file = self.download_vac('mangahisum', path_params=self.path_params)

        # Find all entries in summary file with this plate-ifu.
        # Need the full summary file data.
        # Find best entry between GBT/ALFALFA based on depth and confusion.
        # Then update self.path_params['program'] with alfalfa or gbt.
        summary = HITarget(plateifu, vacfile=self.summary_file)._data
        galinfo = summary[summary['plateifu'] == plateifu]

        if len(galinfo) == 1 and galinfo['session']=='ALFALFA':
            program = 'alfalfa'
        elif len(galinfo) in [0, 1]:
            # if no entry found or session is GBT, default program to gbt
            program = 'gbt'
        else:
            # two entries: score each and let choose_best_spectrum decide;
            # snr stays 0 (non-detection) unless a positive HI flux (fhi)
            # is present, in which case snr = fhi / efhi
            par1 = {'program': 'gbt','snr': 0.,'rms': galinfo[0]['rms'], 'conf_prob': galinfo[0]['conf_prob']}
            par2 = {'program': 'gbt','snr': 0.,'rms': galinfo[1]['rms'], 'conf_prob': galinfo[1]['conf_prob']}
            if galinfo[0]['session']=='ALFALFA':
                par1['program'] = 'alfalfa'
            if galinfo[1]['session']=='ALFALFA':
                par2['program'] = 'alfalfa'
            if galinfo[0]['fhi'] > 0:
                par1['snr'] = galinfo[0]['fhi']/galinfo[0]['efhi']
            if galinfo[1]['fhi'] > 0:
                par2['snr'] = galinfo[1]['fhi']/galinfo[1]['efhi']
            program = choose_best_spectrum(par1,par2)

        log.info('Using HI data from {0}'.format(program))

        # get path to ancillary VAC file for target HI spectra
        self.update_path_params({'program':program})

    # Required method
    def get_target(self, parent_object):
        ''' Accesses VAC data for a specific target from a Marvin Tool object '''

        # get any parameters you need from the parent object
        plateifu = parent_object.plateifu

        self.update_path_params({'plateifu': plateifu})
        # DR17/MPL-11 summary files can carry both GBT and ALFALFA rows,
        # so resolve which program's spectrum to use before building paths
        if parent_object.release in ['DR17', 'MPL-11']:
            self.set_program(plateifu)

        specfile = self.get_path('mangahispectra', path_params=self.path_params)

        # create container for more complex return data
        hidata = HITarget(plateifu, vacfile=self.summary_file, specfile=specfile)

        # get the spectral data for that row if it exists
        if hidata._indata and not self.file_exists(specfile):
            hidata._specfile = self.download_vac('mangahispectra', path_params=self.path_params)

        return hidata
class HITarget(VACTarget):
    ''' A customized target class to also display HI spectra

    This class handles data from both the HI summary file and the
    individual spectral files. Row data from the summary file for the given target
    is returned via the `data` property. Spectral data can be displayed via
    the `plot_spectrum` method.

    Parameters:
        targetid (str):
            The plateifu or mangaid designation
        vacfile (str):
            The path of the VAC summary file
        specfile (str):
            The path to the HI spectra

    Attributes:
        data:
            The target row data from the main VAC file
        targetid (str):
            The target identifier
    '''

    def __init__(self, targetid, vacfile, specfile=None):
        super(HITarget, self).__init__(targetid, vacfile)
        # path to the per-target spectrum file, if any
        self._specfile = specfile
        # lazily-loaded cache for the spectral data
        self._specdata = None

    def plot_spectrum(self):
        ''' Plot the HI spectrum

        Returns the matplotlib axis of the plot, or None when no
        spectrum file is associated with this target.
        '''
        if not self._specfile:
            return None
        # Bug fix: compare against None instead of relying on truthiness.
        # The old ``if not self._specdata`` re-evaluated the truth value of
        # the cached table data on every later call, which for numpy-backed
        # table data raises "truth value of an array is ambiguous".
        if self._specdata is None:
            self._specdata = self._get_data(self._specfile)

        vel = self._specdata['VHI'][0]
        flux = self._specdata['FHI'][0]
        spec = Spectrum(flux, unit=u.Jy, wavelength=vel,
                        wavelength_unit=u.km / u.s)
        ax = spec.plot(
            ylabel='HI\ Flux\ Density', xlabel='Velocity', title=self.targetid, ytrim='minmax'
        )
        return ax
#
# Functions to become available on your VAC in marvin.tools.vacs.VACs
def plot_mass_fraction(vacdata_object):
    ''' Plot the HI mass fraction

    Computes and plots the HI mass fraction using
    the NSA elliptical Petrosian stellar mass from the
    MaNGA DRPall file. Only plots data for the subset of
    targets present in both the HI VAC and the DRPall file.

    Parameters:
        vacdata_object (object):
            The `~.VACDataClass` instance of the HI VAC

    Example:
        >>> from marvin.tools.vacs import VACs
        >>> v = VACs()
        >>> hi = v.HI
        >>> hi.plot_mass_fraction()
    '''
    # index the DRPall table by plateifu so .loc can match HI targets
    drpall = get_drpall_table()
    drpall.add_index('plateifu')

    hidata = vacdata_object.data[1].data
    matched = drpall.loc[hidata['plateifu']]

    log_stellar_mass = np.log10(matched['nsa_elpetro_mass'])
    gas_fraction = hidata['logMHI'] - log_stellar_mass

    fig, axes = scatplot(
        log_stellar_mass,
        gas_fraction,
        with_hist=False,
        ylim=[-5, 5],
        xlabel=r'log $M_*$',
        ylabel=r'log $M_{HI}/M_*$',
    )
    return axes[0]
| |
# -*- coding: utf-8 -*-
import httplib
import logging
from datetime import datetime
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from framework.auth import get_or_create_user
from framework.exceptions import HTTPError
from framework.flask import redirect
from framework.transactions.context import TokuTransaction
from framework.transactions.handlers import no_auto_transaction
from website import settings
from website.models import Node, Tag
from website.util import web_url_for
from website.mails import send_mail
from website.files.models import StoredFileNode
from website.mails import CONFERENCE_SUBMITTED, CONFERENCE_INACTIVE, CONFERENCE_FAILED
from website.conferences import utils, signals
from website.conferences.message import ConferenceMessage, ConferenceError
from website.conferences.model import Conference
logger = logging.getLogger(__name__)
@no_auto_transaction
def meeting_hook():
    """View function for email conference submission.

    Verifies the inbound message and resolves its conference; returns
    406 when either step fails or the conference is inactive, otherwise
    hands the submission off to ``add_poster_by_email``.
    """
    message = ConferenceMessage()

    # Both verification and conference lookup signal problems the same
    # way, so a single handler covers them.
    try:
        message.verify()
        conference = Conference.get_by_endpoint(message.conference_name, active=False)
    except ConferenceError as error:
        logger.error(error)
        raise HTTPError(httplib.NOT_ACCEPTABLE)

    if not conference.active:
        send_mail(
            message.sender_email,
            CONFERENCE_INACTIVE,
            fullname=message.sender_display,
            presentations_url=web_url_for('conference_view', _absolute=True),
        )
        raise HTTPError(httplib.NOT_ACCEPTABLE)

    add_poster_by_email(conference=conference, message=message)
def add_poster_by_email(conference, message):
    """Create (if needed) a user and a project node from an emailed
    conference submission, upload its attachments, and email the sender
    either a failure notice or a confirmation.

    :param Conference conference: Conference the email was addressed to
    :param ConferenceMessage message: Parsed inbound email
    """
    # Fail if no attachments
    if not message.attachments:
        return send_mail(
            message.sender_email,
            CONFERENCE_FAILED,
            fullname=message.sender_display,
        )

    # collects newly created User/Node records for record_message below
    created = []

    with TokuTransaction():
        user, user_created = get_or_create_user(
            message.sender_display,
            message.sender_email,
            message.is_spam,
        )
        if user_created:
            created.append(user)
            user.system_tags.append('osf4m')
            # brand-new users get a link to set their password
            set_password_url = web_url_for(
                'reset_password',
                verification_key=user.verification_key,
                _absolute=True,
            )
            user.date_last_login = datetime.utcnow()
            user.save()
        else:
            set_password_url = None

        node, node_created = utils.get_or_create_node(message.subject, user)
        if node_created:
            created.append(node)
            node.system_tags.append('osf4m')
            node.save()

        utils.provision_node(conference, message, node, user)
        utils.record_message(message, created)
    # Prevent circular import error
    from framework.auth import signals as auth_signals
    if user_created:
        auth_signals.user_confirmed.send(user)
    utils.upload_attachments(user, node, message.attachments)

    # link straight to the first attachment's download
    download_url = node.web_url_for(
        'addon_view_or_download_file',
        path=message.attachments[0].filename,
        provider='osfstorage',
        action='download',
        _absolute=True,
    )

    # Send confirmation email
    send_mail(
        message.sender_email,
        CONFERENCE_SUBMITTED,
        conf_full_name=conference.name,
        conf_view_url=web_url_for(
            'conference_results',
            meeting=message.conference_name,
            _absolute=True,
        ),
        fullname=message.sender_display,
        user_created=user_created,
        set_password_url=set_password_url,
        profile_url=user.absolute_url,
        node_url=node.absolute_url,
        file_url=download_url,
        presentation_type=message.conference_category.lower(),
        is_spam=message.is_spam,
    )
    if node_created and user_created:
        signals.osf4m_user_created.send(user, conference=conference, node=node)
def _render_conference_node(node, idx, conf):
    """Build the grid-row dict describing one conference submission node.

    :param Node node: Submission project node.
    :param int idx: Row index used as the row's ``id``.
    :param Conference conf: Conference the node belongs to.
    :return: dict of display fields for the submissions grid.
    """
    # Default to "no file": overwritten below when a stored file exists.
    download_url = ''
    download_count = 0
    file_query = StoredFileNode.find(
        Q('node', 'eq', node) &
        Q('is_file', 'eq', True)
    ).limit(1)
    try:
        stored = next(iter(file_query)).wrapped()
    except StopIteration:
        pass
    else:
        download_count = stored.get_download_count()
        download_url = node.web_url_for(
            'addon_view_or_download_file',
            path=stored.path.strip('/'),
            provider='osfstorage',
            action='download',
            _absolute=True,
        )
    author = node.visible_contributors[0]
    # Category is determined by which submission tag the node carries.
    if conf.field_names['submission1'] in node.system_tags:
        category = conf.field_names['submission1']
    else:
        category = conf.field_names['submission2']
    return {
        'id': idx,
        'title': node.title,
        'nodeUrl': node.url,
        'author': author.family_name or author.fullname,
        'authorUrl': node.creator.url,
        'category': category,
        'download': download_count,
        'downloadUrl': download_url,
        'dateCreated': node.date_created.isoformat(),
        'confName': conf.name,
        'confUrl': web_url_for('conference_results', meeting=conf.endpoint),
        'tags': ' '.join(tag._id for tag in node.tags)
    }
def conference_data(meeting):
    """Return rendered grid rows for every public, non-deleted node tagged
    with the given meeting endpoint.

    :param str meeting: Endpoint name for a conference.
    :raises HTTPError: 404 when no conference matches *meeting*.
    """
    try:
        conf = Conference.find_one(Q('endpoint', 'iexact', meeting))
    except ModularOdmException:
        raise HTTPError(httplib.NOT_FOUND)
    nodes = Node.find(
        Q('tags', 'iexact', meeting) &
        Q('is_public', 'eq', True) &
        Q('is_deleted', 'eq', False)
    )
    return [
        _render_conference_node(node, position, conf)
        for position, node in enumerate(nodes)
    ]
def redirect_to_meetings(**kwargs):
    """Redirect any legacy meetings URL to the canonical /meetings/ page."""
    return redirect('/meetings/')
def conference_results(meeting):
    """Return the data for the grid view for a conference.

    :param str meeting: Endpoint name for a conference.
    :raises HTTPError: 404 when no conference matches *meeting*.
    """
    try:
        conf = Conference.find_one(Q('endpoint', 'iexact', meeting))
    except ModularOdmException:
        raise HTTPError(httplib.NOT_FOUND)
    rows = conference_data(meeting)
    return {
        'data': rows,
        'label': meeting,
        'meeting': conf.to_storage(),
        # Needed in order to use base.mako namespace
        'settings': settings,
    }
def conference_submissions(**kwargs):
    """Return data for all OSF4M submissions.

    The total number of submissions for each meeting is calculated and cached
    in the Conference.num_submissions field.
    """
    submissions = []
    # TODO: Revisit this loop, there has to be a way to optimize it
    for conf in Conference.find():
        # For efficiency, we filter by tag first, then node
        # instead of doing a single Node query
        tag_keys = Tag.find(Q('lower', 'eq', conf.endpoint.lower())).get_keys()
        matching_nodes = Node.find(
            Q('tags', 'in', tag_keys) &
            Q('is_public', 'eq', True) &
            Q('is_deleted', 'ne', True)
        )
        projects = set(matching_nodes)
        submissions.extend(
            _render_conference_node(project, position, conf)
            for position, project in enumerate(projects)
        )
        # Cache the number of submissions
        conf.num_submissions = len(projects)
        conf.save()
        # NOTE(review): this check is the last statement of the loop body,
        # so the `continue` has no effect; kept to preserve behavior.
        if conf.num_submissions < settings.CONFERENCE_MIN_COUNT:
            continue
    submissions.sort(key=lambda submission: submission['dateCreated'], reverse=True)
    return {'submissions': submissions}
def conference_view(**kwargs):
    """Render the listing of all meetings that have enough submissions,
    most-submitted first.
    """
    meetings = []
    for conf in Conference.find():
        if conf.num_submissions < settings.CONFERENCE_MIN_COUNT:
            continue
        end = conf.end_date.strftime("%b %d, %Y") if conf.end_date else None
        start = conf.start_date.strftime("%b %d, %Y") if conf.start_date else None
        meetings.append({
            'name': conf.name,
            'location': conf.location,
            'end_date': end,
            'start_date': start,
            'url': web_url_for('conference_results', meeting=conf.endpoint),
            'count': conf.num_submissions,
        })
    meetings.sort(key=lambda meeting: meeting['count'], reverse=True)
    return {'meetings': meetings}
| |
# -*- Mode: Python; tab-width: 2; indent-tabs-mode:nil; -*-
# vim: set ts=2 et sw=2 tw=80:
#
# Copyright (c) 2013 MathJax Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Prefixes used for the generated MathJax web-font family and font names.
FONTFAMILY_PREFIX = "Neo Euler MathJax"
FONTNAME_PREFIX = "NeoEulerMathJax"
# Source OpenType math font the tables in this module are derived from.
MATHFONT = "euler.otf"
# Neo Euler ships no separate main (text) fonts.
MAINFONTS = None
# Extra glyphs assigned to the split "Variants" font, each mapped to the
# code point it should occupy there (0xE2xx is a private-use range).
FONTSPLITTING_EXTRA = {
  "Variants": [
    ("uni0030.onum", 0xE200), # old style numbers
    ("uni0031.onum", 0xE201),
    ("uni0032.onum", 0xE202),
    ("uni0033.onum", 0xE203),
    ("uni0034.onum", 0xE204),
    ("uni0035.onum", 0xE205),
    ("uni0036.onum", 0xE206),
    ("uni0037.onum", 0xE207),
    ("uni0038.onum", 0xE208),
    ("uni0039.onum", 0xE209),
    ("minute.ssty1", 0x2032),
    ("second.ssty1", 0x2033),
    ("uni2034.ssty1", 0x2034),
    ("uni2035.ssty1", 0x2035),
    ("uni2036.ssty1", 0x2036),
    ("uni2037.ssty1", 0x2037),
    ("uni2057.ssty1", 0x2057)
  ]
}
FONTSPLITTING_REMOVE = None
# Global metadata and line metrics written into the generated font data.
FONTDATA = {
  "FileVersion": "2.3",
  "Year": "2013",
  "TeX_factor": None, # Leave None for automatic computation
  "baselineskip": 1.2,
  "lineH": .8,
  "lineD": .2,
  "hasStyleChar": True
}
# Code point of the glyph used to draw horizontal rules (macron).
RULECHAR = 0x00AF
# Characters remapped to equivalent glyphs that exist in this font.
REMAP = {
  0x203E: 0x00AF, 0x2C9: 0x00AF,
  0x20D0: 0x21BC, 0x20D1: 0x21C0, # combining left and right harpoons
  0x20EC: 0x21C1, 0x20ED: 0x21BD, # combining low right and left harpoons
  0x20F0: 0x2A, # combining asterisk
  0xFE37: 0x23DE, 0xFE38: 0x23DF, # OverBrace, UnderBrace
  0x2B9: 0x2032, # prime
  0x3D2: 0x3A5, # Upsilon
  0x2015: 0x00AF, 0x2014: 0x00AF, # horizontal bars
  0x2017: 0x5F,
  0x2022: 0x2219, # bullet
  # 0x2305: 0x22BC, 0x2306: 0x2A5E, # barwedge, doublebarwedge
  # 0x25AA: 0x25A0, 0x25B4: 0x25B2, # blacksquare, blacktriangle
  # 0x25B5: 0x25B3, 0x25B8: 0x25B6, # triangle, blacktriangleright
  # 0x25BE: 0x25BC, 0x25BF: 0x25BD, # blacktriangledown, triangledown
  # 0x25C2: 0x25C0, # blacktriangleleft
  0x3008: 0x27E8, 0x3009: 0x27E9, # langle, rangle
  0x2758: 0x2223, # VerticalSeparator
  0x2A2F: 0xD7, # cross product
  # 0x25FB: 0x25A1, 0x25FC: 0x25A0, # square, blacksquare
  # 0x226D: "\u224D\u0338" # \not\asymp
  0x22E2: "\u2291\u0338", # \not\sqsubseteq
  0x22E3: "\u2292\u0338" # \not\sqsupseteq
}
# Over-accent characters remapped to their combining forms.
REMAPACCENT = {
  "\u2192": "\u20D7", # vector arrow
  "\u2032": "\u0301", # acute accent
  "\u007E": "\u0303", # tilde
  "\u2035": "\u0300", # grave accent
  "\u005E": "\u0302", # hat
  "\u0060": "\u0300",
  "\u00B4": "\u0301"
}
# No under-accent remappings for this font.
REMAPACCENTUNDER = {
}
# Variant-font configuration: variant glyphs come from the "VARIANTS" font.
VARIANT = None
VARIANTFONTS = ["VARIANTS"]
# No calligraphic alphabets in Neo Euler.
TEXCALIGRAPHIC = None
TEXCALIGRAPHICFONTS = []
# Old-style numerals live at offset 0xE200 (see FONTSPLITTING_EXTRA).
TEXOLDSTYLE = "offsetN: 0xE200"
TEXOLDSTYLEFONTS = ["VARIANTS"]
TEXCALIGRAPHICBOLD = None
TEXCALIGRAPHICBOLDFONTS = []
TEXOLDSTYLEBOLD = None
TEXOLDSTYLEBOLDFONTS = []
SANSSERIFGREEK = None
SANSSERIFITALICNUMBER = None
SANSSERIFITALICGREEK = None
SANSSERIFBOLDITALICNUMBER = None
SMALLOPFONTS = None
# Stretchy-delimiter construction table, keyed by code point.
#   "dir":     stretch direction ("H" horizontal, "V" vertical)
#   "HW":      code points usable as fixed-size variants
#   "stretch": (glyph, role) parts; roles: "top", "ext", "bot", "rep"
#   "alias":   reuse another entry's construction data
DELIMITERS = {
  0x002D: {"alias": 0x00AF, "dir": "H"}, # hyphen-minus
  0x003D: # equal sign
  {
    "dir": "H",
    "HW": [0x003D],
    "stretch": [(0x003D,"rep")]
  },
  0x005F: {"alias": 0x00AF, "dir": "H"}, # low line
  0x00AF:
  {
    "dir": "H",
    "HW": [0x00AF],
    "stretch": [(0x00AF,"rep")]
  },
  0x00C9: {"alias": 0x00AF, "dir": "H"},
  0x0332: {"alias": 0x00AF, "dir": "H"},
  0x2015: {"alias": 0x00AF, "dir": "H"},
  0x2017: {"alias": 0x00AF, "dir": "H"},
  0x203E: {"alias": 0x00AF, "dir": "H"},
  0x2190: {"alias": 0x20D6, "dir": "H"},
  0x2191:
  {
    "dir": "V",
    "HW": [0x2191],
    "stretch": [(0x2191,"top"),(0x7C,"ext")]
  },
  0x2192: {"alias": 0x20D7, "dir": "H"},
  0x2193:
  {
    "dir": "V",
    "HW": [0x2193],
    "stretch": [(0x7C,"ext"),(0x2193,"bot")]
  },
  0x2194: {"alias": 0x20E1, "dir": "H"},
  0x2195:
  {
    "dir": "V",
    "HW": [0x2195],
    "stretch": [(0x2191,"top"),(0x7C,"ext"),(0x2193,"bot")]
  },
  0x21D0:
  {
    "dir": "H",
    "HW": [0x21D0,0x27F8]
  },
  0x21D1:
  {
    "dir": "H",
    "HW": [0x21D1],
    "stretch": [(0x21D1,"top"),(0x2016,"ext")]
  },
  0x21D2:
  {
    "dir": "H",
    "HW": [0x21D2,0x27F9]
  },
  0x21D3:
  {
    "dir": "H",
    "HW": [0x21D3],
    "stretch": [(0x2016,"ext"),(0x21D3,"bot")]
  },
  0x21D4:
  {
    "dir": "H",
    "HW": [0x21D0,0x27FA]
  },
  0x21D5:
  {
    "dir": "H",
    "HW": [0x21D5],
    "stretch": [(0x21D1,"top"),(0x2016,"ext"),(0x21D3,"bot")]
  },
  0x2212: {"alias": 0x00AF, "dir": "H"}, # minus
  0x2312: {"alias": 0x23DC, "dir": "H"}, # arc
  0x2322: {"alias": 0x23DC, "dir": "H"}, # frown
  0x2323: {"alias": 0x23DD, "dir": "H"}, # smile
  0x23AA: # \bracevert
  {
    "dir": "V",
    "HW": [0x23AA],
    "stretch": [(0x23AA,"ext")]
  },
  0x23AF: {"alias": 0x00AF, "dir": "H"}, # minus
  0x23B0:
  {
    "dir": "V",
    "HW": [0x23A7],
    "stretch": [(0x23A7,"top"),(0x23AA,"ext"),(0x23AD, "bot")]
  },
  0x23B1:
  {
    "dir": "V",
    "HW": [0x23AB],
    "stretch": [(0x23AB,"top"),(0x23AA,"ext"),(0x23A9, "bot")]
  },
  0x23D0: # vertical line extension
  {
    "dir": "V",
    "HW": [0x7C],
    "stretch": [(0x7C,"ext")]
  },
  0x2500: {"alias": 0x00AF, "dir": "H"}, # minus
  0x2758: {"alias": 0x23D0, "dir": "V"},
  0x27EE: {"alias": 0x0028, "dir": "V"},
  0x27EF: {"alias": 0x0029, "dir": "V"},
  0x27F5: {"alias": 0x20D6, "dir": "H"}, # long left arrow
  0x27F6: {"alias": 0x20D7, "dir": "H"}, # long right arrow
  0x27F7: {"alias": 0x20E1, "dir": "H"}, # long left-right arrow
  0x27F8: {"alias": 0x21D0, "dir": "H"}, # long left double arrow
  0x27F9: {"alias": 0x21D2, "dir": "H"}, # long right double arrow
  0x27FA: {"alias": 0x21D4, "dir": "H"}, # long left-right double arrow
  0x27FB: {"alias": 0x20D6, "dir": "H"}, # long left arrow from bar
  0x27FC: {"alias": 0x20D7, "dir": "H"}, # long right arrow from bar
  0x27FD: {"alias": 0x21D0, "dir": "H"}, # long left double arrow from bar
  0x27FE: {"alias": 0x21D2, "dir": "H"}, # long right double arrow from bar
  0x3008: {"alias": 0x27E8, "dir": "V"}, # langle
  0x3009: {"alias": 0x27E9, "dir": "V"}, # rangle
  0xFE37: {"alias": 0x23DE, "dir": "H"}, # horizontal brace down
  0xFE38: {"alias": 0x23DF, "dir": "H"} # horizontal brace up
}
# Additional code points for which delimiter data should also be
# generated (presumably handled by the generator's defaults — the
# consuming tool is not visible here).
DELIMITERS_EXTRA = [
  0x2044,
  0x20E1,
  0x20EE,
  0x20EF,
  0x220F,
  0x2210,
  0x2211,
  0x2227,
  0x2228,
  0x2229,
  0x222A,
  0x222B,
  0x222C,
  0x222D,
  0x222E,
  0x228E,
  0x22C0,
  0x22C1,
  0x22C2,
  0x22C3,
  0x23DC,
  0x23DD,
  0x2A0C
]
| |
# -*- coding: utf-8 -*-
import os.path
import codecs
import sys
from xml.etree import ElementTree
from beehive.compat import unicode
from beehive.reporter.base import Reporter
from beehive.model import Scenario, ScenarioOutline, Step
from beehive.formatter import ansi_escapes
from beehive.model_describe import ModelDescriptor
from beehive.textutil import indent, make_indentation
def CDATA(text=None):
    """Build a pseudo-element whose tag marks a CDATA section.

    ANSI escape sequences are stripped from *text* before it is stored.
    """
    # -- issue #70: remove_ansi_escapes(text)
    cdata = ElementTree.Element('![CDATA[')
    cdata.text = ansi_escapes.strip_escapes(text)
    return cdata
class ElementTreeWithCDATA(ElementTree.ElementTree):
    """ElementTree subclass that writes ``![CDATA[`` pseudo-elements
    (as produced by :func:`CDATA`) as literal CDATA sections.
    """
    def _write(self, file, node, encoding, namespaces):
        """This method is for ElementTree <= 1.2.6"""
        if node.tag == '![CDATA[':
            # Emit the raw text wrapped in a CDATA section instead of
            # letting ElementTree XML-escape it.
            text = node.text.encode(encoding)
            file.write("\n<![CDATA[%s]]>\n" % text)
        else:
            ElementTree.ElementTree._write(self, file, node, encoding,
                                           namespaces)
# Newer ElementTree versions (>= 1.3) serialize via the registered
# ``_serialize['xml']`` function, so hook it to emit '![CDATA[' elements
# as raw CDATA sections.  The Python-2 serializer takes an extra
# ``encoding`` argument, hence the two variants.
if hasattr(ElementTree, '_serialize'):
    if sys.version_info[0] == 2:
        def _serialize_xml(write, elem, encoding, qnames, namespaces,
                           orig=ElementTree._serialize_xml):
            if elem.tag == '![CDATA[':
                # Text must be encoded here; Python 2 writes bytes.
                write("\n<%s%s]]>\n" % (elem.tag, elem.text.encode(encoding)))
                return
            return orig(write, elem, encoding, qnames, namespaces)
    else:
        def _serialize_xml(write, elem, qnames, namespaces,
                           orig=ElementTree._serialize_xml, **kwargs):
            if elem.tag == '![CDATA[':
                write("\n<%s%s]]>\n" % (elem.tag, elem.text))
                return
            return orig(write, elem, qnames, namespaces, **kwargs)
    # Install the hook both at module level and in the serializer registry.
    ElementTree._serialize_xml = ElementTree._serialize['xml'] = _serialize_xml
class FeatureReportData(object):
    """
    Provides value object to collect JUnit report data from a Feature.

    Attributes mirror a JUnit testsuite: collected ``testcases`` plus
    counters for total/errors/failures/skipped tests.
    """
    def __init__(self, feature, filename, classname=None):
        """
        :param feature: Feature object the report is collected for.
        :param filename: Feature file path used to derive the classname.
        :param classname: Optional explicit JUnit classname; when omitted
            it is derived from *filename* by replacing '/' with '.'.
        """
        if not classname and filename:
            classname = filename.replace('/', '.')
        self.feature = feature
        self.filename = filename
        self.classname = classname
        # Counter/list initialization is shared with reset() to avoid
        # the two falling out of sync.
        self.reset()

    def reset(self):
        """Clear collected testcases and reset all counters to zero."""
        self.testcases = []
        self.counts_tests = 0
        self.counts_errors = 0
        self.counts_failed = 0
        self.counts_skipped = 0
class JUnitReporter(Reporter):
    """
    Generates JUnit-like XML test report for beehive.

    One ``TESTS-<feature>.xml`` file is written per feature into
    ``config.junit_directory``; each scenario becomes a JUnit testcase.
    """
    show_multiline = True
    show_timings = True     # -- Show step timings.

    def make_feature_filename(self, feature):
        """Derive the dotted, extension-less report basename for a feature.

        Strips the configured base path, drops the file extension and
        converts path separators to dots.
        """
        filename = None
        for path in self.config.paths:
            if feature.filename.startswith(path):
                filename = feature.filename[len(path) + 1:]
                break
        if not filename:
            # -- NOTE: Directory path (subdirs) are taken into account.
            filename = feature.location.relpath(self.config.base_dir)
        filename = filename.rsplit('.', 1)[0]
        filename = filename.replace('\\', '/').replace('/', '.')
        return filename

    # -- REPORTER-API:
    def feature(self, feature):
        """Collect testcases for all scenarios of *feature* and write the
        JUnit XML report file.
        """
        filename = self.make_feature_filename(feature)
        classname = filename
        report = FeatureReportData(feature, filename)
        filename = 'TESTS-%s.xml' % filename
        suite = ElementTree.Element('testsuite')
        suite.set('name', '%s.%s' % (classname, feature.name or feature.filename))
        # -- BUILD-TESTCASES: From scenarios
        for scenario in feature:
            if isinstance(scenario, ScenarioOutline):
                scenario_outline = scenario
                self._process_scenario_outline(scenario_outline, report)
            else:
                self._process_scenario(scenario, report)
        # -- ADD TESTCASES to testsuite:
        for testcase in report.testcases:
            suite.append(testcase)
        suite.set('tests', str(report.counts_tests))
        suite.set('errors', str(report.counts_errors))
        suite.set('failures', str(report.counts_failed))
        suite.set('skipped', str(report.counts_skipped))  # WAS: skips
        # -- ORIG: suite.set('time', str(round(feature.duration, 3)))
        suite.set('time', str(round(feature.duration, 6)))
        if not os.path.exists(self.config.junit_directory):
            # -- ENSURE: Create multiple directory levels at once.
            os.makedirs(self.config.junit_directory)
        tree = ElementTreeWithCDATA(suite)
        report_filename = os.path.join(self.config.junit_directory, filename)
        # -- FIX: Close the report file deterministically instead of
        #    leaking the open handle until garbage collection.
        with codecs.open(report_filename, 'wb') as report_file:
            tree.write(report_file, 'UTF-8')

    # -- MORE:
    @staticmethod
    def select_step_with_status(status, steps):
        """
        Helper function to find the first step that has the given step.status.
        EXAMPLE: Search for a failing step in a scenario (all steps).
            >>> scenario = "..."
            >>> failed_step = select_step_with_status("failed", scenario)
            >>> failed_step = select_step_with_status("failed", scenario.all_steps)
            >>> assert failed_step.status == "failed"
        EXAMPLE: Search only scenario steps, skip background steps.
            >>> failed_step = select_step_with_status("failed", scenario.steps)
        :param status: Step status to search for (as string).
        :param steps: List of steps to search in (or scenario).
        :returns: Step object, if found.
        :returns: None, otherwise.
        """
        for step in steps:
            assert isinstance(step, Step), \
                "TYPE-MISMATCH: step.class=%s" % step.__class__.__name__
            if step.status == status:
                return step
        # -- OTHERWISE: No step with the given status found.
        # KeyError("Step with status={0} not found".format(status))
        return None

    @classmethod
    def describe_step(cls, step):
        """Return a one-line (plus optional multiline-part) description of
        a step, including its status and, optionally, its timing.
        """
        status = str(step.status)
        if cls.show_timings:
            status += u" in %0.3fs" % step.duration
        text = u'%s %s ... ' % (step.keyword, step.name)
        text += u'%s\n' % status
        if cls.show_multiline:
            prefix = make_indentation(2)
            if step.text:
                text += ModelDescriptor.describe_docstring(step.text, prefix)
            elif step.table:
                text += ModelDescriptor.describe_table(step.table, prefix)
        return text

    @classmethod
    def describe_scenario(cls, scenario):
        """
        Describe the scenario and the test status.
        NOTE: table, multiline text is missing in description.

        :param scenario:  Scenario that was tested.
        :return: Textual description of the scenario.
        """
        header_line = u'\n@scenario.begin\n'
        tags = scenario.tags
        header_line += '\n  %s\n' % ' '.join(['@%s' % tag for tag in tags])
        header_line += '  %s: %s\n' % (scenario.keyword, scenario.name)
        footer_line = u'\n@scenario.end\n' + u'-' * 80 + '\n'
        text = u''
        for step in scenario:
            text += cls.describe_step(step)
        step_indentation = make_indentation(4)
        return header_line + indent(text, step_indentation) + footer_line

    def _process_scenario(self, scenario, report):
        """
        Process a scenario and append information to JUnit report object.
        This corresponds to a JUnit testcase:

          * testcase.@classname = f(filename) +'.'+ feature.name
          * testcase.@name   = scenario.name
          * testcase.@status = scenario.status
          * testcase.@time   = scenario.duration

        Distinguishes now between failures and errors.
        Failures are AssertationErrors: expectation is violated/not met.
        Errors are unexpected RuntimeErrors (all other exceptions).

        If a failure/error occurs, the step, that caused the failure,
        and its location are provided now.

        :param scenario:  Scenario to process.
        :param report:    Context object to store/add info to (outgoing param).
        """
        assert isinstance(scenario, Scenario)
        assert not isinstance(scenario, ScenarioOutline)
        feature = report.feature
        classname = report.classname
        report.counts_tests += 1
        case = ElementTree.Element('testcase')
        case.set('classname', '%s.%s' % (classname, feature.name or feature.filename))
        case.set('name', scenario.name or '')
        case.set('status', scenario.status)
        # -- ORIG: case.set('time', str(round(scenario.duration, 3)))
        case.set('time', str(round(scenario.duration, 6)))

        step = None
        if scenario.status == 'failed':
            # Prefer a 'failed' step; an 'undefined' step also counts.
            for status in ('failed', 'undefined'):
                step = self.select_step_with_status(status, scenario)
                if step:
                    break
            assert step, "OOPS: No failed step found in scenario: %s" % scenario.name
            assert step.status in ('failed', 'undefined')
            element_name = 'failure'
            if isinstance(step.exception, (AssertionError, type(None))):
                # -- FAILURE: AssertionError
                report.counts_failed += 1
            else:
                # -- UNEXPECTED RUNTIME-ERROR:
                report.counts_errors += 1
                element_name = 'error'
            # -- COMMON-PART:
            failure = ElementTree.Element(element_name)
            step_text = self.describe_step(step).rstrip()
            text = u"\nFailing step: %s\nLocation: %s\n" % (step_text, step.location)
            message = unicode(step.exception)
            if len(message) > 80:
                # Keep the XML attribute short; full text goes into CDATA.
                message = message[:80] + "..."
            failure.set('type', step.exception.__class__.__name__)
            failure.set('message', message)
            text += unicode(step.error_message)
            failure.append(CDATA(text))
            case.append(failure)
        elif scenario.status in ('skipped', 'untested'):
            report.counts_skipped += 1
            step = self.select_step_with_status('undefined', scenario)
            if step:
                # -- UNDEFINED-STEP: Skipped scenario with missing step
                # definition is reported as failure.
                report.counts_failed += 1
                failure = ElementTree.Element('failure')
                failure.set('type', 'undefined')
                failure.set('message', ('Undefined Step: %s' % step.name))
                case.append(failure)
            else:
                skip = ElementTree.Element('skipped')
                case.append(skip)

        # Create stdout section for each test case
        stdout = ElementTree.Element('system-out')
        text = self.describe_scenario(scenario)
        # Append the captured standard output
        if scenario.stdout:
            text += '\nCaptured stdout:\n%s\n' % scenario.stdout
        stdout.append(CDATA(text))
        case.append(stdout)

        # Create stderr section for each test case
        if scenario.stderr:
            stderr = ElementTree.Element('system-err')
            text = u'\nCaptured stderr:\n%s\n' % scenario.stderr
            stderr.append(CDATA(text))
            case.append(stderr)

        report.testcases.append(case)

    def _process_scenario_outline(self, scenario_outline, report):
        """Process every generated scenario of a ScenarioOutline."""
        assert isinstance(scenario_outline, ScenarioOutline)
        for scenario in scenario_outline:
            assert isinstance(scenario, Scenario)
            self._process_scenario(scenario, report)
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer',
'NumPyLexer', 'RConsoleLexer', 'SLexer']
class MuPADLexer(RegexLexer):
    """
    A `MuPAD <http://www.mupad.com>`_ lexer.
    Contributed by Christopher Creutzig <christopher@creutzig.de>.

    *New in Pygments 0.8.*
    """
    name = 'MuPAD'
    aliases = ['mupad']
    filenames = ['*.mu']

    tokens = {
      'root' : [
        (r'//.*?$', Comment.Single),
        # Block comments can nest; handled in the 'comment' state below.
        (r'/\*', Comment.Multiline, 'comment'),
        (r'"(?:[^"\\]|\\.)*"', String),
        (r'\(|\)|\[|\]|\{|\}', Punctuation),
        (r'''(?x)\b(?:
            next|break|end|
            axiom|end_axiom|category|end_category|domain|end_domain|inherits|
            if|%if|then|elif|else|end_if|
            case|of|do|otherwise|end_case|
            while|end_while|
            repeat|until|end_repeat|
            for|from|to|downto|step|end_for|
            proc|local|option|save|begin|end_proc|
            delete|frame
          )\b''', Keyword),
        (r'''(?x)\b(?:
            DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
            DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
            DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
            DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
          )\b''', Name.Class),
        (r'''(?x)\b(?:
            PI|EULER|E|CATALAN|
            NIL|FAIL|undefined|infinity|
            TRUE|FALSE|UNKNOWN
          )\b''',
         Name.Constant),
        (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
        (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
        (r'''(?x)\b(?:
            and|or|not|xor|
            assuming|
            div|mod|
            union|minus|intersect|in|subset
          )\b''',
         Operator.Word),
        (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
        #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
        # Identifier (possibly `quoted` and '::'-qualified) followed by '('
        # is highlighted as a function call.
        (r'''(?x)
          ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
          (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)\s*([(])''',
         bygroups(Name.Function, Punctuation)),
        (r'''(?x)
          (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
          (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
        (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
        (r'\.[0-9]+(?:e[0-9]+)?', Number),
        (r'.', Text)
      ],
      'comment' : [
        (r'[^*/]', Comment.Multiline),
        # '#push'/'#pop' implement nested /* ... */ comments.
        (r'/\*', Comment.Multiline, '#push'),
        (r'\*/', Comment.Multiline, '#pop'),
        (r'[*/]', Comment.Multiline)
      ]
    }
class MatlabLexer(RegexLexer):
    """
    For Matlab source code.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    *New in Pygments 0.10.*
    """
    name = 'Matlab'
    aliases = ['matlab']
    filenames = ['*.m']
    mimetypes = ['text/matlab']

    #
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # for f in elfun specfun elmat; do
    #   echo -n "$f = "
    #   matlab -nojvm -r "help $f;exit;" | perl -ne \
    #     'push(@c,$1) if /^    (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
    # done
    #
    # elfun: Elementary math functions
    # specfun: Special Math functions
    # elmat: Elementary matrices and matrix manipulation
    #
    # taken from Matlab version 7.4.0.336 (R2007a)
    #
    elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
             "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
             "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
             "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
             "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
             "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
             "angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
             "fix","floor","ceil","round","mod","rem","sign"]
    specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
               "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
               "erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
               "cross","dot","factor","isprime","primes","gcd","lcm","rat",
               "rats","perms","nchoosek","factorial","cart2sph","cart2pol",
               "pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
    elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
             "freqspace","meshgrid","accumarray","size","length","ndims","numel",
             "disp","isempty","isequal","isequalwithequalnans","cat","reshape",
             "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
             "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
             "ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
             "ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
             "isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
             "hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
             "wilkinson"]

    tokens = {
        'root': [
            # line starting with '!' is sent as a system command. not sure what
            # label to use...
            (r'^!.*', String.Other),
            (r'%.*$', Comment),
            (r'^\s*function', Keyword, 'deffunc'),

            # from 'iskeyword' on version 7.11 (R2010):
            (r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|'
             r'events|for|function|global|if|methods|otherwise|parfor|'
             r'persistent|properties|return|spmd|switch|try|while)\b', Keyword),

            ("(" + "|".join(elfun+specfun+elmat) + r')\b',  Name.Builtin),

            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),
            (r'(?<![\w\)\]])\'', String, 'string'),

            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r'[^\']*\'', String, '#pop')
        ],
        'deffunc': [
            # Function signature: optional "[outs] =" then name(args).
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }

    def analyse_text(text):
        # Heuristics only: a leading '%' comment or a '!' system command
        # strongly suggests Matlab.
        if re.match('^\s*%', text, re.M): # comment
            return 0.9
        elif re.match('^!\w+', text, re.M): # system cmd
            return 0.9
        return 0.1
# Matches a single line including its trailing newline; used by the
# session lexers below to walk input line by line.
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
    """
    For Matlab sessions. Modeled after PythonConsoleLexer.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    *New in Pygments 0.10.*
    """
    name = 'Matlab session'
    aliases = ['matlabsession']

    def get_tokens_unprocessed(self, text):
        mlexer = MatlabLexer(**self.options)

        # Accumulated code after the prompts; re-lexed with MatlabLexer
        # when a non-code (output) line is reached.
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('>>'):
                # First 3 characters are treated as the prompt; the rest
                # is collected as code.
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:3])]))
                curcode += line[3:]

            elif line.startswith('???'):
                idx = len(curcode)

                # without is showing error on same line as before...?
                line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append((idx, [token]))
            else:
                # Plain output line: first flush any pending code tokens
                # (with prompt/traceback insertions merged in).
                if curcode:
                    for item in do_insertions(
                            insertions, mlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []

                yield match.start(), Generic.Output, line

        # Flush trailing code that was not followed by output.
        if curcode: # or item:
            for item in do_insertions(
                    insertions, mlexer.get_tokens_unprocessed(curcode)):
                yield item
class OctaveLexer(RegexLexer):
    """
    For GNU Octave source code.
    *New in Pygments 1.5.*
    """
    name = 'Octave'
    aliases = ['octave']
    filenames = ['*.m']
    mimetypes = ['text/octave']
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # First dump all of the Octave manual into a plain text file:
    #
    # $ info octave --subnodes -o octave-manual
    #
    # Now grep through it:
    # for i in \
    #     "Built-in Function" "Command" "Function File" \
    #     "Loadable Function" "Mapping Function";
    # do
    #     perl -e '@name = qw('"$i"');
    #              print lc($name[0]),"_kw = [\n"';
    #
    #     perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
    #         octave-manual | sort | uniq ;
    #     echo "]" ;
    #     echo;
    # done
    # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
    builtin_kw = [ "addlistener", "addpath", "addproperty", "all",
                   "and", "any", "argnames", "argv", "assignin",
                   "atexit", "autoload",
                   "available_graphics_toolkits", "beep_on_error",
                   "bitand", "bitmax", "bitor", "bitshift", "bitxor",
                   "cat", "cell", "cellstr", "char", "class", "clc",
                   "columns", "command_line_path",
                   "completion_append_char", "completion_matches",
                   "complex", "confirm_recursive_rmdir", "cputime",
                   "crash_dumps_octave_core", "ctranspose", "cumprod",
                   "cumsum", "debug_on_error", "debug_on_interrupt",
                   "debug_on_warning", "default_save_options",
                   "dellistener", "diag", "diff", "disp",
                   "doc_cache_file", "do_string_escapes", "double",
                   "drawnow", "e", "echo_executing_commands", "eps",
                   "eq", "errno", "errno_list", "error", "eval",
                   "evalin", "exec", "exist", "exit", "eye", "false",
                   "fclear", "fclose", "fcntl", "fdisp", "feof",
                   "ferror", "feval", "fflush", "fgetl", "fgets",
                   "fieldnames", "file_in_loadpath", "file_in_path",
                   "filemarker", "filesep", "find_dir_in_path",
                   "fixed_point_format", "fnmatch", "fopen", "fork",
                   "formula", "fprintf", "fputs", "fread", "freport",
                   "frewind", "fscanf", "fseek", "fskipl", "ftell",
                   "functions", "fwrite", "ge", "genpath", "get",
                   "getegid", "getenv", "geteuid", "getgid",
                   "getpgrp", "getpid", "getppid", "getuid", "glob",
                   "gt", "gui_mode", "history_control",
                   "history_file", "history_size",
                   "history_timestamp_format_string", "home",
                   "horzcat", "hypot", "ifelse",
                   "ignore_function_time_stamp", "inferiorto",
                   "info_file", "info_program", "inline", "input",
                   "intmax", "intmin", "ipermute",
                   "is_absolute_filename", "isargout", "isbool",
                   "iscell", "iscellstr", "ischar", "iscomplex",
                   "isempty", "isfield", "isfloat", "isglobal",
                   "ishandle", "isieee", "isindex", "isinteger",
                   "islogical", "ismatrix", "ismethod", "isnull",
                   "isnumeric", "isobject", "isreal",
                   "is_rooted_relative_filename", "issorted",
                   "isstruct", "isvarname", "kbhit", "keyboard",
                   "kill", "lasterr", "lasterror", "lastwarn",
                   "ldivide", "le", "length", "link", "linspace",
                   "logical", "lstat", "lt", "make_absolute_filename",
                   "makeinfo_program", "max_recursion_depth", "merge",
                   "methods", "mfilename", "minus", "mislocked",
                   "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
                   "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
                   "munlock", "nargin", "nargout",
                   "native_float_format", "ndims", "ne", "nfields",
                   "nnz", "norm", "not", "numel", "nzmax",
                   "octave_config_info", "octave_core_file_limit",
                   "octave_core_file_name",
                   "octave_core_file_options", "ones", "or",
                   "output_max_field_width", "output_precision",
                   "page_output_immediately", "page_screen_output",
                   "path", "pathsep", "pause", "pclose", "permute",
                   "pi", "pipe", "plus", "popen", "power",
                   "print_empty_dimensions", "printf",
                   "print_struct_array_contents", "prod",
                   "program_invocation_name", "program_name",
                   "putenv", "puts", "pwd", "quit", "rats", "rdivide",
                   "readdir", "readlink", "read_readline_init_file",
                   "realmax", "realmin", "rehash", "rename",
                   "repelems", "re_read_readline_init_file", "reset",
                   "reshape", "resize", "restoredefaultpath",
                   "rethrow", "rmdir", "rmfield", "rmpath", "rows",
                   "save_header_format_string", "save_precision",
                   "saving_history", "scanf", "set", "setenv",
                   "shell_cmd", "sighup_dumps_octave_core",
                   "sigterm_dumps_octave_core", "silent_functions",
                   "single", "size", "size_equal", "sizemax",
                   "sizeof", "sleep", "source", "sparse_auto_mutate",
                   "split_long_rows", "sprintf", "squeeze", "sscanf",
                   "stat", "stderr", "stdin", "stdout", "strcmp",
                   "strcmpi", "string_fill_char", "strncmp",
                   "strncmpi", "struct", "struct_levels_to_print",
                   "strvcat", "subsasgn", "subsref", "sum", "sumsq",
                   "superiorto", "suppress_verbose_help_message",
                   "symlink", "system", "tic", "tilde_expand",
                   "times", "tmpfile", "tmpnam", "toc", "toupper",
                   "transpose", "true", "typeinfo", "umask", "uminus",
                   "uname", "undo_string_escapes", "unlink", "uplus",
                   "upper", "usage", "usleep", "vec", "vectorize",
                   "vertcat", "waitpid", "warning", "warranty",
                   "whos_line_format", "yes_or_no", "zeros",
                   "inf", "Inf", "nan", "NaN"]
    command_kw = [ "close", "load", "who", "whos", ]
    function_kw = [ "accumarray", "accumdim", "acosd", "acotd",
                    "acscd", "addtodate", "allchild", "ancestor",
                    "anova", "arch_fit", "arch_rnd", "arch_test",
                    "area", "arma_rnd", "arrayfun", "ascii", "asctime",
                    "asecd", "asind", "assert", "atand",
                    "autoreg_matrix", "autumn", "axes", "axis", "bar",
                    "barh", "bartlett", "bartlett_test", "beep",
                    "betacdf", "betainv", "betapdf", "betarnd",
                    "bicgstab", "bicubic", "binary", "binocdf",
                    "binoinv", "binopdf", "binornd", "bitcmp",
                    "bitget", "bitset", "blackman", "blanks",
                    "blkdiag", "bone", "box", "brighten", "calendar",
                    "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
                    "cauchy_rnd", "caxis", "celldisp", "center", "cgs",
                    "chisquare_test_homogeneity",
                    "chisquare_test_independence", "circshift", "cla",
                    "clabel", "clf", "clock", "cloglog", "closereq",
                    "colon", "colorbar", "colormap", "colperm",
                    "comet", "common_size", "commutation_matrix",
                    "compan", "compare_versions", "compass",
                    "computer", "cond", "condest", "contour",
                    "contourc", "contourf", "contrast", "conv",
                    "convhull", "cool", "copper", "copyfile", "cor",
                    "corrcoef", "cor_test", "cosd", "cotd", "cov",
                    "cplxpair", "cross", "cscd", "cstrcat", "csvread",
                    "csvwrite", "ctime", "cumtrapz", "curl", "cut",
                    "cylinder", "date", "datenum", "datestr",
                    "datetick", "datevec", "dblquad", "deal",
                    "deblank", "deconv", "delaunay", "delaunayn",
                    "delete", "demo", "detrend", "diffpara", "diffuse",
                    "dir", "discrete_cdf", "discrete_inv",
                    "discrete_pdf", "discrete_rnd", "display",
                    "divergence", "dlmwrite", "dos", "dsearch",
                    "dsearchn", "duplication_matrix", "durbinlevinson",
                    "ellipsoid", "empirical_cdf", "empirical_inv",
                    "empirical_pdf", "empirical_rnd", "eomday",
                    "errorbar", "etime", "etreeplot", "example",
                    "expcdf", "expinv", "expm", "exppdf", "exprnd",
                    "ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
                    "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
                    "factorial", "fail", "fcdf", "feather", "fftconv",
                    "fftfilt", "fftshift", "figure", "fileattrib",
                    "fileparts", "fill", "findall", "findobj",
                    "findstr", "finv", "flag", "flipdim", "fliplr",
                    "flipud", "fpdf", "fplot", "fractdiff", "freqz",
                    "freqz_plot", "frnd", "fsolve",
                    "f_test_regression", "ftp", "fullfile", "fzero",
                    "gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
                    "gcbf", "gcbo", "gcf", "genvarname", "geocdf",
                    "geoinv", "geopdf", "geornd", "getfield", "ginput",
                    "glpk", "gls", "gplot", "gradient",
                    "graphics_toolkit", "gray", "grid", "griddata",
                    "griddatan", "gtext", "gunzip", "gzip", "hadamard",
                    "hamming", "hankel", "hanning", "hggroup",
                    "hidden", "hilb", "hist", "histc", "hold", "hot",
                    "hotelling_test", "housh", "hsv", "hurst",
                    "hygecdf", "hygeinv", "hygepdf", "hygernd",
                    "idivide", "ifftshift", "image", "imagesc",
                    "imfinfo", "imread", "imshow", "imwrite", "index",
                    "info", "inpolygon", "inputname", "interpft",
                    "interpn", "intersect", "invhilb", "iqr", "isa",
                    "isdefinite", "isdir", "is_duplicate_entry",
                    "isequal", "isequalwithequalnans", "isfigure",
                    "ishermitian", "ishghandle", "is_leap_year",
                    "isletter", "ismac", "ismember", "ispc", "isprime",
                    "isprop", "isscalar", "issquare", "isstrprop",
                    "issymmetric", "isunix", "is_valid_file_id",
                    "isvector", "jet", "kendall",
                    "kolmogorov_smirnov_cdf",
                    "kolmogorov_smirnov_test", "kruskal_wallis_test",
                    "krylov", "kurtosis", "laplace_cdf", "laplace_inv",
                    "laplace_pdf", "laplace_rnd", "legend", "legendre",
                    "license", "line", "linkprop", "list_primes",
                    "loadaudio", "loadobj", "logistic_cdf",
                    "logistic_inv", "logistic_pdf", "logistic_rnd",
                    "logit", "loglog", "loglogerr", "logm", "logncdf",
                    "logninv", "lognpdf", "lognrnd", "logspace",
                    "lookfor", "ls_command", "lsqnonneg", "magic",
                    "mahalanobis", "manova", "matlabroot",
                    "mcnemar_test", "mean", "meansq", "median", "menu",
                    "mesh", "meshc", "meshgrid", "meshz", "mexext",
                    "mget", "mkpp", "mode", "moment", "movefile",
                    "mpoles", "mput", "namelengthmax", "nargchk",
                    "nargoutchk", "nbincdf", "nbininv", "nbinpdf",
                    "nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
                    "nonzeros", "normcdf", "normest", "norminv",
                    "normpdf", "normrnd", "now", "nthroot", "null",
                    "ocean", "ols", "onenormest", "optimget",
                    "optimset", "orderfields", "orient", "orth",
                    "pack", "pareto", "parseparams", "pascal", "patch",
                    "pathdef", "pcg", "pchip", "pcolor", "pcr",
                    "peaks", "periodogram", "perl", "perms", "pie",
                    "pink", "planerot", "playaudio", "plot",
                    "plotmatrix", "plotyy", "poisscdf", "poissinv",
                    "poisspdf", "poissrnd", "polar", "poly",
                    "polyaffine", "polyarea", "polyderiv", "polyfit",
                    "polygcd", "polyint", "polyout", "polyreduce",
                    "polyval", "polyvalm", "postpad", "powerset",
                    "ppder", "ppint", "ppjumps", "ppplot", "ppval",
                    "pqpnonneg", "prepad", "primes", "print",
                    "print_usage", "prism", "probit", "qp", "qqplot",
                    "quadcc", "quadgk", "quadl", "quadv", "quiver",
                    "qzhess", "rainbow", "randi", "range", "rank",
                    "ranks", "rat", "reallog", "realpow", "realsqrt",
                    "record", "rectangle_lw", "rectangle_sw",
                    "rectint", "refresh", "refreshdata",
                    "regexptranslate", "repmat", "residue", "ribbon",
                    "rindex", "roots", "rose", "rosser", "rotdim",
                    "rref", "run", "run_count", "rundemos", "run_test",
                    "runtests", "saveas", "saveaudio", "saveobj",
                    "savepath", "scatter", "secd", "semilogx",
                    "semilogxerr", "semilogy", "semilogyerr",
                    "setaudio", "setdiff", "setfield", "setxor",
                    "shading", "shift", "shiftdim", "sign_test",
                    "sinc", "sind", "sinetone", "sinewave", "skewness",
                    "slice", "sombrero", "sortrows", "spaugment",
                    "spconvert", "spdiags", "spearman", "spectral_adf",
                    "spectral_xdf", "specular", "speed", "spencer",
                    "speye", "spfun", "sphere", "spinmap", "spline",
                    "spones", "sprand", "sprandn", "sprandsym",
                    "spring", "spstats", "spy", "sqp", "stairs",
                    "statistics", "std", "stdnormal_cdf",
                    "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
                    "stem", "stft", "strcat", "strchr", "strjust",
                    "strmatch", "strread", "strsplit", "strtok",
                    "strtrim", "strtrunc", "structfun", "studentize",
                    "subplot", "subsindex", "subspace", "substr",
                    "substruct", "summer", "surf", "surface", "surfc",
                    "surfl", "surfnorm", "svds", "swapbytes",
                    "sylvester_matrix", "symvar", "synthesis", "table",
                    "tand", "tar", "tcdf", "tempdir", "tempname",
                    "test", "text", "textread", "textscan", "tinv",
                    "title", "toeplitz", "tpdf", "trace", "trapz",
                    "treelayout", "treeplot", "triangle_lw",
                    "triangle_sw", "tril", "trimesh", "triplequad",
                    "triplot", "trisurf", "triu", "trnd", "tsearchn",
                    "t_test", "t_test_regression", "type", "unidcdf",
                    "unidinv", "unidpdf", "unidrnd", "unifcdf",
                    "unifinv", "unifpdf", "unifrnd", "union", "unique",
                    "unix", "unmkpp", "unpack", "untabify", "untar",
                    "unwrap", "unzip", "u_test", "validatestring",
                    "vander", "var", "var_test", "vech", "ver",
                    "version", "view", "voronoi", "voronoin",
                    "waitforbuttonpress", "wavread", "wavwrite",
                    "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
                    "welch_test", "what", "white", "whitebg",
                    "wienrnd", "wilcoxon_test", "wilkinson", "winter",
                    "xlabel", "xlim", "ylabel", "yulewalker", "zip",
                    "zlabel", "z_test", ]
    loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli",
                    "besselj", "besselk", "bessely", "bitpack",
                    "bsxfun", "builtin", "ccolamd", "cellfun",
                    "cellslices", "chol", "choldelete", "cholinsert",
                    "cholinv", "cholshift", "cholupdate", "colamd",
                    "colloc", "convhulln", "convn", "csymamd",
                    "cummax", "cummin", "daspk", "daspk_options",
                    "dasrt", "dasrt_options", "dassl", "dassl_options",
                    "dbclear", "dbdown", "dbstack", "dbstatus",
                    "dbstop", "dbtype", "dbup", "dbwhere", "det",
                    "dlmread", "dmperm", "dot", "eig", "eigs",
                    "endgrent", "endpwent", "etree", "fft", "fftn",
                    "fftw", "filter", "find", "full", "gcd",
                    "getgrent", "getgrgid", "getgrnam", "getpwent",
                    "getpwnam", "getpwuid", "getrusage", "givens",
                    "gmtime", "gnuplot_binary", "hess", "ifft",
                    "ifftn", "inv", "isdebugmode", "issparse", "kron",
                    "localtime", "lookup", "lsode", "lsode_options",
                    "lu", "luinc", "luupdate", "matrix_type", "max",
                    "min", "mktime", "pinv", "qr", "qrdelete",
                    "qrinsert", "qrshift", "qrupdate", "quad",
                    "quad_options", "qz", "rand", "rande", "randg",
                    "randn", "randp", "randperm", "rcond", "regexp",
                    "regexpi", "regexprep", "schur", "setgrent",
                    "setpwent", "sort", "spalloc", "sparse", "spparms",
                    "sprank", "sqrtm", "strfind", "strftime",
                    "strptime", "strrep", "svd", "svd_driver", "syl",
                    "symamd", "symbfact", "symrcm", "time", "tsearch",
                    "typecast", "urlread", "urlwrite", ]
    mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc",
                   "acsch", "angle", "arg", "asec", "asech", "asin",
                   "asinh", "atan", "atanh", "beta", "betainc",
                   "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
                   "cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
                   "erfcx", "erfinv", "exp", "finite", "fix", "floor",
                   "fmod", "gamma", "gammainc", "gammaln", "imag",
                   "isalnum", "isalpha", "isascii", "iscntrl",
                   "isdigit", "isfinite", "isgraph", "isinf",
                   "islower", "isna", "isnan", "isprint", "ispunct",
                   "isspace", "isupper", "isxdigit", "lcm", "lgamma",
                   "log", "lower", "mod", "real", "rem", "round",
                   "roundb", "sec", "sech", "sign", "sin", "sinh",
                   "sqrt", "tan", "tanh", "toascii", "tolower", "xor",
                   ]
    builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
                       "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
                       "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
                       "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
                       "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
                       "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
                       "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
                       "WSTOPSIG", "WTERMSIG", "WUNTRACED", ]
    tokens = {
        'root': [
            # We should look into multiline comments
            (r'[%#].*$', Comment),
            (r'^\s*function', Keyword, 'deffunc'),
            # from 'iskeyword' on hg changeset 8cc154f45e37
            (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
             r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
             r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
             r'endswitch|endwhile|events|for|function|get|global|if|methods|'
             r'otherwise|persistent|properties|return|set|static|switch|try|'
             r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),
            ("(" + "|".join( builtin_kw + command_kw
                           + function_kw + loadable_kw
                           + mapping_kw) + r')\b', Name.Builtin),
            ("(" + "|".join(builtin_consts) + r')\b', Name.Constant),
            # operators in Octave but not Matlab:
            (r'-=|!=|!|/=|--', Operator),
            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators in Octave but not Matlab requiring escape for re:
            (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),
            (r'"[^"]*"', String),
            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),
            (r'(?<![\w\)\]])\'', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
        ],
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }
    def analyse_text(text):
        """Score the likelihood that *text* is Octave source (0.9 for a
        leading ``%``/``#`` comment, 0.1 otherwise)."""
        # Fix: raw string so `\s` is a regex escape, not an invalid
        # (deprecated) string escape sequence.
        if re.match(r'^\s*[%#]', text, re.M):  # comment
            return 0.9
        return 0.1
class NumPyLexer(PythonLexer):
    """
    A Python lexer recognizing Numerical Python builtins.
    *New in Pygments 0.10.*
    """
    name = 'NumPy'
    aliases = ['numpy']
    # override the mimetypes to not inherit them from python
    mimetypes = []
    filenames = []
    # Names from the numpy namespace to highlight as pseudo-keywords
    # instead of plain names.
    EXTRA_KEYWORDS = set([
        'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
        'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
        'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
        'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
        'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
        'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
        'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
        'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
        'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
        'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
        'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
        'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
        'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
        'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
        'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
        'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
        'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
        'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
        'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
        'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
        'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
        'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
        'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
        'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
        'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
        'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
        'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
        'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
        'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
        'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
        'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
        'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
        'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
        'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
        'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
        'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
        'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
        'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
        'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
        'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
        'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
        'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
        'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
        'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
        'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
        'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
        'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
        'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
        'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
        'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
        'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
        'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
        'set_numeric_ops', 'set_printoptions', 'set_string_function',
        'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
        'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
        'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
        'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
        'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
        'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
        'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
        'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
        'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
        'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
        'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
    ])
    def get_tokens_unprocessed(self, text):
        # Re-tag plain Name tokens that match a NumPy builtin as
        # Keyword.Pseudo; all other tokens pass through unchanged.
        for index, token, value in \
                PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.
    """
    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']
    def get_tokens_unprocessed(self, text):
        # Input lines start with '>' (new statement) or '+' (continuation);
        # code from those lines is highlighted with an SLexer, everything
        # else is treated as R output.
        slexer = SLexer(**self.options)
        current_code_block = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    for item in do_insertions(insertions,
                            slexer.get_tokens_unprocessed(current_code_block)):
                        yield item
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line
        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            for item in do_insertions(insertions,
                    slexer.get_tokens_unprocessed(current_code_block)):
                yield item
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.
    *New in Pygments 0.10.*
    """
    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R']
    mimetypes = ['text/S-plus', 'text/S', 'text/R']
    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (r'[a-zA-Z][0-9a-zA-Z\._]+', Text),
            # backtick-quoted names may contain otherwise-invalid characters
            (r'`.+`', String.Backtick),
        ],
        'punctuation': [
            (r'\[|\]|\[\[|\]\]|\$|\(|\)|@|:::?|;|,', Punctuation),
        ],
        'keywords': [
            (r'for(?=\s*\()|while(?=\s*\()|if(?=\s*\()|(?<=\s)else|'
             r'(?<=\s)break(?=;|$)|return(?=\s*\()|function(?=\s*\()',
             Keyword.Reserved)
        ],
        'operators': [
            (r'<-|-|==|<=|>=|<|>|&&|&|!=|\|\|?', Operator),
            (r'\*|\+|\^|/|%%|%/%|=', Operator),
            # Fix: '*' must be escaped so R's matrix-multiplication operator
            # %*% is matched literally.  Unescaped, '%*%' parses as
            # "zero-or-more '%' then '%'" and never matches '%*%' itself.
            (r'%in%|%\*%', Operator)
        ],
        'builtin_symbols': [
            (r'(NULL|NA|TRUE|FALSE|NaN)\b', Keyword.Constant),
            (r'(T|F)\b', Keyword.Variable),
        ],
        'numbers': [
            (r'(?<![0-9a-zA-Z\)\}\]`\"])(?=\s*)[-\+]?[0-9]+'
             r'(\.[0-9]*)?(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
            (r'\.[0-9]*(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
            include('valid_name'),
        ],
        'root': [
            include('statements'),
            # blocks:
            (r'\{|\}', Punctuation),
            #(r'\{', Punctuation, 'block'),
            (r'.', Text),
        ],
        #'block': [
        #    include('statements'),
        #    ('\{', Punctuation, '#push'),
        #    ('\}', Punctuation, '#pop')
        #],
        'string_squote': [
            (r'[^\']*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'[^\"]*\"', String, '#pop'),
        ],
    }
    def analyse_text(text):
        # The assignment arrow is a strong hint that this is S/R source.
        return '<-' in text
| |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy imports for heavy dependencies."""
import functools
import importlib
from typing import Any, Callable, TypeVar
from tensorflow_datasets.core.utils import py_utils as utils
_Fn = TypeVar("_Fn")
def _try_import(module_name):
"""Try importing a module, with an informative error message on failure."""
try:
mod = importlib.import_module(module_name)
return mod
except ImportError as e:
err_msg = ("Failed importing {name}. This likely means that the dataset "
"requires additional dependencies that have to be "
"manually installed (usually with `pip install {name}`). See "
"setup.py extras_require.").format(name=module_name)
utils.reraise(e, suffix=err_msg)
class LazyImporter(object):
  """Lazy importer for heavy dependencies.
  Some datasets require heavy dependencies for data generation. To allow for
  the default installation to remain lean, those heavy dependencies are
  lazily imported here.

  Each classproperty below performs the import on first access via
  `_try_import`, which raises an informative error if the optional
  dependency is missing.
  """
  @utils.classproperty
  @classmethod
  def apache_beam(cls):
    return _try_import("apache_beam")
  @utils.classproperty
  @classmethod
  def bs4(cls):
    return _try_import("bs4")
  @utils.classproperty
  @classmethod
  def crepe(cls):
    return _try_import("crepe")
  @utils.classproperty
  @classmethod
  def cv2(cls):
    return _try_import("cv2")
  @utils.classproperty
  @classmethod
  def envlogger(cls):
    # Import the reader submodule, not the bare `envlogger` package.
    return _try_import("envlogger.reader")
  @utils.classproperty
  @classmethod
  def gcsfs_store(cls):
    # Returns the bound `get_mapper` method of an anonymous-access
    # GCSFileSystem, not the gcsfs module itself.
    return _try_import("gcsfs").GCSFileSystem(token='anon').get_mapper
  @utils.classproperty
  @classmethod
  def gcld3(cls):
    return _try_import("gcld3")  # pylint: disable=unreachable
  @utils.classproperty
  @classmethod
  def h5py(cls):
    return _try_import("h5py")
  @utils.classproperty
  @classmethod
  def jax(cls):
    return _try_import("jax")
  @utils.classproperty
  @classmethod
  def langdetect(cls):
    return _try_import("langdetect")
  @utils.classproperty
  @classmethod
  def librosa(cls):
    return _try_import("librosa")
  @utils.classproperty
  @classmethod
  def lxml(cls):
    return _try_import("lxml")
  @utils.classproperty
  @classmethod
  def matplotlib(cls):
    # Importing pyplot first registers the submodule on the package.
    _try_import("matplotlib.pyplot")
    return _try_import("matplotlib")
  @utils.classproperty
  @classmethod
  def mwparserfromhell(cls):
    return _try_import("mwparserfromhell")
  @utils.classproperty
  @classmethod
  def networkx(cls):
    return _try_import("networkx")
  @utils.classproperty
  @classmethod
  def nltk(cls):
    return _try_import("nltk")
  @utils.classproperty
  @classmethod
  def pandas(cls):
    return _try_import("pandas")
  @utils.classproperty
  @classmethod
  def PIL_Image(cls):  # pylint: disable=invalid-name
    # TiffImagePlugin need to be activated explicitly on some systems
    # https://github.com/python-pillow/Pillow/blob/5.4.x/src/PIL/Image.py#L407
    _try_import("PIL.TiffImagePlugin")
    return _try_import("PIL.Image")
  @utils.classproperty
  @classmethod
  def PIL_ImageDraw(cls):  # pylint: disable=invalid-name
    return _try_import("PIL.ImageDraw")
  @utils.classproperty
  @classmethod
  def pretty_midi(cls):
    return _try_import("pretty_midi")
  @utils.classproperty
  @classmethod
  def pycocotools(cls):
    return _try_import("pycocotools.mask")
  @utils.classproperty
  @classmethod
  def pydub(cls):
    return _try_import("pydub")
  @utils.classproperty
  @classmethod
  def scipy(cls):
    # Pre-import the submodules used by datasets so attribute access on the
    # returned package works.
    _try_import("scipy.io")
    _try_import("scipy.io.wavfile")
    _try_import("scipy.ndimage")
    return _try_import("scipy")
  @utils.classproperty
  @classmethod
  def skimage(cls):
    _try_import("skimage.color")
    _try_import("skimage.filters")
    # skimage.external.tifffile was removed in newer scikit-image releases;
    # tolerate its absence.
    try:
      _try_import("skimage.external.tifffile")
    except ImportError:
      pass
    return _try_import("skimage")
  @utils.classproperty
  @classmethod
  def tifffile(cls):
    return _try_import("tifffile")
  @utils.classproperty
  @classmethod
  def tensorflow_data_validation(cls):
    return _try_import("tensorflow_data_validation")
  @utils.classproperty
  @classmethod
  def tensorflow_io(cls):
    return _try_import("tensorflow_io")
  @utils.classproperty
  @classmethod
  def tldextract(cls):
    return _try_import("tldextract")
  @utils.classproperty
  @classmethod
  def os(cls):
    """For testing purposes only."""
    return _try_import("os")
  @utils.classproperty
  @classmethod
  def test_foo(cls):
    """For testing purposes only."""
    return _try_import("test_foo")
  @utils.classproperty
  @classmethod
  def zarr(cls):
    return _try_import("zarr")
lazy_imports = LazyImporter # pylint: disable=invalid-name
def beam_ptransform_fn(fn: Callable[..., Any]) -> Callable[..., Any]:
  """Lazy version of `@beam.ptransform_fn`.

  Defers both the `apache_beam` import and the actual decoration until the
  wrapped function is first invoked, so this module can be imported without
  beam installed.

  Args:
    fn: The function to (lazily) decorate with `beam.ptransform_fn`.

  Returns:
    A wrapper with `fn`'s metadata that decorates and delegates on demand.
  """
  cached_transform = None

  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    nonlocal cached_transform
    # Decorate only on the first call, then reuse the decorated function.
    if cached_transform is None:
      cached_transform = lazy_imports.apache_beam.ptransform_fn(fn)
    return cached_transform(*args, **kwargs)

  return wrapper
| |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import hashlib
import json
import os
import platform
import re
import subprocess
import sys
import sysconfig
from collections import OrderedDict
from textwrap import dedent
from pex import third_party
from pex.common import is_exe, safe_mkdtemp, safe_rmtree
from pex.compatibility import string
from pex.executor import Executor
from pex.jobs import ErrorHandler, Job, Retain, SpawnedJob, execute_parallel
from pex.orderedset import OrderedSet
from pex.pep_425 import CompatibilityTags
from pex.pep_508 import MarkerEnvironment
from pex.platforms import Platform
from pex.pyenv import Pyenv
from pex.third_party.packaging import tags
from pex.third_party.pkg_resources import Distribution, Requirement
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast, overload
from pex.util import CacheHelper
from pex.variables import ENV
# Type-only imports and aliases: evaluated by type checkers, skipped at
# runtime (the `# type:` comments elsewhere reference these names).
if TYPE_CHECKING:
    from typing import (
        Any,
        AnyStr,
        Callable,
        Dict,
        Iterable,
        Iterator,
        List,
        Mapping,
        MutableMapping,
        Optional,
        Text,
        Tuple,
        Union,
    )
    # Predicate used to filter candidate interpreter binary paths.
    PathFilter = Callable[[str], bool]
    InterpreterIdentificationJobError = Tuple[str, Union[Job.Error, Exception]]
    InterpreterOrJobError = Union["PythonInterpreter", InterpreterIdentificationJobError]
    # N.B.: We convert InterpreterIdentificationJobErrors that result from spawning interpreter
    # identification jobs to these end-user InterpreterIdentificationErrors for display.
    InterpreterIdentificationError = Tuple[str, Text]
    InterpreterOrError = Union["PythonInterpreter", InterpreterIdentificationError]
class PythonIdentity(object):
    # Root of the PythonIdentity error hierarchy.
    class Error(Exception):
        pass
    # NOTE(review): specific raise sites are outside this chunk — presumably
    # raised for malformed identity data; confirm against the rest of the file.
    class InvalidError(Error):
        pass
    class UnknownRequirement(Error):
        pass
    # Maps interpreter abbreviations (as used in PEP 425 python tags, e.g.
    # "cp39") to human-readable interpreter names.
    ABBR_TO_INTERPRETER_NAME = {
        "pp": "PyPy",
        "cp": "CPython",
    }
@staticmethod
def _normalize_macosx_deployment_target(value):
# type: (Any) -> Optional[str]
# N.B.: Sometimes MACOSX_DEPLOYMENT_TARGET can be configured as a float.
# See: https://github.com/pantsbuild/pex/issues/1337
if value is None:
return None
return str(value)
    @classmethod
    def get(cls, binary=None):
        # type: (Optional[str]) -> PythonIdentity
        """Build a PythonIdentity describing the currently running interpreter.

        :param binary: Optional path to record as the interpreter binary;
                       defaults to ``sys.executable`` (and is overridden by
                       ``sys.executable`` in the shim case noted below).
        """
        # N.B.: We should not need to look past `sys.executable` to learn the current interpreter's
        # executable path, but on OSX there has been a bug where the `sys.executable` reported is
        # _not_ the path of the current interpreter executable:
        #   https://bugs.python.org/issue22490#msg283859
        # That case is distinguished by the presence of a `__PYVENV_LAUNCHER__` environment
        # variable as detailed in the Python bug linked above.
        if binary and binary != sys.executable and "__PYVENV_LAUNCHER__" not in os.environ:
            # Here we assume sys.executable is accurate and binary is something like a pyenv shim.
            binary = sys.executable
        # The first supported tag is the most specific/preferred one.
        supported_tags = tuple(tags.sys_tags())
        preferred_tag = supported_tags[0]
        configured_macosx_deployment_target = cls._normalize_macosx_deployment_target(
            sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
        )
        # Pex identifies interpreters using a bit of Pex code injected via an extraction of that
        # code under the `PEX_ROOT` adjoined to `sys.path` via `PYTHONPATH`. We ignore such adjoined
        # `sys.path` entries to discover the true base interpreter `sys.path`.
        pythonpath = frozenset(os.environ.get("PYTHONPATH", "").split(os.pathsep))
        sys_path = [item for item in sys.path if item and item not in pythonpath]
        return cls(
            binary=binary or sys.executable,
            prefix=sys.prefix,
            base_prefix=(
                # Old virtualenv (16 series and lower) sets `sys.real_prefix` in all cases.
                cast("Optional[str]", getattr(sys, "real_prefix", None))
                # Both pyvenv and virtualenv 20+ set `sys.base_prefix` as per
                # https://www.python.org/dev/peps/pep-0405/.
                or cast(str, getattr(sys, "base_prefix", sys.prefix))
            ),
            sys_path=sys_path,
            python_tag=preferred_tag.interpreter,
            abi_tag=preferred_tag.abi,
            platform_tag=preferred_tag.platform,
            version=sys.version_info[:3],
            supported_tags=supported_tags,
            env_markers=MarkerEnvironment.default(),
            configured_macosx_deployment_target=configured_macosx_deployment_target,
        )
@classmethod
def decode(cls, encoded):
TRACER.log("creating PythonIdentity from encoded: %s" % encoded, V=9)
values = json.loads(encoded)
if len(values) != 11:
raise cls.InvalidError("Invalid interpreter identity: %s" % encoded)
supported_tags = values.pop("supported_tags")
def iter_tags():
for (interpreter, abi, platform) in supported_tags:
yield tags.Tag(interpreter=interpreter, abi=abi, platform=platform)
# N.B.: Old encoded identities may have numeric values; so we support these and convert
# back to strings here as needed. See: https://github.com/pantsbuild/pex/issues/1337
configured_macosx_deployment_target = cls._normalize_macosx_deployment_target(
values.pop("configured_macosx_deployment_target")
)
env_markers = MarkerEnvironment(**values.pop("env_markers"))
return cls(
supported_tags=iter_tags(),
configured_macosx_deployment_target=configured_macosx_deployment_target,
env_markers=env_markers,
**values
)
@classmethod
def _find_interpreter_name(cls, python_tag):
for abbr, interpreter in cls.ABBR_TO_INTERPRETER_NAME.items():
if python_tag.startswith(abbr):
return interpreter
raise ValueError("Unknown interpreter: {}".format(python_tag))
def __init__(
self,
binary, # type: str
prefix, # type: str
base_prefix, # type: str
sys_path, # type: Iterable[str]
python_tag, # type: str
abi_tag, # type: str
platform_tag, # type: str
version, # type: Iterable[int]
supported_tags, # type: Iterable[tags.Tag]
env_markers, # type: MarkerEnvironment
configured_macosx_deployment_target, # type: Optional[str]
):
# type: (...) -> None
# N.B.: We keep this mapping to support historical values for `distribution` and
# `requirement` properties.
self._interpreter_name = self._find_interpreter_name(python_tag)
self._binary = binary
self._prefix = prefix
self._base_prefix = base_prefix
self._sys_path = tuple(sys_path)
self._python_tag = python_tag
self._abi_tag = abi_tag
self._platform_tag = platform_tag
self._version = tuple(version)
self._supported_tags = CompatibilityTags(tags=supported_tags)
self._env_markers = env_markers
self._configured_macosx_deployment_target = configured_macosx_deployment_target
def encode(self):
values = dict(
binary=self._binary,
prefix=self._prefix,
base_prefix=self._base_prefix,
sys_path=self._sys_path,
python_tag=self._python_tag,
abi_tag=self._abi_tag,
platform_tag=self._platform_tag,
version=self._version,
supported_tags=[
(tag.interpreter, tag.abi, tag.platform) for tag in self._supported_tags
],
env_markers=self._env_markers.as_dict(),
configured_macosx_deployment_target=self._configured_macosx_deployment_target,
)
return json.dumps(values, sort_keys=True)
@property
def binary(self):
return self._binary
@property
def prefix(self):
# type: () -> str
return self._prefix
@property
def base_prefix(self):
# type: () -> str
return self._base_prefix
@property
def sys_path(self):
# type: () -> Tuple[str, ...]
return self._sys_path
@property
def python_tag(self):
return self._python_tag
@property
def abi_tag(self):
return self._abi_tag
@property
def platform_tag(self):
return self._platform_tag
@property
def version(self):
# type: () -> Tuple[int, int, int]
"""The interpreter version as a normalized tuple.
Consistent with `sys.version_info`, the tuple corresponds to `<major>.<minor>.<micro>`.
"""
return cast("Tuple[int, int, int]", self._version)
@property
def version_str(self):
# type: () -> str
return ".".join(map(str, self.version))
@property
def supported_tags(self):
# type: () -> CompatibilityTags
return self._supported_tags
@property
def env_markers(self):
# type: () -> MarkerEnvironment
return self._env_markers
@property
def configured_macosx_deployment_target(self):
# type: () -> Optional[str]
return self._configured_macosx_deployment_target
@property
def interpreter(self):
return self._interpreter_name
@property
def requirement(self):
# type: () -> Requirement
return self.distribution.as_requirement()
@property
def distribution(self):
# type: () -> Distribution
return Distribution(project_name=self.interpreter, version=self.version_str)
def iter_supported_platforms(self):
# type: () -> Iterator[Platform]
"""All platforms supported by the associated interpreter ordered from most specific to
least."""
yield Platform(
platform=self._platform_tag,
impl=self.python_tag[:2],
version=self.version_str,
version_info=self.version,
abi=self.abi_tag,
)
for tag in self._supported_tags:
yield Platform.from_tag(tag)
@classmethod
def parse_requirement(cls, requirement, default_interpreter="CPython"):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse("%s%s" % (default_interpreter, requirement))
except ValueError:
raise ValueError("Unknown requirement string: %s" % requirement)
return requirement
else:
raise ValueError("Unknown requirement type: %r" % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter_name)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
# type: () -> str
if self._interpreter_name == "PyPy":
hashbang_string = "pypy" if self._version[0] == 2 else "pypy{}".format(self._version[0])
else:
hashbang_string = "python{}.{}".format(self._version[0], self._version[1])
return "#!/usr/bin/env {}".format(hashbang_string)
@property
def python(self):
# type: () -> str
# return the python version in the format of the 'python' key for distributions
# specifically, '2.7', '3.2', etc.
return "%d.%d" % (self.version[0:2])
def __str__(self):
# type: () -> str
# N.B.: Kept as distinct from __repr__ to support legacy str(identity) used by Pants v1 when
# forming cache locations.
return "{interpreter_name}-{major}.{minor}.{patch}".format(
interpreter_name=self._interpreter_name,
major=self._version[0],
minor=self._version[1],
patch=self._version[2],
)
def __repr__(self):
# type: () -> str
return (
"{type}({binary!r}, {python_tag!r}, {abi_tag!r}, {platform_tag!r}, {version!r})".format(
type=self.__class__.__name__,
binary=self._binary,
python_tag=self._python_tag,
abi_tag=self._abi_tag,
platform_tag=self._platform_tag,
version=self._version,
)
)
def _tup(self):
return self._binary, self._python_tag, self._abi_tag, self._platform_tag, self._version
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
return self._tup() == other._tup()
def __hash__(self):
# type: () -> int
return hash(self._tup())
class PythonInterpreter(object):
    """Models a Python interpreter on the local system and how to identify and select one."""

    # Patterns accepted as python interpreter binary basenames.
    _REGEXEN = (
        # NB: OSX ships python binaries named Python with a capital-P; so we allow for this.
        re.compile(r"^Python$"),
        re.compile(
            r"""
            ^
            (?:
                python |
                pypy
            )
            (?:
                # Major version
                [2-9]
                (?:\.
                    # Minor version
                    [0-9]+
                    # Some distributions include a suffix on the interpreter name, similar to
                    # PEP-3149. For example, Gentoo has /usr/bin/python3.6m to indicate it was
                    # built with pymalloc
                    [a-z]?
                )?
            )?
            $
            """,
            flags=re.VERBOSE,
        ),
    )
    # Process-local cache of identified interpreters, keyed by canonicalized binary path.
    # Populated as the final step of `__init__`.
    _PYTHON_INTERPRETER_BY_NORMALIZED_PATH = {}  # type: Dict
@staticmethod
def _get_pyvenv_cfg(path):
# type: (str) -> Optional[str]
# See: https://www.python.org/dev/peps/pep-0405/#specification
pyvenv_cfg_path = os.path.join(path, "pyvenv.cfg")
if os.path.isfile(pyvenv_cfg_path):
with open(pyvenv_cfg_path) as fp:
for line in fp:
name, _, value = line.partition("=")
if name.strip() == "home":
return pyvenv_cfg_path
return None
@classmethod
def _find_pyvenv_cfg(cls, maybe_venv_python_binary):
# type: (str) -> Optional[str]
# A pyvenv is identified by a pyvenv.cfg file with a home key in one of the two following
# directory layouts:
#
# 1. <venv dir>/
# bin/
# pyvenv.cfg
# python*
#
# 2. <venv dir>/
# pyvenv.cfg
# bin/
# python*
#
# In practice, we see layout 2 in the wild, but layout 1 is also allowed by the spec.
#
# See: # See: https://www.python.org/dev/peps/pep-0405/#specification
maybe_venv_bin_dir = os.path.dirname(maybe_venv_python_binary)
pyvenv_cfg = cls._get_pyvenv_cfg(maybe_venv_bin_dir)
if not pyvenv_cfg:
maybe_venv_dir = os.path.dirname(maybe_venv_bin_dir)
pyvenv_cfg = cls._get_pyvenv_cfg(maybe_venv_dir)
return pyvenv_cfg
    @classmethod
    def _resolve_pyvenv_canonical_python_binary(
        cls,
        real_binary,  # type: str
        maybe_venv_python_binary,  # type: str
    ):
        # type: (...) -> Optional[str]
        """Resolve the canonical in-venv python binary for a PEP-405 venv symlink.

        Returns `None` when `maybe_venv_python_binary` is not a symlink homed in a venv;
        otherwise returns the last symlink within the venv bin dir on the chain that leads
        out of it.

        N.B.: `real_binary` is accepted for signature parity with the caller but is not
        consulted here.
        """
        maybe_venv_python_binary = os.path.abspath(maybe_venv_python_binary)
        if not os.path.islink(maybe_venv_python_binary):
            return None
        pyvenv_cfg = cls._find_pyvenv_cfg(maybe_venv_python_binary)
        if pyvenv_cfg is None:
            return None
        # Walk the symlink chain one hop at a time, stopping at the first hop that leaves the
        # venv bin dir.
        while os.path.islink(maybe_venv_python_binary):
            resolved = os.readlink(maybe_venv_python_binary)
            if not os.path.isabs(resolved):
                resolved = os.path.abspath(
                    os.path.join(os.path.dirname(maybe_venv_python_binary), resolved)
                )
            if os.path.dirname(resolved) == os.path.dirname(maybe_venv_python_binary):
                maybe_venv_python_binary = resolved
            else:
                # We've escaped the venv bin dir; so the last resolved link was the
                # canonical venv Python binary.
                #
                # For example, for:
                #   ./venv/bin/
                #     python -> python3.8
                #     python3 -> python3.8
                #     python3.8 -> /usr/bin/python3.8
                #
                # We want to resolve each of ./venv/bin/python{,3{,.8}} to the canonical
                # ./venv/bin/python3.8 which is the symlink that points to the home binary.
                break
        return maybe_venv_python_binary
@classmethod
def canonicalize_path(cls, path):
# type: (str) -> str
"""Canonicalize a potential Python interpreter path.
This will return a path-equivalent of the given `path` in canonical form for use in cache
keys.
N.B.: If the path is a venv symlink it will not be fully de-referenced in order to maintain
fidelity with the requested venv Python binary choice.
"""
real_binary = os.path.realpath(path)
# If the path is a PEP-405 venv interpreter symlink we do not want to resolve outside of the
# venv in order to stay faithful to the binary path choice.
return (
cls._resolve_pyvenv_canonical_python_binary(
real_binary=real_binary, maybe_venv_python_binary=path
)
or real_binary
)
    class Error(Exception):
        """Base error type for PythonInterpreter operations."""
        pass
    class IdentificationError(Error):
        """Indicates a python binary could not be identified as a valid interpreter."""
        pass
    class InterpreterNotFound(Error):
        """Indicates an expected interpreter binary could not be found on disk."""
        pass
@staticmethod
def latest_release_of_min_compatible_version(interps):
# type: (Iterable[PythonInterpreter]) -> PythonInterpreter
"""Find the minimum major version, but use the most recent micro version within that minor
version.
That is, prefer 3.6.1 over 3.6.0, and prefer both over 3.7.*.
"""
assert interps, "No interpreters passed to `PythonInterpreter.safe_min()`"
return min(
interps, key=lambda interp: (interp.version[0], interp.version[1], -interp.version[2])
)
@classmethod
def get(cls):
# type: () -> PythonInterpreter
return cls.from_binary(sys.executable)
@staticmethod
def _paths(paths=None):
# type: (Optional[Iterable[str]]) -> Iterable[str]
# NB: If `paths=[]`, we will not read $PATH.
return OrderedSet(paths if paths is not None else os.getenv("PATH", "").split(os.pathsep))
@classmethod
def iter(cls, paths=None):
# type: (Optional[Iterable[str]]) -> Iterator[PythonInterpreter]
"""Iterate all valid interpreters found in `paths`.
NB: The paths can either be directories to search for python binaries or the paths of python
binaries themselves.
:param paths: The paths to look for python interpreters; by default the `PATH`.
"""
return cls._filter(cls._find(cls._paths(paths=paths)))
    @classmethod
    def iter_candidates(cls, paths=None, path_filter=None):
        # type: (Optional[Iterable[str]], Optional[PathFilter]) -> Iterator[InterpreterOrError]
        """Iterate all likely interpreters found in `paths`.

        NB: The paths can either be directories to search for python binaries or the paths of python
        binaries themselves.

        :param paths: The paths to look for python interpreters; by default the `PATH`.
        :param path_filter: An optional predicate to test whether a candidate interpreter's binary
                            path is acceptable.
        :return: A heterogeneous iterator over valid interpreters and (python, error) invalid
                 python binary tuples.
        """
        # Failures are collected here, in discovery order, while valid interpreters stream out.
        failed_interpreters = OrderedDict()  # type: MutableMapping[str, Text]
        def iter_interpreters():
            # type: () -> Iterator[PythonInterpreter]
            for candidate in cls._find(
                cls._paths(paths=paths), path_filter=path_filter, error_handler=Retain()
            ):
                if isinstance(candidate, cls):
                    yield candidate
                else:
                    python, exception = cast("InterpreterIdentificationJobError", candidate)
                    if isinstance(exception, Job.Error) and exception.stderr:
                        # We spawned a subprocess to identify the interpreter but the interpreter
                        # could not run our identification code meaning the interpreter is either
                        # broken or old enough that it either can't parse our identification code
                        # or else provide stdlib modules we expect. The stderr should indicate the
                        # broken-ness appropriately.
                        failed_interpreters[python] = exception.stderr.strip()
                    else:
                        # We couldn't even spawn a subprocess to identify the interpreter. The
                        # likely OSError should help identify the underlying issue.
                        failed_interpreters[python] = repr(exception)
        # Yield the valid interpreters first, then report all the failures.
        for interpreter in cls._filter(iter_interpreters()):
            yield interpreter
        for python, error in failed_interpreters.items():
            yield python, error
@classmethod
def all(cls, paths=None):
# type: (Optional[Iterable[str]]) -> Iterable[PythonInterpreter]
return list(cls.iter(paths=paths))
@classmethod
def _create_isolated_cmd(
cls,
binary, # type: str
args=None, # type: Optional[Iterable[str]]
pythonpath=None, # type: Optional[Iterable[str]]
env=None, # type: Optional[Mapping[str, str]]
):
# type: (...) -> Tuple[Iterable[str], Mapping[str, str]]
cmd = [binary]
# Don't add the user site directory to `sys.path`.
#
# Additionally, it would be nice to pass `-S` to disable adding site-packages but unfortunately
# some python distributions include portions of the standard library there.
cmd.append("-s")
env = cls._sanitized_environment(env=env)
pythonpath = list(pythonpath or ())
if pythonpath:
env["PYTHONPATH"] = os.pathsep.join(pythonpath)
else:
# Turn off reading of PYTHON* environment variables.
cmd.append("-E")
if args:
cmd.extend(args)
rendered_command = " ".join(cmd)
if pythonpath:
rendered_command = "PYTHONPATH={} {}".format(env["PYTHONPATH"], rendered_command)
TRACER.log("Executing: {}".format(rendered_command), V=3)
return cmd, env
    # We use () as the unset sentinel for this lazily calculated cached value. The cached value
    # itself should always be Optional[Pyenv].
    #
    # N.B.: The empty tuple type is not represented as Tuple[] as you might naively guess but
    # instead as Tuple[()].
    #
    # See:
    # + https://github.com/python/mypy/issues/4211
    # + https://www.python.org/dev/peps/pep-0484/#the-typing-module
    _PYENV = ()  # type: Union[Tuple[()],Optional[Pyenv]]
    @classmethod
    def _pyenv(cls):
        # type: () -> Optional[Pyenv]
        """Return the local Pyenv installation, if any, detecting and caching it on first use."""
        if isinstance(cls._PYENV, tuple):
            # Still the () sentinel: perform the one-time detection now.
            cls._PYENV = Pyenv.find()
        return cls._PYENV
@classmethod
def _resolve_pyenv_shim(
cls,
binary, # type: str
pyenv=None, # type: Optional[Pyenv]
):
# type: (...) -> Optional[str]
pyenv = pyenv or cls._pyenv()
if pyenv is not None:
shim = pyenv.as_shim(binary)
if shim is not None:
python = shim.select_version()
if python is None:
TRACER.log("Detected inactive pyenv shim: {}.".format(shim), V=3)
else:
TRACER.log("Detected pyenv shim activated to {}: {}.".format(python, shim), V=3)
return python
return binary
    # File name of the on-disk cached identity record for an interpreter.
    INTERP_INFO_FILE = "INTERP-INFO"
    @classmethod
    def _spawn_from_binary_external(cls, binary):
        # type: (str) -> SpawnedJob[PythonInterpreter]
        """Identify `binary` by running it, caching the identity under the PEX_ROOT.

        Serves from the on-disk cache when a valid entry exists; otherwise spawns `binary` to
        write a fresh INTERP-INFO cache entry and returns a job producing the interpreter.
        """
        def create_interpreter(
            stdout,  # type: bytes
            check_binary=False,  # type: bool
        ):
            # type: (...) -> PythonInterpreter
            identity = stdout.decode("utf-8").strip()
            if not identity:
                raise cls.IdentificationError("Could not establish identity of {}.".format(binary))
            interpreter = cls(PythonIdentity.decode(identity))
            # We should not need to check this since binary == interpreter.binary should always be
            # true, but historically this could be untrue as noted in `PythonIdentity.get`.
            if check_binary and not os.path.exists(interpreter.binary):
                raise cls.InterpreterNotFound(
                    "Cached interpreter for {} reports a binary of {}, which could not be found".format(
                        binary, interpreter.binary
                    )
                )
            return interpreter
        # Part of the PythonInterpreter data are environment markers that depend on the current OS
        # release. That data can change when the OS is upgraded but (some of) the installed interpreters
        # remain the same. As such, include the OS in the hash structure for cached interpreters.
        os_digest = hashlib.sha1()
        for os_identifier in platform.release(), platform.version():
            os_digest.update(os_identifier.encode("utf-8"))
        os_hash = os_digest.hexdigest()
        interpreter_cache_dir = os.path.join(ENV.PEX_ROOT, "interpreters")
        os_cache_dir = os.path.join(interpreter_cache_dir, os_hash)
        if os.path.isdir(interpreter_cache_dir) and not os.path.isdir(os_cache_dir):
            with TRACER.timed("GCing interpreter cache from prior OS version"):
                safe_rmtree(interpreter_cache_dir)
        interpreter_hash = CacheHelper.hash(binary)
        # Some distributions include more than one copy of the same interpreter via a hard link (e.g.:
        # python3.7 is a hardlink to python3.7m). To ensure a deterministic INTERP-INFO file we must
        # emit a separate INTERP-INFO for each link since INTERP-INFO contains the interpreter path and
        # would otherwise be unstable.
        #
        # See cls._REGEXEN for a related affordance.
        #
        # N.B.: The path for --venv mode interpreters can be quite long; so we just used a fixed
        # length hash of the interpreter binary path to ensure uniqueness and not run afoul of file
        # name length limits.
        path_id = hashlib.sha1(binary.encode("utf-8")).hexdigest()
        cache_dir = os.path.join(os_cache_dir, interpreter_hash, path_id)
        cache_file = os.path.join(cache_dir, cls.INTERP_INFO_FILE)
        if os.path.isfile(cache_file):
            try:
                with open(cache_file, "rb") as fp:
                    return SpawnedJob.completed(create_interpreter(fp.read(), check_binary=True))
            except (IOError, OSError, cls.Error, PythonIdentity.Error):
                # The cached identity is corrupt or stale; drop it and re-identify from scratch.
                safe_rmtree(cache_dir)
                return cls._spawn_from_binary_external(binary)
        else:
            pythonpath = third_party.expose(["pex"])
            cmd, env = cls._create_isolated_cmd(
                binary,
                args=[
                    "-c",
                    dedent(
                        """\
                        import os
                        import sys
                        from pex.common import atomic_directory, safe_open
                        from pex.interpreter import PythonIdentity
                        encoded_identity = PythonIdentity.get(binary={binary!r}).encode()
                        with atomic_directory({cache_dir!r}, exclusive=False) as cache_dir:
                            if not cache_dir.is_finalized():
                                with safe_open(
                                    os.path.join(cache_dir.work_dir, {info_file!r}), 'w'
                                ) as fp:
                                    fp.write(encoded_identity)
                        """.format(
                            binary=binary, cache_dir=cache_dir, info_file=cls.INTERP_INFO_FILE
                        )
                    ),
                ],
                pythonpath=pythonpath,
            )
            # Ensure the `.` implicit PYTHONPATH entry contains no Pex code (of a different version)
            # that might interfere with the behavior we expect in the script above.
            cwd = safe_mkdtemp()
            process = Executor.open_process(
                cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
            )
            job = Job(command=cmd, process=process, finalizer=lambda: safe_rmtree(cwd))
            return SpawnedJob.file(job, output_file=cache_file, result_func=create_interpreter)
@classmethod
def _expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return sorted(os.path.join(path, fn) for fn in os.listdir(path))
return []
@classmethod
def from_env(
cls,
hashbang, # type: str
paths=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Optional[PythonInterpreter]
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the search path.
:param paths: The search path to use; defaults to $PATH.
:return: the first matching interpreter found or `None`.
"""
def hashbang_matches(fn):
basefile = os.path.basename(fn)
return hashbang == basefile
for interpreter in cls._identify_interpreters(
filter=hashbang_matches, error_handler=None, paths=paths
):
return interpreter
return None
    @classmethod
    def _spawn_from_binary(cls, binary):
        # type: (str) -> SpawnedJob[PythonInterpreter]
        """Return a spawned (or already-completed) identification job for `binary`.

        Serves from the in-process cache when possible and short-circuits identification of the
        currently running interpreter; only unknown external binaries cost a subprocess.
        """
        canonicalized_binary = cls.canonicalize_path(binary)
        if not os.path.exists(canonicalized_binary):
            raise cls.InterpreterNotFound(
                "The interpreter path {} does not exist.".format(canonicalized_binary)
            )
        # N.B.: The cache is written as the last step in PythonInterpreter instance initialization.
        cached_interpreter = cls._PYTHON_INTERPRETER_BY_NORMALIZED_PATH.get(canonicalized_binary)
        if cached_interpreter is not None:
            return SpawnedJob.completed(cached_interpreter)
        if canonicalized_binary == cls.canonicalize_path(sys.executable):
            # Identifying ourselves requires no subprocess at all.
            current_interpreter = cls(PythonIdentity.get())
            return SpawnedJob.completed(current_interpreter)
        return cls._spawn_from_binary_external(canonicalized_binary)
@classmethod
def from_binary(
cls,
binary, # type: str
pyenv=None, # type: Optional[Pyenv]
):
# type: (...) -> PythonInterpreter
"""Create an interpreter from the given `binary`.
:param binary: The path to the python interpreter binary.
:param pyenv: A custom Pyenv installation for handling pyenv shim identification.
Auto-detected by default.
:return: an interpreter created from the given `binary`.
"""
python = cls._resolve_pyenv_shim(binary, pyenv=pyenv)
if python is None:
raise cls.IdentificationError("The pyenv shim at {} is not active.".format(binary))
try:
return cast(PythonInterpreter, cls._spawn_from_binary(python).await_result())
except Job.Error as e:
raise cls.IdentificationError("Failed to identify {}: {}".format(binary, e))
@classmethod
def _matches_binary_name(cls, path):
# type: (str) -> bool
basefile = os.path.basename(path)
return any(matcher.match(basefile) is not None for matcher in cls._REGEXEN)
    @overload
    @classmethod
    def _find(cls, paths):
        # type: (Iterable[str]) -> Iterator[PythonInterpreter]
        pass
    @overload
    @classmethod
    def _find(
        cls,
        paths,  # type: Iterable[str]
        error_handler,  # type: Retain
        path_filter=None,  # type: Optional[PathFilter]
    ):
        # type: (...) -> Iterator[InterpreterOrJobError]
        pass
    @classmethod
    def _find(
        cls,
        paths,  # type: Iterable[str]
        error_handler=None,  # type: Optional[ErrorHandler]
        path_filter=None,  # type: Optional[PathFilter]
    ):
        # type: (...) -> Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]
        """Given a list of files or directories, try to detect python interpreters amongst them.

        Returns an iterator over PythonInterpreter objects.

        With a `Retain` error handler the iterator instead yields a mix of interpreters and
        (python, error) tuples as declared by the overloads above.
        """
        return cls._identify_interpreters(
            filter=path_filter or cls._matches_binary_name, paths=paths, error_handler=error_handler
        )
    @overload
    @classmethod
    def _identify_interpreters(
        cls,
        filter,  # type: PathFilter
        error_handler,  # type: None
        paths=None,  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Iterator[PythonInterpreter]
        pass
    @overload
    @classmethod
    def _identify_interpreters(
        cls,
        filter,  # type: PathFilter
        error_handler,  # type: Retain
        paths=None,  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Iterator[InterpreterOrJobError]
        pass
    @classmethod
    def _identify_interpreters(
        cls,
        filter,  # type: PathFilter
        error_handler=None,  # type: Optional[ErrorHandler]
        paths=None,  # type: Optional[Iterable[str]]
    ):
        # type: (...) -> Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]
        """Identify all python binaries accepted by `filter` under `paths`, in parallel.

        The `error_handler` determines whether identification failures raise or are yielded as
        (python, error) tuples, as declared by the overloads above.
        """
        def iter_candidates():
            # type: () -> Iterator[str]
            for path in cls._paths(paths=paths):
                for fn in cls._expand_path(path):
                    if filter(fn):
                        # Resolve pyenv shims eagerly; inactive shims resolve falsy and are
                        # dropped here.
                        binary = cls._resolve_pyenv_shim(fn)
                        if binary:
                            yield binary
        results = execute_parallel(
            inputs=OrderedSet(iter_candidates()),
            spawn_func=cls._spawn_from_binary,
            error_handler=error_handler,
        )
        return cast("Union[Iterator[PythonInterpreter], Iterator[InterpreterOrJobError]]", results)
@classmethod
def _filter(cls, pythons):
# type: (Iterable[PythonInterpreter]) -> Iterator[PythonInterpreter]
"""Filters duplicate python interpreters and versions we don't support.
Returns an iterator over PythonInterpreters.
"""
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
# type: (Tuple[int, int, int]) -> bool
return (
version[MAJOR] == 2
and version[MINOR] >= 7
or version[MAJOR] == 3
and version[MINOR] >= 5
)
seen = set()
for interp in pythons:
version = interp.identity.version
identity = version, interp.identity.abi_tag
if identity not in seen and version_filter(version):
seen.add(identity)
yield interp
@classmethod
def _sanitized_environment(cls, env=None):
# type: (Optional[Mapping[str, str]]) -> Dict[str, str]
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.7 breaks. See: https://bugs.python.org/issue9516
env_copy = dict(env or os.environ)
env_copy.pop("MACOSX_DEPLOYMENT_TARGET", None)
return env_copy
    def __init__(self, identity):
        # type: (PythonIdentity) -> None
        """Construct a PythonInterpreter.
        You should probably use `PythonInterpreter.from_binary` instead.
        """
        self._identity = identity
        self._binary = self.canonicalize_path(self.identity.binary)
        # Lazily computed by the `supported_platforms` property.
        self._supported_platforms = None
        # N.B.: Registering in the process-wide cache must stay the final step so no partially
        # initialized instance is ever visible via `_spawn_from_binary`.
        self._PYTHON_INTERPRETER_BY_NORMALIZED_PATH[self._binary] = self
    @property
    def binary(self):
        # type: () -> str
        """The canonicalized path of this interpreter's binary."""
        return self._binary
    @property
    def is_venv(self):
        # type: () -> bool
        """Return `True` if this interpreter is homed in a virtual environment."""
        return self._identity.prefix != self._identity.base_prefix
    @property
    def prefix(self):
        # type: () -> str
        """Return the `sys.prefix` of this interpreter.
        For virtual environments, this will be the virtual environment directory itself.
        """
        return self._identity.prefix
    @property
    def sys_path(self):
        # type: () -> Tuple[str, ...]
        """Return the interpreter's `sys.path`.
        The implicit `$PWD` entry and any entries injected via PYTHONPATH or in the user site
        directory are excluded such that the `sys.path` presented is the base interpreter `sys.path`
        with no adornments.
        """
        return self._identity.sys_path
    class BaseInterpreterResolutionError(Exception):
        """Indicates the base interpreter for a virtual environment could not be resolved."""
    def resolve_base_interpreter(self):
        # type: () -> PythonInterpreter
        """Finds the base system interpreter used to create a virtual environment.
        If this interpreter is not homed in a virtual environment, returns itself.
        :raises BaseInterpreterResolutionError: If no equivalent base interpreter can be found.
        """
        if not self.is_venv:
            return self
        # In the case of PyPy, the <base_prefix> dir might contain one of the following:
        #
        # 1. On a system with PyPy 2.7 series and one PyPy 3.x series
        #    bin/
        #      pypy
        #      pypy3
        #
        # 2. On a system with PyPy 2.7 series and more than one PyPy 3.x series
        #    bin/
        #      pypy
        #      pypy3
        #      pypy3.6
        #      pypy3.7
        #
        # In both cases, bin/pypy is a 2.7 series interpreter. In case 2 bin/pypy3 could be either
        # PyPy 3.6 series or PyPy 3.7 series. In order to ensure we pick the correct base executable
        # of a PyPy virtual environment, we always try to resolve the most specific basename first
        # to the least specific basename last and we also verify that, if the basename resolves, it
        # resolves to an equivalent interpreter. We employ the same strategy for CPython, but only
        # for uniformity in the algorithm. It appears to always be the case for CPython that
        # python<major>.<minor> is present in any given <prefix>/bin/ directory; so the algorithm
        # gets a hit on 1st try for CPython binaries incurring ~no extra overhead.
        version = self._identity.version
        abi_tag = self._identity.abi_tag
        prefix = "pypy" if self._identity.interpreter == "PyPy" else "python"
        # Most to least specific: e.g. python3.8, python3, python.
        suffixes = ("{}.{}".format(version[0], version[1]), str(version[0]), "")
        candidate_binaries = tuple("{}{}".format(prefix, suffix) for suffix in suffixes)
        def iter_base_candidate_binary_paths(interpreter):
            # type: (PythonInterpreter) -> Iterator[str]
            bin_dir = os.path.join(interpreter._identity.base_prefix, "bin")
            for candidate_binary in candidate_binaries:
                candidate_binary_path = os.path.join(bin_dir, candidate_binary)
                if is_exe(candidate_binary_path):
                    yield candidate_binary_path
        def is_same_interpreter(interpreter):
            # type: (PythonInterpreter) -> bool
            # Equivalence means same version and ABI, not same binary path.
            identity = interpreter._identity
            return identity.version == version and identity.abi_tag == abi_tag
        resolution_path = []  # type: List[str]
        base_interpreter = self
        # A venv can itself be created from another venv's interpreter; walk up until we reach
        # a non-venv (base system) interpreter.
        while base_interpreter.is_venv:
            resolved = None  # type: Optional[PythonInterpreter]
            for candidate_path in iter_base_candidate_binary_paths(base_interpreter):
                resolved_interpreter = self.from_binary(candidate_path)
                if is_same_interpreter(resolved_interpreter):
                    resolved = resolved_interpreter
                    break
            if resolved is None:
                message = [
                    "Failed to resolve the base interpreter for the virtual environment at "
                    "{venv_dir}.".format(venv_dir=self._identity.prefix)
                ]
                if resolution_path:
                    message.append(
                        "Resolved through {path}".format(
                            path=" -> ".join(binary for binary in resolution_path)
                        )
                    )
                message.append(
                    "Search of base_prefix {} found no equivalent interpreter for {}".format(
                        base_interpreter._identity.base_prefix, base_interpreter._binary
                    )
                )
                raise self.BaseInterpreterResolutionError("\n".join(message))
            base_interpreter = resolved_interpreter
            resolution_path.append(base_interpreter.binary)
        return base_interpreter
    @property
    def identity(self):
        # type: () -> PythonIdentity
        """The identity record backing this interpreter."""
        return self._identity
    @property
    def python(self):
        # type: () -> str
        """The `<major>.<minor>` version string, e.g. '2.7' or '3.8'."""
        return self._identity.python
    @property
    def version(self):
        # type: () -> Tuple[int, int, int]
        """The interpreter version as a `(major, minor, micro)` tuple."""
        return self._identity.version
    @property
    def version_string(self):
        # type: () -> str
        """A human-readable `<name>-<major>.<minor>.<patch>` version string."""
        return str(self._identity)
    @property
    def platform(self):
        # type: () -> Platform
        """The most specific platform of this interpreter."""
        return next(self._identity.iter_supported_platforms())
    @property
    def supported_platforms(self):
        """All platforms supported by this interpreter.
        :rtype: frozenset of :class:`Platform`
        """
        if self._supported_platforms is None:
            # Computed once on demand and cached thereafter.
            self._supported_platforms = frozenset(self._identity.iter_supported_platforms())
        return self._supported_platforms
def create_isolated_cmd(
    self,
    args=None,  # type: Optional[Iterable[str]]
    pythonpath=None,  # type: Optional[Iterable[str]]
    env=None,  # type: Optional[Mapping[str, str]]
):
    # type: (...) -> Tuple[Iterable[str], Mapping[str, str]]
    """Return an (argv, env) pair for running this interpreter in isolation.

    :param args: Extra arguments to pass to the interpreter binary.
    :param pythonpath: Entries to expose on the isolated PYTHONPATH.
    :param env: Base environment to copy; defaults to ``os.environ``.
    """
    env_copy = dict(env or os.environ)
    if self._identity.configured_macosx_deployment_target:
        # System interpreters on mac have a history of bad configuration from one source or
        # another. See `cls._sanitized_environment` for one example of this.
        #
        # When a Python interpreter is used to build platform specific wheels on a mac, it needs
        # to report a platform of `macosx-X.Y-<machine>` to conform to PEP-425 & PyPAs
        # `packaging` tags library. The X.Y release is derived from the MACOSX_DEPLOYMENT_TARGET
        # sysconfig (Makefile) variable. Sometimes the configuration is provided by a user
        # building a custom Python. See https://github.com/pypa/wheel/issues/385 for an example
        # where MACOSX_DEPLOYMENT_TARGET is set to 11. Other times the configuration is provided
        # by the system maintainer (Apple). See https://github.com/pantsbuild/pants/issues/11061
        # for an example of this via XCode 12s system Python 3.8 interpreter which reports
        # 10.14.6.
        release = self._identity.configured_macosx_deployment_target
        version = release.split(".")
        # Normalize the release to exactly two components (X.Y), as PEP-425 expects.
        if len(version) == 1:
            release = "{}.0".format(version[0])
        elif len(version) > 2:
            release = ".".join(version[:2])
        # Only intervene when normalization actually changed the release.
        if release != self._identity.configured_macosx_deployment_target:
            osname, _, machine = sysconfig.get_platform().split("-")
            pep425_compatible_platform = "{osname}-{release}-{machine}".format(
                osname=osname, release=release, machine=machine
            )
            # An undocumented feature of `sysconfig.get_platform()` is respect for the
            # _PYTHON_HOST_PLATFORM environment variable. We can fix up badly configured macOS
            # interpreters by influencing the platform this way, which is enough to get wheels
            # building with proper platform tags. This is supported for the CPythons we support:
            # + https://github.com/python/cpython/blob/v2.7.18/Lib/sysconfig.py#L567-L569
            # ... through ...
            # + https://github.com/python/cpython/blob/v3.9.2/Lib/sysconfig.py#L652-L654
            TRACER.log(
                "Correcting mis-configured MACOSX_DEPLOYMENT_TARGET of {} to {} corresponding "
                "to a valid PEP-425 platform of {} for {}.".format(
                    self._identity.configured_macosx_deployment_target,
                    release,
                    pep425_compatible_platform,
                    self,
                )
            )
            env_copy.update(_PYTHON_HOST_PLATFORM=pep425_compatible_platform)
    return self._create_isolated_cmd(
        self.binary, args=args, pythonpath=pythonpath, env=env_copy
    )
def execute(
    self,
    args=None,  # type: Optional[Iterable[str]]
    stdin_payload=None,  # type: Optional[AnyStr]
    pythonpath=None,  # type: Optional[Iterable[str]]
    env=None,  # type: Optional[Mapping[str, str]]
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Iterable[str], str, str]
    """Run this interpreter isolated (see create_isolated_cmd) and wait for it.

    Returns the (cmd, stdout, stderr) triple of the completed process.
    """
    cmd, env = self.create_isolated_cmd(args=args, pythonpath=pythonpath, env=env)
    stdout, stderr = Executor.execute(cmd, stdin_payload=stdin_payload, env=env, **kwargs)
    return cmd, stdout, stderr
def open_process(
    self,
    args=None,  # type: Optional[Iterable[str]]
    pythonpath=None,  # type: Optional[Iterable[str]]
    env=None,  # type: Optional[Mapping[str, str]]
    **kwargs  # type: Any
):
    # type: (...) -> Tuple[Iterable[str], subprocess.Popen]
    """Spawn this interpreter isolated and return (cmd, Popen) without waiting."""
    cmd, env = self.create_isolated_cmd(args=args, pythonpath=pythonpath, env=env)
    process = Executor.open_process(cmd, env=env, **kwargs)
    return cmd, process
def __hash__(self):
    # Identity of an interpreter is its binary path; see __eq__ below.
    return hash(self._binary)
def __eq__(self, other):
    """Interpreters are equal exactly when they point at the same binary path."""
    if type(self) is type(other):
        return self._binary == other._binary
    return NotImplemented
def __repr__(self):
    """Debug rendering: class name with the binary path and probed identity."""
    return "%s(%r, %r)" % (self.__class__.__name__, self._binary, self._identity)
def spawn_python_job(
    args,  # type: Iterable[str]
    env=None,  # type: Optional[Mapping[str, str]]
    interpreter=None,  # type: Optional[PythonInterpreter]
    expose=None,  # type: Optional[Iterable[str]]
    pythonpath=None,  # type: Optional[Iterable[str]]
    **subprocess_kwargs  # type: Any
):
    # type: (...) -> Job
    """Spawn a python job and return a handle to the spawned process.

    :param args: The arguments to pass to the python interpreter.
    :param env: The environment to spawn the python interpreter process in. Defaults to the
        ambient environment.
    :param interpreter: The interpreter to use to spawn the python job. Defaults to the current
        interpreter.
    :param expose: The names of any vendored distributions to expose to the spawned python
        process. These will be appended to `pythonpath` if passed.
    :param pythonpath: The PYTHONPATH to expose to the spawned python process. These will be
        pre-pended to the `expose` path if passed.
    :param subprocess_kwargs: Any additional :class:`subprocess.Popen` kwargs to pass through.
    :returns: A job handle to the spawned python process.
    """
    path_entries = list(pythonpath or ())
    job_env = dict(env or os.environ)
    if expose:
        # In order to expose vendored distributions with their un-vendored import paths in-tact,
        # we need to set `__PEX_UNVENDORED__`. See: vendor.__main__.ImportRewriter._modify_import.
        job_env["__PEX_UNVENDORED__"] = "1"
        path_entries.extend(third_party.expose(expose))
    selected_interpreter = interpreter or PythonInterpreter.get()
    cmd, process = selected_interpreter.open_process(
        args=args, pythonpath=path_entries, env=job_env, **subprocess_kwargs
    )
    return Job(command=cmd, process=process)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert that *fee* lies in the acceptable range for a tx of *tx_size* bytes."""
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every given value equals the first one."""
    values = (thing1, thing2) + args
    if any(thing1 != other for other in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in values))
def assert_greater_than(thing1, thing2):
    """Assert that *thing1* is strictly greater than *thing2*."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Assert that *thing1* is greater than or equal to *thing2*."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*, with any message."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* whose text contains *message*.

    *message* may be None to skip the substring check. JSONRPCException is
    explicitly rejected: RPC failures must be tested with
    assert_raises_rpc_error() instead.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # Use str(e): ordinary exceptions have no `.error` attribute, so the
        # previous e.error['message'] lookup raised AttributeError for every
        # non-RPC exception whenever a message was supplied.
        if message is not None and message not in str(e):
            raise AssertionError("Expected substring not found:" + str(e))
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and assert on its return code and output.

    Calls `fun(*args, **kwds)`, which should execute a process, and catches the
    resulting CalledProcessError. Raises AssertionError when no
    CalledProcessError occurs, when the return code differs from *returncode*,
    or when *output* is not a substring of the process output.

    Args:
        returncode (int): the process return code.
        output (string): [a substring of] the process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # try_rpc raises on code/message mismatch and returns False only when no
    # exception was raised at all.
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Run an RPC and report whether it failed with a JSONRPCException.

    When it fails, optionally verify the error code equals *code* and that
    *message* occurs in the error string (either may be None to skip the
    check). Returns True when a JSONRPCException was raised, False when no
    exception occurred; any other exception becomes an AssertionError.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # The expected failure mode: validate code and message if requested.
        error = e.error
        if code is not None and error["code"] != code:
            raise AssertionError("Unexpected JSONRPC error code %i" % error["code"])
        if message is not None and message not in error['message']:
            raise AssertionError("Expected substring not found:" + error['message'])
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Assert that *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as parse_error:
        message = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, parse_error)
        raise AssertionError(message)
def assert_is_hash_string(string, length=64):
    """Assert *string* looks like a lowercase hex hash of *length* characters.

    Pass length=0 (or any falsy value) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Skip items that fail any of the to_match key/value pairs.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        if should_not_find:
            num_matched += 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure the json library round-trips 8-decimal BTC amounts exactly."""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by a hex string."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex encoding of *byte_str*."""
    return byte_str.hex()
def hash256(byte_str):
    """Return double-SHA256 of *byte_str* with the digest byte-reversed."""
    first_pass = hashlib.sha256(byte_str).digest()
    second_pass = hashlib.sha256(first_pass).digest()
    return second_pass[::-1]
def hex_str_to_bytes(hex_str):
    """Decode a hex string into raw bytes."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """Return the base64 encoding of *string*'s UTF-8 bytes, as ASCII text."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll *predicate* every 50ms until it is true, then return.

    Gives up after *attempts* polls or *timeout* seconds (a default timeout of
    60s applies when neither bound is given) and raises AssertionError with the
    predicate's source. When *lock* is given, it is held around each call.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = time.time() + timeout
    attempt = 0
    while attempt < attempts and time.time() < deadline:
        if lock:
            with lock:
                satisfied = predicate()
        else:
            satisfied = predicate()
        if satisfied:
            return
        attempt += 1
        time.sleep(0.05)

    # Print the cause of the timeout
    predicate_source = inspect.getsourcelines(predicate)
    logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
    if attempt >= attempts:
        raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
    elif time.time() >= deadline:
        raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################

# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each.
# Ports are derived from these in p2p_port() / rpc_port() below.
PORT_RANGE = 5000
class PortSeed:
    """Holder for the per-process seed used by p2p_port()/rpc_port()."""
    # Must be initialized with a unique integer for each process
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Build a coverage-aware RPC proxy for a test node.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory for RPC coverage logs, or None to disable

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout

    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info

    if coveragedir:
        coverage_logfile = coverage.get_filename(coveragedir, node_number)
    else:
        coverage_logfile = None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the deterministic p2p port for node *n*, offset by PortSeed.n."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the deterministic RPC port for node *n* (p2p range shifted by PORT_RANGE)."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Return the authenticated RPC URL for node *i*, honoring an optional host[:port] override."""
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host, port = '127.0.0.1', rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            # "host:port" override replaces both pieces.
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir with a regtest bitcoin.conf plus stdout/stderr dirs."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    config_lines = [
        "regtest=1",
        "[regtest]",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "server=1",
        "keypool=1",
        "discover=0",
        "listenonion=0",
        "printtoconsole=0",
    ]
    with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
        f.write("\n".join(config_lines) + "\n")
    os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
    os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
    return datadir
def get_datadir_path(dirname, n):
    """Return the path of node *n*'s data directory under *dirname*."""
    node_dirname = "node" + str(n)
    return os.path.join(dirname, node_dirname)
def append_config(datadir, options):
    """Append the given option lines to *datadir*'s bitcoin.conf."""
    conf_path = os.path.join(datadir, "bitcoin.conf")
    with open(conf_path, 'a', encoding='utf8') as f:
        f.writelines(option + "\n" for option in options)
def get_auth_cookie(datadir):
    """Return the (user, password) RPC credentials for *datadir*.

    Credentials come from rpcuser/rpcpassword lines in bitcoin.conf when
    present; a regtest .cookie file, when it exists, overrides both. Raises
    ValueError when neither source yields a complete pair.
    """
    user = None
    password = None
    if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
        with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    # Split on the first '=' only: values may themselves contain '='.
                    user = line.split("=", 1)[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=", 1)[1].strip("\n")
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
        with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
            userpass = f.read()
            split_userpass = userpass.split(':')
            user = split_userpass[0]
            password = split_userpass[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    """Delete a leftover regtest .cookie file from *datadir*, if one exists."""
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie_path)
def get_bip9_status(node, key):
    """Return the bip9_softforks entry *key* from the node's blockchain info."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the mock time of every node in *nodes* to *t*."""
    for rpc_node in nodes:
        rpc_node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of *from_connection* that identifies as test node *node_num*."""
    def matching_peer_ids():
        return [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]

    for peer_id in matching_peer_ids():
        try:
            from_connection.disconnectnode(nodeid=peer_id)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                raise

    # wait to disconnect
    wait_until(lambda: matching_peer_ids() == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Add an outbound connection to test node *node_num* and wait for the handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")

    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    def handshake_complete():
        return all(peer['version'] != 0 for peer in from_connection.getpeerinfo())

    wait_until(handshake_complete)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other, one direction at a time."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.

    sync_blocks needs to be called with an rpc_connections set that has least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        best_hash = [conn.getbestblockhash() for conn in rpc_connections]
        if best_hash.count(best_hash[0]) == len(rpc_connections):
            return
        time.sleep(wait)
    raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    stop_time = time.time() + timeout
    while time.time() <= stop_time:
        pool = [set(conn.getrawmempool()) for conn in rpc_connections]
        if pool.count(pool[0]) == len(rpc_connections):
            if flush_scheduler:
                for conn in rpc_connections:
                    conn.syncwithvalidationinterfacequeue()
            return
        time.sleep(wait)
    raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
    """
    Return the index of the output of *txid* whose value equals *amount*.

    Raises RuntimeError when no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random set of unspent txouts that together
    cover *amount_needed*. Raises RuntimeError on insufficient funds.
    """
    assert confirmations_required >= 0
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and utxo:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s) for spending *amount_out* + *fee* out of
    *amount_in*; return them as an {address: amount} dict.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Build, sign and broadcast a transaction between two random nodes with a
    randomized fee. Returns (txid, hex-encoded-transaction-data, fee).
    """
    sender = random.choice(nodes)
    recipient = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)

    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[recipient.getnewaddress()] = float(amount)

    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransactionwithwallet(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure *node* owns at least *count* confirmed utxos and return them.

    Pass in a fee that is sufficient for relay and mining new transactions.
    Existing utxos are split in two until the target count is reached, then
    every splitting transaction is mined so the returned utxos are confirmed.
    """
    # Mine blocks to obtain spendable coins (+101 extra blocks — presumably
    # for coinbase maturity; confirm against the chain's maturity rules).
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        # Split one utxo into two halves, paying *fee* for the split tx.
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value / 2)
        outputs[addr2] = satoshi_round(send_value / 2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm everything that is still in the mempool before returning.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return hex for 128 large OP_RETURN txouts used to inflate test transactions.

    The result is spliced into a raw transaction by
    create_lots_of_big_transactions() so each transaction becomes large and
    few of them fit into a block.
    """
    # One script_pubkey: OP_RETURN OP_PUSH2, then 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # One serialized txout: 8-byte zero value, script length fd0402, the script.
    txout_hex = "0000000000000000" + "fd0402" + script_pubkey
    # "81" leads the 128 concatenated txouts, inserted before the change txout.
    return "81" + txout_hex * 128
def create_tx(node, coinbase, to_address, amount):
    """Spend vout 0 of *coinbase* to *to_address*; return the signed tx hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    raw_hex = node.createrawtransaction(inputs, outputs)
    signed = node.signrawtransactionwithwallet(raw_hex)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Create and broadcast *num* large transactions; return their txids.

    Each transaction spends one utxo popped from *utxos* (mutated in place)
    and has the pre-built OP_RETURN *txouts* hex (see gen_return_txouts())
    spliced into its serialization to inflate its size.
    """
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the big txouts into the raw hex at fixed offsets.
        # NOTE(review): offsets 92/94 assume the fixed serialization size of a
        # one-input transaction up to the txout count — confirm if the
        # transaction format changes.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # Signed with the "NONE" sighash type — presumably so the signature
        # stays valid despite the modified outputs; verify against signing docs.
        signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with large transactions and mine it.

    Each transaction is ~66k; 14 of them is close to the 1MB block limit.
    *utxos*, when given, is refilled in place from the node's wallet if it
    holds too few entries.
    """
    num_txs = 14
    txouts = gen_return_txouts()
    spendable = utxos if utxos is not None else []
    if len(spendable) < num_txs:
        # Top up in place so a caller-provided list sees the refill.
        spendable.clear()
        spendable.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, spendable, num_txs, fee=fee)
    node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Return the vout index of *txid* that pays to *addr*.

    Raises RuntimeError when no output sends to the address.
    """
    tx = node.getrawtransaction(txid, True)
    for index, vout in enumerate(tx["vout"]):
        if addr in vout["scriptPubKey"]["addresses"]:
            return index
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| |
from collections import defaultdict
from collections.abc import Mapping
from functools import lru_cache
from typing import Optional, Type, Union
import cerberus
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
from pytz import all_timezones
from deck_chores.config import cfg, CONTAINER_CACHE_SIZE
from deck_chores.utils import (
log,
parse_time_from_string_with_units,
seconds_as_interval_tuple,
split_string,
)
####
# Number of fields a full cron expression carries, per APScheduler's CronTrigger.
CRON_TRIGGER_FIELDS_COUNT = len(CronTrigger.FIELD_NAMES)
# Map the separators '.', ':' and '/' to spaces. str.maketrans requires both
# strings to have equal length, so the three separators map to three spaces;
# a single-space second argument raises ValueError at import time.
INTERVAL_SEPARATOR_TRANSLATION_TABLE = str.maketrans('.:/', '   ')
# Interval shorthands as (weeks, days, hours, minutes, seconds) tuples.
NAME_INTERVAL_MAP = {
    'weekly': (1, 0, 0, 0, 0),
    'daily': (0, 1, 0, 0, 0),
    'hourly': (0, 0, 1, 0, 0),
    'every minute': (0, 0, 0, 1, 0),
    'every second': (0, 0, 0, 0, 1),
}
class JobConfigValidator(cerberus.Validator):
    """Cerberus validator for deck-chores job definitions.

    Adds coercers that turn trigger label strings into an (APScheduler trigger
    class, args) pair, and a check that instantiates the trigger to prove the
    arguments are valid.
    """

    def set_defaults(self, cfg):
        """Inject runtime-configured defaults/regexes into the schema."""
        schema = self.schema
        schema["max"]["default"] = cfg.default_max
        schema["name"]["regex"] = cfg.job_name_regex
        schema["timezone"]["default"] = cfg.timezone
        # NOTE(review): re-validates the mutated schema via cerberus'
        # schema.validate() — confirm this is the intended cerberus API.
        schema.validate()

    @staticmethod
    @lru_cache(128)
    def _fill_args(value: str, length: int, filling: str) -> tuple[str, ...]:
        """Split *value* on spaces and left-pad with *filling* to *length* tokens."""
        value = value.strip()
        # Collapse runs of spaces to single spaces before splitting. The
        # single-space replace(' ', ' ') variant is a no-op and loops forever
        # as soon as the value contains any space.
        while '  ' in value:
            value = value.replace('  ', ' ')
        tokens = value.split(' ')
        return tuple([filling] * (length - len(tokens)) + tokens)

    def _normalize_coerce_cron(self, value: str) -> tuple[Type, tuple[str, ...]]:
        # Missing leading fields default to '*' (match everything).
        args = self._fill_args(value, CRON_TRIGGER_FIELDS_COUNT, '*')
        return CronTrigger, args

    def _normalize_coerce_date(self, value: str) -> tuple[Type, tuple[str]]:
        return DateTrigger, (value,)

    def _normalize_coerce_interval(
        self, value: str
    ) -> tuple[Type, Optional[tuple[int, int, int, int, int]]]:
        """Coerce named, unit-suffixed or separator-delimited interval specs."""
        args = NAME_INTERVAL_MAP.get(value)
        if args is None:
            if any(x.isalpha() for x in value):
                # e.g. "2h 30m": parse into seconds, then to an interval tuple.
                parsed_value = parse_time_from_string_with_units(value)
                if parsed_value:
                    args = seconds_as_interval_tuple(parsed_value)
            else:
                # e.g. "1:30:00": turn separators into spaces, pad with zeros.
                value = value.translate(INTERVAL_SEPARATOR_TRANSLATION_TABLE)
                filled_args = self._fill_args(value, 5, '0')
                args = tuple(int(x) for x in filled_args)  # type: ignore
        return IntervalTrigger, args

    def _normalize_coerce_timeunits(self, value: str) -> Optional[int]:
        if any(x.isalpha() for x in value):
            return parse_time_from_string_with_units(value)
        return int(value)

    def _check_with_trigger(self, field, value):
        """Instantiate the coerced trigger to prove the arguments are usable."""
        if isinstance(value, str):  # normalization failed
            return
        trigger_class, args = value[0], value[1]
        try:
            trigger_class(*args, timezone=self.document.get('timezone', cfg.timezone))
        except Exception as e:
            message = (
                f"Error while instantiating a {trigger_class.__name__} with '{args}'."
            )
            if cfg.debug:
                message += f"\n{e}"
            self._error(field, message)
# Module-wide validator instance. Exactly one trigger field (cron / date /
# interval) must be present per job: each is 'required' but 'excludes' the
# others, which cerberus resolves to "at least and at most one".
job_config_validator = JobConfigValidator(
    {
        'command': {'required': True},
        'cron': {
            'coerce': 'cron',
            'check_with': 'trigger',
            'required': True,
            'excludes': ['date', 'interval'],
        },
        'date': {
            'coerce': 'date',
            'check_with': 'trigger',
            'required': True,
            # DateTrigger fires once; jitter is meaningless for it.
            'excludes': ['cron', 'interval', 'jitter'],
        },
        'environment': {'type': 'dict', 'default': {}},
        'interval': {
            'coerce': 'interval',
            'check_with': 'trigger',
            'required': True,
            'excludes': ['cron', 'date'],
        },
        'jitter': {
            'type': 'integer',
            'coerce': 'timeunits',
            'nullable': True,
            'min': 0,
        },
        'max': {'coerce': int},  # default is set later
        'name': {"required": True},  # regex is set later
        'timezone': {'allowed': all_timezones},  # default is set later
        'user': {
            "empty": True,
            'regex': r'[a-zA-Z0-9_.][a-zA-Z0-9_.-]*',
            "required": True,
        },
        'workdir': {'regex': r'/.*'},
    }
)
####
@lru_cache(maxsize=CONTAINER_CACHE_SIZE)
def parse_labels(container_id: str) -> tuple[tuple[str, ...], str, dict[str, dict]]:
    """Fetch a container's labels and parse them into (service_id, flags, job definitions)."""
    labels = cfg.client.containers.get(container_id).labels
    log.debug(f'Parsing labels: {labels}')
    service_id = parse_service_id(labels)
    namespaced_labels = {k: v for k, v in labels.items() if k.startswith(cfg.label_ns)}
    flags, user = parse_options(namespaced_labels)
    image_labels: dict[str, str] = {}
    if 'image' in flags:
        image_labels = image_definition_labels_of_container(container_id)
        # Fall back to the image's configured user when none is set directly.
        user = user or parse_options(image_labels)[1]
    job_definitions = parse_job_definitions(image_labels | namespaced_labels, user)
    if service_id:
        log.debug(f'Assigning service id: {service_id}')
        for job_definition in job_definitions.values():
            job_definition['service_id'] = service_id
    return service_id, flags, job_definitions
def parse_options(labels: dict[str, str]) -> tuple[str, str]:
    """Pop the option labels from *labels* and return (flags, user).

    Both option labels live under the configured namespace: the dicts passed
    in (see parse_labels) only contain cfg.label_ns-prefixed keys, so the
    flags label must be looked up with the prefix just like the user label —
    a bare "options.flags" key can never match.
    """
    flags = parse_flags(labels.pop(cfg.label_ns + "options.flags", ""))
    user = labels.pop(cfg.label_ns + "options.user", "")
    return flags, user
@lru_cache(maxsize=16)
def parse_flags(options: str) -> str:
    """Merge *options* into the default flags; a "no<flag>" token removes <flag>."""
    flags = set(cfg.default_flags)
    if options:
        for token in split_string(options):
            if token.startswith("no"):
                flags.discard(token.removeprefix("no"))
            else:
                flags.add(token)
    resolved = ','.join(sorted(flags))
    log.debug(f'Parsed & resolved container flags: {resolved}')
    return resolved
def parse_service_id(labels: dict[str, str]) -> tuple[str, ...]:
    """Derive a service identity tuple from the configured identifier labels.

    Returns () when no identifier labels are present, or when only a subset of
    the configured identifiers is present (logged as critical).
    """
    identity_labels = {k: v for k, v in labels.items() if k in cfg.service_identifiers}
    log.debug(f'Considering labels for service id: {identity_labels}')
    if not identity_labels:
        return ()
    missing = set(cfg.service_identifiers) - set(identity_labels)
    if missing:
        log.critical(
            'Missing service identity labels: {}'.format(', '.join(missing))
        )
        return ()
    return tuple(f"{k}={v}" for k, v in identity_labels.items())
def image_definition_labels_of_container(container_id: str) -> dict[str, str]:
    """Return the namespaced deck-chores labels defined on the container's image."""
    image_labels = cfg.client.containers.get(container_id).image.labels
    return {key: value for key, value in image_labels.items() if key.startswith(cfg.label_ns)}
def parse_job_definitions(labels: Mapping[str, str], user: str) -> dict[str, dict]:
    """Group namespaced labels into per-job definitions and validate each one.

    *labels* maps fully namespaced keys ("<ns><job>.<attribute>") to values;
    *user* is the default user applied to definitions that set none. Invalid
    definitions are logged and dropped. In each validated job, whichever
    trigger key survived validation (cron, date or interval) is folded into a
    single 'trigger' entry.
    """
    log.debug(f'Considering labels for job definitions: {dict(labels)}')
    name_grouped_definitions: defaultdict[
        str, dict[str, Union[str, dict]]
    ] = defaultdict(dict)
    for key, value in labels.items():
        key = key.removeprefix(cfg.label_ns)
        if '.env.' in key:
            # "<job>.env.<VAR>" labels populate the job's environment mapping.
            name, _, variable = key.split('.', 2)
            name_grouped_definitions[name].setdefault('environment', {})
            name_grouped_definitions[name]['environment'][  # type: ignore
                variable
            ] = value
        else:
            name, attribute = key.split('.', 1)
            name_grouped_definitions[name][attribute] = value
    log.debug(f'Job definitions: {dict(name_grouped_definitions)}')
    result = {}
    for name, definition in name_grouped_definitions.items():
        log.debug(f'Processing {name}')
        definition['name'] = name
        definition.setdefault("user", user)
        # validated() returns None on failure; errors are on the validator.
        job = job_config_validator.validated(definition)
        if job is None:
            log.error(f'Misconfigured job definition: {definition}')
            log.error(f'Errors: {job_config_validator.errors}')
            continue
        for trigger_name in ('cron', 'date', 'interval'):
            trigger = job.pop(trigger_name, None)
            if trigger is None:
                continue
            job['trigger'] = trigger
        log.debug(f'Normalized definition: {job}')
        result[name] = job
    return result
####
# TODO remove ignore when this issue is solved:
# https://github.com/python/mypy/issues/1317
# Public API of this module: only parse_labels is meant for external use.
__all__ = (parse_labels.__name__,)  # type: ignore
| |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import copy
import unittest
import command
import config
import ipv6
import node
# Node identifiers for the test topology (double as simulator node ids).
LEADER = 1       # Thread Leader
BR = 2           # Border Router advertising the on-mesh prefixes
ROUTER1 = 3      # peer router, ping target
DUT_ROUTER2 = 4  # Device Under Test (router whose Address Query behavior is checked)
MED1 = 5         # Minimal End Device attached to the DUT
class Cert_5_3_10_AddressQuery(unittest.TestCase):
    """Thread certification test 5.3.10: Address Query (DUT as Router).

    Topology (links restricted via radio whitelists):
        LEADER - BR
        LEADER - ROUTER1
        LEADER - DUT_ROUTER2
        ROUTER1 - DUT_ROUTER2
        DUT_ROUTER2 - MED1

    Verifies that DUT_ROUTER2 issues Address Query requests ('/a/aq'),
    answers with Address Notifications ('/a/an'), serves subsequent pings
    from its EID cache without re-querying, and purges cache/child state
    when ROUTER1 and MED1 leave the network.
    """

    def setUp(self):
        """Build the 5-node topology.

        MED1 is created with the second ``Node`` argument set to True
        (presumably selecting the MTD build — confirm against ``node.Node``);
        all other nodes are full routers.
        """
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1, 6):
            self.nodes[i] = node.Node(i, (i == MED1), simulator=self.simulator)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[BR].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        self.nodes[BR].set_panid(0xface)
        self.nodes[BR].set_mode('rsdn')
        self.nodes[BR].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[BR].enable_whitelist()
        self.nodes[BR].set_router_selection_jitter(1)

        self.nodes[ROUTER1].set_panid(0xface)
        self.nodes[ROUTER1].set_mode('rsdn')
        self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER1].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[ROUTER1].enable_whitelist()
        self.nodes[ROUTER1].set_router_selection_jitter(1)

        self.nodes[DUT_ROUTER2].set_panid(0xface)
        self.nodes[DUT_ROUTER2].set_mode('rsdn')
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
        self.nodes[DUT_ROUTER2].add_whitelist(self.nodes[MED1].get_addr64())
        self.nodes[DUT_ROUTER2].enable_whitelist()
        self.nodes[DUT_ROUTER2].set_router_selection_jitter(1)

        self.nodes[MED1].set_panid(0xface)
        self.nodes[MED1].set_mode('rsn')
        self.nodes[MED1].add_whitelist(self.nodes[DUT_ROUTER2].get_addr64())
        self.nodes[MED1].enable_whitelist()

    def tearDown(self):
        """Stop and destroy every node, then stop the simulator."""
        for n in list(self.nodes.values()):
            n.stop()
            n.destroy()
        self.simulator.stop()

    def test(self):
        """Run certification steps 1-7 in order (order is significant)."""
        # 1 & 2
        # Build and verify the topology
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        self.nodes[BR].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[BR].get_state(), 'router')

        # Configure two On-Mesh Prefixes on the BR
        self.nodes[BR].add_prefix('2003::/64', 'paros')
        self.nodes[BR].add_prefix('2004::/64', 'paros')
        self.nodes[BR].register_netdata()

        # Set lowpan context of sniffer
        self.simulator.set_lowpan_context(1, '2003::/64')
        self.simulator.set_lowpan_context(2, '2004::/64')

        self.nodes[DUT_ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[DUT_ROUTER2].get_state(), 'router')

        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')

        self.nodes[MED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MED1].get_state(), 'child')

        # 3 MED1: MED1 sends an ICMPv6 Echo Request to Router1 using GUA 2003::
        # address
        router1_addr = self.nodes[ROUTER1].get_addr("2003::/64")
        self.assertTrue(router1_addr is not None)
        self.assertTrue(self.nodes[MED1].ping(router1_addr))

        # Wait for sniffer got Address Notification messages
        self.simulator.go(1)

        # Verify DUT_ROUTER2 sent an Address Query Request
        dut_router2_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_router2_messages.next_coap_message('0.02', '/a/aq')
        command.check_address_query(
            msg,
            self.nodes[DUT_ROUTER2],
            config.REALM_LOCAL_ALL_ROUTERS_ADDRESS,
        )

        # Verify the DUT_ROUTER2 forwarded ICMPv6 Echo Request to ROUTER1
        msg = dut_router2_messages.get_icmp_message(ipv6.ICMP_ECHO_REQUEST)
        assert (
            msg is not None
        ), "Error: The DUT_ROUTER2 didn't forward ICMPv6 Echo Request to ROUTER1"
        msg.assertSentToNode(self.nodes[ROUTER1])

        # 4 BR: BR sends an ICMPv6 Echo Request to MED1 using GUA 2003::
        # address
        med1_addr = self.nodes[MED1].get_addr("2003::/64")
        self.assertTrue(med1_addr is not None)
        self.assertTrue(self.nodes[BR].ping(med1_addr))

        # Wait for sniffer got Address Notification messages
        self.simulator.go(1)

        # Verify DUT_ROUTER2 sent an Address Notification message
        dut_router2_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_router2_messages.next_coap_message('0.02', '/a/an')
        command.check_address_notification(
            msg, self.nodes[DUT_ROUTER2], self.nodes[BR]
        )

        # 5 MED1: MED1 sends an ICMPv6 Echo Request to ROUTER1 using GUA 2003::
        # address
        addr = self.nodes[ROUTER1].get_addr("2003::/64")
        self.assertTrue(addr is not None)
        self.assertTrue(self.nodes[MED1].ping(addr))

        # Wait for sniffer got ICMPv6 Echo Reply
        self.simulator.go(1)

        # Verify DUT_ROUTER2 didn't generate an Address Query Request
        # (the target EID must be served from the DUT's EID cache).
        dut_router2_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        dut_router2_messages_temp = copy.deepcopy(dut_router2_messages)
        msg = dut_router2_messages.next_coap_message('0.02', '/a/aq', False)
        assert (
            msg is None
        ), "Error: The DUT_ROUTER2 sent an unexpected Address Query Request"

        # Verify DUT_ROUTER2 forwarded ICMPv6 Echo Reply to MED1
        msg = dut_router2_messages_temp.get_icmp_message(
            ipv6.ICMP_ECHO_RESPONSE
        )
        assert (
            msg is not None
        ), "Error: The DUT_ROUTER2 didn't forward ICMPv6 Echo Reply to MED1"
        msg.assertSentToNode(self.nodes[MED1])

        # 6 DUT_ROUTER2: Power off ROUTER1 and wait 580 seconds to allow the
        # LEADER to expire its Router ID
        router1_id = self.nodes[ROUTER1].get_router_id()
        self.nodes[ROUTER1].stop()
        self.simulator.go(580)

        # Send an ICMPv6 Echo Request from MED1 to ROUTER1 GUA 2003:: address
        self.assertFalse(self.nodes[MED1].ping(router1_addr))

        # Verify the DUT_ROUTER2 has removed all entries based on ROUTER1's
        # Router ID
        command.check_router_id_cached(
            self.nodes[DUT_ROUTER2], router1_id, False
        )

        # Verify DUT_ROUTER2 sent an Address Query Request
        dut_router2_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_router2_messages.next_coap_message('0.02', '/a/aq')
        msg.assertSentToDestinationAddress(
            config.REALM_LOCAL_ALL_ROUTERS_ADDRESS
        )

        # 7 MED1: Power off MED1 and wait to allow DUT_ROUTER2 to timeout the
        # child
        self.nodes[MED1].stop()
        self.simulator.go(config.MLE_END_DEVICE_TIMEOUT)

        # BR sends two ICMPv6 Echo Requests to MED1 GUA 2003:: address
        self.assertFalse(self.nodes[BR].ping(med1_addr))
        self.assertFalse(self.nodes[BR].ping(med1_addr))

        # Verify DUT_ROUTER2 didn't generate an Address Notification message.
        # Fix: use '/a/an' (no trailing slash) to match the resource name
        # checked in step 4 — the previous '/a/an/' could never match any
        # message, which made this negative check vacuously true.
        dut_router2_messages = self.simulator.get_messages_sent_by(DUT_ROUTER2)
        msg = dut_router2_messages.next_coap_message('0.02', '/a/an', False)
        assert (
            msg is None
        ), "Error: The DUT_ROUTER2 sent an unexpected Address Notification message"
# Allow running this certification script directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
from datetime import datetime
from collections import defaultdict
import bisect
import numpy as np
import pandas as pd
from .core import new_dd_object, Series
from ..array.core import Array
from .utils import is_index_like, meta_nonempty
from . import methods
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
class _IndexerBase(object):
def __init__(self, obj):
self.obj = obj
@property
def _name(self):
return self.obj._name
@property
def _meta_indexer(self):
raise NotImplementedError
def _make_meta(self, iindexer, cindexer):
"""
get metadata
"""
if cindexer is None:
return self.obj
else:
return self._meta_indexer[:, cindexer]
class _iLocIndexer(_IndexerBase):
    """Positional indexing helper; only column selection is supported."""

    @property
    def _meta_indexer(self):
        return self.obj._meta.iloc

    def __getitem__(self, key):
        # Only the form ``df.iloc[:, cols]`` is supported for a dask frame.
        msg = (
            "'DataFrame.iloc' only supports selecting columns. "
            "It must be used like 'df.iloc[:, column_indexer]'."
        )
        if not isinstance(key, tuple):
            raise NotImplementedError(msg)
        if len(key) > 2:
            raise ValueError("Too many indexers")
        rows, cols = key
        if rows != slice(None):
            raise NotImplementedError(msg)
        return self._iloc(rows, cols)

    def _iloc(self, iindexer, cindexer):
        assert iindexer == slice(None)
        # The row part is a full slice, so this is a pure column take,
        # applied independently to every partition.
        meta = self._make_meta(iindexer, cindexer)
        return self.obj.map_partitions(methods.iloc, cindexer, meta=meta)
class _LocIndexer(_IndexerBase):
    """ Helper class for the .loc accessor """

    @property
    def _meta_indexer(self):
        return self.obj._meta.loc

    def __getitem__(self, key):
        # Supports ``obj.loc[rows]`` and ``obj.loc[rows, cols]``.
        if isinstance(key, tuple):
            # multi-dimensional selection
            if len(key) > self.obj.ndim:
                # raise from pandas
                msg = "Too many indexers"
                raise pd.core.indexing.IndexingError(msg)
            iindexer = key[0]
            cindexer = key[1]
        else:
            # if self.obj is Series, cindexer is always None
            iindexer = key
            cindexer = None
        return self._loc(iindexer, cindexer)

    def _loc(self, iindexer, cindexer):
        """ Helper function for the .loc accessor """
        # Dispatch on the row-indexer type; cindexer is threaded through
        # unchanged to the specialized handlers.
        if isinstance(iindexer, Series):
            return self._loc_series(iindexer, cindexer)
        elif isinstance(iindexer, Array):
            return self._loc_array(iindexer, cindexer)
        elif callable(iindexer):
            # Callable indexer: evaluate it against the object, then recurse.
            return self._loc(iindexer(self.obj), cindexer)

        if self.obj.known_divisions:
            iindexer = self._maybe_partial_time_string(iindexer)
            if isinstance(iindexer, slice):
                return self._loc_slice(iindexer, cindexer)
            elif isinstance(iindexer, (list, np.ndarray)):
                return self._loc_list(iindexer, cindexer)
            else:
                # element should raise KeyError
                return self._loc_element(iindexer, cindexer)
        else:
            if isinstance(iindexer, (list, np.ndarray)):
                # applying map_partitions to each partition
                # results in duplicated NaN rows
                msg = "Cannot index with list against unknown division"
                raise KeyError(msg)
            elif not isinstance(iindexer, slice):
                iindexer = slice(iindexer, iindexer)
            # Without known divisions the owning partition cannot be located,
            # so apply a tolerant .loc to every partition.
            meta = self._make_meta(iindexer, cindexer)
            return self.obj.map_partitions(
                methods.try_loc, iindexer, cindexer, meta=meta
            )

    def _maybe_partial_time_string(self, iindexer):
        """
        Convert index-indexer for partial time string slicing
        if obj.index is DatetimeIndex / PeriodIndex
        """
        idx = meta_nonempty(self.obj._meta.index)
        iindexer = _maybe_partial_time_string(idx, iindexer, kind="loc")
        return iindexer

    def _loc_series(self, iindexer, cindexer):
        # Series indexer (e.g. boolean mask): filter partition-wise.
        meta = self._make_meta(iindexer, cindexer)
        return self.obj.map_partitions(
            methods.loc, iindexer, cindexer, token="loc-series", meta=meta
        )

    def _loc_array(self, iindexer, cindexer):
        # Convert the dask Array to a Series sharing our index, then defer
        # to _loc_series.
        iindexer_series = iindexer.to_dask_dataframe("_", self.obj.index)
        return self._loc_series(iindexer_series, cindexer)

    def _loc_list(self, iindexer, cindexer):
        name = "loc-%s" % tokenize(iindexer, self.obj)
        # Group the requested labels by owning partition.
        parts = self._get_partitions(iindexer)
        meta = self._make_meta(iindexer, cindexer)

        if len(iindexer):
            dsk = {}
            divisions = []
            items = sorted(parts.items())
            for i, (div, indexer) in enumerate(items):
                dsk[name, i] = (methods.loc, (self._name, div), indexer, cindexer)
                # append minimum value as division
                divisions.append(sorted(indexer)[0])
            # append maximum value of the last division
            divisions.append(sorted(items[-1][1])[-1])
        else:
            # Empty selection: a single empty partition, unknown divisions.
            divisions = [None, None]
            dsk = {(name, 0): meta.head(0)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])
        return new_dd_object(graph, name, meta=meta, divisions=divisions)

    def _loc_element(self, iindexer, cindexer):
        # Single-label lookup: one task against the owning partition.
        name = "loc-%s" % tokenize(iindexer, self.obj)
        part = self._get_partitions(iindexer)

        if iindexer < self.obj.divisions[0] or iindexer > self.obj.divisions[-1]:
            raise KeyError("the label [%s] is not in the index" % str(iindexer))

        dsk = {
            (name, 0): (
                methods.loc,
                (self._name, part),
                slice(iindexer, iindexer),
                cindexer,
            )
        }
        meta = self._make_meta(iindexer, cindexer)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])
        return new_dd_object(graph, name, meta=meta, divisions=[iindexer, iindexer])

    def _get_partitions(self, keys):
        # Returns a partition number for a scalar, or a dict of
        # partition -> labels for a list-like.
        if isinstance(keys, (list, np.ndarray)):
            return _partitions_of_index_values(self.obj.divisions, keys)
        else:
            # element
            return _partition_of_index_value(self.obj.divisions, keys)

    def _coerce_loc_index(self, key):
        return _coerce_loc_index(self.obj.divisions, key)

    def _loc_slice(self, iindexer, cindexer):
        name = "loc-%s" % tokenize(iindexer, cindexer, self)
        assert isinstance(iindexer, slice)
        assert iindexer.step in (None, 1)

        # Partition numbers containing the slice endpoints (default: full span).
        if iindexer.start is not None:
            start = self._get_partitions(iindexer.start)
        else:
            start = 0
        if iindexer.stop is not None:
            stop = self._get_partitions(iindexer.stop)
        else:
            stop = self.obj.npartitions - 1

        # Index values of the endpoints, coerced to the divisions' dtype.
        if iindexer.start is None and self.obj.known_divisions:
            istart = self.obj.divisions[0]
        else:
            istart = self._coerce_loc_index(iindexer.start)
        if iindexer.stop is None and self.obj.known_divisions:
            istop = self.obj.divisions[-1]
        else:
            istop = self._coerce_loc_index(iindexer.stop)

        if stop == start:
            # Slice falls entirely within one partition.
            dsk = {
                (name, 0): (
                    methods.loc,
                    (self._name, start),
                    slice(iindexer.start, iindexer.stop),
                    cindexer,
                )
            }
            divisions = [istart, istop]
        else:
            # First partition is trimmed on the left only.
            dsk = {
                (name, 0): (
                    methods.loc,
                    (self._name, start),
                    slice(iindexer.start, None),
                    cindexer,
                )
            }
            # Interior partitions are taken whole (aliased directly when no
            # column selection is needed).
            for i in range(1, stop - start):
                if cindexer is None:
                    dsk[name, i] = (self._name, start + i)
                else:
                    dsk[name, i] = (
                        methods.loc,
                        (self._name, start + i),
                        slice(None, None),
                        cindexer,
                    )

            # Last partition is trimmed on the right only.
            dsk[name, stop - start] = (
                methods.loc,
                (self._name, stop),
                slice(None, iindexer.stop),
                cindexer,
            )

            # New divisions: clamp the outer divisions to the slice bounds,
            # keep the interior divisions unchanged.
            if iindexer.start is None:
                div_start = self.obj.divisions[0]
            else:
                div_start = max(istart, self.obj.divisions[start])

            if iindexer.stop is None:
                div_stop = self.obj.divisions[-1]
            else:
                div_stop = min(istop, self.obj.divisions[stop + 1])

            divisions = (
                (div_start,) + self.obj.divisions[start + 1 : stop + 1] + (div_stop,)
            )

        assert len(divisions) == len(dsk) + 1

        meta = self._make_meta(iindexer, cindexer)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self.obj])
        return new_dd_object(graph, name, meta=meta, divisions=divisions)
def _partition_of_index_value(divisions, val):
    """ In which partition does this value lie?

    >>> _partition_of_index_value([0, 5, 10], 3)
    0
    >>> _partition_of_index_value([0, 5, 10], 8)
    1
    >>> _partition_of_index_value([0, 5, 10], 100)
    1
    >>> _partition_of_index_value([0, 5, 10], 5)  # left-inclusive divisions
    1
    """
    if divisions[0] is None:
        raise ValueError("Can not use loc on DataFrame without known divisions")
    coerced = _coerce_loc_index(divisions, val)
    pos = bisect.bisect_right(divisions, coerced) - 1
    # Clamp: values beyond the final division belong to the last partition.
    return min(len(divisions) - 2, max(0, pos))
def _partitions_of_index_values(divisions, values):
""" Return defaultdict of division and values pairs
Each key corresponds to the division which values are index values belong
to the division.
>>> sorted(_partitions_of_index_values([0, 5, 10], [3]).items())
[(0, [3])]
>>> sorted(_partitions_of_index_values([0, 5, 10], [3, 8, 5]).items())
[(0, [3]), (1, [8, 5])]
"""
if divisions[0] is None:
msg = "Can not use loc on DataFrame without known divisions"
raise ValueError(msg)
results = defaultdict(list)
values = pd.Index(values, dtype=object)
for val in values:
i = bisect.bisect_right(divisions, val)
div = min(len(divisions) - 2, max(0, i - 1))
results[div].append(val)
return results
def _coerce_loc_index(divisions, o):
""" Transform values to be comparable against divisions
This is particularly valuable to use with pandas datetimes
"""
if divisions and isinstance(divisions[0], datetime):
return pd.Timestamp(o)
if divisions and isinstance(divisions[0], np.datetime64):
return np.datetime64(o).astype(divisions[0].dtype)
return o
def _maybe_partial_time_string(index, indexer, kind):
    """
    Convert indexer for partial string selection
    if data has DatetimeIndex/PeriodIndex
    """
    # do not pass dd.Index
    assert is_index_like(index)
    if not isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
        # Partial-string semantics only apply to datetime-like indexes.
        return indexer

    if isinstance(indexer, slice):
        start, stop = indexer.start, indexer.stop
        if isinstance(start, str):
            start = index._maybe_cast_slice_bound(start, "left", kind)
        if isinstance(stop, str):
            stop = index._maybe_cast_slice_bound(stop, "right", kind)
        return slice(start, stop)

    if isinstance(indexer, str):
        # A bare string selects the whole period it denotes.
        left = index._maybe_cast_slice_bound(indexer, "left", "loc")
        right = index._maybe_cast_slice_bound(indexer, "right", "loc")
        return slice(min(left, right), max(left, right))

    return indexer
| |
#!/usr/bin/python
import socket
import ssl
import os
import re
import time
import sys
import string
import hashlib
import traceback
import irc
import getpass
from threading import Thread
from threading import RLock as Lock
import Queue
import chardet
import modjson
# Shared JSON codec used to snapshot/restore bouncer state (see
# BouncerReload); modjson presumably extends JSON with pyIRC object
# support — confirm against the modjson module.
dec = modjson.ModJSONDecoder()
enc = modjson.ModJSONEncoder(indent=3)

# TODO: Rewrite this *entire* module and make more efficient.
_listnumerics = dict(b=(367, 368, "channel ban list"),
e=(348, 349, "Channel Exception List"),
I=(346, 347, "Channel Invite Exception List"),
w=(910, 911, "Channel Access List"),
g=(941, 940, "chanel spamfilter list"),
X=(954, 953, "channel exemptchanops list"))
def BouncerReload(BNC):
    """Serialize a running Bouncer and its per-network configs, stop it,
    and rebuild a fresh instance from the JSON snapshot.

    Each attached network drops the old addon and re-registers via the
    decoded config.  Returns the replacement Bouncer.

    NOTE(review): uses Thread.isAlive(), which only exists on Python 2 /
    pre-3.9 (renamed is_alive) — consistent with this module's Python 2
    syntax elsewhere.
    """
    networks, configs = zip(*BNC.conf.items())
    json = enc.encode([BNC, configs])
    if BNC.isAlive():
        BNC.stop()
    # decode() presumably reconstructs the Bouncer plus per-network
    # addon-config dicts — confirm against modjson.
    newBNC, newconfs = dec.decode(json)
    for network, newconf in zip(networks, newconfs):
        network.rmAddon(BNC)
        network.addAddon(**newconf)
    return newBNC
class Bouncer (Thread):
__name__ = "Bouncer for pyIRC"
__version__ = "2.0"
__author__ = "Brian Sherson"
__date__ = "February 21, 2014"
def __init__(self, addr="", port=16667, secure=False, ipv6=False, certfile=None, keyfile=None, ignore=None, debug=False, timeout=300, autoaway=None, servname="bouncer.site"):
self.addr = addr
self.port = port
self.conf = {}
self.passwd = {}
self.socket = None
self.secure = secure
self.ipv6 = ipv6
self.certfile = certfile
self.keyfile = keyfile
self.clients = []
self.ignore = ignore
self.debug = debug
self.timeout = timeout
self.autoaway = autoaway
self.servname = servname
self._stopexpected = False
# Keep track of what extensions/clients are requesting WHO, WHOIS, and LIST, because we don't want to spam every bouncer connection with the server's replies.
# In the future, MAY implement this idea in the irc module.
self._whoexpected = {}
self._whoisexpected = {}
self._listexpected = {}
self.lock = Lock()
self.starttime = int(time.time())
Thread.__init__(self)
self.daemon = True
self.start()
def __repr__(self):
h = hash(self)
return "<Bouncer listening on {self.addr}:{self.port} at 0x{h:x}0>".format(**vars())
def run(self):
self.socket = socket.socket(
socket.AF_INET6 if self.ipv6 else socket.AF_INET)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((self.addr, self.port))
self.socket.listen(5)
#print ((self,"Now listening on port "+str(self.port)))
while True:
try:
(connection, addr) = self.socket.accept()
if self.secure:
connection = ssl.wrap_socket(
connection, server_side=True, certfile=self.certfile, keyfile=self.keyfile, ssl_version=ssl.PROTOCOL_SSLv23)
#print ((self,"New client connecting from %s:%s"%addr))
except socket.error:
# print "Shutting down Listener"
self.socket.close()
if not self._stopexpected:
raise
sys.exit()
except:
tb = traceback.format_exc()
print >>sys.stderr, tb
continue
connection.settimeout(self.timeout)
bouncer = BouncerConnection(
self, connection, addr, debug=self.debug)
time.sleep(0.5)
try:
self.socket.close()
except:
pass
self.socket = None
Thread.__init__(self)
self.daemon = True
def onAddonAdd(self, context, label, passwd=None, hashtype="sha512", ignore=None, autoaway=None, translations=[], hidden=[]):
for (context2, conf2) in self.conf.items():
if context == context2:
raise ValueError, "Context already exists in config."
if label == conf2.label:
raise ValueError, "Unique label required."
if passwd == None:
while True:
passwd = getpass.getpass("Enter new password: ")
if passwd == getpass.getpass("Confirm new password: "):
break
print "Passwords do not match!"
passwd = hashlib.new(hashtype, passwd).hexdigest()
conf = irc.Config(self, label=label, passwd=passwd, hashtype=hashtype, ignore=ignore, autoaway=autoaway, translations=[
(key if type(key) == irc.Channel else context[key], value) for key, value in translations], hidden=irc.ChanList(hidden, context=context))
self.conf[context] = conf
self._whoexpected[context] = []
if self.debug:
context.logwrite(
"dbg [Bouncer.onAddonAdd] Clearing WHO expected list." % vars())
self._whoisexpected[context] = []
self._listexpected[context] = []
return conf
def onAddonRem(self, context):
for client in self.clients:
if client.context == context:
client.quit(quitmsg="Bouncer extension removed")
del self.conf[context]
del self._whoexpected[context], self._whoisexpected[
context], self._listexpected[context]
def stop(self, disconnectall=False):
self._stopexpected = True
self.socket.shutdown(0)
if disconnectall:
self.disconnectall()
def disconnectall(self, quitmsg="Disconnecting all sessions"):
for client in self.clients:
client.quit(quitmsg=quitmsg)
def onDisconnect(self, context, expected=False):
self._whoexpected[context] = []
self._whoisexpected[context] = []
self._listexpected[context] = []
if context.identity:
for channel in context.identity.channels:
self.broadcast(context, origin=context.identity, cmd="PART", target=channel, extinfo="Bouncer Connection Lost", clients=[
client for client in self.clients if channel not in client.hidden])
self.broadcast(context, origin=context.identity,
cmd="QUIT", extinfo="Bouncer Connection Lost")
self.broadcast(
context, origin=self.servname, cmd="NOTICE", target=context.identity,
extinfo=":Connection to %s:%s has been lost." % (context.server, context.port))
def onQuit(self, context, user, quitmsg):
# For some odd reason, certain networks (*cough*Freenode*cough*) will send a quit message for the user, causing context.identity.channels to be cleared
# before onDisconnect can be executed. This is the remedy.
if user == context.identity:
for channel in context.identity.channels:
self.broadcast(context, origin=user, cmd="PART", target=channel, extinfo="Bouncer Connection Lost", clients=[
client for client in self.clients if channel not in client.hidden])
self.broadcast(context, origin=user, cmd="QUIT", extinfo=quitmsg, clients=[
client for client in self.clients if any([user in channel for channel in context.channels if channel not in client.hidden])])
def onConnectAttempt(self, context):
self.broadcast(
context, origin=self.servname, cmd="NOTICE", target=context.identity,
extinfo="Attempting connection to %s:%s." % (context.server, context.port))
def onConnect(self, context):
self.broadcast(
context, origin=self.servname, cmd="NOTICE", target=context.identity,
extinfo="Connection to %s:%s established." % (context.server, context.port))
def onMeNickChange(self, context, newnick):
for client in self.clients:
if client.context == context:
client.send(
origin=context.identity, cmd="NICK", target=newnick)
client.nick = newnick
def onNickChange(self, context, user, newnick):
self.broadcast(context, origin=user, cmd="NICK", target=newnick, clients=[
client for client in self.clients if any([user in channel for channel in context.channels if channel not in client.hidden])])
def onRegistered(self, context):
for client in self.clients:
if client.context == context:
if client.nick != context.identity.nick:
client.send(origin="%s!%s@%s" %
(client.nick, client.username, client.host), cmd="NICK", target=context.identity.nick)
client.nick = context.identity.nick
def onConnectFail(self, context, exc, excmsg, tb):
for client in self.clients:
if client.context == context:
client.send(
origin=self.servname, cmd="NOTICE", target=client.nick,
extinfo="Connection to %s:%s failed: %s." % (context.server, context.port, excmsg))
def onSendChanMsg(self, context, origin, channel, targetprefix, msg):
# Called when bot sends a PRIVMSG to channel.
# The variable origin refers to a class instance voluntarily
# identifying itself as that which requested data be sent.
self.broadcast(
context, origin=context.identity, cmd="PRIVMSG", targetprefix=targetprefix,
target=channel, extinfo=msg, clients=[client for client in self.clients if client != origin])
def onSendChanAction(self, context, origin, channel, targetprefix, action):
self.onSendChanMsg(
context, origin, channel, targetprefix, u"\x01ACTION {action}\x01".format(**vars()))
def onSendChanNotice(self, context, origin, channel, targetprefix, msg):
# Called when bot sends a NOTICE to channel.
# The variable origin refers to a class instance voluntarily
# identifying itself as that which requested data be sent.
self.broadcast(
context, origin=context.identity, cmd="NOTICE", targetprefix=targetprefix,
target=channel, extinfo=msg, clients=[client for client in self.clients if client != origin])
def onSend(self, context, origin, line, cmd, target, targetprefix, params, extinfo):
if cmd.upper() == "WHO":
self._whoexpected[context].append(origin)
if self.debug:
if issubclass(type(origin), Thread):
name = origin.name
context.logwrite(
"dbg [Bouncer.onSend] Adding {origin} ({name}) to WHO expected list.".format(**vars()))
else:
context.logwrite(
"dbg [Bouncer.onSend] Adding %(origin)s to WHO expected list." % vars())
context.logwrite(
"dbg [Bouncer.onSend] WHO expected list size: %d" % len(self._whoexpected[context]))
elif cmd.upper() == "WHOIS":
self._whoisexpected[context].append(origin)
elif cmd.upper() == "LIST":
self._listexpected[context].append(origin)
def onWhoEntry(self, context, origin, channel, user, channame, username, host, serv, nick, flags, hops, realname):
# Called when a WHO list is received.
if len(self._whoexpected[context]):
client = self._whoexpected[context][0]
if client in self.clients:
client.send(origin=origin, cmd=352, target=context.identity, params=u"{channame} {username} {host} {serv} {nick} {flags}".format(
**vars()), extinfo=u"{hops} {realname}".format(**vars()))
# client.send(":%s 352 %s %s %s %s %s %s %s :%s %s\n"%(origin, context.identity.nick, channame, username, host, serv, nick, flags, hops, realname))
def onWhoEnd(self, context, origin, param, endmsg):
# Called when a WHO list is received.
if len(self._whoexpected[context]) and self._whoexpected[context][0] in self.clients:
client = self._whoexpected[context][0]
client.send(
origin=origin, cmd=315, target=context.identity, params=param, extinfo=endmsg)
#client.send(":%s 315 %s %s :%s\n"%(origin, context.identity.nick, param, endmsg))
if self.debug:
if issubclass(type(self._whoexpected[context][0]), Thread):
name = self._whoexpected[context][0].name
context.logwrite(
"dbg [Bouncer.onWhoEnd] Removing %s (%s) from WHO expected list." %
(self._whoexpected[context][0], name))
else:
context.logwrite(
"dbg [Bouncer.onWhoEnd] Removing %s from WHO expected list." % self._whoexpected[context][0])
del self._whoexpected[context][0]
if self.debug:
context.logwrite(
"dbg [Bouncer.onWhoEnd] WHO expected list size: %d" %
len(self._whoexpected[context]))
def onListStart(self, context, origin, params, extinfo):
# Called when a WHO list is received.
if len(self._listexpected[context]) and self._listexpected[context][0] in self.clients:
client = self._listexpected[context][0]
client.send(origin=origin, cmd=321,
target=context.identity, params=params, extinfo=extinfo)
#client.send(":%s 321 %s %s :%s\n"%(origin, context.identity.nick, params, extinfo))
def onListEntry(self, context, origin, channel, population, extinfo):
# Called when a WHO list is received.
if len(self._listexpected[context]) and self._listexpected[context][0] in self.clients:
client = self._listexpected[context][0]
client.send(origin=origin, cmd=322, target=context.identity,
params=u"{channel.name} {population}".format(**vars()), extinfo=extinfo)
# client.send(":%s 322 %s %s %d :%s\n"%(origin, context.identity.nick, channame, population, extinfo))
def onListEnd(self, context, origin, endmsg):
# Called when a WHO list is received.
if len(self._listexpected[context]) and self._listexpected[context][0] in self.clients:
client = self._listexpected[context][0]
client.send(
origin=origin, cmd=323, target=context.identity, extinfo=endmsg)
# client.send(":%s 323 %s :%s\n"%(origin, context.identity.nick, endmsg))
del self._listexpected[context][0]
def onWhoisStart(self, context, origin, user, nickname, username, host, realname):
# Called when a WHOIS reply is received.
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=311, target=context.identity,
params=u"{nickname} {username} {host} *".format(**vars()), extinfo=realname)
# client.send(":%s 311 %s %s %s %s * :%s\n" % (origin, context.identity.nick, nickname, username, host, realname))
def onWhoisRegisteredNick(self, context, origin, user, nickname, msg):
# Called when a WHOIS reply is received.
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(
origin=origin, cmd=307, target=context.identity, params=nickname, extinfo=msg)
# client.send(":%s 307 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))
def onWhoisConnectingFrom(self, context, origin, user, nickname, msg):
# Called when a WHOIS reply is received.
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=378,
target=context.identity, params=nickname, extinfo=msg)
# client.send(":%s 378 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))
def onWhoisChannels(self, context, origin, user, nickname, chanlist):
# Called when a WHOIS reply is received.
# TODO: Translations implementation
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=319, target=context.identity,
params=nickname, extinfo=" ".join(chanlist))
# client.send(":%s 319 %s %s :%s\n" % (origin, context.identity.nick, nickname, " ".join(chanlist)))
def onWhoisAvailability(self, context, origin, user, nickname, msg):
# Called when a WHOIS reply is received.
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(
origin=origin, cmd=310, target=context.identity, params=nickname, extinfo=msg)
# client.send(":%s 310 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))
def onWhoisServer(self, context, origin, user, nickname, server, servername):
# Called when a WHOIS reply is received.
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=312, target=context.identity,
params=u"{nickname} {server}".format(**vars()), extinfo=servername)
# client.send(":%s 312 %s %s %s :%s\n" % (origin, context.identity.nick, nickname, server, servername))
def onWhoisOp(self, context, origin, user, nickname, msg):
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(
origin=origin, cmd=313, target=context.identity, params=nickname, extinfo=msg)
# client.send(":%s 313 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))
def onWhoisAway(self, context, origin, user, nickname, awaymsg):
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=301, target=context.identity,
params=u"{nickname} {idletime} {signontime}".format(**vars()), extinfo=awaymsg)
# client.send(":%s 301 %s %s :%s\n" % (origin, context.identity.nick, nickname, awaymsg))
def onWhoisTimes(self, context, origin, user, nickname, idletime, signontime, msg):
if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
client = self._whoisexpected[context][0]
client.send(origin=origin, cmd=317, target=context.identity,
params=u"{nickname} {idletime} {signontime}".format(**vars()), extinfo=msg)
# client.send(":%s 317 %s %s %d %d :%s\n" % (origin, context.identity.nick, nickname, idletime, signontime, msg))
    def onWhoisSSL(self, context, origin, user, nickname, msg):
        """Relay the 671 (secure-connection) WHOIS numeric to the waiting client."""
        if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
            client = self._whoisexpected[context][0]
            client.send(origin=origin, cmd=671,
                        target=context.identity, params=nickname, extinfo=msg)
        # client.send(":%s 671 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))

    def onWhoisModes(self, context, origin, user, nickname, msg):
        """Relay the 339 (user-modes) WHOIS numeric to the waiting client."""
        if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
            client = self._whoisexpected[context][0]
            client.send(
                origin=origin, cmd=339, target=context.identity, params=nickname, extinfo=msg)
        # ":%s 339 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg))
    def onWhoisLoggedInAs(self, context, origin, user, nickname, loggedinas, msg):
        """Relay RPL_WHOISACCOUNT (330: nick, account name) to the waiting client."""
        if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
            client = self._whoisexpected[context][0]
            client.send(origin=origin, cmd=330, target=context.identity,
                        params=" ".join((nickname, loggedinas)), extinfo=msg)
        # ":%s 330 %s %s %s :%s\n" % (origin, context.identity.nick, nickname, loggedinas, msg))

    def onWhoisEnd(self, context, origin, user, nickname, msg):
        """Relay RPL_ENDOFWHOIS (318) and dequeue the client waiting on this WHOIS."""
        if len(self._whoisexpected[context]) and self._whoisexpected[context][0] in self.clients:
            client = self._whoisexpected[context][0]
            client.send(origin=origin, cmd=318,
                        target=context.identity, params=nickname, extinfo=msg)
        # ":%s 318 %s %s :%s\n" % (origin, context.identity.nick, nickname, msg)
        # End of WHOIS: this client's turn in the queue is over, even if it
        # detached mid-query.
        del self._whoisexpected[context][0]
def onJoin(self, context, user, channel):
self.broadcast(context, origin=user, cmd="JOIN", target=channel, clients=[
client for client in self.clients if channel not in client.hidden])
def onOther(self, context, line, origin, cmd, target, params, extinfo, targetprefix):
conf = self.conf[context]
self.broadcast(
context, origin=origin, cmd=cmd, target=target, params=params, extinfo=extinfo,
targetprefix=targetprefix, clients=[client for client in self.clients if target not in client.hidden])
def broadcast(self, context, origin=None, cmd=None, target=None, params=None, extinfo=None, targetprefix=None, clients=None):
if clients == None:
clients = self.clients
for client in clients:
with client.lock:
if client.context == context and not client.quitting:
client.send(
origin, cmd, target, params, extinfo, targetprefix)
class BouncerConnection (Thread):
    """One inbound client connection to the bouncer.

    A daemon thread that performs PASS/NICK/USER registration against the
    bouncer's configuration and then relays traffic between the attached
    IRC client and its network context.
    """

    def __init__(self, bouncer, connection, addr, debug=False):
        # print "Initializing ListenThread..."
        self.bouncer = bouncer          # owning Bouncer instance
        self.connection = connection    # accepted client socket
        self.host, self.port = self.addr = addr[:2]
        self.context = None             # network context; bound during USER handling
        self.pwd = None
        self.nick = None
        self.label = None
        self.username = None
        self.realname = None
        # NOTE(review): replaces the (host, port) 2-tuple assigned above with
        # the full address tuple -- presumably intentional (IPv6 extras); confirm.
        self.addr = addr
        self.debug = debug
        self.lock = Lock()              # guards socket writes and quit state
        self.quitmsg = "Connection Closed"
        self.quitting = False
        self.hidden = irc.ChanList()    # channels this client chose to hide
        self.translations = {}
        self.namesx = False             # NAMESX/UHNAMES extensions, set via PROTOCTL
        self.uhnames = False
        Thread.__init__(self)
        self.daemon = True
        # The thread starts immediately; run() drives registration and relay.
        self.start()
def sendstr(self, data, flags=0):
with self.lock:
try:
self.connection.send(data.encode("utf8"))
except socket.error:
exc, excmsg, tb = sys.exc_info()
print >>self.context.logwrite(*["!!! [BouncerConnection.send] Exception in thread %(self)s" % vars()] + [
"!!! [BouncerConnection.send] %(tbline)s" % vars() for tbline in traceback.format_exc().split("\n")])
self.quit(quitmsg=excmsg.message)
    # Format and send a string to the client
    def send(self, origin=None, cmd=None, target=None, params=None, extinfo=None, targetprefix=None, flags=0):
        """Assemble an IRC line (":origin CMD target params :extinfo") and send it.

        Channel/User targets are flattened to their names, numeric commands
        are zero-padded to three digits.  Relies on format(**vars()), so the
        local variable names below are load-bearing.
        """
        if type(target) == irc.Channel:
            if targetprefix == None:
                targetprefix = ""
            # if target in self.translations.keys():
            # target=targetprefix+self.translations[target]
            # else:
            # target=targetprefix+target.name
            target = targetprefix + target.name
        elif type(target) == irc.User:
            target = target.nick
        if type(cmd) == int:
            # Numeric replies are always three digits on the wire.
            cmd = "%03d" % cmd
        # translated=[]
        # if params:
        # for param in params.split(" "):
        #chantypes=self.context.supports.get("CHANTYPES", irc._defaultchantypes)
        # if re.match(irc._chanmatch % re.escape(chantypes), param) and self.context[param] in self.translations.keys():
        # translated.append(self.translations[self.context[param]])
        # else:
        # translated.append(param)
        #params=" ".join(translated)
        if params:
            line = u"{cmd} {target} {params}".format(**vars())
        elif target:
            line = u"{cmd} {target}".format(**vars())
        else:
            line = cmd
        if extinfo != None:
            line = u"{line} :{extinfo}".format(**vars())
        if type(origin) == irc.User:
            line = u":{origin:full} {line}".format(**vars())
        elif origin:
            line = u":{origin} {line}".format(**vars())
        self.sendstr(u"{line}\n".format(**vars()))
        #server=self.context.server if self.context else "*"
        #port=self.context.port if self.context else "*"
        # if self.context and self.context.identity:
        # nick=self.context.identity.nick
        #ident=self.context.identity.username if self.context.identity.username else "*"
        #host=self.context.identity.host if self.context.identity.host else "*"
        # else:
        # nick="*"
        # ident="*"
        # host="*"
        # if self.context.ssl and self.context.ipv6:
        # protocol="ircs6"
        # elif self.context.ssl:
        # protocol="ircs"
        # elif self.context.ipv6:
        # protocol="irc6"
        # else:
        # protocol="irc"
        # addr=self.host
    def __repr__(self):
        """Human-readable summary: client host, identity and network URI."""
        return "<Bouncer connection from {self.host} to {self.context.identity} on {self.context:uri}>".format(**vars())
    def quit(self, quitmsg="Disconnected"):
        """Send an ERROR line (best effort) and close this client's socket.

        Idempotent: subsequent calls are no-ops once ``quitting`` is set.
        """
        with self.lock:
            if not self.quitting:
                self.quitmsg = quitmsg
                # Best-effort notification; the socket may already be dead,
                # hence the deliberate blanket excepts below.
                try:
                    self.send(cmd="ERROR", extinfo="Closing link: (%s@%s) [%s]\n" % (
                        self.context.identity.nick if self.context else "*", self.host, quitmsg))
                except:
                    pass
                try:
                    self.connection.shutdown(socket.SHUT_WR)
                    self.connection.close()
                except:
                    pass
                self.quitting = True
    def showchannel(self, channel):
        """Un-hide *channel* and replay JOIN/topic/names to this client if joined."""
        with self.context.lock, self.lock:
            if channel in self.hidden:
                self.hidden.remove(channel)
            # Only replay channel state if the bouncer's identity is actually
            # in the channel on the network side.
            if self.context.identity in channel.users:
                self.send(
                    origin=self.context.identity, cmd="JOIN", target=channel)
                self.sendchanneltopic(channel)
                self.sendchannelnames(channel)
    def sendchanneltopic(self, channel):
        """Send cached topic numerics for *channel*: 332+333, or 331 when unset."""
        with self.context.lock, self.lock:
            if channel.topic and channel.topictime:
                # 332 = topic text, 333 = who set it and when.
                self.send(origin=self.bouncer.servname, cmd=332,
                          target=self.context.identity, params=channel.name, extinfo=channel.topic)
                # u":{self.context.serv} 332 {self.context.identity.nick} {self.name} :{self.topic}".format(**vars())
                self.send(
                    origin=self.bouncer.servname, cmd=333, target=self.context.identity,
                    params="{channel.name} {channel.topicsetby} {channel.topictime}".format(**vars()))
                # u":{self.context.serv} 333 {self.context.identity.nick} {self.name} {self.topicsetby.nick} {self.topictime}".format(**vars())
            else:
                self.send(origin=self.bouncer.servname, cmd=331,
                          target=self.context.identity, params=channel.name, extinfo="No topic is set")
                # u":{self.context.serv} 331 {self.context.identity.nick}
                # {self.name} :No topic is set".format(**vars())]
    def sendchannelnames(self, channel):
        """Send the 353/366 NAMES reply for *channel* from cached state.

        Users are sorted ops-first per the server's PREFIX order, prefixes
        honour the client's NAMESX setting, and full user@host forms are used
        when UHNAMES was negotiated.  Lines are wrapped at 196 characters.
        """
        with self.context.lock, self.lock:
            secret = "s" in channel.modes.keys() and channel.modes["s"]
            private = "p" in channel.modes.keys() and channel.modes["p"]
            # 353 channel-visibility flag: @ secret, * private, = public.
            flag = "@" if secret else ("*" if private else "=")
            modes, symbols = supports = self.context.supports.get(
                "PREFIX", irc._defaultprefix)
            users = list(channel.users)
            # Sort by highest prefix mode first, then case-insensitive nick.
            users.sort(key=lambda user: ([user not in channel.modes.get(mode, [])
                       for mode, char in zip(*supports)], user.nick.lower()))
            if self.uhnames:
                template = u"{prefixes}{user:full}"
            else:
                template = u"{prefixes}{user}"
            nameslist = []
            for user in users:
                prefixes = u"".join(
                    [prefix if mode in channel.modes.keys() and user in channel.modes[mode] else "" for prefix, mode in zip(symbols, modes)])
                if not self.namesx:
                    # Without NAMESX only the highest-ranked prefix is sent.
                    prefixes = prefixes[:1]
                nameslist.append(template.format(**vars()))
            names = " ".join(nameslist)
            lines = []  # NOTE(review): unused leftover
            # Wrap into multiple 353 lines, breaking on spaces before 196 chars.
            while len(names) > 196:
                index = names.rfind(" ", 0, 196)
                slice = names[:index]
                self.send(
                    origin=self.bouncer.servname, cmd=353, target=self.context.identity,
                    params="{flag} {channel.name}".format(**vars()), extinfo=slice)
                #u":{channel.context.serv} 353 {channel.context.identity.nick} {flag} {channel.name} :{slice}".format(**vars())
                names = names[index + 1:]
            if len(names):
                self.send(
                    origin=self.bouncer.servname, cmd=353, target=self.context.identity,
                    params="{flag} {channel.name}".format(**vars()), extinfo=names)
                #u":{channel.context.serv} 353 {channel.context.identity.nick} {flag} {channel.name} :{names}".format(**vars())
            self.send(
                origin=self.bouncer.servname, cmd=366, target=self.context.identity,
                params=channel.name, extinfo="End of /NAMES list.")
            # u":{channel.context.serv} 366 {channel.context.identity.nick} {channel.name} :End of /NAMES list.".format(**vars())
    def sendchannelmodes(self, channel, modechars=None):
        """Send cached mode state for *channel*.

        With *modechars*, replies with the corresponding list modes (bans,
        etc.) via the numerics in ``_listnumerics``; otherwise sends a 324
        summary of the channel's flag and parameter modes.
        """
        with self.context.lock, self.lock:
            if modechars:
                for mode in modechars:
                    if mode not in _listnumerics.keys():
                        continue
                    # i = item numeric, e = end-of-list numeric, l = end text.
                    i, e, l = _listnumerics[mode]
                    if mode in channel.modes.keys():
                        for (mask, setby, settime) in channel.modes[mode]:
                            self.send(
                                origin=self.bouncer.servname, cmd=i, target=self.context.identity,
                                params=u"{channel.name} {mask} {setby} {settime}".format(**vars()))
                    self.send(origin=self.bouncer.servname, cmd=e,
                              target=self.context.identity, params=u"{channel.name} {l}".format(**vars()))
            else:
                items = channel.modes.items()
                chanmodes = self.context.supports.get(
                    "CHANMODES", irc._defaultchanmodes)
                prefix = self.context.supports.get(
                    "PREFIX", irc._defaultprefix)
                # Exclude list modes and user-prefix modes from the summary.
                modes = "".join(
                    [mode for (mode, val) in items if mode not in chanmodes[0] + prefix[0] and val])
                params = " ".join(
                    [val for (mode, val) in items if mode in chanmodes[1] + chanmodes[2] and val])
                if modes and params:
                    self.send(
                        origin=self.bouncer.servname, cmd=324, target=self.context.identity,
                        params="{channel.name} +{modes} {params}".format(**vars()))
                    # u":{channel.context.identity.server} 324 {channel.context.identity.nick} {channel.name} +{modes} {params}".format(**vars())
                elif modes:
                    self.send(
                        origin=self.bouncer.servname, cmd=324, target=self.context.identity,
                        params="{channel.name} +{modes}".format(**vars()))
                    # u":{channel.context.identity.server} 324 {channel.context.identity.nick} {channel.name} +{modes}".format(**vars())
    def sendsupports(self):
        """Replay the server's 005 ISUPPORT tokens to this client.

        UHNAMES/NAMESX are always advertised (the bouncer implements them
        itself), and output is wrapped at 196 characters per line.
        """
        with self.context.lock, self.lock:
            supports = [
                "CHANMODES=%s" % (",".join(value)) if name == "CHANMODES" else "PREFIX=(%s)%s" %
                value if name == "PREFIX" else "%s=%s" % (name, value) if value else name for name, value in self.context.supports.items()]
            if "UHNAMES" not in supports:
                supports.append("UHNAMES")
            if "NAMESX" not in supports:
                supports.append("NAMESX")
            supports.sort()
            supports = " ".join(supports)
            lines = []  # NOTE(review): unused leftover
            while len(supports) > 196:
                index = supports.rfind(" ", 0, 196)
                slice = supports[:index]
                self.send(
                    origin=self.bouncer.servname, cmd=5, target=self.context.identity,
                    params=slice, extinfo="are supported by this server")
                # u":{self.context.serv} 005 {self.context.identity.nick} {slice} :are supported by this server".format(**vars())
                supports = supports[index + 1:]
            if supports:
                self.send(
                    origin=self.bouncer.servname, cmd=5, target=self.context.identity,
                    params=supports, extinfo="are supported by this server")
                # u":{self.context.serv} 005 {self.context.identity.nick} {supports} :are supported by this server".format(**vars())
    def sendgreeting(self):
        """Replay the cached 001-004 registration numerics to this client."""
        with self.context.lock, self.lock:
            if self.context.welcome:
                self.send(origin=self.bouncer.servname, cmd=1,
                          target=self.context.identity, extinfo=self.context.welcome)
                # u":{self.context.serv} 001 {self.context.identity.nick} :{self.context.welcome}".format(**vars())
            if self.context.hostinfo:
                self.send(origin=self.bouncer.servname, cmd=2,
                          target=self.context.identity, extinfo=self.context.hostinfo)
                # u":{self.context.serv} 002 {self.context.identity.nick} :{self.context.hostinfo}".format(**vars())
            if self.context.servcreated:
                self.send(origin=self.bouncer.servname, cmd=3,
                          target=self.context.identity, extinfo=self.context.servcreated)
                # u":{self.context.serv} 003 {self.context.identity.nick} :{self.context.servcreated}".format(**vars())
            if self.context.servinfo:
                # 004 carries its payload as params, not trailing text.
                self.send(origin=self.bouncer.servname, cmd=4,
                          target=self.context.identity, params=self.context.servinfo)
                # u":{self.context.serv} 004 {self.context.identity.nick} {self.context.servinfo}".format(**vars())
    def sendmotd(self):
        """Replay the cached MOTD (375/372/376), or 422 when none is cached."""
        with self.context.lock, self.lock:
            if self.context.motdgreet and self.context.motd and self.context.motdend:
                self.send(origin=self.bouncer.servname, cmd=375,
                          target=self.context.identity, extinfo=self.context.motdgreet)
                # u":{server} 375 {self.identity.nick} :{self.motdgreet}".format(**vars())
                for motdline in self.context.motd:
                    self.send(origin=self.bouncer.servname, cmd=372,
                              target=self.context.identity, extinfo=motdline)
                    # u":{server} 372 {self.identity.nick} :{motdline}".format(**vars())
                self.send(origin=self.bouncer.servname, cmd=376,
                          target=self.context.identity, extinfo=self.context.motdend)
                # u":{server} 376 {self.identity.nick} :{self.motdend}".format(**vars())
            else:
                self.send(origin=self.bouncer.servname, cmd=422,
                          target=self.context.identity, extinfo="MOTD File is missing")
                # u":{server} 422 {self.identity.nick} :MOTD File is missing".format(**vars())
    def sendusermodes(self):
        """Send the identity's user modes (221) and, if set, the snomask (008)."""
        with self.context.lock, self.lock:
            self.send(
                origin=self.bouncer.servname, cmd=221, target=self.context.identity,
                params="+{self.context.identity.modes}".format(**vars()))
            # Server-notice mask only applies when usermode +s is active.
            if "s" in self.context.identity.modes:
                self.send(
                    origin=self.bouncer.servname, cmd=8, target=self.context.identity,
                    params="+{self.context.identity.snomask}".format(**vars()), extinfo="Server notice mask")
    def run(self):
        """Main client thread: resolve the peer, drive PASS/NICK/USER
        registration, then read and dispatch lines until disconnect.

        Python 2 code: uses print statements and ``except E, v`` syntax.
        """
        # Name loopup should happen here instead
        # Unwrap IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) to plain IPv4.
        ipv4match = re.findall(
            r"^::ffff:((\d+)\.(\d+)\.(\d+)\.(\d+))$", self.host)
        if self.bouncer.ipv6 and ipv4match:
            addr, a, b, c, d = ipv4match[0]
            if max(int(a), int(b), int(c), int(d)) < 256:
                self.host = addr
                self.ipv6 = False
        elif self.bouncer.ipv6:
            self.ipv6 = True
        # Best-effort reverse DNS of the peer address.
        try:
            self.host, aliaslist, addresslist = socket.gethostbyaddr(self.host)
            self.addr = (self.host, addr[1])
        except:
            pass
        # Add connection to connection list.
        passwd = None
        nick = None
        user = None
        addr = self.host
        readbuf = ""
        linebuf = []
        try:
            while True:
                # Read data (appending) into readbuf, then break lines and
                # append lines to linebuf
                while len(linebuf) == 0:
                    timestamp = irc.timestamp()
                    try:
                        read = self.connection.recv(512)
                    except socket.error, msg:
                        self.quit(msg)
                        sys.exit()
                    except ssl.SSLError, msg:
                        self.quit(msg)
                        sys.exit()
                    if read == "" and len(linebuf) == 0:  # No more data to process.
                        #self.quitmsg="Connection Closed"
                        sys.exit()
                    readbuf += read
                    lastlf = readbuf.rfind("\n")
                    if lastlf >= 0:
                        linebuf.extend(string.split(readbuf[0:lastlf], "\n"))
                        readbuf = readbuf[lastlf + 1:]
                line = string.rstrip(linebuf.pop(0))
                # Decode as UTF-8, falling back to charset detection.
                try:
                    line = line.decode("utf8")
                except UnicodeDecodeError:
                    # Attempt to figure encoding
                    charset = chardet.detect(line)['encoding']
                    line = line.decode(charset)
                # Split into command, target, params, trailing extinfo.
                match = re.findall(
                    "^(.+?)(?:\\s+(.+?)(?:\\s+(.+?))??)??(?:\\s+:(.*))?$", line, re.I)
                # print match
                if len(match) == 0:
                    continue
                (cmd, target, params, extinfo) = match[0]
                # --- Registration state machine: PASS, then NICK, then USER ---
                if not passwd:  # Bouncer expects a password
                    if cmd.upper() == "PASS":
                        passwd = target if target else extinfo
                    else:
                        self.quit("Access Denied")
                        print "*** [BouncerConnection] Incoming connection from %s failed: Expected PASS." % (self.host)
                        break
                elif not self.nick:  # Bouncer expects a NICK command
                    if cmd.upper() == "NICK":
                        self.nick = target if target else extinfo
                    else:
                        self.quit("Access Denied")
                        print "*** [BouncerConnection] Incoming connection from %s failed: Expected NICK." % (self.host)
                        break
                elif not self.username:  # Bouncer expects a USER command to finish registration
                    if cmd.upper() == "USER":
                        self.username = target
                        # The USER name selects which configured context
                        # (network) this client attaches to.
                        contextfound = False
                        for self.context, conf in self.bouncer.conf.items():
                            # print conf.label, self.username
                            if conf.label == self.username:
                                contextfound = True
                                break
                        if not contextfound:
                            self.quit("Access Denied")
                            print >>sys.stderr, "*** [BouncerConnection] Incoming connection from %s denied: Context not found." % (
                                self.host)
                            break
                        # Compare the hashed client password to the stored digest.
                        passmatch = hashlib.new(
                            conf.hashtype, passwd).hexdigest() == conf.passwd
                        with self.context.lock:
                            if not passmatch:
                                self.quit("Access Denied")
                                self.context.logwrite(
                                    "*** [BouncerConnection] Incoming connection from %s to %s denied: Invalid password." % (self.host, self.context))
                                self.bouncer.broadcast(
                                    self.context, origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                                    extinfo="Incoming connection from %s to %s denied: Invalid password." % (self.host, self.context))
                                # for client in self.bouncer.clients:
                                # if client.context!=self.context:
                                # continue
                                # if not client.quitting:
                                #client.send(origin=self.bouncer.servname, cmd="NOTICE", target=client.context.identity, extinfo="Incoming connection from %s to %s dened: Invalid password.\n" % (self.host, self.context))
                                break
                            self.context.logwrite(
                                "*** [BouncerConnection] Incoming connection from %s to %s established." % (self.host, self.context))
                            with self.bouncer.lock:
                                self.translations = dict(
                                    self.bouncer.conf[self.context].translations)
                                # Announce connection to all other bouncer
                                # clients.
                                self.bouncer.broadcast(
                                    self.context, origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                                    extinfo="Incoming connection from %s to %s established." % (self.host, self.context))
                                # for client in self.bouncer.clients:
                                # if client.context!=self.context:
                                # continue
                                # if not client.quitting:
                                #client.send(":*Bouncer* NOTICE %s :Incoming connection from %s to %s\n" % (client.context.identity.nick, self.host, self.context))
                                if len([client for client in self.bouncer.clients if client.context == self.context]) == 0 and self.context.registered and type(self.context.identity) == irc.User and self.context.identity.away:
                                    # Bouncer connection should
                                    # automatically return from away
                                    # status.
                                    self.context.raw("AWAY")
                                self.hidden = irc.ChanList(
                                    self.bouncer.conf[self.context].hidden, context=self.context)
                                self.bouncer.clients.append(self)
                            if self.context.registered:
                                # Send Greeting.
                                with self.lock:
                                    self.sendgreeting()
                                    self.sendsupports()
                                    self.sendmotd()
                                    self.sendusermodes()
                                # Join user to channels.
                                for channel in self.context.identity.channels:
                                    if channel not in self.hidden:
                                        self.showchannel(channel)
                            else:
                                self.send(
                                    origin=self.bouncer.servname, cmd="NOTICE", target=self.nick,
                                    extinfo="Not connected to server. Type /bncconnect to attempt connection.")
                            #self.send(u":%s 001 %s :Welcome to the Bouncer context Network %s!%s@%s\n" % ("*Bouncer*", self.nick, self.nick, self.username, self.host))
                    else:  # Client did not send USER command when expected
                        self.quit("Access Denied")
                        print "*** [BouncerConnection] Incoming connection from %s failed: Expected USER." % (self.host)
                        break
                else:
                    # --- Registered: dispatch or forward each client line ---
                    chantypes = self.context.supports.get(
                        "CHANTYPES", irc._defaultchantypes)
                    # Disable translating for now.
                    if False and cmd.upper() not in ("SETTRANSLATE", "RMTRANSLATE"):
                        translated = []
                        for targ in target.split(","):
                            translatefound = False
                            if re.match(irc._chanmatch % re.escape(chantypes), targ):
                                for channel, translate in self.translations.items():
                                    if targ.lower() == translate.lower():
                                        translated.append(channel.name)
                                        translatefound = True
                                        break
                            if not translatefound:
                                translated.append(targ)
                        target = ",".join(translated)
                        translated = []
                        for param in params.split(" "):
                            translatefound = False
                            if re.match(irc._chanmatch % re.escape(chantypes), param):
                                for channel, translate in self.translations.items():
                                    if param.lower() == translate.lower():
                                        translated.append(channel.name)
                                        translatefound = True
                                        break
                            if not translatefound:
                                translated.append(param)
                        params = " ".join(translated)
                    # Rebuild the (possibly translated) line before dispatch.
                    if params:
                        line = u"{cmd} {target} {params}".format(**vars())
                    elif target:
                        line = u"{cmd} {target}".format(**vars())
                    else:
                        line = cmd
                    if extinfo:
                        line = u"{line} :{extinfo}".format(**vars())
                    # Commands with a local cmdXXXX handler are intercepted;
                    # everything else is forwarded raw to the IRC server.
                    cmdmethod = "cmd%s" % cmd.upper()
                    if hasattr(self, cmdmethod):
                        method = getattr(self, cmdmethod)
                        try:
                            method(line, target, params, extinfo)
                        except SystemExit:
                            sys.exit()
                        except:
                            if self.context:
                                exc, excmsg, tb = sys.exc_info()
                                self.context.logwrite(*[u"!!! [BouncerConnection] Exception in thread %(self)s" % vars()] + [
                                    u"!!! [BouncerConnection] %(tbline)s" % vars() for tbline in traceback.format_exc().split("\n")])
                                print >>sys.stderr, "Exception in thread %(self)s" % vars(
                                )
                                print >>sys.stderr, traceback.format_exc()
                    elif not self.context.connected:
                        self.send(
                            origin=self.bouncer.servname, cmd="NOTICE", target=self.nick,
                            extinfo="Not connected to server. Type /bncconnect to attempt connection.")
                        continue
                    elif not self.context.registered:
                        self.send(origin=self.bouncer.servname, cmd="NOTICE",
                                  target=self.nick, extinfo="Not registered.")
                        continue
                    else:
                        self.context.raw(line, origin=self)
        except SystemExit:
            pass  # No need to pass error message if break resulted from sys.exit()
        except:
            exc, excmsg, tb = sys.exc_info()
            self.quitmsg = str(excmsg)
            if self.context:
                exc, excmsg, tb = sys.exc_info()
                self.context.logwrite(*["!!! [BouncerConnection] Exception in thread %(self)s" % vars()] + [
                    "!!! [BouncerConnection] %(tbline)s" % vars() for tbline in traceback.format_exc().split("\n")])
            print >>sys.stderr, "Exception in thread %(self)s" % vars()
            print >>sys.stderr, traceback.format_exc()
        finally:
            # Juuuuuuust in case.
            # Teardown: close socket, deregister, set auto-away, notify peers.
            with self.lock:
                try:
                    self.connection.shutdown(1)
                    self.connection.close()
                except:
                    pass
            if self.context:
                self.context.logwrite(
                    "*** [BouncerConnection] Connection from %s terminated (%s)." % (self.host, self.quitmsg))
                with self.bouncer.lock:
                    if self in self.bouncer.clients:
                        self.bouncer.clients.remove(self)
                        if self.context.connected and self.context.identity and len([client for client in self.bouncer.clients if client.context == self.context]) == 0 and self.context.registered and type(self.context.identity) == irc.User and not self.context.identity.away and self.bouncer.autoaway:
                            # Bouncer automatically sets away status.
                            self.context.raw("AWAY :%s" % self.bouncer.autoaway)
                        self.bouncer.broadcast(
                            self.context, origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                            extinfo="Connection from %s to %s terminated (%s)\n" % (self.host, self.context, self.quitmsg))
                        # ":*Bouncer* NOTICE %s :Connection from %s to %s terminated (%s)\n" % (client.context.identity.nick, self.host, self.context, self.quitmsg))
    def cmdQUIT(self, line, target, params, extinfo):
        """Handle client QUIT: disconnect this client only (the bouncer stays on)."""
        self.quit(extinfo)
        sys.exit()
def cmdPROTOCTL(self, line, target, params, extinfo):
protoparams = [target.upper()] + params.upper().split()
if "NAMESX" in protoparams:
self.namesx = True
if "UHNAMES" in protoparams:
self.uhnames = True
    def cmdPING(self, line, target, params, extinfo):
        """Answer a client PING locally with a PONG (never forwarded upstream)."""
        with self.context.lock:
            # NOTE(review): ``if True or ...`` makes the else branch
            # unreachable dead code, including the extra raw self.send below
            # it (which would pass a preformatted line as ``origin``).
            if True or (self.context.identity and type(self.context.identity) == irc.User):
                self.send(origin=self.bouncer.servname,
                          cmd="PONG", target=params, extinfo=target)
                # u":{self.context.identity.server} PONG {params}
                # :{target}\n".format(**vars()).encode("utf8"))
            else:
                self.send(origin=self.bouncer.servname,
                          cmd="PONG", params=params, extinfo=target)
                self.send(
                    u":{self.context.server} PONG {params} :{target}\n".format(**vars()).encode("utf8"))
    def cmdPRIVMSG(self, line, target, params, extinfo):
        """Forward PRIVMSG upstream, except CTCP LAGCHECK which is echoed back."""
        # Check if CTCP
        ctcp = re.findall("^\x01(.*?)(?:\\s+(.*?)\\s*)?\x01$", extinfo)
        if ctcp:
            (ctcptype, ext) = ctcp[0]  # Unpack CTCP info
            if ctcptype == "LAGCHECK":  # Client is doing a lag check. No need to send to context network, just reply back.
                self.send(
                    u":{self.context.identity:full} {line}\n".format(**vars()).encode("utf8"))
            else:
                self.context.raw(line, origin=self)
        else:
            self.context.raw(line, origin=self)
    def cmdMODE(self, line, target, params, extinfo):  # Will want to determine is requesting modes, or attempting to modify modes.
        """Handle MODE: answer queries from cached state when possible,
        otherwise forward the request/change to the server."""
        # if target and "CHANTYPES" in self.context.supports.keys() and
        # target[0] in self.context.supports["CHANTYPES"]:
        chantypes = self.context.supports.get(
            "CHANTYPES", irc._defaultchantypes)
        chanmodes = self.context.supports.get(
            "CHANMODES", irc._defaultchanmodes)
        prefix = self.context.supports.get("PREFIX", irc._defaultprefix)
        if re.match(irc._chanmatch % re.escape(chantypes), target):
            channel = self.context[target]
            if params == "":
                # We are requesting the modes for the channel
                if self.context.identity in channel.users:
                    # We are in the channel, and we know the channel modes
                    self.sendchannelmodes(channel)
                else:
                    # We are NOT in the channel, so we will forward the request
                    # to the server.
                    self.context.raw(
                        u"MODE {channel.name}".format(**vars()), origin=self)
            elif re.match("^\\+?[%s]+$" % chanmodes[0], params) and extinfo == "":
                # We are requesting one or more mode lists.
                modechars = ""
                for mode in params.lstrip("+"):
                    if mode not in modechars:
                        modechars += mode
                if self.context.identity in channel.users:
                    self.sendchannelmodes(channel, modechars)
                else:
                    self.context.raw(
                        u"MODE {channel.name} {params}".format(**vars()), origin=self)
            else:
                # Mode *change* request: always forwarded to the server.
                self.context.raw(line, origin=self)
        elif params == "" and target.lower() == self.context.identity.nick.lower():
            # Query of our own user modes: answered from cache.
            self.sendusermodes()
        else:
            self.context.raw(
                u"MODE {target} {params}".format(**vars()), origin=self)
def cmdNAMES(self, line, target, params, extinfo):
chantypes = self.context.supports.get(
"CHANTYPES", irc._defaultchantypes)
chanmodes = self.context.supports.get(
"CHANMODES", irc._defaultchanmodes)
prefix = self.context.supports.get("PREFIX", irc._defaultprefix)
fallback = []
with self.lock:
for channame in target.split():
if re.match(irc._chanmatch % re.escape(chantypes), channame):
channel = self.context[channame]
with self.lock:
if self.context.identity in channel:
self.sendchannelnames(channel)
else:
fallback.append(channame)
else:
fallback.append(channame)
if fallback:
self.context.raw("NAMES %s" %
(",".join(fallback)), origin=self)
    def cmdSHOW(self, line, target, params, extinfo):
        """Handle the bouncer-local SHOW command: un-hide the named channels."""
        chantypes = self.context.supports.get(
            "CHANTYPES", irc._defaultchantypes)
        with self.context.lock, self.lock:
            for channame in target.split():
                if re.match(irc._chanmatch % re.escape(chantypes), channame):
                    channel = self.context[channame]
                    if channel in self.hidden:
                        if self.context.identity in channel:
                            # Replays JOIN/topic/names and removes from hidden.
                            self.showchannel(channel)
                        else:
                            self.hidden.remove(channel)
                            self.send(
                                origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                                extinfo="{channel.name} removed from hidden list, but not joined.".format(**vars()))
                    else:
                        self.send(
                            origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                            extinfo="{channel.name} not in hidden list.".format(**vars()))
                else:
                    self.send(
                        origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                        extinfo="{channame}: invalid channel name.".format(**vars()))
    def cmdHIDE(self, line, target, params, extinfo):
        """Handle the bouncer-local HIDE command: hide the named channels
        from this client (sends a local PART without leaving upstream)."""
        chantypes = self.context.supports.get(
            "CHANTYPES", irc._defaultchantypes)
        with self.context.lock, self.lock:
            for channame in target.split():
                if re.match(irc._chanmatch % re.escape(chantypes), channame):
                    channel = self.context[channame]
                    if channel not in self.hidden:
                        if self.context.identity in channel:
                            self.send(
                                origin=self.context.identity, cmd="PART", target=channel, extinfo="Hiding channel")
                        else:
                            self.send(
                                origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                                extinfo="{channel.name} added to the hidden list, but not joined.".format(**vars()))
                        self.hidden.append(channel)
                    else:
                        self.send(
                            origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                            extinfo="{channel.name} already in hidden list.".format(**vars()))
                else:
                    self.send(
                        origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                        extinfo="{channame}: invalid channel name.".format(**vars()))
    def cmdSETTRANSLATE(self, line, target, params, extinfo):
        """Handle SETTRANSLATE: map channel *target* to the alias in *params*
        for this client, re-presenting the channel under the new name."""
        chantypes = self.context.supports.get(
            "CHANTYPES", irc._defaultchantypes)
        with self.context.lock, self.lock:
            # NOTE(review): the condition tests ``target`` twice; the second
            # re.match presumably was meant to validate ``params`` -- confirm.
            if re.match(irc._chanmatch % re.escape(chantypes), target) and re.match(irc._chanmatch % re.escape(chantypes), target):
                channel = self.context[target]
                # Compare aliases case-insensitively per the server's
                # CASEMAPPING (rfc1459 vs ascii).
                if self.context.supports.get("CASEMAPPING", "rfc1459") == "ascii":
                    translations_lower = [translation.translate(irc._rfc1459casemapping)
                                          for translation in self.translations.values()]
                    params_lower = params.translate(irc._rfc1459casemapping)
                else:
                    translations_lower = [translation.lower()
                                          for translation in self.translations.values()]
                    params_lower = params.lower()
                if params_lower in translations_lower:
                    # Alias already in use by another translation.
                    self.send(
                        origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                        extinfo="Cannot set translation for {channel.name} to {param}.".format(**vars()))
                else:
                    # PART under the old name, then rejoin under the alias.
                    self.send(origin=self.context.identity, cmd="PART",
                              target=channel, extinfo="Translating...")
                    self.translations[channel] = params
                    self.showchannel(channel)
    def cmdRMTRANSLATE(self, line, target, params, extinfo):
        """Handle RMTRANSLATE: drop the alias for channel *target* and
        re-present the channel under its real name."""
        chantypes = self.context.supports.get(
            "CHANTYPES", irc._defaultchantypes)
        with self.context.lock, self.lock:
            if re.match(irc._chanmatch % re.escape(chantypes), target):
                channel = self.context[target]
                if channel not in self.translations.keys():
                    self.send(
                        origin=self.bouncer.servname, cmd="NOTICE", target=self.context.identity,
                        extinfo="Cannot remove translation for {channel.name}.".format(**vars()))
                else:
                    # PART under the alias, then rejoin under the real name.
                    self.send(origin=self.context.identity, cmd="PART",
                              target=channel, extinfo="Translating...")
                    del self.translations[channel]
                    self.showchannel(channel)
| |
"""Support for UK public transport data provided by transportapi.com."""
from datetime import datetime, timedelta
from http import HTTPStatus
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_MODE, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)

# Attribute keys exposed on the sensor entities.
ATTR_ATCOCODE = "atcocode"
ATTR_LOCALITY = "locality"
ATTR_STOP_NAME = "stop_name"
ATTR_REQUEST_TIME = "request_time"
ATTR_NEXT_BUSES = "next_buses"
ATTR_STATION_CODE = "station_code"
ATTR_CALLING_AT = "calling_at"
ATTR_NEXT_TRAINS = "next_trains"

# Configuration keys for the transportapi.com credentials and queries.
CONF_API_APP_KEY = "app_key"
CONF_API_APP_ID = "app_id"
CONF_QUERIES = "queries"
CONF_ORIGIN = "origin"
CONF_DESTINATION = "destination"

# One query = a mode ("bus"/"train") plus origin/destination identifiers.
_QUERY_SCHEME = vol.Schema(
    {
        vol.Required(CONF_MODE): vol.All(cv.ensure_list, [vol.In(["bus", "train"])]),
        vol.Required(CONF_ORIGIN): cv.string,
        vol.Required(CONF_DESTINATION): cv.string,
    }
)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_APP_ID): cv.string,
        vol.Required(CONF_API_APP_KEY): cv.string,
        vol.Required(CONF_QUERIES): [_QUERY_SCHEME],
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Get the uk_transport sensor."""
    app_id = config.get(CONF_API_APP_ID)
    app_key = config.get(CONF_API_APP_KEY)
    queries = config.get(CONF_QUERIES)
    # Scale the update throttle with the number of configured queries
    # (~87 s each) -- presumably to stay within the API's request quota.
    interval = timedelta(seconds=87 * len(queries))

    sensors = []
    for query in queries:
        modes = query.get(CONF_MODE)
        origin = query.get(CONF_ORIGIN)
        destination = query.get(CONF_DESTINATION)
        if "bus" in modes:
            sensors.append(
                UkTransportLiveBusTimeSensor(
                    app_id, app_key, origin, destination, interval
                )
            )
        elif "train" in modes:
            sensors.append(
                UkTransportLiveTrainTimeSensor(
                    app_id, app_key, origin, destination, interval
                )
            )
    add_entities(sensors, True)
class UkTransportSensor(SensorEntity):
    """
    Sensor that reads the UK transport web API.

    transportapi.com provides comprehensive transport data for UK train, tube
    and bus travel across the UK via simple JSON API. Subclasses of this
    base class can be used to access specific types of information.
    """

    TRANSPORT_API_URL_BASE = "https://transportapi.com/v3/uk/"
    _attr_icon = "mdi:train"
    _attr_native_unit_of_measurement = TIME_MINUTES

    def __init__(self, name, api_app_id, api_app_key, url):
        """Initialize the sensor."""
        self._data = {}  # last successfully decoded API payload
        self._api_app_id = api_app_id
        self._api_app_key = api_app_key
        self._url = self.TRANSPORT_API_URL_BASE + url
        self._name = name
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    def _do_api_request(self, params):
        """Perform an API request and cache the decoded payload in self._data.

        On HTTP failure only a warning is logged; on an API-level error the
        sensor state is set to a human-readable message instead.
        """
        request_params = dict(
            {"app_id": self._api_app_id, "app_key": self._api_app_key}, **params
        )
        response = requests.get(self._url, params=request_params)
        if response.status_code != HTTPStatus.OK:
            _LOGGER.warning("Invalid response from API")
        else:
            # Decode the body once instead of re-parsing it for every check
            # (the original called response.json() up to four times).
            payload = response.json()
            if "error" in payload:
                if "exceeded" in payload["error"]:
                    self._state = "Usage limits exceeded"
                if "invalid" in payload["error"]:
                    self._state = "Credentials invalid"
            else:
                self._data = payload
class UkTransportLiveBusTimeSensor(UkTransportSensor):
    """Live bus time sensor from UK transportapi.com."""

    _attr_icon = "mdi:bus"

    def __init__(self, api_app_id, api_app_key, stop_atcocode, bus_direction, interval):
        """Construct a live bus time sensor."""
        self._stop_atcocode = stop_atcocode
        self._bus_direction = bus_direction
        self._next_buses = []
        # Case-insensitive match against the departure's "direction" field.
        self._destination_re = re.compile(f"{bus_direction}", re.IGNORECASE)

        sensor_name = f"Next bus to {bus_direction}"
        stop_url = f"bus/stop/{stop_atcocode}/live.json"

        UkTransportSensor.__init__(self, sensor_name, api_app_id, api_app_key, stop_url)
        # Rate-limit updates to the configured interval.
        self.update = Throttle(interval)(self._update)

    def _update(self):
        """Get the latest live departure data for the specified stop."""
        self._do_api_request({"group": "route", "nextbuses": "no"})
        if not self._data:
            # Keep the previous state when the request yielded no data.
            return
        self._next_buses = [
            {
                "route": route,
                "direction": departure["direction"],
                "scheduled": departure["aimed_departure_time"],
                "estimated": departure["best_departure_estimate"],
            }
            for route, departures in self._data["departures"].items()
            for departure in departures
            if self._destination_re.search(departure["direction"])
        ]
        if self._next_buses:
            self._state = min(_delta_mins(bus["scheduled"]) for bus in self._next_buses)
        else:
            self._state = None

    @property
    def extra_state_attributes(self):
        """Return other details about the sensor state."""
        attrs = {}
        if self._data is not None:
            attrs = {
                key: self._data.get(key)
                for key in (ATTR_ATCOCODE, ATTR_LOCALITY, ATTR_STOP_NAME, ATTR_REQUEST_TIME)
            }
        attrs[ATTR_NEXT_BUSES] = self._next_buses
        return attrs
class UkTransportLiveTrainTimeSensor(UkTransportSensor):
    """Live train time sensor from UK transportapi.com."""

    _attr_icon = "mdi:train"

    def __init__(self, api_app_id, api_app_key, station_code, calling_at, interval):
        """Construct a live train time sensor."""
        self._station_code = station_code
        self._calling_at = calling_at
        self._next_trains = []

        sensor_name = f"Next train to {calling_at}"
        query_url = f"train/station/{station_code}/live.json"

        UkTransportSensor.__init__(
            self, sensor_name, api_app_id, api_app_key, query_url
        )
        # Rate-limit updates to the configured interval.
        self.update = Throttle(interval)(self._update)

    def _update(self):
        """Get the latest live departure data for the specified station."""
        params = {
            "darwin": "false",
            "calling_at": self._calling_at,
            "train_status": "passenger",
        }
        self._do_api_request(params)
        self._next_trains = []
        if self._data != {}:
            if self._data["departures"]["all"] == []:
                self._state = "No departures"
            else:
                for departure in self._data["departures"]["all"]:
                    self._next_trains.append(
                        {
                            "origin_name": departure["origin_name"],
                            "destination_name": departure["destination_name"],
                            "status": departure["status"],
                            "scheduled": departure["aimed_departure_time"],
                            "estimated": departure["expected_departure_time"],
                            "platform": departure["platform"],
                            "operator_name": departure["operator_name"],
                        }
                    )
                # BUG FIX: this used to sit one level out, so an empty
                # departure list immediately overwrote "No departures"
                # with None. It now only runs when departures exist.
                if self._next_trains:
                    self._state = min(
                        _delta_mins(train["scheduled"]) for train in self._next_trains
                    )
                else:
                    self._state = None

    @property
    def extra_state_attributes(self):
        """Return other details about the sensor state."""
        attrs = {}
        if self._data is not None:
            attrs[ATTR_STATION_CODE] = self._station_code
            attrs[ATTR_CALLING_AT] = self._calling_at
            if self._next_trains:
                attrs[ATTR_NEXT_TRAINS] = self._next_trains
        return attrs
def _delta_mins(hhmm_time_str):
    """Calculate time delta in minutes to a time in hh:mm format."""
    now = dt_util.now()
    parsed = datetime.strptime(hhmm_time_str, "%H:%M")
    # Anchor the hh:mm target on today; if it already passed, roll to tomorrow.
    target = now.replace(hour=parsed.hour, minute=parsed.minute)
    if target < now:
        target += timedelta(days=1)
    return (target - now).total_seconds() // 60
| |
# Get OWGR Results
from time import gmtime,strftime
import datetime, urllib, urllib2
# External modules (bs4)
import json,csv,sys
sys.path[0:0] = ['lib']
from bs4 import BeautifulSoup
MAXPICKS=12  # only the first 12 ranked picks per picker score points
pickers=(u'Mark',u'Steve')  # the two competitors
points={}  # player name -> points snapshot (filled at runtime)
picks={}  # shared pick state (rebuilt by get_picks)
debug=False  # set True to enable do_debug tracing
def do_debug(string):
    """Print *string* only when the module-level ``debug`` flag is set."""
    if not debug:
        return
    print (string)
# Handler for string values to ASCII or integer
def xstr(string):
    """Normalise a scraped value.

    None passes through, all-digit strings become ints, anything else is
    ASCII-encoded (dropping non-ASCII characters) and stripped.
    """
    if string is None:
        return None
    if string.isdigit():
        return int(string)
    return string.encode('ascii','ignore').strip()
def get_value(string):
    """Parse *string* as a number rounded to 2 decimal places.

    Returns 0 for unparsable input (None, '', text), and collapses
    whole-number results to ints (e.g. '7.0' -> 7).
    """
    try:
        value = round(float(string), 2)
    except (TypeError, ValueError):  # was a bare except; only catch parse errors
        value = 0.0
    if abs(value - int(value)) < 0.001:
        value = int(value)
    return value
# json_results -- get results for a url
def json_results(url):
    """Fetch *url* and decode the response body as JSON."""
    return json.load(urllib2.urlopen(url))
def current_week():
    """Return the current week-of-year number (leading zero dropped) as a string."""
    return str(int(strftime("%U", gmtime())))
def current_year():
    """Return the current four-digit year as a string."""
    return str(int(strftime("%Y", gmtime())))
# last week (pull from api, not calendar)
def last_week():
    """Return the most recent week_id published by the knarflog API, as a string."""
    weeks = json_results("http://knarflog.appspot.com/api/weeks")
    return str(weeks['weeks'][0]['week_id'])
# last_weeks_rankings (loaded from api)
def last_weeks_rankings():
    """Load last week's rankings from the API.

    Returns a dict mapping each player name to {'Rank', 'Points'} and each
    tracked picker name to {'Points', 'Rank'} (pickers ranked by API order).
    """
    rankings = json_results("http://knarflog.appspot.com/api/rankings/" + last_week())
    lastweek = {}
    for player in rankings["players"]:
        lastweek[player['Name']] = {"Rank": player["Rank"], "Points": player['Points']}
    ranknum = 0
    for picker in rankings["pickers"]:
        if picker.get('Name') not in pickers:
            continue
        ranknum += 1
        lastweek[picker['Name']] = {"Points": round(picker.get("Points", 0.0), 2), "Rank": ranknum}
    return lastweek
# last_weeks_results (from owgr home page)
def get_results():
    """Scrape last week's event results from the owgr.com home page.

    Returns a list of event dicts. Only events with a Winner and a
    strength-of-field (SOF) above 50 are kept; each is tagged with its
    event ID, week number/date and (when picks are involved) per-player
    results from event_results().
    """
    events=[]
    owgr_url='http://www.owgr.com/'
    soup=soup_results(owgr_url)
    # Assumes the first <time> tag ending with the current year contains the
    # week number at token 4 and the date after it - site-layout dependent,
    # TODO confirm against the live page.
    last_week=[xstr(t.string) for t in soup.find_all('time') if t.string.endswith(current_year())][0].split()
    week_no = xstr(last_week[4])
    week_date = ' '.join(last_week[5:])
    for row in soup.find_all('tr'):
        if row.find('th'):
            # Header row: remember the column names for the data rows below.
            headers=[xstr(th.string) for th in row.find_all('th') if th.string]
            headers[0]="Event Name"
        elif row.find('td'):
            values=[xstr(td.string) for td in row.find_all('td')]
            event={header:value for (header,value) in zip(headers,values) if value }
            # Skip unfinished events (no Winner) and weak fields (SOF <= 50).
            if event.get('Winner') and event.get('SOF')>50:
                urls=[u.get('href') for u in row.find_all('a')]
                # The largest numeric suffix among 'Event' links is the event id.
                event['ID']=int(max([url.rsplit('=')[-1] for url in urls if 'Event' in url]) )
                event['Week']=week_no
                event['Date']=week_date
                results=event_results(event['ID'])
                if len(results)>0:
                    # results[0] is the winner row; its Points stand in for the event.
                    event['Points']=results[0].get("Points")
                    event['Results']=results
                events.append(event)
    return events
# this_weeks_rankings (loaded from api)
def this_weeks_rankings():
    """Load the current week's rankings from the knarflog API."""
    return json_results("http://knarflog.appspot.com/api/rankings")
# soup_results -- get results for a url
def soup_results(url):
    """Fetch *url* and parse it with BeautifulSoup.

    Retries immediately on any fetch/parse failure, doubling a counter
    from 2 until it reaches 1000 (roughly 9 attempts, no sleep between
    them). Returns None when every attempt fails.
    """
    soup = None
    backoff = 2  # was misleadingly named 'timeout'; it is a retry counter
    while not soup and backoff < 1000:
        try:
            page = urllib2.urlopen(url, timeout=180)
            soup = BeautifulSoup(page.read(), "html.parser")
        except Exception:  # was a bare except; don't swallow SystemExit/KeyboardInterrupt
            backoff = backoff * 2
    return soup
def get_bool(input):
    """Interpret common truthy spellings ('true', '1', 1, True) as True."""
    # NOTE: parameter shadows the builtin `input`; kept for interface stability.
    return input in ('true', True, 1, '1')
# get_field (loaded from api)
def get_event():
    """Load the current event row from the published Google sheet (CSV).

    If the sheet returns several rows, the last one wins.
    """
    events_url='https://docs.google.com/spreadsheets/d/1rb_attQJRkfOuQSeg7Qq8GoYdgorpm-oQKK60AQY8J8/pub?single=true&gid=0&range=A2:E2&output=csv'
    event = {}
    for row in csv.reader(urllib2.urlopen(events_url)):
        event = {
            'id': row[0],
            'name': row[1],
            'shortname': row[1][5:],
            'url': row[2],
            'start': row[3],
        }
    return event
def get_players():
    """Load the player list from the published Google sheet (CSV).

    Rows with the optional country/odds/picked columns get those fields;
    short rows get placeholder hotpoints/odds/picked values. 'rownum'
    tracks the spreadsheet row (data starts at row 2).
    """
    players=[]
    players_url="https://docs.google.com/spreadsheet/pub?key=0AgO6LpgSovGGdDI4bVpHU05zUDQ3R09rUnZ4LXBQS0E&single=true&gid=1&range=A2%3AF155&output=csv"
    result = urllib2.urlopen(players_url)
    reader = csv.reader(result)
    rownum = 1
    for row in reader:
        if not row:
            continue
        rownum += 1
        player = {'rownum': rownum}
        player['rank'] = get_value(row[0])
        player['name'] = row[1]
        player['lastname'] = row[1].split(" ")[-1]
        player['points'] = get_value(row[2].replace(',', '').replace('-', '0'))
        # BUG FIX: was `len(row)>=5`, but row[5] is read below, which raised
        # IndexError on exactly-5-column rows. Require all six columns.
        if len(row) >= 6:
            player['country'] = row[3]
            player['odds'] = get_value(row[4])
            player['picked'] = int(row[5])
        else:
            player['hotpoints'] = 0.0
            player['odds'] = 999
            player['picked'] = 0
        players.append(player)
    return players
# get_picks (loaded from api)
def get_picks():
    """Load pick state from the knarflog API into one flat dict.

    The returned dict maps each picked player name to
    {'Picker': <picker>, 'Points': 0.0} and each picker name to a fresh
    summary record {'Name', 'Count', 'Points', 'Picks', 'Week'}, where
    Points is carried over from the API payload.
    """
    url="http://knarflog.appspot.com/api/picks"
    picks=json_results(url).get('picks')
    # initialize counter for each user
    for picker in pickers:
        # Local variable; does not touch the module-level `points` dict.
        points=picks[picker]['Points']
        for player in picks[picker][u'Picks']:
            picks[player]={'Picker': picker, 'Points': 0.0 }
        # Replace the raw API record with a fresh summary, keeping only Points.
        picks[picker]={'Name':picker,'Count':0,'Points': points ,'Picks':[],'Week':0 }
    return picks
def get_weeks(year):
    """Placeholder: week loading is not implemented; always returns []."""
    return []
def get_picker_results(results):
    """Tally per-picker pick counts and points over event *results*.

    Returns {picker: {'Name', 'Count', 'Points', 'Rank'}}; the picker with
    fewer points gets Rank 2 (ties leave the second picker at Rank 1).
    """
    picker_results = {
        picker: {'Name': picker, 'Count': 0, 'Points': 0, 'Rank': 1}
        for picker in pickers
    }
    for result in (results or []):
        for player in result['Results']:
            owner = player.get('Picker')
            if owner:
                picker_results[owner]['Count'] += 1
                picker_results[owner]['Points'] += player['Points']
    if picker_results[pickers[0]]['Points'] > picker_results[pickers[1]]['Points']:
        picker_results[pickers[1]]['Rank'] = 2
    else:
        picker_results[pickers[0]]['Rank'] = 2
    return picker_results
def get_rank(position):
    """Convert a position string like '12' or 'T5' to an int rank.

    Returns 0 for empty/None or non-numeric positions (e.g. 'CUT').
    """
    if position:
        digits = position.replace('T', '')
        if digits.isdigit():
            return int(digits)
    return 0
def event_headers(soup):
    """Extract page-level metadata from an OWGR event page.

    Returns a dict with title/url/id/name/date, Week and Year, a combined
    week_id (YY*100 + week) and the result-table column names. Layout
    dependent - assumes a <span class="week"> whose text ends with the
    week number; TODO confirm against the live page.
    """
    headers={}
    if soup.title.string:
        headers['title']=soup.title.string
    headers['url']=str(soup.find('form').get('action'))
    if headers['url'].find('=')>0:
        # e.g. ".../EventResult.aspx?eventid=1234" -> id "1234"
        headers['id']=headers['url'].split('=')[1]
    headers['name']=soup.find('h2').string
    headers['date']=str(soup.find('time').string)
    headers['Week']=int(soup.find("span", { "class" : "week" }).string.split()[-1])
    headers['Year']=str(current_year())
#    headers['Year']=headers['date'][-4:]
    # week_id packs two-digit year and week into one sortable int.
    headers['week_id']=int(headers['Year'][-2:])*100+headers['Week']
#    headers['columns']=[xstr(column.string) for column in soup.find('thead').findAll('th')]
    headers['columns']=[xstr(column.string) for column in soup.findAll('th',{"class":"header"})]
    return headers
def ranking_headers(soup):
    """Extract page-level metadata from the OWGR ranking page.

    Like event_headers(), but the week number comes from the first <h2>
    starting with 'WEEK' and the date from the last <time> tag. Returns
    title/url/id/name/date, Week/Year, week_id and the table column names.
    """
    headers={}
    if soup.title.string:
        headers['title']=soup.title.string
    headers['url']=str(soup.find('form').get('action'))
    if headers['url'].find('=')>0:
        headers['id']=headers['url'].split('=')[1]
    headers['name']=[head.string for head in soup.findAll('h2') if head.string.startswith('WEEK')][0]
    headers['date']=str(soup.findAll('time')[-1].string)
    # Assumes the heading ends with a two-digit week number - TODO confirm.
    headers['Week']=int(headers['name'][-2:])
    headers['Year']=str(current_year())
#    headers['Year']=headers['date'][-4:]
    headers['week_id']=int(headers['Year'][-2:])*100+headers['Week']
#    headers['columns']=[xstr(column.string) for column in soup.find('thead').findAll('th')]
    headers['columns']=[xstr(column.string) for column in soup.findAll('th')]
    return headers
def row_results(row, keys):
    """Build an event dict from a results-table row, keyed by *keys*.

    Rows carrying a ctl5 link additionally get owgr_url, ID and Points
    (Points taken from the SOF column).
    """
    cells = [xstr(td.string) for td in row.findAll('td')]
    event = dict(zip(keys, cells))
    link_cell = row.find(id="ctl5")
    if link_cell:
        event['owgr_url'] = str(link_cell.find('a').get('href'))
        event['ID'] = int(event.get('owgr_url').rsplit('=')[-1])
        event['Points'] = int(event.get("SOF", 0))
    return event
def player_rankings(row):
    """Extract one player's ranking record from a rankings-table row.

    Returns {} for rows that are not full player rows (no link or fewer
    than 10 cells).
    """
    name = row.find('a')
    cols = row.findAll('td')
    if not name or len(cols) < 10:
        return {}
    player_name = xstr(name.string)
    player = {'Rank': int(cols[0].text), 'Name': player_name}
    player['ID'] = int(row.find('a').get('href').rsplit('=')[-1])
    player['Ctry'] = xstr(cols[3].img.get('title'))
    player['Avg'] = round(get_value(cols[5].text), 2)
    player['Total'] = round(get_value(cols[6].text), 2)
    player['Events'] = int(cols[7].text)
    player['Points'] = get_value(cols[9].text)
    # Week delta relative to the module-level `points` snapshot.
    player['Week'] = player['Points'] - points.get(player_name, 0)
    return player
def player_results(row, keys):
    """Extract one player's event-result record from a results-table row."""
    cells = [xstr(td.string) for td in row.findAll('td')]
    player = dict(zip(keys, cells))
    player['Rank'] = get_rank(str(player.get('Pos')))
    player['Ctry'] = str(row.find('img').get('title'))
    player['ID'] = int(row.find('a').get('href').rsplit('=')[-1])
    player['Points'] = get_value(player.get('Ranking Points', 0))
    return player
def get_player(player_id):
    """Placeholder: individual player lookup is not implemented."""
    return None
def get_ranking(size):
    """Scrape the OWGR ranking page (first *size* rows).

    Element 0 of the returned list is the header dict from
    ranking_headers(); the rest are named player records.
    """
    soup = soup_results("http://www.owgr.com/ranking?pageSize=" + str(size))
    ranking = [ranking_headers(soup)]
    for row in soup.findAll('tr'):
        player = player_rankings(row)
        if player and player.get('Name'):
            ranking.append(player)
    return ranking
def get_rankings(cur_week=None):
    """Scrape the OWGR rankings and merge in pick / previous-week data.

    Element 0 of the returned list is the header dict; the last element
    holds the two pickers' totals plus the 'Available' pool; the rest
    are player records.

    BUG FIX: the default used to be cur_week=current_week(), which is
    evaluated once at import time and froze the week for the life of the
    process; it is now resolved per call.
    """
    if cur_week is None:
        cur_week = current_week()
    # Get previous weeks ranking (if not week 0)
    if cur_week=='0':
        prevweek={}
    else:
        prevweek=last_weeks_rankings()
    picks=get_picks()
    # Carry each pick's points over from last week (0.0 when unknown).
    for pick in picks:
        if prevweek.get(pick):
            picks[pick]["Points"]=round(prevweek[pick]["Points"],2)
        else:
            picks[pick]["Points"]=0.0
    picks['Available']={'Count':0, 'Picks':[] }
    ranking_url="http://www.owgr.com/ranking"
    soup=soup_results(ranking_url)
    rankings=[ranking_headers(soup)]
    for row in soup.findAll('tr'):
        player=player_rankings(row)
        player_name=player.get('Name')
        if player_name in prevweek.keys():
            player['Last Week']=prevweek[player_name]["Rank"]
            player['Week']=round(player['Points']-prevweek[player_name]['Points'],2)
        if player_name in picks.keys():
            picker=picks[player_name]['Picker']
            player['Picker']=picker
            player['Pickno']=picks[picker]['Count']
            # Only the first MAXPICKS picks per picker accumulate points.
            if picks[picker]['Count']<MAXPICKS:
                picks[picker]['Picks'].append(player_name)
                picks[picker]['Count']+=1
                picks[picker]['Week']=round(picks[picker]['Week']+player['Week'],2)
                picks[picker]['Points']=round(picks[picker]['Points']+player['Week'],2)
        else:
            # Unpicked named players go into the 'Available' pool.
            if player_name:
                picks['Available']['Picks'].append(player_name)
                picks['Available']['Count']+=1
        if player and player_name:
            player["Week"]=round(player["Week"],2)
            rankings.append(player)
    # append totals to the end
    rankings.append({key:value for key,value in picks.iteritems() if key in pickers})
    picks['Available']['Picks'].sort()
    rankings[-1]['Available']=picks['Available']
    return rankings
def event_results(event_id):
    """Scrape one event's results, keeping the winner and picked players.

    Returns a list of player records (winner first, by table order), or
    [] when no picked player appears - callers use that to skip events
    without picks.
    """
    picks=get_picks()
    num_picks=0
    event_url='http://www.owgr.com/en/Events/EventResult.aspx?eventid='+str(event_id)
    soup=soup_results(event_url)
    headers=event_headers(soup)  # NOTE(review): result unused - looks vestigial
    event_keys=['Pos', 'Ctry', 'Name', 'R1', 'R2', 'R3', 'R4', 'Agg', 'Ranking Points']
    players=[]
    for row in soup.findAll('tr'):
        add_player=False
        name = row.find('td',{'class': "name"})
        if name and name.string:
            player=player_results(row,event_keys)
            # Always keep the winner, even if nobody picked them.
            if player.get("Rank")==1:
                add_player=True
            if player.get('Name') in picks.keys():
                player_name=player['Name']
                picker=picks[player_name]['Picker']
                player['Picker']=xstr(picker)
                add_player=True
                num_picks+=1
            if add_player:
                players.append(player)
    # only add events with picks
    if num_picks==0:
        players=[]
    return players
#major results (grab all players, no picks)
def major_results(event_id):
    """Scrape every player's result for a major (no pick filtering).

    Unlike event_results(), this keeps all named rows. Removed the
    unused `headers`/`add_player` locals from the original.
    """
    event_url = 'http://www.owgr.com/en/Events/EventResult.aspx?eventid=' + str(event_id)
    soup = soup_results(event_url)
    event_keys = ['Pos', 'Ctry', 'Name', 'R1', 'R2', 'R3', 'R4', 'Agg', 'Ranking Points']
    players = []
    for row in soup.findAll('tr'):
        name = row.find('td', {'class': "name"})
        if name and name.string:
            players.append(player_results(row, event_keys))
    return players
def get_events(week_id=0):
    """Return last week's events via get_results(); *week_id* is currently ignored."""
    return get_results()
def major_event(year, n, event):
    """Classify *event* as one of the four majors and tag it.

    Adds event_id ((year-2000)*100 plus a per-major offset: Masters 4,
    US Open 6, Open Championship 7, PGA 8), event_name and an absolute
    owgr_url. *n* is the count of majors already collected this year;
    n==3 forces the Masters classification. Removed the unused
    `event_year` local from the original.

    NOTE: mutates and returns the same dict - callers rely on the alias.
    """
    event_id = (year - 2000) * 100
    new_event = event  # alias, not a copy
    title = event['Event Name'].upper()  # hoisted: was recomputed per branch
    if n == 3 or 'MASTERS' in title:
        event_id += 4
        event_name = str(event['Year']) + " Masters"
    elif title[:3] in ('US ', 'U.S'):
        event_id += 6
        event_name = str(event['Year']) + " US Open"
    elif 'PGA' in title:
        event_id += 8
        event_name = str(event['Year']) + " PGA Championship"
    else:
        event_id += 7
        event_name = str(event['Year']) + " Open Championship"
    new_event["event_id"] = event_id
    new_event["event_name"] = event_name
    new_event["owgr_url"] = "http://www.owgr.com" + event["owgr_url"]
    return new_event
def get_majors(year):
    """Scrape the majors for *year*, each with full per-player results."""
    soup = soup_results('http://www.owgr.com/events?tour=Maj&year=' + str(year))
    keys = event_headers(soup).get('columns')[:6]
    events = []
    for row in soup.findAll('tr'):
        # Only rows with a ctl5 link point at an actual event page.
        if not row.find(id="ctl5"):
            continue
        event = major_event(year, len(events), row_results(row, keys))
        event['results'] = major_results(event["ID"])
        print(event.get("Event Name"),event.get("event_name"))
        events.append(event)
    return events
def dump_majors(year):
    """Scrape the majors for *year* and write them to json/majors/<year>.json."""
    majors = get_majors(year)
    mjson = {"year": year, "majors": majors}
    # Context manager closes the file even if json.dump raises
    # (the original leaked the handle on error).
    with open('json/majors/' + str(year) + '.json', 'w') as outfile:
        json.dump(mjson, outfile)
def dump_rankings():
    """Write the current rankings and results to ../rankings.json / ../results.json."""
    ranking = get_rankings()
    with open('../rankings.json', 'w') as outfile:
        json.dump(ranking, outfile)
    # Fetch results before opening the file, so a scrape failure
    # doesn't truncate an existing results.json.
    results = get_events(ranking[0]['week_id'])
    with open('../results.json', 'w') as outfile:
        json.dump(results, outfile)
def post_results():
    """POST the current rankings and results to the knarflog update endpoint."""
    ranking = get_rankings()
    payload = {
        'week_id': ranking[0]['week_id'],
        'rankings': json.dumps(ranking),
        'results': json.dumps(get_results()),
        'submit': 'Update',
    }
    urllib2.urlopen("http://knarflog.appspot.com/update", urllib.urlencode(payload))
    return True
def post_event(event):
    """POST a single event's JSON to the golfevent update endpoint.

    Returns True on completion, for consistency with post_results()
    (the original returned None and held the response in an unused local).
    """
    query_args = {'event_data': json.dumps(event), 'submit': 'Update'}
    encoded_args = urllib.urlencode(query_args)
    urllib2.urlopen("http://susyandsteve.appspot.com/golfevent", encoded_args)
    return True
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used.
It also allows setting of formatting information through flags.
"""
import cStringIO
import inspect
import json
import logging
import logging.handlers
import os
import sys
import traceback
from nova import flags
from nova import version
FLAGS = flags.FLAGS
# Format strings are flag-configurable; NovaFormatter picks between the
# context/default variants per record (see NovaFormatter.format below).
flags.DEFINE_string('logging_context_format_string',
                    '%(asctime)s %(levelname)s %(name)s '
                    '[%(request_id)s %(user)s '
                    '%(project)s] %(message)s',
                    'format string to use for log messages with context')
flags.DEFINE_string('logging_default_format_string',
                    '%(asctime)s %(levelname)s %(name)s [-] '
                    '%(message)s',
                    'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix',
                    'from (pid=%(process)d) %(funcName)s'
                    ' %(pathname)s:%(lineno)d',
                    'data to append to log format when level is DEBUG')
flags.DEFINE_string('logging_exception_prefix',
                    '(%(name)s): TRACE: ',
                    'prefix each line of exception output with this format')
flags.DEFINE_list('default_log_levels',
                  ['amqplib=WARN',
                   'sqlalchemy=WARN',
                   'boto=WARN',
                   'eventlet.wsgi.server=WARN'],
                  'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
flags.DEFINE_string('logfile', None, 'output to named file')
flags.DEFINE_bool('verbose', False, 'use verbose output')
# A list of things we want to replicate from logging.
# levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# methods
getLogger = logging.getLogger
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warn
error = logging.error
exception = logging.exception
critical = logging.critical
log = logging.log
# handlers
StreamHandler = logging.StreamHandler
WatchedFileHandler = logging.handlers.WatchedFileHandler
# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
SysLogHandler = logging.handlers.SysLogHandler
# our new audit level
# NOTE: AUDIT sits between INFO and WARNING; level names are resolved via
# globals() in NovaLogger.setup_from_flags, so 'AUDIT' works in flag values.
AUDIT = logging.INFO + 1
logging.addLevelName(AUDIT, 'AUDIT')
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) \
and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from FLAGS.

    FLAGS.logfile wins outright; otherwise FLAGS.logdir yields
    <logdir>/<binary>.log; returns None when neither flag is set.
    """
    if FLAGS.logfile:
        return FLAGS.logfile
    if FLAGS.logdir:
        name = binary or _get_binary_name()
        return '%s.log' % (os.path.join(FLAGS.logdir, name),)
class NovaLogger(logging.Logger):
    """NovaLogger manages request context and formatting.

    This becomes the class that is instantiated by logging.getLogger
    (installed via logging.setLoggerClass at module import).
    """

    def __init__(self, name, level=NOTSET):
        logging.Logger.__init__(self, name, level)
        self.setup_from_flags()

    def setup_from_flags(self):
        """Set this logger's level from FLAGS.default_log_levels.

        Each entry is 'logger=LEVEL'; the last matching entry wins.
        Level names are looked up in this module's globals (DEBUG, WARN,
        AUDIT, ...), so an unknown name raises KeyError.
        """
        level = NOTSET
        for pair in FLAGS.default_log_levels:
            logger, _sep, level_name = pair.partition('=')
            # NOTE(todd): if we set a.b, we want a.b.c to have the same level
            # (but not a.bc, so we check the dot)
            if self.name == logger or self.name.startswith("%s." % logger):
                level = globals()[level_name]
        self.setLevel(level)

    def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
        """Extract context from any log call.

        Merges the (dictified) request context and the nova version into
        the record's `extra` before delegating to logging.Logger._log.
        """
        if not extra:
            extra = {}
        if context:
            extra.update(_dictify_context(context))
        extra.update({"nova_version": version.version_string_with_vcs()})
        return logging.Logger._log(self, level, msg, args, exc_info, extra)

    def addHandler(self, handler):
        """Each handler gets our custom formatter (module-level _formatter)."""
        handler.setFormatter(_formatter)
        return logging.Logger.addHandler(self, handler)

    def audit(self, msg, *args, **kwargs):
        """Shortcut for our AUDIT level."""
        if self.isEnabledFor(AUDIT):
            self._log(AUDIT, msg, args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """Logging.exception doesn't handle kwargs, so breaks context.

        Logs the error with exc_info, then emits a second record with the
        string-valued entries of extra['environment'], if provided.
        """
        if not kwargs.get('exc_info'):
            kwargs['exc_info'] = 1
        self.error(msg, *args, **kwargs)
        # NOTE(todd): does this really go here, or in _log ?
        extra = kwargs.get('extra')
        if not extra:
            return
        env = extra.get('environment')
        if env:
            env = env.copy()
            # Drop non-string values so the environment JSON-serializes.
            # NOTE(review): pops while iterating .keys() - fine on Python 2,
            # would raise RuntimeError on Python 3.
            for k in env.keys():
                if not isinstance(env[k], str):
                    env.pop(k)
            message = 'Environment: %s' % json.dumps(env)
            # exc_info was already emitted above; don't repeat the traceback.
            kwargs.pop('exc_info')
            self.error(message, **kwargs)
class NovaFormatter(logging.Formatter):
    """A nova.context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string. You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default.

        NOTE: mutates self._fmt per record, so one formatter instance is
        shared across all handlers (see module-level _formatter).
        """
        if record.__dict__.get('request_id', None):
            self._fmt = FLAGS.logging_context_format_string
        else:
            self._fmt = FLAGS.logging_default_format_string
        if record.levelno == logging.DEBUG \
                and FLAGS.logging_debug_format_suffix:
            self._fmt += " " + FLAGS.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with FLAGS.logging_exception_prefix.

        Without a record, falls back to the stock formatter; with one,
        every traceback line is prefixed with the flag's format string
        interpolated against the record's fields.
        """
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = cStringIO.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        formatted_lines = []
        for line in lines:
            pl = FLAGS.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
# Single shared formatter; attached to every handler by NovaLogger.addHandler.
_formatter = NovaFormatter()
class NovaRootLogger(NovaLogger):
    """Root logger: owns the process's stream/file/syslog handlers."""

    def __init__(self, name, level=NOTSET):
        # Handler wiring happens in setup_from_flags(), invoked by
        # NovaLogger.__init__.
        self.logpath = None
        self.filelog = None
        self.streamlog = StreamHandler()
        self.syslog = None
        NovaLogger.__init__(self, name, level)

    def setup_from_flags(self):
        """(Re)attach handlers and set the level according to FLAGS.

        Safe to call repeatedly: removeHandler is a no-op for handlers
        that are absent (including the initial None filelog).
        """
        global _filelog  # NOTE(review): vestigial - _filelog is never assigned or read
        if FLAGS.use_syslog:
            self.syslog = SysLogHandler(address='/dev/log')
            self.addHandler(self.syslog)
        elif self.syslog:
            self.removeHandler(self.syslog)
        logpath = _get_log_file_path()
        if logpath:
            # File logging replaces stream logging; reopen on path change.
            self.removeHandler(self.streamlog)
            if logpath != self.logpath:
                self.removeHandler(self.filelog)
                self.filelog = WatchedFileHandler(logpath)
                self.addHandler(self.filelog)
                self.logpath = logpath
        else:
            self.removeHandler(self.filelog)
            self.addHandler(self.streamlog)
        if FLAGS.verbose:
            self.setLevel(DEBUG)
        else:
            self.setLevel(INFO)
def handle_exception(type, value, tb):
    """sys.excepthook replacement: log uncaught exceptions to the root logger."""
    # Attach the full traceback only when verbose logging is enabled.
    extra = {'exc_info': (type, value, tb)} if FLAGS.verbose else {}
    logging.root.critical(str(value), **extra)
def reset():
    """Re-apply flag-derived settings to every NovaLogger instance.

    Resets logging handlers; should be called if FLAGS changes.
    """
    all_loggers = NovaLogger.manager.loggerDict.itervalues()
    for logger in (lg for lg in all_loggers if isinstance(lg, NovaLogger)):
        logger.setup_from_flags()
def setup():
    """Setup nova logging: install NovaRootLogger as the stdlib root.

    Idempotent - does nothing if the root logger has already been
    replaced. Runs under the logging module lock while it re-roots
    every existing logger, then installs the excepthook and applies
    flag settings via reset().
    """
    if not isinstance(logging.root, NovaRootLogger):
        logging._acquireLock()
        # Strip the stock root handlers; NovaRootLogger manages its own.
        for handler in logging.root.handlers:
            logging.root.removeHandler(handler)
        logging.root = NovaRootLogger("nova")
        NovaLogger.root = logging.root
        NovaLogger.manager.root = logging.root
        # Re-parent all previously created loggers onto the new root.
        for logger in NovaLogger.manager.loggerDict.itervalues():
            logger.root = logging.root
            if isinstance(logger, logging.Logger):
                NovaLogger.manager._fixupParents(logger)
        NovaLogger.manager.loggerDict["nova"] = logging.root
        logging._releaseLock()
        sys.excepthook = handle_exception
        reset()
# Expose the (possibly replaced) root logger and make logging.getLogger()
# hand out NovaLogger instances from now on.
root = logging.root
logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
    """Shortcut for logging to root log with severity 'AUDIT'."""
    logging.root.log(AUDIT, msg, *args, **kwargs)
| |
from django.shortcuts import render, HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from tracker.models import Expense
from datetime import datetime, timedelta
import json
@login_required(login_url='/signin/')
def index(request):
    """Render the expenses page with the current user's expenses."""
    owned = Expense.objects.filter(owner=request.user)
    context = {'expenses': map(lambda x: x.to_json(), owned)}
    return render(request, 'tracker/expenses.html', context)
@login_required(login_url='/signin/')
def get_expenses(request):
    """Return the caller's expenses as JSON; staff may request everyone's.

    POST field 'all_users' is 'true' or 'false'; any other value (or a
    non-staff 'true') yields a failed status.
    """
    all_users = request.POST['all_users']
    # Only staff accounts may fetch every saved expense.
    if all_users == 'true' and request.user.is_staff:
        everyone = Expense.objects.all()
        success_data = {
            'status': 'success',
            'expenses': map(lambda x: x.to_json(), everyone),
        }
        return HttpResponse(json.dumps(success_data))
    if all_users == 'false':
        owned = Expense.objects.filter(owner=request.user)
        success_data = {
            'status': 'success',
            'expenses': map(lambda x: x.to_json(), owned),
        }
        return HttpResponse(json.dumps(success_data))
    failed_data = {
        'status': 'failed',
        'message': 'You must be an admin to view everybody\'s expenses!',
    }
    return HttpResponse(json.dumps(failed_data))
def signin(request):
    """Show the sign-in page, or bounce already-authenticated users home."""
    if request.user.is_authenticated:
        return HttpResponseRedirect('/')
    return render(request, 'tracker/signin.html')
def logout(request):
    """End the session and send the user back to the sign-in page."""
    auth_logout(request)
    return HttpResponseRedirect('/signin/')
def login(request):
    """Authenticate a POSTed username/password and start a session."""
    if request.method != 'POST':
        return HttpResponseRedirect('/')
    user = authenticate(
        username=request.POST["inputUsername"],
        password=request.POST["inputPassword"],
    )
    if user is None:
        # Invalid credentials: re-render the sign-in page with an error.
        context = {'login_error': 'Your username and password didn\'t match. Please try again.'}
        return render(request, 'tracker/signin.html', context)
    auth_login(request, user)
    # Redirect to a success page.
    return HttpResponseRedirect('/')
@login_required(login_url='/signin/')
def create_new_expense(request):
    """Create an expense for the current user from POSTed form fields.

    Expects expense_description, expense_amount and expense_date_time
    ('%Y-%m-%d %H:%M'). Responds with JSON: on success includes the new
    id and a readable date; any parse/save failure yields a failed status.
    """
    if request.user.is_authenticated and request.method == 'POST':
        try:
            description = request.POST['expense_description']
            amount = round(float(request.POST['expense_amount']), 2)
            datetime_object = datetime.strptime(
                request.POST['expense_date_time'], '%Y-%m-%d %H:%M')
            new_expense = Expense(owner=request.user, date_time=datetime_object,
                                  amount=amount, description=description)
            new_expense.save()
            success_data = {
                'status': 'success',
                'new_id': new_expense.id,
                'readable_date': new_expense.date_time.strftime("%b. %-d, %Y, %-I:%M %p"),
            }
            return HttpResponse(json.dumps(success_data))
        except Exception:  # was a bare except; don't swallow SystemExit etc.
            failed_data = {'status': 'failed', 'message': 'Failed to create expense, try again!'}
            return HttpResponse(json.dumps(failed_data))
    failed_data = {'status': 'failed', 'message': 'Failed to create expense, try again!'}
    return HttpResponse(json.dumps(failed_data))
@login_required(login_url='/signin/')
def delete_expense(request):
    """Delete the POSTed expense_id, but only if the current user owns it."""
    if request.method != 'POST':
        return HttpResponse('Failed to delete expense!')
    try:
        expense = Expense.objects.get(id=request.POST['expense_id'], owner=request.user)
        expense.delete()
        return HttpResponse('success')
    except Exception:  # was a bare except; missing id, not found, or not owned
        return HttpResponse('You cannot delete an expense you do not own or does not exist!')
@login_required(login_url='/signin/')
def update_expense(request):
    """Update an expense's description/amount/date from POSTed fields.

    Responds with JSON status; on success includes the updated expense.
    """
    if request.method != 'POST':
        failed_data = {'status': 'failed', 'message': 'Failed to update expense!'}
        return HttpResponse(json.dumps(failed_data))
    try:
        expense_id = request.POST['expense_id']
        updated_expense_description = request.POST['updated_expense_description']
        updated_expense_amount = round(float(request.POST['updated_expense_amount']), 2)
        updated_date_time = datetime.strptime(request.POST['updated_date_time'], '%Y-%m-%d %H:%M')
    except Exception:
        # BUG FIX: the original only built failed_data here and fell through
        # to the update attempt with unbound names; return the error instead.
        failed_data = {'status': 'failed', 'message': 'Failed to fetch updated data!'}
        return HttpResponse(json.dumps(failed_data))
    try:
        current_expense = Expense.objects.get(id=expense_id, owner=request.user)
        current_expense.description = updated_expense_description
        current_expense.amount = updated_expense_amount
        current_expense.date_time = updated_date_time
        current_expense.save()
        success_data = {'status': 'success', 'updated_expense': current_expense.to_json()}
        return HttpResponse(json.dumps(success_data))
    except Exception:
        # BUG FIX: the original built failed_data here but returned None.
        failed_data = {'status': 'failed', 'message': 'You cannot update an expense you do not own or does not exist!'}
        return HttpResponse(json.dumps(failed_data))
@login_required(login_url='/signin/')
def get_report(request):
    """Group the user's expenses by week (Mon-Sun) and total each week.

    Response JSON maps each week's start date (YYYY-MM-DD) to
    [end_date, total_amount, [expense_json, ...]].
    """
    try:
        user_expenses = Expense.objects.filter(owner=request.user)
        expenses_per_week = {}
        for exp in user_expenses:
            # Monday of the week this expense falls in.
            dt = exp.date_time
            start = dt - timedelta(days=dt.weekday())
            end = start + timedelta(days=6)
            start_week_date = str(start).split(' ')[0]
            end_week_date = str(end).split(' ')[0]
            week = expenses_per_week.get(start_week_date)
            if week:
                week[1] += float(exp.amount)
                week[2].append(exp.to_json())
            else:
                expenses_per_week[start_week_date] = [end_week_date, float(exp.amount), [exp.to_json()]]
        success_data = {'status': 'success', 'expenses_per_week': expenses_per_week}
        return HttpResponse(json.dumps(success_data))
    except Exception:  # was a bare except; keep the best-effort JSON error reply
        failed_data = {'status': 'failed', 'message': 'Could not get expense report!'}
        return HttpResponse(json.dumps(failed_data))
@login_required(login_url='/signin/')
def filter_expenses(request):
    """Return the user's expenses with date_time in [start_date, end_date].

    Both bounds are POSTed as '%Y-%m-%d %H:%M'; comparisons are made on
    naive datetimes. Responds with JSON status.
    """
    if request.method == 'POST':
        try:
            start_date = datetime.strptime(request.POST['start_date'], '%Y-%m-%d %H:%M').replace(tzinfo=None)
            end_date = datetime.strptime(request.POST['end_date'], '%Y-%m-%d %H:%M').replace(tzinfo=None)
            expenses_between = []
            for exp in Expense.objects.filter(owner=request.user):
                exp_date_time = exp.date_time.replace(tzinfo=None)
                if start_date <= exp_date_time <= end_date:
                    expenses_between.append(exp.to_json())
            # (removed two leftover debug print statements)
            success_data = {'status': 'success', 'expenses_between': expenses_between}
            return HttpResponse(json.dumps(success_data))
        except Exception:  # was a bare except
            failed_data = {'status': 'failed', 'message': 'Failed to filter expenses!'}
            return HttpResponse(json.dumps(failed_data))
    failed_data = {'status': 'failed', 'message': 'Failed to filter expenses!'}
    # BUG FIX: the original returned HttpResponse(json.dumps) - the function
    # object itself - instead of the serialized failed_data payload.
    return HttpResponse(json.dumps(failed_data))
| |
from pydmrs.pydelphin_interface import parse, generate
from pydmrs.mapping.mapping import dmrs_mapping
from pydmrs.graphlang.graphlang import parse_graphlang
import examples.examples_dmrs as examples
if __name__ == '__main__':
    # Regression script for pydmrs graph mapping: each section parses a
    # sentence to a DMRS graph, rewrites it with dmrs_mapping(search -> replace)
    # and asserts that the expected paraphrase is among the generated strings.
    # NOTE(review): requires a working pydelphin parse/generate backend (ERG);
    # the asserts depend on dmrs_mapping mutating `dmrs` in place when
    # copy_dmrs=False, so statement order matters throughout.

    # basic functionality
    dmrs = examples.the_dog_chases_the_cat()
    search_dmrs = parse_graphlang('[1]:_the_q')
    replace_dmrs = parse_graphlang('[1]:_a_q')
    # iterative, all
    assert 'A dog chases a cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=True, all_matches=True))
    # not iterative, all
    assert all(sent in sents for sent, sents in zip(['A dog chases the cat.', 'The dog chases a cat.'], [generate(dmrs) for dmrs in dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=False, all_matches=True)]))
    # iterative, not all
    assert 'A dog chases the cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=True, all_matches=False))
    # not iterative, not all
    assert 'A dog chases the cat.' in generate(dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=True, iterative=False, all_matches=False))
    # original dmrs did not change so far
    assert 'The dog chases the cat.' in generate(dmrs)
    # iterative, not all
    dmrs = examples.the_dog_chases_the_cat()
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False, iterative=True, all_matches=False)
    assert 'A dog chases the cat.' in generate(dmrs)
    # iterative, all
    dmrs = examples.the_dog_chases_the_cat()
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False, iterative=True, all_matches=True)
    assert 'A dog chases a cat.' in generate(dmrs)

    # coordination restructuring (shared subject "Kim")
    dmrs = parse('Kim eats and Kim sleeps.')[0]
    search_dmrs = parse_graphlang('[4]:node=1 <-1- [2]:node <-l- [1]:_and_c e? -r-> [3]:node -1-> node=1 <-- proper_q; :2 <-lh- :1 -rh-> :3')
    replace_dmrs = parse_graphlang('[4]:node <-1- [2]:node <-l- [1]:_and_c e? -r-> [3]:node -1-> :4; :2 <=lh= :1 =rh=> :3')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Kim eats and sleeps.' in generate(dmrs)

    # some examples inspired by examples from the AMR specification
    dmrs = parse('He described the mission as a failure.')[0]
    search_dmrs = parse_graphlang('[2]:node <-2- *[1]:_describe_v_as e? -3-> [3]:node')
    replace_dmrs = parse_graphlang('pronoun_q --> pron x[3sn_s] <-2- [1]:_describe_v_to e? <-2h- *_as_x_subord e[pui--] -1h-> _be_v_id e[ppi--] -1-> [2]:node; :_be_v_id -2-> [3]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'As he described it, the mission is a failure.' in generate(dmrs)
    # applying the mapping in reverse restores the original sentence
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'He described the mission as a failure.' in generate(dmrs)

    # modal verbs <-> adjectival paraphrases
    dmrs = parse('The boy can go.')[0]
    search_dmrs = parse_graphlang('[1]:_can_v_modal e[p????] -1h-> [2]:_v e[pui--]')
    replace_dmrs = parse_graphlang('[1]:_possible_a_for e[o????] -1h-> [2]:_v e[ppi--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'It is possible that the boy goes.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy can go.' in generate(dmrs)
    dmrs = parse('The boy can\'t go.')[0]
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'It is not possible that the boy goes.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy can\'t go.' in generate(dmrs)
    dmrs = parse('The boy must go.')[0]
    search_dmrs = parse_graphlang('[1]:_must_v_modal e? -1h-> [2]:_v e[pui--]')
    replace_dmrs = parse_graphlang('[1]:_necessary_a_for e? -1h-> [2]:_v e[ppi--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'It is necessary that the boy goes.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy must go.' in generate(dmrs)
    dmrs = parse('The boy should go.')[0]
    search_dmrs = parse_graphlang('[1]:_should_v_modal e? -1h-> [2]:_v e[pui--]')
    replace_dmrs = parse_graphlang('[1]:_recommend_v_to e? -2h-> [2]:_v e[ppi--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'That the boy goes, is recommended.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy should go.' in generate(dmrs)
    dmrs = parse('The boy is likely to go.')[0]
    search_dmrs = parse_graphlang('[1]:_likely_a_1 e? -1h-> [2]:_v e[oui--]')
    replace_dmrs = parse_graphlang('[1]:_likely_a_1 e? -1h-> [2]:_v e[ppi--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'It is likely that the boy goes.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy is likely to go.' in generate(dmrs)
    dmrs = parse('The boy would rather go.')[0]
    search_dmrs = parse_graphlang('[1]:_would_v_modal e? -1h-> [2]:_v e? <=1= _rather_a_1 i; :2 -1-> [3]:node')
    replace_dmrs = parse_graphlang('[1]:_prefer_v_to e? -2h-> [2]:_v e? -1-> [3]:node <-1- :1')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'The boy prefers to go.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy would rather go.' in generate(dmrs)

    # negation restructuring: "not ... any" <-> "no"
    dmrs = parse('I don\'t have any money.')[0]
    search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2-> [2]:node <-- _any_q')
    replace_dmrs = parse_graphlang('[1]:_v e? -2-> [2]:node <-- _no_q')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I have no money.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I don\'t have any money.' in generate(dmrs)
    dmrs = parse('Kim doesn\'t like any cake.')[0]
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Kim likes no cake.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim doesn\'t like any cake.' in generate(dmrs)
    # negation raising: "not think X" <-> "think not X"
    dmrs = parse('The boy doesn\'t think his team will win.')[0]
    search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2h-> [2]:_v e?')
    replace_dmrs = parse_graphlang('[1]:_v e? -2h-> neg e[pui--] -1h-> [2]:_v e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'The boy thinks his team won\'t win.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The boy doesn\'t think his team will win.' in generate(dmrs)
    dmrs = parse('I don\'t believe that Kim likes cake.')[0]
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I believe that Kim doesn\'t like cake.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I don\'t believe that Kim likes cake.' in generate(dmrs)
    # double-negation elimination
    dmrs = parse('I don\'t think that Kim doesn\'t like cake.')[0]
    search_dmrs = parse_graphlang('neg e[pui--] -1h-> [1]:_v e? -2h-> neg e[pui--] -1h-> [2]:_v e?')
    replace_dmrs = parse_graphlang('[1]:_v e? -2h-> [2]:_v e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I think that Kim likes cake.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I don\'t think that Kim doesn\'t like cake.' in generate(dmrs)

    # Verb particle examples
    dmrs = parse('I look you up.')[0]
    search_dmrs = parse_graphlang('[1]:_look_v_up e?')
    replace_dmrs = parse_graphlang('[1]:_find_v_1 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I find you.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I look you up.' in generate(dmrs)
    dmrs = parse('Kim carries on eating cake.')[0]
    search_dmrs = parse_graphlang('[1]:_carry_v_on e?')
    replace_dmrs = parse_graphlang('[1]:_continue_v_2 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Kim continues eating cake.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim carries on eating cake.' in generate(dmrs)
    dmrs = parse('Alice passed a message on to Bob.')[0]
    search_dmrs = parse_graphlang('[1]:_pass_v_on e?')
    replace_dmrs = parse_graphlang('[1]:_give_v_1 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Alice gave a message to Bob.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Alice passed a message on to Bob.' in generate(dmrs)
    # argument re-ordering: give back <-> return to
    dmrs = parse('Bob then gave Alice back the message.')[0]
    search_dmrs = parse_graphlang('[1]:node <-2- [2]:_give_v_back e? -3-> [3]:node')
    replace_dmrs = parse_graphlang('[3]:node <-2- [2]:_return_v_to e? -3-> [1]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Bob then returned the message to Alice.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Bob then gave Alice back the message.' in generate(dmrs)
    dmrs = parse('He keeps on complaining.')[0]
    search_dmrs = parse_graphlang('[2]:node <-1- [1]:_keep_v_on e? -2h-> [3]:_v e[pui-+] -1-> :2')
    replace_dmrs = parse_graphlang('[1]:_continue_v_2 e? -1h-> [3]:_v e[oui--] -1-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'He continues to complain.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'He keeps on complaining.' in generate(dmrs)
    dmrs = parse('He takes on great responsibility.')[0]
    search_dmrs = parse_graphlang('[1]:_take_v_on e?')
    replace_dmrs = parse_graphlang('[1]:_accept_v_1 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'He accepts great responsibility.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'He takes on great responsibility.' in generate(dmrs)

    # determinerless PPs
    dmrs = parse('I found you at last.')[0]
    search_dmrs = parse_graphlang('[1]:_at_p e[pui--] -2-> _last_n_1 x[3s_+_] <-- idiom_q_i')
    replace_dmrs = parse_graphlang('[1]:_final_a_1 e[pui--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I found you finally.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I found you at last.' in generate(dmrs)
    dmrs = parse('I am on edge.')[0]
    search_dmrs = parse_graphlang('[1]:_on_p e? -2-> _edge_n_of x[3s_+_] <-- idiom_q_i')
    replace_dmrs = parse_graphlang('[1]:_nervous_a_about e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I am nervous.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I am on edge.' in generate(dmrs)
    dmrs = parse('You can see the insects at close range.')[0]
    search_dmrs = parse_graphlang('[1]:_at_p e[pui--] -2-> _range_n_of x[3s___] <-- udef_q; :_range_n_of <=1= _close_a_to e[p____]')
    replace_dmrs = parse_graphlang('[1]:_from_p_state e[pui--] -2-> _distance_n_1 x[3s_+_] <-- _a_q; :_distance_n_1 <=1= _small_a_1 e[p____]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'You can see the insects from a small distance.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'You can see the insects at close range.' in generate(dmrs)

    # idioms
    dmrs = parse('Kim often took advantage of Sandy.')[0]
    search_dmrs = parse_graphlang('[2]:node <-3- [1]:_take_v_of-i e? -2-> _advantage_n_i x[3s_+_] <-- idiom_q_i')
    replace_dmrs = parse_graphlang('[1]:_benefit_v_from e? -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Kim often benefitted from Sandy.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim often took advantage of Sandy.' in generate(dmrs)
    dmrs = parse('The government keeps tabs on everyone.')[0]
    search_dmrs = parse_graphlang('[2]:node <-3- [1]:_keep_v_on-i e? -2-> _tabs_n_i x[3p_+_] <-- udef_q')
    replace_dmrs = parse_graphlang('[1]:_watch_v_1 e? -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'The government watches everyone.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The government keeps tabs on everyone.' in generate(dmrs)
    dmrs = parse('I can give you a hand with your work.')[0]
    search_dmrs = parse_graphlang('[2]:node <-3- [1]:_give_v_1 e? -2-> _hand_n_1 x[3s_+_] <-- _a_q')
    replace_dmrs = parse_graphlang('[1]:_help_v_1 e? -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I can help you with your work.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I can give you a hand with your work.' in generate(dmrs)
    dmrs = parse('The old senator kicked the bucket.')[0]
    search_dmrs = parse_graphlang('[1]:_kick_v_i e? -2-> _bucket_n_1 x[3s_+_] <-- _the_q')
    replace_dmrs = parse_graphlang('[1]:_die_v_1 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'The old senator died.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'The old senator kicked the bucket.' in generate(dmrs)

    # light verbs
    dmrs = parse('I give a talk on linguistics.')[0]
    search_dmrs = parse_graphlang('[1]:_give_v_1 e? -2-> _talk_n_of-on x[3s_+_] <-- _a_q; :_talk_n_of-on -1-> [2]:node')
    replace_dmrs = parse_graphlang('[1]:_talk_v_about e? <=1= _about_p e -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I talk about linguistics.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I give a talk on linguistics.' in generate(dmrs)

    # synonyms
    dmrs = parse('Kim loves cake.')[0]
    search_dmrs = parse_graphlang('[1]:_love_v_1 e?')
    replace_dmrs = parse_graphlang('[1]:_adore_v_1 e?')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Kim adores cake.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim loves cake.' in generate(dmrs)
    dmrs = parse('I like to play tennis.')[0]
    search_dmrs = parse_graphlang('[1]:_like_v_1 e? -2h-> [2]:_v e[pui--]')
    replace_dmrs = parse_graphlang('[1]:_enjoy_v_1 e? -2h-> [2]:_v e[pui-+]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I enjoy playing tennis.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I like to play tennis.' in generate(dmrs)

    # synonyms with re-ordering
    dmrs = parse('Kim gave a book to Sandy.')[0]
    search_dmrs = parse_graphlang('[2]:node <-1- [1]:_give_v_1 e? -3-> [3]:node')
    replace_dmrs = parse_graphlang('[3]:node <-1- [1]:_get_v_1 e? <=1= _from_p e -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Sandy got a book from Kim.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim gave a book to Sandy.' in generate(dmrs)
    dmrs = parse('Kim hates spinach.')[0]
    search_dmrs = parse_graphlang('[2]:node <-1- [1]:_hate_v_1 e? -2-> [3]:node')
    replace_dmrs = parse_graphlang('[3]:node <-1- [1]:_disgust_v_1 e? -2-> [2]:node')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Spinach disgusts Kim.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'Kim hates spinach.' in generate(dmrs)
    dmrs = parse('I like to play tennis.')[0]
    search_dmrs = parse_graphlang('[1]:node <-1- [2]:_like_v_1 e? -2h-> [3]:_v e[pui--] -1-> :1')
    replace_dmrs = parse_graphlang('udef_q --> nominalization x <-1- [2]:_make_v_cause e? -2h-> _happy_a_with e[pui__] -1-> [1]:node; :nominalization =1h=> [3]:_v e[pui-+]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Playing tennis makes me happy.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I like to play tennis.' in generate(dmrs)

    # think + subclause examples
    dmrs = parse('I think I will go.')[0]
    search_dmrs = parse_graphlang('[1]:_think_v_1 e[????-] -2h-> [2]:_v e[pfi--]')
    replace_dmrs = parse_graphlang('[1]:_think_v_of e[????+] -2-> nominalization x <-- udef_q; :nominalization =1h=> [2]:_v e[pui-+]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I am thinking of me going.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I think I will go.' in generate(dmrs)
    dmrs = parse('I think he will go.')[0]
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I am thinking of him going.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I think he will go.' in generate(dmrs)

    # determinerless PP (with optional node)
    dmrs = parse('I found you at last.')[0]
    search_dmrs = parse_graphlang('[1]:_at_p e[pui--] -2-> _last_n_1 x[3s_+_] <-- idiom_q_i; (2):_long_a_1 e[pui__] =1=> :_last_n_1')
    replace_dmrs = parse_graphlang('[1]:_final_a_1 e[pui--]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'I found you finally.' in generate(dmrs)
    dmrs_mapping(dmrs, replace_dmrs, search_dmrs, copy_dmrs=False)
    assert 'I found you at last.' in generate(dmrs)

    # question generation (with subgraph nodes)
    dmrs = parse('Kim gave Sandy a book.')[0]
    search_dmrs = parse_graphlang('*[1]:_v e[p????] -1-> {2}:node')
    replace_dmrs = parse_graphlang('*[1]:_v e[q????] -1-> [2]:person x[3s___] <-- which_q')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Who gave Sandy a book?' in generate(dmrs)
    dmrs = parse('Kim gave Sandy a book.')[0]
    search_dmrs = parse_graphlang('*[1]:_v e[p????] -2-> {2}:node')
    replace_dmrs = parse_graphlang('*[1]:_v e[q????] -2-> [2]:thing x <-- which_q')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'What did Kim give Sandy?' in generate(dmrs)
    dmrs = parse('Kim gave Sandy a book.')[0]
    search_dmrs = parse_graphlang('*[1]:_v e[p????] -3-> {2}:node')
    replace_dmrs = parse_graphlang('*[1]:_v e[q????] -3-> [2]:person x[3s___] <-- which_q')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, copy_dmrs=False)
    assert 'Who did Kim give a book?' in generate(dmrs)

    # think example (with equal constraints)
    dmrs = parse('I think I will go.')[0]
    # `equalities` is filled in by parse_graphlang and consumed by dmrs_mapping
    # to enforce that nodes tagged "=1" match the same referent.
    equalities = {}
    search_dmrs = parse_graphlang('[1]:node=1 <-1- [2]:_think_v_1 e[????-] -2h-> [3]:_v e[pfi--] -1-> node=1', equalities=equalities)
    replace_dmrs = parse_graphlang('[1]:node <-1- [2]:_think_v_of e[????+] -2-> nominalization x <-- udef_q; :nominalization =1h=> [3]:_v e[pui-+]')
    dmrs_mapping(dmrs, search_dmrs, replace_dmrs, equalities=equalities, copy_dmrs=False)
    assert 'I am thinking of going.' in generate(dmrs)
| |
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import time
import uuid
from oslo_utils import timeutils
from six import moves
from novadocker.virt.docker import client as docker_client
class MockClient(object):
    """In-memory fake of the docker client used by nova-docker unit tests.

    Containers and images are stored in plain dicts; no daemon is contacted.
    Public methods are wrapped with docker_client.filter_data so mocked
    responses pass through the same post-processing as the real client's.
    """

    def __init__(self, endpoint=None):
        # endpoint is accepted (real-client signature) but unused by the mock.
        # Maps container id -> {'Id': ..., 'running': bool, 'Config': dict}.
        self._containers = {}
        self.name = None
        # Fake repository
        self._repository = {'image_with_cmd':
                            {'ContainerConfig':
                             {'Cmd': 'echo Test'}},
                            'image_without_cmd':
                            {'ContainerConfig':
                             {'Cmd': None}}}
        # Snapshot images and their raw data blobs.
        self._images = {'snap-1':
                        {'ContainerConfig':
                         {'Cmd': None}}}
        self._image_data = {'snap-1': 'dummy'}
        self._setup_decorators()

    def _setup_decorators(self):
        # Wrap every public bound method with the real client's filter_data
        # decorator so the mock mimics its response filtering.
        for name, member in inspect.getmembers(self, inspect.ismethod):
            if not name.startswith('_'):
                setattr(self, name, docker_client.filter_data(member))

    def _fake_id(self):
        """Return a 64-hex-char pseudo id (same width as real docker ids)."""
        return uuid.uuid4().hex + uuid.uuid4().hex

    def _image_name(self, image_name):
        """Split full image name to host and image name."""
        if '/' in image_name:
            host, image_name = image_name.split('/', 1)
        return image_name

    def _is_image_exists(self, image_name):
        """Check whether Images is listed in self._repository."""
        image_name = self._image_name(image_name)
        # NOTE(review): a name present in _repository only "exists" when it is
        # also in _images, while completely unknown names always "exist".
        # This looks inverted but is preserved as-is — confirm against the
        # driver tests before changing it.
        if image_name in self._repository:
            return image_name in self._images
        return True

    def _is_daemon_running(self):
        # The fake daemon is always up.
        return True

    def containers(self, all=True, filters=None):
        """List fake containers; with a name filter, return just the match."""
        containers = []
        # Fix: dict.iterkeys() is Python 2 only; iterating the dict directly
        # is equivalent and works on Python 3 too (the module already uses
        # six for py2/py3 compatibility).
        for container_id in self._containers:
            containers.append({
                'Status': 'Exit 0',
                'Created': int(time.time()),
                'Image': 'ubuntu:12.04',
                'Ports': '',
                'Command': 'bash ',
                'Id': container_id
            })
            if filters and filters.get('name'):
                if (self._containers[container_id]['Config']['name'] ==
                        filters.get('name')):
                    return [{'Id': container_id}]
        return containers

    def create_container(self, image_name, **args):
        """Register a new (stopped) container; return its id or None.

        Returns None when the image fails the _is_image_exists check.
        """
        self.name = args['name']
        # Default config in the shape the docker API returns; caller-supplied
        # args override/extend it below.
        data = {
            'Hostname': args['hostname'],
            'User': '',
            'MemorySwap': 0,
            'AttachStdin': False,
            'AttachStdout': False,
            'AttachStderr': False,
            'PortSpecs': None,
            'Tty': True,
            'OpenStdin': True,
            'StdinOnce': False,
            'Env': None,
            'Cmd': [],
            'Dns': None,
            'Image': image_name,
            'Volumes': {},
            'VolumesFrom': '',
            'CpuShares': args['cpu_shares'],
            'NetworkDisabled': args['network_disabled']
        }
        data.update(args)
        if not self._is_image_exists(data['Image']):
            return None
        container_id = self._fake_id()
        self._containers[container_id] = {
            'Id': container_id,
            'running': False,
            'Config': data
        }
        return container_id

    def start(self, container_id, binds=None, dns=None, privileged=False):
        """Mark a container as running; False if the id is unknown."""
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = True
        return True

    def inspect_image(self, image_name):
        """Return the image config dict, or None for a non-existent image."""
        if not self._is_image_exists(image_name):
            return None
        image_name = self._image_name(image_name)
        if image_name in self._images:
            return self._images[image_name]
        return {'ContainerConfig': {'Cmd': None}}

    def inspect_container(self, container_id):
        """Return a docker-inspect-shaped dict, or None for an unknown id."""
        if container_id not in self._containers:
            return
        container = self._containers[container_id]
        info = {
            'Args': [],
            'Config': container['Config'],
            'Created': str(timeutils.utcnow()),
            'Id': container_id,
            'Image': self._fake_id(),
            'NetworkSettings': {
                'Bridge': '',
                'Gateway': '',
                'IPAddress': '',
                'IPPrefixLen': 0,
                'PortMapping': None
            },
            'Path': 'bash',
            'ResolvConfPath': '/etc/resolv.conf',
            'State': {
                'ExitCode': 0,
                'Ghost': False,
                'Pid': 0,
                'Running': container['running'],
                'StartedAt': str(timeutils.utcnow())
            },
            'SysInitPath': '/tmp/docker',
            'Volumes': {},
        }
        return info

    def stop(self, container_id, timeout=None):
        """Mark a container as stopped; False if the id is unknown."""
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = False
        return True

    def kill(self, container_id):
        """Same effect as stop() in the mock: mark as not running."""
        if container_id not in self._containers:
            return False
        self._containers[container_id]['running'] = False
        return True

    def remove_container(self, container_id, force=False):
        """Delete a stopped container; False if unknown or still running."""
        if container_id not in self._containers:
            return False
        # Docker doesn't allow to destroy a running container.
        if self._containers[container_id]['running']:
            return False
        del self._containers[container_id]
        return True

    def unpause(self, container_id):
        if container_id not in self._containers:
            return False
        self._containers[container_id]['paused'] = False
        return True

    def pause(self, container_id):
        if container_id not in self._containers:
            return False
        self._containers[container_id]['paused'] = True
        return True

    def commit(self, container_id, repository=None, tag=None):
        # The mock only validates the id; no image is actually created.
        if container_id not in self._containers:
            return False
        return True

    def get_container_logs(self, container_id):
        """Return canned log text, or False for an unknown container."""
        if container_id not in self._containers:
            return False
        return '\n'.join([
            'Lorem ipsum dolor sit amet, consectetur adipiscing elit. ',
            'Vivamus ornare mi sit amet orci feugiat, nec luctus magna ',
            'vehicula. Quisque diam nisl, dictum vitae pretium id, ',
            'consequat eget sapien. Ut vehicula tortor non ipsum ',
            'consectetur, at tincidunt elit posuere. In ut ligula leo. ',
            'Donec eleifend accumsan mi, in accumsan metus. Nullam nec ',
            'nulla eu risus vehicula porttitor. Sed purus ligula, ',
            'placerat nec metus a, imperdiet viverra turpis. Praesent ',
            'dapibus ornare massa. Nam ut hendrerit nunc. Interdum et ',
            'malesuada fames ac ante ipsum primis in faucibus. ',
            'Fusce nec pellentesque nisl.'])

    def get_image(self, name):
        """Return a file-like object with the image data; raise if missing."""
        if (name not in self._images or
                name not in self._image_data):
            raise Exception("Image not found - %s" % name)
        return moves.StringIO(self._image_data[name])

    def load_image(self, name, data):
        # Store raw image data under the given name.
        self._image_data[name] = data

    def load_repository_file(self, name, path):
        # No-op in the mock; the real client loads a tarball from disk.
        pass

    def ping(self):
        # The fake daemon always responds.
        return True
| |
'''
mali_remove_gaps.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_remove_gaps.py --help
Type::
python mali_remove_gaps.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import re
import getopt
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.MaliIO as MaliIO
# Usage banner printed for -h/--help and on option-parsing errors.
USAGE = """python %s [OPTIONS] < exonerate_output > filtered
Prune a nucelotide multiple alignment according to a master sequence.
1. Go in codon steps through the multiple alignment according
to the master sequence.
2. Remove all columns in other sequences, that
1. fall out of frame
2. are incomplete codons
Version = $Id: mali_remove_gaps.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-o, --file-output output
""" % sys.argv[0]

# getopt option specifications.
# NOTE(review): the short-option string advertises m:/e:/p:/c flags that the
# option loop in main() never handles — confirm against the original script.
param_long_options = ["verbose=", "help", "file-output=", "version"]
param_short_options = "v:hm:e:p:c"

# Module-level defaults read by main().
param_loglevel = 1  # verbosity level (-v)
param_gap_char = "-"  # character denoting a gap in the alignment
param_mask_char = "x"  # character denoting a masked residue
param_filename_output = None  # optional output filename (-o)
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads a fasta multiple alignment from stdin, prunes it to complete,
    in-frame codon columns (relative to a master sequence), and writes the
    pruned alignment to stdout.

    NOTE(review): this function references several names that are not defined
    anywhere in this file (param_master, param_master_pattern,
    GetFrameColumns, exons, AddExonInformation, param_remove_stops,
    param_mark_codons, param_filename_translation) and assigns to module
    globals (param_loglevel, param_filename_output) without a `global`
    declaration, so those assignments only create locals. This looks like an
    incomplete copy of the original CGAT script — confirm before relying on it.
    """
    if argv is None:
        argv = sys.argv
    try:
        optlist, args = getopt.getopt(
            sys.argv[1:], param_short_options, param_long_options)
    except getopt.error, msg:
        print USAGE, msg
        sys.exit(2)
    for o, a in optlist:
        if o in ("-v", "--verbose"):
            # NOTE(review): rebinds a local, not the module-level default.
            param_loglevel = int(a)
        elif o in ("--version", ):
            print "version="
            sys.exit(0)
        elif o in ("-h", "--help"):
            print USAGE
            sys.exit(0)
        elif o in ("-o", "--file-output"):
            # NOTE(review): likewise a local; the global is never updated.
            param_filename_output = a

    # 1. read multiple alignment in fasta format
    mali, identifiers = MaliIO.readFasta(sys.stdin)
    if param_loglevel >= 1:
        print "# read mali with %i entries." % len(identifiers)
        print E.GetHeader()
        print E.GetParams()

    # 1. remove gaps in multiple alignment
    mali = MaliIO.removeGaps(mali)

    # Determine the codon frame columns, either from a single master
    # sequence or from all sequences matching a pattern.
    if param_master:
        frame_columns = GetFrameColumns(mali, param_master)
    elif param_master_pattern:
        columns = []
        for id in identifiers:
            if re.search(param_master_pattern, id):
                columns += GetFrameColumns(mali, id)
        if len(columns) == 0:
            columns += GetFrameColumns(mali, identifiers[0])
        # sort all columns by tuple. The "shortest" codon will be first (1,2,3)
        # before (1,2,100)
        columns.sort()
        # select codons
        frame_columns = []
        last_codon = columns[0]
        for codon in columns[1:]:
            # skip identical codons
            if codon == last_codon:
                continue
            # take first (shortest) codon in case of identical first residue
            if codon[0] == last_codon[0]:
                continue
            # if not overlapping, keep
            if codon[0] > last_codon[2]:
                frame_columns.append(last_codon)
            # if overlapping, but out of register: skip
            last_codon = codon
        frame_columns.append(last_codon)

    # translate characters to upper/lower case according to exon info.
    if exons:
        for id in mali:
            if id in exons:
                mali[id] = AddExonInformation(
                    mali[id], exons[id], mask_char=param_mask_char)
    if param_loglevel >= 1:
        print "# found %i columns" % (len(frame_columns))

    mask_chars = (string.upper(param_mask_char), string.lower(param_mask_char))
    for id in mali.keys():
        sequence = mali[id]
        fragments = []
        nstops, ncodons, naligned = 0, 0, 0
        for a, b, c in frame_columns:
            codon = sequence[a] + sequence[b] + sequence[c]
            codon_is_aligned = False
            codon_is_ok = True
            for x in codon:
                # a codon will be masked, if it either
                # 1. contains a gap character
                # 2. is an unaligned character, i.e.,
                # exons and masked, or no exons and lowerwase
                residue_is_unaligned = (x == param_gap_char) or \
                    (not exons and x in string.lowercase) or \
                    (exons and x in mask_chars)
                codon_is_aligned = codon_is_aligned or not residue_is_unaligned
                codon_is_ok = codon_is_ok and not residue_is_unaligned
            if codon_is_aligned:
                naligned += 1
            if codon_is_ok:
                ncodons += 1
                # Stop codons are either gapped out or kept, per option.
                if string.upper(codon) in ("TAG", "TAA", "TGA"):
                    if param_remove_stops:
                        fragments.append(param_gap_char * 3)
                    else:
                        fragments.append(codon)
                    nstops += 1
                else:
                    fragments.append(codon)
            else:
                fragments.append(param_gap_char * 3)
        mali[id] = string.join(fragments, "")
        if param_loglevel >= 1:
            print "# sequence: %s\tpositions: %i\taligned:%i\tcodons: %i\t stops: %i" % (id, len(fragments), naligned, ncodons, nstops)
            sys.stdout.flush()

    # Write the pruned alignment to stdout, optionally spacing codons.
    for id in mali.keys():
        if param_mark_codons:
            a = mali[id]
            f = lambda x: a[x:x + 3]
            s = string.join([f(x) for x in range(0, len(a), 3)], " ")
        else:
            s = mali[id]
        print ">%s\n%s" % (id, s)

    # Optional protein translation output.
    if param_filename_translation:
        outfile = open(param_filename_translation, "w")
        for id in mali.keys():
            outfile.write(">%s\n%s\n" %
                          (id, Genomics.TranslateDNA2Protein(mali[id])))
        outfile.close()
    print E.GetFooter()
if __name__ == "__main__":
    # Script entry point: exit with main()'s return status.
    sys.exit(main(sys.argv))
| |
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# flake8: noqa
from __future__ import absolute_import
import pytest
import numpy as np
import ngraph as ng
from ngraph.impl import util
from ngraph.impl import Shape, Strides, CoordinateDiff, AxisSet, Coordinate
from ngraph.impl import Type, Function
from ngraph.impl.runtime import Backend
from ngraph.impl.op import Parameter
from ngraph.impl.op import Constant
import test
def binary_op(op_str, a, b):
    """Build the ngraph node applying the binary operation named by op_str.

    Operator symbols ("+", "-", "*", "/") use the Python operator overloads;
    named ops dispatch to the corresponding ngraph builder. Returns None for
    an unrecognised op_str, matching the original elif chain's fall-through.
    """
    dispatch = {
        "+": lambda x, y: x + y,
        "Add": ng.add,
        "-": lambda x, y: x - y,
        "Sub": ng.subtract,
        "*": lambda x, y: x * y,
        "Mul": ng.multiply,
        "/": lambda x, y: x / y,
        "Div": ng.divide,
        "Equal": ng.equal,
        "Greater": ng.greater,
        "GreaterEq": ng.greater_equal,
        "Less": ng.less,
        "LessEq": ng.less_equal,
        "Maximum": ng.maximum,
        "Minimum": ng.minimum,
        "NotEqual": ng.not_equal,
        "Power": ng.power,
    }
    builder = dispatch.get(op_str)
    return builder(a, b) if builder is not None else None
def binary_op_ref(op_str, a, b):
    """NumPy reference implementation mirroring binary_op.

    Arithmetic ops (symbolic or named) use Python operators; comparison and
    other named ops use the matching NumPy ufunc. Returns None for an
    unrecognised op_str, like the original elif chain.
    """
    arithmetic = {
        "+": lambda x, y: x + y,
        "Add": lambda x, y: x + y,
        "-": lambda x, y: x - y,
        "Sub": lambda x, y: x - y,
        "*": lambda x, y: x * y,
        "Mul": lambda x, y: x * y,
        "/": lambda x, y: x / y,
        "Div": lambda x, y: x / y,
    }
    if op_str in arithmetic:
        return arithmetic[op_str](a, b)
    numpy_ops = {
        "Dot": np.dot,
        "Equal": np.equal,
        "Greater": np.greater,
        "GreaterEq": np.greater_equal,
        "Less": np.less,
        "LessEq": np.less_equal,
        "Maximum": np.maximum,
        "Minimum": np.minimum,
        "NotEqual": np.not_equal,
        "Power": np.power,
    }
    ufunc = numpy_ops.get(op_str)
    return ufunc(a, b) if ufunc is not None else None
def binary_op_exec(op_str):
    """Compile and run binary_op(op_str) on a fixed 2x2 f32 input pair,
    then check the backend result against the NumPy reference."""
    dtype = Type.f32
    tensor_shape = Shape([2, 2])
    lhs = Parameter(dtype, tensor_shape)
    rhs = Parameter(dtype, tensor_shape)
    params = [lhs, rhs]
    func = Function([binary_op(op_str, lhs, rhs)], params, "test")
    backend = Backend.create(test.BACKEND_NAME)

    # Device tensors for the two inputs and the output.
    t_lhs = backend.create_tensor(dtype, tensor_shape)
    t_rhs = backend.create_tensor(dtype, tensor_shape)
    t_out = backend.create_tensor(dtype, tensor_shape)

    lhs_np = np.array([[1, 6], [7, 4]], dtype=np.float32)
    rhs_np = np.array([[5, 2], [3, 8]], dtype=np.float32)
    out_np = np.array([[0, 0], [0, 0]], dtype=np.float32)
    # 16 bytes = 4 f32 elements per 2x2 tensor.
    t_lhs.write(util.numpy_to_c(lhs_np), 16)
    t_rhs.write(util.numpy_to_c(rhs_np), 16)
    t_out.write(util.numpy_to_c(out_np), 16)

    handle = backend.compile(func)
    handle.call([t_out], [t_lhs, t_rhs])
    t_out.read(util.numpy_to_c(out_np), 16)

    expected = binary_op_ref(op_str, lhs_np, rhs_np)
    assert np.allclose(out_np, expected)
def binary_op_comparison(op_str):
    """Run the comparison op named *op_str* on fixed 2x2 float32 inputs and
    compare the boolean backend result against the NumPy reference."""
    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    parameter_list = [A, B]
    function = Function([binary_op(op_str, A, B)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    b = backend.create_tensor(element_type, shape)
    # Comparisons produce a boolean tensor, not f32.
    result = backend.create_tensor(Type.boolean, shape)
    a.write(util.numpy_to_c(np.array([[1, 5], [3, 2]], dtype=np.float32)), 16)
    b.write(util.numpy_to_c(np.array([[2, 4], [3, 1]], dtype=np.float32)), 16)
    # NOTE(review): np.bool is removed in NumPy >= 1.24 — consider plain bool.
    result_arr = np.array([[False, False], [False, False]], dtype=np.bool)
    # 4 bytes = 4 boolean elements (1 byte each).
    result.write(util.numpy_to_c(result_arr), 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 4)
    a_arr = np.array([[1, 5], [3, 2]], dtype=np.float32)
    b_arr = np.array([[2, 4], [3, 1]], dtype=np.float32)
    result_arr_ref = binary_op_ref(op_str, a_arr, b_arr)
    assert np.allclose(result_arr, result_arr_ref)
# Thin pytest entry points: arithmetic binary ops run end-to-end through
# binary_op_exec (float32 result tensor).
def test_add():
    binary_op_exec("+")
def test_add_op():
    binary_op_exec("Add")
def test_sub():
    binary_op_exec("-")
def test_sub_op():
    binary_op_exec("Sub")
def test_mul():
    binary_op_exec("*")
def test_mul_op():
    binary_op_exec("Mul")
def test_div():
    binary_op_exec("/")
def test_div_op():
    binary_op_exec("Div")
def test_maximum():
    binary_op_exec("Maximum")
def test_minimum():
    binary_op_exec("Minimum")
def test_power():
    binary_op_exec("Power")
# Comparison ops need a boolean result tensor, so they go through
# binary_op_comparison instead.
def test_greater():
    binary_op_comparison("Greater")
def test_greater_eq():
    binary_op_comparison("GreaterEq")
def test_less():
    binary_op_comparison("Less")
def test_less_eq():
    binary_op_comparison("LessEq")
def test_not_equal():
    binary_op_comparison("NotEqual")
def test_add_with_mul():
    """Compose two ops in one graph: result = (A + B) * C on 2x2 f32 tensors."""
    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
    parameter_list = [A, B, C]
    function = Function([ng.multiply(ng.add(A, B), C)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    b = backend.create_tensor(element_type, shape)
    c = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, shape)
    # 16 bytes = 4 float32 elements; flat data fills the 2x2 tensor row-major.
    a.write(util.numpy_to_c(np.array([1, 2, 3, 4], dtype=np.float32)), 16)
    b.write(util.numpy_to_c(np.array([5, 6, 7, 8], dtype=np.float32)), 16)
    c.write(util.numpy_to_c(np.array([9, 10, 11, 12], dtype=np.float32)), 16)
    result_arr = np.array([0, 0, 0, 0], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 16)
    handle = backend.compile(function)
    handle.call([result], [a, b, c])
    result.read(util.numpy_to_c(result_arr), 16)
    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
    result_arr_ref = (a_arr + b_arr) * c_arr
    assert np.allclose(result_arr, result_arr_ref)
def unary_op(op_str, a):
    """Build the ngraph node applying the unary operation named *op_str* to
    node *a*.  An unrecognized name yields ``None``."""
    builders = {
        "Abs": lambda node: ng.abs(node),
        "Acos": lambda node: ng.acos(node),
        "Asin": lambda node: ng.asin(node),
        "Atan": lambda node: ng.atan(node),
        "Ceiling": lambda node: ng.ceiling(node),
        "Cos": lambda node: ng.cos(node),
        "Cosh": lambda node: ng.cosh(node),
        "Floor": lambda node: ng.floor(node),
        "log": lambda node: ng.log(node),
        "exp": lambda node: ng.exp(node),
        "negative": lambda node: ng.negative(node),
        # Reverse flips along axis 1 using "index" mode.
        "Reverse": lambda node: ng.reverse(node, np.array([1]), "index"),
        "Sign": lambda node: ng.sign(node),
        "Sin": lambda node: ng.sin(node),
        "Sinh": lambda node: ng.sinh(node),
        "Sqrt": lambda node: ng.sqrt(node),
        "Tan": lambda node: ng.tan(node),
        "Tanh": lambda node: ng.tanh(node),
    }
    build = builders.get(op_str)
    return build(a) if build is not None else None
def unary_op_ref(op_str, a):
    """NumPy reference implementation matching :func:`unary_op`.

    An unrecognized name yields ``None``.  "Reverse" corresponds to
    ``np.fliplr`` (flip along axis 1 of a 2-D array).
    """
    reference = {
        "Abs": np.abs,
        "Acos": np.arccos,
        "Asin": np.arcsin,
        "Atan": np.arctan,
        "Ceiling": np.ceil,
        "Cos": np.cos,
        "Cosh": np.cosh,
        "Floor": np.floor,
        "log": np.log,
        "exp": np.exp,
        "negative": np.negative,
        "Reverse": np.fliplr,
        "Sign": np.sign,
        "Sin": np.sin,
        "Sinh": np.sinh,
        "Sqrt": np.sqrt,
        "Tan": np.tan,
        "Tanh": np.tanh,
    }
    op = reference.get(op_str)
    return op(a) if op is not None else None
def unary_op_exec(op_str, input_list):
    """
    Run the unary op named *op_str* on *input_list* and compare the backend
    result against the NumPy reference (:func:`unary_op_ref`).

    input_list needs to have deep length of 4: exactly 4 float32 values in
    total, since a fixed 16 bytes (4 * 4-byte floats) are transferred below.
    """
    element_type = Type.f32
    shape = Shape(np.array(input_list).shape)
    shape_np = np.array(input_list).shape
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([unary_op(op_str, A)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, shape)
    # 16 bytes = 4 float32 elements (see docstring contract above).
    a.write(util.numpy_to_c(np.array(input_list, dtype=np.float32)), 16)
    result_arr = np.zeros(shape_np, dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 16)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 16)
    a_arr = np.array(input_list, dtype=np.float32)
    result_arr_ref = unary_op_ref(op_str, a_arr)
    assert np.allclose(result_arr, result_arr_ref)
# Thin pytest entry points: each exercises one unary op end-to-end through
# unary_op_exec with a 4-element input (see unary_op_exec's size contract).
def test_abs():
    input_list = [-1, 0, 1, 2]
    op_str = "Abs"
    unary_op_exec(op_str, input_list)
def test_acos():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Acos"
    unary_op_exec(op_str, input_list)
def test_asin():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Asin"
    unary_op_exec(op_str, input_list)
def test_atan():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Atan"
    unary_op_exec(op_str, input_list)
def test_ceiling():
    input_list = [0.5, 0, 0.4, 0.5]
    op_str = "Ceiling"
    unary_op_exec(op_str, input_list)
def test_cos():
    input_list = [0, 0.7, 1.7, 3.4]
    op_str = "Cos"
    unary_op_exec(op_str, input_list)
def test_cosh():
    input_list = [-1, 0.0, 0.5, 1]
    op_str = "Cosh"
    unary_op_exec(op_str, input_list)
def test_floor():
    input_list = [-0.5, 0, 0.4, 0.5]
    op_str = "Floor"
    unary_op_exec(op_str, input_list)
def test_log():
    input_list = [1, 2, 3, 4]
    op_str = "log"
    unary_op_exec(op_str, input_list)
def test_exp():
    input_list = [-1, 0, 1, 2]
    op_str = "exp"
    unary_op_exec(op_str, input_list)
def test_negative():
    input_list = [-1, 0, 1, 2]
    op_str = "negative"
    unary_op_exec(op_str, input_list)
def test_sign():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Sign"
    unary_op_exec(op_str, input_list)
def test_sin():
    input_list = [0, 0.7, 1.7, 3.4]
    op_str = "Sin"
    unary_op_exec(op_str, input_list)
def test_sinh():
    input_list = [-1, 0.0, 0.5, 1]
    op_str = "Sinh"
    unary_op_exec(op_str, input_list)
def test_sqrt():
    input_list = [0.0, 0.5, 1, 2]
    op_str = "Sqrt"
    unary_op_exec(op_str, input_list)
def test_tan():
    input_list = [-np.pi / 4, 0, np.pi / 8, np.pi / 8]
    op_str = "Tan"
    unary_op_exec(op_str, input_list)
def test_tanh():
    input_list = [-1, 0, 0.5, 1]
    op_str = "Tanh"
    unary_op_exec(op_str, input_list)
# Reverse needs a 2-D input (it flips axis 1), still 4 elements total.
def test_reverse():
    input_list = [[-1, 0], [0.5, 1]]
    op_str = "Reverse"
    unary_op_exec(op_str, input_list)
def test_reshape():
    """Reshape a 2x3 tensor to 3x2 and compare with np.reshape."""
    element_type = Type.f32
    shape = Shape([2, 3])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    function = Function([ng.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([3, 2]))
    # 24 bytes = 6 float32 elements.
    a.write(util.numpy_to_c(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)), 24)
    result_arr = np.array([[0, 0], [0, 0], [0, 0]], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 24)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 24)
    a_arr = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    result_arr_ref = np.reshape(a_arr, (3, 2))
    assert np.allclose(result_arr, result_arr_ref)
def test_broadcast():
    """Broadcast a length-3 vector to shape (3, 3): each row repeats [1, 2, 3]."""
    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
    parameter_list = [A]
    function = Function([ng.broadcast(A, [3, 3])], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, Shape([3]))
    result = backend.create_tensor(element_type, Shape([3, 3]))
    # 12 bytes in (3 floats); 36 bytes out (9 floats).
    a.write(util.numpy_to_c(np.array([1, 2, 3], dtype=np.float32)), 12)
    result_arr = np.zeros((3, 3), dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 36)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 36)
    # Reference via NumPy broadcasting: zero column + row vector gives three
    # identical [1, 2, 3] rows.
    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
    result_arr_ref = np.add(a_arr, b_arr)
    assert np.allclose(result_arr, result_arr_ref)
def test_constant():
    """A parameterless graph returning a 3x3 constant filled with 0..8."""
    element_type = Type.f32
    parameter_list = []
    function = Function(
        [Constant(element_type, Shape([3, 3]), list(range(9)))], parameter_list, "test"
    )
    backend = Backend.create(test.BACKEND_NAME)
    result = backend.create_tensor(element_type, Shape([3, 3]))
    result_arr = np.zeros((3, 3), dtype=np.float32)
    # 36 bytes = 9 float32 elements.
    result.write(util.numpy_to_c(result_arr), 36)
    handle = backend.compile(function)
    handle.call([result], [])
    result.read(util.numpy_to_c(result_arr), 36)
    result_arr_ref = np.arange(9).reshape(3, 3)
    assert np.allclose(result_arr, result_arr_ref)
def test_concat():
    """Concatenate three 1x2 tensors along axis 0 into a 3x2 tensor."""
    element_type = Type.f32
    A = Parameter(element_type, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
    parameter_list = [A, B, C]
    axis = 0
    function = Function([ng.concat([A, B, C], axis)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, Shape([1, 2]))
    b = backend.create_tensor(element_type, Shape([1, 2]))
    c = backend.create_tensor(element_type, Shape([1, 2]))
    result = backend.create_tensor(element_type, Shape([3, 2]))
    # 8 bytes per input (2 floats each); 24 bytes for the 6-element result.
    a.write(util.numpy_to_c(np.array([1, 2], dtype=np.float32)), 8)
    b.write(util.numpy_to_c(np.array([5, 6], dtype=np.float32)), 8)
    c.write(util.numpy_to_c(np.array([7, 8], dtype=np.float32)), 8)
    result_arr = np.zeros(6, dtype=np.float32).reshape(3, 2)
    result.write(util.numpy_to_c(result_arr), 24)
    handle = backend.compile(function)
    handle.call([result], [a, b, c])
    result.read(util.numpy_to_c(result_arr), 24)
    a_arr = np.array([[1, 2]], dtype=np.float32)
    b_arr = np.array([[5, 6]], dtype=np.float32)
    c_arr = np.array([[7, 8]], dtype=np.float32)
    result_arr_ref = np.concatenate((a_arr, b_arr, c_arr), axis)
    assert np.allclose(result_arr, result_arr_ref)
def test_axisset():
    """AxisSet accepts a set, list or tuple and holds the same three axes."""
    for axes_source in ({1, 2, 3}, [1, 2, 3], (1, 2, 3)):
        axis_set = AxisSet(axes_source)
        assert len(axis_set) == 3
        assert set(axis_set) == {1, 2, 3}
def test_select():
    """Element-wise select: pick from B where the boolean mask A is True,
    otherwise from C."""
    element_type = Type.f32
    A = Parameter(Type.boolean, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
    parameter_list = [A, B, C]
    function = Function([ng.select(A, B, C)], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(Type.boolean, Shape([1, 2]))
    b = backend.create_tensor(element_type, Shape([1, 2]))
    c = backend.create_tensor(element_type, Shape([1, 2]))
    result = backend.create_tensor(element_type, Shape([1, 2]))
    # 2 bytes = 2 boolean elements (1 byte each).
    # NOTE(review): np.bool is removed in NumPy >= 1.24 — consider plain bool.
    a.write(util.numpy_to_c(np.array([[True, False]], dtype=np.bool)), 2)
    b.write(util.numpy_to_c(np.array([[5, 6]], dtype=np.float32)), 8)
    c.write(util.numpy_to_c(np.array([[7, 8]], dtype=np.float32)), 8)
    result_arr = np.array([[0, 0]], dtype=np.float32)
    result.write(util.numpy_to_c(result_arr), 8)
    handle = backend.compile(function)
    handle.call([result], [a, b, c])
    result.read(util.numpy_to_c(result_arr), 8)
    # mask [True, False] selects B[0]=5 then C[1]=8.
    result_arr_ref = np.array([[5, 8]])
    assert np.allclose(result_arr, result_arr_ref)
def test_max_pool():
    """Max pooling in four configurations: 1-D, 1-D strided, 2-D, 2-D strided.

    All references exploit that max over a window of a monotonically
    increasing ramp equals the window's last element.
    """
    # test 1d
    element_type = Type.f32
    shape = Shape([1, 1, 10])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    input_arr = np.arange(10, dtype=np.float32).reshape(1, 1, 10)
    window_shape = [3]
    strides = [1] * len(window_shape)
    pads_begin = [0] * len(window_shape)
    pads_end = [0] * len(window_shape)
    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1, 1, 8]))
    a.write(util.numpy_to_c(input_arr), 10 * 4)
    result_arr = np.zeros(8, dtype=np.float32).reshape(1, 1, 8)
    result.write(util.numpy_to_c(result_arr), 8 * 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 32)
    # Window max of a ramp = last window element, i.e. index + 2 for size 3.
    result_arr_ref = (np.arange(8) + 2).reshape(1, 1, 8)
    assert np.allclose(result_arr, result_arr_ref)
    # test 1d with strides
    strides = [2]
    pads_begin = [0] * len(window_shape)
    pads_end = [0] * len(window_shape)
    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
    function = Function([model], parameter_list, "test")
    size = 4
    result = backend.create_tensor(element_type, Shape([1, 1, size]))
    result_arr = np.zeros(size, dtype=np.float32).reshape(1, 1, size)
    # NOTE(review): a fresh backend is created here AFTER `result` was
    # allocated from the previous backend instance — confirm this is intended.
    backend = Backend.create(test.BACKEND_NAME)
    result.write(util.numpy_to_c(result_arr), size * 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), size * 4)
    result_arr_ref = ((np.arange(size) + 1) * 2).reshape(1, 1, size)
    assert np.allclose(result_arr, result_arr_ref)
    # test 2d
    element_type = Type.f32
    shape = Shape([1, 1, 10, 10])
    A = Parameter(element_type, shape)
    parameter_list = [A]
    input_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    window_shape = [3, 3]
    strides = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, shape)
    result = backend.create_tensor(element_type, Shape([1, 1, 8, 8]))
    a.write(util.numpy_to_c(input_arr), 10 * 10 * 4)
    result_arr = np.zeros(64, dtype=np.float32).reshape(1, 1, 8, 8)
    result.write(util.numpy_to_c(result_arr), 8 * 8 * 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), 8 * 8 * 4)
    # 2-D window max of a row-major ramp = bottom-right corner of each window.
    result_arr_ref = ((np.arange(100).reshape(10, 10))[2:, 2:]).reshape(1, 1, 8, 8)
    assert np.allclose(result_arr, result_arr_ref)
    # test 2d with strides
    strides = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ng.max_pool(A, strides, pads_begin, pads_end, window_shape)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    size = 4
    result = backend.create_tensor(element_type, Shape([1, 1, size, size]))
    result_arr = np.zeros(size * size, dtype=np.float32).reshape(1, 1, size, size)
    result.write(util.numpy_to_c(result_arr), size * size * 4)
    handle = backend.compile(function)
    handle.call([result], [a])
    result.read(util.numpy_to_c(result_arr), size * size * 4)
    result_arr_ref = ((np.arange(100).reshape(10, 10))[2::2, 2::2]).reshape(1, 1, size, size)
    assert np.allclose(result_arr, result_arr_ref)
def convolution2d(
    image,
    filterit,
    strides=(1, 1),
    dilation=(1, 1),
    padding_below=(0, 0),
    padding_above=(0, 0),
    data_dilation=(1, 1),
):
    """Plain-NumPy 2-D convolution reference (no kernel flipping, i.e.
    cross-correlation) used to validate backend results.

    Supports strides, filter dilation, asymmetric zero padding and data
    dilation; the result is always float32.
    """

    def dilate(arr, dil=(1, 1)):
        # Spread arr so neighbouring elements end up dil[k] apart along each
        # axis, zero-filling the gaps.
        rows, cols = arr.shape
        spread = np.zeros(
            ((rows - 1) * dil[0] + 1, (cols - 1) * dil[1] + 1), dtype=np.float32
        )
        spread[:: dil[0], :: dil[1]] = arr
        return spread

    # Zero-pad the image (possibly asymmetrically) first, then apply data
    # dilation to the padded image and dilation to the filter if requested.
    image = np.pad(
        np.asarray(image, dtype=np.float32),
        ((padding_below[0], padding_above[0]), (padding_below[1], padding_above[1])),
        mode="constant",
    )
    if not (data_dilation[0] == 1 and data_dilation[1] == 1):
        image = dilate(image, data_dilation)
    if not (dilation[0] == 1 and dilation[1] == 1):
        filterit = dilate(filterit, dilation)

    i_rows, i_cols = image.shape
    f_rows, f_cols = filterit.shape
    # Output extent mirrors the original integer arithmetic:
    # (input - filter + 1) // stride.
    out_rows = (i_rows - f_rows + 1) // strides[0]
    out_cols = (i_cols - f_cols + 1) // strides[1]
    result = np.zeros((out_rows, out_cols), dtype=np.float32)
    for r in range(out_rows):
        for c in range(out_cols):
            window = image[
                r * strides[0] : r * strides[0] + f_rows,
                c * strides[1] : c * strides[1] + f_cols,
            ]
            result[r, c] = np.sum(window * filterit)
    return result
def test_convolution_simple():
    """Convolve a 16x16 ramp image with a 3x3 filter of ones carrying -1 on
    both diagonals; validate against the pure-NumPy convolution2d reference."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 16, 16])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
    # Ones filter with -1 placed on the main and anti diagonals.
    filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][0][0] = -1
    filter_arr[0][0][1][1] = -1
    filter_arr[0][0][2][2] = -1
    filter_arr[0][0][0][2] = -1
    filter_arr[0][0][2][0] = -1
    result_arr = np.zeros(196, dtype=np.float32).reshape(1, 1, 14, 14)
    strides = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [1, 1]
    model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, image_shape)
    b = backend.create_tensor(element_type, filter_shape)
    a.write(util.numpy_to_c(image_arr), 16 * 16 * 4)
    b.write(util.numpy_to_c(filter_arr), 3 * 3 * 4)
    result = backend.create_tensor(element_type, Shape([1, 1, 14, 14]))
    result.write(util.numpy_to_c(result_arr), 14 * 14 * 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 14 * 14 * 4)
    result_arr_ref = convolution2d(image_arr[0][0], filter_arr[0][0]).reshape(1, 1, 14, 14)
    assert np.allclose(result_arr, result_arr_ref)
def test_convolution_with_strides():
    """Strided (2, 2) convolution with a single-center-tap 3x3 filter,
    validated against the convolution2d reference."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    # Filter of zeros with a single 1 at the center — picks out one pixel.
    filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][1][1] = 1
    strides = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [1, 1]
    model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, image_shape)
    b = backend.create_tensor(element_type, filter_shape)
    a.write(util.numpy_to_c(image_arr), 10 * 10 * 4)
    b.write(util.numpy_to_c(filter_arr), 3 * 3 * 4)
    result_arr = np.zeros(16, dtype=np.float32).reshape(1, 1, 4, 4)
    result = backend.create_tensor(element_type, Shape([1, 1, 4, 4]))
    result.write(util.numpy_to_c(result_arr), 4 * 4 * 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 4 * 4 * 4)
    result_arr_ref = convolution2d(image_arr[0][0], filter_arr[0][0], strides).reshape(1, 1, 4, 4)
    assert np.allclose(result_arr, result_arr_ref)
def test_convolution_with_filter_dilation():
    """Convolution with filter dilation (2, 2): the 3x3 ones filter has an
    effective 5x5 footprint, producing a 6x6 output from a 10x10 image."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
    strides = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    dilations = [2, 2]
    model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, image_shape)
    b = backend.create_tensor(element_type, filter_shape)
    a.write(util.numpy_to_c(image_arr), 10 * 10 * 4)
    b.write(util.numpy_to_c(filter_arr), 3 * 3 * 4)
    result_arr = np.zeros(36, dtype=np.float32).reshape(1, 1, 6, 6)
    result = backend.create_tensor(element_type, Shape([1, 1, 6, 6]))
    result.write(util.numpy_to_c(result_arr), 6 * 6 * 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 6 * 6 * 4)
    result_arr_ref = convolution2d(image_arr[0][0], filter_arr[0][0], strides, dilations).reshape(
        1, 1, 6, 6
    )
    assert np.allclose(result_arr, result_arr_ref)
def test_convolution_with_padding():
    """Convolution with explicit (zero-width) padding arguments.

    NOTE(review): despite the name, both pads are [0, 0] here, so this is
    configured like the filter-dilation test but with a center-tap filter —
    confirm whether non-zero pads were intended.
    """
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    # Filter of zeros with a single 1 at the center.
    filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][1][1] = 1
    strides = [1, 1]
    dilations = [2, 2]
    pads_begin = [0, 0]
    pads_end = [0, 0]
    model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, image_shape)
    b = backend.create_tensor(element_type, filter_shape)
    a.write(util.numpy_to_c(image_arr), 10 * 10 * 4)
    b.write(util.numpy_to_c(filter_arr), 3 * 3 * 4)
    result_arr = np.zeros(36, dtype=np.float32).reshape(1, 1, 6, 6)
    result = backend.create_tensor(element_type, Shape([1, 1, 6, 6]))
    result.write(util.numpy_to_c(result_arr), 6 * 6 * 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 6 * 6 * 4)
    result_arr_ref = convolution2d(
        image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
    ).reshape(1, 1, 6, 6)
    assert np.allclose(result_arr, result_arr_ref)
def test_convolution_with_non_zero_padding():
    """Convolution with asymmetric non-zero padding ([2, 1] below, [1, 2]
    above) and filter dilation (2, 2), giving a 9x9 output from 10x10."""
    element_type = Type.f32
    image_shape = Shape([1, 1, 10, 10])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
    parameter_list = [data, filters]
    image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    # Filter of -1 values with a single +1 at the center.
    filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1
    filter_arr[0][0][1][1] = 1
    strides = [1, 1]
    dilations = [2, 2]
    pads_begin = [2, 1]
    pads_end = [1, 2]
    model = ng.convolution(data, filters, strides, pads_begin, pads_end, dilations)
    function = Function([model], parameter_list, "test")
    backend = Backend.create(test.BACKEND_NAME)
    a = backend.create_tensor(element_type, image_shape)
    b = backend.create_tensor(element_type, filter_shape)
    a.write(util.numpy_to_c(image_arr), 10 * 10 * 4)
    b.write(util.numpy_to_c(filter_arr), 3 * 3 * 4)
    result_arr = np.zeros(81, dtype=np.float32).reshape(1, 1, 9, 9)
    result = backend.create_tensor(element_type, Shape([1, 1, 9, 9]))
    result.write(util.numpy_to_c(result_arr), 9 * 9 * 4)
    handle = backend.compile(function)
    handle.call([result], [a, b])
    result.read(util.numpy_to_c(result_arr), 9 * 9 * 4)
    result_arr_ref = convolution2d(
        image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
    ).reshape(1, 1, 9, 9)
    assert np.allclose(result_arr, result_arr_ref)
| |
###unique
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains instructions for recalling operating system file and directory paths,
eliminating redundant list entries, removing unecessary file paths from py2app or py2exe
and reading the propper Ensembl database version to allow for version specific access."""
import sys, string
import os.path, platform
import unique ### Import itself as a reference to it's location
dirfile = unique
py2app_adj = '/GO_Elite.app/Contents/Resources/Python/site-packages.zip'
py2app_adj1 = '/GO_Elite.app/Contents/Resources/lib/python2.4/site-packages.zip'
py2app_adj2 = '/GO_Elite.app/Contents/Resources/lib/python2.5/site-packages.zip'
py2app_adj3 = '/GO_Elite.app/Contents/Resources/lib/python2.6/site-packages.zip'
py2app_adj4 = '/GO_Elite.app/Contents/Resources/lib/python2.7/site-packages.zip'
py2exe_adj = '\\library.zip' ###py2exe
cx_Freeze_adj = '/library.zip'
pyinstaller_adj = '/GO_Elite.app/Contents/MacOS'
py2app_ge_dirs = [py2app_adj,py2exe_adj,py2app_adj1,py2app_adj2,py2app_adj3,py2app_adj4,cx_Freeze_adj,pyinstaller_adj]
py2app_adj = '/AltAnalyze.app/Contents/Resources/Python/site-packages.zip'
py2app_adj1 = '/AltAnalyze.app/Contents/Resources/lib/python2.4/site-packages.zip'
py2app_adj2 = '/AltAnalyze.app/Contents/Resources/lib/python2.5/site-packages.zip'
py2app_adj3 = '/AltAnalyze.app/Contents/Resources/lib/python2.6/site-packages.zip'
py2app_adj4 = '/AltAnalyze.app/Contents/Resources/lib/python2.7/site-packages.zip'
py2exe_adj = '\\library.zip' ###py2exe
cx_Freeze_adj = '/library.zip'
pyinstaller_adj = '/AltAnalyze.app/Contents/MacOS'
pyinstaller_adj2 = '/AltAnalyze.app/Contents/Resources'
py2app_aa_dirs = [py2app_adj,py2app_adj1,py2exe_adj,py2app_adj2,py2app_adj3,py2app_adj4,cx_Freeze_adj,pyinstaller_adj,pyinstaller_adj2]
py2app_dirs = py2app_ge_dirs + py2app_aa_dirs
for i in py2app_aa_dirs:
i = string.replace(i,'AltAnalyze.app','AltAnalyzeViewer.app')
py2app_dirs.append(i)
if ('linux' in sys.platform or 'posix' in sys.platform) and getattr(sys, 'frozen', False): ### For PyInstaller
application_path = os.path.dirname(sys.executable)
#application_path = sys._MEIPASS ### should be the same as the above
else:
application_path = os.path.dirname(__file__)
if 'AltAnalyze?' in application_path:
application_path = string.replace(application_path,'//','/')
application_path = string.replace(application_path,'\\','/') ### If /// present
application_path = string.split(application_path,'AltAnalyze?')[0]
if 'GO_Elite?' in application_path:
application_path = string.replace(application_path,'//','/')
application_path = string.replace(application_path,'\\','/') ### If /// present
application_path = string.split(application_path,'GO_Elite?')[0]
def filepath(filename):
    """Resolve *filename* to a usable path rooted at the application directory.

    Strips frozen-app bundle fragments, inserts the current gene database
    version into Databases/AltDatabase paths, and normalizes separators.
    """
    #dir=os.path.dirname(dirfile.__file__) #directory file is input as a variable under the main
    dir = application_path
    if filename== '': ### Windows will actually recognize '' as the AltAnalyze root in certain situations but not others
        fn = dir
    elif ':' in filename:
        # Already an absolute Windows-style path (drive letter present).
        fn = filename
    else:
        try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
        except Exception: fn=os.path.join(dir,filename)
    # Mounted-volume paths: keep only the part from '/Volumes/' onward.
    if '/Volumes/' in filename: filenames = string.split(filename,'/Volumes/'); fn = '/Volumes/'+filenames[-1]
    for py2app_dir in py2app_dirs: fn = string.replace(fn,py2app_dir,'')
    if 'Databases' in fn or 'AltDatabase' in fn:
        # Refresh the module-level gene_database_dir, then version the path.
        getCurrentGeneDatabaseVersion()
        fn = correctGeneDatabaseDir(fn)
    fn = string.replace(fn,'.txt.txt','.txt')
    fn = string.replace(fn,'//','/')
    fn = string.replace(fn,'//','/') ### If /// present
    return fn
def read_directory(sub_dir):
    """List the entries of *sub_dir* (relative to the application path),
    applying gene-database versioning and stripping bundle fragments."""
    dir=application_path
    for py2app_dir in py2app_dirs: dir = string.replace(dir,py2app_dir,'')
    if 'Databases' in sub_dir or 'AltDatabase' in sub_dir:
        getCurrentGeneDatabaseVersion()
        sub_dir = correctGeneDatabaseDir(sub_dir)
    try: dir_list = os.listdir(dir+sub_dir)
    except Exception: dir_list = os.listdir(sub_dir) ### For linux
    try: dir_list.remove('.DS_Store') ### This is needed on a mac
    except Exception: null=[]
    return dir_list
def returnDirectories(sub_dir):
    """List the entries of *sub_dir* like read_directory, but abort loudly
    (Python 2 print + deliberate NameError) when neither path resolves."""
    dir=application_path
    if 'Databases' in sub_dir or 'AltDatabase' in sub_dir:
        getCurrentGeneDatabaseVersion()
        sub_dir = correctGeneDatabaseDir(sub_dir)
    for py2app_dir in py2app_dirs:
        dir = string.replace(dir,py2app_dir,'')
    try: dir_list = os.listdir(dir + sub_dir)
    except Exception:
        try: dir_list = os.listdir(sub_dir) ### For linux
        # bad_exit is undefined on purpose: referencing it raises NameError
        # to halt after printing the offending paths.
        except Exception: print dir, sub_dir; bad_exit
    return dir_list
def returnDirectoriesNoReplace(sub_dir):
    """List the entries of *sub_dir* without gene-database versioning; falls
    back to the bare path and then to the path minus its leading character."""
    dir=application_path
    for py2app_dir in py2app_dirs:
        dir = string.replace(dir,py2app_dir,'')
    try: dir_list = os.listdir(dir + sub_dir)
    except Exception:
        try: dir_list = os.listdir(sub_dir) ### For linux
        # Last resort: drop the leading '/' and treat it as relative.
        except Exception: dir_list = os.listdir(sub_dir[1:]) ### For linux
    return dir_list
def refDir():
    """Return the application's reference directory.

    Starts from the module-level ``application_path`` and strips every
    frozen-app bundle fragment in ``py2app_dirs`` so the result points at
    the real on-disk installation directory.
    """
    reference_dir=application_path #directory file is input as a variable under the main
    for py2app_dir in py2app_dirs:
        # Bug fix: the loop previously replaced the constant ``py2app_adj``
        # on every iteration instead of the loop variable, so most bundle
        # fragments were never stripped (compare read_directory and
        # returnDirectoriesNoReplace, which use the loop variable).
        reference_dir = string.replace(reference_dir,py2app_dir,'')
    return reference_dir
def whatProgramIsThis():
    """Identify which program this installation is.

    Returns a ``(program_name, database_dir)`` tuple derived from the
    reference directory path; unrecognized paths default to AltAnalyze.
    """
    reference_dir = refDir()
    if 'AltAnalyze' in reference_dir:
        type = 'AltAnalyze'
        database_dir = 'AltDatabase/goelite/'
    elif 'GO-Elite' in reference_dir:
        type = 'GO-Elite'
        database_dir = 'Databases/'
    else:
        type = 'AltAnalyze'
        database_dir = 'AltDatabase/goelite/'
    return type,database_dir
def correctGeneDatabaseDir(fn):
    """Insert the current gene database version directory (module global
    ``gene_database_dir``, e.g. 'EnsMart72') into a Databases/AltDatabase
    path, unless the path is already versioned or marked 'NoVersion'."""
    try:
        proceed = 'no'
        alt_version = 'AltDatabase/'+gene_database_dir
        elite_version = 'Databases/'+gene_database_dir
        fn=string.replace(fn,'//','/'); fn=string.replace(fn,'\\','/')
        if (alt_version not in fn) and (elite_version not in fn): proceed = 'yes' ### If the user creates that contains EnsMart
        if gene_database_dir not in fn: proceed = 'yes'
        # Paths already mentioning an EnsMart version are left untouched.
        if 'EnsMart' in fn: proceed = 'no'
        if proceed == 'yes':
            fn = string.replace(fn,'Databases','Databases/'+gene_database_dir)
            # Certain AltDatabase subtrees are version-independent and are
            # excluded from versioning.
            if 'AltDatabase/affymetrix' not in fn and 'NoVersion' not in fn and 'AltDatabase/primer3' not in fn \
                and 'AltDatabase/TreeView' not in fn and 'AltDatabase/kallisto' not in fn and 'AltDatabase/tools' not in fn:
                if 'AltDatabase' in fn:
                    fn = string.replace(fn,'AltDatabase','AltDatabase/'+gene_database_dir)
        fn = string.replace(fn,'NoVersion','') ### When the text 'NoVersion' is in a filepath, is tells the program to ignore it for adding the database version
    except Exception: null = ''
    return fn
def getCurrentGeneDatabaseVersion():
    """Read Config/version.txt and cache the gene database version in the
    module-level global ``gene_database_dir``; '' when missing/malformed."""
    global gene_database_dir
    try:
        filename = 'Config/version.txt'; fn=filepath(filename)
        # Each line is '<version>\t<date>'; the last line wins.
        for line in open(fn,'r').readlines():
            gene_database_dir, previous_date = string.split(line,'\t')
    except Exception: gene_database_dir=''
    return gene_database_dir
def unique(s):
    """Return the unique elements of *s* (result order not guaranteed).

    Tries three strategies in turn, fastest first:
      1. hash into a dict (requires hashable elements),
      2. sort then scan (requires orderable elements),
      3. brute-force membership scan (always works, O(n^2)).
    NOTE(review): under Python 3 the dict branch returns a keys view rather
    than a list — confirm callers tolerate this.
    """
    #we need to remove duplicates from a list, unsuccessfully tried many different methods
    #so I found the below function at: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
    n = len(s)
    if n == 0: return []
    u = {}
    try:
        for x in s: u[x] = 1
    except TypeError: del u # move on to the next method
    else: return u.keys()
    # `list` here resolves to this module's own list() helper (defined below),
    # which behaves like the builtin for iterables.
    try: t = list(s); t.sort()
    except TypeError: del t # move on to the next method
    else:
        assert n > 0
        # In-place compaction of the sorted list: keep first of each run.
        last = t[0]; lasti = i = 1
        while i < n:
            if t[i] != last: t[lasti] = last = t[i]; lasti += 1
            i += 1
        return t[:lasti]
    u = []
    for x in s:
        if x not in u: u.append(x)
    return u
def dictionary(s):
    """Build a dict mapping each element of *s* to an empty list.

    Unhashable elements (e.g. lists) are keyed by their tuple form.
    """
    mapping = {}
    for element in s:
        try:
            mapping[element] = []
        except TypeError:
            # element is unhashable -- fall back to its tuple equivalent
            mapping[tuple(element)] = []
    return mapping
def unique_db(s):
    """Return the distinct elements of *s* as a list, using dict keys
    for de-duplication; unhashable elements are converted to tuples."""
    seen = {}
    for element in s:
        try:
            seen[element] = []
        except TypeError:
            # unhashable element (e.g. a list) -- key on its tuple form
            seen[tuple(element)] = []
    return [key for key in seen]
def list(d):
    """Return the items of any iterable *d* as a list.

    NOTE: deliberately shadows the builtin ``list`` at module level; the
    name is part of this module's public interface and is kept.
    """
    return [element for element in d]
if __name__ == '__main__':
    # Ad-hoc manual smoke tests (Python 2 print statements). Only the
    # first path is ever exercised: sys.exit() stops the script before
    # the later calls run.
    fn = filepath('/home/nsalomonis/Desktop/GO-Elite_v.1.2.4-Ubuntu-1/GO_Elite?42197/GO-Elite_report-20120512-151332.log')
    print fn; sys.exit()
    fn = filepath('BuildDBs/Amadeus/symbol-Metazoan-Amadeus.txt')
    print fn;sys.exit()
    unique_db([1,2,3,4,4,4,5])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests_cache.core
~~~~~~~~~~~~~~~~~~~
Core functions for configuring cache and monkey patching ``requests``
"""
import collections
from contextlib import contextmanager
from datetime import datetime, timedelta
from operator import itemgetter
import requests
from requests import Session as OriginalSession
from requests.hooks import dispatch_hook
from ..requests_cache import backends
from ..requests_cache.compat import basestring
# Detect the installed requests version; very old releases dispatched
# response hooks themselves, so our explicit dispatch becomes a no-op.
try:
    ver = tuple(map(int, requests.__version__.split(".")))
except ValueError:
    # Non-numeric version component (e.g. a dev build): keep the real
    # dispatch_hook and don't guess.
    pass
else:
    # We don't need to dispatch hook in Requests <= 1.1.0
    if ver < (1, 2, 0):
        dispatch_hook = lambda key, hooks, hook_data, *a, **kw: hook_data
    del ver
class CachedSession(OriginalSession):
    """ Requests ``Sessions`` with caching support.

    Wraps :meth:`send` to serve eligible responses from a pluggable cache
    backend, and :meth:`request` to normalize parameters so equivalent
    requests map to the same cache key.
    """

    def __init__(self, cache_name='cache', backend=None, expire_after=None,
                 allowable_codes=(200,), allowable_methods=('GET',),
                 ignored_parameters=None, old_data_on_error=False,
                 **backend_options):
        """
        :param cache_name: for ``sqlite`` backend: cache file will start with this prefix,
                           e.g ``cache.sqlite``
                           for ``mongodb``: it's used as database name
                           for ``redis``: it's used as the namespace. This means all keys
                           are prefixed with ``'cache_name:'``
        :param backend: cache backend name e.g ``'sqlite'``, ``'mongodb'``, ``'redis'``, ``'memory'``.
                        (see :ref:`persistence`). Or instance of backend implementation.
                        Default value is ``None``, which means use ``'sqlite'`` if available,
                        otherwise fallback to ``'memory'``.
        :param expire_after: ``timedelta`` or number of seconds after cache will be expired
                             or `None` (default) to ignore expiration
        :type expire_after: float
        :param allowable_codes: limit caching only for response with this codes (default: 200)
        :type allowable_codes: tuple
        :param allowable_methods: cache only requests of this methods (default: 'GET')
        :type allowable_methods: tuple
        :kwarg backend_options: options for chosen backend. See corresponding
                                :ref:`sqlite <backends_sqlite>`, :ref:`mongo <backends_mongo>`
                                and :ref:`redis <backends_redis>` backends API documentation
        :param include_get_headers: If `True` headers will be part of cache key.
                                    E.g. after get('some_link', headers={'Accept':'application/json'})
                                    get('some_link', headers={'Accept':'application/xml'}) is not from cache.
        :param ignored_parameters: List of parameters to be excluded from the cache key.
                                   Useful when requesting the same resource through different
                                   credentials or access tokens, passed as parameters.
        :param old_data_on_error: If `True` it will return expired cached response if update fails
        """
        # A backend name (or None) is resolved to an instance; an already
        # constructed backend object is used as-is.
        if backend is None or isinstance(backend, basestring):
            self.cache = backends.create_backend(backend, cache_name,
                                                 backend_options)
        else:
            self.cache = backend
        self._cache_name = cache_name

        # Normalize numeric expire_after into a timedelta once, up front.
        if expire_after is not None and not isinstance(expire_after, timedelta):
            expire_after = timedelta(seconds=expire_after)
        self._cache_expire_after = expire_after

        self._cache_allowable_codes = allowable_codes
        self._cache_allowable_methods = allowable_methods
        self._cache_ignored_parameters = ignored_parameters
        self._return_old_data_on_error = old_data_on_error
        self._is_cache_disabled = False
        super(CachedSession, self).__init__()

    def send(self, request, **kwargs):
        """Send *request*, consulting/populating the cache.

        Every returned response carries a ``from_cache`` boolean.
        """
        # Bypass the cache for disabled sessions and non-cacheable methods.
        if (self._is_cache_disabled
                or request.method not in self._cache_allowable_methods):
            response = super(CachedSession, self).send(request, **kwargs)
            response.from_cache = False
            return response

        cache_key = self.cache.create_key(request)

        def send_request_and_cache_response():
            # Perform the real request; store it only for allowable codes.
            response = super(CachedSession, self).send(request, **kwargs)
            if response.status_code in self._cache_allowable_codes:
                self.cache.save_response(cache_key, response)
            response.from_cache = False
            return response

        response, timestamp = self.cache.get_response_and_time(cache_key)
        if response is None:
            # Cache miss: go to the network.
            return send_request_and_cache_response()

        if self._cache_expire_after is not None:
            is_expired = datetime.utcnow() - timestamp > self._cache_expire_after
            if is_expired:
                if not self._return_old_data_on_error:
                    self.cache.delete(cache_key)
                    return send_request_and_cache_response()
                # Stale-while-error mode: fall back to the expired cached
                # response if the refresh fails or returns a bad code.
                try:
                    new_response = send_request_and_cache_response()
                except Exception:
                    return response
                else:
                    if new_response.status_code not in self._cache_allowable_codes:
                        return response
                    return new_response

        # dispatch hook here, because we've removed it before pickling
        response.from_cache = True
        response = dispatch_hook('response', request.hooks, response, **kwargs)
        return response

    def request(self, method, url, params=None, data=None, **kwargs):
        """Issue a request with params/data normalized for stable cache keys."""
        response = super(CachedSession, self).request(
            method, url,
            _normalize_parameters(params, self._cache_ignored_parameters),
            _normalize_parameters(data, self._cache_ignored_parameters),
            **kwargs
        )
        if self._is_cache_disabled:
            return response

        # Map every redirect in the chain onto the final response's key so
        # a later request to an intermediate URL also hits the cache.
        main_key = self.cache.create_key(response.request)
        for r in response.history:
            self.cache.add_key_mapping(
                self.cache.create_key(r.request), main_key
            )
        return response

    @contextmanager
    def cache_disabled(self):
        """
        Context manager for temporary disabling cache
        ::
            >>> s = CachedSession()
            >>> with s.cache_disabled():
            ...     s.get('http://httpbin.org/ip')
        """
        self._is_cache_disabled = True
        try:
            yield
        finally:
            self._is_cache_disabled = False

    def __repr__(self):
        return (
            "<CachedSession(%s('%s', ...), expire_after=%s, "
            "allowable_methods=%s)>" % (
                self.cache.__class__.__name__, self._cache_name,
                self._cache_expire_after, self._cache_allowable_methods
            )
        )
def install_cache(cache_name='cache', backend=None, expire_after=None,
                  allowable_codes=(200,), allowable_methods=('GET',),
                  session_factory=CachedSession, ignored_parameters=None, **backend_options):
    """
    Installs cache for all ``Requests`` requests by monkey-patching ``Session``
    Parameters are the same as in :class:`CachedSession`. Additional parameters:
    :param session_factory: Session factory. It must be class which inherits :class:`CachedSession` (default)
    """
    if backend:
        # Resolve a backend name into an instance up front.
        # NOTE(review): backend_options are consumed here AND forwarded to
        # the session below; also old_data_on_error is not forwarded at
        # all. Confirm against upstream before changing.
        backend = backends.create_backend(backend, cache_name, backend_options)

    class _ConfiguredCachedSession(session_factory):
        # Zero-argument Session subclass: requests instantiates Session()
        # with no arguments, so all configuration is baked in via closure.
        def __init__(self):
            super(_ConfiguredCachedSession, self).__init__(
                cache_name=cache_name,
                backend=backend,
                expire_after=expire_after,
                allowable_codes=allowable_codes,
                allowable_methods=allowable_methods,
                ignored_parameters=ignored_parameters,
                **backend_options
            )

    _patch_session_factory(_ConfiguredCachedSession)


# backward compatibility
configure = install_cache
def uninstall_cache():
    """ Restores ``requests.Session`` and disables cache
    """
    # Re-point both public Session aliases at the unpatched class.
    _patch_session_factory(OriginalSession)
@contextmanager
def disabled():
    """
    Context manager that temporarily disables a globally installed cache.
    The previous session factory (whatever it was) is restored on exit.
    .. warning:: not thread-safe
    ::
        >>> with requests_cache.disabled():
        ...     requests.get('http://httpbin.org/ip')
        ...     requests.get('http://httpbin.org/get')
    """
    original_factory = requests.Session
    uninstall_cache()
    try:
        yield
    finally:
        # Reinstall exactly what was active before entering the block.
        _patch_session_factory(original_factory)
@contextmanager
def enabled(*args, **kwargs):
    """
    Context manager for temporary installing global cache.
    Accepts same arguments as :func:`install_cache`
    .. warning:: not thread-safe
    ::
        >>> with requests_cache.enabled('cache_db'):
        ...     requests.get('http://httpbin.org/get')
    """
    install_cache(*args, **kwargs)
    try:
        yield
    finally:
        # Always restore the plain requests.Session, even on error.
        uninstall_cache()
def get_cache():
    """ Returns internal cache object from globally installed ``CachedSession``
    """
    # Works only while a cache is installed; presumably a plain
    # requests.Session has no ``cache`` attribute and this would raise
    # AttributeError -- confirm with the requests API.
    return requests.Session().cache
def clear():
    """ Clears globally installed cache
    """
    # Delegates to the installed backend's clear(); requires that
    # install_cache() has been called (see get_cache()).
    get_cache().clear()
def _patch_session_factory(session_factory=CachedSession):
    """Monkey-patch both public aliases of ``Session`` with *session_factory*."""
    # requests.Session and requests.sessions.Session must stay in sync:
    # callers import the class from either location.
    requests.Session = requests.sessions.Session = session_factory
def _normalize_parameters(params, ignored_params=None):
""" If builtin dict is passed as parameter, returns sorted list
of key-value pairs
"""
if type(params) is dict:
params = sorted(params.items(), key=itemgetter(0))
elif isinstance(params, collections.Mapping):
params = params.items()
if ignored_params:
try:
params = [(k, v) for k, v in params if k not in ignored_params]
except (AttributeError, ValueError, TypeError):
pass
return params
| |
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import time
from unittest import mock
import uuid
from oslo_utils import timeutils
from pymongo import cursor
import pymongo.errors
from testtools import matchers
from zaqar.common import cache as oslo_cache
from zaqar.conf import default
from zaqar.conf import drivers_management_store_mongodb
from zaqar.conf import drivers_message_store_mongodb
from zaqar import storage
from zaqar.storage import errors
from zaqar.storage import mongodb
from zaqar.storage.mongodb import controllers
from zaqar.storage.mongodb import utils
from zaqar.storage import pooling
from zaqar import tests as testing
from zaqar.tests.unit.storage import base
class MongodbSetupMixin(object):
    """Shared helpers for MongoDB-backed test cases: database cleanup and
    per-run database-name randomization."""

    def _purge_databases(self):
        # Drop every database the driver under test may have created.
        # Data drivers own message + subscription DBs plus the control
        # plane's queues DB; control drivers only the queues DB.
        if isinstance(self.driver, mongodb.DataDriver):
            databases = (self.driver.message_databases +
                         [self.control.queues_database,
                          self.driver.subscriptions_database])
        else:
            databases = [self.driver.queues_database]

        for db in databases:
            self.driver.connection.drop_database(db)

    def _prepare_conf(self):
        # Randomize database names so parallel test runs don't collide.
        if drivers_message_store_mongodb.GROUP_NAME in self.conf:
            self.config(drivers_message_store_mongodb.GROUP_NAME,
                        database=uuid.uuid4().hex)

        if drivers_management_store_mongodb.GROUP_NAME in self.conf:
            self.config(drivers_management_store_mongodb.GROUP_NAME,
                        database=uuid.uuid4().hex)
class MongodbUtilsTest(MongodbSetupMixin, testing.TestBase):
    """Unit tests for zaqar.storage.mongodb.utils helpers (no live
    MongoDB needed -- drivers are namedtuple stand-ins)."""

    config_file = 'wsgi_mongodb.conf'

    def setUp(self):
        super(MongodbUtilsTest, self).setUp()
        self.conf.register_opts(drivers_message_store_mongodb.ALL_OPTS,
                                group=drivers_message_store_mongodb.GROUP_NAME)
        self.mongodb_conf = self.conf[drivers_message_store_mongodb.GROUP_NAME]

        # Only the .mongodb_conf attribute is consumed by the utils under
        # test, so a namedtuple stands in for a real driver.
        MockDriver = collections.namedtuple('MockDriver', 'mongodb_conf')

        self.driver = MockDriver(self.mongodb_conf)
        self.control_driver = MockDriver(self.mongodb_conf)

    def test_scope_queue_name(self):
        self.assertEqual('/my-q', utils.scope_queue_name('my-q'))
        self.assertEqual('/my-q', utils.scope_queue_name('my-q', None))
        self.assertEqual('123/my-q', utils.scope_queue_name('my-q', '123'))

        self.assertEqual('/', utils.scope_queue_name(None))
        self.assertEqual('123/', utils.scope_queue_name(None, '123'))

    def test_descope_queue_name(self):
        self.assertIsNone(utils.descope_queue_name('/'))
        self.assertEqual('some-pig', utils.descope_queue_name('/some-pig'))
        self.assertEqual('some-pig',
                         utils.descope_queue_name('radiant/some-pig'))

    def test_calculate_backoff(self):
        sec = utils.calculate_backoff(0, 10, 2, 0)
        self.assertEqual(0, sec)

        sec = utils.calculate_backoff(9, 10, 2, 0)
        self.assertEqual(1.8, sec)

        sec = utils.calculate_backoff(4, 10, 2, 0)
        self.assertEqual(0.8, sec)

        # With jitter the result is only bounded, not exact.
        sec = utils.calculate_backoff(4, 10, 2, 1)
        if sec != 0.8:
            self.assertThat(sec, matchers.GreaterThan(0.8))
            self.assertThat(sec, matchers.LessThan(1.8))

        # Invalid max_sleep/jitter/attempt combinations must raise.
        self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, -1)
        self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, -2, 0)
        self.assertRaises(ValueError, utils.calculate_backoff, 0, 10, 2, -1)
        self.assertRaises(ValueError, utils.calculate_backoff, -2, 10, 2, 0)
        self.assertRaises(ValueError, utils.calculate_backoff, -1, 10, 2, 0)
        self.assertRaises(ValueError, utils.calculate_backoff, 10, 10, 2, 0)
        self.assertRaises(ValueError, utils.calculate_backoff, 11, 10, 2, 0)

    def test_retries_on_autoreconnect(self):
        num_calls = [0]

        @utils.retries_on_autoreconnect
        def _raises_autoreconnect(self):
            num_calls[0] += 1
            raise pymongo.errors.AutoReconnect()

        # Always failing: the decorator retries up to the configured
        # maximum, then re-raises.
        self.assertRaises(pymongo.errors.AutoReconnect,
                          _raises_autoreconnect, self)
        self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls)

    def test_retries_on_autoreconnect_neg(self):
        num_calls = [0]

        @utils.retries_on_autoreconnect
        def _raises_autoreconnect(self):
            num_calls[0] += 1

            # NOTE(kgriffs): Don't exceed until the last attempt
            if num_calls[0] < self.mongodb_conf.max_reconnect_attempts:
                raise pymongo.errors.AutoReconnect()

        # NOTE(kgriffs): Test that this does *not* raise AutoReconnect
        _raises_autoreconnect(self)
        self.assertEqual([self.mongodb_conf.max_reconnect_attempts], num_calls)
@testing.requires_mongodb
class MongodbDriverTest(MongodbSetupMixin, testing.TestBase):
    """Driver construction checks: server version gating, replica-set /
    mongos requirements, and write-concern enforcement."""

    config_file = 'wsgi_mongodb.conf'

    def setUp(self):
        super(MongodbDriverTest, self).setUp()
        self.conf.register_opts(default.ALL_OPTS)
        self.config(unreliable=False)
        oslo_cache.register_config(self.conf)

    def test_db_instance(self):
        self.config(unreliable=True)
        cache = oslo_cache.get_cache(self.conf)
        control = mongodb.ControlDriver(self.conf, cache)
        data = mongodb.DataDriver(self.conf, cache, control)

        # Every message database name must be derived from the configured
        # base database name.
        for db in data.message_databases:
            self.assertThat(db.name, matchers.StartsWith(
                data.mongodb_conf.database))

    def test_version_match(self):
        self.config(unreliable=True)
        cache = oslo_cache.get_cache(self.conf)

        with mock.patch('pymongo.MongoClient.server_info') as info:
            # A too-old reported server version must be rejected ...
            info.return_value = {'version': '2.1'}
            self.assertRaises(RuntimeError, mongodb.DataDriver,
                              self.conf, cache,
                              mongodb.ControlDriver(self.conf, cache))

            # ... while a new-enough one is accepted.
            info.return_value = {'version': '2.11'}
            try:
                mongodb.DataDriver(self.conf, cache,
                                   mongodb.ControlDriver(self.conf, cache))
            except RuntimeError:
                self.fail('version match failed')

    def test_replicaset_or_mongos_needed(self):
        cache = oslo_cache.get_cache(self.conf)

        # Neither replica-set nodes nor mongos: reliable mode must refuse
        # to start.
        with mock.patch('pymongo.MongoClient.nodes') as nodes:
            nodes.__get__ = mock.Mock(return_value=[])
            with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos:
                is_mongos.__get__ = mock.Mock(return_value=False)
                self.assertRaises(RuntimeError, mongodb.DataDriver,
                                  self.conf, cache,
                                  mongodb.ControlDriver(self.conf, cache))

    def test_using_replset(self):
        cache = oslo_cache.get_cache(self.conf)

        with mock.patch('pymongo.MongoClient.nodes') as nodes:
            nodes.__get__ = mock.Mock(return_value=['node1', 'node2'])

            with mock.patch('pymongo.MongoClient.write_concern') as wc:
                write_concern = pymongo.WriteConcern(w=2)
                wc.__get__ = mock.Mock(return_value=write_concern)
                mongodb.DataDriver(self.conf, cache,
                                   mongodb.ControlDriver(self.conf, cache))

    def test_using_mongos(self):
        cache = oslo_cache.get_cache(self.conf)

        with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos:
            is_mongos.__get__ = mock.Mock(return_value=True)

            with mock.patch('pymongo.MongoClient.write_concern') as wc:
                write_concern = pymongo.WriteConcern(w=2)
                wc.__get__ = mock.Mock(return_value=write_concern)
                mongodb.DataDriver(self.conf, cache,
                                   mongodb.ControlDriver(self.conf, cache))

    def test_write_concern_check_works(self):
        cache = oslo_cache.get_cache(self.conf)

        with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos:
            is_mongos.__get__ = mock.Mock(return_value=True)

            # w=1 is insufficient for reliable mode ...
            with mock.patch('pymongo.MongoClient.write_concern') as wc:
                write_concern = pymongo.WriteConcern(w=1)
                wc.__get__ = mock.Mock(return_value=write_concern)
                self.assertRaises(RuntimeError, mongodb.DataDriver,
                                  self.conf, cache,
                                  mongodb.ControlDriver(self.conf, cache))

                # ... w=2 passes the check.
                write_concern = pymongo.WriteConcern(w=2)
                wc.__get__ = mock.Mock(return_value=write_concern)
                mongodb.DataDriver(self.conf, cache,
                                   mongodb.ControlDriver(self.conf, cache))

    def test_write_concern_is_set(self):
        cache = oslo_cache.get_cache(self.conf)

        with mock.patch('pymongo.MongoClient.is_mongos') as is_mongos:
            is_mongos.__get__ = mock.Mock(return_value=True)
            self.config(unreliable=True)
            driver = mongodb.DataDriver(self.conf, cache,
                                        mongodb.ControlDriver
                                        (self.conf, cache))
            driver.server_version = (2, 6)

            # The driver must set w='majority', j=False on every message DB.
            for db in driver.message_databases:
                wc = db.write_concern

                self.assertEqual('majority', wc.document['w'])
                self.assertFalse(wc.document['j'])
@testing.requires_mongodb
class MongodbQueueTests(MongodbSetupMixin, base.QueueControllerTest):
    """Queue controller tests against a real MongoDB (plus the shared
    base.QueueControllerTest suite)."""

    driver_class = mongodb.ControlDriver
    config_file = 'wsgi_mongodb.conf'
    controller_class = controllers.QueueController
    control_driver_class = mongodb.ControlDriver

    def test_indexes(self):
        # The compound (project, queue) index must exist on the collection.
        collection = self.controller._collection
        indexes = collection.index_information()
        self.assertIn('p_q_1', indexes)

    def test_raises_connection_error(self):

        # Force every cursor advance to fail so the controller's error
        # translation (pymongo -> storage.errors.ConnectionError) fires.
        with mock.patch.object(cursor.Cursor,
                               '__next__',
                               spec=True) as method:
            error = pymongo.errors.ConnectionFailure()
            method.side_effect = error

            queues = next(self.controller.list())
            # NOTE(review): ``queues.next`` is a Py2-style iterator call;
            # presumably the wrapped cursor exposes a ``next`` method --
            # confirm against zaqar.storage.mongodb.utils.
            self.assertRaises(storage.errors.ConnectionError,
                              queues.next)
@testing.requires_mongodb
class MongodbMessageTests(MongodbSetupMixin, base.MessageControllerTest):
    """Message controller tests against a real MongoDB."""

    driver_class = mongodb.DataDriver
    config_file = 'wsgi_mongodb.conf'
    controller_class = controllers.MessageController
    control_driver_class = mongodb.ControlDriver

    # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute
    gc_interval = 60

    def test_indexes(self):
        # Each sharded message collection needs all four query indexes.
        for collection in self.controller._collections:
            indexes = collection.index_information()
            self.assertIn('active', indexes)
            self.assertIn('claimed', indexes)
            self.assertIn('queue_marker', indexes)
            self.assertIn('counting', indexes)

    def test_message_counter(self):
        queue_name = self.queue_name
        iterations = 10

        # Replace the queue controller with a mock that lacks the counter
        # helpers, forcing the message controller to use its own.
        m = mock.MagicMock(controllers.QueueController)
        self.controller._queue_ctrl = m
        del self.controller._queue_ctrl._get_counter
        del self.controller._queue_ctrl._inc_counter

        seed_marker1 = self.controller._get_counter(queue_name,
                                                    self.project)
        self.assertEqual(0, seed_marker1, 'First marker is 0')

        uuid = '97b64000-2526-11e3-b088-d85c1300734c'

        # Posting increments the counter by exactly one per message, and
        # reads are stable between posts.
        for i in range(iterations):
            self.controller.post(queue_name, [{'ttl': 60}], uuid,
                                 project=self.project)

            marker1 = self.controller._get_counter(queue_name,
                                                   self.project)
            marker2 = self.controller._get_counter(queue_name,
                                                   self.project)
            marker3 = self.controller._get_counter(queue_name,
                                                   self.project)

            self.assertEqual(marker1, marker2)
            self.assertEqual(marker2, marker3)
            self.assertEqual(i + 1, marker1)

        new_value = self.controller._inc_counter(queue_name,
                                                 self.project)
        self.assertIsNotNone(new_value)

        # Default increment is 1; explicit amount is honored.
        value_before = self.controller._get_counter(queue_name,
                                                    project=self.project)
        new_value = self.controller._inc_counter(queue_name,
                                                 project=self.project)
        self.assertIsNotNone(new_value)

        value_after = self.controller._get_counter(queue_name,
                                                   project=self.project)
        self.assertEqual(value_before + 1, value_after)

        value_before = value_after
        new_value = self.controller._inc_counter(queue_name,
                                                 project=self.project,
                                                 amount=7)
        value_after = self.controller._get_counter(queue_name,
                                                   project=self.project)
        self.assertEqual(value_before + 7, value_after)
        self.assertEqual(new_value, value_after)

        # Within the window the increment is a no-op (returns None) ...
        reference_value = value_after

        unchanged = self.controller._inc_counter(queue_name,
                                                 project=self.project,
                                                 window=10)
        self.assertIsNone(unchanged)

        # ... and succeeds again once simulated time passes the window.
        timeutils.set_time_override()
        timeutils.advance_time_delta(datetime.timedelta(seconds=10))

        changed = self.controller._inc_counter(queue_name,
                                               project=self.project,
                                               window=5)
        self.assertEqual(reference_value + 1, changed)

        timeutils.clear_time_override()
@testing.requires_mongodb
class MongodbFIFOMessageTests(MongodbSetupMixin, base.MessageControllerTest):
    """FIFO message controller tests, including marker-collision recovery."""

    driver_class = mongodb.FIFODataDriver
    config_file = 'wsgi_fifo_mongodb.conf'
    controller_class = controllers.FIFOMessageController
    control_driver_class = mongodb.ControlDriver

    # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute
    gc_interval = 60

    def test_race_condition_on_post(self):
        queue_name = self.queue_name

        expected_messages = [
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'c378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'd378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
            {
                'ttl': 60,
                'body': {
                    'event': 'BackupStarted',
                    'backupId': 'e378813c-3f0b-11e2-ad92-7823d2b0f3ce',
                },
            },
        ]

        uuid = '97b64000-2526-11e3-b088-d85c1300734c'

        # NOTE(kgriffs): Patch _inc_counter so it is a noop, so that
        # the second time we post, we will get a collision. This simulates
        # what happens when we have parallel requests and the "winning"
        # requests hasn't gotten around to calling _inc_counter before the
        # "losing" request attempts to insert it's batch of messages.
        with mock.patch.object(mongodb.messages.MessageController,
                               '_inc_counter', autospec=True) as ic:

            ic.return_value = 2
            messages = expected_messages[:1]
            created = list(self.controller.post(queue_name,
                                                messages, uuid,
                                                project=self.project))
            self.assertEqual(1, len(created))

            # Force infinite retries
            ic.return_value = None

            with testing.expect(errors.MessageConflict):
                self.controller.post(queue_name, messages,
                                     uuid, project=self.project)

        # With the real counter restored, the remaining messages post fine.
        created = list(self.controller.post(queue_name,
                                            expected_messages[1:],
                                            uuid, project=self.project))

        self.assertEqual(2, len(created))

        # All three messages must be listed back, in order.
        expected_ids = [m['body']['backupId'] for m in expected_messages]

        interaction = self.controller.list(queue_name, client_uuid=uuid,
                                           echo=True, project=self.project)

        actual_messages = list(next(interaction))
        self.assertEqual(len(expected_messages), len(actual_messages))
        actual_ids = [m['body']['backupId'] for m in actual_messages]

        self.assertEqual(expected_ids, actual_ids)
@testing.requires_mongodb
class MongodbClaimTests(MongodbSetupMixin, base.ClaimControllerTest):
    """Claim controller tests against a real MongoDB."""

    driver_class = mongodb.DataDriver
    config_file = 'wsgi_mongodb.conf'
    controller_class = controllers.ClaimController
    control_driver_class = mongodb.ControlDriver

    def test_claim_doesnt_exist(self):
        """Verifies that operations fail on expired/missing claims.

        Methods should raise an exception when the claim doesn't
        exists and/or has expired.
        """
        epoch = '000000000000000000000000'
        self.assertRaises(storage.errors.ClaimDoesNotExist,
                          self.controller.get, self.queue_name,
                          epoch, project=self.project)

        claim_id, messages = self.controller.create(self.queue_name,
                                                    {'ttl': 1, 'grace': 0},
                                                    project=self.project)

        # Lets let it expire
        time.sleep(1)
        self.assertRaises(storage.errors.ClaimDoesNotExist,
                          self.controller.update, self.queue_name,
                          claim_id, {'ttl': 1, 'grace': 0},
                          project=self.project)

        # NOTE(review): this assertion is byte-for-byte identical to the
        # one above; it may have been meant to exercise a different
        # operation (e.g. delete or get) -- confirm against upstream.
        self.assertRaises(storage.errors.ClaimDoesNotExist,
                          self.controller.update, self.queue_name,
                          claim_id, {'ttl': 1, 'grace': 0},
                          project=self.project)
@testing.requires_mongodb
class MongodbSubscriptionTests(MongodbSetupMixin,
                               base.SubscriptionControllerTest):
    """Subscription controller tests; all cases come from the shared base."""

    driver_class = mongodb.DataDriver
    config_file = 'wsgi_mongodb.conf'
    controller_class = controllers.SubscriptionController
    control_driver_class = mongodb.ControlDriver
#
# TODO(kgriffs): Do these need database purges as well as those above?
#
@testing.requires_mongodb
class MongodbPoolsTests(base.PoolsControllerTest):
    """Pools controller tests, including flavor-interaction scenarios."""

    config_file = 'wsgi_mongodb.conf'
    driver_class = mongodb.ControlDriver
    controller_class = controllers.PoolsController
    control_driver_class = mongodb.ControlDriver

    def setUp(self):
        super(MongodbPoolsTests, self).setUp()
        # Extra pool with its own flavor; self.pool/self.flavor/self.uri
        # come from the base class fixture.
        self.uri2 = str(uuid.uuid1())
        self.flavor2 = str(uuid.uuid1())
        self.pools_controller.create(self.pool, 100, self.uri2,
                                     flavor=self.flavor2, options={})

    def tearDown(self):
        # self.pool_ctrl.update(self.pool, flavor="")
        self.pools_controller.drop_all()
        super(MongodbPoolsTests, self).tearDown()

    # NOTE(gengchc2): Unittest for new flavor configure scenario.
    def test_delete_pool_used_by_flavor1(self):
        self.flavors_controller.create(self.flavor,
                                       project=self.project,
                                       capabilities={})
        self.pools_controller.update(self.pool1, flavor=self.flavor)

        # A pool referenced by a flavor must not be deletable.
        with testing.expect(errors.PoolInUseByFlavor):
            self.pools_controller.delete(self.pool1)

    # NOTE(gengchc2): Unittest for new flavor configure scenario.
    def test_mismatching_capabilities_fifo1(self):
        with testing.expect(errors.PoolCapabilitiesMismatch):
            self.pools_controller.create(str(uuid.uuid1()),
                                         100, 'mongodb.fifo://localhost',
                                         flavor=self.flavor,
                                         options={})

    def test_mismatching_capabilities1(self):
        # NOTE(gengchc2): This test is used for testing mismatchming
        # capabilities in pool with flavor
        with testing.expect(errors.PoolCapabilitiesMismatch):
            self.pools_controller.create(str(uuid.uuid1()),
                                         100, 'redis://localhost',
                                         flavor=self.flavor,
                                         options={})

    # NOTE(gengchc2): Unittest for new flavor configure scenario.
    def test_duplicate_uri1(self):
        with testing.expect(errors.PoolAlreadyExists):
            # The url 'localhost' is used in setUp(). So reusing the uri
            # 'localhost' here will raise PoolAlreadyExists.
            self.pools_controller.create(str(uuid.uuid1()), 100, self.uri,
                                         flavor=str(uuid.uuid1()), options={})
@testing.requires_mongodb
class MongodbCatalogueTests(base.CatalogueControllerTest):
    """Catalogue controller tests; all cases come from the shared base."""

    driver_class = mongodb.ControlDriver
    controller_class = controllers.CatalogueController
    control_driver_class = mongodb.ControlDriver
    config_file = 'wsgi_mongodb.conf'

    def setUp(self):
        super(MongodbCatalogueTests, self).setUp()
        # Empty the catalogue after each test run.
        self.addCleanup(self.controller.drop_all)
@testing.requires_mongodb
class PooledMessageTests(base.MessageControllerTest):
    """Message controller tests run through the pooling data driver."""

    config_file = 'wsgi_mongodb_pooled.conf'
    controller_class = pooling.MessageController
    driver_class = pooling.DataDriver
    control_driver_class = mongodb.ControlDriver
    controller_base_class = storage.Message

    # NOTE(kgriffs): MongoDB's TTL scavenger only runs once a minute
    gc_interval = 60
@testing.requires_mongodb
class PooledClaimsTests(base.ClaimControllerTest):
    """Claim controller tests run through the pooling data driver."""

    config_file = 'wsgi_mongodb_pooled.conf'
    controller_class = pooling.ClaimController
    driver_class = pooling.DataDriver
    control_driver_class = mongodb.ControlDriver
    controller_base_class = storage.Claim

    def test_delete_message_expired_claim(self):
        # NOTE(flaper87): The pool tests uses sqlalchemy
        # as one of the pools, which causes this test to fail.
        # Several reasons to do this:
        #     The sqla driver is deprecated
        #     It's not optimized
        #     mocking utcnow mocks the driver too, which
        #     requires to put sleeps in the test
        # NOTE(review): ``self.skip`` presumably comes from the testtools
        # base class (alias of skipTest) -- confirm; plain unittest has
        # only skipTest().
        self.skip("Fix sqlalchemy driver")
# NOTE(gengchc2): Unittest for new flavor configure scenario.
@testing.requires_mongodb
class MongodbFlavorsTest1(base.FlavorsControllerTest1):
    """Flavors controller tests (new flavor-configuration scenario)."""

    driver_class = mongodb.ControlDriver
    controller_class = controllers.FlavorsController
    control_driver_class = mongodb.ControlDriver
    config_file = 'wsgi_mongodb.conf'

    def setUp(self):
        super(MongodbFlavorsTest1, self).setUp()
        # Remove all flavors after each test run.
        self.addCleanup(self.controller.drop_all)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.