| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 5bd26c35c2a3693f265a5d23b4d28e4f91b66661 | size: 24,235 | ext: py | lang: Python
max_stars: path: digen/benchmark.py | repo: perib/digen | head: 36b30f347a1f95561e2567796671eab1126511da | licenses: ["MIT"] | stars: null | events: null to null
max_issues: path: digen/benchmark.py | repo: perib/digen | head: 36b30f347a1f95561e2567796671eab1126511da | licenses: ["MIT"] | issues: null | events: null to null
max_forks: path: digen/benchmark.py | repo: perib/digen | head: 36b30f347a1f95561e2567796671eab1126511da | licenses: ["MIT"] | forks: null | events: null to null
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 Patryk Orzechowski | Epistasis Lab | University of Pennsylvania
DIGEN was developed at the University of Pennsylvania by Patryk Orzechowski (patryk.orzechowski@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
import random
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
import optuna
import pandas as pd
import seaborn as sns
from matplotlib import rcParams
from sklearn.base import clone, BaseEstimator
from sklearn.metrics import (roc_auc_score, roc_curve, precision_score, auc, recall_score, precision_recall_curve,
f1_score)
from sklearn.model_selection import train_test_split, StratifiedKFold
from . import initialize, load_datasets
from .dataset import Dataset
rcParams.update({'figure.autolayout': True})
GITHUB_URL = 'https://github.com/EpistasisLab/digen/tree/main/datasets'
suffix = '.tsv'
__version__ = '0.0.3'
class Benchmark:
'''
The main class of DIGEN.
This class implements base methods for benchmarking methods.
In order to keep it consistent with the benchmark, the defaults shouldn't be modified.
Parameters:
----------
n_trials : int, default=200
The maximal number of hyper-parameter optimization trials performed while benchmarking an ML method.
For further reference, see Optuna.
timeout : int, default=10000
The maximal time allowed for the ML method to run all the optimizations.
After the timeout is reached, the currently running trials are completed and no further optimizations are started.
n_splits : int, default=10
Number of splits for k-fold cross-validation.
'''
def __init__(self, n_trials=200, timeout=10000, n_splits=10):
data = initialize()
df = pd.DataFrame(data)
assert len(df) > 0
df['fpr'] = df['fpr'].str.slice(1, -1).str.split(', ').apply(lambda x: np.array([float(i) for i in x]))
df['tpr'] = df['tpr'].str.slice(1, -1).str.split(', ').apply(lambda x: np.array([float(i) for i in x]))
df['prec'] = df['prec'].str.slice(1, -1).str.split(', ').apply(lambda x: np.array([float(i) for i in x]))
df['rec'] = df['rec'].str.slice(1, -1).str.split(', ').apply(lambda x: np.array([float(i) for i in x]))
self.data = df
self.dataset_names = pd.unique(df['dataset']).tolist()
self.n_trials = n_trials
self.timeout = timeout
self.n_splits = n_splits
df = pd.read_csv(StringIO(load_datasets()), sep=',', index_col='dataset')
self.models = df[['indiv']].to_dict()['indiv']
self.hashes = df[['hash']].to_dict()['hash']
def _fullname(self, dataset_name):
'''
This method returns the proper full name of a DIGEN dataset regardless of how the argument is passed, e.g. 10, digen10 or digen10_8322.
Returns
--------
Properly annotated full name of a DIGEN dataset, e.g. digen10_8322
'''
seedmap = dict(map(lambda x: (x.split('_')[0], x.split('_')[1]), self.list_datasets()))
if type(dataset_name) is int:
dataset_name = 'digen' + str(dataset_name)
if len(dataset_name.split('_')) == 1:
dataset_name = dataset_name + '_' + seedmap[dataset_name]
return dataset_name
def list_methods(self):
'''
This method lists all the methods included in the benchmark.
Returns
--------
methods : a list of strings
List of the methods included in DIGEN.
'''
return list(sorted(self.data['classifier'].unique()))
def list_datasets(self):
'''
This method lists all datasets
Returns
--------
dataset_names : a list of strings
List of all names of the datasets included in DIGEN.
'''
return self.dataset_names
def get_models(self):
'''
This method lists all models
Returns
--------
models : dict
Dictionary mapping each DIGEN dataset name to its model.
'''
return self.models
def load_dataset(self, dataset_name, separate_target=False, local_cache_dir=None):
"""Downloads a dataset from the DIGEN and returns it. For convenience, instead of using Dataset interface.
Parameters
----------
dataset_name : str
The name of the data set to load from DIGEN.
separate_target : bool (default: False)
Should the target variable be kept within the array in scikit-learn format, or the features separate as NumPy arrays.
local_cache_dir: str (default: None)
The directory on your local machine to store the data files.
If None, then the local data cache will not be used and the datasets downloaded from Github.
Returns
----------
dataset: pd.DataFrame or (array-like, array-like)
if separate_target == False: A pandas DataFrame containing the fetched data set.
if separate_target == True: A tuple of NumPy arrays containing (features, labels)
"""
dataset = Dataset(self._fullname(dataset_name))
return dataset.load_dataset(separate_target=separate_target, local_cache_dir=local_cache_dir)
def optimize(self, est, parameter_scopes, datasets=None, storage='sqlite:///default.db', local_cache_dir=None):
'''
The method that optimizes hyper-parameters for a single or multiple DIGEN datasets.
Parameters
----------
est : sklearn.base.BaseEstimator
A method that will be optimized and benchmarked against DIGEN.
parameter_scopes : dict
A dictionary containing hyper parameters of the benchmarked ML method as well as their distributions.
Refer to Optuna Trial:
https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args
https://optuna.readthedocs.io/en/v1.4.0/reference/trial.html
datasets : a string or a list
The name(s) of the dataset(s) that DIGEN will be run on.
storage : string, default: local file named default.db
The URL of the SQLite database that will store the Optuna optimization results.
'''
best_models = dict()
if datasets is None:
datasets = self.list_datasets()
if not isinstance(datasets, list):
datasets = [datasets]
datasets = [self._fullname(x) for x in datasets]
for dataset_name in datasets:
print('Optimizing ' + est.__class__.__name__ + ' on ' + dataset_name)
dataset = Dataset(dataset_name)
random_state = dataset.get_random_state()
random.seed(random_state)
np.random.seed(random_state)
self.random_state = random.getstate()
self.random_state_np = np.random.get_state()
sampler = optuna.samplers.TPESampler(seed=random_state) # Make the sampler behave in a deterministic way.
study = optuna.create_study(study_name=dataset_name + '-' + est.__class__.__name__,
direction='maximize',
sampler=sampler,
storage=storage, load_if_exists=True)
X, y = dataset.load_dataset(separate_target=True, local_cache_dir=local_cache_dir)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.2,
random_state=random_state)
study.optimize(lambda trial: self._objective(trial, X_train, y_train, est, parameter_scopes, random_state),
n_trials=self.n_trials, timeout=self.timeout)
best_models[dataset_name] = \
self.evaluate(clone(est).set_params(**study.best_trial.user_attrs['params']), dataset_name,
local_cache_dir)[dataset_name]
best_models['name'] = est.__class__.__name__
return best_models
def evaluate(self, est, datasets=None, local_cache_dir=None):
'''
A method that calculates various performance metrics for the ML method with the given parameters.
This function does not tune the parameters.
Parameters
----------
est : sklearn.base.BaseEstimator
The estimator, with its parameters already set, that will be evaluated against DIGEN.
'''
if datasets is None:
datasets = self.list_datasets()
if not isinstance(datasets, list):
datasets = [datasets]
datasets = [self._fullname(x) for x in datasets]
results = {}
for dataset_name in datasets:
dataset = Dataset(dataset_name)
random_state = dataset.get_random_state()
random.seed(random_state)
np.random.seed(random_state)
X, y = dataset.load_dataset(separate_target=True, local_cache_dir=local_cache_dir)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=0.2,
random_state=random_state)
new_est = clone(est)
new_est.fit(X_train, y_train)
y_pred = new_est.predict(X_test)
if hasattr(new_est, "predict_proba"):
yproba = new_est.predict_proba(X_test)[:, 1]
else:
yproba = y_pred
fpr, tpr, _ = roc_curve(y_test, yproba)
auroc = roc_auc_score(y_test, yproba)
# average_precision_score(y_test,yproba)
prec, rec, _ = precision_recall_curve(y_test, yproba)
results[dataset_name] = {
'dataset': dataset_name,
'classifier': new_est,
'fpr': fpr,
'tpr': tpr,
'prec': prec,
'rec': rec,
'auroc': auroc,
'f1_score': f1_score(y_test, y_pred),
'auprc': auc(rec, prec)
}
results['name'] = new_est.__class__.__name__
return results
def _objective(self, trial, X, y, estimator, parameter_scopes, random_state):
'''
An internal method that sets Optuna parameters and objective for hyper-parameter optimization
'''
est = clone(estimator).set_params(**parameter_scopes(trial))
for a in ['random_state', 'seed']:
if hasattr(est, a):
setattr(est, a, random_state)
splits_auc = []
cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=random_state)
for train_idx, test_idx in cv.split(X, y):
split_num = len(splits_auc)
if isinstance(X, np.ndarray):
X_train = X[train_idx]
X_test = X[test_idx]
else:
X_train = X.iloc[train_idx, :]
X_test = X.iloc[test_idx, :]
if isinstance(y, np.ndarray):
y_train = y[train_idx]
y_test = y[test_idx]
else:
y_train = y.iloc[train_idx]
y_test = y.iloc[test_idx]
est.fit(X_train, y_train)
auroc_test = roc_auc_score(y_test, est.predict(X_test))
trial.set_user_attr('split_id', split_num)
trial.set_user_attr('fold' + str(split_num) + '_auroc', auroc_test)
splits_auc.append(auroc_test)
trial.set_user_attr('estimator', str(estimator.__class__.__name__))
trial.set_user_attr('auroc', np.mean(splits_auc))
trial.set_user_attr('seed', random_state)
trial.set_user_attr('params', est.get_params())
return np.mean(splits_auc)
def plot_roc(self, dataset_name, ax=None, new_results=None, **kwargs):
'''
A method that plots an ROC curve chart for a given dataset with the methods included in DIGEN, with or without the additional benchmarked method.
Parameters
----------
dataset_name : str
The name of the data set to load from DIGEN.
new_results : dict
The result of evaluating the given estimator on dataset_name.
For further reference, see: evaluate
'''
dataset_name = self._fullname(dataset_name)
df = self.data[self.data['dataset'] == dataset_name]
df.reset_index(inplace=True)
assert (len(df) > 0)
linestyles = ['-', '--', '-.', ':', 'solid', 'dashed', 'dashdot', 'dotted']
colors = ['0.4', '0.4', '0.4', '0.4', '0.8', '0.8', '0.8', '0.8']
if ax is None:
fig, ax = plt.subplots(figsize=(16, 12))
else:
fig = ax.get_figure()
if new_results is not None:
assert (isinstance(new_results, dict))
new_results_tmp = copy.deepcopy(new_results)
new_results_tmp[dataset_name]['classifier'] = new_results_tmp['name']
if isinstance(new_results_tmp[dataset_name], dict):
df = df.append(new_results_tmp[dataset_name], ignore_index=True)
else:
df = df.append(new_results_tmp, ignore_index=True)
linestyles.append('-')
colors.append('red')
linestyles = iter(linestyles)
colors = iter(colors)
for i in df.index:
ax.plot(df.loc[i]['fpr'],
df.loc[i]['tpr'],
color=next(colors), linestyle=next(linestyles),
label="{}, AUC={:.3f}".format(df.loc[i]['classifier'], df.loc[i]['auroc']), **kwargs)
else:
for i in df.index:
ax.plot(df.loc[i]['fpr'],
df.loc[i]['tpr'],
label="{}, AUC={:.3f}".format(df.loc[i]['classifier'], df.loc[i]['auroc']), **kwargs)
ax.plot([0, 1], [0, 1], color='black', linestyle='--')
ax.set_xticks(np.arange(0.0, 1.1, step=0.1))
ax.set_yticks(np.arange(0.0, 1.1, step=0.1))
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel("False Positive Rate", fontsize=24)
ax.set_ylabel("True Positive Rate", fontsize=24)
ax.set_title('ROC Curves Comparison', fontweight='bold', fontsize=28)
ax.legend(prop={'size': 13}, loc='lower right')
plt.gcf().subplots_adjust(bottom=0.15)
return fig, ax
def plot_prc(self, dataset_name, ax=None, new_results=None, **kwargs):
'''
A method that plots a precision-recall (PRC) curve chart for a given dataset with the methods included in DIGEN, with or without the additional benchmarked method.
Parameters
----------
dataset_name : str
The name of the data set to load from DIGEN.
new_results : dict
The result of evaluating the given estimator on dataset_name.
For further reference, see: evaluate
'''
dataset_name = self._fullname(dataset_name)
df = self.data[self.data['dataset'] == dataset_name]
df.reset_index(inplace=True)
assert (len(df) > 0)
linestyles = ['-', '--', '-.', ':', 'solid', 'dashed', 'dashdot', 'dotted']
colors = ['0.4', '0.4', '0.4', '0.4', '0.8', '0.8', '0.8', '0.8']
if ax is None:
fig, ax = plt.subplots(figsize=(16, 12))
else:
fig = ax.get_figure()
if new_results is not None:
assert (isinstance(new_results, dict))
new_results_tmp = copy.deepcopy(new_results)
new_results_tmp[dataset_name]['classifier'] = new_results_tmp[dataset_name]['classifier'].__class__.__name__
if isinstance(new_results_tmp[dataset_name], dict):
df = df.append(new_results_tmp[dataset_name], ignore_index=True)
else:
df = df.append(new_results_tmp, ignore_index=True)
linestyles.append('-')
colors.append('red')
linestyles = iter(linestyles)
colors = iter(colors)
for i in df.index:
ax.plot(df.loc[i]['rec'],
df.loc[i]['prec'],
color=next(colors), linestyle=next(linestyles),
label="{}, f1_score={:.3f}, auprc={:.3f}".format(df.loc[i]['classifier'], df.loc[i]['f1_score'],
df.loc[i]['auprc']), **kwargs)
else:
for i in df.index:
ax.plot(df.loc[i]['rec'],
df.loc[i]['prec'],
label="{}, f1_score={:.3f}, auprc={:.3f}".format(df.loc[i]['classifier'], df.loc[i]['f1_score'],
df.loc[i]['auprc']), **kwargs)
ax.plot([0, 1], [0.5, 0.5], color='black', linestyle='--')
ax.set_xticks(np.arange(0.0, 1.1, step=0.1))
ax.set_xlabel("Recall", fontsize=24)
ax.set_yticks(np.arange(0.4, 1.1, step=0.1))
ax.tick_params(axis='both', which='major', labelsize=20)
ax.set_ylabel("Precision", fontsize=24)
ax.set_title('Precision-Recall Curve Comparison', fontweight='bold', fontsize=28)
ax.legend(prop={'size': 13}, loc='lower right')
plt.gcf().subplots_adjust(bottom=0.15)
return fig, ax
def plot_parallel_coordinates(self, datasets=None, ax=None, new_results=None, **kwargs):
'''
A method that plots a parallel coordinates plot for the whole benchmark, with or without the additional benchmarked method.
If no additional method is provided, the average performance of all of the methods is used as the reference.
Parameters
----------
datasets : list(str)
The names of the datasets from DIGEN to include in the plot.
new_results : dict
The result of evaluating the given estimator on the datasets.
For further reference, see: evaluate
'''
if ax is None:
fig, ax = plt.subplots(figsize=(25, 12))
else:
fig = ax.get_figure()
df = self.data
if datasets is None:
datasets = self.list_datasets()
if not isinstance(datasets, list):
datasets = [datasets]
datasets = list(map(lambda x: self._fullname(x), datasets))
df = df.pivot('classifier', columns='dataset', values='auroc')[datasets]
# if no dataset is added, just subtract mean performance for all datasets
if new_results is None:
df = df - df.mean()
# otherwise, use provided results as a reference
else:
z = pd.DataFrame.from_dict(new_results).transpose()
z.drop('name', inplace=True)
z['classifier'] = new_results['name']
z = z.pivot('classifier', columns='dataset', values='auroc')[datasets]
df = df - z.loc[new_results['name']]
# columns - classifiers, rows-datasets
df = df.transpose()
df['legend'] = df.index
if new_results is None:
ax = pd.plotting.parallel_coordinates(df, 'legend', # color_continuous_scale=px.colors.sequential.Blues,
# color=["lime", "tomato","dodgerblue"],
alpha=0.3)
# ax.set_title("Performance of classifiers compared to mean AUROC on DIGEN benchmark")
ax.text(6, -0.03, 'Mean AUROC per dataset', color='red', fontsize=16)
ax.set_ylabel('Difference vs mean AUROC per dataset', fontsize=24)
# plt.title("Performance of classifiers compared to mean AUROC on DIGEN benchmark", fontsize=28)
# plt.ylabel('Difference vs mean AUROC value', fontsize=24)
else:
ax = pd.plotting.parallel_coordinates(df, 'legend', alpha=0.3)
ax.text(6, -0.03, new_results['name'], color='red', fontsize=16)
# plt.title("Performance of classifiers compared vs the method on DIGEN benchmark", fontsize=28)
ax.set_ylabel('Difference vs AUROC per dataset', fontsize=24)
# ax.set_xticks(np.arange(0.0, 1.1, step=0.1))
ax.set_xlabel("Classifiers", fontsize=24)
ax.set_yticks(np.arange(-0.5, 0.55, step=0.1))
ax.tick_params(axis='both', which='major', labelsize=20)
plt.plot([0, 7], [0, 0], color='red', linestyle='--')
ax.get_legend().remove()
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(False)
plt.xlabel('Classifiers')
plt.xticks(rotation=90)
# plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
# ax.set_facecolor("white")
# plt.gcf().subplots_adjust(bottom=0.15)
return fig, ax
def plot_heatmap(self, datasets=None, ax=None, new_results=None, **kwargs):
'''
A method that plots a heatmap (clustermap) of AUROC scores for the whole benchmark, with or without the additional benchmarked method.
If an additional method is provided, the datasets are sorted by its performance; otherwise the rows are clustered.
Parameters
----------
datasets : list(str)
The names of the datasets from DIGEN to include in the heatmap.
new_results : dict
The result of evaluating the given estimator on the datasets.
For further reference, see: evaluate
'''
df = self.data # .pivot('classifier', columns='dataset', values='auroc')
# getting performance of all the classifiers, and limiting to the listed datasets
if new_results is not None:
z = pd.DataFrame.from_dict(new_results).transpose()
z.drop('name', inplace=True)
z['classifier'] = new_results['name']
df = df.append(z)
if datasets is None:
if new_results is not None:
assert (isinstance(new_results, dict))
datasets = list(new_results.keys())
datasets.remove('name')
else:
datasets = self.list_datasets()
if not isinstance(datasets, list):
datasets = [datasets]
datasets = list(map(lambda x: self._fullname(x), datasets))
df = df.pivot('classifier', columns='dataset', values='auroc')[datasets]
df = df.transpose()
if new_results is not None:
fig = sns.clustermap(df.astype(float).sort_values(by=new_results['name'], ascending=False), cmap='Blues',
yticklabels=True, row_cluster=False, **kwargs)
else:
fig = sns.clustermap(df.astype(float), cmap='Blues', yticklabels=True, row_cluster=True, **kwargs)
fig.ax_heatmap.set_xlabel('')
fig.ax_heatmap.set_ylabel('')
plt.setp(fig.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
col = fig.cax.get_position()
fig.cax.set_position([col.x0 + 1, col.y0 - 0.35, col.width, col.height])
return fig, ax
if __name__ == '__main__':
benchmark = Benchmark()
benchmark.list_datasets()
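# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of benchmarking a custom scikit-learn estimator with
# DIGEN; the import path, dataset name and hyper-parameter ranges below are
# assumptions made for demonstration only.
#
# from sklearn.tree import DecisionTreeClassifier
# from digen.benchmark import Benchmark
#
# def parameter_scopes(trial):
#     # Maps an Optuna trial to estimator parameters (see Benchmark.optimize).
#     return {
#         'max_depth': trial.suggest_int('max_depth', 2, 10),
#         'min_samples_split': trial.suggest_int('min_samples_split', 2, 20),
#     }
#
# benchmark = Benchmark(n_trials=50, timeout=3600)
# results = benchmark.optimize(DecisionTreeClassifier(), parameter_scopes,
#                              datasets='digen10')
# fig, ax = benchmark.plot_roc('digen10', new_results=results)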
avg_line_length: 42.074653 | max_line_length: 148 | alphanum_fraction: 0.594883
hexsha: 828d308b0a069a863ae8d82d6398df86eb03a656 | size: 4,538 | ext: py | lang: Python
max_stars: path: homeassistant/components/yale_smart_alarm/config_flow.py | repo: marcelblijleven/core | head: 6d13466f8a9b157609227046e5ee542d1a261d0f | licenses: ["Apache-2.0"] | stars: 1 | events: 2021-07-31T21:08:49.000Z to 2021-07-31T21:08:49.000Z
max_issues: path: homeassistant/components/yale_smart_alarm/config_flow.py | repo: marcelblijleven/core | head: 6d13466f8a9b157609227046e5ee542d1a261d0f | licenses: ["Apache-2.0"] | issues: 58 | events: 2021-01-04T08:20:32.000Z to 2022-03-31T06:05:13.000Z
max_forks: path: homeassistant/components/yale_smart_alarm/config_flow.py | repo: marcelblijleven/core | head: 6d13466f8a9b157609227046e5ee542d1a261d0f | licenses: ["Apache-2.0"] | forks: 1 | events: 2021-04-04T22:03:57.000Z to 2021-04-04T22:03:57.000Z
"""Adds config flow for Yale Smart Alarm integration."""
from __future__ import annotations
import voluptuous as vol
from yalesmartalarmclient.client import AuthenticationError, YaleSmartAlarmClient
from homeassistant import config_entries
from homeassistant.const import CONF_NAME, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from .const import CONF_AREA_ID, DEFAULT_AREA_ID, DEFAULT_NAME, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_AREA_ID, default=DEFAULT_AREA_ID): cv.string,
}
)
DATA_SCHEMA_AUTH = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
class YaleConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Yale integration."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
entry: config_entries.ConfigEntry
async def async_step_import(self, config: dict):
"""Import a configuration from config.yaml."""
self.context.update(
{"title_placeholders": {CONF_NAME: f"YAML import {DOMAIN}"}}
)
return await self.async_step_user(user_input=config)
async def async_step_reauth(self, user_input=None):
"""Handle initiation of re-authentication with Yale."""
self.entry = self.hass.config_entries.async_get_entry(self.context["entry_id"])
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Dialog that informs the user that reauth is required."""
errors = {}
if user_input is not None:
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
try:
await self.hass.async_add_executor_job(
YaleSmartAlarmClient, username, password
)
except AuthenticationError as error:
LOGGER.error("Authentication failed. Check credentials %s", error)
return self.async_show_form(
step_id="reauth_confirm",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_auth"},
)
existing_entry = await self.async_set_unique_id(username)
if existing_entry:
self.hass.config_entries.async_update_entry(
existing_entry,
data={
**self.entry.data,
CONF_USERNAME: username,
CONF_PASSWORD: password,
},
)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_show_form(
step_id="reauth_confirm",
data_schema=DATA_SCHEMA_AUTH,
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
name = user_input.get(CONF_NAME, DEFAULT_NAME)
area = user_input.get(CONF_AREA_ID, DEFAULT_AREA_ID)
try:
await self.hass.async_add_executor_job(
YaleSmartAlarmClient, username, password
)
except AuthenticationError as error:
LOGGER.error("Authentication failed. Check credentials %s", error)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_auth"},
)
await self.async_set_unique_id(username)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=username,
data={
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_NAME: name,
CONF_AREA_ID: area,
},
)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors,
)
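# --- Illustrative sketch (not part of the original component). ---
# Shows how DATA_SCHEMA fills in defaults when validating the user step
# input; assumes an environment where the imports above resolve, and the
# credentials below are placeholders.
#
# validated = DATA_SCHEMA({
#     CONF_USERNAME: "user@example.com",
#     CONF_PASSWORD: "example-password",
# })
# assert validated[CONF_NAME] == DEFAULT_NAME
# assert validated[CONF_AREA_ID] == DEFAULT_AREA_ID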
avg_line_length: 34.907692 | max_line_length: 87 | alphanum_fraction: 0.602248
hexsha: bdcad064726831a1ce9dae6be82e0cb4de99b74c | size: 640 | ext: py | lang: Python
max_stars: path: aridanalysis/error_strings.py | repo: UBC-MDS/DSCI524_Group8 | head: 564c0f1371fde677a212d2d9bcf83d172ccc6a7b | licenses: ["MIT"] | stars: 1 | events: 2021-05-23T01:41:24.000Z to 2021-05-23T01:41:24.000Z
max_issues: path: aridanalysis/error_strings.py | repo: UBC-MDS/aridanalysis_py | head: 564c0f1371fde677a212d2d9bcf83d172ccc6a7b | licenses: ["MIT"] | issues: 37 | events: 2021-03-05T10:04:03.000Z to 2021-03-26T18:48:14.000Z
max_forks: path: aridanalysis/error_strings.py | repo: UBC-MDS/aridanalysis_py | head: 564c0f1371fde677a212d2d9bcf83d172ccc6a7b | licenses: ["MIT"] | forks: 2 | events: 2021-05-23T01:41:31.000Z to 2021-07-05T17:32:59.000Z
# flake8: noqa
INVALID_DATAFRAME = "ERROR: INVALID DATAFRAME INPUT"
EMPTY_DATAFRAME = "ERROR: EMPTY DATAFRAME INPUT"
RESPONSE_NOT_FOUND = "ERROR: RESPONSE SELECTION NOT PRESENT IN DATAFRAME"
INVALID_RESPONSE_DATATYPE = "ERROR: INVALID RESPONSE DATATYPE"
INVALID_REGULARIZATION_INPUT = "ERROR: INVALID REGULARIZATION INPUT"
INVALID_ALPHA_INPUT = "ERROR: INVALID ALPHA INPUT DATATYPE"
NO_VALID_FEATURES = "ERROR: NO VALID FEATURES AVAILABLE"
INVALID_INPUT_LIST = "ERROR: INPUT FEATURE ARGUMENT NOT A LIST"
INVALID_TYPE_INPUT = "ERROR: INVALID MODEL TYPE SPECIFIED"
avg_line_length: 58.181818 | max_line_length: 83 | alphanum_fraction: 0.717188
hexsha: b10d35df4260ba8609352e1e8698d09311dd81e3 | size: 903 | ext: py | lang: Python
max_stars: path: tests/MyGame/Example/Any.py | repo: chi-w-ng/flatbuffers | head: e2b26ee19b3f680bbebf65c9c87ddff492cd25fd | licenses: ["Apache-2.0"] | stars: 16,870 | events: 2015-01-01T14:57:29.000Z to 2022-03-31T21:56:17.000Z
max_issues: path: tests/MyGame/Example/Any.py | repo: chi-w-ng/flatbuffers | head: e2b26ee19b3f680bbebf65c9c87ddff492cd25fd | licenses: ["Apache-2.0"] | issues: 6,363 | events: 2015-01-01T04:41:50.000Z to 2022-03-31T23:36:52.000Z
max_forks: path: tests/MyGame/Example/Any.py | repo: chi-w-ng/flatbuffers | head: e2b26ee19b3f680bbebf65c9c87ddff492cd25fd | licenses: ["Apache-2.0"] | forks: 3,324 | events: 2015-01-02T12:42:10.000Z to 2022-03-31T10:57:27.000Z
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Example
class Any(object):
NONE = 0
Monster = 1
TestSimpleTableWithEnum = 2
MyGame_Example2_Monster = 3
def AnyCreator(unionType, table):
from flatbuffers.table import Table
if not isinstance(table, Table):
return None
if unionType == Any().Monster:
import MyGame.Example.Monster
return MyGame.Example.Monster.MonsterT.InitFromBuf(table.Bytes, table.Pos)
if unionType == Any().TestSimpleTableWithEnum:
import MyGame.Example.TestSimpleTableWithEnum
return MyGame.Example.TestSimpleTableWithEnum.TestSimpleTableWithEnumT.InitFromBuf(table.Bytes, table.Pos)
if unionType == Any().MyGame_Example2_Monster:
import MyGame.Example2.Monster
return MyGame.Example2.Monster.MonsterT.InitFromBuf(table.Bytes, table.Pos)
return None
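# --- Illustrative sketch (not generated code). ---
# Resolving a union member through AnyCreator; assumes `buf` and `pos`
# point at a serialized MyGame.Example.Monster table.
#
# from flatbuffers.table import Table
# union_obj = AnyCreator(Any.Monster, Table(buf, pos))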
avg_line_length: 34.730769 | max_line_length: 114 | alphanum_fraction: 0.734219
hexsha: e37abedda59bb1dd581aa04f1d67aff3fa9e3d1d | size: 13,681 | ext: py | lang: Python
max_stars: path: signals/PeakSignal.py | repo: SengerM/signals | head: 2aa32a83dad4a04e4d63c8ee58daf5c1a2b02931 | licenses: ["MIT"] | stars: null | events: null to null
max_issues: path: signals/PeakSignal.py | repo: SengerM/signals | head: 2aa32a83dad4a04e4d63c8ee58daf5c1a2b02931 | licenses: ["MIT"] | issues: null | events: null to null
max_forks: path: signals/PeakSignal.py | repo: SengerM/signals | head: 2aa32a83dad4a04e4d63c8ee58daf5c1a2b02931 | licenses: ["MIT"] | forks: null | events: null to null
from .Signal import Signal
import numpy as np
from scipy import interpolate, integrate
import warnings
from scipy.stats import median_abs_deviation
class PeakSignal(Signal):
"""Class intended to deal with 'single peak signals', i.e. a signal that is 'zero zero PEAK zero zero'."""
@property
def peak_start_index(self) -> int:
"""Returns the index of the sample where the peak starts."""
if not hasattr(self, '_peak_start_index'):
try:
peak_index = np.argmax(self.samples)
median_before_peak = np.nanmedian(self.samples[:peak_index])
std_before_peak = median_abs_deviation(self.samples[:peak_index])*1.4826 # https://en.wikipedia.org/wiki/Median_absolute_deviation#Relation_to_standard_deviation
indices_where_signal_is_lower_than_median = np.squeeze(np.where(self.samples<=median_before_peak+std_before_peak))
self._peak_start_index = indices_where_signal_is_lower_than_median[np.squeeze(np.where(indices_where_signal_is_lower_than_median<peak_index))[-1]]
except:
self._peak_start_index = None
return self._peak_start_index
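# Illustrative aside (not part of the original class): the 1.4826 factor
# above converts the median absolute deviation into an estimate of the
# standard deviation for normally distributed noise, e.g.:
#
#   import numpy as np
#   from scipy.stats import median_abs_deviation
#   rng = np.random.default_rng(0)
#   noise = rng.normal(scale=2.0, size=10_000)   # assumed synthetic baseline
#   print(median_abs_deviation(noise) * 1.4826)  # ~2.0, close to noise.std()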
@property
def peak_start_time(self) -> float:
"""Returns the time at which the peak starts. The current implementation returns the time of the sample with `self.peak_start_index`."""
if self.peak_start_index is not None:
return self.time[self.peak_start_index]
else:
return float('NaN')
@property
def baseline(self) -> float:
"""Returns the baseline of the signal, i.e. the value at which it was stable before the peak started."""
if not hasattr(self, '_baseline'):
try:
self._baseline = np.nanmean(self.samples[:self.peak_start_index-1])
except:
self._baseline = float('NaN')
return self._baseline
@property
def amplitude(self) -> float:
"""Returns the amplitude of the signal defined as the difference between the maximum value and the baseline."""
if not hasattr(self, '_amplitude'):
self._amplitude = (self.samples - self.baseline).max()
return self._amplitude
@property
def noise(self) -> float:
"""Returns the noise of the signal defined as the standard deviation of the samples before the peak starts, or `float('NaN')` if it cannot be determined."""
if not hasattr(self, '_noise'):
try:
self._noise = np.nanstd(self.samples[:self.peak_start_index-1])
except:
self._noise = float('NaN')
return self._noise
@property
def SNR(self) -> float:
"""Returns the signal to noise ratio defined as amplitude/noise."""
return self.amplitude/self.noise
@property
def rise_time(self) -> float:
"""Returns the rise time defined as the time spent by the signal to go from 10 % to 90 %."""
if not hasattr(self, '_rise_time'):
try:
self._rise_time = self.find_time_at_rising_edge(90) - self.find_time_at_rising_edge(10)
except (ValueError, RuntimeError):
self._rise_time = float('NaN')
return self._rise_time
@property
def rising_edge_indices(self) -> list:
"""Returns a list of integer numbers corresponding to the indices of the `time` and `samples` arrays where the rising edge is located. The rising edge is considered to start at 10 % and end at 90 %. If the rising edge cannot be found, returns an empty list."""
if not hasattr(self, '_rising_edge_indices'):
try:
self._rising_edge_indices = self.find_rising_edge_indices(low=10,high=90)
except:
self._rising_edge_indices = []
return self._rising_edge_indices
@property
def falling_edge_indices(self) -> list:
"""Returns a list of integer numbers corresponding to the indices of the `time` and `samples` arrays where the falling edge is located. The falling edge is considered to start at 10 % and end at 90 %. If the falling edge cannot be found, returns an empty list."""
if not hasattr(self, '_falling_edge_indices'):
try:
self._falling_edge_indices = self.find_falling_edge_indices(low=10,high=90)
except:
self._falling_edge_indices = []
return self._falling_edge_indices
@property
def time_over_noise(self) -> float:
"""Returns the time the pulse spends over the noise value."""
if not hasattr(self, '_time_over_noise'):
try:
self._time_over_noise = self.find_time_over_threshold(threshold = self.noise/self.amplitude*100)
except:
self._time_over_noise = float('NaN')
return self._time_over_noise
@property
def peak_integral(self) -> float:
"""Returns the integral under the peak. The peak start is defined as that point where the signal goes outside of the noise band, and the end is the moment in which it goes back inside the noise band."""
if not hasattr(self, '_peak_integral'):
try:
integral, *_ = integrate.quad(
func = lambda t: (self(time=t)-self.baseline),
a = self.find_time_at_rising_edge(self.noise/self.amplitude*100),
b = self.find_time_at_falling_edge(self.noise/self.amplitude*100),
)
self._peak_integral = integral
except:
self._peak_integral = float('NaN')
return self._peak_integral
def find_rising_edge_indices(self, low: float, high: float) -> list:
"""Finds the rising edge of the signal. Returns a list of integers corresponding to the indices of the rising edge between `low` % and `high` %.
- low: float, percentage to consider where the rising edge starts, e.g. 10 %.
- high: float, percentage to consider where the rising edge ends, e.g. 90 %."""
for name,x in {'low': low, 'high': high}.items():
if not isinstance(x, (int, float)):
raise TypeError(f'`{name}` must be a float number, but received object of type {type(x)}.')
if not low < high:
raise ValueError(f'`low` must be less than `high`, received low={low} and high={high}.')
k = self.samples.argmax()
k_start_rise = None
k_stop_rise = None
while k > 0:
if self.samples[k] - self.baseline > self.amplitude*high/100:
k_stop_rise = k+1
if self.samples[k] - self.baseline < self.amplitude*low/100:
k_start_rise = k
break
k -= 1
if k_start_rise is None or k_stop_rise is None or k_start_rise == k_stop_rise:
raise RuntimeError(f'Cannot find the rising edge of this signal.')
return [k for k in range(k_start_rise, k_stop_rise)]
def find_falling_edge_indices(self, low: float, high: float) -> list:
"""Finds the falling edge of the signal. Returns a list of integers corresponding to the indices of the falling edge between `low` % and `high` %.
- low: float, percentage to consider where the falling edge starts, e.g. 10 %.
- high: float, percentage to consider where the falling edge ends, e.g. 90 %."""
for name,x in {'low': low, 'high': high}.items():
if not isinstance(x, (int, float)):
raise TypeError(f'`{name}` must be a float number, but received object of type {type(x)}.')
if not low < high:
raise ValueError(f'`low` must be less than `high`, received low={low} and high={high}.')
k = self.samples.argmax()
k_start_fall = None
k_stop_fall = None
while k < len(self.samples):
if self.samples[k] - self.baseline > self.amplitude*high/100:
k_start_fall = k
if self.samples[k] - self.baseline < self.amplitude*low/100:
k_stop_fall = k + 1
break
k += 1
if k_start_fall is None or k_stop_fall is None:
raise RuntimeError(f'Cannot find the falling edge of this signal.')
return [k for k in range(k_start_fall, k_stop_fall)]
def find_time_at_rising_edge(self, threshold: float) -> float:
"""Given some threshold value (as a percentage) returns the time at which the signal crosses such threshold within the rising edge. The signal is linearly interpolated between samples."""
if not isinstance(threshold, (float, int)):
raise TypeError(f'`threshold` must be a float number, received object of type {type(threshold)}.')
if not 0 < threshold < 100:
raise ValueError(f'`threshold` must be between 0 and 100, received {threshold}.')
if np.isnan(self.amplitude):
raise RuntimeError('Cannot find the amplitude of the signal.')
if np.isnan(self.baseline):
raise RuntimeError('Cannot find the baseline of the signal.')
rising_edge_indices = self.find_rising_edge_indices(low=threshold, high=99)
return float(interpolate.interp1d(
x = self.samples[rising_edge_indices],
y = self.time[rising_edge_indices],
)(self.amplitude*threshold/100 + self.baseline))
def find_time_at_falling_edge(self, threshold: float) -> float:
"""Given some threshold value (as a percentage) returns the time at which the signal crosses such threshold within the falling edge. The signal is linearly interpolated between samples."""
if not isinstance(threshold, (float, int)):
raise TypeError(f'`threshold` must be a float number, received object of type {type(threshold)}.')
if not 0 < threshold < 100:
raise ValueError(f'`threshold` must be between 0 and 100, received {threshold}.')
if np.isnan(self.amplitude):
raise RuntimeError('Cannot find the amplitude of the signal.')
if np.isnan(self.baseline):
raise RuntimeError('Cannot find the baseline of the signal.')
falling_edge_indices = self.find_falling_edge_indices(low=threshold, high=99)
return float(interpolate.interp1d(
x = self.samples[falling_edge_indices],
y = self.time[falling_edge_indices],
)(self.amplitude*threshold/100 + self.baseline))
def find_time_over_threshold(self, threshold: float) -> float:
"""Returns the time over some threshold where `threshold` is a percentage."""
if not isinstance(threshold, (float, int)):
raise TypeError(f'`threshold` must be a number, received object of type {type(threshold)}.')
if not 0 < threshold < 100:
raise ValueError(f'`threshold` must be within 0 and 100, received {threshold}.')
return self.find_time_at_falling_edge(threshold) - self.find_time_at_rising_edge(threshold)
def draw_in_plotly(signal, fig=None, baseline=True, noise=True, amplitude=True, rise_time=True, time_over_noise=True, peak_integral=True, peak_start_time=True):
"""Plot the signal along with the different quantities. `fig` is a plotly figure."""
import plotly.graph_objects as go
if not isinstance(signal, PeakSignal):
raise TypeError(f'`signal` must be an instance of {repr(PeakSignal)}, received object of type {repr(type(signal))}.')
if fig is None:
fig = go.Figure()
if type(fig) != type(go.Figure()):
raise TypeError(f'`fig` must be a plotly figure, received object of type {repr(type(fig))}.')
fig.add_trace(
go.Scatter(
x = signal.time,
y = signal.samples,
mode = 'lines+markers',
name = 'Signal',
)
)
if peak_integral == True:
try:
t_start = signal.find_time_at_rising_edge(signal.noise/signal.amplitude*100)
t_stop = signal.find_time_at_falling_edge(signal.noise/signal.amplitude*100)
except:
t_start = float('NaN')
t_stop = float('NaN')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.add_trace(
go.Scatter(
x = [t_start] + list(signal.time[(signal.time>t_start)&(signal.time<t_stop)]) + [t_start + signal.time_over_noise] + [t_stop,t_start] + [t_start],
y = [signal(t_start)] + list(signal.samples[(signal.time>t_start)&(signal.time<t_stop)]) + [signal(t_start + signal.time_over_noise)] + 2*[signal.baseline] + [signal(t_start)],
name = f'Integral ({signal.peak_integral:.2e})',
fill = 'toself',
mode = 'none',
line = dict(color='#ff6363'),
)
)
if baseline == True:
fig.add_trace(
go.Scatter(
x = [min(signal.time), max(signal.time)],
y = [signal.baseline]*2,
mode = 'lines',
name = f'Baseline ({signal.baseline:.2e})',
line = dict(color='black', dash='dash'),
)
)
if noise == True:
fig.add_trace(
go.Scatter(
x = [min(signal.time), max(signal.time)] + [float('NaN')] + [max(signal.time), min(signal.time)],
y = [signal.baseline + signal.noise, signal.baseline + signal.noise] + [float('NaN')] + [signal.baseline - signal.noise, signal.baseline - signal.noise],
mode = 'lines',
name = f'Noise ({signal.noise:.2e})',
line = dict(color='black', width=.7, dash='dash'),
)
)
if amplitude == True:
fig.add_trace(
go.Scatter(
x = [signal.time[np.argmax(signal.samples)]]*2,
y = [signal.baseline, signal.baseline + signal.amplitude],
name = f'Amplitude ({signal.amplitude:.2e})',
mode = 'lines+markers',
line = dict(color='rgba(50, 163, 39, .7)'),
marker = dict(size=11),
)
)
if rise_time == True:
try:
t_start_rise = signal.find_time_at_rising_edge(threshold=10)
except:
t_start_rise = float('NaN')
fig.add_trace(
go.Scatter(
x = [t_start_rise, t_start_rise+signal.rise_time, t_start_rise+signal.rise_time, t_start_rise, t_start_rise],
y = signal.baseline + np.array([signal.amplitude*.1, signal.amplitude*.1, signal.amplitude*.9, signal.amplitude*.9, signal.amplitude*.1]),
name = f'Rise time ({signal.rise_time:.2e})',
mode = 'lines',
line = dict(color='rgba(196, 0, 173, .5)'),
)
)
if time_over_noise == True:
threshold = signal.noise/signal.amplitude*100
try:
t_start = signal.find_time_at_rising_edge(threshold)
except:
t_start = float('NaN')
fig.add_trace(
go.Scatter(
x = [t_start,t_start + signal.time_over_noise],
y = 2*[signal.baseline+threshold/100*signal.amplitude],
name = f'Time over noise ({signal.time_over_noise:.2e})',
mode = 'lines+markers',
line = dict(color='#bf6c00', dash='dashdot'),
marker = dict(size=11),
)
)
if peak_start_time == True and not np.isnan(signal.peak_start_time):
fig.add_vline(
x = signal.peak_start_time,
line_color = 'black',
line_dash = 'dashdot',
line_width = .5,
annotation_text = f'Peak start time = {signal.peak_start_time:.2e}',
annotation_textangle = -90,
annotation_position = 'top left',
)
return fig
avg_line_length: 43.431746 | max_line_length: 265 | alphanum_fraction: 0.709817
hexsha: f1b8d1c71bb84c444070e9d37e5ea10465233a25 | size: 2,880 | ext: py | lang: Python
max_stars: path: volatility/framework/plugins/linux/lsof.py | repo: orchechik/volatility3 | head: 99a536e57e908850a656f8aed6262d56f60e4bdc | licenses: ["Linux-OpenIB"] | stars: null | events: null to null
max_issues: path: volatility/framework/plugins/linux/lsof.py | repo: orchechik/volatility3 | head: 99a536e57e908850a656f8aed6262d56f60e4bdc | licenses: ["Linux-OpenIB"] | issues: null | events: null to null
max_forks: path: volatility/framework/plugins/linux/lsof.py | repo: orchechik/volatility3 | head: 99a536e57e908850a656f8aed6262d56f60e4bdc | licenses: ["Linux-OpenIB"] | forks: null | events: null to null
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Linux's /proc file system."""
import logging
from typing import List
from volatility.framework import renderers, interfaces, constants
from volatility.framework.configuration import requirements
from volatility.framework.interfaces import plugins
from volatility.framework.objects import utility
from volatility.framework.symbols import linux
from volatility.plugins.linux import pslist
vollog = logging.getLogger(__name__)
class Lsof(plugins.PluginInterface):
"""Lists all memory maps for all processes."""
@classmethod
def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"]),
requirements.SymbolTableRequirement(name = "vmlinux", description = "Linux kernel symbols"),
requirements.PluginRequirement(name = 'pslist', plugin = pslist.PsList, version = (1, 0, 0)),
requirements.ListRequirement(name = 'pid',
description = 'Filter on specific process IDs',
element_type = int,
optional = True)
]
def _generator(self, tasks):
symbol_table = None
for task in tasks:
if symbol_table is None:
if constants.BANG not in task.vol.type_name:
raise ValueError("Task is not part of a symbol table")
symbol_table = task.vol.type_name.split(constants.BANG)[0]
name = utility.array_to_string(task.comm)
pid = int(task.pid)
for fd_num, _, full_path in linux.LinuxUtilities.files_descriptors_for_process(
self.context, symbol_table, task):
yield (0, (pid, name, fd_num, full_path))
def run(self):
filter_func = pslist.PsList.create_pid_filter(self.config.get('pid', None))
return renderers.TreeGrid([("PID", int), ("Process", str), ("FD", int), ("Path", str)],
self._generator(
pslist.PsList.list_tasks(self.context,
self.config['primary'],
self.config['vmlinux'],
filter_func = filter_func)))
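# --- Illustrative sketch (not part of the original plugin). ---
# The TreeGrid above yields one row per open file descriptor; the rows
# below are made-up examples of the (PID, Process, FD, Path) columns:
#
#   1     systemd     0    /dev/null
#   1     systemd     1    /dev/null
#   743   sshd        3    /var/log/auth.log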
avg_line_length: 48 | max_line_length: 106 | alphanum_fraction: 0.580903
hexsha: 576a03b88945397fdbdd0010dab7c02cebaf5cf4 | size: 32,887 | ext: py | lang: Python
max_stars: path: zuul.d/octavia/tests/unit/common/sample_configs/sample_configs.py | repo: yi-cloud/octavia | head: b7f5cfa4c3c454925a90c24984049539228806d7 | licenses: ["Apache-2.0"] | stars: null | events: null to null
max_issues: path: zuul.d/octavia/tests/unit/common/sample_configs/sample_configs.py | repo: yi-cloud/octavia | head: b7f5cfa4c3c454925a90c24984049539228806d7 | licenses: ["Apache-2.0"] | issues: null | events: null to null
max_forks: path: zuul.d/octavia/tests/unit/common/sample_configs/sample_configs.py | repo: yi-cloud/octavia | head: b7f5cfa4c3c454925a90c24984049539228806d7 | licenses: ["Apache-2.0"] | forks: null | events: null to null
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
from oslo_config import cfg
from octavia.common import constants
from octavia.tests.unit.common.sample_configs import sample_certs
CONF = cfg.CONF
def sample_amphora_tuple():
in_amphora = collections.namedtuple(
'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, '
'ha_port_id, role, status, vrrp_interface,'
'vrrp_priority')
return in_amphora(
id='sample_amphora_id_1',
lb_network_ip='10.0.1.1',
vrrp_ip='10.1.1.1',
ha_ip='192.168.10.1',
vrrp_port_id='1234',
ha_port_id='1234',
role=None,
status='ACTIVE',
vrrp_interface=None,
vrrp_priority=None)
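# Illustrative aside (not part of the original fixtures): the namedtuple
# returned above can be accessed by field name, e.g. in unit tests:
#
#   amp = sample_amphora_tuple()
#   assert amp.lb_network_ip == '10.0.1.1'
#   assert amp.status == 'ACTIVE'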
RET_PERSISTENCE = {
'type': 'HTTP_COOKIE',
'cookie_name': None}
RET_MONITOR_1 = {
'id': 'sample_monitor_id_1',
'type': 'HTTP',
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': '/index.html',
'expected_codes': '418',
'enabled': True}
RET_MONITOR_2 = {
'id': 'sample_monitor_id_2',
'type': 'HTTP',
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': '/healthmon.html',
'expected_codes': '418',
'enabled': True}
RET_MEMBER_1 = {
'id': 'sample_member_id_1',
'address': '10.0.0.99',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_MEMBER_2 = {
'id': 'sample_member_id_2',
'address': '10.0.0.98',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_MEMBER_3 = {
'id': 'sample_member_id_3',
'address': '10.0.0.97',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_POOL_1 = {
'id': 'sample_pool_id_1',
'protocol': 'http',
'lb_algorithm': 'roundrobin',
'members': [RET_MEMBER_1, RET_MEMBER_2],
'health_monitor': RET_MONITOR_1,
'session_persistence': RET_PERSISTENCE,
'enabled': True,
'operating_status': 'ACTIVE',
'stick_size': '10k'}
RET_POOL_2 = {
'id': 'sample_pool_id_2',
'protocol': 'http',
'lb_algorithm': 'roundrobin',
'members': [RET_MEMBER_3],
'health_monitor': RET_MONITOR_2,
'session_persistence': RET_PERSISTENCE,
'enabled': True,
'operating_status': 'ACTIVE',
'stick_size': '10k'}
RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem',
'primary_cn': 'FakeCn'}
RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2',
'primary_cn': 'FakeCn'}
RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3',
'primary_cn': 'FakeCn2'}
RET_L7RULE_1 = {
'id': 'sample_l7rule_id_1',
'type': constants.L7RULE_TYPE_PATH,
'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
'key': None,
'value': '/api',
'invert': False,
'enabled': True}
RET_L7RULE_2 = {
'id': 'sample_l7rule_id_2',
'type': constants.L7RULE_TYPE_HEADER,
'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS,
'key': 'Some-header',
'value': 'This\\ string\\\\\\ with\\ stuff',
'invert': True,
'enabled': True}
RET_L7RULE_3 = {
'id': 'sample_l7rule_id_3',
'type': constants.L7RULE_TYPE_COOKIE,
'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX,
'key': 'some-cookie',
'value': 'this.*|that',
'invert': False,
'enabled': True}
RET_L7RULE_4 = {
'id': 'sample_l7rule_id_4',
'type': constants.L7RULE_TYPE_FILE_TYPE,
'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
'key': None,
'value': 'jpg',
'invert': False,
'enabled': True}
RET_L7RULE_5 = {
'id': 'sample_l7rule_id_5',
'type': constants.L7RULE_TYPE_HOST_NAME,
'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH,
'key': None,
'value': '.example.com',
'invert': False,
'enabled': True}
RET_L7RULE_6 = {
'id': 'sample_l7rule_id_6',
'type': constants.L7RULE_TYPE_HOST_NAME,
'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH,
'key': None,
'value': '.example.com',
'invert': False,
'enabled': False}
RET_L7POLICY_1 = {
'id': 'sample_l7policy_id_1',
'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
'redirect_pool': RET_POOL_2,
'redirect_url': None,
'enabled': True,
'l7rules': [RET_L7RULE_1]}
RET_L7POLICY_2 = {
'id': 'sample_l7policy_id_2',
'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
'redirect_pool': None,
'redirect_url': 'http://www.example.com',
'enabled': True,
'l7rules': [RET_L7RULE_2, RET_L7RULE_3]}
RET_L7POLICY_3 = {
'id': 'sample_l7policy_id_3',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'enabled': True,
'l7rules': [RET_L7RULE_4, RET_L7RULE_5]}
RET_L7POLICY_4 = {
'id': 'sample_l7policy_id_4',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'enabled': True,
'l7rules': []}
RET_L7POLICY_5 = {
'id': 'sample_l7policy_id_5',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'enabled': False,
'l7rules': [RET_L7RULE_5]}
RET_L7POLICY_6 = {
'id': 'sample_l7policy_id_6',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'enabled': True,
'l7rules': []}
RET_LISTENER = {
'id': 'sample_listener_id_1',
'protocol_port': '80',
'protocol': 'HTTP',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'amphorae': [sample_amphora_tuple()],
'peer_port': 1024,
'topology': 'SINGLE',
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {},
'timeout_client_data': 50000,
'timeout_member_connect': 5000,
'timeout_member_data': 50000,
'timeout_tcp_inspect': 0,
}
RET_LISTENER_L7 = {
'id': 'sample_listener_id_1',
'protocol_port': '80',
'protocol': 'HTTP',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'amphorae': [sample_amphora_tuple()],
'peer_port': 1024,
'topology': 'SINGLE',
'pools': [RET_POOL_1, RET_POOL_2],
'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3,
RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6],
'enabled': True,
'insert_headers': {},
'timeout_client_data': 50000,
'timeout_member_connect': 5000,
'timeout_member_data': 50000,
'timeout_tcp_inspect': 0,
}
RET_LISTENER_TLS = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol': 'TERMINATED_HTTPS',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'tls_certificate_id': 'cont_id_1',
'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem',
'default_tls_container': RET_DEF_TLS_CONT,
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {}}
RET_LISTENER_TLS_SNI = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol_mode': 'http',
'protocol': 'TERMINATED_HTTPS',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'tls_certificate_id': 'cont_id_1',
'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem',
'default_tls_container': RET_DEF_TLS_CONT,
'crt_dir': '/v2/sample_loadbalancer_id_1',
'sni_container_ids': ['cont_id_2', 'cont_id_3'],
'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2],
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {}}
RET_AMPHORA = {
'id': 'sample_amphora_id_1',
'lb_network_ip': '10.0.1.1',
'vrrp_ip': '10.1.1.1',
'ha_ip': '192.168.10.1',
'vrrp_port_id': '1234',
'ha_port_id': '1234',
'role': None,
'status': 'ACTIVE',
'vrrp_interface': None,
'vrrp_priority': None}
RET_LB = {
'host_amphora': RET_AMPHORA,
'id': 'sample_loadbalancer_id_1',
'vip_address': '10.0.0.2',
'listener': RET_LISTENER,
'topology': 'SINGLE',
'enabled': True,
'global_connection_limit': constants.HAPROXY_MAX_MAXCONN}
RET_LB_L7 = {
'host_amphora': RET_AMPHORA,
'id': 'sample_loadbalancer_id_1',
'vip_address': '10.0.0.2',
'listener': RET_LISTENER_L7,
'topology': 'SINGLE',
'enabled': True,
'global_connection_limit': constants.HAPROXY_MAX_MAXCONN}
UDP_SOURCE_IP_BODY = {
'type': constants.SESSION_PERSISTENCE_SOURCE_IP,
'persistence_timeout': 33,
'persistence_granularity': '255.0.0.0'
}
RET_UDP_HEALTH_MONITOR = {
'id': 'sample_monitor_id_1',
'type': constants.HEALTH_MONITOR_UDP_CONNECT,
'delay': 30,
'timeout': 31,
'enabled': True,
'fall_threshold': 3,
'check_script_path': (CONF.haproxy_amphora.base_path +
'/lvs/check/udp_check.sh')
}
UDP_HEALTH_MONITOR_NO_SCRIPT = {
'id': 'sample_monitor_id_1',
'check_script_path': None,
'delay': 30,
'enabled': True,
'fall_threshold': 3,
'timeout': 31,
'type': 'UDP'
}
RET_UDP_MEMBER = {
'id': 'member_id_1',
'address': '192.0.2.10',
'protocol_port': 82,
'weight': 13,
'enabled': True,
'monitor_address': None,
'monitor_port': None
}
RET_UDP_MEMBER_MONITOR_IP_PORT = {
'id': 'member_id_1',
'address': '192.0.2.10',
'protocol_port': 82,
'weight': 13,
'enabled': True,
'monitor_address': '192.168.1.1',
'monitor_port': 9000
}
UDP_MEMBER_1 = {
'id': 'sample_member_id_1',
'address': '10.0.0.99',
'enabled': True,
'protocol_port': 82,
'weight': 13,
'monitor_address': None,
'monitor_port': None
}
UDP_MEMBER_2 = {
'id': 'sample_member_id_2',
'address': '10.0.0.98',
'enabled': True,
'protocol_port': 82,
'weight': 13,
'monitor_address': None,
'monitor_port': None
}
RET_UDP_POOL = {
'id': 'sample_pool_id_1',
'enabled': True,
'health_monitor': UDP_HEALTH_MONITOR_NO_SCRIPT,
'lb_algorithm': 'rr',
'members': [UDP_MEMBER_1, UDP_MEMBER_2],
'protocol': 'udp',
'session_persistence': UDP_SOURCE_IP_BODY
}
RET_UDP_LISTENER = {
'connection_limit': 98,
'default_pool': {
'id': 'sample_pool_id_1',
'enabled': True,
'health_monitor': RET_UDP_HEALTH_MONITOR,
'lb_algorithm': 'rr',
'members': [UDP_MEMBER_1, UDP_MEMBER_2],
'protocol': 'udp',
'session_persistence': UDP_SOURCE_IP_BODY
},
'enabled': True,
'id': 'sample_listener_id_1',
'protocol_mode': 'udp',
'protocol_port': '80'
}
def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, tls=False, sni=False,
topology=None, l7=False, enabled=True):
proto = 'HTTP' if proto is None else proto
topology = 'SINGLE' if topology is None else topology
in_lb = collections.namedtuple(
'load_balancer', 'id, name, protocol, vip, listeners, amphorae,'
' enabled')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
protocol=proto,
vip=sample_vip_tuple(),
topology=topology,
listeners=[sample_listener_tuple(proto=proto, monitor=monitor,
persistence=persistence,
persistence_type=persistence_type,
tls=tls,
sni=sni,
l7=l7,
enabled=enabled)],
enabled=enabled
)
def sample_listener_loadbalancer_tuple(proto=None, topology=None,
enabled=True):
proto = 'HTTP' if proto is None else proto
topology = 'SINGLE' if topology is None else topology
in_lb = collections.namedtuple(
'load_balancer', 'id, name, protocol, vip, amphorae, topology, '
'enabled')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
protocol=proto,
vip=sample_vip_tuple(),
amphorae=[sample_amphora_tuple()],
topology=topology,
enabled=enabled
)
def sample_vrrp_group_tuple():
in_vrrp_group = collections.namedtuple(
'vrrp_group', 'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, '
'advert_int, smtp_server, smtp_connect_timeout, '
'vrrp_group_name')
return in_vrrp_group(
vrrp_group_name='sample_loadbalancer_id_1',
load_balancer_id='sample_loadbalancer_id_1',
vrrp_auth_type='PASS',
vrrp_auth_pass='123',
advert_int='1',
smtp_server='',
smtp_connect_timeout='')
def sample_vip_tuple():
vip = collections.namedtuple('vip', 'ip_address')
return vip(ip_address='10.0.0.2')
def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
persistence=True, persistence_type=None,
persistence_cookie=None, persistence_timeout=None,
persistence_granularity=None,
tls=False, sni=False, peer_port=None, topology=None,
l7=False, enabled=True, insert_headers=None,
be_proto=None, monitor_ip_port=False,
monitor_proto=None, backup_member=False,
disabled_member=False, connection_limit=-1,
timeout_client_data=50000,
timeout_member_connect=5000,
timeout_member_data=50000,
timeout_tcp_inspect=0):
proto = 'HTTP' if proto is None else proto
if be_proto is None:
be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
topology = 'SINGLE' if topology is None else topology
    port = '443' if proto in ('HTTPS', 'TERMINATED_HTTPS') else '80'
peer_port = 1024 if peer_port is None else peer_port
insert_headers = insert_headers or {}
in_listener = collections.namedtuple(
'listener', 'id, project_id, protocol_port, protocol, default_pool, '
'connection_limit, tls_certificate_id, '
'sni_container_ids, default_tls_container, '
'sni_containers, load_balancer, peer_port, pools, '
'l7policies, enabled, insert_headers, timeout_client_data,'
'timeout_member_connect, timeout_member_data, '
'timeout_tcp_inspect',)
if l7:
pools = [
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto),
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie, sample_pool=2,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto)]
l7policies = [
sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3),
sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4),
sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5),
sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6)]
else:
pools = [
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto,
backup_member=backup_member, disabled_member=disabled_member)]
l7policies = []
return in_listener(
id='sample_listener_id_1',
project_id='12345',
protocol_port=port,
protocol=proto,
load_balancer=sample_listener_loadbalancer_tuple(proto=proto,
topology=topology),
peer_port=peer_port,
default_pool=sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
persistence_timeout=persistence_timeout,
persistence_granularity=persistence_granularity,
monitor_ip_port=monitor_ip_port,
monitor_proto=monitor_proto) if alloc_default_pool else '',
connection_limit=connection_limit,
tls_certificate_id='cont_id_1' if tls else '',
sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [],
default_tls_container=sample_tls_container_tuple(
id='cont_id_1', certificate=sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN
) if tls else '',
sni_containers=[
sample_tls_sni_container_tuple(
tls_container_id='cont_id_2',
tls_container=sample_tls_container_tuple(
id='cont_id_2', certificate=sample_certs.X509_CERT_2,
private_key=sample_certs.X509_CERT_KEY_2,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN_2)),
sample_tls_sni_container_tuple(
tls_container_id='cont_id_3',
tls_container=sample_tls_container_tuple(
id='cont_id_3', certificate=sample_certs.X509_CERT_3,
private_key=sample_certs.X509_CERT_KEY_3,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN_3))]
if sni else [],
pools=pools,
l7policies=l7policies,
enabled=enabled,
insert_headers=insert_headers,
timeout_client_data=timeout_client_data,
timeout_member_connect=timeout_member_connect,
timeout_member_data=timeout_member_data,
timeout_tcp_inspect=timeout_tcp_inspect
)
def sample_tls_sni_container_tuple(tls_container_id=None, tls_container=None):
sc = collections.namedtuple('sni_container', 'tls_container_id, '
'tls_container')
return sc(tls_container_id=tls_container_id, tls_container=tls_container)
def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None):
sc = collections.namedtuple('sni_containers', 'tls_container_id, '
'tls_container')
return [sc(tls_container_id=tls_container_id, tls_container=tls_container)]
def sample_tls_container_tuple(id='cont_id_1', certificate=None,
private_key=None, intermediates=None,
primary_cn=None):
sc = collections.namedtuple(
'tls_container',
'id, certificate, private_key, intermediates, primary_cn')
return sc(id=id, certificate=certificate, private_key=private_key,
intermediates=intermediates or [], primary_cn=primary_cn)
def sample_pool_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, persistence_cookie=None,
persistence_timeout=None, persistence_granularity=None,
sample_pool=1, monitor_ip_port=False,
monitor_proto=None, backup_member=False,
disabled_member=False):
proto = 'HTTP' if proto is None else proto
monitor_proto = proto if monitor_proto is None else monitor_proto
in_pool = collections.namedtuple(
'pool', 'id, protocol, lb_algorithm, members, health_monitor,'
'session_persistence, enabled, operating_status')
if (proto == constants.PROTOCOL_UDP and
persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP):
kwargs = {'persistence_type': persistence_type,
'persistence_timeout': persistence_timeout,
'persistence_granularity': persistence_granularity}
else:
kwargs = {'persistence_type': persistence_type,
'persistence_cookie': persistence_cookie}
persis = sample_session_persistence_tuple(**kwargs)
mon = None
if sample_pool == 1:
id = 'sample_pool_id_1'
members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
monitor_ip_port=monitor_ip_port),
sample_member_tuple('sample_member_id_2', '10.0.0.98',
monitor_ip_port=monitor_ip_port,
backup=backup_member,
enabled=not disabled_member)]
if monitor is True:
mon = sample_health_monitor_tuple(proto=monitor_proto)
elif sample_pool == 2:
id = 'sample_pool_id_2'
members = [sample_member_tuple('sample_member_id_3', '10.0.0.97',
monitor_ip_port=monitor_ip_port)]
if monitor is True:
mon = sample_health_monitor_tuple(proto=monitor_proto, sample_hm=2)
return in_pool(
id=id,
protocol=proto,
lb_algorithm='ROUND_ROBIN',
members=members,
health_monitor=mon,
session_persistence=persis if persistence is True else None,
enabled=True,
operating_status='ACTIVE')
def sample_member_tuple(id, ip, enabled=True, operating_status='ACTIVE',
monitor_ip_port=False, backup=False):
in_member = collections.namedtuple('member',
'id, ip_address, protocol_port, '
'weight, subnet_id, '
'enabled, operating_status, '
'monitor_address, monitor_port, '
'backup')
monitor_address = '192.168.1.1' if monitor_ip_port else None
monitor_port = 9000 if monitor_ip_port else None
return in_member(
id=id,
ip_address=ip,
protocol_port=82,
weight=13,
subnet_id='10.0.0.1/24',
enabled=enabled,
operating_status=operating_status,
monitor_address=monitor_address,
monitor_port=monitor_port,
backup=backup)
def sample_session_persistence_tuple(persistence_type=None,
persistence_cookie=None,
persistence_timeout=None,
persistence_granularity=None):
spersistence = collections.namedtuple('SessionPersistence',
'type, cookie_name, '
'persistence_timeout, '
'persistence_granularity')
pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type
return spersistence(type=pt,
cookie_name=persistence_cookie,
persistence_timeout=persistence_timeout,
persistence_granularity=persistence_granularity)
def sample_health_monitor_tuple(proto='HTTP', sample_hm=1):
    proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
monitor = collections.namedtuple(
'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,'
'http_method, url_path, expected_codes, enabled, '
'check_script_path')
if sample_hm == 1:
id = 'sample_monitor_id_1'
url_path = '/index.html'
elif sample_hm == 2:
id = 'sample_monitor_id_2'
url_path = '/healthmon.html'
kwargs = {
'id': id,
'type': proto,
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': url_path,
'expected_codes': '418',
'enabled': True
}
if proto == constants.HEALTH_MONITOR_UDP_CONNECT:
kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path +
'lvs/check/' + 'udp_check.sh')
else:
kwargs['check_script_path'] = None
return monitor(**kwargs)
def sample_l7policy_tuple(id,
action=constants.L7POLICY_ACTION_REJECT,
redirect_pool=None, redirect_url=None,
enabled=True, sample_policy=1):
in_l7policy = collections.namedtuple('l7policy',
'id, action, redirect_pool, '
'redirect_url, l7rules, enabled')
l7rules = []
if sample_policy == 1:
action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL
redirect_pool = sample_pool_tuple(sample_pool=2)
l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')]
elif sample_policy == 2:
action = constants.L7POLICY_ACTION_REDIRECT_TO_URL
redirect_url = 'http://www.example.com'
l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2),
sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)]
elif sample_policy == 3:
action = constants.L7POLICY_ACTION_REJECT
l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4),
sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)]
elif sample_policy == 4:
action = constants.L7POLICY_ACTION_REJECT
elif sample_policy == 5:
action = constants.L7POLICY_ACTION_REJECT
enabled = False
l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)]
elif sample_policy == 6:
action = constants.L7POLICY_ACTION_REJECT
l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)]
return in_l7policy(
id=id,
action=action,
redirect_pool=redirect_pool,
redirect_url=redirect_url,
l7rules=l7rules,
enabled=enabled)
def sample_l7rule_tuple(id,
type=constants.L7RULE_TYPE_PATH,
compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
key=None,
value='/api',
invert=False,
enabled=True,
sample_rule=1):
in_l7rule = collections.namedtuple('l7rule',
'id, type, compare_type, '
'key, value, invert, enabled')
if sample_rule == 2:
type = constants.L7RULE_TYPE_HEADER
compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS
key = 'Some-header'
value = 'This string\\ with stuff'
invert = True
enabled = True
if sample_rule == 3:
type = constants.L7RULE_TYPE_COOKIE
compare_type = constants.L7RULE_COMPARE_TYPE_REGEX
key = 'some-cookie'
value = 'this.*|that'
invert = False
enabled = True
if sample_rule == 4:
type = constants.L7RULE_TYPE_FILE_TYPE
compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
key = None
value = 'jpg'
invert = False
enabled = True
if sample_rule == 5:
type = constants.L7RULE_TYPE_HOST_NAME
compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
key = None
value = '.example.com'
invert = False
enabled = True
if sample_rule == 6:
type = constants.L7RULE_TYPE_HOST_NAME
compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
key = None
value = '.example.com'
invert = False
enabled = False
return in_l7rule(
id=id,
type=type,
compare_type=compare_type,
key=key,
value=value,
invert=invert,
enabled=enabled)
def sample_base_expected_config(frontend=None, backend=None,
peers=None, global_opts=None, defaults=None):
if frontend is None:
frontend = ("frontend sample_listener_id_1\n"
" option httplog\n"
" maxconn {maxconn}\n"
" bind 10.0.0.2:80\n"
" mode http\n"
" default_backend sample_pool_id_1\n"
" timeout client 50000\n\n").format(
maxconn=constants.HAPROXY_MAX_MAXCONN)
if backend is None:
backend = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31s\n"
" option httpchk GET /index.html\n"
" http-check expect rstatus 418\n"
" fullconn {maxconn}\n"
" option allbackups\n"
" timeout connect 5000\n"
" timeout server 50000\n"
" server sample_member_id_1 10.0.0.99:82 weight 13 "
"check inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 weight 13 "
"check inter 30s fall 3 rise 2 cookie sample_member_id_2\n"
"\n").format(maxconn=constants.HAPROXY_MAX_MAXCONN)
if peers is None:
peers = "\n\n"
if global_opts is None:
global_opts = " maxconn {maxconn}\n\n".format(
maxconn=constants.HAPROXY_MAX_MAXCONN)
if defaults is None:
defaults = ("defaults\n"
" log global\n"
" retries 3\n"
" option redispatch\n\n")
return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n"
"global\n"
" daemon\n"
" user nobody\n"
" log /dev/log local0\n"
" log /dev/log local1 notice\n"
" stats socket /var/lib/octavia/sample_listener_id_1.sock"
" mode 0666 level user\n" +
global_opts + defaults + peers + frontend + backend)
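# --- Hypothetical usage sketch (not part of the original fixtures) ----------
# The helpers above only build plain namedtuples and strings, so a test can
# call them directly; everything below uses only names defined in this module.
def _example_fixture_usage():
    listener = sample_listener_tuple(proto='TERMINATED_HTTPS', tls=True,
                                     sni=True, l7=True)
    assert listener.protocol_port == '443'
    assert len(listener.sni_containers) == 2
    assert len(listener.pools) == 2
    expected = sample_base_expected_config()
    assert expected.startswith('# Configuration for loadbalancer')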
| 36.868834
| 80
| 0.588895
|
1aaccb3dde6cd02c323d3e61ad1bc49aac627118
| 2,624
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_Class644.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class644.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class644.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=15
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
prog.h(input_qubit[0]) # number=12
prog.cz(input_qubit[1],input_qubit[0]) # number=13
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[3]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
    sample_shot = 5600
writefile = open("../data/startQiskit_Class644.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 28.215054
| 118
| 0.637195
|
f046c4e220c55ae653e07b2b9db9f3019507473f
| 609
|
py
|
Python
|
home/forms.py
|
Engerrs/ckan.org
|
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
|
[
"BSD-3-Clause"
] | null | null | null |
home/forms.py
|
Engerrs/ckan.org
|
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
|
[
"BSD-3-Clause"
] | null | null | null |
home/forms.py
|
Engerrs/ckan.org
|
a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158
|
[
"BSD-3-Clause"
] | null | null | null |
from allauth.account.forms import SignupForm
from django import forms
class CkanorgSignupForm(SignupForm):
first_name = forms.CharField(
max_length=30,
label='First Name',
widget=forms.TextInput(attrs={'placeholder': 'First Name'}))
last_name = forms.CharField(
max_length=30,
label='Last Name',
widget=forms.TextInput(attrs={'placeholder': 'Last Name'}))
def signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
return user
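# --- Hypothetical wiring sketch (belongs in settings.py, shown as comments) --
# django-allauth picks up a custom signup form via ACCOUNT_SIGNUP_FORM_CLASS;
# the dotted path below assumes the app label 'home' from this file's path.
# ACCOUNT_SIGNUP_FORM_CLASS = 'home.forms.CkanorgSignupForm'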
| 30.45
| 68
| 0.655172
|
0c372b4704388c6acfe140bfde3aa8e2768678a9
| 2,017
|
py
|
Python
|
networks/larflow/models/layer_utils.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | 2
|
2020-07-09T19:34:03.000Z
|
2021-06-21T23:09:23.000Z
|
networks/larflow/models/layer_utils.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | null | null | null |
networks/larflow/models/layer_utils.py
|
LArbys/ublarcvserver
|
02381c937f49a2eab2f754017ab431c3f6fa70d7
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import sparseconvnet as scn
def residual_block(m, ninputchs, noutputchs, leakiness=0.01, dimensions=2):
"""
    Residual Block
    Appends a residual block to a sequential module (m):
    the outputs of the [identity, 3x3+3x3] branches are added together.
inputs
------
m [scn.Sequential module] network to add layers to
ninputchs [int]: number of input channels
noutputchs [int]: number of output channels
leakiness [float]: leakiness of ReLU activations
dimensions [int]: dimensions of input sparse tensor
modifies
--------
m: adds layers
"""
inoutsame = ninputchs==noutputchs
m.add(scn.ConcatTable()
.add(scn.Identity() if inoutsame else scn.NetworkInNetwork(ninputchs, noutputchs, False))
.add(scn.Sequential()
.add(scn.BatchNormLeakyReLU(ninputchs,leakiness=leakiness))
.add(scn.SubmanifoldConvolution(dimensions, ninputchs, noutputchs, 3, False))
.add(scn.BatchNormLeakyReLU(noutputchs,leakiness=leakiness))
.add(scn.SubmanifoldConvolution(dimensions, noutputchs, noutputchs, 3, False)))
).add(scn.AddTable())
def create_resnet_layer(nreps, ninputchs, noutputchs,
downsample=[2,2]):
"""
    creates a sequence of layers (scn.Sequential)
formed by a repetition of residual blocks
inputs
------
    nreps     [int] number of times to repeat the residual block
ninputchs [int] input features to layer
noutputchs [int] output features from layer
outputs
-------
[scn.Sequential] module with residual blocks
"""
m = scn.Sequential()
    for iblock in range(nreps):
        if iblock == 0:
            # in the first repetition we change
            # the number of features from input to output
            residual_block(m, ninputchs, noutputchs)
        else:
            # in the other repetitions we do not change the number of features
            residual_block(m, noutputchs, noutputchs)
return m
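# --- Hypothetical usage sketch (assumes sparseconvnet is installed) ----------
# Shows how the two helpers above compose; only names defined in this module
# plus scn (already imported at the top) are used.
def _example_build():
    # one residual "layer": 2 repetitions, widening 16 -> 32 features
    layer = create_resnet_layer(nreps=2, ninputchs=16, noutputchs=32)
    # the same building block can also be appended to a larger scn.Sequential
    net = scn.Sequential()
    residual_block(net, ninputchs=16, noutputchs=32)  # NetworkInNetwork shortcut
    residual_block(net, ninputchs=32, noutputchs=32)  # identity shortcut
    return layer, net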
| 33.616667
| 99
| 0.654933
|
d410395b91ac61a9fd126dc454e3c98488a0b110
| 112
|
py
|
Python
|
leetcode/arrays/squares-of-a-sorted-array.py
|
AmrMKayid/KayAlgo
|
df6e2b5b0f74174d5c0950520f0c47b04212dfaa
|
[
"MIT"
] | 1
|
2019-02-11T13:29:32.000Z
|
2019-02-11T13:29:32.000Z
|
leetcode/arrays/squares-of-a-sorted-array.py
|
AmrMKayid/KayAlgo
|
df6e2b5b0f74174d5c0950520f0c47b04212dfaa
|
[
"MIT"
] | 1
|
2019-02-11T15:26:36.000Z
|
2019-02-11T15:26:36.000Z
|
leetcode/arrays/squares-of-a-sorted-array.py
|
AmrMKayid/KayAlgo
|
df6e2b5b0f74174d5c0950520f0c47b04212dfaa
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
return sorted([num**2 for num in A])
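# --- Alternative sketch (not part of the original solution) ------------------
# The one-liner above is O(n log n); because the input is sorted by value,
# the largest squares sit at the two ends, so a two-pointer pass gives O(n).
class SolutionTwoPointer:
    def sortedSquares(self, A: List[int]) -> List[int]:
        left, right = 0, len(A) - 1
        result = [0] * len(A)
        for i in range(len(A) - 1, -1, -1):
            if abs(A[left]) > abs(A[right]):
                result[i] = A[left] * A[left]
                left += 1
            else:
                result[i] = A[right] * A[right]
                right -= 1
        return result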
| 22.4
| 53
| 0.651786
|
c0de3727e1168aa42a8648b9ba4a8b0e5a7b0197
| 2,864
|
py
|
Python
|
Face_Recognition/face_recognition_live.py
|
sion9262/HouseOfHope
|
c19e0f9518225447a28590b6731865538ceef212
|
[
"MIT"
] | 1
|
2021-03-30T03:17:41.000Z
|
2021-03-30T03:17:41.000Z
|
Face_Recognition/face_recognition_live.py
|
sion9262/HouseOfHope
|
c19e0f9518225447a28590b6731865538ceef212
|
[
"MIT"
] | 6
|
2020-10-12T14:03:15.000Z
|
2020-12-06T00:03:13.000Z
|
Face_Recognition/face_recognition_live.py
|
sion9262/HouseOfHope
|
c19e0f9518225447a28590b6731865538ceef212
|
[
"MIT"
] | 1
|
2020-10-12T13:59:14.000Z
|
2020-10-12T13:59:14.000Z
|
import cv2
import RPi.GPIO as GPIO  # use the RPi.GPIO library as GPIO
from time import sleep  # use the sleep function from the time library
servoPin = 12  # servo pin
SERVO_MAX_DUTY = 12  # duty cycle at the servo's maximum (180 degree) position
SERVO_MIN_DUTY = 3  # duty cycle at the servo's minimum (0 degree) position
GPIO.setmode(GPIO.BOARD)  # GPIO setup
GPIO.setup(servoPin, GPIO.OUT)  # set the servo pin as an output
servo = GPIO.PWM(servoPin, 50)  # drive the servo pin in PWM mode at 50Hz (50Hz > 20ms period)
servo.start(0)  # start servo PWM with duty = 0; with duty 0 the servo does not move
def setServoPos(degree):
    # the angle cannot exceed 180 degrees
    if degree > 180:
        degree = 180
    # convert the angle (degree) into a duty cycle
    duty = SERVO_MIN_DUTY + (degree * (SERVO_MAX_DUTY - SERVO_MIN_DUTY) / 180.0)
    # print the duty value
    print("Degree: {} to {}(Duty)".format(degree, duty))
    # apply the new duty value to the servo PWM
servo.ChangeDutyCycle(duty)
import sys
import numpy as np
import face_recognition_predict
model = 'models/dnn_face_detector/res10_300x300_ssd_iter_140000_fp16.caffemodel'
config = 'models/dnn_face_detector/deploy.prototxt'
face_predict = face_recognition_predict.FaceRecognitionPredict()
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print('Camera open failed!')
sys.exit()
net = cv2.dnn.readNet(model, config)
if net.empty():
print('Net open failed!')
sys.exit()
while True:
ret, frame = cap.read()
if not ret:
break
blob = cv2.dnn.blobFromImage(frame, 1, (300, 300), (104, 177, 123))
net.setInput(blob)
out = net.forward()
detect = out[0, 0, :, :]
(h, w) = frame.shape[:2]
for i in range(detect.shape[0]):
confidence = detect[i, 2]
if confidence < 0.6:
break
        # the detect values are normalized, so multiply by the actual frame width and height
x1 = int(detect[i, 3] * w)
y1 = int(detect[i, 4] * h)
x2 = int(detect[i, 5] * w)
y2 = int(detect[i, 6] * h)
crop = frame[y1:y2, x1:x2]
name, class_probability = face_predict.predict(crop)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
if class_probability:
label = f'Name: {name} Probability {class_probability:4.2f}'
cv2.putText(frame, label, (x1, y1 - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
setServoPos(0)
            sleep(1)  # wait 1 second
            # move to the 90 degree position
            setServoPos(90)
            sleep(5)
            setServoPos(0)
            sleep(1)  # wait 1 second
else:
label = f'wait!!'
cv2.putText(frame, label, (x1, y1 - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.imshow('frame', frame)
if cv2.waitKey(1) == 27:
break
# stop the servo PWM
servo.stop()
# reset the GPIO mode
GPIO.cleanup()
cv2.destroyAllWindows()
| 18.597403
| 111
| 0.605098
|
98cc6c5aec7ee1a30b34d3afadee9415393ac1b0
| 311
|
py
|
Python
|
Ene-Jun-2021/perez-sanchez-jose-jahir/Segundo Parcial/Ejercicio-3/app/app.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ene-Jun-2021/perez-sanchez-jose-jahir/Segundo Parcial/Ejercicio-3/app/app.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ene-Jun-2021/perez-sanchez-jose-jahir/Segundo Parcial/Ejercicio-3/app/app.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
import redis
import json
redis_client = redis.Redis(host='redis', port=6379)
with open('monk-data.json') as contenido:
data = json.load(contenido)
for dato in data:
        key = f"{dato.get('id')}:{dato.get('first_name')}:{dato.get('last_name')}"
        redis_client.set(key, json.dumps(dato))
print(redis_client.keys())
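# --- Hypothetical read-back sketch (uses the key layout written above) -------
# A record can be fetched with the same "id:first_name:last_name" key and
# decoded with json.loads; the concrete key below is only an example.
def read_record(example_key='1:John:Doe'):
    raw = redis_client.get(example_key)
    return json.loads(raw) if raw is not None else None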
| 23.923077
| 69
| 0.684887
|
b095f560a9eca184727e641960df598195af6498
| 6,995
|
py
|
Python
|
configs/masktrackrcnn_ytvos/masktrackrcnn_r50_fpn_2x.py
|
Exely/masktrackrcnn-mmdet2.0
|
f181daa4bfcc15792cffecae7628167948335078
|
[
"Apache-2.0"
] | 6
|
2021-06-30T07:40:56.000Z
|
2021-12-14T15:48:49.000Z
|
configs/masktrackrcnn_ytvos/masktrackrcnn_r50_fpn_2x.py
|
Exely/masktrackrcnn-mmdet2.0
|
f181daa4bfcc15792cffecae7628167948335078
|
[
"Apache-2.0"
] | 6
|
2021-07-05T07:43:42.000Z
|
2022-01-06T07:31:01.000Z
|
configs/masktrackrcnn_ytvos/masktrackrcnn_r50_fpn_2x.py
|
Exely/masktrackrcnn-mmdet2.0
|
f181daa4bfcc15792cffecae7628167948335078
|
[
"Apache-2.0"
] | 4
|
2021-07-17T07:33:38.000Z
|
2021-12-15T16:18:29.000Z
|
model = dict(
type='MaskRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)
),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)
),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
track_head=dict(
type='TrackHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
match_coeff=[1.0, 2.0, 10]
),
),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.4),
max_per_img=100,
mask_thr_binary=0.5)))
dataset_type = 'YTVOSDataset'
root_dir = '/'
data_root = root_dir+'user_data/'
train_prefix_root = root_dir+'tcdata/PreRoundData/JPEGImages'
test_prefix_root = root_dir+'tcdata/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(640, 360), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_pids', 'gt_bboxes_ignore']),
]
vis_test_meta_keys = ('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg', 'is_first', 'video_id', 'frame_id')
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1280, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'], meta_keys=vis_test_meta_keys),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train_sub.json',
img_prefix=train_prefix_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train_sub.json',
img_prefix=train_prefix_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_test_sub.json',
img_prefix=test_prefix_root,
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[10, 15])
runner = dict(type='EpochBasedRunner', max_epochs=24)
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
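# --- Hypothetical loading sketch (kept as comments so the config stays inert) --
# mmdetection-style configs are plain Python modules read with mmcv.Config;
# the path below assumes the location given in this file's header.
# from mmcv import Config
# cfg = Config.fromfile(
#     'configs/masktrackrcnn_ytvos/masktrackrcnn_r50_fpn_2x.py')
# cfg.data.samples_per_gpu = 2   # e.g. shrink the batch for a smaller GPU
# print(cfg.model.type, cfg.runner.max_epochs)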
| 33.151659
| 112
| 0.561401
|
8cd09d4b4b574cb6ceca9121eda5d824b76c143a
| 526
|
py
|
Python
|
pages/templatetags/pages_tags.py
|
WebCampZg/conference-web
|
76ccae83924fdcd040d9280db5cf3a249d668606
|
[
"BSD-3-Clause"
] | 4
|
2015-03-03T17:48:14.000Z
|
2019-02-27T20:28:42.000Z
|
pages/templatetags/pages_tags.py
|
WebCampZg/conference-web
|
76ccae83924fdcd040d9280db5cf3a249d668606
|
[
"BSD-3-Clause"
] | 104
|
2015-02-25T18:09:15.000Z
|
2019-06-21T10:02:53.000Z
|
pages/templatetags/pages_tags.py
|
WebCampZg/conference-web
|
76ccae83924fdcd040d9280db5cf3a249d668606
|
[
"BSD-3-Clause"
] | 9
|
2015-03-01T18:59:14.000Z
|
2019-06-10T06:48:45.000Z
|
from django import template
from usergroups.models import UserGroup
register = template.Library()
TAG_USERGROUPS = "<!-- usergroups -->"
@register.filter()
def pages_substitute(content):
"""
Substitute tags in pages source.
"""
if TAG_USERGROUPS in content:
usergroups = UserGroup.objects.filter(is_active=True).order_by('name')
replacement = ", ".join(f"[{u.name}]({u.webpage_url})" for u in usergroups)
content = content.replace(TAG_USERGROUPS, replacement)
return content
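# --- Hypothetical usage sketch -----------------------------------------------
# In a template this runs as `{% load pages_tags %}` plus
# `{{ page.content|pages_substitute }}` (the context variable name is an
# assumption); since the filter is a plain function it can also be called
# directly:
def _example_usage():
    html_source = "Our community: <!-- usergroups -->"
    return pages_substitute(html_source)  # tag replaced with active user groups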
| 23.909091
| 83
| 0.690114
|
2707b5a560a657c00fcb215a84d91d066580fbd5
| 3,814
|
py
|
Python
|
apps/user/views.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
apps/user/views.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
apps/user/views.py
|
mrf-foundation/ckios_v1
|
3556a99ba5e01f00e137fd124903ace77d2cba28
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.auth.decorators import login_required
# Import the User UpdateForm and profile forms
from .forms import UserRegisterForm, UserUpdateForm, EditProfileForm, ProfileForm
from .models import Profile, UpdateProfileForm
from django.views import generic
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
messages.success(request, f'Your account has been created! You are now able to log in')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'registration/register.html', {'form': form})
# Update it here
def login(request):
if request.method=="POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
            auth_login(request, user)
messages.success(request, "Successfully Logged In")
return redirect("/myprofile")
else:
messages.error(request, "Invalid Credentials")
alert = True
return render(request, 'login.html', {'alert':alert})
return render(request, "login.html")
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = UpdateProfileForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('registration/profile') # Redirect back to profile page
else:
u_form = UserUpdateForm(instance=request.user)
        p_form = UpdateProfileForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'registration/profile.html', context)
def myprofile(request):
if request.method=="POST":
user = request.user
profile = Profile(user=user)
profile.save()
form = EditProfileForm(data=request.POST, files=request.FILES)
if form.is_valid():
form.save()
obj = form.instance
return render(request, "registration/profile.html",{'obj':obj})
else:
form=EditProfileForm(Profile)
return render(request, "registration/profile.html", {'form':form})
def edit_profile(request):
if request.method == 'POST':
form = ProfileForm(request.POST)
        print(request.POST)
if form.is_valid():
new_profile = Profile(
user = request.user,
bio = request.POST['bio'],
address = request.POST['address'],
age = request.POST['age']
)
new_profile.save()
return HttpResponseRedirect(reverse('user_public_profile', args=(request.user.username,)))
return render(request,'registration/edit_profile.html', {'form': form})
else:
form = UpdateProfileForm()
return render(request,'registration/edit_profile.html',
{'form': form})
class EditProfilePage(generic.CreateView):
model = Profile
template_name = 'registration/edit_profile_page.html'
fields = '__all__'
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
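# --- Hypothetical URL wiring sketch (belongs in urls.py; names and paths are
# assumptions, shown as comments so this module's behaviour is unchanged) -----
# from django.urls import path
# from . import views
# urlpatterns = [
#     path('register/', views.register, name='register'),
#     path('login/', views.login, name='login'),
#     path('profile/', views.profile, name='profile'),
#     path('myprofile/', views.myprofile, name='myprofile'),
#     path('edit-profile/', views.edit_profile, name='edit_profile'),
#     path('edit-profile-page/', views.EditProfilePage.as_view(),
#          name='edit_profile_page'),
# ]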
| 34.990826
| 102
| 0.615627
|
9e5a3b6d0ebf22556ed910a6a92f4cbc560a3774
| 58
|
py
|
Python
|
Reverse-and-complement-nucleic-acid-sequences-(DNA-RNA)-using-Python/task_1.py
|
AnuragAnalog/Guided-Projects-Coursera
|
938749ca3ef4529021b9323161be5d48b4dd26a9
|
[
"Apache-2.0"
] | 1
|
2021-04-11T17:04:58.000Z
|
2021-04-11T17:04:58.000Z
|
Reverse-and-complement-nucleic-acid-sequences-(DNA-RNA)-using-Python/task_1.py
|
AnuragAnalog/Guided-Projects-Coursera
|
938749ca3ef4529021b9323161be5d48b4dd26a9
|
[
"Apache-2.0"
] | null | null | null |
Reverse-and-complement-nucleic-acid-sequences-(DNA-RNA)-using-Python/task_1.py
|
AnuragAnalog/Guided-Projects-Coursera
|
938749ca3ef4529021b9323161be5d48b4dd26a9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This is a comment
x = 5
y = 12
| 9.666667
| 23
| 0.5
|
7ae2c74eef123afd9bc8f25456f2b96bc88bfe9e
| 27,267
|
py
|
Python
|
ironic/tests/unit/conductor/test_base_manager.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 350
|
2015-01-02T09:35:49.000Z
|
2022-03-28T09:25:59.000Z
|
ironic/tests/unit/conductor/test_base_manager.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 7
|
2015-05-04T16:12:41.000Z
|
2021-08-31T12:27:27.000Z
|
ironic/tests/unit/conductor/test_base_manager.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 333
|
2015-01-06T09:09:22.000Z
|
2022-02-20T08:11:40.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic BaseConductorManager."""
import collections
from unittest import mock
import uuid
import eventlet
import futurist
from futurist import periodics
from ironic_lib import mdns
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import base_manager
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic.drivers import fake_hardware
from ironic.drivers import generic
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic import objects
from ironic.objects import fields
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
@mgr_utils.mock_record_keepalive
class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_start_registers_conductor(self):
self.assertRaises(exception.ConductorNotFound,
objects.Conductor.get_by_hostname,
self.context, self.hostname)
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
def test_start_clears_conductor_locks(self):
node = obj_utils.create_test_node(self.context,
reservation=self.hostname)
node.save()
self._start_service()
node.refresh()
self.assertIsNone(node.reservation)
def test_stop_clears_conductor_locks(self):
node = obj_utils.create_test_node(self.context,
reservation=self.hostname)
node.save()
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host()
node.refresh()
self.assertIsNone(node.reservation)
def test_stop_unregisters_conductor(self):
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host()
self.assertRaises(exception.ConductorNotFound,
objects.Conductor.get_by_hostname,
self.context, self.hostname)
def test_stop_doesnt_unregister_conductor(self):
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host(deregister=False)
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
@mock.patch.object(manager.ConductorManager, 'init_host', autospec=True)
def test_stop_uninitialized_conductor(self, mock_init):
self._start_service()
self.service.del_host()
@mock.patch.object(driver_factory.HardwareTypesFactory, '__getitem__',
lambda *args: mock.MagicMock())
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_start_registers_driver_names(self, mock_def_iface):
init_names = ['fake1', 'fake2']
restart_names = ['fake3', 'fake4']
mock_def_iface.return_value = 'fake'
df = driver_factory.HardwareTypesFactory()
with mock.patch.object(df._extension_manager, 'names',
autospec=True) as mock_names:
# verify driver names are registered
self.config(enabled_hardware_types=init_names)
mock_names.return_value = init_names
self._start_service()
res = objects.Conductor.get_by_hostname(self.context,
self.hostname)
self.assertEqual(init_names, res['drivers'])
self._stop_service()
# verify that restart registers new driver names
self.config(enabled_hardware_types=restart_names)
mock_names.return_value = restart_names
self._start_service()
res = objects.Conductor.get_by_hostname(self.context,
self.hostname)
self.assertEqual(restart_names, res['drivers'])
@mock.patch.object(base_manager.BaseConductorManager,
'_register_and_validate_hardware_interfaces',
autospec=True)
@mock.patch.object(driver_factory, 'all_interfaces', autospec=True)
@mock.patch.object(driver_factory, 'hardware_types', autospec=True)
def test_start_registers_driver_specific_tasks(self,
mock_hw_types, mock_ifaces,
mock_reg_hw_ifaces):
class TestHwType(generic.GenericHardware):
@property
def supported_management_interfaces(self):
return []
@property
def supported_power_interfaces(self):
return []
# This should not be collected, since we don't collect periodic
# tasks from hardware types
@periodics.periodic(spacing=100500)
def task(self):
pass
class TestInterface(object):
@periodics.periodic(spacing=100500)
def iface(self):
pass
class TestInterface2(object):
@periodics.periodic(spacing=100500)
def iface(self):
pass
hw_type = TestHwType()
iface1 = TestInterface()
iface2 = TestInterface2()
expected = [iface1.iface, iface2.iface]
mock_hw_types.return_value = {'fake1': hw_type}
mock_ifaces.return_value = {
'management': {'fake1': iface1},
'power': {'fake2': iface2}
}
self._start_service(start_periodic_tasks=True)
tasks = {c[0] for c in self.service._periodic_task_callables}
for item in expected:
self.assertTrue(periodics.is_periodic(item))
self.assertIn(item, tasks)
# no periodic tasks from the hardware type
self.assertTrue(periodics.is_periodic(hw_type.task))
self.assertNotIn(hw_type.task, tasks)
@mock.patch.object(driver_factory.HardwareTypesFactory, '__init__',
autospec=True)
def test_start_fails_on_missing_driver(self, mock_df):
mock_df.side_effect = exception.DriverNotFound('test')
with mock.patch.object(self.dbapi, 'register_conductor',
autospec=True) as mock_reg:
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(mock_df.called)
self.assertFalse(mock_reg.called)
def test_start_fails_on_no_enabled_interfaces(self):
self.config(enabled_boot_interfaces=[])
self.assertRaisesRegex(exception.ConfigInvalid,
'options enabled_boot_interfaces',
self.service.init_host)
@mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(driver_factory, 'HardwareTypesFactory', autospec=True)
def test_start_fails_on_hw_types(self, ht_mock, log_mock):
driver_factory_mock = mock.MagicMock(names=[])
ht_mock.return_value = driver_factory_mock
self.assertRaises(exception.NoDriversLoaded,
self.service.init_host)
self.assertTrue(log_mock.error.called)
ht_mock.assert_called_once_with()
@mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(base_manager.BaseConductorManager,
'_register_and_validate_hardware_interfaces',
autospec=True)
@mock.patch.object(base_manager.BaseConductorManager, 'del_host',
autospec=True)
def test_start_fails_hw_type_register(self, del_mock, reg_mock, log_mock):
reg_mock.side_effect = exception.DriverNotFound('hw-type')
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(log_mock.error.called)
del_mock.assert_called_once()
def test_prevent_double_start(self):
self._start_service()
self.assertRaisesRegex(RuntimeError, 'already running',
self.service.init_host)
def test_start_recover_nodes_stuck(self):
state_trans = [
(states.DEPLOYING, states.DEPLOYFAIL),
(states.CLEANING, states.CLEANFAIL),
(states.VERIFYING, states.ENROLL),
(states.INSPECTING, states.INSPECTFAIL),
(states.ADOPTING, states.ADOPTFAIL),
(states.RESCUING, states.RESCUEFAIL),
(states.UNRESCUING, states.UNRESCUEFAIL),
(states.DELETING, states.ERROR),
]
nodes = [obj_utils.create_test_node(self.context, uuid=uuid.uuid4(),
driver='fake-hardware',
provision_state=state[0])
for state in state_trans]
self._start_service()
for node, state in zip(nodes, state_trans):
node.refresh()
self.assertEqual(state[1], node.provision_state,
'Test failed when recovering from %s' % state[0])
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test_warning_on_low_workers_pool(self, log_mock):
CONF.set_override('workers_pool_size', 3, 'conductor')
self._start_service()
self.assertTrue(log_mock.warning.called)
@mock.patch.object(eventlet.greenpool.GreenPool, 'waitall', autospec=True)
def test_del_host_waits_on_workerpool(self, wait_mock):
self._start_service()
self.service.del_host()
self.assertTrue(wait_mock.called)
def test_conductor_shutdown_flag(self):
self._start_service()
self.assertFalse(self.service._shutdown)
self.service.del_host()
self.assertTrue(self.service._shutdown)
@mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
@mock.patch.object(mdns, 'Zeroconf', autospec=True)
def test_start_with_mdns(self, mock_zc, mock_api_url):
CONF.set_override('debug', False)
CONF.set_override('enable_mdns', True, 'conductor')
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
mock_zc.return_value.register_service.assert_called_once_with(
'baremetal',
mock_api_url.return_value,
params={})
@mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
@mock.patch.object(mdns, 'Zeroconf', autospec=True)
def test_start_with_mdns_and_debug(self, mock_zc, mock_api_url):
CONF.set_override('debug', True)
CONF.set_override('enable_mdns', True, 'conductor')
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
mock_zc.return_value.register_service.assert_called_once_with(
'baremetal',
mock_api_url.return_value,
params={'ipa_debug': True})
def test_del_host_with_mdns(self):
mock_zc = mock.Mock(spec=mdns.Zeroconf)
self.service._zeroconf = mock_zc
self._start_service()
self.service.del_host()
mock_zc.close.assert_called_once_with()
self.assertIsNone(self.service._zeroconf)
@mock.patch.object(dbapi, 'get_instance', autospec=True)
def test_start_dbapi_single_call(self, mock_dbapi):
self._start_service()
# NOTE(TheJulia): This seems like it should only be 1, but
        # the hash ring initialization pulls its own database connection
        # instance, which is likely a good thing, thus this is 2 instead of
# 3 without reuse of the database connection.
self.assertEqual(2, mock_dbapi.call_count)
class CheckInterfacesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__check_enabled_interfaces_success(self):
base_manager._check_enabled_interfaces()
def test__check_enabled_interfaces_failure(self):
self.config(enabled_boot_interfaces=[])
self.assertRaisesRegex(exception.ConfigInvalid,
'options enabled_boot_interfaces',
base_manager._check_enabled_interfaces)
class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__conductor_service_record_keepalive(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, True]
self.service._conductor_service_record_keepalive()
mock_touch.assert_called_once_with(self.hostname)
def test__conductor_service_record_keepalive_failed_db_conn(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
mock_touch.side_effect = [None, db_exception.DBConnectionError(),
None]
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
def test__conductor_service_record_keepalive_failed_error(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
mock_touch.side_effect = [None, Exception(),
None]
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
class ManagerSpawnWorkerTestCase(tests_base.TestCase):
def setUp(self):
super(ManagerSpawnWorkerTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.executor = mock.Mock(spec=futurist.GreenThreadPoolExecutor)
self.service._executor = self.executor
def test__spawn_worker(self):
self.service._spawn_worker('fake', 1, 2, foo='bar', cat='meow')
self.executor.submit.assert_called_once_with(
'fake', 1, 2, foo='bar', cat='meow')
def test__spawn_worker_none_free(self):
self.executor.submit.side_effect = futurist.RejectedSubmission()
self.assertRaises(exception.NoFreeConductorWorker,
self.service._spawn_worker, 'fake')
@mock.patch.object(objects.Conductor, 'unregister_all_hardware_interfaces',
autospec=True)
@mock.patch.object(objects.Conductor, 'register_hardware_interfaces',
autospec=True)
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
@mock.patch.object(driver_factory, 'enabled_supported_interfaces',
autospec=True)
@mgr_utils.mock_record_keepalive
class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def setUp(self):
super(RegisterInterfacesTestCase, self).setUp()
self._start_service()
def test__register_and_validate_hardware_interfaces(self,
esi_mock,
default_mock,
reg_mock,
unreg_mock):
# these must be same order as esi_mock side effect
hardware_types = collections.OrderedDict((
('fake-hardware', fake_hardware.FakeHardware()),
('manual-management', generic.ManualManagementHardware),
))
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['direct', 'ansible']),
)),
collections.OrderedDict((
('management', ['fake']),
('deploy', ['direct', 'fake']),
)),
]
default_mock.side_effect = ('fake', 'direct', 'fake', 'direct')
expected_calls = [
mock.call(
mock.ANY,
[{'hardware_type': 'fake-hardware',
'interface_type': 'management',
'interface_name': 'fake',
'default': True},
{'hardware_type': 'fake-hardware',
'interface_type': 'management',
'interface_name': 'noop',
'default': False},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'ansible',
'default': False},
{'hardware_type': 'manual-management',
'interface_type': 'management',
'interface_name': 'fake',
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
'interface_name': 'fake',
'default': False}]
)
]
self.service._register_and_validate_hardware_interfaces(hardware_types)
unreg_mock.assert_called_once_with(mock.ANY)
# we're iterating over dicts, don't worry about order
reg_mock.assert_has_calls(expected_calls)
def test__register_and_validate_no_valid_default(self,
esi_mock,
default_mock,
reg_mock,
unreg_mock):
# these must be same order as esi_mock side effect
hardware_types = collections.OrderedDict((
('fake-hardware', fake_hardware.FakeHardware()),
))
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['direct', 'ansible']),
)),
]
default_mock.side_effect = exception.NoValidDefaultForInterface("boo")
self.assertRaises(
exception.NoValidDefaultForInterface,
self.service._register_and_validate_hardware_interfaces,
hardware_types)
default_mock.assert_called_once_with(
hardware_types['fake-hardware'],
mock.ANY, driver_name='fake-hardware')
unreg_mock.assert_called_once_with(mock.ANY)
self.assertFalse(reg_mock.called)
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__start_consoles(self, mock_notify, mock_start_console):
obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
console_enabled=True
)
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware'
)
self._start_service()
self.service._start_consoles(self.context)
self.assertEqual(2, mock_start_console.call_count)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.END)])
def test__start_consoles_no_console_enabled(self, mock_notify,
mock_start_console):
obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=False)
self._start_service()
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
self.assertFalse(mock_notify.called)
def test__start_consoles_failed(self, mock_notify, mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
self._start_service()
mock_start_console.side_effect = Exception()
self.service._start_consoles(self.context)
mock_start_console.assert_called_once_with(mock.ANY, mock.ANY)
test_node.refresh()
self.assertFalse(test_node.console_enabled)
self.assertIsNotNone(test_node.last_error)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.ERROR)])
history = objects.NodeHistory.list_by_node_id(self.context,
test_node.id)
entry = history[0]
self.assertEqual('startup failure', entry['event_type'])
self.assertEqual('ERROR', entry['severity'])
self.assertIsNotNone(entry['event'])
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_locked(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True,
reservation='fake-host')
self._start_service()
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
test_node.refresh()
self.assertTrue(test_node.console_enabled)
self.assertIsNone(test_node.last_error)
self.assertTrue(log_mock.warning.called)
self.assertFalse(mock_notify.called)
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_not_found(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
self._start_service()
with mock.patch.object(task_manager, 'acquire',
autospec=True) as mock_acquire:
mock_acquire.side_effect = exception.NodeNotFound(node='not found')
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
test_node.refresh()
self.assertTrue(test_node.console_enabled)
self.assertIsNone(test_node.last_error)
self.assertTrue(log_mock.warning.called)
self.assertFalse(mock_notify.called)
class MiscTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(MiscTestCase, self).setUp()
self._start_service()
def test__fail_transient_state(self):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.DEPLOYING)
self.service._fail_transient_state(states.DEPLOYING, 'unknown err')
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
def test__fail_transient_state_maintenance(self):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
maintenance=True,
provision_state=states.DEPLOYING)
self.service._fail_transient_state(states.DEPLOYING, 'unknown err')
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
history = objects.NodeHistory.list_by_node_id(self.context,
node.id)
entry = history[0]
self.assertEqual('transition', entry['event_type'])
self.assertEqual('ERROR', entry['severity'])
self.assertEqual('unknown err', entry['event'])
| 44.408795
| 79
| 0.614442
|
e13d84d27be907d544982d7c696e231b4b13754f
| 32,169
|
py
|
Python
|
vip_hci/pca/svd.py
|
Lewis-Picker/VIP
|
494190d124dd19e3494b0825b4c82c37d8207074
|
[
"MIT"
] | 2
|
2017-02-10T02:02:17.000Z
|
2018-02-16T16:07:24.000Z
|
vip_hci/pca/svd.py
|
Lewis-Picker/VIP
|
494190d124dd19e3494b0825b4c82c37d8207074
|
[
"MIT"
] | null | null | null |
vip_hci/pca/svd.py
|
Lewis-Picker/VIP
|
494190d124dd19e3494b0825b4c82c37d8207074
|
[
"MIT"
] | 2
|
2020-07-18T19:31:03.000Z
|
2022-01-24T21:11:20.000Z
|
#! /usr/bin/env python
"""
Module with functions for computing SVDs.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['SVDecomposer']
import warnings
try:
import cupy
no_cupy = False
except ImportError:
msg = "Cupy not found. Do you have a GPU? Consider setting up a CUDA "
msg += "environment and installing cupy >= 2.0.0"
warnings.warn(msg, ImportWarning)
no_cupy = True
try:
import torch
no_torch = False
except ImportError:
msg = "Pytorch not found. Do you have a GPU? Consider setting up a CUDA "
msg += "environment and installing pytorch"
warnings.warn(msg, ImportWarning)
no_torch = True
import numpy as np
from numpy import linalg
from matplotlib import pyplot as plt
from scipy.sparse.linalg import svds
from sklearn.decomposition import randomized_svd
from sklearn.metrics import mean_squared_error as MSE
from sklearn.metrics import mean_absolute_error as MAE
from sklearn.utils import check_random_state
from pandas import DataFrame
from ..conf import timing, time_ini, sep, Progressbar
from ..var import matrix_scaling, prepare_matrix
from ..preproc import check_scal_vector, cube_crop_frames
from ..preproc import cube_rescaling_wavelengths as scwave
from ..conf import vip_figsize, check_array
class SVDecomposer:
"""
Class for SVD decomposition of 2d, 3d or 4d HCI arrays.
Parameters
----------
data : numpy ndarray
Input array (2d, 3d or 4d).
mode : {'fullfr', 'annular'}, optional
Whether to use the whole frames or a single annulus.
inrad : None or int, optional
[mode='annular'] Inner radius.
outrad : None or int, optional
[mode='annular'] Outer radius.
svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy',
'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str optional
Switch for the SVD method/library to be used.
``lapack``: uses the LAPACK linear algebra library through Numpy
and it is the most conventional way of computing the SVD
(deterministic result computed on CPU).
``arpack``: uses the ARPACK Fortran libraries accessible through
Scipy (computation on CPU).
``eigen``: computes the singular vectors through the
eigendecomposition of the covariance M.M' (computation on CPU).
``randsvd``: uses the randomized_svd algorithm implemented in
Sklearn (computation on CPU).
``cupy``: uses the Cupy library for GPU computation of the SVD as in
        the LAPACK version.
        ``eigencupy``: offers the same method as with the ``eigen`` option
        but on GPU (through Cupy).
        ``randcupy``: is an adaptation of the randomized_svd algorithm,
        where all the computations are done on a GPU (through Cupy).
        ``pytorch``: uses the Pytorch library for GPU computation of the SVD.
``eigenpytorch``: offers the same method as with the ``eigen``
option but on GPU (through Pytorch).
``randpytorch``: is an adaptation of the randomized_svd algorithm,
where all the linear algebra computations are done on a GPU
(through Pytorch).
scaling : {None, "temp-mean", spat-mean", "temp-standard",
"spat-standard"}, None or str optional
Pixel-wise scaling mode using ``sklearn.preprocessing.scale``
function. If set to None, the input matrix is left untouched.
Otherwise:
``temp-mean``: temporal px-wise mean is subtracted.
``spat-mean``: spatial mean is subtracted.
``temp-standard``: temporal mean centering plus scaling pixel values
to unit variance.
``spat-standard``: spatial mean centering plus scaling pixel values
to unit variance.
wavelengths : numpy ndarray, optional
Wavelengths in case of a 4d HCI cube. These are used to compute
scaling factors for re-scaling the spectral channels and aligning
the speckles.
verbose : bool, optional
If True intermediate messages and timing are printed.
Notes
-----
    For more info on the CEVR, see the "Get variance explained by singular
    values" section in:
    https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/pca.py
"""
def __init__(self, data, mode='fullfr', inrad=None, outrad=None,
svd_mode='lapack', scaling='temp-standard', wavelengths=None,
verbose=True):
"""
"""
check_array(data, (2, 3, 4), msg='data')
self.data = data
self.mode = mode
self.svd_mode = svd_mode
self.inrad = inrad
self.outrad = outrad
self.scaling = scaling
self.wavelengths = wavelengths
self.verbose = verbose
if self.mode == 'annular':
if inrad is None:
raise ValueError("`inrad` must be a positive integer")
if outrad is None:
raise ValueError("`outrad` must be a positive integer")
if self.verbose:
print(sep)
def generate_matrix(self):
"""
Generate a matrix from the input ``data``. Pixel values in the matrix
are scaled. Depending on ``mode``, the matrix can come from an annulus
instead of the whole frames.
"""
start_time = time_ini(False)
if self.data.ndim == 2:
print("`data` is already a 2d array")
self.matrix = matrix_scaling(self.data, self.scaling)
elif self.data.ndim in [3, 4]:
if self.data.ndim == 3:
cube_ = self.data
elif self.data.ndim == 4:
if self.wavelengths is None:
raise ValueError("`wavelengths` must be provided when "
"`data` is a 4D array")
z, n_frames, y_in, x_in = self.data.shape
scale_list = check_scal_vector(self.wavelengths)
if not scale_list.shape[0] == z:
raise ValueError("`wavelengths` length is {} instead of "
"{}".format(scale_list.shape[0], z))
big_cube = []
# Rescaling the spectral channels to align the speckles
if self.verbose:
print('Rescaling the spectral channels to align the '
'speckles')
for i in Progressbar(range(n_frames), verbose=self.verbose):
cube_resc = scwave(self.data[:, i, :, :], scale_list)[0]
cube_resc = cube_crop_frames(cube_resc, size=y_in,
verbose=False)
big_cube.append(cube_resc)
big_cube = np.array(big_cube)
cube_ = big_cube.reshape(z * n_frames, y_in, x_in)
self.cube4dto3d_shape = cube_.shape
result = prepare_matrix(cube_, self.scaling, mode=self.mode,
inner_radius=self.inrad,
outer_radius=self.outrad,
verbose=self.verbose)
if self.mode == 'annular':
self.matrix = result[0]
pxind = result[1]
self.yy, self.xx = pxind # pixel coords in the annulus
elif self.mode == 'fullfr':
self.matrix = result
if self.verbose:
timing(start_time)
def run(self):
"""
Decompose the input data.
"""
start_time = time_ini(False)
if not hasattr(self, 'matrix'):
self.generate_matrix()
max_pcs = min(self.matrix.shape[0], self.matrix.shape[1])
results = svd_wrapper(self.matrix, self.svd_mode, max_pcs,
verbose=self.verbose, full_output=True)
if len(results) == 3:
self.u, self.s, self.v = results
elif len(results) == 2:
self.s, self.v = results
if self.verbose:
timing(start_time)
def get_cevr(self, ncomp_list=None, plot=True, plot_save=False, plot_dpi=90,
plot_truncation=None):
"""
Calculate the cumulative explained variance ratio for the SVD of a
cube/matrix (either full frames or a single annulus could be used).
Parameters
----------
ncomp_list : None, list or tuple, optional
If provided the list is used to filter the vector of CEVR.
plot : bool, optional
If True, the CEVR is plotted.
plot_save : bool, optional
If True, the plot is saved as ./figure.pdf.
plot_dpi : int, optional
The DPI of the figure.
plot_truncation : None or int, optional
        If provided, it creates a second panel in the plot, focusing on the
CEVR curve up to ``plot_truncation`` components.
Returns
-------
df_allks : pandas dataframe
        [ncomp_list is None] A table with the explained variance ratio and
the CEVR for all ncomps.
df_klist : pandas dataframe
[ncomp_list is not None] A table with the ncomp_list, the explained
        variance ratio and the CEVR.
"""
start_time = time_ini(False)
if not hasattr(self, 'v'):
self.run()
if self.verbose:
print("Computing the cumulative explained variance ratios")
self.ncomp_list = ncomp_list
exp_var = (self.s ** 2) / (self.s.shape[0] - 1)
full_var = np.sum(exp_var)
# % of variance explained by each PC
self.explained_variance_ratio = exp_var / full_var
self.cevr = np.cumsum(self.explained_variance_ratio)
df_allks = DataFrame({'ncomp': range(1, self.s.shape[0] + 1),
'expvar_ratio': self.explained_variance_ratio,
'cevr': self.cevr})
self.table_cevr = df_allks
if plot:
            lw = 2
alpha = 0.4
fig = plt.figure(figsize=vip_figsize, dpi=plot_dpi)
fig.subplots_adjust(wspace=0.4)
ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
ax1.step(range(self.explained_variance_ratio.shape[0]),
self.explained_variance_ratio, alpha=alpha, where='mid',
label='Individual EVR', lw=lw)
ax1.plot(self.cevr, '.-', alpha=alpha,
label='Cumulative EVR', lw=lw)
ax1.legend(loc='best', frameon=False, fontsize='medium')
ax1.set_ylabel('Explained variance ratio (EVR)')
ax1.set_xlabel('Principal components')
ax1.grid(linestyle='solid', alpha=0.2)
ax1.set_xlim(-10, self.explained_variance_ratio.shape[0] + 10)
ax1.set_ylim(0, 1)
if plot_truncation is not None:
ax2 = plt.subplot2grid((1, 3), (0, 2), colspan=1)
ax2.step(range(plot_truncation),
self.explained_variance_ratio[:plot_truncation],
alpha=alpha, where='mid', lw=lw)
ax2.plot(self.cevr[:plot_truncation], '.-', alpha=alpha, lw=lw)
ax2.set_xlabel('Principal components')
ax2.grid(linestyle='solid', alpha=0.2)
ax2.set_xlim(-2, plot_truncation + 2)
ax2.set_ylim(0, 1)
if plot_save:
plt.savefig('figure.pdf', dpi=300, bbox_inches='tight')
if self.ncomp_list is not None:
cevr_klist = []
expvar_ratio_klist = []
for k in self.ncomp_list:
cevr_klist.append(self.cevr[k - 1])
expvar_ratio_klist.append(self.explained_variance_ratio[k - 1])
df_klist = DataFrame({'ncomp': self.ncomp_list,
'exp_var_ratio': expvar_ratio_klist,
'cevr': cevr_klist})
self.cevr_ncomp = cevr_klist
self.table_cevr_ncomp = df_klist
if self.verbose:
timing(start_time)
return df_klist
else:
if self.verbose:
timing(start_time)
return df_allks
def cevr_to_ncomp(self, cevr=0.9):
"""
Infer the number of principal components for a given CEVR.
Parameters
----------
cevr : float or tuple of floats, optional
The desired CEVR.
Returns
-------
ncomp : int or list of ints
The found number(s) of PCs.
"""
if not hasattr(self, 'cevr'):
self.get_cevr(plot=False)
if isinstance(cevr, float):
ncomp = np.searchsorted(self.cevr, cevr) + 1
elif isinstance(cevr, tuple):
ncomp = [np.searchsorted(self.cevr, c) + 1 for c in cevr]
return ncomp
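# Hedged usage sketch (not part of the original module): driving SVDecomposer
# end-to-end on a small random ADI cube; the shapes and the 95% threshold are
# illustrative only.
def _svdecomposer_usage_sketch():
    cube = np.random.normal(size=(20, 40, 40))
    dec = SVDecomposer(cube, mode='fullfr', svd_mode='lapack',
                       scaling='temp-standard', verbose=False)
    table = dec.get_cevr(plot=False)      # CEVR table for every possible ncomp
    ncomp = dec.cevr_to_ncomp(cevr=0.95)  # smallest ncomp reaching 95% CEVR
    return table, ncomp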
def svd_wrapper(matrix, mode, ncomp, verbose, full_output=False,
random_state=None, to_numpy=True):
""" Wrapper for different SVD libraries (CPU and GPU).
Parameters
----------
matrix : numpy ndarray, 2d
2d input matrix.
mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy',
'randcupy', 'pytorch', 'eigenpytorch', 'randpytorch'}, str optional
Switch for the SVD method/library to be used.
``lapack``: uses the LAPACK linear algebra library through Numpy
and it is the most conventional way of computing the SVD
(deterministic result computed on CPU).
``arpack``: uses the ARPACK Fortran libraries accessible through
Scipy (computation on CPU).
``eigen``: computes the singular vectors through the
eigendecomposition of the covariance M.M' (computation on CPU).
``randsvd``: uses the randomized_svd algorithm implemented in
Sklearn (computation on CPU).
``cupy``: uses the Cupy library for GPU computation of the SVD as in
        the LAPACK version.
        ``eigencupy``: offers the same method as with the ``eigen`` option
        but on GPU (through Cupy).
        ``randcupy``: is an adaptation of the randomized_svd algorithm,
        where all the computations are done on a GPU (through Cupy).
        ``pytorch``: uses the Pytorch library for GPU computation of the SVD.
``eigenpytorch``: offers the same method as with the ``eigen``
option but on GPU (through Pytorch).
``randpytorch``: is an adaptation of the randomized_svd algorithm,
where all the linear algebra computations are done on a GPU
(through Pytorch).
ncomp : int
Number of singular vectors to be obtained. In the cases when the full
SVD is computed (LAPACK, ARPACK, EIGEN, CUPY), the matrix of singular
vectors is truncated.
verbose: bool
If True intermediate information is printed out.
full_output : bool optional
If True the 3 terms of the SVD factorization are returned. If ``mode``
is eigen then only S and V are returned.
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator.
If RandomState instance, random_state is the random number generator.
If None, the random number generator is the RandomState instance used
by np.random. Used for ``randsvd`` mode.
to_numpy : bool, optional
If True (by default) the arrays computed in GPU are transferred from
VRAM and converted to numpy ndarrays.
Returns
-------
V : numpy ndarray
The right singular vectors of the input matrix. If ``full_output`` is
True it returns the left and right singular vectors and the singular
values of the input matrix. If ``mode`` is set to eigen then only S and
V are returned.
References
----------
* For ``lapack`` SVD mode see:
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html
http://www.netlib.org/lapack/
* For ``eigen`` mode see:
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.eigh.html
* For ``arpack`` SVD mode see:
https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.sparse.linalg.svds.html
http://www.caam.rice.edu/software/ARPACK/
* For ``randsvd`` SVD mode see:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* For ``cupy`` SVD mode see:
https://docs-cupy.chainer.org/en/stable/reference/generated/cupy.linalg.svd.html
* For ``eigencupy`` mode see:
https://docs-cupy.chainer.org/en/master/reference/generated/cupy.linalg.eigh.html
* For ``pytorch`` SVD mode see:
http://pytorch.org/docs/master/torch.html#torch.svd
* For ``eigenpytorch`` mode see:
http://pytorch.org/docs/master/torch.html#torch.eig
"""
if matrix.ndim != 2:
raise TypeError('Input matrix is not a 2d array')
if ncomp > min(matrix.shape[0], matrix.shape[1]):
msg = '{} PCs cannot be obtained from a matrix with size [{},{}].'
msg += ' Increase the size of the patches or request less PCs'
raise RuntimeError(msg.format(ncomp, matrix.shape[0], matrix.shape[1]))
if mode == 'eigen':
# building C as np.dot(matrix.T,matrix) is slower and takes more memory
C = np.dot(matrix, matrix.T) # covariance matrix
e, EV = linalg.eigh(C) # EVals and EVs
pc = np.dot(EV.T, matrix) # PCs using a compact trick when cov is MM'
V = pc[::-1] # reverse since we need the last EVs
S = np.sqrt(np.abs(e)) # SVals = sqrt(EVals)
S = S[::-1] # reverse since EVals go in increasing order
for i in range(V.shape[1]):
V[:, i] /= S # scaling EVs by the square root of EVals
V = V[:ncomp]
if verbose:
print('Done PCA with numpy linalg eigh functions')
elif mode == 'lapack':
# n_frames is usually smaller than n_pixels. In this setting taking
# the SVD of M' and keeping the left (transposed) SVs is faster than
# taking the SVD of M (right SVs)
U, S, V = linalg.svd(matrix.T, full_matrices=False)
V = V[:ncomp] # we cut projection matrix according to the # of PCs
U = U[:, :ncomp]
S = S[:ncomp]
if verbose:
print('Done SVD/PCA with numpy SVD (LAPACK)')
elif mode == 'arpack':
U, S, V = svds(matrix, k=ncomp)
if verbose:
print('Done SVD/PCA with scipy sparse SVD (ARPACK)')
elif mode == 'randsvd':
U, S, V = randomized_svd(matrix, n_components=ncomp, n_iter=2,
transpose='auto', random_state=random_state)
if verbose:
print('Done SVD/PCA with randomized SVD')
elif mode == 'cupy':
if no_cupy:
raise RuntimeError('Cupy is not installed')
a_gpu = cupy.array(matrix)
a_gpu = cupy.asarray(a_gpu) # move the data to the current device
u_gpu, s_gpu, vh_gpu = cupy.linalg.svd(a_gpu, full_matrices=True,
compute_uv=True)
V = vh_gpu[:ncomp]
if to_numpy:
V = cupy.asnumpy(V)
if full_output:
S = s_gpu[:ncomp]
if to_numpy:
S = cupy.asnumpy(S)
U = u_gpu[:, :ncomp]
if to_numpy:
U = cupy.asnumpy(U)
if verbose:
print('Done SVD/PCA with cupy (GPU)')
elif mode == 'randcupy':
if no_cupy:
raise RuntimeError('Cupy is not installed')
U, S, V = randomized_svd_gpu(matrix, ncomp, n_iter=2, lib='cupy')
if to_numpy:
V = cupy.asnumpy(V)
S = cupy.asnumpy(S)
U = cupy.asnumpy(U)
if verbose:
print('Done randomized SVD/PCA with cupy (GPU)')
elif mode == 'eigencupy':
if no_cupy:
raise RuntimeError('Cupy is not installed')
a_gpu = cupy.array(matrix)
a_gpu = cupy.asarray(a_gpu) # move the data to the current device
C = cupy.dot(a_gpu, a_gpu.T) # covariance matrix
e, EV = cupy.linalg.eigh(C) # eigenvalues and eigenvectors
pc = cupy.dot(EV.T, a_gpu) # using a compact trick when cov is MM'
V = pc[::-1] # reverse to get last eigenvectors
S = cupy.sqrt(e)[::-1] # reverse since EVals go in increasing order
for i in range(V.shape[1]):
V[:, i] /= S # scaling by the square root of eigvals
V = V[:ncomp]
if to_numpy:
V = cupy.asnumpy(V)
if verbose:
print('Done PCA with cupy eigh function (GPU)')
elif mode == 'pytorch':
if no_torch:
raise RuntimeError('Pytorch is not installed')
a_gpu = torch.Tensor.cuda(torch.from_numpy(matrix.astype('float32').T))
u_gpu, s_gpu, vh_gpu = torch.svd(a_gpu)
V = vh_gpu[:ncomp]
S = s_gpu[:ncomp]
U = torch.transpose(u_gpu, 0, 1)[:ncomp]
if to_numpy:
V = np.array(V)
S = np.array(S)
U = np.array(U)
if verbose:
print('Done SVD/PCA with pytorch (GPU)')
elif mode == 'eigenpytorch':
if no_torch:
raise RuntimeError('Pytorch is not installed')
a_gpu = torch.Tensor.cuda(torch.from_numpy(matrix.astype('float32')))
C = torch.mm(a_gpu, torch.transpose(a_gpu, 0, 1))
e, EV = torch.eig(C, eigenvectors=True)
V = torch.mm(torch.transpose(EV, 0, 1), a_gpu)
S = torch.sqrt(e[:, 0])
for i in range(V.shape[1]):
V[:, i] /= S
V = V[:ncomp]
if to_numpy:
V = np.array(V)
if verbose:
print('Done PCA with pytorch eig function')
elif mode == 'randpytorch':
if no_torch:
raise RuntimeError('Pytorch is not installed')
U, S, V = randomized_svd_gpu(matrix, ncomp, n_iter=2, lib='pytorch')
if to_numpy:
V = np.array(V)
S = np.array(S)
U = np.array(U)
if verbose:
print('Done randomized SVD/PCA with randomized pytorch (GPU)')
else:
raise ValueError('The SVD `mode` is not recognized')
if full_output:
if mode == 'lapack':
return V.T, S, U.T
elif mode == 'pytorch':
if to_numpy:
return V.T, S, U.T
else:
return torch.transpose(V, 0, 1), S, torch.transpose(U, 0, 1)
elif mode in ('eigen', 'eigencupy', 'eigenpytorch'):
return S, V
else:
return U, S, V
else:
if mode == 'lapack':
return U.T
elif mode == 'pytorch':
return U
else:
return V
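# Hedged usage sketch (not part of the original module): calling svd_wrapper on
# a random matrix with the CPU LAPACK backend; the shapes are illustrative only.
def _svd_wrapper_usage_sketch():
    mat = np.random.normal(size=(30, 400))
    # default output: the 10 leading right singular vectors (principal components)
    V = svd_wrapper(mat, 'lapack', ncomp=10, verbose=False)
    # full output: left singular vectors, singular values and principal components
    U, S, V = svd_wrapper(mat, 'lapack', ncomp=10, verbose=False,
                          full_output=True)
    return U, S, V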
def get_eigenvectors(ncomp, data, svd_mode, mode='noise', noise_error=1e-3,
cevr=0.9, max_evs=None, data_ref=None, debug=False,
collapse=False):
""" Getting ``ncomp`` eigenvectors. Choosing the size of the PCA truncation
when ``ncomp`` is set to ``auto``. Used in ``pca_annular`` and ``llsg``.
"""
no_dataref = False
if data_ref is None:
no_dataref = True
data_ref = data
if max_evs is None:
max_evs = min(data_ref.shape[0], data_ref.shape[1])
if ncomp is None:
raise ValueError('ncomp must be an integer or `auto`')
if ncomp == 'auto':
ncomp = 0
V_big = svd_wrapper(data_ref, svd_mode, max_evs, False)
if mode == 'noise':
if not collapse:
data_ref_sc = matrix_scaling(data_ref, 'temp-mean')
data_sc = matrix_scaling(data, 'temp-mean')
else:
data_ref_sc = matrix_scaling(data_ref, 'temp-standard')
data_sc = matrix_scaling(data, 'temp-standard')
V_sc = svd_wrapper(data_ref_sc, svd_mode, max_evs, False)
px_noise = []
px_noise_decay = 1
# Noise (px stddev of residuals) to be lower than a given threshold
while px_noise_decay >= noise_error:
ncomp += 1
V = V_sc[:ncomp]
if no_dataref:
transformed = np.dot(data_sc, V.T)
reconstructed = np.dot(transformed, V)
else:
transformed = np.dot(V, data_sc)
reconstructed = np.dot(transformed.T, V).T
residuals = data_sc - reconstructed
if not collapse:
curr_noise = np.std(residuals)
else:
curr_noise = np.std((np.median(residuals, axis=0)))
px_noise.append(curr_noise)
if ncomp > 1:
px_noise_decay = px_noise[-2] - curr_noise
# print '{} {:.4f} {:.4f}'.format(ncomp, curr_noise, px_noise_decay)
V = V_big[:ncomp]
elif mode == 'cevr':
data_sc = matrix_scaling(data, 'temp-mean')
_, S, _ = svd_wrapper(data_sc, svd_mode, min(data_sc.shape[0],
data_sc.shape[1]),
False, full_output=True)
exp_var = (S ** 2) / (S.shape[0] - 1)
full_var = np.sum(exp_var)
# % of variance explained by each PC
explained_variance_ratio = exp_var / full_var
ratio_cumsum = np.cumsum(explained_variance_ratio)
ncomp = np.searchsorted(ratio_cumsum, cevr) + 1
V = V_big[:ncomp]
if debug:
print('ncomp', ncomp)
else:
# Performing SVD/PCA according to "svd_mode" flag
ncomp = min(ncomp, min(data_ref.shape[0], data_ref.shape[1]))
V = svd_wrapper(data_ref, svd_mode, ncomp, verbose=False)
return V
def randomized_svd_gpu(M, n_components, n_oversamples=10, n_iter='auto',
transpose='auto', random_state=0, lib='cupy'):
"""Computes a truncated randomized SVD on GPU. Adapted from Sklearn.
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)), in which case `n_iter` is set to 7.
        This improves precision with few components.
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
case.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
lib : {'cupy', 'pytorch'}, str optional
Chooses the GPU library to be used.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
        # heuristic for the number of power iterations when not set explicitly
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
M = M.T # this implementation is a bit faster with smaller shape[1]
if lib == 'cupy':
M = cupy.array(M)
M = cupy.asarray(M)
# Generating normal random vectors with shape: (M.shape[1], n_random)
Q = random_state.normal(size=(M.shape[1], n_random))
Q = cupy.array(Q)
Q = cupy.asarray(Q)
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
Q = cupy.dot(M, Q)
Q = cupy.dot(M.T, Q)
# Sample the range of M by linear projection of Q.
# Extract an orthonormal basis
Q, _ = cupy.linalg.qr(cupy.dot(M, Q), mode='reduced')
# project M to the (k + p) dimensional space using the basis vectors
B = cupy.dot(Q.T, M)
B = cupy.array(B)
Q = cupy.array(Q)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = cupy.linalg.svd(B, full_matrices=False, compute_uv=True)
del B
U = cupy.dot(Q, Uhat)
if transpose:
# transpose back the results according to the input convention
return (V[:n_components, :].T, s[:n_components],
U[:,:n_components].T)
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
elif lib == 'pytorch':
M_gpu = torch.Tensor.cuda(torch.from_numpy(M.astype('float32')))
# Generating normal random vectors with shape: (M.shape[1], n_random)
Q = torch.cuda.FloatTensor(M_gpu.shape[1], n_random).normal_()
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
Q = torch.mm(M_gpu, Q)
Q = torch.mm(torch.transpose(M_gpu, 0, 1), Q)
# Sample the range of M by linear projection of Q.
# Extract an orthonormal basis
Q, _ = torch.qr(torch.mm(M_gpu, Q))
# project M to the (k + p) dimensional space using the basis vectors
B = torch.mm(torch.transpose(Q, 0, 1), M_gpu)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = torch.svd(B)
del B
U = torch.mm(Q, Uhat)
if transpose:
# transpose back the results according to the input convention
return (torch.transpose(V[:n_components, :], 0, 1),
s[:n_components],
torch.transpose(U[:, :n_components], 0, 1))
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
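# Hedged sketch (not part of the original module): the randomized-SVD recipe of
# randomized_svd_gpu written with plain NumPy on the CPU, to make the power
# iteration, range finding and thin-SVD steps explicit (transposition handling
# is omitted for brevity).
def _randomized_svd_numpy_sketch(M, n_components, n_oversamples=10, n_iter=4,
                                 random_state=0):
    rng = check_random_state(random_state)
    n_random = n_components + n_oversamples
    Q = rng.normal(size=(M.shape[1], n_random))   # random test matrix
    for _ in range(n_iter):                       # power iterations
        Q = np.dot(M, Q)
        Q = np.dot(M.T, Q)
    Q, _ = np.linalg.qr(np.dot(M, Q))             # orthonormal basis of range(M)
    B = np.dot(Q.T, M)                            # small (k + p) x n matrix
    Uhat, s, V = np.linalg.svd(B, full_matrices=False)
    U = np.dot(Q, Uhat)
    return U[:, :n_components], s[:n_components], V[:n_components, :]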
| 39.568266
| 97
| 0.585533
|
5e2e406e26498dc87791d6827e3eb7304b2e6d7f
| 900
|
py
|
Python
|
var/spack/repos/builtin/packages/py-jaraco-functools/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-15T23:55:48.000Z
|
2019-09-15T23:55:48.000Z
|
var/spack/repos/builtin/packages/py-jaraco-functools/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-jaraco-functools/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2017-01-21T17:19:32.000Z
|
2017-01-21T17:19:32.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJaracoFunctools(PythonPackage):
"""Functools like those found in stdlib"""
homepage = "https://github.com/jaraco/jaraco.functools"
url = "https://pypi.io/packages/source/j/jaraco.functools/jaraco.functools-2.0.tar.gz"
version(
'2.0', sha256='35ba944f52b1a7beee8843a5aa6752d1d5b79893eeb7770ea98be6b637bf9345')
depends_on('py-setuptools', type='build')
depends_on('py-setuptools-scm@1.15.0:', type='build')
depends_on('py-backports-functools-lru-cache@1.0.3:',
when='^python@:2', type=('build', 'run'))
depends_on('py-more-itertools', type=('build', 'run'))
depends_on('python@2.7:', type=('build', 'run'))
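# Hedged usage note (not part of the original recipe): with this package file in
# a Spack repository, the library is built and installed with the standard
# command `spack install py-jaraco-functools`.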
| 37.5
| 95
| 0.692222
|
1c3521323cf7d57dc8b2b240d95a181b90cc3144
| 1,188
|
py
|
Python
|
src/recognizeDigit.py
|
RsTaK/Sudoku
|
8daa0a06906ce61d9a71586a8d28a3931ca4e5e3
|
[
"MIT"
] | 2
|
2020-01-22T14:32:40.000Z
|
2021-12-23T20:42:52.000Z
|
src/recognizeDigit.py
|
RsTaK/Sudoku
|
8daa0a06906ce61d9a71586a8d28a3931ca4e5e3
|
[
"MIT"
] | 4
|
2020-11-13T18:54:24.000Z
|
2022-02-10T02:10:00.000Z
|
src/recognizeDigit.py
|
RsTaK/Sudoku
|
8daa0a06906ce61d9a71586a8d28a3931ca4e5e3
|
[
"MIT"
] | 1
|
2020-01-22T14:02:50.000Z
|
2020-01-22T14:02:50.000Z
|
from keras.models import load_model
import cv2
import pickle
import keras.backend as K
import numpy as np
from src.model_path import MODEL_PATH
'''def predict(self, cell):
model = load_model('./model/Model.h5')
f = K.function([model.layers[0].input, K.learning_phase()],[model.layers[-1].output])
rescaled_cell = self.rescale(cell)
result = []
for _ in range(10):
result.append(f([rescaled_cell, 1]))
result = np.array(result)
prediction = result.mean(axis=0)
uncertainty = result.var(axis=0)
if uncertainty.argmax() > 3:
new_prediction = 0
print(prediction.argmax(),uncertainty.argmax(),new_prediction)
else:
print(prediction.argmax(),uncertainty.argmax())'''
class recognizeDigit:
def __init__(self, cell):
self._prediction = self.predict(cell)
def predict(self, cell):
model = load_model(MODEL_PATH)
rescaled_cell = self.rescale(cell)
pred = model.predict(rescaled_cell)
return pred.argmax()
def rescale(self, cell):
resized_cell = cv2.resize(cell, (28, 28))
return resized_cell.reshape(1, resized_cell.shape[0], resized_cell.shape[1], 1)
@property
def prediction(self):
return self._prediction
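# Hedged usage sketch (not part of the original module): classifying a single
# grayscale Sudoku cell; 'cell.png' is a hypothetical file and a trained model
# at MODEL_PATH is assumed to exist.
if __name__ == '__main__':
    cell_img = cv2.imread('cell.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
    digit = recognizeDigit(cell_img).prediction              # argmax over 10 classes
    print('Predicted digit:', digit)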
| 27
| 87
| 0.705387
|
b42d502295720ab8ca93e796c6c6925a38f6705c
| 2,411
|
py
|
Python
|
alchemyjsonschema/custom/format.py
|
mrichar1/alchemyjsonschema
|
cee81f707b3ebb64e72fd555b9118c76cf127caf
|
[
"MIT"
] | null | null | null |
alchemyjsonschema/custom/format.py
|
mrichar1/alchemyjsonschema
|
cee81f707b3ebb64e72fd555b9118c76cf127caf
|
[
"MIT"
] | null | null | null |
alchemyjsonschema/custom/format.py
|
mrichar1/alchemyjsonschema
|
cee81f707b3ebb64e72fd555b9118c76cf127caf
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import re
from jsonschema._format import _checks_drafts, FormatChecker, _draft_checkers
import calendar
from datetime import date, time
from ..compat import string_types
"""
this is custom format
"""
time_rx = re.compile(r"(\d{2}):(\d{2}):(\d{2})(\.\d+)?(Z|([+\-])(\d{2}):(\d{2}))?")
date_rx = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})")
def parse_date(date_string):
m = date_rx.match(date_string)
if m is None:
return None
groups = m.groups()
year, month, day = [int(x) for x in groups[:3]]
return date(year, month, day)
def validate_date(date_string):
m = date_rx.match(date_string)
if m is None:
return False
groups = m.groups()
year, month, day = [int(x) for x in groups[:3]]
if not 1 <= year <= 9999:
# Have to reject this, unfortunately (despite it being OK by rfc3339):
# calendar.timegm/calendar.monthrange can't cope (since datetime can't)
return False
if not 1 <= month <= 12:
return False
(_, max_day) = calendar.monthrange(year, month)
if not 1 <= day <= max_day:
return False
# all OK
return True
def parse_time(time_string):
m = time_rx.match(time_string)
if m is None:
return None
groups = m.groups()
hour, minute, second = [int(x) for x in groups[:3]]
    if groups[3] is not None:
        # convert the fractional-second group (e.g. ".123") to microseconds
        return time(hour, minute, second, int(float(groups[3]) * 1e6))
return time(hour, minute, second)
def validate_time(time_string):
m = time_rx.match(time_string)
if m is None:
return False
groups = m.groups()
hour, minute, second = [int(x) for x in groups[:3]]
if not (0 <= hour <= 23 and 0 <= minute <= 59 and 0 <= second <= 59):
# forbid leap seconds :-(. See README
return False
if groups[4] is not None and groups[4] != "Z":
(offset_sign, offset_hours, offset_mins) = groups[5:]
if not (0 <= int(offset_hours) <= 23 and 0 <= int(offset_mins) <= 59):
return False
# all OK
return True
@_checks_drafts("date", raises=ValueError)
def is_date(instance):
if not isinstance(instance, string_types):
return True
return validate_date(instance)
@_checks_drafts("time", raises=ValueError)
def is_time(instance):
if not isinstance(instance, string_types):
return True
return validate_time(instance)
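# Hedged sketch (not part of the original module): exercising the validators
# above directly; the literal strings are illustrative only.
def _format_validators_sketch():
    assert validate_date("2021-02-28")
    assert not validate_date("2021-02-30")       # day beyond calendar.monthrange
    assert validate_time("23:59:59Z")
    assert not validate_time("23:59:60Z")        # leap seconds are rejected
    assert not validate_time("10:00:00+25:00")   # offset hours out of range
    return True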
| 24.85567
| 83
| 0.616757
|
1c0e623ae0474cb9f6dc4ca8ade7f61f33481776
| 3,807
|
py
|
Python
|
test/functional/wallet_encryption.py
|
INFAQCOIN/INFAQ
|
487de82c26135eb8ac93c9393e7fdb29bbc2822c
|
[
"MIT"
] | 1
|
2022-01-18T14:48:23.000Z
|
2022-01-18T14:48:23.000Z
|
test/functional/wallet_encryption.py
|
martin-braun/INFAQ
|
fca6db067b8079fbedf4e9160180424c95470fed
|
[
"MIT"
] | null | null | null |
test/functional/wallet_encryption.py
|
martin-braun/INFAQ
|
fca6db067b8079fbedf4e9160180424c95470fed
|
[
"MIT"
] | 1
|
2022-01-18T14:48:28.000Z
|
2022-01-18T14:48:28.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import infaqcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(infaqcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wallet already unlocked
self.nodes[0].walletpassphrase(passphrase, 12000, True)
assert_raises_rpc_error(-17, "Wallet is already unlocked", self.nodes[0].walletpassphrase, passphrase, 100, True)
self.nodes[0].walletlock()
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
| 43.758621
| 138
| 0.70528
|
61325edb08cafce8e4f411337f491e1c40aa1fd0
| 80
|
py
|
Python
|
html5&css3/Aulas/ex002/teste.py
|
Joey-Resende/Estudando-htlm5-e-css3
|
224306df9bd64c3cb88e313702881642c74352b9
|
[
"MIT"
] | null | null | null |
html5&css3/Aulas/ex002/teste.py
|
Joey-Resende/Estudando-htlm5-e-css3
|
224306df9bd64c3cb88e313702881642c74352b9
|
[
"MIT"
] | null | null | null |
html5&css3/Aulas/ex002/teste.py
|
Joey-Resende/Estudando-htlm5-e-css3
|
224306df9bd64c3cb88e313702881642c74352b9
|
[
"MIT"
] | null | null | null |
teste = input('Enter your name: ')
print(f'Your name is really nice, {teste}')
| 26.666667
| 39
| 0.675
|
d1788137294d70c937f20c94d3340c73d32cff35
| 386
|
py
|
Python
|
package/diana/utils/gateways/file_handlers/text_file.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 15
|
2019-02-12T23:26:09.000Z
|
2021-12-21T08:53:58.000Z
|
package/diana/utils/gateways/file_handlers/text_file.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 2
|
2019-01-23T21:13:12.000Z
|
2019-06-28T15:45:51.000Z
|
package/diana/utils/gateways/file_handlers/text_file.py
|
thomasyi17/diana2
|
2167053dfe15b782d96cb1e695047433f302d4dd
|
[
"MIT"
] | 6
|
2019-01-23T20:22:50.000Z
|
2022-02-03T03:27:04.000Z
|
import os
import attr
from .file_handler import FileHandler
@attr.s
class TextFileHandler(FileHandler):
def put(self, fn: str, data: str):
fp = self.get_path(fn)
# logger = logging.getLogger(self.name)
        if not os.path.exists(os.path.dirname(fp)):
os.makedirs(os.path.dirname(fp))
with open(fp, 'w') as f:
f.write(data)
| 20.315789
| 53
| 0.611399
|
70cb7cc29bd7067f286960d5e88eef1730f36795
| 9,074
|
py
|
Python
|
GramAddict/core/storage.py
|
philip-ulrich/insomniac-fork
|
9f0fe6e7c6cdcb690c1711faaa7df194d36172b1
|
[
"MIT"
] | 2
|
2020-10-29T20:07:03.000Z
|
2020-10-29T21:10:07.000Z
|
GramAddict/core/storage.py
|
philip-ulrich/insomniac-fork
|
9f0fe6e7c6cdcb690c1711faaa7df194d36172b1
|
[
"MIT"
] | 1
|
2020-10-29T13:27:00.000Z
|
2020-10-29T21:04:30.000Z
|
GramAddict/core/storage.py
|
GramAddict/gramaddict-bot
|
9f0fe6e7c6cdcb690c1711faaa7df194d36172b1
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import sys
from datetime import datetime, timedelta
from enum import Enum, unique
from typing import Optional, Union
from atomicwrites import atomic_write
logger = logging.getLogger(__name__)
ACCOUNTS = "accounts"
REPORTS = "reports"
FILENAME_HISTORY_FILTER_USERS = "history_filters_users.json"
FILENAME_INTERACTED_USERS = "interacted_users.json"
OLD_FILTER = "filter.json"
FILTER = "filters.yml"
USER_LAST_INTERACTION = "last_interaction"
USER_FOLLOWING_STATUS = "following_status"
FILENAME_WHITELIST = "whitelist.txt"
FILENAME_BLACKLIST = "blacklist.txt"
FILENAME_COMMENTS = "comments_list.txt"
FILENAME_MESSAGES = "pm_list.txt"
class Storage:
def __init__(self, my_username):
if my_username is None:
logger.error(
"No username, thus the script won't get access to interacted users and sessions data."
)
return
self.account_path = os.path.join(ACCOUNTS, my_username)
if not os.path.exists(self.account_path):
os.makedirs(self.account_path)
self.interacted_users = {}
self.history_filter_users = {}
self.interacted_users_path = os.path.join(
self.account_path, FILENAME_INTERACTED_USERS
)
if os.path.isfile(self.interacted_users_path):
with open(self.interacted_users_path, encoding="utf-8") as json_file:
try:
self.interacted_users = json.load(json_file)
except Exception as e:
logger.error(
f"Please check {json_file.name}, it contains this error: {e}"
)
sys.exit(0)
self.history_filter_users_path = os.path.join(
self.account_path, FILENAME_HISTORY_FILTER_USERS
)
if os.path.isfile(self.history_filter_users_path):
with open(self.history_filter_users_path, encoding="utf-8") as json_file:
try:
self.history_filter_users = json.load(json_file)
except Exception as e:
logger.error(
f"Please check {json_file.name}, it contains this error: {e}"
)
sys.exit(0)
self.filter_path = os.path.join(self.account_path, FILTER)
if not os.path.exists(self.filter_path):
self.filter_path = os.path.join(self.account_path, OLD_FILTER)
whitelist_path = os.path.join(self.account_path, FILENAME_WHITELIST)
if os.path.exists(whitelist_path):
with open(whitelist_path, encoding="utf-8") as file:
self.whitelist = [line.rstrip() for line in file]
else:
self.whitelist = []
blacklist_path = os.path.join(self.account_path, FILENAME_BLACKLIST)
if os.path.exists(blacklist_path):
with open(blacklist_path, encoding="utf-8") as file:
self.blacklist = [line.rstrip() for line in file]
else:
self.blacklist = []
self.report_path = os.path.join(self.account_path, REPORTS)
def can_be_reinteract(
self,
last_interaction: datetime,
hours_that_have_to_pass: Optional[Union[int, float]],
) -> bool:
if hours_that_have_to_pass is None:
return False
elif hours_that_have_to_pass == 0:
return True
return self._check_time(
last_interaction, timedelta(hours=hours_that_have_to_pass)
)
def can_be_unfollowed(
self, last_interaction: datetime, days_that_have_to_pass: Optional[int]
) -> bool:
if days_that_have_to_pass is None:
return False
return self._check_time(
last_interaction, timedelta(days=days_that_have_to_pass)
)
def _check_time(
self, stored_time: Optional[datetime], limit_time: timedelta
) -> bool:
if stored_time is None or limit_time == timedelta(hours=0):
return True
return datetime.now() - stored_time >= limit_time
def check_user_was_interacted(self, username):
"""returns when a username has been interacted, False if not already interacted"""
user = self.interacted_users.get(username)
if user is None:
return False, None
last_interaction = datetime.strptime(
user[USER_LAST_INTERACTION], "%Y-%m-%d %H:%M:%S.%f"
)
return True, last_interaction
def get_following_status(self, username):
user = self.interacted_users.get(username)
if user is None:
return FollowingStatus.NOT_IN_LIST
else:
return FollowingStatus[user[USER_FOLLOWING_STATUS].upper()]
def add_filter_user(self, username, profile_data, skip_reason=None):
user = profile_data.__dict__
user["follow_button_text"] = (
profile_data.follow_button_text.name
if not profile_data.is_restricted
else None
)
user["skip_reason"] = None if skip_reason is None else skip_reason.name
self.history_filter_users[username] = user
if self.history_filter_users_path is not None:
with atomic_write(
self.history_filter_users_path, overwrite=True, encoding="utf-8"
) as outfile:
json.dump(self.history_filter_users, outfile, indent=4, sort_keys=False)
def add_interacted_user(
self,
username,
session_id,
followed=False,
is_requested=None,
unfollowed=False,
scraped=False,
liked=0,
watched=0,
commented=0,
pm_sent=False,
job_name=None,
target=None,
):
user = self.interacted_users.get(username, {})
user[USER_LAST_INTERACTION] = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
if followed:
if is_requested:
user[USER_FOLLOWING_STATUS] = FollowingStatus.REQUESTED.name.casefold()
else:
user[USER_FOLLOWING_STATUS] = FollowingStatus.FOLLOWED.name.casefold()
elif unfollowed:
user[USER_FOLLOWING_STATUS] = FollowingStatus.UNFOLLOWED.name.casefold()
elif scraped:
user[USER_FOLLOWING_STATUS] = FollowingStatus.SCRAPED.name.casefold()
else:
user[USER_FOLLOWING_STATUS] = FollowingStatus.NONE.name.casefold()
# Save only the last session_id
user["session_id"] = session_id
# Save only the last job_name and target
if not user.get("job_name"):
user["job_name"] = job_name
if not user.get("target"):
user["target"] = target
# Increase the value of liked, watched or commented if we have already a value
user["liked"] = liked if "liked" not in user else (user["liked"] + liked)
user["watched"] = (
watched if "watched" not in user else (user["watched"] + watched)
)
user["commented"] = (
commented if "commented" not in user else (user["commented"] + commented)
)
# Update the followed or unfollowed boolean only if we have a real update
user["followed"] = (
followed
if "followed" not in user or user["followed"] != followed
else user["followed"]
)
user["unfollowed"] = (
unfollowed
if "unfollowed" not in user or user["unfollowed"] != unfollowed
else user["unfollowed"]
)
user["scraped"] = (
scraped
if "scraped" not in user or user["scraped"] != scraped
else user["scraped"]
)
# Save the boolean if we sent a PM
user["pm_sent"] = (
pm_sent
if "pm_sent" not in user or user["pm_sent"] != pm_sent
else user["pm_sent"]
)
self.interacted_users[username] = user
self._update_file()
def is_user_in_whitelist(self, username):
return username in self.whitelist
def is_user_in_blacklist(self, username):
return username in self.blacklist
def _get_last_day_interactions_count(self):
count = 0
users_list = list(self.interacted_users.values())
for user in users_list:
last_interaction = datetime.strptime(
user[USER_LAST_INTERACTION], "%Y-%m-%d %H:%M:%S.%f"
)
is_last_day = datetime.now() - last_interaction <= timedelta(days=1)
if is_last_day:
count += 1
return count
def _update_file(self):
if self.interacted_users_path is not None:
with atomic_write(
self.interacted_users_path, overwrite=True, encoding="utf-8"
) as outfile:
json.dump(self.interacted_users, outfile, indent=4, sort_keys=False)
@unique
class FollowingStatus(Enum):
NONE = 0
FOLLOWED = 1
REQUESTED = 2
UNFOLLOWED = 3
NOT_IN_LIST = 4
SCRAPED = 5
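# Hedged sketch (not part of the original module): the time gate behind
# Storage.can_be_reinteract / Storage._check_time reproduced standalone, so the
# None / 0 / elapsed-time cases are explicit.
def _reinteraction_gate_sketch(last_interaction, hours_that_have_to_pass):
    if hours_that_have_to_pass is None:      # never re-interact
        return False
    if hours_that_have_to_pass == 0:         # always allow re-interaction
        return True
    # otherwise require that the configured number of hours has elapsed
    return datetime.now() - last_interaction >= timedelta(
        hours=hours_that_have_to_pass
    )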
| 35.584314
| 102
| 0.610976
|
197b76f8f58edb22b149804f828bcd52aa93649d
| 6,454
|
py
|
Python
|
pyro/contrib/autoname/scoping.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | 10
|
2020-03-18T14:41:25.000Z
|
2021-07-04T08:49:57.000Z
|
pyro/contrib/autoname/scoping.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | 19
|
2018-10-30T13:45:31.000Z
|
2019-09-27T14:16:57.000Z
|
pyro/contrib/autoname/scoping.py
|
gavincangan/pyro
|
d9115a6da7edd7e3fecd6b89a850cc137d7e7e9a
|
[
"MIT"
] | 5
|
2020-06-21T23:40:35.000Z
|
2021-11-09T16:18:42.000Z
|
"""
``pyro.contrib.autoname.scoping`` contains the implementation of
:func:`pyro.contrib.autoname.scope`, a tool for automatically appending
a semantically meaningful prefix to names of sample sites.
"""
import functools
from pyro.poutine.messenger import Messenger
from pyro.poutine.runtime import effectful
class NameCountMessenger(Messenger):
"""
``NameCountMessenger`` is the implementation of :func:`pyro.contrib.autoname.name_count`
"""
def __enter__(self):
self._names = set()
return super(NameCountMessenger, self).__enter__()
def _increment_name(self, name, label):
while (name, label) in self._names:
split_name = name.split("__")
if "__" in name and split_name[-1].isdigit():
counter = int(split_name[-1]) + 1
name = "__".join(split_name[:-1] + [str(counter)])
else:
name = name + "__1"
return name
def _pyro_sample(self, msg):
msg["name"] = self._increment_name(msg["name"], "sample")
def _pyro_post_sample(self, msg):
self._names.add((msg["name"], "sample"))
def _pyro_post_scope(self, msg):
self._names.add((msg["args"][0], "scope"))
def _pyro_scope(self, msg):
msg["args"] = (self._increment_name(msg["args"][0], "scope"),)
class ScopeMessenger(Messenger):
"""
``ScopeMessenger`` is the implementation of :func:`pyro.contrib.autoname.scope`
"""
def __init__(self, prefix=None, inner=None):
super(ScopeMessenger, self).__init__()
self.prefix = prefix
self.inner = inner
@staticmethod
@effectful(type="scope")
def _collect_scope(prefixed_scope):
return prefixed_scope.split("/")[-1]
def __enter__(self):
if self.prefix is None:
raise ValueError("no prefix was provided")
if not self.inner:
# to accomplish adding a counter to duplicate scopes,
# we make ScopeMessenger.__enter__ effectful
# so that the same mechanism that adds counters to sample names
# can be used to add a counter to a scope name
self.prefix = self._collect_scope(self.prefix)
return super(ScopeMessenger, self).__enter__()
def __call__(self, fn):
if self.prefix is None:
self.prefix = fn.__code__.co_name # fn.__name__
@functools.wraps(fn)
def _fn(*args, **kwargs):
with type(self)(prefix=self.prefix, inner=self.inner):
return fn(*args, **kwargs)
return _fn
def _pyro_scope(self, msg):
msg["args"] = ("{}/{}".format(self.prefix, msg["args"][0]),)
def _pyro_sample(self, msg):
msg["name"] = "{}/{}".format(self.prefix, msg["name"])
def scope(fn=None, prefix=None, inner=None):
"""
:param fn: a stochastic function (callable containing Pyro primitive calls)
:param prefix: a string to prepend to sample names (optional if ``fn`` is provided)
:param inner: switch to determine where duplicate name counters appear
:returns: ``fn`` decorated with a :class:`~pyro.contrib.autoname.scoping.ScopeMessenger`
``scope`` prepends a prefix followed by a ``/`` to the name at a Pyro sample site.
It works much like TensorFlow's ``name_scope`` and ``variable_scope``,
and can be used as a context manager, a decorator, or a higher-order function.
``scope`` is very useful for aligning compositional models with guides or data.
Example::
>>> @scope(prefix="a")
... def model():
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
Example::
>>> def model():
... with scope(prefix="a"):
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
Scopes compose as expected, with outer scopes appearing before inner scopes in names::
>>> @scope(prefix="b")
... def model():
... with scope(prefix="a"):
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "b/a/x" in poutine.trace(model).get_trace()
When used as a decorator or higher-order function,
``scope`` will use the name of the input function as the prefix
if no user-specified prefix is provided.
Example::
>>> @scope
... def model():
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "model/x" in poutine.trace(model).get_trace()
"""
msngr = ScopeMessenger(prefix=prefix, inner=inner)
return msngr(fn) if fn is not None else msngr
def name_count(fn=None):
"""
``name_count`` is a very simple autonaming scheme that simply appends a suffix `"__"`
    plus a counter to any name that appears multiple times in an execution.
Only duplicate instances of a name get a suffix; the first instance is not modified.
Example::
>>> @name_count
... def model():
... for i in range(3):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "x" in poutine.trace(model).get_trace()
>>> assert "x__1" in poutine.trace(model).get_trace()
>>> assert "x__2" in poutine.trace(model).get_trace()
``name_count`` also composes with :func:`~pyro.contrib.autoname.scope`
by adding a suffix to duplicate scope entrances:
Example::
>>> @name_count
... def model():
... for i in range(3):
... with pyro.contrib.autoname.scope(prefix="a"):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
>>> assert "a__1/x" in poutine.trace(model).get_trace()
>>> assert "a__2/x" in poutine.trace(model).get_trace()
Example::
>>> @name_count
... def model():
... with pyro.contrib.autoname.scope(prefix="a"):
... for i in range(3):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
>>> assert "a/x__1" in poutine.trace(model).get_trace()
>>> assert "a/x__2" in poutine.trace(model).get_trace()
"""
msngr = NameCountMessenger()
return msngr(fn) if fn is not None else msngr
| 34.698925
| 92
| 0.596529
|
7024fa5453f357ec6c1753d40656b9074a9dd51f
| 5,980
|
py
|
Python
|
tests/Mongo/ModelDemo.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 29
|
2019-05-22T08:08:34.000Z
|
2021-11-16T08:15:10.000Z
|
tests/Mongo/ModelDemo.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 4
|
2019-05-20T08:34:07.000Z
|
2019-09-11T11:26:57.000Z
|
tests/Mongo/ModelDemo.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 5
|
2019-05-20T09:15:49.000Z
|
2021-09-04T19:08:59.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""model类"""
__author__ = ''
from tests.mysqlDemo.BaseModel import BaseModel
class ModelDemo1(BaseModel):
    __database__ = 'VnTrader_Log_Db'  # database name
    __tablename__ = '20190226'  # table name
    __create_time__ = None  # creation-time field; if None, create_time is not added automatically
    __update_time__ = None  # update-time field; if None, update_time is not added automatically
    columns = [  # database columns
'_id',
'trustor',
'asset_issuer',
'asset_code',
'limit',
'asset_type',
'transaction_id',
'type',
'trustee',
'id',
'source_account',
]
class ModelDemo2(BaseModel):
    __database__ = 'VnTrader_Log_Db'  # database name
    __tablename__ = 'lh_test'  # table name
    __create_time__ = 'create_time'  # creation-time field; if None, create_time is not added automatically
    __update_time__ = 'update_time'  # update-time field; if None, update_time is not added automatically
    columns = [  # database columns
'_id',
'name',
]
# set time format of create_time and update_time
# def fresh_timestamp(self):
# return datetime.datetime.now().strftime("%Y%m%d")
class ModelDemo3(BaseModel):
    __database__ = 'icoape_mongo'  # database name
    __tablename__ = 'meetup_events'  # table name
    __create_time__ = None  # creation-time field; if None, create_time is not added automatically
    __update_time__ = None  # update-time field; if None, update_time is not added automatically
columns = []
if __name__ == '__main__':
# data = ModelDemo1().select('content', 'time', 'gateway').take(10).pluck('time', 'gateway')
# select * from table where gateway = 'MAIN_ENGINE'
# data = ModelDemo1.where({'gateway': 'MAIN_ENGINE'}).select('content', 'time', 'gateway').data()
# data = ModelDemo1().where('gateway', 'MAIN_ENGINE').select('content', 'time', 'gateway').data()
# data = ModelDemo1().where('gateway', '=', 'MAIN_ENGINE').select('content', 'time', 'gateway').data()
#
# data = ModelDemo1().where('gateway', '=', 'MAIN_ENGINE').select('content', 'time', 'gateway').first()
# data = ModelDemo1().where('gateway', '<=', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', '<', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', '>', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', '>=', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', '!=', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'in', ['MAIN_ENGINE', 'BITFINEX']).select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'not in', ['MAIN_ENGINE', 'BITFINEX']).select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'like', 'MAIN_ENGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'not like', 'BITFINEX').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'ilike', 'MAIN_eNGINE').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'not ilike', 'bITFINEX').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'like', 'ENGINE$').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'like', '^MAIN_').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'like', '^MAIN_ENGINE$').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('gateway', 'like', '^MAIN_ENGINE$').select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('time', 'between', ['14:08:38', '14:11:37']).select('content', 'time', 'gateway').get()
# data = ModelDemo1().where('time', 'not between', ['14:11:38', '19:38:18']).select('content', 'time', 'gateway').get()
# model = ModelDemo1().where({'gateway': 'BITFINEX'}).where('time', '>=', '19:38:').select('content', 'time', 'gateway').get()
# skip
# data = ModelDemo1().where({'gateway': 'BITFINEX'}).offset(4).select('content', 'time', 'gateway').data()
# # sort
    # # data = ModelDemo2().orderby('update_time').data() # ascending by update_time
    # # data = ModelDemo2().orderby('update_time', 'asc').data() # ascending by update_time
    # # data = ModelDemo2().orderby('update_time', 'desc').data() # descending by update_time
#
# # take|limit
    # # data = ModelDemo2().orderby('update_time', 'desc').take(4).data() # fetch 4 records
#
# # or
    # data = ModelDemo2().where('update_time', '>=', 1559722499).whereor([{'name': 'haha1'}, ['name', 'haha3'], ('name', '=', 'haha2')]).data() # fetch matching records
    # data = ModelDemo2().where('update_time', '=', 1559722499).whereor({'name': 'haha1'}).whereor('name', 'haha3').data() # fetch matching records
    # data = ModelDemo2().where('token_name', 'size', 2).data() # fetch matching records
# create
# data = ModelDemo2().create({'name': 'haha3', 'token_name': 'BTC'})
# data = ModelDemo2().create([{'name': 'haha', 'token_name': 'USDT'}, {'name': 'haha1', 'token_name': 'BTC'}])
# update
# data = ModelDemo2().where('name', 'ilike', 'haHa').update({'token_name': ['BTC14', '123']})
# data = ModelDemo2().where('name', 'haha').update({'token_name': 'BTC111'})
# delete
# ModelDemo2().where('name', 'haha1').delete()
# data = ModelDemo2().where('token_name', ['BTC14', '123']).data()
# data = ModelDemo2().data()
# print(data)
# exit(0)
data = ModelDemo3().where('time', '>', 1561910400000).where('time', '<', 1564502400000).where('venue', 'exist', True).select('min(venue.country) as country', 'count(*) as count', 'venue.localized_country_name as country_name')\
.groupby('country_name').orderby('count').offset(0).take(3).get()
# data = ModelDemo3().select('min(venue.country)', 'count(*) as total', 'venue.localized_country_name as country_name')\
print(data)
pass
| 44.626866
| 231
| 0.60301
|
e208a89d3a607c1889c7f869cb2397b8346e0a31
| 6,991
|
py
|
Python
|
.venv/lib/python3.8/site-packages/clikit/api/io/io.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | 1
|
2020-08-07T16:09:57.000Z
|
2020-08-07T16:09:57.000Z
|
.venv/lib/python3.8/site-packages/clikit/api/io/io.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
.venv/lib/python3.8/site-packages/clikit/api/io/io.py
|
RivtLib/replit01
|
ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7
|
[
"MIT"
] | null | null | null |
from typing import Optional
from clikit.api.formatter import Formatter
from .input import Input
from .output import Output
class IO(Formatter):
"""
Provides methods to access the console input and output.
"""
def __init__(
self, input, output, error_output
): # type: (Input, Output, Output) -> None
self._input = input
self._output = output
self._error_output = error_output
self._terminal_dimensions = None
@property
def input(self): # type: () -> Input
return self._input
@property
def output(self): # type: () -> Output
return self._output
@property
def error_output(self): # type: () -> Output
return self._error_output
def read(self, length, default=None): # type: (int, Optional[str]) -> str
"""
Reads the given amount of characters from the standard input.
:raises: IOException
"""
return self._input.read(length, default=default)
def read_line(
self, length=None, default=None
): # type: (Optional[int], Optional[str]) -> str
"""
Reads a line from the standard input.
:raises: IOException
"""
return self._input.read_line(length=length, default=default)
def write(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a string to the standard output.
The string is formatted before it is written to the output.
"""
self._output.write(string, flags=flags)
def write_line(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a line to the standard output.
The string is formatted before it is written to the output.
"""
self._output.write_line(string, flags=flags)
def write_raw(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a string to the standard output without formatting.
"""
self._output.write_raw(string, flags=flags)
def write_line_raw(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a line to the standard output without formatting.
"""
self._output.write_raw(string, flags=flags)
def error(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a string to the error output.
The string is formatted before it is written to the output.
"""
self._error_output.write(string, flags=flags)
def error_line(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a line to the error output.
The string is formatted before it is written to the output.
"""
self._error_output.write_line(string, flags=flags)
def error_raw(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a string to the error output without formatting.
"""
self._error_output.write_raw(string, flags=flags)
def error_line_raw(self, string, flags=None): # type: (str, Optional[int]) -> None
"""
Writes a line to the error output without formatting.
"""
self._error_output.write_raw(string, flags=flags)
def flush(self): # type: () -> None
"""
Flushes the outputs and forces all pending text to be written out.
"""
self._output.flush()
self._error_output.flush()
def close(self): # type: () -> None
"""
Closes the input and the outputs.
"""
self._input.close()
self._output.close()
self._error_output.close()
def set_interactive(self, interactive): # type: (bool) -> None
"""
Enables or disables interaction with the user.
"""
self._input.set_interactive(interactive)
def is_interactive(self): # type: () -> bool
"""
Returns whether the user may be asked for input.
"""
return self._input.is_interactive()
def set_verbosity(self, verbosity): # type: (int) -> None
"""
Sets the verbosity of the output.
"""
self._output.set_verbosity(verbosity)
self._error_output.set_verbosity(verbosity)
def is_verbose(self): # type: () -> bool
"""
Returns whether the verbosity is VERBOSE or greater.
"""
return self._output.is_verbose()
def is_very_verbose(self): # type: () -> bool
"""
Returns whether the verbosity is VERY_VERBOSE or greater.
"""
return self._output.is_very_verbose()
def is_debug(self): # type: () -> bool
"""
Returns whether the verbosity is DEBUG.
"""
return self._output.is_debug()
@property
def verbosity(self): # type: () -> int
return self._output.verbosity
def set_quiet(self, quiet): # type: (bool) -> None
"""
Sets whether all output should be suppressed.
"""
self._output.set_quiet(quiet)
self._error_output.set_quiet(quiet)
def is_quiet(self): # type: () -> bool
"""
Returns whether all output is suppressed.
"""
return self._output.is_quiet()
def set_terminal_dimensions(self, dimensions): # type: (Rectangle) -> None
"""
Sets the dimensions of the terminal.
"""
self._terminal_dimensions = dimensions
@property
def terminal_dimensions(self): # type: () -> Rectangle
if not self._terminal_dimensions:
self._terminal_dimensions = self.get_default_terminal_dimensions()
return self._terminal_dimensions
def get_default_terminal_dimensions(self): # type: () -> Rectangle
"""
Returns the default terminal dimensions.
"""
from clikit.ui.rectangle import Rectangle
return Rectangle(80, 20)
def set_formatter(self, formatter): # type: (Formatter) -> None
"""
Sets the output formatter.
"""
self._output.set_formatter(formatter)
self._error_output.set_formatter(formatter)
def supports_ansi(self): # type: () -> bool
return self._output.supports_ansi()
@property
def formatter(self): # type: () -> Formatter
"""
Returns the output formatter.
"""
return self._output.formatter
def format(self, string, style=None): # type: (str, Style) -> str
"""
Formats the given string.
"""
return self._output.formatter.format(string, style=style)
def remove_format(self, string): # type: (str) -> str
"""
Removes the format tags from the given string.
"""
return self._output.formatter.remove_format(string)
def section(self):
return self.__class__(
self._input, self._output.section(), self._error_output.section()
)
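# Usage sketch (hedged, not part of clikit): IO is a thin facade that simply
# forwards calls to the Input, the standard Output and the error Output it was
# built with. Given concrete input/output objects `inp`, `out` and `err` (in
# practice clikit's own console implementation wires these up for the terminal;
# the variable names here are illustrative only), typical use looks like:
#
#     io = IO(inp, out, err)
#     io.write_line("<info>hello</info>")   # formatted, standard output
#     io.error_line("something failed")     # formatted, error output
#     if io.is_verbose():
#         io.write_line("extra detail shown only at higher verbosity")
#     io.flush()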
| 30.264069
| 87
| 0.597196
|
a0e10056329ff2e3f1991d872d961b5596ef247e
| 5,034
|
py
|
Python
|
revitAPI/viewRemoveCropRegionShape.py
|
sixtysecondrevit/dynamoPython
|
dfb4b001800ebf9ab308510db40cfc5a5a953fee
|
[
"MIT"
] | 114
|
2018-07-17T17:47:11.000Z
|
2022-03-08T09:33:39.000Z
|
revitAPI/viewRemoveCropRegionShape.py
|
sixtysecondrevit/dynamoPython
|
dfb4b001800ebf9ab308510db40cfc5a5a953fee
|
[
"MIT"
] | 28
|
2018-07-18T10:43:37.000Z
|
2020-11-24T06:08:18.000Z
|
revitAPI/viewRemoveCropRegionShape.py
|
sixtysecondrevit/dynamoPython
|
dfb4b001800ebf9ab308510db40cfc5a5a953fee
|
[
"MIT"
] | 56
|
2018-07-17T17:57:28.000Z
|
2022-03-26T12:30:39.000Z
|
"""
VIEW REMOVE CROP REGION SHAPE
"""
__author__ = 'Sol Amour - amoursol@gmail.com'
__twitter__ = '@solamour'
__version__ = '1.0.0'
# Importing Reference Modules
import clr # CLR ( Common Language Runtime Module )
clr.AddReference("RevitServices") # Adding the RevitServices.dll special Dynamo
# module to deal with Revit
import RevitServices # Importing RevitServices
from RevitServices.Persistence import DocumentManager # From RevitServices import
# the Document Manager
from RevitServices.Transactions import TransactionManager # From RevitServices
# import the Transaction Manager
clr.AddReference("RevitAPI") # Adding the RevitAPI.dll module to access the Revit
# API
import Autodesk # Here we import the Autodesk namespace
# From the Autodesk namespace - derived down to the Revit Database, we import only
# the Filtered Element Collector and CurveElement classes
from Autodesk.Revit.DB import FilteredElementCollector, CurveElement
# Here we give the Revit Document a nickname of 'doc' which allows us to simply
# call 'doc' later without having to type the long namespace name
doc = DocumentManager.Instance.CurrentDBDocument
# We want to access the Revit API on our 'view' objects, so we have to Unwrap them
# to get under the hood. If we do not Unwrap the views, we only have access to the
# DynamoAPI calls on the object. To do this, we simply wrap our IN[0] port inside
# of the 'UnwrapElement()' function
viewList = UnwrapElement(IN[0])
# If the view doesn't have the attribute of '__iter__' (Which means iterable - i.e
# it's an object that contains objects such as a 'List') then we want to wrap that
# singular item (In our case a view) into a List of one thing. This way our For
# Loop will never fail due to rank
if not hasattr(viewList, '__iter__'):
# By adding square braces around the variable of 'view' we put it inside a
# new list
viewList = [viewList]
# Creating an empty catchment list for our Success/Failure messages
message = []
# To affect the Revit model we have to wrap all of our API calls inside a
# Transaction. This is Revit's way of handling content change and is called by the
# Dynamo service .dll called 'RevitServices'. What this means is we wrap all of our
# Dynamo changes into a single Transaction (Instead of having a LOT of things to
# undo with Ctrl + Z in our 'back' log inside of Revit) called 'Dynamo-GUID script'
TransactionManager.Instance.EnsureInTransaction(doc)
# We run 'Try/Except' error handling to stop the node execution if any part of the
# 'try' indented code returns a None (null)
try:
# We then must run a 'For Loop' across every view (item) inside our viewList (list)
# that allows us to make changes to either a single view, or multiple views when
# fed in inside Dynamo
for view in viewList:
# We first do a conditional check. 'If' the view property of 'CropBoxActive' is
# on (True) then, and only then, do the following
if view.CropBoxActive == True:
# Query the Crop Region Shape Manager applied to the view we are checking
# against and give it a variable name of 'cropManager'
cropManager = view.GetCropRegionShapeManager()
# After we have access to our Crop Region Shape Manager (cropManager) then
# we can call a method (action) on that manager to 'Remove Crop Region
# Shape' by using the API call of 'RemoveCropRegionShape()'
cropManager.RemoveCropRegionShape()
# If successful, we append the view
message.append( view )
# Otherwise if the view does not have a Crop Box Active
else:
# If unsuccessful, we append a None (null)
message.append( None )
# If our 'try' statement fails by returning a None (null), then execute the following
# code instead
except:
# Import the 'sys' module to capture System Specific Parameters
import sys
# Append to the 'path' list a string where the Python interpreter can look for
# non-native modules
sys.path.append(r'C:\Program Files (x86)\IronPython 2.7\Lib')
# Import the 'traceback' module to capture Error messages from the appended path
import traceback
# Set our output message as the formatted failure message from the 'traceback'
# module
message = traceback.format_exc()
# After we have affected the Revit Model we close the Transaction which allows
# Revit to be accessed again (If in a worksharing environment) and that Transaction
# to be undone if needed through Ctrl + Z
TransactionManager.Instance.TransactionTaskDone()
# If our message list contains only a singular item we don't need it to be wrapped up
# inside a list. So we check if the len (count) of our list is greater than or equal
# to 1 and then, and only then, do we change our output from 'message' (A list) to
# 'message[0]', the first item in that list. There is no 'else' check as the default
# list of 'message' is acceptable and the 'If' statement won't execute if that
# condition doesn't exist
if len(message) >= 1:
message = message[0]
# To get our results back inside of Dynamo, we need to append our 'message'
# the OUT port
OUT = message
| 45.763636
| 86
| 0.754271
|
bf45de095669be58131e758d7b5e796ba65da186
| 4,661
|
py
|
Python
|
conditional.py
|
LLNL/fastcam
|
99cefe37528014247319468cf05f54fef259d3bf
|
[
"BSD-3-Clause"
] | 25
|
2020-01-27T23:31:29.000Z
|
2022-03-30T08:23:02.000Z
|
conditional.py
|
LLNL/fastcam
|
99cefe37528014247319468cf05f54fef259d3bf
|
[
"BSD-3-Clause"
] | 1
|
2020-12-17T10:27:10.000Z
|
2020-12-17T10:27:10.000Z
|
conditional.py
|
LLNL/fastcam
|
99cefe37528014247319468cf05f54fef259d3bf
|
[
"BSD-3-Clause"
] | 5
|
2020-04-02T21:53:14.000Z
|
2021-01-27T11:51:12.000Z
|
'''
BSD 3-Clause License
Copyright (c) 2020, Lawrence Livermore National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
'''
https://github.com/LLNL/fastcam
A toolkit for efficient computation of saliency maps for explainable
AI attribution.
This work was performed under the auspices of the U.S. Department of Energy
by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344
and was supported by the LLNL-LDRD Program under Project 18-ERD-021 and
Project 17-SI-003.
Software released as LLNL-CODE-802426.
See also: https://arxiv.org/abs/1911.11293
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from . import maps
except ImportError:
import maps
class ConditionalSaliencyMaps(maps.CombineSaliencyMaps):
r'''
This will combine saliency maps into a single weighted saliency map.
Input is a list of 3D tensors of various sizes.
Output is a 3D tensor of size output_size
num_maps specifies how many maps we will combine
weights is an optional list of weights for each layer e.g. [1, 2, 3, 4, 5]
'''
def __init__(self, **kwargs):
super(ConditionalSaliencyMaps, self).__init__(**kwargs)
def forward(self, xmap, ymaps, reverse=False):
r'''
Input shapes are something like [64,7,7] i.e. [batch size x layer_height x layer_width]
Output shape is something like [64,224,224] i.e. [batch size x image_height x image_width]
'''
assert(isinstance(xmap,list))
assert(len(xmap) == self.map_num)
assert(len(xmap[0].size()) == 3)
bn = xmap[0].size()[0]
cm = torch.zeros((bn, 1, self.output_size[0], self.output_size[1]), dtype=xmap[0].dtype, device=xmap[0].device)
ww = []
r'''
Now get each saliency map and resize it. Then store it and also create a combined saliency map.
'''
for i in range(len(xmap)):
assert(torch.is_tensor(xmap[i]))
wsz = xmap[i].size()
wx = xmap[i].reshape(wsz[0], 1, wsz[1], wsz[2]) + 0.0000001
w = torch.zeros_like(wx)
if reverse:
for j in range(len(ymaps)):
wy = ymaps[j][i].reshape(wsz[0], 1, wsz[1], wsz[2]) + 0.0000001
w -= wx*torch.log2(wx/wy)
else:
for j in range(len(ymaps)):
wy = ymaps[j][i].reshape(wsz[0], 1, wsz[1], wsz[2]) + 0.0000001
w -= wy*torch.log2(wy/wx)
w = torch.clamp(w,0.0000001,1)
w = nn.functional.interpolate(w, size=self.output_size, mode=self.resize_mode, align_corners=False)
ww.append(w)
cm += (w * self.weights[i])
cm = cm / self.weight_sum
cm = cm.reshape(bn, self.output_size[0], self.output_size[1])
ww = torch.stack(ww,dim=1)
ww = ww.reshape(bn, self.map_num, self.output_size[0], self.output_size[1])
return cm, ww
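# Notes on the weighting above (descriptive, not in the original source): for each
# scale i the loop accumulates w = -sum_j wx*log2(wx/wy_j) when reverse=True and
# w = -sum_j wy_j*log2(wy_j/wx) otherwise, i.e. a negated KL-divergence style term
# comparing the conditioning map xmap[i] against the corresponding map in each
# ymaps[j], which is then clamped, resized and mixed using the per-layer weights.
# A hedged call sketch, with constructor keyword names assumed to match those of
# maps.CombineSaliencyMaps (they are not shown in this file):
#
#     csm = ConditionalSaliencyMaps(output_size=(224, 224), map_num=5,
#                                   weights=[1, 2, 3, 4, 5], resize_mode='bilinear')
#     # xmap: list of 5 tensors shaped [batch, h_i, w_i]; ymaps: list of such lists
#     cm, ww = csm(xmap, ymaps)   # cm: [batch, 224, 224], ww: [batch, 5, 224, 224]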
| 38.204918
| 120
| 0.642781
|
e906030224fce4ee5b021be2a1a3c63e95a0bd3c
| 3,265
|
py
|
Python
|
djangoresume/settings.py
|
nfoos/django-resume
|
70fe6a2541358708d6973868750a82e854a14fe4
|
[
"MIT"
] | 11
|
2015-03-31T21:36:45.000Z
|
2022-02-18T18:59:17.000Z
|
djangoresume/settings.py
|
nfoos/django-resume
|
70fe6a2541358708d6973868750a82e854a14fe4
|
[
"MIT"
] | 2
|
2018-02-15T13:45:47.000Z
|
2018-07-07T14:45:02.000Z
|
djangoresume/settings.py
|
nfoos/django-resume
|
70fe6a2541358708d6973868750a82e854a14fe4
|
[
"MIT"
] | 13
|
2015-06-07T05:20:44.000Z
|
2021-02-11T12:18:22.000Z
|
"""
Django settings for djangoresume project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pdr@%#n+*#s)4d5rso^m#a@2b=ozbv3k6)z8^)4)-&k7#fc)%i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'resume'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoresume.urls'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'resume/templates')]
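# Note: TEMPLATE_DIRS is the pre-Django-1.8 name for this setting and is not read by
# the TEMPLATES configuration below on Django 1.10; templates under resume/templates
# are still discovered because APP_DIRS is True. Listing the path in
# TEMPLATES[0]['DIRS'] would be the 1.10-idiomatic equivalent.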
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoresume.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| 26.12
| 91
| 0.695559
|
c49d1a8bf60e3fdc13e4565f7eab6b73862232df
| 577
|
py
|
Python
|
test/test_del_group.py
|
200312/python_training
|
623cfd967d999849aac5d3130823fba8638bc289
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_group.py
|
200312/python_training
|
623cfd967d999849aac5d3130823fba8638bc289
|
[
"Apache-2.0"
] | null | null | null |
test/test_del_group.py
|
200312/python_training
|
623cfd967d999849aac5d3130823fba8638bc289
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
import random
def test_delete_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(group)
assert old_groups == new_groups
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 33.941176
| 113
| 0.710572
|
c26e2acfc4598da2503f41da041329bc56a16a6e
| 3,771
|
py
|
Python
|
annotations/simplify_annotations.py
|
chrhenning/image_text_relation
|
8d09483b48babe9bf90ca15a8cd67e389a0bd00c
|
[
"Apache-2.0"
] | null | null | null |
annotations/simplify_annotations.py
|
chrhenning/image_text_relation
|
8d09483b48babe9bf90ca15a8cd67e389a0bd00c
|
[
"Apache-2.0"
] | null | null | null |
annotations/simplify_annotations.py
|
chrhenning/image_text_relation
|
8d09483b48babe9bf90ca15a8cd67e389a0bd00c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# This script takes the input for the SC and MI classifier (which is then used to generate the TFRecord files)
# and simplifies the annotations by omitting MI labels 1, 6 and 7 as described in chapter 4.2.3.1 of the thesis. Label 1
# is omitted as there is no annotated sample representing it and because it represents a completely rare case
# (cp. example 4.2.1 in the thesis).
# Such a simplified annotation allows the MI labeling problem to be stated as a regression problem. Therefore, the remaining
# labels will become subject to a permutation, to allow simpler handling later on. The permutation will be
# Original label: 0 5 4 3 2
# | | | | |
# New label: 0 1 2 3 4
#
# The inputs are the sample files generated by 'generateClassifierInputWiki' and 'generateClassifierInputBBC'.
import argparse
import os
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--wiki', type=str, help="The file containing the annotated wiki samples.", default='wiki-anno-samples.jsonl')
parser.add_argument('--bbc', type=str, help="The file containing the annotated bbc samples.", default='bbc-anno-samples.jsonl')
parser.add_argument('--ext', type=str, help="A file extension that should be added to the modified input files.", default='-simple')
args = parser.parse_args()
wiki_samples_fn = args.wiki
bbc_samples_fn = args.bbc
extension = args.ext
# the mapping from old to new labels
mapping = {
0: 0,
5: 1,
4: 2,
3: 3,
2: 4
}
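# For example (field structure taken from the loops below): a JSONL line such as
# {"annotation": {"mi": 5, ...}, ...} is kept and rewritten with "mi": 1, while
# lines whose "mi" value is 1, 6 or 7 are skipped entirely.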
if not os.path.isfile(wiki_samples_fn):
raise(Exception('The file ' + wiki_samples_fn + ' does not exist'))
if not os.path.isfile(bbc_samples_fn):
raise(Exception('The file ' + bbc_samples_fn + ' does not exist'))
# output file names
wiki_fn, wiki_ext = os.path.splitext(wiki_samples_fn)
wiki_out_fn = wiki_fn + extension + wiki_ext
bbc_fn, bbc_ext = os.path.splitext(bbc_samples_fn)
bbc_out_fn = bbc_fn + extension + bbc_ext
print('The output file for wiki samples will be ' + wiki_out_fn)
if os.path.isfile(wiki_out_fn):
print('The file %s already exists and will be overwritten.' % (wiki_out_fn))
print('The output file for bbc samples will be ' + bbc_out_fn)
if os.path.isfile(bbc_out_fn):
print('The file %s already exists and will be overwritten.' % (bbc_out_fn))
# read samples
with open(wiki_samples_fn, 'r') as f:
wiki_samples = [json.loads(line) for line in f]
with open(bbc_samples_fn, 'r') as f:
bbc_samples = [json.loads(line) for line in f]
print()
print('Read %d annotated bbc samples and %d annotated wiki samples.' % (len(bbc_samples), len(wiki_samples)))
# Write samples that have valid MI labels (all except those having 1, 6 or 7 as label).
# All other labels get assigned to their new labeling according to the dict "mapping".
skipped = 0
wiki_n = 0
bbc_n = 0
with open(bbc_out_fn, 'w') as f:
for d in bbc_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
bbc_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
with open(wiki_out_fn, 'w') as f:
for d in wiki_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
wiki_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
print('Output contains %d annotated bbc samples and %d annotated wiki samples.' % (bbc_n, wiki_n))
print('Skipped %d samples in total.' % (skipped))
| 39.28125
| 134
| 0.661098
|
37c7e9c3c823675c1a47d2e915cab77fb1dec479
| 4,743
|
py
|
Python
|
anomaly_detection/utils/preprocessing/create_folds/camelyon16.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 22
|
2020-10-21T07:59:33.000Z
|
2022-03-18T08:07:49.000Z
|
anomaly_detection/utils/preprocessing/create_folds/camelyon16.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 2
|
2020-10-26T05:19:39.000Z
|
2021-09-21T18:16:02.000Z
|
anomaly_detection/utils/preprocessing/create_folds/camelyon16.py
|
ninatu/anomaly_detection
|
6fa35f3fd35976ce2b857801d288e17f454241b9
|
[
"Apache-2.0"
] | 7
|
2020-11-19T12:32:29.000Z
|
2022-03-06T21:02:30.000Z
|
import os
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import KFold
import argparse
N_TUMOR_SLIDES_FOR_VALIDATION = 4
PATCH_NAME_PAT = re.compile('(?P<image_name>.*)_(?P<crop_type>.*)_x_(?P<x>\d+)_y_(?P<y>\d+)_w_(?P<w>\d+)_h_(?P<h>\d+)')
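# Hypothetical example of the expected patch naming (the concrete file name below is
# an assumption; only the pattern above comes from the source): a patch called
# "normal_001_normal_x_512_y_1024_w_256_h_256" parses into image_name="normal_001",
# crop_type="normal", x=512, y=1024, w=256, h=256.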
def _filter_filenames(slides, filenames):
filtered = []
for filename in filenames:
if PATCH_NAME_PAT.match(filename).group('image_name') in slides:
filtered.append(filename)
return filtered
def _save_split(filenames, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f_out:
f_out.writelines([filename + '\n' for filename in filenames])
def create_folds(normal_train_split, tumor_train_split, output_root, n_folds):
folds_dir = os.path.join(output_root, 'folds', 'camelyon16')
validation_classes_root = os.path.join(output_root, 'validation_classes')
validation_classes_path = os.path.join(validation_classes_root, 'camelyon16.csv')
os.makedirs(validation_classes_root, exist_ok=True)
os.makedirs(folds_dir, exist_ok=True)
"========================== CHOOSE SLIDES FOR VALIDATION ========================="
if not os.path.exists(validation_classes_path):
with open(tumor_train_split) as f_in:
anomaly_filenames = [filename.strip() for filename in f_in.readlines()]
tumor_slidenames = [PATCH_NAME_PAT.match(filename).group('image_name') for filename in anomaly_filenames]
tumor_slidenames = np.unique(tumor_slidenames)
np.random.shuffle(tumor_slidenames)
validation_slidenames = tumor_slidenames[:N_TUMOR_SLIDES_FOR_VALIDATION]
df = pd.DataFrame(np.array(validation_slidenames)[:, np.newaxis], columns=['Valid Slides'])
df.to_csv(validation_classes_path, index=False)
"===================== CREATE K-FOLD CROSS-VALIDATION SPLIT ========================"
valid_slides_df = pd.read_csv(validation_classes_path)
valid_anomaly_slides = valid_slides_df['Valid Slides'].values
with open(tumor_train_split) as f_in:
anomaly_filenames = [filename.strip() for filename in f_in.readlines()]
with open(normal_train_split) as f_in:
normal_filenames = [filename.strip() for filename in f_in.readlines()]
normal_slides = [PATCH_NAME_PAT.match(filename).group('image_name') for filename in normal_filenames]
normal_slides = np.array(normal_slides)
normal_train_split_indexes, normal_test_split_indexes = list(zip(*KFold(n_splits=n_folds).split(normal_slides)))
_, anomaly_test_split_indexes = list(zip(*KFold(n_splits=n_folds).split(valid_anomaly_slides)))
for i_fold, (normal_train_indexes, normal_test_indexes, anomaly_test_indexes) in \
enumerate(zip(normal_train_split_indexes, normal_test_split_indexes, anomaly_test_split_indexes)):
fold_dir = os.path.join(folds_dir, 'healthy', str(i_fold))
os.makedirs(fold_dir, exist_ok=True)
normal_train_filenames = _filter_filenames(normal_slides[normal_train_indexes], normal_filenames)
normal_test_filenames = _filter_filenames(normal_slides[normal_test_indexes], normal_filenames)
anomaly_test_filenames = _filter_filenames(valid_anomaly_slides[anomaly_test_indexes], anomaly_filenames)
np.random.shuffle(normal_train_filenames)
val_n = int(0.2 * len(normal_train_filenames))
normal_val_filenames = normal_train_filenames[:val_n]
normal_train_filenames = normal_train_filenames[val_n:]
_save_split(normal_train_filenames, os.path.join(fold_dir, 'normal', 'train'))
_save_split(normal_val_filenames, os.path.join(fold_dir, 'normal', 'val'))
_save_split(normal_test_filenames, os.path.join(fold_dir, 'normal', 'test'))
_save_split(anomaly_test_filenames, os.path.join(fold_dir, 'anomaly', 'test'))
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--normal_train_split",
type=str,
default='./folds/train_test_split/camelyon16/healthy/normal/train',
help='normal_train_split')
parser.add_argument("--tumor_train_split",
type=str,
default='./folds/train_test_split/camelyon16/healthy/anomaly/train',
help='tumor_train_split')
parser.add_argument("-o", "--output_root", required=True, type=str, help='output_root')
parser.add_argument("-n", "--n_folds", type=int, default=3, help='n_folds')
args = parser.parse_args()
create_folds(args.normal_train_split, args.tumor_train_split, args.output_root, args.n_folds)
| 46.5
| 119
| 0.706937
|
9c8abd9f2ab62dba5912f43ad4cc201627c30fb2
| 2,777
|
py
|
Python
|
src/imitation/rewards/serialize.py
|
dreamfyrian/imitation
|
682bc4b919baf57bdd959ac646caba21d92cdf71
|
[
"MIT"
] | 438
|
2019-04-11T22:02:03.000Z
|
2022-03-30T05:17:19.000Z
|
src/imitation/rewards/serialize.py
|
dreamfyrian/imitation
|
682bc4b919baf57bdd959ac646caba21d92cdf71
|
[
"MIT"
] | 397
|
2019-03-22T18:24:10.000Z
|
2022-03-29T23:08:05.000Z
|
src/imitation/rewards/serialize.py
|
dreamfyrian/imitation
|
682bc4b919baf57bdd959ac646caba21d92cdf71
|
[
"MIT"
] | 106
|
2019-04-11T22:02:08.000Z
|
2022-03-28T14:12:20.000Z
|
"""Load serialized reward functions of different types."""
from typing import Callable
import numpy as np
import torch as th
from stable_baselines3.common.vec_env import VecEnv
from imitation.rewards import common
from imitation.util import registry, util
# TODO(sam): I suspect this whole file can be replaced with th.load calls. Try
# that refactoring once I have things running.
RewardFnLoaderFn = Callable[[str, VecEnv], common.RewardFn]
reward_registry: registry.Registry[RewardFnLoaderFn] = registry.Registry()
def _load_reward_net_as_fn(shaped: bool) -> RewardFnLoaderFn:
def loader(path: str, venv: VecEnv) -> common.RewardFn:
"""Load train (shaped) or test (not shaped) reward from path."""
del venv # Unused.
net = th.load(str(path))
if not shaped and hasattr(net, "base"):
# If the "base" attribute exists, we are dealing with a ShapedRewardNet
# and will disable the potential shaping (if shaped is False).
# If no "base" attribute exists, we seem to use an unshaped RewardNet
# anyway, so we just use its predict() method directly.
reward = net.base.predict
else:
reward = net.predict
def rew_fn(
obs: np.ndarray,
act: np.ndarray,
next_obs: np.ndarray,
dones: np.ndarray,
) -> np.ndarray:
rew = reward(obs, act, next_obs, dones)
assert rew.shape == (len(obs),)
return rew
return rew_fn
return loader
def load_zero(path: str, venv: VecEnv) -> common.RewardFn:
del path, venv
def f(
obs: np.ndarray,
act: np.ndarray,
next_obs: np.ndarray,
dones: np.ndarray,
) -> np.ndarray:
del act, next_obs, dones # Unused.
return np.zeros(obs.shape[0])
return f
# TODO(adam): I think we can get rid of this and have just one RewardNet.
reward_registry.register(
key="RewardNet_shaped",
value=_load_reward_net_as_fn(shaped=True),
)
reward_registry.register(
key="RewardNet_unshaped",
value=_load_reward_net_as_fn(shaped=False),
)
reward_registry.register(key="zero", value=load_zero)
@util.docstring_parameter(reward_types=", ".join(reward_registry.keys()))
def load_reward(reward_type: str, reward_path: str, venv: VecEnv) -> common.RewardFn:
"""Load serialized reward.
Args:
reward_type: A key in `reward_registry`. Valid types
include {reward_types}.
reward_path: A path specifying the reward.
venv: An environment that the policy is to be used with.
Returns:
The deserialized reward.
"""
reward_loader = reward_registry.get(reward_type)
return reward_loader(reward_path, venv)
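# Usage sketch (hedged, not part of the original module): given some VecEnv `venv`
# and batched transition arrays, the registered "zero" loader can be exercised as
#
#     reward_fn = load_reward("zero", "unused-path", venv)
#     rews = reward_fn(obs, acts, next_obs, dones)   # np.ndarray of shape (len(obs),)
#
# The "RewardNet_shaped"/"RewardNet_unshaped" entries instead expect `reward_path`
# to point at a torch-serialized reward network.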
| 30.516484
| 85
| 0.658624
|
97f04cbfbea51f0e8c496093f5fea06cbd8fa872
| 643
|
py
|
Python
|
test/programytest/processors/pre/test_demojize.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
test/programytest/processors/pre/test_demojize.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
test/programytest/processors/pre/test_demojize.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.context import ClientContext
from programy.processors.pre.demojize import DemojizePreProcessor
from programytest.client import TestClient
class DemoizeTests(unittest.TestCase):
def setUp(self):
self.client = TestClient()
self.bot = Bot(config=BotConfiguration(), client=self.client)
def test_demojize(self):
processor = DemojizePreProcessor()
context = ClientContext(self.client, "testid")
self.assertEqual("Python is :thumbs_up:", processor.process(context, 'Python is 👍'))
| 27.956522
| 92
| 0.746501
|
5717593d847a39f9a4abde8e75215409379f0e21
| 19,616
|
py
|
Python
|
reversion_compare/views.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
reversion_compare/views.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 8
|
2015-12-28T18:56:19.000Z
|
2019-04-01T17:33:48.000Z
|
reversion_compare/views.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
# coding: utf-8
"""
Admin extensions for django-reversion-compare
:copyleft: 2012 by the django-reversion-compare team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import logging
from django import template
from django.shortcuts import render
from django.conf.urls.defaults import patterns, url
from django.contrib.admin.util import unquote, quote
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template.loader import render_to_string
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from reversion.admin import VersionAdmin
from reversion.models import Version, VERSION_TYPE_CHOICES, VERSION_CHANGE, \
has_int_pk
import reversion
from reversion_compare.forms import SelectDiffForm
from reversion_compare.helpers import html_diff, compare_queryset
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from mozdns.record.utils import get_obj_meta
import pdb
logger = logging.getLogger(__name__)
VERSION_TYPE_DICT = dict(VERSION_TYPE_CHOICES)
class CompareObject(object):
def __init__(self, field, field_name, obj, version, has_int_pk, adapter):
self.field = field
self.field_name = field_name
self.obj = obj
self.version = version # instance of reversion.models.Version()
self.has_int_pk = has_int_pk
self.adapter = adapter
self.value = version.field_dict[field_name]
def _obj_repr(self, obj):
# FIXME: How to create a better representation of the current value?
try:
return unicode(obj)
except Exception, e:
return repr(obj)
def _to_string_ManyToManyField(self):
queryset = self.get_many_to_many()
return ", ".join([self._obj_repr(item) for item in queryset])
def _to_string_ForeignKey(self):
obj = self.get_related()
return self._obj_repr(obj)
def to_string(self):
internal_type = self.field.get_internal_type()
func_name = "_to_string_%s" % internal_type
if hasattr(self, func_name):
func = getattr(self, func_name)
return func()
if isinstance(self.value, basestring):
return self.value
else:
return self._obj_repr(self.value)
def __cmp__(self, other):
raise NotImplementedError()
def __eq__(self, other):
assert self.field.get_internal_type() != "ManyToManyField"
if self.value != other.value:
return False
if self.field.get_internal_type() == "ForeignKey": # FIXME!
if self.version.field_dict != other.version.field_dict:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def get_related(self):
if self.field.rel is not None:
obj = self.version.object_version.object
related = getattr(obj, self.field.name)
return related
def get_many_to_many(self):
"""
returns a queryset with all many2many objects
"""
if self.field.get_internal_type() != "ManyToManyField": # FIXME!
return (None, None, None)
if self.has_int_pk:
ids = [int(v) for v in self.value] # is: version.field_dict[field.name]
# get instance of reversion.models.Revision(): A group of related object versions.
old_revision = self.version.revision
# Get the related model of the current field:
related_model = self.field.rel.to
# Get a queryset with all related objects.
queryset = old_revision.version_set.filter(
content_type=ContentType.objects.get_for_model(related_model),
object_id__in=ids
)
# logger.debug("m2m queryset: %s", queryset)
versions = sorted(list(queryset))
# logger.debug("versions: %s", versions)
if self.has_int_pk:
# The primary_keys would be stored in a text field -> convert it to integers
# This is interesting in different places!
for version in versions:
version.object_id = int(version.object_id)
missing_objects = []
missing_ids = []
if self.field_name not in self.adapter.follow:
# This model was not registered with follow relations
# Try to fill missing related objects
target_ids = set(ids)
actual_ids = set([version.object_id for version in versions])
missing_ids1 = target_ids.difference(actual_ids)
# logger.debug(self.field_name, "target: %s - actual: %s - missing: %s" % (target_ids, actual_ids, missing_ids1))
if missing_ids1:
missing_objects = related_model.objects.all().filter(pk__in=missing_ids1)
missing_ids = list(target_ids.difference(set(missing_objects.values_list('pk', flat=True))))
return versions, missing_objects, missing_ids
def get_debug(self):
if not settings.DEBUG:
return
result = [
"field..............: %r" % self.field,
"field_name.........: %r" % self.field_name,
"field internal type: %r" % self.field.get_internal_type(),
"field_dict.........: %s" % repr(self.version.field_dict),
"adapter............: %r (follow: %r)" % (self.adapter, ", ".join(self.adapter.follow)),
"has_int_pk ........: %r" % self.has_int_pk,
"obj................: %r (pk: %s, id: %s)" % (self.obj, self.obj.pk, id(self.obj)),
"version............: %r (pk: %s, id: %s)" % (self.version, self.version.pk, id(self.version)),
"value..............: %r" % self.value,
"to string..........: %s" % repr(self.to_string()),
"related............: %s" % repr(self.get_related()),
]
m2m_versions, missing_objects, missing_ids = self.get_many_to_many()
if m2m_versions or missing_objects or missing_ids:
result.append(
"many-to-many.......: %s" % ", ".join(
["%s (%s)" % (item, VERSION_TYPE_DICT[item.type]) for item in m2m_versions]
)
)
if missing_objects:
result.append("missing m2m objects: %s" % repr(missing_objects))
else:
result.append("missing m2m objects: (has no)")
if missing_ids:
result.append("missing m2m IDs....: %s" % repr(missing_ids))
else:
result.append("missing m2m IDs....: (has no)")
else:
result.append("many-to-many.......: (has no)")
return result
def debug(self):
if not settings.DEBUG:
return
for item in self.get_debug():
logger.debug(item)
class CompareObjects(object):
def __init__(self, field, field_name, obj, version1, version2, manager):
self.field = field
self.field_name = field_name
self.obj = obj
model = self.obj.__class__
self.has_int_pk = has_int_pk(model)
self.adapter = manager.get_adapter(model) # VersionAdapter instance
# is a related field (ForeignKey, ManyToManyField etc.)
self.is_related = self.field.rel is not None
if not self.is_related:
self.follow = None
elif self.field_name in self.adapter.follow:
self.follow = True
else:
self.follow = False
self.compare_obj1 = CompareObject(field, field_name, obj, version1, self.has_int_pk, self.adapter)
self.compare_obj2 = CompareObject(field, field_name, obj, version2, self.has_int_pk, self.adapter)
self.value1 = self.compare_obj1.value
self.value2 = self.compare_obj2.value
def changed(self):
""" return True if at least one field has changed values. """
if self.field.get_internal_type() == "ManyToManyField": # FIXME!
info = self.get_m2m_change_info()
keys = (
"changed_items", "removed_items", "added_items",
"removed_missing_objects", "added_missing_objects"
)
for key in keys:
if info[key]:
return True
return False
return self.compare_obj1 != self.compare_obj2
def _get_result(self, compare_obj, func_name):
func = getattr(compare_obj, func_name)
result = func()
return result
def _get_both_results(self, func_name):
result1 = self._get_result(self.compare_obj1, func_name)
result2 = self._get_result(self.compare_obj2, func_name)
return (result1, result2)
def to_string(self):
return self._get_both_results("to_string")
def get_related(self):
return self._get_both_results("get_related")
def get_many_to_many(self):
#return self._get_both_results("get_many_to_many")
m2m_data1, m2m_data2 = self._get_both_results("get_many_to_many")
return m2m_data1, m2m_data2
M2M_CHANGE_INFO = None
def get_m2m_change_info(self):
if self.M2M_CHANGE_INFO is not None:
return self.M2M_CHANGE_INFO
m2m_data1, m2m_data2 = self.get_many_to_many()
result1, missing_objects1, missing_ids1 = m2m_data1
result2, missing_objects2, missing_ids2 = m2m_data2
# missing_objects_pk1 = [obj.pk for obj in missing_objects1]
# missing_objects_pk2 = [obj.pk for obj in missing_objects2]
missing_objects_dict2 = dict([(obj.pk, obj) for obj in missing_objects2])
# logger.debug("missing_objects1: %s", missing_objects1)
# logger.debug("missing_objects2: %s", missing_objects2)
# logger.debug("missing_ids1: %s", missing_ids1)
# logger.debug("missing_ids2: %s", missing_ids2)
missing_object_set1 = set(missing_objects1)
missing_object_set2 = set(missing_objects2)
# logger.debug("%s %s", missing_object_set1, missing_object_set2)
same_missing_objects = missing_object_set1.intersection(missing_object_set2)
removed_missing_objects = missing_object_set1.difference(missing_object_set2)
added_missing_objects = missing_object_set2.difference(missing_object_set1)
# logger.debug("same_missing_objects: %s", same_missing_objects)
# logger.debug("removed_missing_objects: %s", removed_missing_objects)
# logger.debug("added_missing_objects: %s", added_missing_objects)
# Create same_items, removed_items, added_items with related m2m items
changed_items = []
removed_items = []
added_items = []
same_items = []
primary_keys1 = [version.object_id for version in result1]
primary_keys2 = [version.object_id for version in result2]
result_dict1 = dict([(version.object_id, version) for version in result1])
result_dict2 = dict([(version.object_id, version) for version in result2])
# logger.debug(result_dict1)
# logger.debug(result_dict2)
for primary_key in set(primary_keys1).union(set(primary_keys2)):
if primary_key in result_dict1:
version1 = result_dict1[primary_key]
else:
version1 = None
if primary_key in result_dict2:
version2 = result_dict2[primary_key]
else:
version2 = None
#logger.debug("%r - %r - %r", primary_key, version1, version2)
if version1 is not None and version2 is not None:
# In both -> version changed or the same
if version1.serialized_data == version2.serialized_data:
#logger.debug("same item: %s", version1)
same_items.append(version1)
else:
changed_items.append((version1, version2))
elif version1 is not None and version2 is None:
# In 1 but not in 2 -> removed
#logger.debug("%s %s", primary_key, missing_objects_pk2)
#logger.debug("%s %s", repr(primary_key), repr(missing_objects_pk2))
if primary_key in missing_objects_dict2:
missing_object = missing_objects_dict2[primary_key]
added_missing_objects.remove(missing_object)
same_missing_objects.add(missing_object)
continue
removed_items.append(version1)
elif version1 is None and version2 is not None:
# In 2 but not in 1 -> added
#logger.debug("added: %s", version2)
added_items.append(version2)
else:
raise RuntimeError()
self.M2M_CHANGE_INFO = {
"changed_items": changed_items,
"removed_items": removed_items,
"added_items": added_items,
"same_items": same_items,
"same_missing_objects": same_missing_objects,
"removed_missing_objects": removed_missing_objects,
"added_missing_objects": added_missing_objects,
}
return self.M2M_CHANGE_INFO
def debug(self):
if not settings.DEBUG:
return
logger.debug("_______________________________")
logger.debug(" *** CompareObjects debug: ***")
logger.debug("changed: %s", self.changed())
logger.debug("follow: %s", self.follow)
debug1 = self.compare_obj1.get_debug()
debug2 = self.compare_obj2.get_debug()
debug_set1 = set(debug1)
debug_set2 = set(debug2)
logger.debug(" *** same attributes/values in obj1 and obj2: ***")
intersection = debug_set1.intersection(debug_set2)
for item in debug1:
if item in intersection:
logger.debug(item)
logger.debug(" -" * 40)
logger.debug(" *** unique attributes/values from obj1: ***")
difference = debug_set1.difference(debug_set2)
for item in debug1:
if item in difference:
logger.debug(item)
logger.debug(" -" * 40)
logger.debug(" *** unique attributes/values from obj2: ***")
difference = debug_set2.difference(debug_set1)
for item in debug2:
if item in difference:
logger.debug(item)
logger.debug("-"*79)
def _order_version_queryset(queryset):
"""Applies the correct ordering to the given version queryset."""
return queryset.order_by("-pk")
def _get_action_list(request, object_, extra_context=None):
"""Renders the history view."""
action_list = [
{
"version": version,
"revision": version.revision,
"url": object_.get_edit_url()
}
for version
in _order_version_queryset(reversion.get_for_object(
object_,
))
]
return action_list
def history_view(request, object_class, object_id, extra_context=None):
"""Renders the history view."""
object_klass = get_obj_meta(object_class).Klass
if not object_klass:
raise Http404
object_ = get_object_or_404(object_klass, pk=object_id)
action_list = _get_action_list(request, object_,
extra_context=extra_context)
if len(action_list) < 2:
# Less than two history items aren't enough to compare ;)
comparable = False
else:
comparable = True
# for pre selecting the compare radio buttons depend on the ordering:
action_list[0]["first"] = True
action_list[1]["second"] = True
return render(request, 'reversion_compare/object_history.html',
{"object_id": object_.pk,
"object": object_,
"action_list": action_list,
"comparable": comparable,
"compare_view": True})
def compare_view(request, object_class, extra_context=None):
"""
Compare two versions.
Uses compare() to create the html diff.
"""
form = SelectDiffForm(request.GET)
if not form.is_valid():
msg = "Wrong version IDs."
if settings.DEBUG:
msg += " (form errors: %s)" % ", ".join(form.errors)
raise Http404(msg)
object_id = request.GET.get('object_id', None)
if not object_id:
raise Http404("What object are you looking for?")
version_id1 = form.cleaned_data["version_id1"]
version_id2 = form.cleaned_data["version_id2"]
object_klass = get_obj_meta(object_class).Klass
if not object_klass:
raise Http404("No {0} object type with id {1}".format(object_class,
object_id))
obj = get_object_or_404(object_klass, pk=object_id)
queryset = reversion.get_for_object(obj)
version1 = get_object_or_404(queryset, pk=version_id1)
version2 = get_object_or_404(queryset, pk=version_id2)
if version_id1 > version_id2:
# Compare always the newest one with the older one
version1, version2 = version2, version1
compare_data, has_unfollowed_fields = compare(obj, version1, version2)
opts = obj._meta
context = {
"opts": opts,
"app_label": opts.app_label,
"module_name": capfirst(opts.verbose_name),
"title": _("Compare {0}".format(version1.object_repr)),
"obj": obj,
"compare_data": compare_data,
"has_unfollowed_fields": has_unfollowed_fields,
"version1": version1,
"version2": version2,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, 'reversion_compare/compare.html',
context)
def compare(obj, version1, version2):
"""
Create a generic html diff from the obj between version1 and version2:
A diff of every changed field's values.
This function can be overridden to create a nicer diff view
tailored to the model.
"""
diff = []
# Create a list of all normal fields and append many-to-many fields
fields = [field for field in obj._meta.fields]
fields += obj._meta.many_to_many
compare_fields = obj.get_api_fields() + ['views']
ignore_fields = []
has_unfollowed_fields = False
for field in fields:
#logger.debug("%s %s %s", field, field.db_type, field.get_internal_type())
field_name = field.name
if compare_fields and field_name not in compare_fields:
continue
if ignore_fields and field_name in ignore_fields:
continue
obj_compare = CompareObjects(field, field_name, obj, version1,
version2, reversion)
#obj_compare.debug()
is_related = obj_compare.is_related
follow = obj_compare.follow
if is_related and not follow:
has_unfollowed_fields = True
if not obj_compare.changed():
# Skip all fields that aren't changed
continue
html = _get_compare(obj_compare)
diff.append({
"field": field,
"is_related": is_related,
"follow": follow,
"diff": html,
})
return diff, has_unfollowed_fields
def _get_compare(obj_compare):
value1, value2 = obj_compare.to_string()
html = html_diff(value1, value2)
return html
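# Request contract recap (derived from the code above, nothing new is introduced):
# compare_view() expects the GET parameters "object_id", "version_id1" and
# "version_id2" (the latter two validated by SelectDiffForm), while history_view()
# receives the object class and primary key from the URLconf. How these views are
# wired into urlpatterns is project-specific and not shown in this module.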
| 35.536232
| 125
| 0.620055
|
f4e1d05ab3b500bab54e4762cb2f93dfc73209a8
| 1,616
|
py
|
Python
|
bus1Topo.py
|
sayonsom/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 9
|
2018-01-29T10:53:25.000Z
|
2021-02-21T19:35:23.000Z
|
bus1Topo.py
|
cyberange-dev0ps/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 1
|
2019-06-04T14:43:34.000Z
|
2021-07-09T08:35:13.000Z
|
bus1Topo.py
|
cyberange-dev0ps/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 12
|
2017-05-04T23:39:10.000Z
|
2021-09-25T17:05:00.000Z
|
#!/usr/bin/python
"""
Custom Smart Substation Communication Topology
----------------------------------
Model built using Sayon (MIT-licensed software).
----------------------------------
W A R N I N G:
----------------------------------
--> Please make sure you know Mininet Python API very well before editing this file.
--> Read Mininet Python API Documentation Here: http://mininet.org/walkthrough/#custom-topologies
--> This program may not work properly if this file gets messed up.
--> To troubleshoot, ask questions on StackOverflow with tags "sayon" and/or "mininet",
--> 24x7 Email Support: <support@ailien.space> or, <sayon@ieee.org>
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.node import CPULimitedHost
from mininet.log import setLogLevel, info
from mininet.link import TCLink
class bus1Topo(Topo):
def __init__(self):
#initializing topology
Topo.__init__(self, link=TCLink)
#Add Switches
s1 = self.addSwitch('s1')
#Add Hosts
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
#Adding Links and Their properties
self.addLink(s1,h1,bw=400, delay='0ms', loss= 0, use_htb=True)
self.addLink(s1,h2,bw=20, delay='5ms', loss= 2, use_htb=True)
self.addLink(s1,h3,bw=20, delay='1ms', loss= 4, use_htb=True)
self.addLink(h1,h3,bw=648, delay='1ms', loss= 0, use_htb=True)
def perfTest():
topos = { 'bus1topo': ( lambda: bus1Topo() )}
if __name__ == '__main__':
setLogLevel( 'info' )
# Note: perfTest() above only builds the topos dict and does not start a network; see the usage sketch below
perfTest()
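# Usage sketch (hedged, not part of the original script): to actually run this
# topology one would typically either expose the topos dict at module level so that
#
#     sudo mn --custom bus1Topo.py --topo bus1topo
#
# can pick it up, or drive Mininet programmatically, e.g.
#
#     net = Mininet(topo=bus1Topo(), host=CPULimitedHost, link=TCLink)
#     net.start()
#     dumpNodeConnections(net.hosts)
#     net.pingAll()
#     net.stop()
#
# which is also what the otherwise-unused imports at the top of the file suggest.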
| 26.491803
| 97
| 0.672649
|
7924ff3e637aa98a856b8c8fd3b26535036699a6
| 2,136
|
py
|
Python
|
examples/semantic_indexing/ann_util.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/semantic_indexing/ann_util.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/semantic_indexing/ann_util.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=UTF-8
import numpy as np
import hnswlib
from paddlenlp.utils.log import logger
def build_index(args, data_loader, model):
index = hnswlib.Index(space='ip', dim=args.output_emb_size)
# Initializing index
# max_elements - the maximum number of elements (capacity). Will throw an exception if exceeded
# during insertion of an element.
# The capacity can be increased by saving/loading the index, see below.
#
# ef_construction - controls index search speed/build speed tradeoff
#
# M - is tightly connected with internal dimensionality of the data. Strongly affects memory consumption (~M)
# Higher M leads to higher accuracy/run_time at fixed ef/efConstruction
index.init_index(max_elements=args.hnsw_max_elements,
ef_construction=args.hnsw_ef,
M=args.hnsw_m)
# Controlling the recall by setting ef:
# higher ef leads to better accuracy, but slower search
index.set_ef(args.hnsw_ef)
# Set number of threads used during batch search/construction
# By default using all available cores
index.set_num_threads(16)
logger.info("start build index..........")
all_embeddings = []
for text_embeddings in model.get_semantic_embedding(data_loader):
all_embeddings.append(text_embeddings.numpy())
all_embeddings = np.concatenate(all_embeddings, axis=0)
index.add_items(all_embeddings)
logger.info("Total index number:{}".format(index.get_current_count()))
return index
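# Usage sketch (hedged, not part of the original module): once built, the index is
# typically queried with hnswlib's knn_query, e.g.
#
#     index = build_index(args, data_loader, model)
#     labels, distances = index.knn_query(query_embeddings, k=10)
#
# where `query_embeddings` is a float32 array of shape (n, args.output_emb_size)
# produced by the same model.get_semantic_embedding pipeline.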
| 35.6
| 113
| 0.728933
|
61812894ddd30c9ed2927d6421d39676738a78f2
| 210
|
py
|
Python
|
tests/unittest/test_mod/test_sys_simulation/__init__.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 5,263
|
2016-07-20T10:41:10.000Z
|
2022-03-29T08:24:34.000Z
|
tests/unittest/test_mod/test_sys_simulation/__init__.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 572
|
2016-07-28T07:51:02.000Z
|
2022-02-09T15:28:03.000Z
|
tests/unittest/test_mod/test_sys_simulation/__init__.py
|
terencehk/rqalpha
|
349e6a0a8e45449646acd6063cdec06df3bc1171
|
[
"Apache-2.0"
] | 1,769
|
2016-07-20T11:11:55.000Z
|
2022-03-31T10:11:38.000Z
|
import os
def load_tests(loader, standard_tests, pattern):
this_dir = os.path.dirname(__file__)
standard_tests.addTests(loader.discover(start_dir=this_dir, pattern=pattern))
return standard_tests
| 26.25
| 81
| 0.780952
|
40bb7f79eaf0753d6b3f702245d6e3a5dfb042ee
| 127,691
|
py
|
Python
|
lib/googleappengine/python/google/appengine/tools/dev_appserver.py
|
mtktauseef/wordpresstoblogger
|
151cc2e1b6c7f4f835dc51dfb8f50a1b0d5caa74
|
[
"Apache-2.0"
] | null | null | null |
lib/googleappengine/python/google/appengine/tools/dev_appserver.py
|
mtktauseef/wordpresstoblogger
|
151cc2e1b6c7f4f835dc51dfb8f50a1b0d5caa74
|
[
"Apache-2.0"
] | null | null | null |
lib/googleappengine/python/google/appengine/tools/dev_appserver.py
|
mtktauseef/wordpresstoblogger
|
151cc2e1b6c7f4f835dc51dfb8f50a1b0d5caa74
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pure-Python application server for testing applications locally.
Given a port and the paths to a valid application directory (with an 'app.yaml'
file), the external library directory, and a relative URL to use for logins,
creates an HTTP server that can be used to test an application locally. Uses
stubs instead of actual APIs when SetupStubs() is called first.
Example:
root_path = '/path/to/application/directory'
login_url = '/login'
port = 8080
template_dir = '/path/to/appserver/templates'
server = dev_appserver.CreateServer(root_path, login_url, port, template_dir)
server.serve_forever()
"""
from google.appengine.tools import os_compat
import __builtin__
import BaseHTTPServer
import Cookie
import base64
import cStringIO
import cgi
import cgitb
try:
import distutils.util
except ImportError:
pass
import dummy_thread
import email.Utils
import errno
import heapq
import httplib
import imp
import inspect
import itertools
import locale
import logging
import mimetools
import mimetypes
import os
import pickle
import pprint
import random
import select
import shutil
import tempfile
import re
import sre_compile
import sre_constants
import sre_parse
import socket
import sys
import time
import traceback
import types
import urlparse
import urllib
import google
from google.pyglib import gexcept
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import blobstore
from google.appengine.api import croninfo
from google.appengine.api import datastore_admin
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail
from google.appengine.api import mail_stub
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api import yaml_errors
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.labs.taskqueue import taskqueue_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine import dist
from google.appengine.tools import dev_appserver_blobstore
from google.appengine.tools import dev_appserver_index
from google.appengine.tools import dev_appserver_login
from google.appengine.tools import dev_appserver_oauth
from google.appengine.tools import dev_appserver_upload
PYTHON_LIB_VAR = '$PYTHON_LIB'
DEVEL_CONSOLE_PATH = PYTHON_LIB_VAR + '/google/appengine/ext/admin'
FILE_MISSING_EXCEPTIONS = frozenset([errno.ENOENT, errno.ENOTDIR])
MAX_URL_LENGTH = 2047
HEADER_TEMPLATE = 'logging_console_header.html'
SCRIPT_TEMPLATE = 'logging_console.js'
MIDDLE_TEMPLATE = 'logging_console_middle.html'
FOOTER_TEMPLATE = 'logging_console_footer.html'
DEFAULT_ENV = {
'GATEWAY_INTERFACE': 'CGI/1.1',
'AUTH_DOMAIN': 'gmail.com',
'USER_ORGANIZATION': '',
'TZ': 'UTC',
}
DEFAULT_SELECT_DELAY = 30.0
for ext, mime_type in mail.EXTENSION_MIME_MAP.iteritems():
mimetypes.add_type(mime_type, '.' + ext)
MAX_RUNTIME_RESPONSE_SIZE = 10 << 20
MAX_REQUEST_SIZE = 10 * 1024 * 1024
COPY_BLOCK_SIZE = 1 << 20
API_VERSION = '1'
SITE_PACKAGES = os.path.normcase(os.path.join(os.path.dirname(os.__file__),
'site-packages'))
DEVEL_PAYLOAD_HEADER = 'HTTP_X_APPENGINE_DEVELOPMENT_PAYLOAD'
DEVEL_PAYLOAD_RAW_HEADER = 'X-AppEngine-Development-Payload'
class Error(Exception):
"""Base-class for exceptions in this module."""
class InvalidAppConfigError(Error):
"""The supplied application configuration file is invalid."""
class AppConfigNotFoundError(Error):
"""Application configuration file not found."""
class TemplatesNotLoadedError(Error):
"""Templates for the debugging console were not loaded."""
def SplitURL(relative_url):
"""Splits a relative URL into its path and query-string components.
Args:
relative_url: String containing the relative URL (often starting with '/')
to split. Should be properly escaped as www-form-urlencoded data.
Returns:
Tuple (script_name, query_string) where:
script_name: Relative URL of the script that was accessed.
query_string: String containing everything after the '?' character.
"""
(unused_scheme, unused_netloc, path, query,
unused_fragment) = urlparse.urlsplit(relative_url)
return path, query
def GetFullURL(server_name, server_port, relative_url):
"""Returns the full, original URL used to access the relative URL.
Args:
server_name: Name of the local host, or the value of the 'host' header
from the request.
server_port: Port on which the request was served (string or int).
relative_url: Relative URL that was accessed, including query string.
Returns:
String containing the original URL.
"""
if str(server_port) != '80':
netloc = '%s:%s' % (server_name, server_port)
else:
netloc = server_name
return 'http://%s%s' % (netloc, relative_url)
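# Informal examples for the two URL helpers above:
#   SplitURL('/foo/bar?baz=1&x=2')           -> ('/foo/bar', 'baz=1&x=2')
#   GetFullURL('localhost', 8080, '/a?b=1')  -> 'http://localhost:8080/a?b=1'
#   GetFullURL('example.com', 80, '/a?b=1')  -> 'http://example.com/a?b=1'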
def CopyStreamPart(source, destination, content_size):
"""Copy a portion of a stream from one file-like object to another.
Args:
source: Source stream to copy from.
destination: Destination stream to copy to.
content_size: Maximum bytes to copy.
Returns:
Number of bytes actually copied.
"""
bytes_copied = 0
bytes_left = content_size
while bytes_left > 0:
bytes = source.read(min(bytes_left, COPY_BLOCK_SIZE))
bytes_read = len(bytes)
if bytes_read == 0:
break
destination.write(bytes)
bytes_copied += bytes_read
bytes_left -= bytes_read
return bytes_copied
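# Informal usage sketch (values are illustrative):
#   src = cStringIO.StringIO('x' * (3 * 1024 * 1024))
#   dst = cStringIO.StringIO()
#   CopyStreamPart(src, dst, 2 * 1024 * 1024)  -> 2097152 (copied in 1 MB blocks)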
class AppServerRequest(object):
"""Encapsulates app-server request.
Object used to hold a full appserver request. Used as a container that is
passed through the request forward chain and ultimately sent to the
URLDispatcher instances.
Attributes:
relative_url: String containing the URL accessed.
path: Local path of the resource that was matched; back-references will be
replaced by values matched in the relative_url. Path may be relative
or absolute, depending on the resource being served (e.g., static files
will have an absolute path; scripts will be relative).
headers: Instance of mimetools.Message with headers from the request.
infile: File-like object with input data from the request.
force_admin: Allow request admin-only URLs to proceed regardless of whether
user is logged in or is an admin.
"""
ATTRIBUTES = ['relative_url',
'path',
'headers',
'infile',
'force_admin',
]
def __init__(self,
relative_url,
path,
headers,
infile,
force_admin=False):
"""Constructor.
Args:
relative_url: Mapped directly to attribute.
path: Mapped directly to attribute.
headers: Mapped directly to attribute.
infile: Mapped directly to attribute.
force_admin: Mapped directly to attribute.
"""
self.relative_url = relative_url
self.path = path
self.headers = headers
self.infile = infile
self.force_admin = force_admin
if DEVEL_PAYLOAD_RAW_HEADER in self.headers:
self.force_admin = True
def __eq__(self, other):
"""Used mainly for testing.
Returns:
True if all fields of both requests are equal, else False.
"""
    if type(self) != type(other):
      return False
    for attribute in self.ATTRIBUTES:
      if getattr(self, attribute) != getattr(other, attribute):
        return False
    return True
def __repr__(self):
"""String representation of request.
Used mainly for testing.
Returns:
String representation of AppServerRequest. Strings of different
request objects that have the same values for all fields compare
as equal.
"""
results = []
    for attribute in self.ATTRIBUTES:
      results.append('%s: %s' % (attribute, getattr(self, attribute)))
return '<AppServerRequest %s>' % ' '.join(results)
class URLDispatcher(object):
"""Base-class for handling HTTP requests."""
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatch and handle an HTTP request.
base_env_dict should contain at least these CGI variables:
REQUEST_METHOD, REMOTE_ADDR, SERVER_SOFTWARE, SERVER_NAME,
SERVER_PROTOCOL, SERVER_PORT
Args:
request: AppServerRequest instance.
outfile: File-like object where output data should be written.
base_env_dict: Dictionary of CGI environment parameters if available.
Defaults to None.
Returns:
None if request handling is complete.
A new AppServerRequest instance if internal redirect is required.
"""
raise NotImplementedError
def EndRedirect(self, dispatched_output, original_output):
"""Process the end of an internal redirect.
This method is called after all subsequent dispatch requests have finished.
By default the output from the dispatched process is copied to the original.
This will not be called on dispatchers that do not return an internal
redirect.
Args:
      dispatched_output: StringIO buffer containing the results from the
        dispatched request.
original_output: The original output file.
"""
original_output.write(dispatched_output.read())
class URLMatcher(object):
"""Matches an arbitrary URL using a list of URL patterns from an application.
Each URL pattern has an associated URLDispatcher instance and path to the
resource's location on disk. See AddURL for more details. The first pattern
that matches an inputted URL will have its associated values returned by
Match().
"""
def __init__(self):
"""Initializer."""
self._url_patterns = []
def AddURL(self, regex, dispatcher, path, requires_login, admin_only,
auth_fail_action):
"""Adds a URL pattern to the list of patterns.
If the supplied regex starts with a '^' or ends with a '$' an
InvalidAppConfigError exception will be raised. Start and end symbols
    are implicitly added to all regexes, meaning we assume that all regexes
consume all input from a URL.
Args:
regex: String containing the regular expression pattern.
dispatcher: Instance of URLDispatcher that should handle requests that
match this regex.
path: Path on disk for the resource. May contain back-references like
r'\1', r'\2', etc, which will be replaced by the corresponding groups
matched by the regex if present.
requires_login: True if the user must be logged-in before accessing this
URL; False if anyone can access this URL.
admin_only: True if the user must be a logged-in administrator to
access the URL; False if anyone can access the URL.
auth_fail_action: either appinfo.AUTH_FAIL_ACTION_REDIRECT (default)
which indicates that the server should redirect to the login page when
an authentication is needed, or appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED
which indicates that the server should just return a 401 Unauthorized
message immediately.
Raises:
TypeError: if dispatcher is not a URLDispatcher sub-class instance.
InvalidAppConfigError: if regex isn't valid.
"""
if not isinstance(dispatcher, URLDispatcher):
raise TypeError('dispatcher must be a URLDispatcher sub-class')
if regex.startswith('^') or regex.endswith('$'):
raise InvalidAppConfigError('regex starts with "^" or ends with "$"')
adjusted_regex = '^%s$' % regex
try:
url_re = re.compile(adjusted_regex)
except re.error, e:
raise InvalidAppConfigError('regex invalid: %s' % e)
match_tuple = (url_re, dispatcher, path, requires_login, admin_only,
auth_fail_action)
self._url_patterns.append(match_tuple)
def Match(self,
relative_url,
split_url=SplitURL):
"""Matches a URL from a request against the list of URL patterns.
The supplied relative_url may include the query string (i.e., the '?'
character and everything following).
Args:
relative_url: Relative URL being accessed in a request.
split_url: Used for dependency injection.
Returns:
Tuple (dispatcher, matched_path, requires_login, admin_only,
auth_fail_action), which are the corresponding values passed to
AddURL when the matching URL pattern was added to this matcher.
The matched_path will have back-references replaced using values
matched by the URL pattern. If no match was found, dispatcher will
be None.
"""
adjusted_url, unused_query_string = split_url(relative_url)
for url_tuple in self._url_patterns:
url_re, dispatcher, path, requires_login, admin_only, auth_fail_action = url_tuple
the_match = url_re.match(adjusted_url)
if the_match:
adjusted_path = the_match.expand(path)
return (dispatcher, adjusted_path, requires_login, admin_only,
auth_fail_action)
return None, None, None, None, None
def GetDispatchers(self):
"""Retrieves the URLDispatcher objects that could be matched.
Should only be used in tests.
Returns:
A set of URLDispatcher objects.
"""
return set([url_tuple[1] for url_tuple in self._url_patterns])
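# Informal usage sketch; "my_dispatcher" stands for any URLDispatcher subclass
# instance and is not defined in this module:
#   matcher = URLMatcher()
#   matcher.AddURL(r'/user/(\d+)', my_dispatcher, r'handlers/user_\1.py',
#                  False, False, appinfo.AUTH_FAIL_ACTION_REDIRECT)
#   matcher.Match('/user/42?tab=posts')
#   -> (my_dispatcher, 'handlers/user_42.py', False, False,
#       appinfo.AUTH_FAIL_ACTION_REDIRECT)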
class MatcherDispatcher(URLDispatcher):
"""Dispatcher across multiple URLMatcher instances."""
def __init__(self,
login_url,
url_matchers,
get_user_info=dev_appserver_login.GetUserInfo,
login_redirect=dev_appserver_login.LoginRedirect):
"""Initializer.
Args:
login_url: Relative URL which should be used for handling user logins.
url_matchers: Sequence of URLMatcher objects.
get_user_info: Used for dependency injection.
login_redirect: Used for dependency injection.
"""
self._login_url = login_url
self._url_matchers = tuple(url_matchers)
self._get_user_info = get_user_info
self._login_redirect = login_redirect
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatches a request to the first matching dispatcher.
Matchers are checked in the order they were supplied to the constructor.
If no matcher matches, a 404 error will be written to the outfile. The
path variable supplied to this method is ignored.
The value of request.path is ignored.
"""
cookies = ', '.join(request.headers.getheaders('cookie'))
email_addr, admin, user_id = self._get_user_info(cookies)
for matcher in self._url_matchers:
dispatcher, matched_path, requires_login, admin_only, auth_fail_action = matcher.Match(request.relative_url)
if dispatcher is None:
continue
logging.debug('Matched "%s" to %s with path %s',
request.relative_url, dispatcher, matched_path)
if ((requires_login or admin_only) and
not email_addr and
not request.force_admin):
logging.debug('Login required, redirecting user')
if auth_fail_action == appinfo.AUTH_FAIL_ACTION_REDIRECT:
self._login_redirect(self._login_url,
base_env_dict['SERVER_NAME'],
base_env_dict['SERVER_PORT'],
request.relative_url,
outfile)
elif auth_fail_action == appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED:
outfile.write('Status: %d Not authorized\r\n'
'\r\n'
'Login required to view page.'
% (httplib.UNAUTHORIZED))
elif admin_only and not admin and not request.force_admin:
outfile.write('Status: %d Not authorized\r\n'
'\r\n'
'Current logged in user %s is not '
'authorized to view this page.'
% (httplib.FORBIDDEN, email_addr))
else:
request.path = matched_path
forward_request = dispatcher.Dispatch(request,
outfile,
base_env_dict=base_env_dict)
if forward_request:
logging.info('Internal redirection to %s',
forward_request.relative_url)
new_outfile = cStringIO.StringIO()
self.Dispatch(forward_request,
new_outfile,
dict(base_env_dict))
new_outfile.seek(0)
dispatcher.EndRedirect(new_outfile, outfile)
return
outfile.write('Status: %d URL did not match\r\n'
'\r\n'
'Not found error: %s did not match any patterns '
'in application configuration.'
% (httplib.NOT_FOUND, request.relative_url))
class ApplicationLoggingHandler(logging.Handler):
"""Python Logging handler that displays the debugging console to users."""
_COOKIE_NAME = '_ah_severity'
_TEMPLATES_INITIALIZED = False
_HEADER = None
_SCRIPT = None
_MIDDLE = None
_FOOTER = None
@staticmethod
def InitializeTemplates(header, script, middle, footer):
"""Initializes the templates used to render the debugging console.
This method must be called before any ApplicationLoggingHandler instances
are created.
Args:
header: The header template that is printed first.
script: The script template that is printed after the logging messages.
middle: The middle element that's printed before the footer.
      footer: The last element that's printed at the end of the document.
"""
ApplicationLoggingHandler._HEADER = header
ApplicationLoggingHandler._SCRIPT = script
ApplicationLoggingHandler._MIDDLE = middle
ApplicationLoggingHandler._FOOTER = footer
ApplicationLoggingHandler._TEMPLATES_INITIALIZED = True
@staticmethod
def AreTemplatesInitialized():
"""Returns True if InitializeTemplates has been called, False otherwise."""
return ApplicationLoggingHandler._TEMPLATES_INITIALIZED
def __init__(self, *args, **kwargs):
"""Initializer.
Args:
args, kwargs: See logging.Handler.
Raises:
TemplatesNotLoadedError exception if the InitializeTemplates method was
not called before creating this instance.
"""
if not self._TEMPLATES_INITIALIZED:
raise TemplatesNotLoadedError
logging.Handler.__init__(self, *args, **kwargs)
self._record_list = []
self._start_time = time.time()
def emit(self, record):
"""Called by the logging module each time the application logs a message.
Args:
record: logging.LogRecord instance corresponding to the newly logged
message.
"""
self._record_list.append(record)
def AddDebuggingConsole(self, relative_url, env, outfile):
"""Prints an HTML debugging console to an output stream, if requested.
Args:
relative_url: Relative URL that was accessed, including the query string.
Used to determine if the parameter 'debug' was supplied, in which case
the console will be shown.
env: Dictionary containing CGI environment variables. Checks for the
HTTP_COOKIE entry to see if the accessing user has any logging-related
cookies set.
outfile: Output stream to which the console should be written if either
a debug parameter was supplied or a logging cookie is present.
"""
unused_script_name, query_string = SplitURL(relative_url)
param_dict = cgi.parse_qs(query_string, True)
cookie_dict = Cookie.SimpleCookie(env.get('HTTP_COOKIE', ''))
if 'debug' not in param_dict and self._COOKIE_NAME not in cookie_dict:
return
outfile.write(self._HEADER)
for record in self._record_list:
self._PrintRecord(record, outfile)
outfile.write(self._MIDDLE)
outfile.write(self._SCRIPT)
outfile.write(self._FOOTER)
def _PrintRecord(self, record, outfile):
"""Prints a single logging record to an output stream.
Args:
record: logging.LogRecord instance to print.
outfile: Output stream to which the LogRecord should be printed.
"""
message = cgi.escape(record.getMessage())
level_name = logging.getLevelName(record.levelno).lower()
level_letter = level_name[:1].upper()
time_diff = record.created - self._start_time
outfile.write('<span class="_ah_logline_%s">\n' % level_name)
outfile.write('<span class="_ah_logline_%s_prefix">%2.5f %s ></span>\n'
% (level_name, time_diff, level_letter))
outfile.write('%s\n' % message)
outfile.write('</span>\n')
_IGNORE_REQUEST_HEADERS = frozenset(['content-type', 'content-length',
'accept-encoding', 'transfer-encoding'])
def SetupEnvironment(cgi_path,
relative_url,
headers,
infile,
split_url=SplitURL,
get_user_info=dev_appserver_login.GetUserInfo):
"""Sets up environment variables for a CGI.
Args:
cgi_path: Full file-system path to the CGI being executed.
relative_url: Relative URL used to access the CGI.
headers: Instance of mimetools.Message containing request headers.
infile: File-like object with input data from the request.
split_url, get_user_info: Used for dependency injection.
Returns:
Dictionary containing CGI environment variables.
"""
env = DEFAULT_ENV.copy()
script_name, query_string = split_url(relative_url)
env['SCRIPT_NAME'] = ''
env['QUERY_STRING'] = query_string
env['PATH_INFO'] = urllib.unquote(script_name)
env['PATH_TRANSLATED'] = cgi_path
env['CONTENT_TYPE'] = headers.getheader('content-type',
'application/x-www-form-urlencoded')
env['CONTENT_LENGTH'] = headers.getheader('content-length', '')
cookies = ', '.join(headers.getheaders('cookie'))
email_addr, admin, user_id = get_user_info(cookies)
env['USER_EMAIL'] = email_addr
env['USER_ID'] = user_id
if admin:
env['USER_IS_ADMIN'] = '1'
if env['AUTH_DOMAIN'] == '*':
auth_domain = 'gmail.com'
parts = email_addr.split('@')
if len(parts) == 2 and parts[1]:
auth_domain = parts[1]
env['AUTH_DOMAIN'] = auth_domain
for key in headers:
if key in _IGNORE_REQUEST_HEADERS:
continue
adjusted_name = key.replace('-', '_').upper()
env['HTTP_' + adjusted_name] = ', '.join(headers.getheaders(key))
if DEVEL_PAYLOAD_HEADER in env:
del env[DEVEL_PAYLOAD_HEADER]
new_data = base64.standard_b64decode(infile.getvalue())
infile.seek(0)
infile.truncate()
infile.write(new_data)
infile.seek(0)
env['CONTENT_LENGTH'] = str(len(new_data))
return env
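# Informal sketch of the resulting environment for a request to
# '/profile?edit=1' handled by '/app/handlers/profile.py':
#   env['PATH_INFO']       == '/profile'
#   env['QUERY_STRING']    == 'edit=1'
#   env['PATH_TRANSLATED'] == '/app/handlers/profile.py'
#   env['USER_EMAIL']      == '' unless a dev_appserver_login cookie is present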
def NotImplementedFake(*args, **kwargs):
"""Fake for methods/functions that are not implemented in the production
environment.
"""
raise NotImplementedError('This class/method is not available.')
class NotImplementedFakeClass(object):
"""Fake class for classes that are not implemented in the production env.
"""
__init__ = NotImplementedFake
def IsEncodingsModule(module_name):
"""Determines if the supplied module is related to encodings in any way.
Encodings-related modules cannot be reloaded, so they need to be treated
specially when sys.modules is modified in any way.
Args:
module_name: Absolute name of the module regardless of how it is imported
into the local namespace (e.g., foo.bar.baz).
Returns:
True if it's an encodings-related module; False otherwise.
"""
if (module_name in ('codecs', 'encodings') or
module_name.startswith('encodings.')):
return True
return False
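# Informal examples:
#   IsEncodingsModule('codecs')          -> True
#   IsEncodingsModule('encodings.utf_8') -> True
#   IsEncodingsModule('os')              -> False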
def ClearAllButEncodingsModules(module_dict):
"""Clear all modules in a module dictionary except for those modules that
are in any way related to encodings.
Args:
module_dict: Dictionary in the form used by sys.modules.
"""
for module_name in module_dict.keys():
if not IsEncodingsModule(module_name):
del module_dict[module_name]
def FakeURandom(n):
"""Fake version of os.urandom."""
bytes = ''
for _ in range(n):
bytes += chr(random.randint(0, 255))
return bytes
def FakeUname():
"""Fake version of os.uname."""
return ('Linux', '', '', '', '')
def FakeUnlink(path):
"""Fake version of os.unlink."""
if os.path.isdir(path):
raise OSError(errno.ENOENT, "Is a directory", path)
else:
raise OSError(errno.EPERM, "Operation not permitted", path)
def FakeReadlink(path):
"""Fake version of os.readlink."""
raise OSError(errno.EINVAL, "Invalid argument", path)
def FakeAccess(path, mode):
"""Fake version of os.access where only reads are supported."""
if not os.path.exists(path) or mode != os.R_OK:
return False
else:
return True
def FakeSetLocale(category, value=None, original_setlocale=locale.setlocale):
"""Fake version of locale.setlocale that only supports the default."""
if value not in (None, '', 'C', 'POSIX'):
raise locale.Error('locale emulation only supports "C" locale')
return original_setlocale(category, 'C')
def FakeOpen(filename, flags, mode=0777):
"""Fake version of os.open."""
raise OSError(errno.EPERM, "Operation not permitted", filename)
def FakeRename(src, dst):
"""Fake version of os.rename."""
raise OSError(errno.EPERM, "Operation not permitted", src)
def FakeUTime(path, times):
"""Fake version of os.utime."""
raise OSError(errno.EPERM, "Operation not permitted", path)
def FakeGetPlatform():
"""Fake distutils.util.get_platform on OS/X. Pass-through otherwise."""
if sys.platform == 'darwin':
return 'macosx-'
else:
return distutils.util.get_platform()
def IsPathInSubdirectories(filename,
subdirectories,
normcase=os.path.normcase):
"""Determines if a filename is contained within one of a set of directories.
Args:
filename: Path of the file (relative or absolute).
subdirectories: Iterable collection of paths to subdirectories which the
given filename may be under.
normcase: Used for dependency injection.
Returns:
True if the supplied filename is in one of the given sub-directories or
its hierarchy of children. False otherwise.
"""
file_dir = normcase(os.path.dirname(os.path.abspath(filename)))
for parent in subdirectories:
fixed_parent = normcase(os.path.abspath(parent))
if os.path.commonprefix([file_dir, fixed_parent]) == fixed_parent:
return True
return False
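# Informal examples (POSIX-style paths):
#   IsPathInSubdirectories('/app/templates/index.html', ['/app'])  -> True
#   IsPathInSubdirectories('/etc/passwd', ['/app'])                -> False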
SHARED_MODULE_PREFIXES = set([
'google',
'logging',
'sys',
'warnings',
're',
'sre_compile',
'sre_constants',
'sre_parse',
'email',
'wsgiref',
])
NOT_SHARED_MODULE_PREFIXES = set([
'google.appengine.ext',
])
def ModuleNameHasPrefix(module_name, prefix_set):
"""Determines if a module's name belongs to a set of prefix strings.
Args:
module_name: String containing the fully qualified module name.
prefix_set: Iterable set of module name prefixes to check against.
Returns:
True if the module_name belongs to the prefix set or is a submodule of
any of the modules specified in the prefix_set. Otherwise False.
"""
for prefix in prefix_set:
if prefix == module_name:
return True
if module_name.startswith(prefix + '.'):
return True
return False
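# Informal examples:
#   ModuleNameHasPrefix('google.appengine.api', SHARED_MODULE_PREFIXES)        -> True
#   ModuleNameHasPrefix('google.appengine.ext.db', NOT_SHARED_MODULE_PREFIXES) -> True
#   ModuleNameHasPrefix('googley', set(['google']))                            -> False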
def SetupSharedModules(module_dict):
"""Creates a module dictionary for the hardened part of the process.
Module dictionary will contain modules that should be shared between the
hardened and unhardened parts of the process.
Args:
module_dict: Module dictionary from which existing modules should be
pulled (usually sys.modules).
Returns:
A new module dictionary.
"""
output_dict = {}
for module_name, module in module_dict.iteritems():
if module is None:
continue
if IsEncodingsModule(module_name):
output_dict[module_name] = module
continue
shared_prefix = ModuleNameHasPrefix(module_name, SHARED_MODULE_PREFIXES)
banned_prefix = ModuleNameHasPrefix(module_name, NOT_SHARED_MODULE_PREFIXES)
if shared_prefix and not banned_prefix:
output_dict[module_name] = module
return output_dict
def GeneratePythonPaths(*p):
"""Generate all valid filenames for the given file.
Args:
p: Positional args are the folders to the file and finally the file
without a suffix.
Returns:
A list of strings representing the given path to a file with each valid
suffix for this python build.
"""
suffixes = imp.get_suffixes()
return [os.path.join(*p) + s for s, m, t in suffixes]
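# Informal example; the exact suffix list comes from imp.get_suffixes() and
# therefore varies with the interpreter build:
#   GeneratePythonPaths('Crypto', 'Hash', 'MD5')
#   -> ['Crypto/Hash/MD5.so', 'Crypto/Hash/MD5module.so',
#       'Crypto/Hash/MD5.py', 'Crypto/Hash/MD5.pyc']   (typical CPython 2.x on Linux)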
class FakeFile(file):
"""File sub-class that enforces the security restrictions of the production
environment.
"""
ALLOWED_MODES = frozenset(['r', 'rb', 'U', 'rU'])
ALLOWED_FILES = set(os.path.normcase(filename)
for filename in mimetypes.knownfiles
if os.path.isfile(filename))
ALLOWED_DIRS = set([
os.path.normcase(os.path.realpath(os.path.dirname(os.__file__))),
os.path.normcase(os.path.abspath(os.path.dirname(os.__file__))),
os.path.normcase(os.path.dirname(os.path.realpath(os.__file__))),
os.path.normcase(os.path.dirname(os.path.abspath(os.__file__))),
])
NOT_ALLOWED_DIRS = set([
SITE_PACKAGES,
])
ALLOWED_SITE_PACKAGE_DIRS = set(
os.path.normcase(os.path.abspath(os.path.join(SITE_PACKAGES, path)))
for path in [
])
ALLOWED_SITE_PACKAGE_FILES = set(
os.path.normcase(os.path.abspath(os.path.join(
os.path.dirname(os.__file__), 'site-packages', path)))
for path in itertools.chain(*[
[os.path.join('Crypto')],
GeneratePythonPaths('Crypto', '__init__'),
[os.path.join('Crypto', 'Cipher')],
GeneratePythonPaths('Crypto', 'Cipher', '__init__'),
GeneratePythonPaths('Crypto', 'Cipher', 'AES'),
GeneratePythonPaths('Crypto', 'Cipher', 'ARC2'),
GeneratePythonPaths('Crypto', 'Cipher', 'ARC4'),
GeneratePythonPaths('Crypto', 'Cipher', 'Blowfish'),
GeneratePythonPaths('Crypto', 'Cipher', 'CAST'),
GeneratePythonPaths('Crypto', 'Cipher', 'DES'),
GeneratePythonPaths('Crypto', 'Cipher', 'DES3'),
GeneratePythonPaths('Crypto', 'Cipher', 'XOR'),
[os.path.join('Crypto', 'Hash')],
GeneratePythonPaths('Crypto', 'Hash', '__init__'),
GeneratePythonPaths('Crypto', 'Hash', 'HMAC'),
os.path.join('Crypto', 'Hash', 'MD2'),
os.path.join('Crypto', 'Hash', 'MD4'),
GeneratePythonPaths('Crypto', 'Hash', 'MD5'),
GeneratePythonPaths('Crypto', 'Hash', 'SHA'),
os.path.join('Crypto', 'Hash', 'SHA256'),
os.path.join('Crypto', 'Hash', 'RIPEMD'),
[os.path.join('Crypto', 'Protocol')],
GeneratePythonPaths('Crypto', 'Protocol', '__init__'),
GeneratePythonPaths('Crypto', 'Protocol', 'AllOrNothing'),
GeneratePythonPaths('Crypto', 'Protocol', 'Chaffing'),
[os.path.join('Crypto', 'PublicKey')],
GeneratePythonPaths('Crypto', 'PublicKey', '__init__'),
GeneratePythonPaths('Crypto', 'PublicKey', 'DSA'),
GeneratePythonPaths('Crypto', 'PublicKey', 'ElGamal'),
GeneratePythonPaths('Crypto', 'PublicKey', 'RSA'),
GeneratePythonPaths('Crypto', 'PublicKey', 'pubkey'),
GeneratePythonPaths('Crypto', 'PublicKey', 'qNEW'),
[os.path.join('Crypto', 'Util')],
GeneratePythonPaths('Crypto', 'Util', '__init__'),
GeneratePythonPaths('Crypto', 'Util', 'RFC1751'),
GeneratePythonPaths('Crypto', 'Util', 'number'),
GeneratePythonPaths('Crypto', 'Util', 'randpool'),
]))
_original_file = file
_root_path = None
_application_paths = None
_skip_files = None
_static_file_config_matcher = None
_allow_skipped_files = True
_availability_cache = {}
@staticmethod
def SetAllowedPaths(root_path, application_paths):
"""Configures which paths are allowed to be accessed.
Must be called at least once before any file objects are created in the
hardened environment.
Args:
root_path: Absolute path to the root of the application.
application_paths: List of additional paths that the application may
access, this must include the App Engine runtime but
not the Python library directories.
"""
FakeFile._application_paths = (set(os.path.realpath(path)
for path in application_paths) |
set(os.path.abspath(path)
for path in application_paths))
FakeFile._application_paths.add(root_path)
FakeFile._root_path = os.path.join(root_path, '')
FakeFile._availability_cache = {}
@staticmethod
def SetAllowSkippedFiles(allow_skipped_files):
"""Configures access to files matching FakeFile._skip_files.
Args:
allow_skipped_files: Boolean whether to allow access to skipped files
"""
FakeFile._allow_skipped_files = allow_skipped_files
FakeFile._availability_cache = {}
@staticmethod
def SetAllowedModule(name):
"""Allow the use of a module based on where it is located.
Meant to be used by use_library() so that it has a link back into the
trusted part of the interpreter.
Args:
name: Name of the module to allow.
"""
stream, pathname, description = imp.find_module(name)
pathname = os.path.normcase(os.path.abspath(pathname))
if stream:
stream.close()
FakeFile.ALLOWED_FILES.add(pathname)
FakeFile.ALLOWED_FILES.add(os.path.realpath(pathname))
else:
assert description[2] == imp.PKG_DIRECTORY
if pathname.startswith(SITE_PACKAGES):
FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(pathname)
FakeFile.ALLOWED_SITE_PACKAGE_DIRS.add(os.path.realpath(pathname))
else:
FakeFile.ALLOWED_DIRS.add(pathname)
FakeFile.ALLOWED_DIRS.add(os.path.realpath(pathname))
@staticmethod
def SetSkippedFiles(skip_files):
"""Sets which files in the application directory are to be ignored.
Must be called at least once before any file objects are created in the
hardened environment.
Must be called whenever the configuration was updated.
Args:
skip_files: Object with .match() method (e.g. compiled regexp).
"""
FakeFile._skip_files = skip_files
FakeFile._availability_cache = {}
@staticmethod
def SetStaticFileConfigMatcher(static_file_config_matcher):
"""Sets StaticFileConfigMatcher instance for checking if a file is static.
Must be called at least once before any file objects are created in the
hardened environment.
Must be called whenever the configuration was updated.
Args:
static_file_config_matcher: StaticFileConfigMatcher instance.
"""
FakeFile._static_file_config_matcher = static_file_config_matcher
FakeFile._availability_cache = {}
@staticmethod
def IsFileAccessible(filename, normcase=os.path.normcase):
"""Determines if a file's path is accessible.
SetAllowedPaths(), SetSkippedFiles() and SetStaticFileConfigMatcher() must
be called before this method or else all file accesses will raise an error.
Args:
filename: Path of the file to check (relative or absolute). May be a
directory, in which case access for files inside that directory will
be checked.
normcase: Used for dependency injection.
Returns:
True if the file is accessible, False otherwise.
"""
logical_filename = normcase(os.path.abspath(filename))
result = FakeFile._availability_cache.get(logical_filename)
if result is None:
result = FakeFile._IsFileAccessibleNoCache(logical_filename,
normcase=normcase)
FakeFile._availability_cache[logical_filename] = result
return result
@staticmethod
def _IsFileAccessibleNoCache(logical_filename, normcase=os.path.normcase):
"""Determines if a file's path is accessible.
This is an internal part of the IsFileAccessible implementation.
Args:
logical_filename: Absolute path of the file to check.
normcase: Used for dependency injection.
Returns:
True if the file is accessible, False otherwise.
"""
logical_dirfakefile = logical_filename
if os.path.isdir(logical_filename):
logical_dirfakefile = os.path.join(logical_filename, 'foo')
if IsPathInSubdirectories(logical_dirfakefile, [FakeFile._root_path],
normcase=normcase):
relative_filename = logical_dirfakefile[len(FakeFile._root_path):]
if not FakeFile._allow_skipped_files:
path = relative_filename
while path != os.path.dirname(path):
if FakeFile._skip_files.match(path):
logging.warning('Blocking access to skipped file "%s"',
logical_filename)
return False
path = os.path.dirname(path)
if FakeFile._static_file_config_matcher.IsStaticFile(relative_filename):
logging.warning('Blocking access to static file "%s"',
logical_filename)
return False
if logical_filename in FakeFile.ALLOWED_FILES:
return True
if logical_filename in FakeFile.ALLOWED_SITE_PACKAGE_FILES:
return True
if IsPathInSubdirectories(logical_dirfakefile,
FakeFile.ALLOWED_SITE_PACKAGE_DIRS,
normcase=normcase):
return True
allowed_dirs = FakeFile._application_paths | FakeFile.ALLOWED_DIRS
if (IsPathInSubdirectories(logical_dirfakefile,
allowed_dirs,
normcase=normcase) and
not IsPathInSubdirectories(logical_dirfakefile,
FakeFile.NOT_ALLOWED_DIRS,
normcase=normcase)):
return True
return False
def __init__(self, filename, mode='r', bufsize=-1, **kwargs):
"""Initializer. See file built-in documentation."""
if mode not in FakeFile.ALLOWED_MODES:
raise IOError('invalid mode: %s' % mode)
if not FakeFile.IsFileAccessible(filename):
raise IOError(errno.EACCES, 'file not accessible', filename)
super(FakeFile, self).__init__(filename, mode, bufsize, **kwargs)
from google.appengine.dist import _library
_library.SetAllowedModule = FakeFile.SetAllowedModule
class RestrictedPathFunction(object):
"""Enforces access restrictions for functions that have a file or
directory path as their first argument."""
_original_os = os
def __init__(self, original_func):
"""Initializer.
Args:
original_func: Callable that takes as its first argument the path to a
file or directory on disk; all subsequent arguments may be variable.
"""
self._original_func = original_func
def __call__(self, path, *args, **kwargs):
"""Enforces access permissions for the function passed to the constructor.
"""
if not FakeFile.IsFileAccessible(path):
raise OSError(errno.EACCES, 'path not accessible', path)
return self._original_func(path, *args, **kwargs)
def GetSubmoduleName(fullname):
"""Determines the leaf submodule name of a full module name.
Args:
fullname: Fully qualified module name, e.g. 'foo.bar.baz'
Returns:
Submodule name, e.g. 'baz'. If the supplied module has no submodule (e.g.,
'stuff'), the returned value will just be that module name ('stuff').
"""
return fullname.rsplit('.', 1)[-1]
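# Informal examples:
#   GetSubmoduleName('foo.bar.baz') -> 'baz'
#   GetSubmoduleName('stuff')       -> 'stuff'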
class CouldNotFindModuleError(ImportError):
"""Raised when a module could not be found.
In contrast to when a module has been found, but cannot be loaded because of
hardening restrictions.
"""
def Trace(func):
"""Call stack logging decorator for HardenedModulesHook class.
This decorator logs the call stack of the HardenedModulesHook class as
it executes, indenting logging messages based on the current stack depth.
Args:
func: the function to decorate.
Returns:
The decorated function.
"""
def Decorate(self, *args, **kwargs):
args_to_show = []
if args is not None:
args_to_show.extend(str(argument) for argument in args)
if kwargs is not None:
args_to_show.extend('%s=%s' % (key, value)
for key, value in kwargs.iteritems())
args_string = ', '.join(args_to_show)
self.log('Entering %s(%s)', func.func_name, args_string)
self._indent_level += 1
try:
return func(self, *args, **kwargs)
finally:
self._indent_level -= 1
self.log('Exiting %s(%s)', func.func_name, args_string)
return Decorate
class HardenedModulesHook(object):
"""Meta import hook that restricts the modules used by applications to match
the production environment.
Module controls supported:
- Disallow native/extension modules from being loaded
- Disallow built-in and/or Python-distributed modules from being loaded
- Replace modules with completely empty modules
- Override specific module attributes
- Replace one module with another
After creation, this object should be added to the front of the sys.meta_path
list (which may need to be created). The sys.path_importer_cache dictionary
should also be cleared, to prevent loading any non-restricted modules.
See PEP302 for more info on how this works:
http://www.python.org/dev/peps/pep-0302/
"""
ENABLE_LOGGING = False
def log(self, message, *args):
"""Logs an import-related message to stderr, with indentation based on
current call-stack depth.
Args:
message: Logging format string.
args: Positional format parameters for the logging message.
"""
if HardenedModulesHook.ENABLE_LOGGING:
indent = self._indent_level * ' '
print >>sys.stderr, indent + (message % args)
_WHITE_LIST_C_MODULES = [
'py_streamhtmlparser',
'AES',
'ARC2',
'ARC4',
'Blowfish',
'CAST',
'DES',
'DES3',
'MD2',
'MD4',
'RIPEMD',
'SHA256',
'XOR',
'_Crypto_Cipher__AES',
'_Crypto_Cipher__ARC2',
'_Crypto_Cipher__ARC4',
'_Crypto_Cipher__Blowfish',
'_Crypto_Cipher__CAST',
'_Crypto_Cipher__DES',
'_Crypto_Cipher__DES3',
'_Crypto_Cipher__XOR',
'_Crypto_Hash__MD2',
'_Crypto_Hash__MD4',
'_Crypto_Hash__RIPEMD',
'_Crypto_Hash__SHA256',
'array',
'binascii',
'bz2',
'cmath',
'collections',
'crypt',
'cStringIO',
'datetime',
'errno',
'exceptions',
'gc',
'itertools',
'math',
'md5',
'operator',
'posix',
'posixpath',
'pyexpat',
'sha',
'struct',
'sys',
'time',
'timing',
'unicodedata',
'zlib',
'_ast',
'_bisect',
'_codecs',
'_codecs_cn',
'_codecs_hk',
'_codecs_iso2022',
'_codecs_jp',
'_codecs_kr',
'_codecs_tw',
'_collections',
'_csv',
'_elementtree',
'_functools',
'_hashlib',
'_heapq',
'_locale',
'_lsprof',
'_md5',
'_multibytecodec',
'_scproxy',
'_random',
'_sha',
'_sha256',
'_sha512',
'_sre',
'_struct',
'_types',
'_weakref',
'__main__',
]
__CRYPTO_CIPHER_ALLOWED_MODULES = [
'MODE_CBC',
'MODE_CFB',
'MODE_CTR',
'MODE_ECB',
'MODE_OFB',
'block_size',
'key_size',
'new',
]
_WHITE_LIST_PARTIAL_MODULES = {
'Crypto.Cipher.AES': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.ARC2': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.Blowfish': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.CAST': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.DES': __CRYPTO_CIPHER_ALLOWED_MODULES,
'Crypto.Cipher.DES3': __CRYPTO_CIPHER_ALLOWED_MODULES,
'gc': [
'enable',
'disable',
'isenabled',
'collect',
'get_debug',
'set_threshold',
'get_threshold',
'get_count'
],
'os': [
'access',
'altsep',
'curdir',
'defpath',
'devnull',
'environ',
'error',
'extsep',
'EX_NOHOST',
'EX_NOINPUT',
'EX_NOPERM',
'EX_NOUSER',
'EX_OK',
'EX_OSERR',
'EX_OSFILE',
'EX_PROTOCOL',
'EX_SOFTWARE',
'EX_TEMPFAIL',
'EX_UNAVAILABLE',
'EX_USAGE',
'F_OK',
'getcwd',
'getcwdu',
'getenv',
'listdir',
'lstat',
'name',
'NGROUPS_MAX',
'O_APPEND',
'O_CREAT',
'O_DIRECT',
'O_DIRECTORY',
'O_DSYNC',
'O_EXCL',
'O_LARGEFILE',
'O_NDELAY',
'O_NOCTTY',
'O_NOFOLLOW',
'O_NONBLOCK',
'O_RDONLY',
'O_RDWR',
'O_RSYNC',
'O_SYNC',
'O_TRUNC',
'O_WRONLY',
'open',
'pardir',
'path',
'pathsep',
'R_OK',
'readlink',
'remove',
'rename',
'SEEK_CUR',
'SEEK_END',
'SEEK_SET',
'sep',
'stat',
'stat_float_times',
'stat_result',
'strerror',
'TMP_MAX',
'unlink',
'urandom',
'utime',
'walk',
'WCOREDUMP',
'WEXITSTATUS',
'WIFEXITED',
'WIFSIGNALED',
'WIFSTOPPED',
'WNOHANG',
'WSTOPSIG',
'WTERMSIG',
'WUNTRACED',
'W_OK',
'X_OK',
],
}
_MODULE_OVERRIDES = {
'locale': {
'setlocale': FakeSetLocale,
},
'os': {
'access': FakeAccess,
'listdir': RestrictedPathFunction(os.listdir),
'lstat': RestrictedPathFunction(os.stat),
'open': FakeOpen,
'readlink': FakeReadlink,
'remove': FakeUnlink,
'rename': FakeRename,
'stat': RestrictedPathFunction(os.stat),
'uname': FakeUname,
'unlink': FakeUnlink,
'urandom': FakeURandom,
'utime': FakeUTime,
},
'distutils.util': {
'get_platform': FakeGetPlatform,
},
}
_ENABLED_FILE_TYPES = (
imp.PKG_DIRECTORY,
imp.PY_SOURCE,
imp.PY_COMPILED,
imp.C_BUILTIN,
)
def __init__(self,
module_dict,
imp_module=imp,
os_module=os,
dummy_thread_module=dummy_thread,
pickle_module=pickle):
"""Initializer.
Args:
module_dict: Module dictionary to use for managing system modules.
Should be sys.modules.
imp_module, os_module, dummy_thread_module, pickle_module: References to
modules that exist in the dev_appserver that must be used by this class
in order to function, even if these modules have been unloaded from
sys.modules.
"""
self._module_dict = module_dict
self._imp = imp_module
self._os = os_module
self._dummy_thread = dummy_thread_module
    self._pickle = pickle_module
self._indent_level = 0
@Trace
def find_module(self, fullname, path=None):
"""See PEP 302."""
if fullname in ('cPickle', 'thread'):
return self
search_path = path
all_modules = fullname.split('.')
try:
for index, current_module in enumerate(all_modules):
current_module_fullname = '.'.join(all_modules[:index + 1])
if (current_module_fullname == fullname and not
self.StubModuleExists(fullname)):
self.FindModuleRestricted(current_module,
current_module_fullname,
search_path)
else:
if current_module_fullname in self._module_dict:
module = self._module_dict[current_module_fullname]
else:
module = self.FindAndLoadModule(current_module,
current_module_fullname,
search_path)
if hasattr(module, '__path__'):
search_path = module.__path__
except CouldNotFindModuleError:
return None
return self
def StubModuleExists(self, name):
"""Check if the named module has a stub replacement."""
if name in sys.builtin_module_names:
name = 'py_%s' % name
if name in dist.__all__:
return True
return False
def ImportStubModule(self, name):
"""Import the stub module replacement for the specified module."""
if name in sys.builtin_module_names:
name = 'py_%s' % name
module = __import__(dist.__name__, {}, {}, [name])
return getattr(module, name)
@Trace
def FixModule(self, module):
"""Prunes and overrides restricted module attributes.
Args:
module: The module to prune. This should be a new module whose attributes
reference back to the real module's __dict__ members.
"""
if module.__name__ in self._WHITE_LIST_PARTIAL_MODULES:
allowed_symbols = self._WHITE_LIST_PARTIAL_MODULES[module.__name__]
for symbol in set(module.__dict__) - set(allowed_symbols):
if not (symbol.startswith('__') and symbol.endswith('__')):
del module.__dict__[symbol]
if module.__name__ in self._MODULE_OVERRIDES:
module.__dict__.update(self._MODULE_OVERRIDES[module.__name__])
@Trace
def FindModuleRestricted(self,
submodule,
submodule_fullname,
search_path):
"""Locates a module while enforcing module import restrictions.
Args:
submodule: The short name of the submodule (i.e., the last section of
the fullname; for 'foo.bar' this would be 'bar').
submodule_fullname: The fully qualified name of the module to find (e.g.,
'foo.bar').
search_path: List of paths to search for to find this module. Should be
None if the current sys.path should be used.
Returns:
Tuple (source_file, pathname, description) where:
source_file: File-like object that contains the module; in the case
of packages, this will be None, which implies to look at __init__.py.
pathname: String containing the full path of the module on disk.
description: Tuple returned by imp.find_module().
However, in the case of an import using a path hook (e.g. a zipfile),
source_file will be a PEP-302-style loader object, pathname will be None,
and description will be a tuple filled with None values.
Raises:
ImportError exception if the requested module was found, but importing
it is disallowed.
      CouldNotFindModuleError exception if the requested module could not even
be found for import.
"""
if search_path is None:
search_path = [None] + sys.path
for path_entry in search_path:
result = self.FindPathHook(submodule, submodule_fullname, path_entry)
if result is not None:
source_file, pathname, description = result
if description == (None, None, None):
return result
else:
break
else:
self.log('Could not find module "%s"', submodule_fullname)
raise CouldNotFindModuleError()
suffix, mode, file_type = description
if (file_type not in (self._imp.C_BUILTIN, self._imp.C_EXTENSION) and
not FakeFile.IsFileAccessible(pathname)):
error_message = 'Access to module file denied: %s' % pathname
logging.debug(error_message)
raise ImportError(error_message)
if (file_type not in self._ENABLED_FILE_TYPES and
submodule not in self._WHITE_LIST_C_MODULES):
error_message = ('Could not import "%s": Disallowed C-extension '
'or built-in module' % submodule_fullname)
logging.debug(error_message)
raise ImportError(error_message)
return source_file, pathname, description
def FindPathHook(self, submodule, submodule_fullname, path_entry):
"""Helper for FindModuleRestricted to find a module in a sys.path entry.
Args:
      submodule: The short name of the submodule (last section of the fullname).
      submodule_fullname: The fully qualified name of the module to find.
path_entry: A single sys.path entry, or None representing the builtins.
Returns:
Either None (if nothing was found), or a triple (source_file, path_name,
description). See the doc string for FindModuleRestricted() for the
meaning of the latter.
"""
if path_entry is None:
if submodule_fullname in sys.builtin_module_names:
try:
result = self._imp.find_module(submodule)
except ImportError:
pass
else:
source_file, pathname, description = result
suffix, mode, file_type = description
if file_type == self._imp.C_BUILTIN:
return result
return None
if path_entry in sys.path_importer_cache:
importer = sys.path_importer_cache[path_entry]
else:
importer = None
for hook in sys.path_hooks:
try:
importer = hook(path_entry)
break
except ImportError:
pass
sys.path_importer_cache[path_entry] = importer
if importer is None:
try:
return self._imp.find_module(submodule, [path_entry])
except ImportError:
pass
else:
loader = importer.find_module(submodule)
if loader is not None:
return (loader, None, (None, None, None))
return None
@Trace
def LoadModuleRestricted(self,
submodule_fullname,
source_file,
pathname,
description):
"""Loads a module while enforcing module import restrictions.
As a byproduct, the new module will be added to the module dictionary.
Args:
submodule_fullname: The fully qualified name of the module to find (e.g.,
'foo.bar').
source_file: File-like object that contains the module's source code,
or a PEP-302-style loader object.
pathname: String containing the full path of the module on disk.
description: Tuple returned by imp.find_module(), or (None, None, None)
in case source_file is a PEP-302-style loader object.
Returns:
The new module.
Raises:
      ImportError exception if the specified module could not be loaded for
whatever reason.
"""
if description == (None, None, None):
return source_file.load_module(submodule_fullname)
try:
try:
return self._imp.load_module(submodule_fullname,
source_file,
pathname,
description)
except:
if submodule_fullname in self._module_dict:
del self._module_dict[submodule_fullname]
raise
finally:
if source_file is not None:
source_file.close()
@Trace
def FindAndLoadModule(self,
submodule,
submodule_fullname,
search_path):
"""Finds and loads a module, loads it, and adds it to the module dictionary.
Args:
submodule: Name of the module to import (e.g., baz).
submodule_fullname: Full name of the module to import (e.g., foo.bar.baz).
search_path: Path to use for searching for this submodule. For top-level
modules this should be None; otherwise it should be the __path__
attribute from the parent package.
Returns:
A new module instance that has been inserted into the module dictionary
supplied to __init__.
Raises:
ImportError exception if the module could not be loaded for whatever
reason (e.g., missing, not allowed).
"""
module = self._imp.new_module(submodule_fullname)
if submodule_fullname == 'thread':
module.__dict__.update(self._dummy_thread.__dict__)
module.__name__ = 'thread'
elif submodule_fullname == 'cPickle':
module.__dict__.update(self._pickle.__dict__)
module.__name__ = 'cPickle'
elif submodule_fullname == 'os':
module.__dict__.update(self._os.__dict__)
elif self.StubModuleExists(submodule_fullname):
module = self.ImportStubModule(submodule_fullname)
else:
source_file, pathname, description = self.FindModuleRestricted(submodule, submodule_fullname, search_path)
module = self.LoadModuleRestricted(submodule_fullname,
source_file,
pathname,
description)
module.__loader__ = self
self.FixModule(module)
if submodule_fullname not in self._module_dict:
self._module_dict[submodule_fullname] = module
if submodule_fullname == 'os':
os_path_name = module.path.__name__
os_path = self.FindAndLoadModule(os_path_name, os_path_name, search_path)
self._module_dict['os.path'] = os_path
module.__dict__['path'] = os_path
return module
@Trace
def GetParentPackage(self, fullname):
"""Retrieves the parent package of a fully qualified module name.
Args:
fullname: Full name of the module whose parent should be retrieved (e.g.,
foo.bar).
Returns:
Module instance for the parent or None if there is no parent module.
Raise:
ImportError exception if the module's parent could not be found.
"""
all_modules = fullname.split('.')
parent_module_fullname = '.'.join(all_modules[:-1])
if parent_module_fullname:
if self.find_module(fullname) is None:
raise ImportError('Could not find module %s' % fullname)
return self._module_dict[parent_module_fullname]
return None
@Trace
def GetParentSearchPath(self, fullname):
"""Determines the search path of a module's parent package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (submodule, search_path) where:
submodule: The last portion of the module name from fullname (e.g.,
if fullname is foo.bar, then this is bar).
search_path: List of paths that belong to the parent package's search
path or None if there is no parent package.
Raises:
ImportError exception if the module or its parent could not be found.
"""
submodule = GetSubmoduleName(fullname)
parent_package = self.GetParentPackage(fullname)
search_path = None
if parent_package is not None and hasattr(parent_package, '__path__'):
search_path = parent_package.__path__
return submodule, search_path
@Trace
def GetModuleInfo(self, fullname):
"""Determines the path on disk and the search path of a module or package.
Args:
fullname: Full name of the module to look up (e.g., foo.bar).
Returns:
Tuple (pathname, search_path, submodule) where:
pathname: String containing the full path of the module on disk,
or None if the module wasn't loaded from disk (e.g. from a zipfile).
search_path: List of paths that belong to the found package's search
path or None if found module is not a package.
submodule: The relative name of the submodule that's being imported.
"""
submodule, search_path = self.GetParentSearchPath(fullname)
source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path)
suffix, mode, file_type = description
module_search_path = None
if file_type == self._imp.PKG_DIRECTORY:
module_search_path = [pathname]
pathname = os.path.join(pathname, '__init__%spy' % os.extsep)
return pathname, module_search_path, submodule
@Trace
def load_module(self, fullname):
"""See PEP 302."""
all_modules = fullname.split('.')
submodule = all_modules[-1]
parent_module_fullname = '.'.join(all_modules[:-1])
search_path = None
if parent_module_fullname and parent_module_fullname in self._module_dict:
parent_module = self._module_dict[parent_module_fullname]
if hasattr(parent_module, '__path__'):
search_path = parent_module.__path__
return self.FindAndLoadModule(submodule, fullname, search_path)
@Trace
def is_package(self, fullname):
"""See PEP 302 extensions."""
submodule, search_path = self.GetParentSearchPath(fullname)
source_file, pathname, description = self.FindModuleRestricted(submodule, fullname, search_path)
suffix, mode, file_type = description
if file_type == self._imp.PKG_DIRECTORY:
return True
return False
@Trace
def get_source(self, fullname):
"""See PEP 302 extensions."""
full_path, search_path, submodule = self.GetModuleInfo(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
return source_file.read()
finally:
source_file.close()
@Trace
def get_code(self, fullname):
"""See PEP 302 extensions."""
full_path, search_path, submodule = self.GetModuleInfo(fullname)
if full_path is None:
return None
source_file = open(full_path)
try:
source_code = source_file.read()
finally:
source_file.close()
source_code = source_code.replace('\r\n', '\n')
if not source_code.endswith('\n'):
source_code += '\n'
return compile(source_code, full_path, 'exec')
def ModuleHasValidMainFunction(module):
"""Determines if a module has a main function that takes no arguments.
This includes functions that have arguments with defaults that are all
assigned, thus requiring no additional arguments in order to be called.
Args:
module: A types.ModuleType instance.
Returns:
True if the module has a valid, reusable main function; False otherwise.
"""
if hasattr(module, 'main') and type(module.main) is types.FunctionType:
arg_names, var_args, var_kwargs, default_values = inspect.getargspec(
module.main)
if len(arg_names) == 0:
return True
if default_values is not None and len(arg_names) == len(default_values):
return True
return False
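# Informal examples:
#   def main(): ...                    -> reusable (takes no arguments)
#   def main(debug=False): ...         -> reusable (every argument has a default)
#   def main(request, response): ...   -> not reusable (arguments lack defaults)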
def GetScriptModuleName(handler_path):
"""Determines the fully-qualified Python module name of a script on disk.
Args:
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). May contain $PYTHON_LIB references.
Returns:
String containing the corresponding module name (e.g., 'foo.bar.baz').
"""
if handler_path.startswith(PYTHON_LIB_VAR + '/'):
handler_path = handler_path[len(PYTHON_LIB_VAR):]
handler_path = os.path.normpath(handler_path)
extension_index = handler_path.rfind('.py')
if extension_index != -1:
handler_path = handler_path[:extension_index]
module_fullname = handler_path.replace(os.sep, '.')
module_fullname = module_fullname.strip('.')
module_fullname = re.sub('\.+', '.', module_fullname)
if module_fullname.endswith('.__init__'):
module_fullname = module_fullname[:-len('.__init__')]
return module_fullname
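# Informal examples:
#   GetScriptModuleName('foo/bar/baz.py')                          -> 'foo.bar.baz'
#   GetScriptModuleName('foo/bar/__init__.py')                     -> 'foo.bar'
#   GetScriptModuleName('$PYTHON_LIB/google/appengine/ext/admin')  -> 'google.appengine.ext.admin'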
def FindMissingInitFiles(cgi_path, module_fullname, isfile=os.path.isfile):
"""Determines which __init__.py files are missing from a module's parent
packages.
Args:
cgi_path: Absolute path of the CGI module file on disk.
module_fullname: Fully qualified Python module name used to import the
cgi_path module.
isfile: Used for testing.
Returns:
List containing the paths to the missing __init__.py files.
"""
missing_init_files = []
if cgi_path.endswith('.py'):
module_base = os.path.dirname(cgi_path)
else:
module_base = cgi_path
depth_count = module_fullname.count('.')
if cgi_path.endswith('__init__.py') or not cgi_path.endswith('.py'):
depth_count += 1
for index in xrange(depth_count):
current_init_file = os.path.abspath(
os.path.join(module_base, '__init__.py'))
if not isfile(current_init_file):
missing_init_files.append(current_init_file)
module_base = os.path.abspath(os.path.join(module_base, os.pardir))
return missing_init_files
def LoadTargetModule(handler_path,
cgi_path,
import_hook,
module_dict=sys.modules):
"""Loads a target CGI script by importing it as a Python module.
If the module for the target CGI script has already been loaded before,
the new module will be loaded in its place using the same module object,
possibly overwriting existing module attributes.
Args:
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). Should not have $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
import_hook: Instance of HardenedModulesHook to use for module loading.
module_dict: Used for dependency injection.
Returns:
Tuple (module_fullname, script_module, module_code) where:
module_fullname: Fully qualified module name used to import the script.
script_module: The ModuleType object corresponding to the module_fullname.
If the module has not already been loaded, this will be an empty
shell of a module.
module_code: Code object (returned by compile built-in) corresponding
to the cgi_path to run. If the script_module was previously loaded
and has a main() function that can be reused, this will be None.
"""
module_fullname = GetScriptModuleName(handler_path)
script_module = module_dict.get(module_fullname)
module_code = None
if script_module is not None and ModuleHasValidMainFunction(script_module):
logging.debug('Reusing main() function of module "%s"', module_fullname)
else:
if script_module is None:
script_module = imp.new_module(module_fullname)
script_module.__loader__ = import_hook
try:
module_code = import_hook.get_code(module_fullname)
full_path, search_path, submodule = (
import_hook.GetModuleInfo(module_fullname))
script_module.__file__ = full_path
if search_path is not None:
script_module.__path__ = search_path
except:
exc_type, exc_value, exc_tb = sys.exc_info()
import_error_message = str(exc_type)
if exc_value:
import_error_message += ': ' + str(exc_value)
logging.exception('Encountered error loading module "%s": %s',
module_fullname, import_error_message)
missing_inits = FindMissingInitFiles(cgi_path, module_fullname)
if missing_inits:
logging.warning('Missing package initialization files: %s',
', '.join(missing_inits))
else:
logging.error('Parent package initialization files are present, '
'but must be broken')
independent_load_successful = True
if not os.path.isfile(cgi_path):
independent_load_successful = False
else:
try:
source_file = open(cgi_path)
try:
module_code = compile(source_file.read(), cgi_path, 'exec')
script_module.__file__ = cgi_path
finally:
source_file.close()
except OSError:
independent_load_successful = False
if not independent_load_successful:
raise exc_type, exc_value, exc_tb
module_dict[module_fullname] = script_module
return module_fullname, script_module, module_code
def CheckRequestSize(request_size, outfile):
"""Check that request size is below the maximum size.
Checks to see if the request size small enough for small requests. Will
write the correct error message to the response outfile if the request
is too large.
Args:
request_size: Calculated size of request.
outfile: Response outfile.
Returns:
True if request size is ok, else False.
"""
if request_size <= MAX_REQUEST_SIZE:
return True
else:
msg = ('HTTP request was too large: %d. The limit is: %d.'
% (request_size, MAX_REQUEST_SIZE))
logging.error(msg)
outfile.write('Status: %d Request entity too large\r\n'
'\r\n'
'%s' % (httplib.REQUEST_ENTITY_TOO_LARGE, msg))
return False
def ExecuteOrImportScript(handler_path, cgi_path, import_hook):
"""Executes a CGI script by importing it as a new module.
This possibly reuses the module's main() function if it is defined and
takes no arguments.
Basic technique lifted from PEP 338 and Python2.5's runpy module. See:
http://www.python.org/dev/peps/pep-0338/
See the section entitled "Import Statements and the Main Module" to understand
why a module named '__main__' cannot do relative imports. To get around this,
the requested module's path could be added to sys.path on each request.
Args:
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). Should not have $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
import_hook: Instance of HardenedModulesHook to use for module loading.
Returns:
True if the response code had an error status (e.g., 404), or False if it
did not.
Raises:
Any kind of exception that could have been raised when loading the target
module, running a target script, or executing the application code itself.
"""
module_fullname, script_module, module_code = LoadTargetModule(
handler_path, cgi_path, import_hook)
script_module.__name__ = '__main__'
sys.modules['__main__'] = script_module
try:
if module_code:
exec module_code in script_module.__dict__
else:
script_module.main()
sys.stdout.flush()
sys.stdout.seek(0)
try:
headers = mimetools.Message(sys.stdout)
finally:
sys.stdout.seek(0, 2)
status_header = headers.get('status')
error_response = False
if status_header:
try:
status_code = int(status_header.split(' ', 1)[0])
error_response = status_code >= 400
except ValueError:
error_response = True
if not error_response:
try:
parent_package = import_hook.GetParentPackage(module_fullname)
except Exception:
parent_package = None
if parent_package is not None:
submodule = GetSubmoduleName(module_fullname)
setattr(parent_package, submodule, script_module)
return error_response
finally:
script_module.__name__ = module_fullname
def ExecuteCGI(root_path,
handler_path,
cgi_path,
env,
infile,
outfile,
module_dict,
exec_script=ExecuteOrImportScript):
"""Executes Python file in this process as if it were a CGI.
Does not return an HTTP response line. CGIs should output headers followed by
the body content.
The modules in sys.modules should be the same before and after the CGI is
executed, with the specific exception of encodings-related modules, which
cannot be reloaded and thus must always stay in sys.modules.
Args:
root_path: Path to the root of the application.
handler_path: CGI path stored in the application configuration (as a path
like 'foo/bar/baz.py'). May contain $PYTHON_LIB references.
cgi_path: Absolute path to the CGI script file on disk.
env: Dictionary of environment variables to use for the execution.
infile: File-like object to read HTTP request input data from.
    outfile: File-like object to write HTTP response data to.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This removes the need to reload modules that
are reused between requests, significantly increasing load performance.
This dictionary must be separate from the sys.modules dictionary.
exec_script: Used for dependency injection.
"""
old_module_dict = sys.modules.copy()
old_builtin = __builtin__.__dict__.copy()
old_argv = sys.argv
old_stdin = sys.stdin
old_stdout = sys.stdout
old_env = os.environ.copy()
old_cwd = os.getcwd()
old_file_type = types.FileType
reset_modules = False
try:
ClearAllButEncodingsModules(sys.modules)
before_path = sys.path[:]
sys.modules.update(module_dict)
sys.argv = [cgi_path]
sys.stdin = cStringIO.StringIO(infile.getvalue())
sys.stdout = outfile
os.environ.clear()
os.environ.update(env)
cgi_dir = os.path.normpath(os.path.dirname(cgi_path))
root_path = os.path.normpath(os.path.abspath(root_path))
if cgi_dir.startswith(root_path + os.sep):
os.chdir(cgi_dir)
else:
os.chdir(root_path)
hook = HardenedModulesHook(sys.modules)
sys.meta_path = [hook]
if hasattr(sys, 'path_importer_cache'):
sys.path_importer_cache.clear()
__builtin__.file = FakeFile
__builtin__.open = FakeFile
types.FileType = FakeFile
__builtin__.buffer = NotImplementedFakeClass
logging.debug('Executing CGI with env:\n%s', pprint.pformat(env))
try:
reset_modules = exec_script(handler_path, cgi_path, hook)
except SystemExit, e:
logging.debug('CGI exited with status: %s', e)
except:
reset_modules = True
raise
finally:
sys.meta_path = []
sys.path_importer_cache.clear()
_ClearTemplateCache(sys.modules)
module_dict.update(sys.modules)
ClearAllButEncodingsModules(sys.modules)
sys.modules.update(old_module_dict)
__builtin__.__dict__.update(old_builtin)
sys.argv = old_argv
sys.stdin = old_stdin
sys.stdout = old_stdout
sys.path[:] = before_path
os.environ.clear()
os.environ.update(old_env)
os.chdir(old_cwd)
types.FileType = old_file_type
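# Illustrative sketch, not part of the original dev_appserver: ExecuteCGI above
# follows a save/patch/restore pattern for process-global state (sys.argv,
# sys.stdin/stdout, os.environ, the working directory). The helper below shows
# the same pattern in isolation; the name _run_with_patched_stdio_example is
# hypothetical and nothing in this module calls it.
def _run_with_patched_stdio_example(func, fake_stdin, fake_stdout):
  """Runs func() with sys.stdin and sys.stdout temporarily replaced.

  Mirrors the try/finally structure of ExecuteCGI: the globals are saved before
  the call and restored unconditionally afterwards, so a failure in func()
  cannot leak patched state into later requests.
  """
  old_stdin = sys.stdin
  old_stdout = sys.stdout
  try:
    sys.stdin = fake_stdin
    sys.stdout = fake_stdout
    return func()
  finally:
    sys.stdin = old_stdin
    sys.stdout = old_stdout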
class CGIDispatcher(URLDispatcher):
"""Dispatcher that executes Python CGI scripts."""
def __init__(self,
module_dict,
root_path,
path_adjuster,
setup_env=SetupEnvironment,
exec_cgi=ExecuteCGI,
create_logging_handler=ApplicationLoggingHandler):
"""Initializer.
Args:
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
path_adjuster: Instance of PathAdjuster to use for finding absolute
paths of CGI files on disk.
setup_env, exec_cgi, create_logging_handler: Used for dependency
injection.
"""
self._module_dict = module_dict
self._root_path = root_path
self._path_adjuster = path_adjuster
self._setup_env = setup_env
self._exec_cgi = exec_cgi
self._create_logging_handler = create_logging_handler
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Dispatches the Python CGI."""
request_size = int(request.headers.get('content-length', 0))
if not CheckRequestSize(request_size, outfile):
return
memory_file = cStringIO.StringIO()
CopyStreamPart(request.infile, memory_file, request_size)
memory_file.seek(0)
handler = self._create_logging_handler()
logging.getLogger().addHandler(handler)
before_level = logging.root.level
try:
env = {}
if base_env_dict:
env.update(base_env_dict)
cgi_path = self._path_adjuster.AdjustPath(request.path)
env.update(self._setup_env(cgi_path,
request.relative_url,
request.headers,
memory_file))
self._exec_cgi(self._root_path,
request.path,
cgi_path,
env,
memory_file,
outfile,
self._module_dict)
handler.AddDebuggingConsole(request.relative_url, env, outfile)
finally:
logging.root.level = before_level
logging.getLogger().removeHandler(handler)
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'CGI dispatcher'
class LocalCGIDispatcher(CGIDispatcher):
"""Dispatcher that executes local functions like they're CGIs.
The contents of sys.modules will be preserved for local CGIs running this
dispatcher, but module hardening will still occur for any new imports. Thus,
be sure that any local CGIs have loaded all of their dependent modules
_before_ they are executed.
"""
def __init__(self, module_dict, path_adjuster, cgi_func):
"""Initializer.
Args:
module_dict: Passed to CGIDispatcher.
path_adjuster: Passed to CGIDispatcher.
cgi_func: Callable function taking no parameters that should be
executed in a CGI environment in the current process.
"""
self._cgi_func = cgi_func
def curried_exec_script(*args, **kwargs):
cgi_func()
return False
def curried_exec_cgi(*args, **kwargs):
kwargs['exec_script'] = curried_exec_script
return ExecuteCGI(*args, **kwargs)
CGIDispatcher.__init__(self,
module_dict,
'',
path_adjuster,
exec_cgi=curried_exec_cgi)
def Dispatch(self, *args, **kwargs):
"""Preserves sys.modules for CGIDispatcher.Dispatch."""
self._module_dict.update(sys.modules)
CGIDispatcher.Dispatch(self, *args, **kwargs)
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'Local CGI dispatcher for %s' % self._cgi_func
class PathAdjuster(object):
"""Adjusts application file paths to paths relative to the application or
external library directories."""
def __init__(self, root_path):
"""Initializer.
Args:
root_path: Path to the root of the application running on the server.
"""
self._root_path = os.path.abspath(root_path)
def AdjustPath(self, path):
"""Adjusts application file paths to relative to the application.
More precisely this method adjusts application file path to paths
relative to the application or external library directories.
Handler paths that start with $PYTHON_LIB will be converted to paths
relative to the google directory.
Args:
path: File path that should be adjusted.
Returns:
The adjusted path.
"""
if path.startswith(PYTHON_LIB_VAR):
path = os.path.join(os.path.dirname(os.path.dirname(google.__file__)),
path[len(PYTHON_LIB_VAR) + 1:])
else:
path = os.path.join(self._root_path, path)
return path
class StaticFileConfigMatcher(object):
"""Keeps track of file/directory specific application configuration.
Specifically:
- Computes mime type based on URLMap and file extension.
- Decides on cache expiration time based on URLMap and default expiration.
To determine the mime type, we first see if there is any mime-type property
  on each URLMap entry. If none is specified, we use the mimetypes module to
guess the mime type from the file path extension, and use
application/octet-stream if we can't find the mimetype.
"""
def __init__(self,
url_map_list,
path_adjuster,
default_expiration):
"""Initializer.
Args:
url_map_list: List of appinfo.URLMap objects.
If empty or None, then we always use the mime type chosen by the
mimetypes module.
path_adjuster: PathAdjuster object used to adjust application file paths.
default_expiration: String describing default expiration time for browser
based caching of static files. If set to None this disallows any
browser caching of static content.
"""
if default_expiration is not None:
self._default_expiration = appinfo.ParseExpiration(default_expiration)
else:
self._default_expiration = None
self._patterns = []
if url_map_list:
for entry in url_map_list:
handler_type = entry.GetHandlerType()
if handler_type not in (appinfo.STATIC_FILES, appinfo.STATIC_DIR):
continue
if handler_type == appinfo.STATIC_FILES:
regex = entry.upload + '$'
else:
path = entry.static_dir
if path[-1] == '/':
path = path[:-1]
regex = re.escape(path + os.path.sep) + r'(.*)'
try:
path_re = re.compile(regex)
except re.error, e:
raise InvalidAppConfigError('regex %s does not compile: %s' %
(regex, e))
if self._default_expiration is None:
expiration = 0
elif entry.expiration is None:
expiration = self._default_expiration
else:
expiration = appinfo.ParseExpiration(entry.expiration)
self._patterns.append((path_re, entry.mime_type, expiration))
def IsStaticFile(self, path):
"""Tests if the given path points to a "static" file.
Args:
path: String containing the file's path relative to the app.
Returns:
Boolean, True if the file was configured to be static.
"""
for (path_re, _, _) in self._patterns:
if path_re.match(path):
return True
return False
def GetMimeType(self, path):
"""Returns the mime type that we should use when serving the specified file.
Args:
path: String containing the file's path relative to the app.
Returns:
String containing the mime type to use. Will be 'application/octet-stream'
if we have no idea what it should be.
"""
for (path_re, mimetype, unused_expiration) in self._patterns:
if mimetype is not None:
the_match = path_re.match(path)
if the_match:
return mimetype
unused_filename, extension = os.path.splitext(path)
return mimetypes.types_map.get(extension, 'application/octet-stream')
def GetExpiration(self, path):
"""Returns the cache expiration duration to be users for the given file.
Args:
path: String containing the file's path relative to the app.
Returns:
Integer number of seconds to be used for browser cache expiration time.
"""
for (path_re, unused_mimetype, expiration) in self._patterns:
the_match = path_re.match(path)
if the_match:
return expiration
return self._default_expiration or 0
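# Illustrative sketch, not part of the original module: GetMimeType above falls
# back from an explicit mime-type configured in app.yaml, to a guess based on
# the file extension, to 'application/octet-stream'. The hypothetical helper
# below shows that fallback chain on its own; nothing in this module calls it.
def _guess_mime_type_example(path, configured_mime_type=None):
  """Returns configured_mime_type if given, else guesses from the extension."""
  if configured_mime_type is not None:
    return configured_mime_type
  unused_filename, extension = os.path.splitext(path)
  return mimetypes.types_map.get(extension, 'application/octet-stream')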
def ReadDataFile(data_path, openfile=file):
"""Reads a file on disk, returning a corresponding HTTP status and data.
Args:
data_path: Path to the file on disk to read.
openfile: Used for dependency injection.
Returns:
Tuple (status, data) where status is an HTTP response code, and data is
the data read; will be an empty string if an error occurred or the
file was empty.
"""
status = httplib.INTERNAL_SERVER_ERROR
data = ""
try:
data_file = openfile(data_path, 'rb')
try:
data = data_file.read()
finally:
data_file.close()
status = httplib.OK
except (OSError, IOError), e:
logging.error('Error encountered reading file "%s":\n%s', data_path, e)
if e.errno in FILE_MISSING_EXCEPTIONS:
status = httplib.NOT_FOUND
else:
status = httplib.FORBIDDEN
return status, data
class FileDispatcher(URLDispatcher):
"""Dispatcher that reads data files from disk."""
def __init__(self,
path_adjuster,
static_file_config_matcher,
read_data_file=ReadDataFile):
"""Initializer.
Args:
path_adjuster: Instance of PathAdjuster to use for finding absolute
paths of data files on disk.
static_file_config_matcher: StaticFileConfigMatcher object.
read_data_file: Used for dependency injection.
"""
self._path_adjuster = path_adjuster
self._static_file_config_matcher = static_file_config_matcher
self._read_data_file = read_data_file
def Dispatch(self,
request,
outfile,
base_env_dict=None):
"""Reads the file and returns the response status and data."""
full_path = self._path_adjuster.AdjustPath(request.path)
status, data = self._read_data_file(full_path)
content_type = self._static_file_config_matcher.GetMimeType(request.path)
expiration = self._static_file_config_matcher.GetExpiration(request.path)
outfile.write('Status: %d\r\n' % status)
outfile.write('Content-type: %s\r\n' % content_type)
if expiration:
outfile.write('Expires: %s\r\n'
% email.Utils.formatdate(time.time() + expiration,
usegmt=True))
outfile.write('Cache-Control: public, max-age=%i\r\n' % expiration)
outfile.write('\r\n')
outfile.write(data)
def __str__(self):
"""Returns a string representation of this dispatcher."""
return 'File dispatcher'
_IGNORE_RESPONSE_HEADERS = frozenset([
'content-encoding', 'accept-encoding', 'transfer-encoding',
'server', 'date', blobstore.BLOB_KEY_HEADER
])
class AppServerResponse(object):
"""Development appserver response object.
Object used to hold the full appserver response. Used as a container
that is passed through the request rewrite chain and ultimately sent
to the web client.
Attributes:
status_code: Integer HTTP response status (e.g., 200, 302, 404, 500)
status_message: String containing an informational message about the
response code, possibly derived from the 'status' header, if supplied.
headers: mimetools.Message containing the HTTP headers of the response.
body: File-like object containing the body of the response.
large_response: Indicates that response is permitted to be larger than
MAX_RUNTIME_RESPONSE_SIZE.
"""
__slots__ = ['status_code',
'status_message',
'headers',
'body',
'large_response']
def __init__(self, response_file=None, **kwds):
"""Initializer.
Args:
response_file: A file-like object that contains the full response
generated by the user application request handler. If present
the headers and body are set from this value, although the values
may be further overridden by the keyword parameters.
kwds: All keywords are mapped to attributes of AppServerResponse.
"""
self.status_code = 200
self.status_message = 'Good to go'
self.large_response = False
if response_file:
self.SetResponse(response_file)
else:
self.headers = mimetools.Message(cStringIO.StringIO())
self.body = None
for name, value in kwds.iteritems():
setattr(self, name, value)
def SetResponse(self, response_file):
"""Sets headers and body from the response file.
Args:
response_file: File like object to set body and headers from.
"""
self.headers = mimetools.Message(response_file)
self.body = response_file
@property
def header_data(self):
"""Get header data as a string.
Returns:
String representation of header with line breaks cleaned up.
"""
header_list = []
for header in self.headers.headers:
header = header.rstrip('\n\r')
header_list.append(header)
return '\r\n'.join(header_list) + '\r\n'
def IgnoreHeadersRewriter(response):
"""Ignore specific response headers.
Certain response headers cannot be modified by an Application. For a
complete list of these headers please see:
http://code.google.com/appengine/docs/webapp/responseclass.html#Disallowed_HTTP_Response_Headers
This rewriter simply removes those headers.
"""
for h in _IGNORE_RESPONSE_HEADERS:
if h in response.headers:
del response.headers[h]
def ParseStatusRewriter(response):
"""Parse status header, if it exists.
Handles the server-side 'status' header, which instructs the server to change
the HTTP response code accordingly. Handles the 'location' header, which
issues an HTTP 302 redirect to the client. Also corrects the 'content-length'
header to reflect actual content length in case extra information has been
appended to the response body.
If the 'status' header supplied by the client is invalid, this method will
set the response to a 500 with an error message as content.
"""
location_value = response.headers.getheader('location')
status_value = response.headers.getheader('status')
if status_value:
response_status = status_value
del response.headers['status']
elif location_value:
response_status = '%d Redirecting' % httplib.FOUND
else:
return response
status_parts = response_status.split(' ', 1)
response.status_code, response.status_message = (status_parts + [''])[:2]
try:
response.status_code = int(response.status_code)
except ValueError:
response.status_code = 500
response.body = cStringIO.StringIO(
'Error: Invalid "status" header value returned.')
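# Illustrative sketch, not part of the original module: ParseStatusRewriter
# splits a 'status' header value such as '404 Not Found' into a numeric code
# and an optional message, falling back to 500 when the code is not an
# integer. The hypothetical helper below isolates just that parsing step.
def _parse_status_value_example(status_value):
  """Returns (status_code, status_message) parsed from a status header value."""
  status_parts = status_value.split(' ', 1)
  code, message = (status_parts + [''])[:2]
  try:
    return int(code), message
  except ValueError:
    return 500, 'Invalid "status" header value'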
def CacheRewriter(response):
"""Update the cache header."""
if not 'Cache-Control' in response.headers:
response.headers['Cache-Control'] = 'no-cache'
if not 'Expires' in response.headers:
response.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
def ContentLengthRewriter(response):
"""Rewrite the Content-Length header.
Even though Content-Length is not a user modifiable header, App Engine
sends a correct Content-Length to the user based on the actual response.
"""
current_position = response.body.tell()
response.body.seek(0, 2)
response.headers['Content-Length'] = str(response.body.tell() -
current_position)
response.body.seek(current_position)
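# Illustrative sketch, not part of the original module: ContentLengthRewriter
# measures the bytes remaining in the body file by seeking to the end and then
# back, so the original read position is preserved. The hypothetical helper
# below isolates that tell/seek pattern.
def _remaining_bytes_example(body_file):
  """Returns the number of bytes between the current position and EOF."""
  current_position = body_file.tell()
  body_file.seek(0, 2)  # jump to the end of the file
  end = body_file.tell()
  body_file.seek(current_position)  # restore the original position
  return end - current_position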
def CreateResponseRewritersChain():
"""Create the default response rewriter chain.
  A response rewriter is a function that gets a final chance to change part
  of the dev_appserver's response. Unlike a dispatcher, a rewriter is called
  after every request has been handled, regardless of which dispatcher was
  used.
  The order in which rewriters are registered will be the order in which they
  are used to rewrite the response. Modifications from earlier rewriters
  are used as input to later rewriters.
  A response rewriter is a function that can rewrite the response in any way.
  The function can return modified values or the original values it was
  passed.
A rewriter function has the following parameters and return values:
Args:
status_code: Status code of response from dev_appserver or previous
rewriter.
status_message: Text corresponding to status code.
    headers: mimetools.Message instance with parsed headers. NOTE: These
      headers can contain their own 'status' field, but the default
      dev_appserver implementation will remove this. Future rewriters
      should avoid re-introducing the status field and return new codes
      instead.
    body: File object containing the body of the response. The position of
      this file may not be at the start of the file. Any content before the
      file's position is considered not to be part of the final body.
Returns:
An AppServerResponse instance.
Returns:
List of response rewriters.
"""
rewriters = [dev_appserver_blobstore.DownloadRewriter,
IgnoreHeadersRewriter,
ParseStatusRewriter,
CacheRewriter,
ContentLengthRewriter,
]
return rewriters
def RewriteResponse(response_file, response_rewriters=None):
"""Allows final rewrite of dev_appserver response.
This function receives the unparsed HTTP response from the application
or internal handler, parses out the basic structure and feeds that structure
in to a chain of response rewriters.
It also makes sure the final HTTP headers are properly terminated.
For more about response rewriters, please see documentation for
  CreateResponseRewritersChain.
Args:
response_file: File-like object containing the full HTTP response including
the response code, all headers, and the request body.
response_rewriters: A list of response rewriters. If none is provided it
will create a new chain using CreateResponseRewritersChain.
Returns:
An AppServerResponse instance configured with the rewritten response.
"""
if response_rewriters is None:
response_rewriters = CreateResponseRewritersChain()
response = AppServerResponse(response_file)
for response_rewriter in response_rewriters:
response_rewriter(response)
return response
class ModuleManager(object):
"""Manages loaded modules in the runtime.
Responsible for monitoring and reporting about file modification times.
Modules can be loaded from source or precompiled byte-code files. When a
file has source code, the ModuleManager monitors the modification time of
the source file even if the module itself is loaded from byte-code.
"""
def __init__(self, modules):
"""Initializer.
Args:
modules: Dictionary containing monitored modules.
"""
self._modules = modules
self._default_modules = self._modules.copy()
self._save_path_hooks = sys.path_hooks[:]
self._modification_times = {}
@staticmethod
def GetModuleFile(module, is_file=os.path.isfile):
"""Helper method to try to determine modules source file.
Args:
module: Module object to get file for.
is_file: Function used to determine if a given path is a file.
Returns:
Path of the module's corresponding Python source file if it exists, or
just the module's compiled Python file. If the module has an invalid
__file__ attribute, None will be returned.
"""
module_file = getattr(module, '__file__', None)
if module_file is None:
return None
source_file = module_file[:module_file.rfind('py') + 2]
if is_file(source_file):
return source_file
return module.__file__
def AreModuleFilesModified(self):
"""Determines if any monitored files have been modified.
Returns:
True if one or more files have been modified, False otherwise.
"""
for name, (mtime, fname) in self._modification_times.iteritems():
if name not in self._modules:
continue
module = self._modules[name]
if not os.path.isfile(fname):
return True
if mtime != os.path.getmtime(fname):
return True
return False
def UpdateModuleFileModificationTimes(self):
"""Records the current modification times of all monitored modules."""
self._modification_times.clear()
for name, module in self._modules.items():
if not isinstance(module, types.ModuleType):
continue
module_file = self.GetModuleFile(module)
if not module_file:
continue
try:
self._modification_times[name] = (os.path.getmtime(module_file),
module_file)
except OSError, e:
if e.errno not in FILE_MISSING_EXCEPTIONS:
raise e
def ResetModules(self):
"""Clear modules so that when request is run they are reloaded."""
self._modules.clear()
self._modules.update(self._default_modules)
sys.path_hooks[:] = self._save_path_hooks
apiproxy_stub_map.apiproxy.GetPreCallHooks().Clear()
apiproxy_stub_map.apiproxy.GetPostCallHooks().Clear()
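# Illustrative sketch, not part of the original module: ModuleManager decides
# whether to reload application code by comparing a recorded modification time
# against the current mtime of each monitored source file. The hypothetical
# helper below shows that comparison for a single file.
def _file_changed_example(filename, recorded_mtime):
  """Returns True if filename was removed or its mtime differs from the record."""
  if not os.path.isfile(filename):
    return True
  return os.path.getmtime(filename) != recorded_mtime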
def _ClearTemplateCache(module_dict=sys.modules):
"""Clear template cache in webapp.template module.
Attempts to load template module. Ignores failure. If module loads, the
template cache is cleared.
Args:
module_dict: Used for dependency injection.
"""
template_module = module_dict.get('google.appengine.ext.webapp.template')
if template_module is not None:
template_module.template_cache.clear()
def CreateRequestHandler(root_path,
login_url,
require_indexes=False,
static_caching=True):
"""Creates a new BaseHTTPRequestHandler sub-class.
This class will be used with the Python BaseHTTPServer module's HTTP server.
Python's built-in HTTP server does not support passing context information
along to instances of its request handlers. This function gets around that
by creating a sub-class of the handler in a closure that has access to
this context information.
Args:
root_path: Path to the root of the application running on the server.
login_url: Relative URL which should be used for handling user logins.
require_indexes: True if index.yaml is read-only gospel; default False.
static_caching: True if browser caching of static files should be allowed.
Returns:
Sub-class of BaseHTTPRequestHandler.
"""
application_module_dict = SetupSharedModules(sys.modules)
if require_indexes:
index_yaml_updater = None
else:
index_yaml_updater = dev_appserver_index.IndexYamlUpdater(root_path)
application_config_cache = AppConfigCache()
class DevAppServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Dispatches URLs using patterns from a URLMatcher.
The URLMatcher is created by loading an application's configuration file.
Executes CGI scripts in the local process so the scripts can use mock
versions of APIs.
HTTP requests that correctly specify a user info cookie
(dev_appserver_login.COOKIE_NAME) will have the 'USER_EMAIL' environment
variable set accordingly. If the user is also an admin, the
'USER_IS_ADMIN' variable will exist and be set to '1'. If the user is not
logged in, 'USER_EMAIL' will be set to the empty string.
On each request, raises an InvalidAppConfigError exception if the
application configuration file in the directory specified by the root_path
argument is invalid.
"""
server_version = 'Development/1.0'
module_dict = application_module_dict
module_manager = ModuleManager(application_module_dict)
config_cache = application_config_cache
rewriter_chain = CreateResponseRewritersChain()
def __init__(self, *args, **kwargs):
"""Initializer.
Args:
args: Positional arguments passed to the superclass constructor.
kwargs: Keyword arguments passed to the superclass constructor.
"""
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def version_string(self):
"""Returns server's version string used for Server HTTP header."""
return self.server_version
def do_GET(self):
"""Handle GET requests."""
self._HandleRequest()
def do_POST(self):
"""Handles POST requests."""
self._HandleRequest()
def do_PUT(self):
"""Handle PUT requests."""
self._HandleRequest()
def do_HEAD(self):
"""Handle HEAD requests."""
self._HandleRequest()
def do_OPTIONS(self):
"""Handles OPTIONS requests."""
self._HandleRequest()
def do_DELETE(self):
"""Handle DELETE requests."""
self._HandleRequest()
def do_TRACE(self):
"""Handles TRACE requests."""
self._HandleRequest()
def _Dispatch(self, dispatcher, socket_infile, outfile, env_dict):
"""Copy request data from socket and dispatch.
Args:
dispatcher: Dispatcher to handle request (MatcherDispatcher).
socket_infile: Original request file stream.
outfile: Output file to write response to.
env_dict: Environment dictionary.
"""
request_descriptor, request_file_name = tempfile.mkstemp('.tmp',
'request.')
try:
request_file = os.fdopen(request_descriptor, 'wb')
try:
CopyStreamPart(self.rfile,
request_file,
int(self.headers.get('content-length', 0)))
finally:
request_file.close()
request_file = open(request_file_name, 'rb')
try:
app_server_request = AppServerRequest(self.path,
None,
self.headers,
request_file)
dispatcher.Dispatch(app_server_request,
outfile,
base_env_dict=env_dict)
finally:
request_file.close()
finally:
try:
os.remove(request_file_name)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def _HandleRequest(self):
"""Handles any type of request and prints exceptions if they occur."""
server_name = self.headers.get('host') or self.server.server_name
server_name = server_name.split(':', 1)[0]
env_dict = {
'REQUEST_METHOD': self.command,
'REMOTE_ADDR': self.client_address[0],
'SERVER_SOFTWARE': self.server_version,
'SERVER_NAME': server_name,
'SERVER_PROTOCOL': self.protocol_version,
'SERVER_PORT': str(self.server.server_port),
}
full_url = GetFullURL(server_name, self.server.server_port, self.path)
if len(full_url) > MAX_URL_LENGTH:
msg = 'Requested URI too long: %s' % full_url
logging.error(msg)
self.send_response(httplib.REQUEST_URI_TOO_LONG, msg)
return
tbhandler = cgitb.Hook(file=self.wfile).handle
try:
if self.module_manager.AreModuleFilesModified():
self.module_manager.ResetModules()
implicit_matcher = CreateImplicitMatcher(self.module_dict,
root_path,
login_url)
config, explicit_matcher = LoadAppConfig(root_path, self.module_dict,
cache=self.config_cache,
static_caching=static_caching)
if config.api_version != API_VERSION:
logging.error(
"API versions cannot be switched dynamically: %r != %r",
config.api_version, API_VERSION)
sys.exit(1)
env_dict['CURRENT_VERSION_ID'] = config.version + ".1"
env_dict['APPLICATION_ID'] = config.application
dispatcher = MatcherDispatcher(login_url,
[implicit_matcher, explicit_matcher])
if require_indexes:
dev_appserver_index.SetupIndexes(config.application, root_path)
outfile = cStringIO.StringIO()
try:
self._Dispatch(dispatcher, self.rfile, outfile, env_dict)
finally:
self.module_manager.UpdateModuleFileModificationTimes()
outfile.flush()
outfile.seek(0)
response = RewriteResponse(outfile, self.rewriter_chain)
if not response.large_response:
position = response.body.tell()
response.body.seek(0, 2)
end = response.body.tell()
response.body.seek(position)
runtime_response_size = end - position
if runtime_response_size > MAX_RUNTIME_RESPONSE_SIZE:
response.status_code = 500
response.status_message = 'Forbidden'
if 'content-length' in response.headers:
del response.headers['content-length']
new_response = ('HTTP response was too large: %d. '
'The limit is: %d.'
% (runtime_response_size,
MAX_RUNTIME_RESPONSE_SIZE))
response.headers['content-length'] = str(len(new_response))
response.body = cStringIO.StringIO(new_response)
except yaml_errors.EventListenerError, e:
title = 'Fatal error when loading application configuration'
msg = '%s:\n%s' % (title, str(e))
logging.error(msg)
self.send_response(httplib.INTERNAL_SERVER_ERROR, title)
self.wfile.write('Content-Type: text/html\r\n\r\n')
self.wfile.write('<pre>%s</pre>' % cgi.escape(msg))
except KeyboardInterrupt, e:
logging.info('Server interrupted by user, terminating')
self.server.stop_serving_forever()
except:
msg = 'Exception encountered handling request'
logging.exception(msg)
self.send_response(httplib.INTERNAL_SERVER_ERROR, msg)
tbhandler()
else:
try:
self.send_response(response.status_code, response.status_message)
self.wfile.write(response.header_data)
self.wfile.write('\r\n')
if self.command != 'HEAD':
shutil.copyfileobj(response.body, self.wfile, COPY_BLOCK_SIZE)
elif response.body:
logging.warning('Dropping unexpected body in response '
'to HEAD request')
except (IOError, OSError), e:
if e.errno != errno.EPIPE:
raise e
except socket.error, e:
if len(e.args) >= 1 and e.args[0] != errno.EPIPE:
raise e
else:
if index_yaml_updater is not None:
index_yaml_updater.UpdateIndexYaml()
def log_error(self, format, *args):
"""Redirect error messages through the logging module."""
logging.error(format, *args)
def log_message(self, format, *args):
"""Redirect log messages through the logging module."""
logging.info(format, *args)
return DevAppServerRequestHandler
def ReadAppConfig(appinfo_path, parse_app_config=appinfo.LoadSingleAppInfo):
"""Reads app.yaml file and returns its app id and list of URLMap instances.
Args:
appinfo_path: String containing the path to the app.yaml file.
parse_app_config: Used for dependency injection.
Returns:
AppInfoExternal instance.
Raises:
If the config file could not be read or the config does not contain any
URLMap instances, this function will raise an InvalidAppConfigError
exception.
"""
try:
appinfo_file = file(appinfo_path, 'r')
except IOError, unused_e:
raise InvalidAppConfigError(
'Application configuration could not be read from "%s"' % appinfo_path)
try:
return parse_app_config(appinfo_file)
finally:
appinfo_file.close()
def CreateURLMatcherFromMaps(root_path,
url_map_list,
module_dict,
default_expiration,
create_url_matcher=URLMatcher,
create_cgi_dispatcher=CGIDispatcher,
create_file_dispatcher=FileDispatcher,
create_path_adjuster=PathAdjuster,
normpath=os.path.normpath):
"""Creates a URLMatcher instance from URLMap.
Creates all of the correct URLDispatcher instances to handle the various
content types in the application configuration.
Args:
root_path: Path to the root of the application running on the server.
url_map_list: List of appinfo.URLMap objects to initialize this
matcher with. Can be an empty list if you would like to add patterns
manually.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
default_expiration: String describing default expiration time for browser
based caching of static files. If set to None this disallows any
browser caching of static content.
create_url_matcher: Used for dependency injection.
create_cgi_dispatcher: Used for dependency injection.
create_file_dispatcher: Used for dependency injection.
create_path_adjuster: Used for dependency injection.
normpath: Used for dependency injection.
Returns:
Instance of URLMatcher with the supplied URLMap objects properly loaded.
Raises:
InvalidAppConfigError: if the handler in url_map_list is an unknown type.
"""
url_matcher = create_url_matcher()
path_adjuster = create_path_adjuster(root_path)
cgi_dispatcher = create_cgi_dispatcher(module_dict, root_path, path_adjuster)
static_file_config_matcher = StaticFileConfigMatcher(url_map_list,
path_adjuster,
default_expiration)
file_dispatcher = create_file_dispatcher(path_adjuster,
static_file_config_matcher)
FakeFile.SetStaticFileConfigMatcher(static_file_config_matcher)
for url_map in url_map_list:
admin_only = url_map.login == appinfo.LOGIN_ADMIN
requires_login = url_map.login == appinfo.LOGIN_REQUIRED or admin_only
auth_fail_action = url_map.auth_fail_action
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
dispatcher = cgi_dispatcher
elif handler_type in (appinfo.STATIC_FILES, appinfo.STATIC_DIR):
dispatcher = file_dispatcher
else:
raise InvalidAppConfigError('Unknown handler type "%s"' % handler_type)
regex = url_map.url
path = url_map.GetHandler()
if handler_type == appinfo.STATIC_DIR:
if regex[-1] == r'/':
regex = regex[:-1]
if path[-1] == os.path.sep:
path = path[:-1]
regex = '/'.join((re.escape(regex), '(.*)'))
if os.path.sep == '\\':
backref = r'\\1'
else:
backref = r'\1'
path = (normpath(path).replace('\\', '\\\\') +
os.path.sep + backref)
url_matcher.AddURL(regex,
dispatcher,
path,
requires_login, admin_only, auth_fail_action)
return url_matcher
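# Illustrative sketch, not part of the original module: for a static_dir
# handler, CreateURLMatcherFromMaps above turns the URL prefix into a capturing
# regex and rewrites matches into a filesystem path that ends with the captured
# remainder. The hypothetical helper below shows that transformation for the
# POSIX case where the path separator is '/'.
def _static_dir_mapping_example(url_prefix, static_dir):
  """Returns (regex, path_template) for a static_dir handler on POSIX."""
  regex = '/'.join((re.escape(url_prefix.rstrip('/')), '(.*)'))
  path_template = os.path.normpath(static_dir) + '/' + r'\1'
  return regex, path_template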
class AppConfigCache(object):
"""Cache used by LoadAppConfig.
If given to LoadAppConfig instances of this class are used to cache contents
of the app config (app.yaml or app.yml) and the Matcher created from it.
Code outside LoadAppConfig should treat instances of this class as opaque
objects and not access its members.
"""
path = None
mtime = None
config = None
matcher = None
def LoadAppConfig(root_path,
module_dict,
cache=None,
static_caching=True,
read_app_config=ReadAppConfig,
create_matcher=CreateURLMatcherFromMaps):
"""Creates a Matcher instance for an application configuration file.
Raises an InvalidAppConfigError exception if there is anything wrong with
the application configuration file.
Args:
root_path: Path to the root of the application to load.
module_dict: Dictionary in which application-loaded modules should be
preserved between requests. This dictionary must be separate from the
sys.modules dictionary.
cache: Instance of AppConfigCache or None.
static_caching: True if browser caching of static files should be allowed.
read_app_config: Used for dependency injection.
create_matcher: Used for dependency injection.
Returns:
tuple: (AppInfoExternal, URLMatcher)
Raises:
AppConfigNotFound: if an app.yaml file cannot be found.
"""
for appinfo_path in [os.path.join(root_path, 'app.yaml'),
os.path.join(root_path, 'app.yml')]:
if os.path.isfile(appinfo_path):
if cache is not None:
mtime = os.path.getmtime(appinfo_path)
if cache.path == appinfo_path and cache.mtime == mtime:
return (cache.config, cache.matcher)
cache.config = cache.matcher = cache.path = None
cache.mtime = mtime
try:
config = read_app_config(appinfo_path, appinfo.LoadSingleAppInfo)
if static_caching:
if config.default_expiration:
default_expiration = config.default_expiration
else:
default_expiration = '0'
else:
default_expiration = None
matcher = create_matcher(root_path,
config.handlers,
module_dict,
default_expiration)
FakeFile.SetSkippedFiles(config.skip_files)
if cache is not None:
cache.path = appinfo_path
cache.config = config
cache.matcher = matcher
return (config, matcher)
except gexcept.AbstractMethod:
pass
raise AppConfigNotFoundError
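# Illustrative sketch, not part of the original module: LoadAppConfig above
# treats its cache as valid only when both the cached path and the cached
# mtime still match the app.yaml on disk. The hypothetical helper below shows
# that freshness check in isolation.
def _cache_is_fresh_example(cache, appinfo_path):
  """Returns True if cache already holds the config for this file's current state."""
  if cache is None or cache.path != appinfo_path:
    return False
  return cache.mtime == os.path.getmtime(appinfo_path)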
def ReadCronConfig(croninfo_path, parse_cron_config=croninfo.LoadSingleCron):
"""Reads cron.yaml file and returns a list of CronEntry instances.
Args:
croninfo_path: String containing the path to the cron.yaml file.
parse_cron_config: Used for dependency injection.
Returns:
A CronInfoExternal object.
Raises:
If the config file is unreadable, empty or invalid, this function will
raise an InvalidAppConfigError or a MalformedCronConfiguration exception.
"""
try:
croninfo_file = file(croninfo_path, 'r')
except IOError, e:
raise InvalidAppConfigError(
'Cron configuration could not be read from "%s": %s'
% (croninfo_path, e))
try:
return parse_cron_config(croninfo_file)
finally:
croninfo_file.close()
def SetupStubs(app_id, **config):
"""Sets up testing stubs of APIs.
Args:
app_id: Application ID being served.
config: keyword arguments.
Keywords:
root_path: Root path to the directory of the application which should
contain the app.yaml, indexes.yaml, and queues.yaml files.
login_url: Relative URL which should be used for handling user login/logout.
blobstore_path: Path to the directory to store Blobstore blobs in.
datastore_path: Path to the file to store Datastore file stub data in.
use_sqlite: Use the SQLite stub for the datastore.
history_path: DEPRECATED, No-op.
clear_datastore: If the datastore should be cleared on startup.
smtp_host: SMTP host used for sending test mail.
smtp_port: SMTP port.
smtp_user: SMTP user.
smtp_password: SMTP password.
enable_sendmail: Whether to use sendmail as an alternative to SMTP.
show_mail_body: Whether to log the body of emails.
remove: Used for dependency injection.
disable_task_running: True if tasks should not automatically run after
they are enqueued.
task_retry_seconds: How long to wait after an auto-running task before it
is tried again.
trusted: True if this app can access data belonging to other apps. This
behavior is different from the real app server and should be left False
except for advanced uses of dev_appserver.
"""
root_path = config.get('root_path', None)
login_url = config['login_url']
blobstore_path = config['blobstore_path']
datastore_path = config['datastore_path']
clear_datastore = config['clear_datastore']
use_sqlite = config.get('use_sqlite', False)
require_indexes = config.get('require_indexes', False)
smtp_host = config.get('smtp_host', None)
smtp_port = config.get('smtp_port', 25)
smtp_user = config.get('smtp_user', '')
smtp_password = config.get('smtp_password', '')
enable_sendmail = config.get('enable_sendmail', False)
show_mail_body = config.get('show_mail_body', False)
remove = config.get('remove', os.remove)
disable_task_running = config.get('disable_task_running', False)
task_retry_seconds = config.get('task_retry_seconds', 30)
trusted = config.get('trusted', False)
os.environ['APPLICATION_ID'] = app_id
if clear_datastore:
path = datastore_path
if os.path.lexists(path):
logging.info('Attempting to remove file at %s', path)
try:
remove(path)
except OSError, e:
logging.warning('Removing file failed: %s', e)
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if use_sqlite:
datastore = datastore_sqlite_stub.DatastoreSqliteStub(
app_id, datastore_path, require_indexes=require_indexes,
trusted=trusted)
else:
datastore = datastore_file_stub.DatastoreFileStub(
app_id, datastore_path, require_indexes=require_indexes,
trusted=trusted)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore)
fixed_login_url = '%s?%s=%%s' % (login_url,
dev_appserver_login.CONTINUE_PARAM)
fixed_logout_url = '%s&%s' % (fixed_login_url,
dev_appserver_login.LOGOUT_PARAM)
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=fixed_login_url,
logout_url=fixed_logout_url))
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(smtp_host,
smtp_port,
smtp_user,
smtp_password,
enable_sendmail=enable_sendmail,
show_mail_body=show_mail_body))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=root_path,
auto_task_running=(not disable_task_running),
task_retry_seconds=task_retry_seconds))
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
try:
from google.appengine.api.images import images_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub())
except ImportError, e:
logging.warning('Could not initialize images API; you are likely missing '
'the Python "PIL" module. ImportError: %s', e)
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub())
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage))
def CreateImplicitMatcher(
module_dict,
root_path,
login_url,
create_path_adjuster=PathAdjuster,
create_local_dispatcher=LocalCGIDispatcher,
create_cgi_dispatcher=CGIDispatcher,
get_blob_storage=dev_appserver_blobstore.GetBlobStorage):
"""Creates a URLMatcher instance that handles internal URLs.
Used to facilitate handling user login/logout, debugging, info about the
currently running app, etc.
Args:
module_dict: Dictionary in the form used by sys.modules.
root_path: Path to the root of the application.
login_url: Relative URL which should be used for handling user login/logout.
    create_path_adjuster: Used for dependency injection.
    create_local_dispatcher: Used for dependency injection.
    create_cgi_dispatcher: Used for dependency injection.
get_blob_storage: Used for dependency injection.
Returns:
Instance of URLMatcher with appropriate dispatchers.
"""
url_matcher = URLMatcher()
path_adjuster = create_path_adjuster(root_path)
login_dispatcher = create_local_dispatcher(sys.modules, path_adjuster,
dev_appserver_login.main)
url_matcher.AddURL(login_url,
login_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_REDIRECT)
admin_dispatcher = create_cgi_dispatcher(module_dict, root_path,
path_adjuster)
url_matcher.AddURL('/_ah/admin(?:/.*)?',
admin_dispatcher,
DEVEL_CONSOLE_PATH,
False,
False,
appinfo.AUTH_FAIL_ACTION_REDIRECT)
upload_dispatcher = dev_appserver_blobstore.CreateUploadDispatcher(
get_blob_storage)
url_matcher.AddURL(dev_appserver_blobstore.UPLOAD_URL_PATTERN,
upload_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
oauth_dispatcher = dev_appserver_oauth.CreateOAuthDispatcher()
url_matcher.AddURL(dev_appserver_oauth.OAUTH_URL_PATTERN,
oauth_dispatcher,
'',
False,
False,
appinfo.AUTH_FAIL_ACTION_UNAUTHORIZED)
return url_matcher
def SetupTemplates(template_dir):
"""Reads debugging console template files and initializes the console.
Does nothing if templates have already been initialized.
Args:
template_dir: Path to the directory containing the templates files.
Raises:
OSError or IOError if any of the template files could not be read.
"""
if ApplicationLoggingHandler.AreTemplatesInitialized():
return
try:
header = open(os.path.join(template_dir, HEADER_TEMPLATE)).read()
script = open(os.path.join(template_dir, SCRIPT_TEMPLATE)).read()
middle = open(os.path.join(template_dir, MIDDLE_TEMPLATE)).read()
footer = open(os.path.join(template_dir, FOOTER_TEMPLATE)).read()
except (OSError, IOError):
logging.error('Could not read template files from %s', template_dir)
raise
ApplicationLoggingHandler.InitializeTemplates(header, script, middle, footer)
def CreateServer(root_path,
login_url,
port,
template_dir,
serve_address='',
require_indexes=False,
allow_skipped_files=False,
static_caching=True,
python_path_list=sys.path,
sdk_dir=os.path.dirname(os.path.dirname(google.__file__))):
"""Creates an new HTTPServer for an application.
The sdk_dir argument must be specified for the directory storing all code for
the SDK so as to allow for the sandboxing of module access to work for any
and all SDK code. While typically this is where the 'google' package lives,
it can be in another location because of API version support.
Args:
root_path: String containing the path to the root directory of the
application where the app.yaml file is.
login_url: Relative URL which should be used for handling user login/logout.
port: Port to start the application server on.
template_dir: Path to the directory in which the debug console templates
are stored.
serve_address: Address on which the server should serve.
require_indexes: True if index.yaml is read-only gospel; default False.
allow_skipped_files: True if skipped files should be accessible.
static_caching: True if browser caching of static files should be allowed.
python_path_list: Used for dependency injection.
sdk_dir: Directory where the SDK is stored.
Returns:
Instance of BaseHTTPServer.HTTPServer that's ready to start accepting.
"""
absolute_root_path = os.path.realpath(root_path)
SetupTemplates(template_dir)
FakeFile.SetAllowedPaths(absolute_root_path,
[sdk_dir,
template_dir])
FakeFile.SetAllowSkippedFiles(allow_skipped_files)
handler_class = CreateRequestHandler(absolute_root_path,
login_url,
require_indexes,
static_caching)
if absolute_root_path not in python_path_list:
python_path_list.insert(0, absolute_root_path)
server = HTTPServerWithScheduler((serve_address, port), handler_class)
queue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
if queue_stub:
queue_stub._add_event = server.AddEvent
return server
class HTTPServerWithScheduler(BaseHTTPServer.HTTPServer):
"""A BaseHTTPServer subclass that calls a method at a regular interval."""
def __init__(self, server_address, request_handler_class):
"""Constructor.
Args:
server_address: the bind address of the server.
request_handler_class: class used to handle requests.
"""
BaseHTTPServer.HTTPServer.__init__(self, server_address,
request_handler_class)
self._events = []
self._stopped = False
def get_request(self, time_func=time.time, select_func=select.select):
"""Overrides the base get_request call.
Args:
time_func: used for testing.
select_func: used for testing.
Returns:
a (socket_object, address info) tuple.
"""
while True:
if self._events:
current_time = time_func()
next_eta = self._events[0][0]
delay = next_eta - current_time
else:
delay = DEFAULT_SELECT_DELAY
readable, _, _ = select_func([self.socket], [], [], max(delay, 0))
if readable:
return self.socket.accept()
current_time = time_func()
if self._events and current_time >= self._events[0][0]:
unused_eta, runnable = heapq.heappop(self._events)
request_tuple = runnable()
if request_tuple:
return request_tuple
def serve_forever(self):
"""Handle one request at a time until told to stop."""
while not self._stopped:
self.handle_request()
def stop_serving_forever(self):
"""Stop the serve_forever() loop.
Stop happens on the next handle_request() loop; it will not stop
immediately. Since dev_appserver.py must run on py2.5 we can't
use newer features of SocketServer (e.g. shutdown(), added in py2.6).
"""
self._stopped = True
def AddEvent(self, eta, runnable):
"""Add a runnable event to be run at the specified time.
Args:
eta: when to run the event, in seconds since epoch.
runnable: a callable object.
"""
heapq.heappush(self._events, (eta, runnable))
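# Illustrative sketch, not part of the original module: HTTPServerWithScheduler
# keeps (eta, runnable) pairs in a heap so the earliest event is always at
# index 0 and the time until that event can be used as the select() timeout.
# The hypothetical helper below shows how due events would be popped and run.
def _run_due_events_example(events, now):
  """Pops and runs every event whose eta is <= now; returns how many were run."""
  run_count = 0
  while events and events[0][0] <= now:
    unused_eta, runnable = heapq.heappop(events)
    runnable()
    run_count += 1
  return run_count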
| 33.200988 | 114 | 0.669272 |
b7d4e63a4efb33c15327804e792caa706440c590 | 6,520 | py | Python |
salt/states/netntp.py | alexjennings/salt | 921cfe1fe40f37471ebb58fa6577d72b0d6b77d1 | ["Apache-2.0"] | null | null | null |
salt/states/netntp.py | alexjennings/salt | 921cfe1fe40f37471ebb58fa6577d72b0d6b77d1 | ["Apache-2.0"] | null | null | null |
salt/states/netntp.py | alexjennings/salt | 921cfe1fe40f37471ebb58fa6577d72b0d6b77d1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
Network NTP
===============
Configure NTP peers on the device via a salt proxy.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: linux
Dependencies
------------
- :doc:`napalm ntp management module (salt.modules.napalm_ntp) </ref/modules/all/salt.modules.napalm_ntp>`
.. versionadded: 2016.3
'''
from __future__ import absolute_import
import logging
log = logging.getLogger(__name__)
# std lib
from netaddr import IPAddress
from netaddr.core import AddrFormatError
# third party libs
import dns.resolver
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
return 'netntp'
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _retrieve_ntp_peers():
"""Retrieves configured NTP peers"""
return __salt__['ntp.peers']()
def _check_peers(peers):
"""Checks whether the input is a valid list of peers and transforms domain names into IP Addresses"""
if not isinstance(peers, list):
return False
for peer in peers:
if not isinstance(peer, str):
return False
ip_only_peers = list()
for peer in peers:
try:
ip_only_peers.append(str(IPAddress(peer))) # append the str value
except AddrFormatError:
# if not a valid IP Address
dns_reply = list()
try:
# try to see if it is a valid NS
dns_reply = dns.resolver.query(peer)
except dns.resolver.NoAnswer:
                # not a valid DNS entry either
continue
for dns_ip in dns_reply:
ip_only_peers.append(str(dns_ip))
    peers[:] = ip_only_peers  # update the caller's list in place so it holds IP addresses only
return True
def _set_ntp_peers(peers):
"""Calls ntp.set_peers."""
return __salt__['ntp.set_peers'](peers)
def _delete_ntp_peers(peers):
"""Calls ntp.delete_peers."""
return __salt__['ntp.delete_peers'](peers)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, peers=None):
"""
    Updates the list of NTP peers on the device as specified in the state SLS file.
NTP peers not specified in this list will be removed and peers that are not configured will be set.
SLS Example:
.. code-block:: yaml
netntp_example:
netntp.managed:
- peers:
- 192.168.0.1
- 172.17.17.1
"""
result = False
comment = ''
changes = dict()
ret = {
'name': name,
'changes': changes,
'result': result,
'comment': comment
}
if not _check_peers(peers): # check and clean
ret['comment'] = 'NTP peers must be a list of valid IP Addresses or Domain Names'
return ret
# ----- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
ntp_peers_output = _retrieve_ntp_peers() # contains only IP Addresses as dictionary keys
if not ntp_peers_output.get('result'):
ret['comment'] = 'Cannot retrieve NTP peers from the device: {reason}'.format(
reason=ntp_peers_output.get('comment')
)
return ret
configured_ntp_peers = set(ntp_peers_output.get('out', {}).keys())
desired_ntp_peers = set(peers)
if configured_ntp_peers == desired_ntp_peers:
ret['comment'] = 'NTP peers already configured as needed.'
return ret
peers_to_set = list(desired_ntp_peers - configured_ntp_peers)
peers_to_delete = list(configured_ntp_peers - desired_ntp_peers)
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_ntp_peers and _delete_ntp_peers as needed ------------------------------------------------------->
config_change_expected = False
if peers_to_set:
_set = _set_ntp_peers(peers_to_set)
if _set.get('result'):
config_change_expected = True
else: # something went wrong...
result = False
comment += 'Cannot set NTP peers: {reason}'.format(
reason=_set.get('comment')
)
if peers_to_delete:
_removed = _delete_ntp_peers(peers_to_delete)
if _removed.get('result'):
config_change_expected = True
else: # something went wrong...
result = False
comment += 'Cannot remove NTP peers: {reason}'.format(
reason=_removed.get('comment')
)
# <---- Call _set_ntp_peers and _delete_ntp_peers as needed --------------------------------------------------------
# ----- Try to commit changes ------------------------------------------------------------------------------------->
if config_change_expected:
result, config_comment = __salt__['net.config_control']()
comment += config_comment
changes = {
'diff': {
'added': list(peers_to_set),
'removed': list(peers_to_delete)
}
}
# <---- Try to commit changes --------------------------------------------------------------------------------------
ret.update({
'result': result,
'comment': comment,
'changes': changes
})
return ret
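# Illustrative sketch, not part of the original state module: managed() above
# computes the peers to add and to remove as set differences between the
# desired peers and the peers currently configured on the device. The
# hypothetical helper below shows that diff on plain lists; nothing in this
# state calls it.
def _diff_peers_example(configured, desired):
    '''Returns (to_set, to_delete) computed from two lists of peer addresses.'''
    configured_set = set(configured)
    desired_set = set(desired)
    to_set = list(desired_set - configured_set)      # missing on the device
    to_delete = list(configured_set - desired_set)   # configured but not desired
    return to_set, to_delete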
| 30.46729 | 120 | 0.466871 |
e56b058a7b15a6e68b95dee1dd09e56debac0c35 | 3,537 | py | Python |
scripts/ance/encode_corpus_msmarco_doc.py | keleog/pyserini | 3cd6b7ee8e77d699726756938fac0714c10ad0a9 | ["Apache-2.0"] | 451 | 2019-11-02T03:13:38.000Z | 2022-03-31T22:12:53.000Z |
scripts/ance/encode_corpus_msmarco_doc.py | keleog/pyserini | 3cd6b7ee8e77d699726756938fac0714c10ad0a9 | ["Apache-2.0"] | 568 | 2019-11-18T21:52:44.000Z | 2022-03-31T21:03:13.000Z |
scripts/ance/encode_corpus_msmarco_doc.py | keleog/pyserini | 3cd6b7ee8e77d699726756938fac0714c10ad0a9 | ["Apache-2.0"] | 187 | 2019-11-22T15:27:54.000Z | 2022-03-26T17:19:07.000Z |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
import numpy as np
import faiss
from tqdm import tqdm
from transformers import RobertaTokenizer
# We're going to explicitly use a local installation of Pyserini (as opposed to a pip-installed one).
# Comment these lines out to use a pip-installed one instead.
sys.path.insert(0, './')
sys.path.insert(0, '../pyserini/')
from pyserini.dsearch import AnceEncoder
def encode_passage(texts, tokenizer, model, device='cuda:0'):
max_length = 512 # hardcode for now
inputs = tokenizer(
texts,
max_length=max_length,
padding='longest',
truncation=True,
add_special_tokens=True,
return_tensors='pt'
)
inputs.to(device)
embeddings = model(inputs["input_ids"]).detach().cpu().numpy()
return embeddings
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--encoder', type=str, help='encoder name or path', required=True)
parser.add_argument('--dimension', type=int, help='dimension of passage embeddings', required=False, default=768)
parser.add_argument('--corpus', type=str,
help='directory that contains corpus files to be encoded, in jsonl format.', required=True)
parser.add_argument('--index', type=str, help='directory to store brute force index of corpus', required=True)
parser.add_argument('--batch', type=int, help='batch size', default=8)
parser.add_argument('--device', type=str, help='device cpu or cuda [cuda:0, cuda:1...]', default='cuda:0')
args = parser.parse_args()
tokenizer = RobertaTokenizer.from_pretrained(args.encoder)
model = AnceEncoder.from_pretrained(args.encoder)
model.to(args.device)
index = faiss.IndexFlatIP(args.dimension)
if not os.path.exists(args.index):
os.mkdir(args.index)
texts = []
with open(os.path.join(args.index, 'docid'), 'w') as id_file:
for file in sorted(os.listdir(args.corpus)):
file = os.path.join(args.corpus, file)
if file.endswith('json') or file.endswith('jsonl'):
print(f'Loading {file}')
with open(file, 'r') as corpus:
for idx, line in enumerate(tqdm(corpus.readlines())):
info = json.loads(line)
docid = info['id']
text = info['contents']
id_file.write(f'{docid}\n')
url, title, text = text.split('\n')
text = f"{url} <sep> {title} <sep> {text}"
texts.append(text.lower())
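    # Encode the collected passages in batches and add each batch of embeddings to the FAISS index.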
for idx in tqdm(range(0, len(texts), args.batch)):
text_batch = texts[idx: idx+args.batch]
embeddings = encode_passage(text_batch, tokenizer, model, args.device)
index.add(np.array(embeddings))
faiss.write_index(index, os.path.join(args.index, 'index'))
| 40.193182
| 117
| 0.655075
|
732d3ebe14e52c8e69f50a651a4c817f7655d3c5
| 185
|
py
|
Python
|
cride/circles/apps.py
|
Hernandiaz035/CRide
|
6ed47b7d76afd7c70081dd9722227b61a68d46b9
|
[
"MIT"
] | null | null | null |
cride/circles/apps.py
|
Hernandiaz035/CRide
|
6ed47b7d76afd7c70081dd9722227b61a68d46b9
|
[
"MIT"
] | null | null | null |
cride/circles/apps.py
|
Hernandiaz035/CRide
|
6ed47b7d76afd7c70081dd9722227b61a68d46b9
|
[
"MIT"
] | null | null | null |
"""Circles App."""
# Django
from django.apps import AppConfig
class CirclesAppConfig(AppConfig):
"""Circles Appconfig"""
name = 'cride.circles'
verbose_name = 'Circles'
| 15.416667
| 34
| 0.681081
|
38b98522ba2f58eb13ea732a06ffa74f438b4707
| 4,990
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20191101/get_route.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191101/get_route.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20191101/get_route.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRouteResult',
'AwaitableGetRouteResult',
'get_route',
]
@pulumi.output_type
class GetRouteResult:
"""
Route resource.
"""
def __init__(__self__, address_prefix=None, etag=None, name=None, next_hop_ip_address=None, next_hop_type=None, provisioning_state=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if next_hop_ip_address and not isinstance(next_hop_ip_address, str):
raise TypeError("Expected argument 'next_hop_ip_address' to be a str")
pulumi.set(__self__, "next_hop_ip_address", next_hop_ip_address)
if next_hop_type and not isinstance(next_hop_type, str):
raise TypeError("Expected argument 'next_hop_type' to be a str")
pulumi.set(__self__, "next_hop_type", next_hop_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
The destination CIDR to which the route applies.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nextHopIpAddress")
def next_hop_ip_address(self) -> Optional[str]:
"""
The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
"""
return pulumi.get(self, "next_hop_ip_address")
@property
@pulumi.getter(name="nextHopType")
def next_hop_type(self) -> str:
"""
The type of Azure hop the packet should be sent to.
"""
return pulumi.get(self, "next_hop_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the route resource.
"""
return pulumi.get(self, "provisioning_state")
class AwaitableGetRouteResult(GetRouteResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteResult(
address_prefix=self.address_prefix,
etag=self.etag,
name=self.name,
next_hop_ip_address=self.next_hop_ip_address,
next_hop_type=self.next_hop_type,
provisioning_state=self.provisioning_state)
def get_route(resource_group_name: Optional[str] = None,
route_name: Optional[str] = None,
route_table_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str route_name: The name of the route.
:param str route_table_name: The name of the route table.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['routeName'] = route_name
__args__['routeTableName'] = route_table_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20191101:getRoute', __args__, opts=opts, typ=GetRouteResult).value
return AwaitableGetRouteResult(
address_prefix=__ret__.address_prefix,
etag=__ret__.etag,
name=__ret__.name,
next_hop_ip_address=__ret__.next_hop_ip_address,
next_hop_type=__ret__.next_hop_type,
provisioning_state=__ret__.provisioning_state)
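# A minimal usage sketch (not part of the generated module); the resource group,
# route table, and route names below are placeholders:
#
#     import pulumi
#     import pulumi_azure_nextgen.network.v20191101 as network
#
#     route = network.get_route(resource_group_name='my-rg',
#                               route_name='my-route',
#                               route_table_name='my-route-table')
#     pulumi.export('next_hop_type', route.next_hop_type)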
| 37.518797
| 142
| 0.67014
|
ea4ae40aaf554902ab62981837dc833893680275
| 3,136
|
py
|
Python
|
Config Tool/page/examples/rework_progress_bar/main.py
|
FrauBluher/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | 51
|
2015-01-17T16:08:08.000Z
|
2022-01-02T05:06:25.000Z
|
Config Tool/page/examples/rework_progress_bar/main.py
|
hhintoglu/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | null | null | null |
Config Tool/page/examples/rework_progress_bar/main.py
|
hhintoglu/PMSM
|
acb806ea23705ecc8ea29d8a23c3fb10c3b61e19
|
[
"MIT"
] | 39
|
2016-06-18T05:43:14.000Z
|
2022-03-16T13:19:15.000Z
|
#! /usr/bin/env python
#
# Generated by PAGE version 4.2
# In conjunction with Tcl version 8.6
# Jan. 21, 2014 06:35:03 AM
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = 0
except ImportError:
import tkinter.ttk as ttk
py3 = 1
import main_support
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = Tk()
root.title('Main')
root.geometry('600x450+918+173')
w = Main (root)
main_support.init(root, w)
root.mainloop()
w = None
def create_Main (root):
'''Starting point when module is imported by another program.'''
global w, w_win
w = Toplevel (root)
w.title('Main')
w.geometry('600x450+918+173')
w_win = Main (w)
main_support.init(w, w_win)
return w_win
def destroy_Main ():
global w
w.destroy()
w = None
class Main:
def __init__(self, master=None):
_bgcolor = 'wheat' # X11 color: #f5deb3
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#b2c9f4' # Closest X11 color: 'SlateGray2'
_ana1color = '#eaf4b2' # Closest X11 color: '{pale goldenrod}'
_ana2color = '#f4bcb2' # Closest X11 color: 'RosyBrown2'
font12 = "-family {Nimbus Sans L} -size 20 -weight normal " + \
"-slant roman -underline 0 -overstrike 0"
font13 = "-family {Nimbus Sans L} -size 18 -weight normal " + \
"-slant roman -underline 0 -overstrike 0"
master.configure(background=_bgcolor)
master.configure(highlightbackground="wheat")
master.configure(highlightcolor="black")
self.Button1 = Button (master)
self.Button1.place(relx=0.3,rely=0.4,height=40,width=260)
self.Button1.configure(activebackground="#f9f9f9")
self.Button1.configure(background=_bgcolor)
self.Button1.configure(command=main_support.advance)
self.Button1.configure(disabledforeground="#b8a786")
self.Button1.configure(font=font12)
self.Button1.configure(highlightbackground="wheat")
self.Button1.configure(text='''Advance Progress Bar''')
self.Button2 = Button (master)
self.Button2.place(relx=0.45,rely=0.67,height=25,width=49)
self.Button2.configure(activebackground="#f9f9f9")
self.Button2.configure(background=_bgcolor)
self.Button2.configure(command=main_support.quit)
self.Button2.configure(disabledforeground="#b8a786")
self.Button2.configure(highlightbackground="wheat")
self.Button2.configure(text='''Quit''')
self.Label1 = Label (master)
self.Label1.place(relx=0.23,rely=0.18,height=49,width=352)
self.Label1.configure(activebackground="#f9f9f9")
self.Label1.configure(background=_bgcolor)
self.Label1.configure(disabledforeground="#b8a786")
self.Label1.configure(font=font13)
self.Label1.configure(highlightbackground="wheat")
self.Label1.configure(text='''Example of Using a Progress bar''')
if __name__ == '__main__':
vp_start_gui()
| 28.509091
| 73
| 0.652742
|
82cc467366648dd52cc9ded65cc21f46e0da1437
| 41,156
|
py
|
Python
|
sdk/python/pulumi_nomad/external_volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-25T19:12:40.000Z
|
2022-03-17T17:52:35.000Z
|
sdk/python/pulumi_nomad/external_volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2021-11-10T15:49:13.000Z
|
2022-03-31T15:36:18.000Z
|
sdk/python/pulumi_nomad/external_volume.py
|
pulumi/pulumi-nomad
|
00cc556d40bc1895f9ce10cb221fd21d1ef15350
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-07-25T19:12:49.000Z
|
2021-07-25T19:12:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExternalVolumeArgs', 'ExternalVolume']
@pulumi.input_type
class ExternalVolumeArgs:
def __init__(__self__, *,
capabilities: pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]],
plugin_id: pulumi.Input[str],
volume_id: pulumi.Input[str],
capacity_max: Optional[pulumi.Input[str]] = None,
capacity_min: Optional[pulumi.Input[str]] = None,
clone_id: Optional[pulumi.Input[str]] = None,
mount_options: Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ExternalVolume resource.
:param pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
:param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
:param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
:param pulumi.Input[str] capacity_max: Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
:param pulumi.Input[str] capacity_min: Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
:param pulumi.Input[str] clone_id: The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
:param pulumi.Input['ExternalVolumeMountOptionsArgs'] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
:param pulumi.Input[str] name: The display name of the volume.
:param pulumi.Input[str] namespace: The namespace in which to create the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
:param pulumi.Input[str] snapshot_id: The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
:param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
"""
pulumi.set(__self__, "capabilities", capabilities)
pulumi.set(__self__, "plugin_id", plugin_id)
pulumi.set(__self__, "volume_id", volume_id)
if capacity_max is not None:
pulumi.set(__self__, "capacity_max", capacity_max)
if capacity_min is not None:
pulumi.set(__self__, "capacity_min", capacity_min)
if clone_id is not None:
pulumi.set(__self__, "clone_id", clone_id)
if mount_options is not None:
pulumi.set(__self__, "mount_options", mount_options)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def capabilities(self) -> pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]]:
"""
Capabilities intended to be used in a job. At least one capability must be provided.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter(name="pluginId")
def plugin_id(self) -> pulumi.Input[str]:
"""
The ID of the CSI plugin that manages this volume.
"""
return pulumi.get(self, "plugin_id")
@plugin_id.setter
def plugin_id(self, value: pulumi.Input[str]):
pulumi.set(self, "plugin_id", value)
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Input[str]:
"""
The unique ID of the volume, how jobs will refer to the volume.
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_id", value)
@property
@pulumi.getter(name="capacityMax")
def capacity_max(self) -> Optional[pulumi.Input[str]]:
"""
Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
"""
return pulumi.get(self, "capacity_max")
@capacity_max.setter
def capacity_max(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_max", value)
@property
@pulumi.getter(name="capacityMin")
def capacity_min(self) -> Optional[pulumi.Input[str]]:
"""
Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
"""
return pulumi.get(self, "capacity_min")
@capacity_min.setter
def capacity_min(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_min", value)
@property
@pulumi.getter(name="cloneId")
def clone_id(self) -> Optional[pulumi.Input[str]]:
"""
The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
"""
return pulumi.get(self, "clone_id")
@clone_id.setter
def clone_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "clone_id", value)
@property
@pulumi.getter(name="mountOptions")
def mount_options(self) -> Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']]:
"""
Options for mounting 'block-device' volumes without a pre-formatted file system.
"""
return pulumi.get(self, "mount_options")
@mount_options.setter
def mount_options(self, value: Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']]):
pulumi.set(self, "mount_options", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
The namespace in which to create the volume.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
"""
The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the volume. Currently, only 'csi' is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class _ExternalVolumeState:
def __init__(__self__, *,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]]] = None,
capacity_max: Optional[pulumi.Input[str]] = None,
capacity_min: Optional[pulumi.Input[str]] = None,
clone_id: Optional[pulumi.Input[str]] = None,
controller_required: Optional[pulumi.Input[bool]] = None,
controllers_expected: Optional[pulumi.Input[int]] = None,
controllers_healthy: Optional[pulumi.Input[int]] = None,
mount_options: Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
nodes_expected: Optional[pulumi.Input[int]] = None,
nodes_healthy: Optional[pulumi.Input[int]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
plugin_id: Optional[pulumi.Input[str]] = None,
plugin_provider: Optional[pulumi.Input[str]] = None,
plugin_provider_version: Optional[pulumi.Input[str]] = None,
schedulable: Optional[pulumi.Input[bool]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ExternalVolume resources.
:param pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
:param pulumi.Input[str] capacity_max: Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
:param pulumi.Input[str] capacity_min: Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
:param pulumi.Input[str] clone_id: The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
:param pulumi.Input['ExternalVolumeMountOptionsArgs'] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
:param pulumi.Input[str] name: The display name of the volume.
:param pulumi.Input[str] namespace: The namespace in which to create the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
:param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
:param pulumi.Input[str] snapshot_id: The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
:param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
:param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
"""
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if capacity_max is not None:
pulumi.set(__self__, "capacity_max", capacity_max)
if capacity_min is not None:
pulumi.set(__self__, "capacity_min", capacity_min)
if clone_id is not None:
pulumi.set(__self__, "clone_id", clone_id)
if controller_required is not None:
pulumi.set(__self__, "controller_required", controller_required)
if controllers_expected is not None:
pulumi.set(__self__, "controllers_expected", controllers_expected)
if controllers_healthy is not None:
pulumi.set(__self__, "controllers_healthy", controllers_healthy)
if mount_options is not None:
pulumi.set(__self__, "mount_options", mount_options)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if nodes_expected is not None:
pulumi.set(__self__, "nodes_expected", nodes_expected)
if nodes_healthy is not None:
pulumi.set(__self__, "nodes_healthy", nodes_healthy)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if plugin_id is not None:
pulumi.set(__self__, "plugin_id", plugin_id)
if plugin_provider is not None:
pulumi.set(__self__, "plugin_provider", plugin_provider)
if plugin_provider_version is not None:
pulumi.set(__self__, "plugin_provider_version", plugin_provider_version)
if schedulable is not None:
pulumi.set(__self__, "schedulable", schedulable)
if secrets is not None:
pulumi.set(__self__, "secrets", secrets)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if type is not None:
pulumi.set(__self__, "type", type)
if volume_id is not None:
pulumi.set(__self__, "volume_id", volume_id)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]]]:
"""
Capabilities intended to be used in a job. At least one capability must be provided.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExternalVolumeCapabilityArgs']]]]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter(name="capacityMax")
def capacity_max(self) -> Optional[pulumi.Input[str]]:
"""
Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
"""
return pulumi.get(self, "capacity_max")
@capacity_max.setter
def capacity_max(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_max", value)
@property
@pulumi.getter(name="capacityMin")
def capacity_min(self) -> Optional[pulumi.Input[str]]:
"""
Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
"""
return pulumi.get(self, "capacity_min")
@capacity_min.setter
def capacity_min(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity_min", value)
@property
@pulumi.getter(name="cloneId")
def clone_id(self) -> Optional[pulumi.Input[str]]:
"""
The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
"""
return pulumi.get(self, "clone_id")
@clone_id.setter
def clone_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "clone_id", value)
@property
@pulumi.getter(name="controllerRequired")
def controller_required(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "controller_required")
@controller_required.setter
def controller_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "controller_required", value)
@property
@pulumi.getter(name="controllersExpected")
def controllers_expected(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "controllers_expected")
@controllers_expected.setter
def controllers_expected(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "controllers_expected", value)
@property
@pulumi.getter(name="controllersHealthy")
def controllers_healthy(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "controllers_healthy")
@controllers_healthy.setter
def controllers_healthy(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "controllers_healthy", value)
@property
@pulumi.getter(name="mountOptions")
def mount_options(self) -> Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']]:
"""
Options for mounting 'block-device' volumes without a pre-formatted file system.
"""
return pulumi.get(self, "mount_options")
@mount_options.setter
def mount_options(self, value: Optional[pulumi.Input['ExternalVolumeMountOptionsArgs']]):
pulumi.set(self, "mount_options", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
The namespace in which to create the volume.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="nodesExpected")
def nodes_expected(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "nodes_expected")
@nodes_expected.setter
def nodes_expected(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "nodes_expected", value)
@property
@pulumi.getter(name="nodesHealthy")
def nodes_healthy(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "nodes_healthy")
@nodes_healthy.setter
def nodes_healthy(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "nodes_healthy", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="pluginId")
def plugin_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the CSI plugin that manages this volume.
"""
return pulumi.get(self, "plugin_id")
@plugin_id.setter
def plugin_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plugin_id", value)
@property
@pulumi.getter(name="pluginProvider")
def plugin_provider(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "plugin_provider")
@plugin_provider.setter
def plugin_provider(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plugin_provider", value)
@property
@pulumi.getter(name="pluginProviderVersion")
def plugin_provider_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "plugin_provider_version")
@plugin_provider_version.setter
def plugin_provider_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plugin_provider_version", value)
@property
@pulumi.getter
def schedulable(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "schedulable")
@schedulable.setter
def schedulable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "schedulable", value)
@property
@pulumi.getter
def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
"""
return pulumi.get(self, "secrets")
@secrets.setter
def secrets(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "secrets", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
"""
The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the volume. Currently, only 'csi' is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID of the volume, how jobs will refer to the volume.
"""
return pulumi.get(self, "volume_id")
@volume_id.setter
def volume_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_id", value)
class ExternalVolume(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExternalVolumeCapabilityArgs']]]]] = None,
capacity_max: Optional[pulumi.Input[str]] = None,
capacity_min: Optional[pulumi.Input[str]] = None,
clone_id: Optional[pulumi.Input[str]] = None,
mount_options: Optional[pulumi.Input[pulumi.InputType['ExternalVolumeMountOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
plugin_id: Optional[pulumi.Input[str]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a ExternalVolume resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExternalVolumeCapabilityArgs']]]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
:param pulumi.Input[str] capacity_max: Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
:param pulumi.Input[str] capacity_min: Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
:param pulumi.Input[str] clone_id: The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
:param pulumi.Input[pulumi.InputType['ExternalVolumeMountOptionsArgs']] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
:param pulumi.Input[str] name: The display name of the volume.
:param pulumi.Input[str] namespace: The namespace in which to create the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
:param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
:param pulumi.Input[str] snapshot_id: The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
:param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
:param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExternalVolumeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a ExternalVolume resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param ExternalVolumeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExternalVolumeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExternalVolumeCapabilityArgs']]]]] = None,
capacity_max: Optional[pulumi.Input[str]] = None,
capacity_min: Optional[pulumi.Input[str]] = None,
clone_id: Optional[pulumi.Input[str]] = None,
mount_options: Optional[pulumi.Input[pulumi.InputType['ExternalVolumeMountOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
plugin_id: Optional[pulumi.Input[str]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExternalVolumeArgs.__new__(ExternalVolumeArgs)
if capabilities is None and not opts.urn:
raise TypeError("Missing required property 'capabilities'")
__props__.__dict__["capabilities"] = capabilities
__props__.__dict__["capacity_max"] = capacity_max
__props__.__dict__["capacity_min"] = capacity_min
__props__.__dict__["clone_id"] = clone_id
__props__.__dict__["mount_options"] = mount_options
__props__.__dict__["name"] = name
__props__.__dict__["namespace"] = namespace
__props__.__dict__["parameters"] = parameters
if plugin_id is None and not opts.urn:
raise TypeError("Missing required property 'plugin_id'")
__props__.__dict__["plugin_id"] = plugin_id
__props__.__dict__["secrets"] = secrets
__props__.__dict__["snapshot_id"] = snapshot_id
__props__.__dict__["type"] = type
if volume_id is None and not opts.urn:
raise TypeError("Missing required property 'volume_id'")
__props__.__dict__["volume_id"] = volume_id
__props__.__dict__["controller_required"] = None
__props__.__dict__["controllers_expected"] = None
__props__.__dict__["controllers_healthy"] = None
__props__.__dict__["nodes_expected"] = None
__props__.__dict__["nodes_healthy"] = None
__props__.__dict__["plugin_provider"] = None
__props__.__dict__["plugin_provider_version"] = None
__props__.__dict__["schedulable"] = None
super(ExternalVolume, __self__).__init__(
'nomad:index/externalVolume:ExternalVolume',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
capabilities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExternalVolumeCapabilityArgs']]]]] = None,
capacity_max: Optional[pulumi.Input[str]] = None,
capacity_min: Optional[pulumi.Input[str]] = None,
clone_id: Optional[pulumi.Input[str]] = None,
controller_required: Optional[pulumi.Input[bool]] = None,
controllers_expected: Optional[pulumi.Input[int]] = None,
controllers_healthy: Optional[pulumi.Input[int]] = None,
mount_options: Optional[pulumi.Input[pulumi.InputType['ExternalVolumeMountOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
nodes_expected: Optional[pulumi.Input[int]] = None,
nodes_healthy: Optional[pulumi.Input[int]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
plugin_id: Optional[pulumi.Input[str]] = None,
plugin_provider: Optional[pulumi.Input[str]] = None,
plugin_provider_version: Optional[pulumi.Input[str]] = None,
schedulable: Optional[pulumi.Input[bool]] = None,
secrets: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None) -> 'ExternalVolume':
"""
Get an existing ExternalVolume resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExternalVolumeCapabilityArgs']]]] capabilities: Capabilities intended to be used in a job. At least one capability must be provided.
:param pulumi.Input[str] capacity_max: Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
:param pulumi.Input[str] capacity_min: Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
:param pulumi.Input[str] clone_id: The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
:param pulumi.Input[pulumi.InputType['ExternalVolumeMountOptionsArgs']] mount_options: Options for mounting 'block-device' volumes without a pre-formatted file system.
:param pulumi.Input[str] name: The display name of the volume.
:param pulumi.Input[str] namespace: The namespace in which to create the volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
:param pulumi.Input[str] plugin_id: The ID of the CSI plugin that manages this volume.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] secrets: An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
:param pulumi.Input[str] snapshot_id: The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
:param pulumi.Input[str] type: The type of the volume. Currently, only 'csi' is supported.
:param pulumi.Input[str] volume_id: The unique ID of the volume, how jobs will refer to the volume.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExternalVolumeState.__new__(_ExternalVolumeState)
__props__.__dict__["capabilities"] = capabilities
__props__.__dict__["capacity_max"] = capacity_max
__props__.__dict__["capacity_min"] = capacity_min
__props__.__dict__["clone_id"] = clone_id
__props__.__dict__["controller_required"] = controller_required
__props__.__dict__["controllers_expected"] = controllers_expected
__props__.__dict__["controllers_healthy"] = controllers_healthy
__props__.__dict__["mount_options"] = mount_options
__props__.__dict__["name"] = name
__props__.__dict__["namespace"] = namespace
__props__.__dict__["nodes_expected"] = nodes_expected
__props__.__dict__["nodes_healthy"] = nodes_healthy
__props__.__dict__["parameters"] = parameters
__props__.__dict__["plugin_id"] = plugin_id
__props__.__dict__["plugin_provider"] = plugin_provider
__props__.__dict__["plugin_provider_version"] = plugin_provider_version
__props__.__dict__["schedulable"] = schedulable
__props__.__dict__["secrets"] = secrets
__props__.__dict__["snapshot_id"] = snapshot_id
__props__.__dict__["type"] = type
__props__.__dict__["volume_id"] = volume_id
return ExternalVolume(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def capabilities(self) -> pulumi.Output[Sequence['outputs.ExternalVolumeCapability']]:
"""
Capabilities intended to be used in a job. At least one capability must be provided.
"""
return pulumi.get(self, "capabilities")
@property
@pulumi.getter(name="capacityMax")
def capacity_max(self) -> pulumi.Output[Optional[str]]:
"""
Defines how large the volume can be. The storage provider may return a volume that is smaller than this value.
"""
return pulumi.get(self, "capacity_max")
@property
@pulumi.getter(name="capacityMin")
def capacity_min(self) -> pulumi.Output[Optional[str]]:
"""
Defines how small the volume can be. The storage provider may return a volume that is larger than this value.
"""
return pulumi.get(self, "capacity_min")
@property
@pulumi.getter(name="cloneId")
def clone_id(self) -> pulumi.Output[Optional[str]]:
"""
The volume ID to clone when creating this volume. Storage provider must support cloning. Conflicts with 'snapshot_id'.
"""
return pulumi.get(self, "clone_id")
@property
@pulumi.getter(name="controllerRequired")
def controller_required(self) -> pulumi.Output[bool]:
return pulumi.get(self, "controller_required")
@property
@pulumi.getter(name="controllersExpected")
def controllers_expected(self) -> pulumi.Output[int]:
return pulumi.get(self, "controllers_expected")
@property
@pulumi.getter(name="controllersHealthy")
def controllers_healthy(self) -> pulumi.Output[int]:
return pulumi.get(self, "controllers_healthy")
@property
@pulumi.getter(name="mountOptions")
def mount_options(self) -> pulumi.Output[Optional['outputs.ExternalVolumeMountOptions']]:
"""
Options for mounting 'block-device' volumes without a pre-formatted file system.
"""
return pulumi.get(self, "mount_options")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The display name of the volume.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> pulumi.Output[Optional[str]]:
"""
The namespace in which to create the volume.
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="nodesExpected")
def nodes_expected(self) -> pulumi.Output[int]:
return pulumi.get(self, "nodes_expected")
@property
@pulumi.getter(name="nodesHealthy")
def nodes_healthy(self) -> pulumi.Output[int]:
return pulumi.get(self, "nodes_healthy")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
An optional key-value map of strings passed directly to the CSI plugin to configure the volume.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="pluginId")
def plugin_id(self) -> pulumi.Output[str]:
"""
The ID of the CSI plugin that manages this volume.
"""
return pulumi.get(self, "plugin_id")
@property
@pulumi.getter(name="pluginProvider")
def plugin_provider(self) -> pulumi.Output[str]:
return pulumi.get(self, "plugin_provider")
@property
@pulumi.getter(name="pluginProviderVersion")
def plugin_provider_version(self) -> pulumi.Output[str]:
return pulumi.get(self, "plugin_provider_version")
@property
@pulumi.getter
def schedulable(self) -> pulumi.Output[bool]:
return pulumi.get(self, "schedulable")
@property
@pulumi.getter
def secrets(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
An optional key-value map of strings used as credentials for publishing and unpublishing volumes.
"""
return pulumi.get(self, "secrets")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[Optional[str]]:
"""
The snapshot ID to restore when creating this volume. Storage provider must support snapshots. Conflicts with
'clone_id'.
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the volume. Currently, only 'csi' is supported.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Output[str]:
"""
The unique ID of the volume, how jobs will refer to the volume.
"""
return pulumi.get(self, "volume_id")
| 46.662132
| 200
| 0.659685
|
9977bb9ef57f531ee4663b1f2668ced79e8e6f99
| 1,906
|
py
|
Python
|
tinkering/nlp/nlp_processing.py
|
Darthone/Informed-Finance-Canary
|
1fe398c1c157ca09c591be95e6a2f317ad1add8c
|
[
"MIT"
] | 1
|
2018-03-07T08:27:48.000Z
|
2018-03-07T08:27:48.000Z
|
tinkering/nlp/nlp_processing.py
|
Darthone/Informed-Finance-Canary
|
1fe398c1c157ca09c591be95e6a2f317ad1add8c
|
[
"MIT"
] | null | null | null |
tinkering/nlp/nlp_processing.py
|
Darthone/Informed-Finance-Canary
|
1fe398c1c157ca09c591be95e6a2f317ad1add8c
|
[
"MIT"
] | null | null | null |
import nltk
import math
from nltk.corpus import state_union, stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import pandas as pd
#State of the Union Addresses
train_text = open("GM1.txt")
train_text2 = open("GM2.txt")
train_text3 = open("GM3.txt")
train_text4 = open("GM4.txt")
temp_train_text = train_text.readlines()
#print temp_train_text
str1 = ''.join(temp_train_text)
#print type(temp_train_text)
temp_train_text2 = train_text2.readlines()
str2 = ''.join(temp_train_text2)
temp_train_text3 = train_text3.readlines()
#print temp_train_text
str3 = ''.join(temp_train_text3)
#print type(temp_train_text)
temp_train_text4 = train_text4.readlines()
str4 = ''.join(temp_train_text4)
#preprocessing
#tokenize by word - this is the Bag of Words
#tokenized = word_tokenize(str1)
#tokenized2 = word_tokenize(str2)
corpus = [str1, str2, str3, str4]
print corpus
#discard any stop words - saves on processing
stopset = list(stopwords.words('english'))
stopset.append('000')
for i in range(9999):
stopset.append(str(i))
vectorizer = TfidfVectorizer(stop_words=stopset, use_idf=True, ngram_range=(2,3))
#matrix of input set
X = vectorizer.fit_transform(corpus)
#print '\n HERPING DERPING\n'
#print vectorizer.get_feature_names()
X = X.toarray()
print sorted(X[0], reverse=True)
print sorted(vectorizer.inverse_transform(X[0]), reverse=True)
size_matrix = X.shape[0]
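#LSA step: truncated SVD on the TF-IDF document-term matrix projects the documents onto latent topics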
lsa = TruncatedSVD(n_components=size_matrix, n_iter=100)
lsa.fit(X)
#print lsa.components_[0]
#print type(lsa.components_)
terms = vectorizer.get_feature_names()
for i, comp in enumerate(X):
#for i, comp in enumerate(lsa.components_):
termsInComp = zip(terms,comp)
sortedTerms = sorted(termsInComp, key=lambda x: x[1], reverse=True) [:10]
print "Article %d:" % i
for term in sortedTerms:
print term[0]
print " "
| 26.472222
| 81
| 0.766527
|
02187dae6fed9d0eb27598ae17db1bd7ea496228
| 152
|
py
|
Python
|
Desafios/MODULO 1/Desafio 07.py
|
deneyjunior/python-mundos-cev
|
4bc82bf0630f65cf66e5442ae57b72fd4b0207fc
|
[
"MIT"
] | null | null | null |
Desafios/MODULO 1/Desafio 07.py
|
deneyjunior/python-mundos-cev
|
4bc82bf0630f65cf66e5442ae57b72fd4b0207fc
|
[
"MIT"
] | null | null | null |
Desafios/MODULO 1/Desafio 07.py
|
deneyjunior/python-mundos-cev
|
4bc82bf0630f65cf66e5442ae57b72fd4b0207fc
|
[
"MIT"
] | null | null | null |
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
media = (n1 + n2) / 2
print('A sua média é {}.'.format(media))
| 38
| 45
| 0.638158
|
1a18b1158dd639799bb07df0f7e36bd065c6e649
| 1,166
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711172600.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711172600.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711172600.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
""" Closuers
Free variables and closures
Remember: Functions defined inside another function can access the outer (nonLocal) variables
"""
def outer():
    x = 'python'
    def inner():
        # this x refers to the one in outer's scope; such a nonlocal variable is called a free variable
        print("{0} rocks!".format(x))
    # when we consider inner, we are really looking at:
    #   - the function inner
    #   - the free variable x (with the current value 'python')
    # together, these are called a closure
    inner()
outer()  # python rocks!
""" Returning the inner function
What happens if, instead of calling (running) inner from inside outer, we return it?
def outer():
x = 'python' # x is a free variable in inner, it is bound to the variable x in outer, this happens when outer runs
def inner():
print("{0} rocks!".format(x))
return inner # when returning inner, we are actually 'returning' the closure
"""
| 32.388889
| 132
| 0.560034
|
890f1edd22585740ec2499c79f8b455849a8181e
| 459
|
py
|
Python
|
Processsos Estagio/camelCase.py
|
ArthurGini/EstudosPython
|
905e1227fa66fe29feb411dbb8d1ce1b75a98bd6
|
[
"MIT"
] | 1
|
2019-12-03T02:39:17.000Z
|
2019-12-03T02:39:17.000Z
|
Processsos Estagio/camelCase.py
|
ArthurGini/EstudosPython
|
905e1227fa66fe29feb411dbb8d1ce1b75a98bd6
|
[
"MIT"
] | null | null | null |
Processsos Estagio/camelCase.py
|
ArthurGini/EstudosPython
|
905e1227fa66fe29feb411dbb8d1ce1b75a98bd6
|
[
"MIT"
] | null | null | null |
import re
def CamelCase(str):
  str = re.sub('[^a-zA-Z0-9\. ]', '', str)  # keep spaces so the split below can separate words
print(str)
list_words = str.split(" ")
aux = ""
for index, word in enumerate(list_words):
print(index)
print(word)
if index == 0:
aux = aux + word.lower()
else:
      aux = aux + word.title()
#aux = aux + word.upper() + word[1:].lower()
print(aux)
# code goes here
  return aux
# keep this function call here
print(CamelCase(input()))
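# Example: CamelCase("hello world program") returns "helloWorldProgram"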
| 15.827586
| 50
| 0.566449
|
44b6dd155c3eba59c4932b7896824559247df8b1
| 5,795
|
py
|
Python
|
deps/mrtaskman/packages/launch/android_launch.py
|
mims2707/bite-project
|
d6ce4e97fcf0ce666ec92498ff2c2b77c6fe80db
|
[
"Apache-2.0"
] | 11
|
2015-05-27T04:07:32.000Z
|
2021-05-24T11:55:59.000Z
|
deps/mrtaskman/packages/launch/android_launch.py
|
mims2707/bite-project
|
d6ce4e97fcf0ce666ec92498ff2c2b77c6fe80db
|
[
"Apache-2.0"
] | 1
|
2017-12-14T05:25:34.000Z
|
2017-12-14T05:25:34.000Z
|
deps/mrtaskman/packages/launch/android_launch.py
|
katmoon/bite-project
|
d6ce4e97fcf0ce666ec92498ff2c2b77c6fe80db
|
[
"Apache-2.0"
] | 5
|
2016-03-31T13:27:39.000Z
|
2019-02-25T01:18:24.000Z
|
#!/usr/bin/python
"""Executes Android Launch test over adb to attached Android device."""
__author__ = 'jeff.carollo@gmail.com (Jeff Carollo)'
import datetime
import logging
import os
import re
import subprocess
import sys
import time
from tasklib import apklib
ADB_COMMAND = apklib.ADB_COMMAND
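# 'am start -W' blocks until the activity launch completes and prints timing
# information (including TotalTime, which is parsed further below to compute
# execution_time); '-n' names the component to launch.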
LAUNCH_COMMAND = (ADB_COMMAND +
'shell "am start --activity-reset-task-if-needed -W -n %s/%s; echo $? > /data/local/tmp/ret"')
STDOUT_FILENAME = 'cmd_stdout.log'
STDERR_FILENAME = 'cmd_stderr.log'
def ExitWithErrorCode(error_code):
if error_code == 0:
    logging.warning('Error code is zero, making it non-zero')
error_code = -7
sys.exit(error_code)
def main(argv):
my_name = argv.pop(0)
try:
apk_file_path = argv.pop(0)
except:
sys.stderr.write('Must give apk_file_path as first argument.\n')
sys.exit(-1)
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
result_metadata = {}
try:
manifest = apklib.ReadAndroidManifest(apk_file_path)
result_metadata[u'AndroidManifest.xml'] = manifest.encode('utf-8')
class_path = apklib.FindClassPath(manifest)
class_name = apklib.FindClassName(manifest)
logging.info('Found class_path: %s', class_path)
logging.info('installing apk...')
try:
output = subprocess.check_output(
ADB_COMMAND + 'install -r %s' % apk_file_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb install error %d:\n%s', e.returncode, e.output)
try:
logging.info('Signing .apk...')
apklib.SignApk(apk_file_path)
output = subprocess.check_output(
ADB_COMMAND + 'install -r %s' % apk_file_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb install error %d:\n%s', e.returncode, e.output)
ExitWithErrorCode(e.returncode)
try:
if '.' not in class_name:
class_name = '.%s' % class_name
command = LAUNCH_COMMAND % (class_path, class_name)
logging.info('Running command %s.', command)
cmd_stdout = open(STDOUT_FILENAME, 'w')
cmd_stderr = open(STDERR_FILENAME, 'w')
try:
timeout = datetime.timedelta(0, 62) # Give the thing 62 seconds.
begin_time = datetime.datetime.now()
timeout_time = begin_time + timeout
process = subprocess.Popen(args=command,
stdout=cmd_stdout,
stderr=cmd_stderr,
shell=True)
ret = None
while None == ret and (datetime.datetime.now() < timeout_time):
time.sleep(0.02)
ret = process.poll()
finished_time = datetime.datetime.now()
if finished_time >= timeout_time and (None == ret):
logging.error('command %s timed out.', command)
process.terminate()
process.wait()
# TODO(jeff.carollo): Figure out why this times out and fix it.
# Not so easy. Others on Internet report same behavior.
ret = 0
execution_time = finished_time - begin_time
logging.info('execution_time: %s', execution_time)
apklib.CheckAdbShellExitCode()
if ret != 0:
logging.error('adb command exited with code %s', ret)
ExitWithErrorCode(ret)
except subprocess.CalledProcessError, e:
logging.error('CalledProcessError %d:\n%s', e.returncode, e.output)
ExitWithErrorCode(e.returncode)
finally:
cmd_stdout.flush()
cmd_stdout.close()
cmd_stderr.flush()
cmd_stderr.close()
logging.info('Uninstalling .apk...')
try:
output = subprocess.check_output(
ADB_COMMAND + 'uninstall %s' % class_path,
shell=True)
apklib.CheckAdbSuccess(output)
except subprocess.CalledProcessError, e:
logging.error('adb uninstall error %d:\n%s', e.returncode, e.output)
# Don't fail just because uninstall failed.
try:
# Inspect and dump to logs the cmd stdout output.
cmd_stdout = open(STDOUT_FILENAME, 'r')
stdout_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stdout, sys.stdout)
except Exception, e:
logging.error('Error while dumping command stdout: %s', str(e))
stdout_exitcode = -5 # Don't exit yet, allow stderr to be dumped.
finally:
cmd_stdout.close()
try:
# Parse execution_time from output of command and write to metadata.
cmd_stdout = open(STDOUT_FILENAME, 'r')
stdout = cmd_stdout.read()
match = re.match(r'.*TotalTime..(\d+).*', stdout, re.S)
if match:
total_ms = match.group(1)
result_metadata['execution_time'] = float(total_ms) / 1000.0
except Exception, e:
logging.exception(e)
finally:
cmd_stdout.close()
apklib.WriteResultMetadata(result_metadata)
try:
# Inspect and dump to logs the cmd stderr output.
cmd_stderr = open(STDERR_FILENAME, 'r')
stderr_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stderr, sys.stderr)
except Exception, e:
logging.error('Error while dumping command stderr: %s', str(e))
stderr_exitcode = -5
finally:
cmd_stderr.close()
if stdout_exitcode != 0:
logging.info('Error found in stdout.')
ExitWithErrorCode(stdout_exitcode)
if stderr_exitcode != 0:
logging.info('Error found in stderr.')
ExitWithErrorCode(stderr_exitcode)
logging.info('Launch work done successfully.')
return 0
finally:
logging.shutdown()
if __name__ == '__main__':
main(sys.argv)
| 32.740113
| 98
| 0.636238
|
8da68522b504d386d32594ed439db20f5ceb50d5
| 4,731
|
py
|
Python
|
examples/FrictionValidationCases/rolling_friction_initial_velocity.py
|
tp5uiuc/PyElastica
|
37db35137b198d1c0756e058ec1635a3675fab22
|
[
"MIT"
] | null | null | null |
examples/FrictionValidationCases/rolling_friction_initial_velocity.py
|
tp5uiuc/PyElastica
|
37db35137b198d1c0756e058ec1635a3675fab22
|
[
"MIT"
] | 1
|
2022-01-06T11:30:20.000Z
|
2022-02-07T07:11:22.000Z
|
examples/FrictionValidationCases/rolling_friction_initial_velocity.py
|
tp5uiuc/PyElastica
|
37db35137b198d1c0756e058ec1635a3675fab22
|
[
"MIT"
] | null | null | null |
__doc__ = """Rolling friction validation, for detailed explanation refer to Gazzola et. al. R. Soc. 2018
section 4.1.4 and Appendix G """
import numpy as np
import sys
# FIXME: make this more generic without having to append to sys.path
sys.path.append("../../")
from elastica import *
from examples.FrictionValidationCases.friction_validation_postprocessing import (
plot_friction_validation,
)
class RollingFrictionInitialVelocitySimulator(
BaseSystemCollection, Constraints, Forcing
):
pass
# Options
PLOT_FIGURE = True
SAVE_FIGURE = False
SAVE_RESULTS = False
def simulate_rolling_friction_initial_velocity_with(IFactor=0.0):
rolling_friction_initial_velocity_sim = RollingFrictionInitialVelocitySimulator()
# setting up test params
n_elem = 50
start = np.zeros((3,))
direction = np.array([0.0, 0.0, 1.0])
normal = np.array([0.0, 1.0, 0.0])
base_length = 1.0
base_radius = 0.025
base_area = np.pi * base_radius ** 2
mass = 1.0
density = mass / (base_length * base_area)
nu = 1e-6
E = 1e9
# For shear modulus of 2E/3
poisson_ratio = 0.5
shear_modulus = E / (poisson_ratio + 1.0)
# Set shear matrix
shear_matrix = np.repeat(1e4 * np.identity((3))[:, :, np.newaxis], n_elem, axis=2)
shearable_rod = CosseratRod.straight_rod(
n_elem,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
E,
shear_modulus=shear_modulus,
)
# TODO: CosseratRod has to be able to take shear matrix as input, we should change it as done below
shearable_rod.shear_matrix = shear_matrix
# change the mass moment of inertia matrix and its inverse
shearable_rod.mass_second_moment_of_inertia *= IFactor
shearable_rod.inv_mass_second_moment_of_inertia /= IFactor
# set initial velocity of 1m/s to rod elements in the slip direction
Vs = 1.0
shearable_rod.velocity_collection[0, :] += Vs
rolling_friction_initial_velocity_sim.append(shearable_rod)
rolling_friction_initial_velocity_sim.constrain(shearable_rod).using(FreeRod)
# Add gravitational forces
gravitational_acc = -9.80665
rolling_friction_initial_velocity_sim.add_forcing_to(shearable_rod).using(
GravityForces, acc_gravity=np.array([0.0, gravitational_acc, 0.0])
)
# Add friction forces
origin_plane = np.array([0.0, -base_radius, 0.0])
normal_plane = np.array([0.0, 1.0, 0.0])
slip_velocity_tol = 1e-6
static_mu_array = np.array([0.4, 0.4, 0.4]) # [forward, backward, sideways]
kinetic_mu_array = np.array([0.2, 0.2, 0.2]) # [forward, backward, sideways]
rolling_friction_initial_velocity_sim.add_forcing_to(shearable_rod).using(
AnisotropicFrictionalPlane,
k=10.0,
nu=1e-4,
plane_origin=origin_plane,
plane_normal=normal_plane,
slip_velocity_tol=slip_velocity_tol,
static_mu_array=static_mu_array,
kinetic_mu_array=kinetic_mu_array,
)
rolling_friction_initial_velocity_sim.finalize()
timestepper = PositionVerlet()
final_time = 2.0
dt = 1e-6
total_steps = int(final_time / dt)
print("Total steps", total_steps)
integrate(
timestepper, rolling_friction_initial_velocity_sim, final_time, total_steps
)
# compute translational and rotational energy
translational_energy = shearable_rod.compute_translational_energy()
rotational_energy = shearable_rod.compute_rotational_energy()
# compute translational and rotational energy using analytical equations
analytical_translational_energy = 0.5 * mass * Vs ** 2 / (1.0 + IFactor / 2) ** 2
analytical_rotational_energy = (
0.5 * mass * Vs ** 2 * (IFactor / 2.0) / (1.0 + IFactor / 2) ** 2
)
return {
"rod": shearable_rod,
"sweep": IFactor / 2.0,
"translational_energy": translational_energy,
"rotational_energy": rotational_energy,
"analytical_translational_energy": analytical_translational_energy,
"analytical_rotational_energy": analytical_rotational_energy,
}
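# Illustrative sketch (not part of the original validation case): for any IFactor the two
# analytical energies above sum to 0.5 * mass * Vs**2 / (1 + IFactor / 2), i.e. part of the
# initial kinetic energy 0.5 * mass * Vs**2 is dissipated while the rod goes from slipping to
# pure rolling. The helper below is never called by the simulation and its sample IFactor,
# mass and Vs values are arbitrary.
def _analytical_energy_split_example(IFactor=1.0, mass=1.0, Vs=1.0):
    """Return (translational, rotational, total) analytical energies for a sample IFactor."""
    translational = 0.5 * mass * Vs ** 2 / (1.0 + IFactor / 2) ** 2
    rotational = 0.5 * mass * Vs ** 2 * (IFactor / 2.0) / (1.0 + IFactor / 2) ** 2
    # total equals 0.5 * mass * Vs ** 2 / (1.0 + IFactor / 2); for IFactor=1.0 this is 1/3 J
    return translational, rotational, translational + rotational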
if __name__ == "__main__":
import multiprocessing as mp
IFactor = list([float(x) / 100.0 for x in range(20, 200, 10)])
with mp.Pool(mp.cpu_count()) as pool:
results = pool.map(simulate_rolling_friction_initial_velocity_with, IFactor)
if PLOT_FIGURE:
filename = "rolling_friction_initial_velocity.png"
plot_friction_validation(results, SAVE_FIGURE, filename)
if SAVE_RESULTS:
import pickle
filename = "rolling_friction_initial_velocity.dat"
file = open(filename, "wb")
pickle.dump([results], file)
file.close()
| 31.54
| 106
| 0.692877
|
217302c9c1ea5e1db1a2bc3fd40a88d0ea1b3efc
| 1,049
|
py
|
Python
|
splunk_connect_for_snmp/common/hummanbool.py
|
polarG/splunk-connect-for-snmp
|
d1e85675edd5caa5bad9114d1611411e15cec063
|
[
"Apache-2.0"
] | null | null | null |
splunk_connect_for_snmp/common/hummanbool.py
|
polarG/splunk-connect-for-snmp
|
d1e85675edd5caa5bad9114d1611411e15cec063
|
[
"Apache-2.0"
] | null | null | null |
splunk_connect_for_snmp/common/hummanbool.py
|
polarG/splunk-connect-for-snmp
|
d1e85675edd5caa5bad9114d1611411e15cec063
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
def human_bool(flag: Union[str, bool], default: bool = False) -> bool:
if flag is None:
return False
if isinstance(flag, bool):
return flag
if flag.lower() in [
"true",
"1",
"t",
"y",
"yes",
]:
return True
elif flag.lower() in [
"false",
"0",
"f",
"n",
"no",
]:
return False
else:
return default
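# Illustrative usage sketch (not part of the original module); the string values below
# are hypothetical examples, not values taken from any real configuration:
#
#   human_bool("Yes")                  -> True   (case-insensitive match)
#   human_bool("0")                    -> False
#   human_bool(True)                   -> True   (booleans pass straight through)
#   human_bool("maybe")                -> False  (falls back to `default`)
#   human_bool("maybe", default=True)  -> True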
| 23.311111
| 74
| 0.616778
|
1457132ef6ceb9be2329ebf040dc42697704eb68
| 1,334
|
py
|
Python
|
src/mur/admin.py
|
lyy289065406/py-register
|
b76b190e19d221e2901aa0f1034289adae2a6b0b
|
[
"MIT"
] | 1
|
2021-12-27T01:45:09.000Z
|
2021-12-27T01:45:09.000Z
|
src/mur/admin.py
|
lyy289065406/py-gen-mur
|
b76b190e19d221e2901aa0f1034289adae2a6b0b
|
[
"MIT"
] | null | null | null |
src/mur/admin.py
|
lyy289065406/py-gen-mur
|
b76b190e19d221e2901aa0f1034289adae2a6b0b
|
[
"MIT"
] | 1
|
2021-12-27T00:16:13.000Z
|
2021-12-27T00:16:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------
import random
import string
from ._public import *
def read_machine_code() :
'''
    Admin scenario: read the machine code provided by the user
    [return] the machine code
'''
return read(MACHINE_CODE_PATH)
def gen_user_code(days=3, crypt=CRYPT, to_file=True) :
'''
    Admin scenario: issue a user code and write it to a file
    (the file can be sent directly to the user, who puts it in the program's root directory)
    [param] days: number of licensed days; 0 means a permanent license
    [param] to_file: whether to write the user code to a file
    [return] the user code
'''
after_time = after(int(days))
user_code = crypt.encrypt_des(after_time)
if to_file :
save(user_code, USER_CODE_PATH)
return user_code
def gen_register_code(machine_code, user_code, crypt=CRYPT, to_file=True) :
'''
    Admin scenario:
      1. The user provides the (encrypted) machine code
      2. The user code is either chosen by the user in advance or assigned randomly by the admin
      3. The (decrypted) user code plus the machine code produce the registration code
      4. The registration code is written to a file (which can be sent directly to the user,
         who puts it in the program's root directory)
    [param] machine_code: the machine code
    [param] user_code: the user code
    [param] crypt: the encryption/decryption helper
    [param] to_file: whether to write the registration code to a file
    [return] the registration code
'''
try :
uuid = crypt.decrypt_des(machine_code)
expire_time = crypt.decrypt_des(user_code)
register_code = gen_rc(crypt, uuid, expire_time)
except :
register_code = ''
        print('Unable to decrypt the machine code: the encryption key differs or the data format is invalid')
if to_file :
save(register_code, REGISTER_CODE_PATH)
return register_code
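# Illustrative admin workflow sketch (not part of the original module). It only strings
# together the helpers defined above; MACHINE_CODE_PATH, USER_CODE_PATH, REGISTER_CODE_PATH
# and CRYPT come from ._public and are assumed to exist:
#
#   machine_code = read_machine_code()                           # supplied by the user
#   user_code = gen_user_code(days=30)                           # 30-day license, written to USER_CODE_PATH
#   register_code = gen_register_code(machine_code, user_code)   # written to REGISTER_CODE_PATH
#   # send the registration code file back to the user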
| 23
| 75
| 0.606447
|
6840a03cabae0d89ef78aa444e94222af506c1f6
| 967
|
py
|
Python
|
ifrc/urls.py
|
IFRCGo/ifrcgo_react
|
bfd8572daeb492b0659543be58152626f613a7f4
|
[
"MIT"
] | null | null | null |
ifrc/urls.py
|
IFRCGo/ifrcgo_react
|
bfd8572daeb492b0659543be58152626f613a7f4
|
[
"MIT"
] | null | null | null |
ifrc/urls.py
|
IFRCGo/ifrcgo_react
|
bfd8572daeb492b0659543be58152626f613a7f4
|
[
"MIT"
] | null | null | null |
"""ifrc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
from main import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^', include('main.urls')),
]
| 35.814815
| 80
| 0.671148
|
88bf31be2920b6ec7390cf1c30b08cd48a52c06e
| 6,670
|
py
|
Python
|
filters.py
|
Shamairu/leb-master
|
5efaf9c5914e3d387388ff6a43a3dc21b5505dfc
|
[
"Apache-2.0"
] | null | null | null |
filters.py
|
Shamairu/leb-master
|
5efaf9c5914e3d387388ff6a43a3dc21b5505dfc
|
[
"Apache-2.0"
] | null | null | null |
filters.py
|
Shamairu/leb-master
|
5efaf9c5914e3d387388ff6a43a3dc21b5505dfc
|
[
"Apache-2.0"
] | 1
|
2020-06-26T21:39:33.000Z
|
2020-06-26T21:39:33.000Z
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import hashlib
import string
from datetime import datetime
from datetime import timedelta
def convert_to_class(name):
"""Converts a section name to a CSS class.
Args:
name: (string) The section name.
Returns:
Returns a CSS class (string) of the section name.
"""
return re.sub(r"\s", "-", name.lower())
def add_hash(path):
"""Generates a hash from a file.
Args:
path: (string) The path to the file to generate the hash from.
Returns:
Returns a hash digest (string) of the file.
"""
blocksize = 32768
file_hash = hashlib.sha256()
file_path = re.sub(r'/devsummit/', './', path)
with open(file_path) as file_to_hash:
file_buffer = file_to_hash.read(blocksize)
while (len(file_buffer) > 0):
file_hash.update(file_buffer)
file_buffer = file_to_hash.read(blocksize)
return re.sub(r'(.*?)\.(.*)$', ("\\1.%s.\\2" % file_hash.hexdigest()), path)
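# Illustrative sketch (not part of the original filters): add_hash() splices the SHA-256
# digest of the referenced file between the base name and the extension, so a template
# path such as '/devsummit/static/app.js' becomes '/devsummit/static/app.<digest>.js'
# (both the path and the digest here are hypothetical).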
def find_session(sessions_info, url):
"""Finds an individual session based off the URL.
Args:
url: (string) The URL to use to match the session.
Returns:
Returns the session info or none.
"""
# Try and find the session info.
day_index = 0
sorted_date_keys = sorted(sessions_info.keys())
date_format = "%Y-%m-%dT%H:%M:%S"
for date in sorted_date_keys:
day_index = day_index + 1
day = sessions_info[date]
for time, session in day.iteritems():
if "url" not in session:
continue
if session["url"] == ('/devsummit/%s' % url):
session["day_index"] = day_index
session["datetime"] = datetime.strptime(("%sT%s" % (date, time)), date_format)
return session
return None
def as_pst(time, date):
"""Converts the datetime to a PST-centric label.
Args:
time: (string) The time of day in HH:MM:SS format.
date: (string) The date in YYYY-mm-dd format.
Returns:
Returns the PST label.
"""
    # Build a datetime from the given date and time strings.
date = datetime.strptime('%sT%s' % (date, time), '%Y-%m-%dT%H:%M:%S')
hour = date.hour
minutes = date.minute
meridiem = 'AM'
if hour >= 12:
meridiem = 'PM'
if hour > 12:
hour -= 12
time = str(hour)
    if minutes != 0:
time += ' %s' % minutes
return '%s %s PST' % (time, meridiem)
def as_24hr(time, include_separator=False):
"""Converts the time to a 24hr label.
Args:
time: (string) The time of day in HH:MM:SS format.
Returns:
Returns the PST label.
"""
if include_separator:
return time[:5]
return re.sub(r"[^\d]", "", time)[:4]
def get_keys_for_date(sessions_info, date=None):
    if date is None:
date = datetime.today().strftime('%Y-%m-%d')
if date not in sessions_info:
return []
return sorted(sessions_info[date].keys())
def get_current_session(sessions_info):
current_session = None
# Adjust from UTC back to PST
now = datetime.utcnow() - timedelta(hours=8)
# Try and find the session info.
for date in sessions_info.keys():
sorted_session_keys = get_keys_for_date(sessions_info, date)
for time in sorted_session_keys:
dateParts = string.split(date, "-")
timeParts = string.split(time, ":")
session_datetime = datetime(
int(dateParts[0]),
int(dateParts[1]),
int(dateParts[2]),
int(timeParts[0]),
int(timeParts[1]),
int(timeParts[2])
)
if session_datetime < now and session_datetime.day == now.day:
current_session = sessions_info[date][time]
return current_session
def get_next_session(sessions_info):
sorted_date_keys = sorted(sessions_info.keys())
# Adjust from UTC back to PST
now = datetime.utcnow() - timedelta(hours=8)
for date in sorted_date_keys:
sorted_session_keys = get_keys_for_date(sessions_info, date)
for time in sorted_session_keys:
dateParts = string.split(date, "-")
timeParts = string.split(time, ":")
session_datetime = datetime(
int(dateParts[0]), # Year
int(dateParts[1]), # Month
int(dateParts[2]), # Date
int(timeParts[0]), # Hours
int(timeParts[1]), # Minutes
int(timeParts[2]) # Seconds
)
if session_datetime > now and session_datetime.day == now.day:
return {
"datetime": session_datetime,
"details": sessions_info[date][time]
}
return {
"datetime": None,
"details": {}
}
def get_upcoming_sessions(sessions_info):
now = datetime.utcnow() - timedelta(hours=8)
sorted_date_keys = sorted(sessions_info.keys())
upcoming_sessions = []
skip_first_match = True
for date in sorted_date_keys:
        sorted_session_keys = get_keys_for_date(sessions_info, date)
for time in sorted_session_keys:
dateParts = string.split(date, "-")
timeParts = string.split(time, ":")
session_datetime = datetime(
int(dateParts[0]), # Year
int(dateParts[1]), # Month
int(dateParts[2]), # Date
int(timeParts[0]), # Hours
int(timeParts[1]), # Minutes
int(timeParts[2]) # Seconds
)
if session_datetime > now and session_datetime.day == now.day:
if skip_first_match:
skip_first_match = False
continue
upcoming_sessions.append({
"datetime": session_datetime,
"details": sessions_info[date][time]
})
return upcoming_sessions
def get_conference_dates (sessions_info):
return sorted(sessions_info.keys())
| 28.262712
| 94
| 0.590255
|
b30649decc1f7af9e2a404dfa0afe60cabf05de8
| 2,387
|
py
|
Python
|
nebula_meta/nebula_meta/variable.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 2
|
2019-03-17T04:03:08.000Z
|
2019-05-01T09:42:23.000Z
|
nebula_meta/nebula_meta/variable.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | null | null | null |
nebula_meta/nebula_meta/variable.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 4
|
2019-06-24T05:47:24.000Z
|
2020-09-29T05:00:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from .util import text, unicode_dict
__author__ = "nebula"
class Variable(object):
def __init__(self, app, name, key, timestamp, value, property_values):
self._app = text(app)
self._name = text(name)
self._key = text(key)
self._timestamp = timestamp
self._value = float(value)
self._property_values = unicode_dict(property_values)
@property
def app(self):
return self._app
@app.setter
def app(self, app):
self._app = text(app)
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = text(name)
@property
def key(self):
return self._key
@key.setter
def key(self, key):
self._key = text(key)
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
self._timestamp = timestamp
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def property_values(self):
return self._property_values or dict()
@property_values.setter
def property_values(self, property_values):
self._property_values = unicode_dict(property_values)
def get_dict(self):
result = dict()
result["app"] = self.app
result["name"] = self.name
result["key"] = self.key
result["timestamp"] = self.timestamp
result["value"] = self.value
result["propertyValues"] = self.property_values
return result
def get_json(self):
return json.dumps(self.get_dict())
@staticmethod
def from_dict(d):
return Variable(d.get("app"), d.get("name"), d.get("key"), d.get("timestamp"), d.get("value"),
d.get("propertyValues"))
@staticmethod
def from_json(json_str):
return Variable.from_dict(json.loads(json_str))
def copy(self):
return Variable.from_dict(self.get_dict())
def __str__(self):
return "Variable[{}]".format(self.get_dict())
def __eq__(self, other):
return self.get_dict() == other.get_dict()
def __ne__(self, other):
return not self == other
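# Illustrative round-trip sketch (not part of the original class); the field values
# below are made up for the example:
#
#   v = Variable("app", "name", "key", 1500000000000, 1.0, {"k": "v"})
#   assert Variable.from_json(v.get_json()) == v       # serialise and restore
#   assert v.copy() == v and v.copy() is not v         # copy goes through get_dict()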
| 23.401961
| 102
| 0.611227
|
1711ee35182f6e88029cca8d7407fffc22b7c3d5
| 427
|
py
|
Python
|
sqlpuzzle/_backends/postgresql.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 8
|
2015-03-19T11:25:32.000Z
|
2020-09-02T11:30:10.000Z
|
sqlpuzzle/_backends/postgresql.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 7
|
2015-03-23T14:34:28.000Z
|
2022-02-21T12:36:01.000Z
|
sqlpuzzle/_backends/postgresql.py
|
Dundee/python-sqlpuzzle
|
260524922a0645c9bf94a9779195f93ef2c78cba
|
[
"MIT"
] | 4
|
2018-11-28T21:59:27.000Z
|
2020-01-05T01:50:08.000Z
|
import binascii
from .sql import SqlBackend
__all__ = ('PostgreSqlBackend',)
class PostgreSqlBackend(SqlBackend):
name = 'PostgreSQL'
supports_full_join = True
supports_on_conflict_do_update = True
@classmethod
def boolean(cls, value):
return 'true' if value else 'false'
@classmethod
def bytes(cls, value):
return "E'\\\\x{}'".format(binascii.hexlify(value).decode('ascii'))
| 20.333333
| 75
| 0.679157
|
a5604962087b9c22f878317af2623c75bb92246e
| 38
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/quic/__init__.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/quic/__init__.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/quic/__init__.py
|
guardicore/nitro-python
|
5346a5086134aead80968f15a41ff527adaa0ec1
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ['quicparam', 'quicprofile']
| 38
| 38
| 0.710526
|
817b9709538d7d271dd6620a1df0432b3c489191
| 6,295
|
py
|
Python
|
src/utils.py
|
EojinRho/trpo
|
bfcb9632b9dcd28cdb92f40e989f5d78571c9d45
|
[
"MIT"
] | null | null | null |
src/utils.py
|
EojinRho/trpo
|
bfcb9632b9dcd28cdb92f40e989f5d78571c9d45
|
[
"MIT"
] | null | null | null |
src/utils.py
|
EojinRho/trpo
|
bfcb9632b9dcd28cdb92f40e989f5d78571c9d45
|
[
"MIT"
] | null | null | null |
"""
Logging and Data Scaling Utilities
Written by Patrick Coady (pat-coady.github.io)
"""
import numpy as np
import json
import os
import shutil
import glob
import csv
class Scaler(object):
""" Generate scale and offset based on running mean and stddev along axis=0
offset = running mean
scale = 1 / (stddev + 0.1) / 3 (i.e. 3x stddev = +/- 1.0)
"""
def __init__(self, obs_dim):
"""
Args:
obs_dim: dimension of axis=1
"""
self.vars = np.zeros(obs_dim)
self.means = np.zeros(obs_dim)
self.m = 0
self.n = 0
self.first_pass = True
def update(self, x):
""" Update running mean and variance (this is an exact method)
Args:
x: NumPy array, shape = (N, obs_dim)
see: https://stats.stackexchange.com/questions/43159/how-to-calculate-pooled-
variance-of-two-groups-given-known-group-variances-mean
"""
if self.first_pass:
self.means = np.mean(x, axis=0)
self.vars = np.var(x, axis=0)
self.m = x.shape[0]
self.first_pass = False
else:
n = x.shape[0]
new_data_var = np.var(x, axis=0)
new_data_mean = np.mean(x, axis=0)
new_data_mean_sq = np.square(new_data_mean)
new_means = ((self.means * self.m) + (new_data_mean * n)) / (self.m + n)
self.vars = (((self.m * (self.vars + np.square(self.means))) +
(n * (new_data_var + new_data_mean_sq))) / (self.m + n) -
np.square(new_means))
self.vars = np.maximum(0.0, self.vars) # occasionally goes negative, clip
self.means = new_means
self.m += n
def get(self):
""" returns 2-tuple: (scale, offset) """
return 1/(np.sqrt(self.vars) + 0.1)/3, self.means
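# Illustrative usage sketch (not part of the original utilities); the observation
# dimension and batch shapes are arbitrary, and `obs` stands for any (N, 3) batch:
#
#   scaler = Scaler(obs_dim=3)
#   scaler.update(np.random.randn(100, 3))   # exact pooled mean/variance update
#   scale, offset = scaler.get()
#   scaled_obs = (obs - offset) * scale      # roughly maps +/- 3 stddev to +/- 1.0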
class Logger(object):
""" Simple training logger: saves to file and optionally prints to stdout """
def __init__(self, logname, now):
"""
Args:
logname: name for log (e.g. 'Hopper-v1')
now: unique sub-directory name (e.g. date/time string)
"""
path = os.path.join('log-files', logname, now)
self.base_path = path
self.model_count = 0
self.model = None
self.model_2 = None
self.model_3 = None
self.env_name = logname
os.makedirs(path)
filenames = glob.glob('*.py') # put copy of all python files in log_dir
for filename in filenames: # for reference
shutil.copy(filename, path)
path = os.path.join(path, 'log.csv')
self.write_header = True
self.log_entry = {}
self.f = open(path, 'w')
self.writer = None # DictWriter created with first call to write() method
def write(self, display=True):
""" Write 1 log entry to file, and optionally to stdout
Log fields preceded by '_' will not be printed to stdout
Args:
display: boolean, print to stdout
"""
if display:
print(self.env_name)
print(os.path.join(self.base_path, 'model.'+str(self.model_count)+'.json'))
self.disp(self.log_entry)
if self.write_header:
fieldnames = [x for x in self.log_entry.keys()]
self.writer = csv.DictWriter(self.f, fieldnames=fieldnames)
self.writer.writeheader()
self.write_header = False
# write model
if self.model and self.model_count % 25 == 0:
model_file_path = os.path.join(self.base_path, 'model.'+str(self.model_count)+'.json')
mf = open(model_file_path, 'wt')
json.dump(self.model, mf, indent=0, separators=(',', ':'))
mf.close()
model_file_path_2 = os.path.join(self.base_path, 'value.'+str(self.model_count)+'.json')
mf = open(model_file_path_2, 'wt')
json.dump(self.model_2, mf, indent=0, separators=(',', ':'))
mf.close()
            if self.model_3 is not None:
model_file_path_3 = os.path.join(self.base_path, 'auto.csv')
mf = open(model_file_path_3, 'a')
mf.write("%d,%f,%f,%f,%f" % (self.model_count, self.model_3["alive_coef"], self.model_3["progress_coef"],
self.model_3["alive_sum"], self.model_3["progr_sum"]))
mf.close()
self.model_count += 1
self.model = None
self.writer.writerow(self.log_entry)
self.log_entry = {}
self.model = None
self.model_2 = None
self.model_3 = None
@staticmethod
def disp(log):
"""Print metrics to stdout"""
log_keys = [k for k in log.keys()]
log_keys.sort()
print('***** Episode {}, Mean R = {:.1f}, Adv = {:.2f}, Adv_Min = {:.2f} Adv_Max = {:.2f} *****'.format(log['_Episode'],
log['_MeanReward'], log['_mean_adv'], log['_min_adv'], log['_max_adv']))
for key in log_keys:
if key[0] != '_': # don't display log items with leading '_'
print('{:s}: {:.3g}'.format(key, log[key]))
print('\n')
def log(self, items):
""" Update fields in log (does not write to file, used to collect updates.
Args:
items: dictionary of items to update
"""
self.log_entry.update(items)
def log_model(self, model_list):
""" stores the model (as a python list of names and param values)
Args:
model_list: list of param names and values
"""
self.model = model_list
def log_model_2(self, model_list):
""" stores the model (as a python list of names and param values)
Args:
model_list: list of param names and values
"""
self.model_2 = model_list
def log_model_3(self, model_list):
""" stores the model (as a python list of names and param values)
Args:
model_list: list of param names and values
"""
self.model_3 = model_list
def close(self):
""" Close log file - log cannot be written after this """
self.f.close()
| 34.779006
| 128
| 0.549643
|
1528961c80f92672b75cbad0a8e8784a71c3384d
| 16,995
|
py
|
Python
|
azurelinuxagent/common/rdma.py
|
peterd3270/WALinuxAgent
|
68c333ed80b6428a51f7b7e7c49605a47de1176e
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/rdma.py
|
peterd3270/WALinuxAgent
|
68c333ed80b6428a51f7b7e7c49605a47de1176e
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/rdma.py
|
peterd3270/WALinuxAgent
|
68c333ed80b6428a51f7b7e7c49605a47de1176e
|
[
"Apache-2.0"
] | null | null | null |
# Windows Azure Linux Agent
#
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handle packages and modules to enable RDMA for IB networking
"""
import os
import re
import time
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib
dapl_config_paths = [ # pylint: disable=invalid-name
'/etc/dat.conf',
'/etc/rdma/dat.conf',
'/usr/local/etc/dat.conf'
]
def setup_rdma_device(nd_version, shared_conf):
logger.verbose("Parsing SharedConfig XML contents for RDMA details")
xml_doc = parse_doc(shared_conf.xml_text)
if xml_doc is None:
logger.error("Could not parse SharedConfig XML document")
return
instance_elem = find(xml_doc, "Instance")
if not instance_elem:
logger.error("Could not find <Instance> in SharedConfig document")
return
rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address")
if not rdma_ipv4_addr:
logger.error(
"Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document")
return
rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress")
if not rdma_mac_addr:
logger.error(
"Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document")
return
# add colons to the MAC address (e.g. 00155D33FF1D ->
# 00:15:5D:33:FF:1D)
rdma_mac_addr = ':'.join([rdma_mac_addr[i:i + 2]
for i in range(0, len(rdma_mac_addr), 2)])
logger.info("Found RDMA details. IPv4={0} MAC={1}".format(
rdma_ipv4_addr, rdma_mac_addr))
    # Set up the RDMA device with the collected information
RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr, nd_version).start()
logger.info("RDMA: device is set up")
return
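# Illustrative sketch (not part of the original module) of the MAC reformatting performed
# above; the address value is a made-up example:
#
#   ':'.join(["00155D33FF1D"[i:i + 2] for i in range(0, 12, 2)])  -> '00:15:5D:33:FF:1D'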
class RDMAHandler(object):
driver_module_name = 'hv_network_direct'
nd_version = None
def get_rdma_version(self): # pylint: disable=R1710
"""Retrieve the firmware version information from the system.
This depends on information provided by the Linux kernel."""
if self.nd_version :
return self.nd_version
kvp_key_size = 512
kvp_value_size = 2048
driver_info_source = '/var/lib/hyperv/.kvp_pool_0'
base_kernel_err_msg = 'Kernel does not provide the necessary '
base_kernel_err_msg += 'information or the kvp daemon is not running.'
if not os.path.isfile(driver_info_source):
error_msg = 'RDMA: Source file "%s" does not exist. '
error_msg += base_kernel_err_msg
logger.error(error_msg % driver_info_source)
return
f = open(driver_info_source) # pylint: disable=C0103
while True :
key = f.read(kvp_key_size)
value = f.read(kvp_value_size)
if key and value :
key_0 = key.split("\x00")[0]
value_0 = value.split("\x00")[0]
if key_0 == "NdDriverVersion" :
f.close()
self.nd_version = value_0
return self.nd_version
else :
break
f.close()
error_msg = 'RDMA: NdDriverVersion not found in "%s"'
logger.error(error_msg % driver_info_source)
return
@staticmethod
def is_kvp_daemon_running():
"""Look for kvp daemon names in ps -ef output and return True/False
"""
        # On CentOS either hypervkvpd or hv_kvp_daemon may be running;
        # SUSE uses hv_kvp_daemon.
kvp_daemon_names = ['hypervkvpd', 'hv_kvp_daemon']
exitcode, ps_out = shellutil.run_get_output("ps -ef")
if exitcode != 0:
raise Exception('RDMA: ps -ef failed: %s' % ps_out)
for n in kvp_daemon_names: # pylint: disable=C0103
if n in ps_out: # pylint: disable=R1705
logger.info('RDMA: kvp daemon (%s) is running' % n)
return True
else:
logger.verbose('RDMA: kvp daemon (%s) is not running' % n)
return False
def load_driver_module(self):
"""Load the kernel driver, this depends on the proper driver
to be installed with the install_driver() method"""
logger.info("RDMA: probing module '%s'" % self.driver_module_name)
result = shellutil.run('modprobe --first-time %s' % self.driver_module_name)
if result != 0:
error_msg = 'Could not load "%s" kernel module. '
error_msg += 'Run "modprobe --first-time %s" as root for more details'
logger.error(
error_msg % (self.driver_module_name, self.driver_module_name)
)
return False
logger.info('RDMA: Loaded the kernel driver successfully.')
return True
def install_driver_if_needed(self):
if self.nd_version:
if conf.enable_check_rdma_driver():
self.install_driver()
else:
logger.info('RDMA: check RDMA driver is disabled, skip installing driver')
else:
logger.info('RDMA: skip installing driver when ndversion not present\n')
def install_driver(self):
"""Install the driver. This is distribution specific and must
be overwritten in the child implementation."""
logger.error('RDMAHandler.install_driver not implemented')
def is_driver_loaded(self):
"""Check if the network module is loaded in kernel space"""
cmd = 'lsmod | grep ^%s' % self.driver_module_name
status, loaded_modules = shellutil.run_get_output(cmd) # pylint: disable=W0612
logger.info('RDMA: Checking if the module loaded.')
if loaded_modules:
logger.info('RDMA: module loaded.')
return True
logger.info('RDMA: module not loaded.')
return False
def reboot_system(self):
"""Reboot the system. This is required as the kernel module for
the rdma driver cannot be unloaded with rmmod"""
logger.info('RDMA: Rebooting system.')
ret = shellutil.run('shutdown -r now')
if ret != 0:
logger.error('RDMA: Failed to reboot the system')
dapl_config_paths = [ # pylint: disable=invalid-name
'/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf']
class RDMADeviceHandler(object):
"""
Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma
interface.
"""
rdma_dev = '/dev/hvnd_rdma'
sriov_dir = '/sys/class/infiniband'
device_check_timeout_sec = 120
device_check_interval_sec = 1
ipoib_check_timeout_sec = 60
ipoib_check_interval_sec = 1
ipv4_addr = None
mac_adr = None
nd_version = None
def __init__(self, ipv4_addr, mac_addr, nd_version):
self.ipv4_addr = ipv4_addr
self.mac_addr = mac_addr
self.nd_version = nd_version
def start(self):
logger.info("RDMA: starting device processing.")
self.process()
logger.info("RDMA: completed device processing.")
def process(self):
try:
if not self.nd_version :
logger.info("RDMA: provisioning SRIOV RDMA device.")
self.provision_sriov_rdma()
else :
logger.info("RDMA: provisioning Network Direct RDMA device.")
self.provision_network_direct_rdma()
except Exception as e: # pylint: disable=C0103
logger.error("RDMA: device processing failed: {0}".format(e))
def provision_network_direct_rdma(self) :
RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr)
if not conf.enable_check_rdma_driver():
logger.info("RDMA: skip checking RDMA driver version")
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
return
skip_rdma_device = False
module_name = "hv_network_direct"
retcode,out = shellutil.run_get_output("modprobe -R %s" % module_name, chk_err=False)
if retcode == 0:
module_name = out.strip()
else:
logger.info("RDMA: failed to resolve module name. Use original name")
retcode,out = shellutil.run_get_output("modprobe %s" % module_name)
if retcode != 0:
logger.error("RDMA: failed to load module %s" % module_name)
return
retcode,out = shellutil.run_get_output("modinfo %s" % module_name)
if retcode == 0:
version = re.search("version:\s+(\d+)\.(\d+)\.(\d+)\D", out, re.IGNORECASE) # pylint: disable=W1401
if version:
v1 = int(version.groups(0)[0]) # pylint: disable=C0103
v2 = int(version.groups(0)[1]) # pylint: disable=C0103
if v1>4 or v1==4 and v2>0:
logger.info("Skip setting /dev/hvnd_rdma on 4.1 or later")
skip_rdma_device = True
else:
logger.info("RDMA: hv_network_direct driver version not present, assuming 4.0.x or older.")
else:
logger.warn("RDMA: failed to get module info on hv_network_direct.")
if not skip_rdma_device:
RDMADeviceHandler.wait_rdma_device(
self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.write_rdma_config_to_device(
self.rdma_dev, self.ipv4_addr, self.mac_addr)
RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr)
def provision_sriov_rdma(self) : # pylint: disable=R1711
RDMADeviceHandler.wait_any_rdma_device(
self.sriov_dir, self.device_check_timeout_sec, self.device_check_interval_sec)
RDMADeviceHandler.update_iboip_interface(self.ipv4_addr, self.ipoib_check_timeout_sec, self.ipoib_check_interval_sec)
return
@staticmethod
def update_iboip_interface(ipv4_addr, timeout_sec, check_interval_sec) :
logger.info("Wait for ib0 become available")
total_retries = timeout_sec/check_interval_sec
n = 0 # pylint: disable=C0103
found_ib0 = None
while not found_ib0 and n < total_retries:
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
found_ib0 = re.search("ib0", output, re.IGNORECASE)
if found_ib0:
break
time.sleep(check_interval_sec)
n += 1 # pylint: disable=C0103
if not found_ib0:
raise Exception("ib0 is not available")
netmask = 16
logger.info("RDMA: configuring IPv4 addr and netmask on ipoib interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig ib0 {0}".format(addr)) != 0:
raise Exception("Could set addr to {0} on ib0".format(addr))
logger.info("RDMA: ipoib address and netmask configured on interface")
@staticmethod
def update_dat_conf(paths, ipv4_addr):
"""
Looks at paths for dat.conf file and updates the ip address for the
infiniband interface.
"""
logger.info("Updating DAPL configuration file")
for f in paths: # pylint: disable=C0103
logger.info("RDMA: trying {0}".format(f))
if not os.path.isfile(f):
logger.info(
"RDMA: DAPL config not found at {0}".format(f))
continue
logger.info("RDMA: DAPL config is at: {0}".format(f))
cfg = fileutil.read_file(f)
new_cfg = RDMADeviceHandler.replace_dat_conf_contents(
cfg, ipv4_addr)
fileutil.write_file(f, new_cfg)
logger.info("RDMA: DAPL configuration is updated")
return
raise Exception("RDMA: DAPL configuration file not found at predefined paths")
@staticmethod
def replace_dat_conf_contents(cfg, ipv4_addr):
old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" # pylint: disable=W1401
new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format(
ipv4_addr)
return re.sub(old, new, cfg)
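    # Illustrative sketch (not part of the original handler): a dat.conf entry such as
    #   ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 "10.0.0.4 0"
    # is rewritten so that the quoted address becomes the RDMA IPv4 address passed in
    # (10.0.0.4 here is a hypothetical value).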
@staticmethod
def write_rdma_config_to_device(path, ipv4_addr, mac_addr):
data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr)
logger.info(
"RDMA: Updating device with configuration: {0}".format(data))
with open(path, "w") as f: # pylint: disable=C0103
logger.info("RDMA: Device opened for writing")
f.write(data)
logger.info("RDMA: Updated device with IPv4/MAC addr successfully")
@staticmethod
def generate_rdma_config(ipv4_addr, mac_addr):
return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr)
@staticmethod
def wait_rdma_device(path, timeout_sec, check_interval_sec):
logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec))
total_retries = timeout_sec/check_interval_sec
n = 0 # pylint: disable=C0103
while n < total_retries:
if os.path.exists(path):
logger.info("RDMA: device ready")
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1 # pylint: disable=C0103
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def wait_any_rdma_device(dir, timeout_sec, check_interval_sec): # pylint: disable=W0622
logger.info(
"RDMA: waiting for any Infiniband device at directory={0} timeout={1}s".format(
dir, timeout_sec))
total_retries = timeout_sec/check_interval_sec
n = 0 # pylint: disable=C0103
while n < total_retries:
r = os.listdir(dir) # pylint: disable=C0103
if r:
logger.info("RDMA: device found in {0}".format(dir))
return
logger.verbose(
"RDMA: device not ready, sleep {0}s".format(check_interval_sec))
time.sleep(check_interval_sec)
n += 1 # pylint: disable=C0103
logger.error("RDMA device wait timed out")
raise Exception("The device did not show up in {0} seconds ({1} retries)".format(
timeout_sec, total_retries))
@staticmethod
def update_network_interface(mac_addr, ipv4_addr):
netmask=16
logger.info("RDMA: will update the network interface with IPv4/MAC")
if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr)
logger.info("RDMA: network interface found: {0}", if_name)
logger.info("RDMA: bringing network interface up")
if shellutil.run("ifconfig {0} up".format(if_name)) != 0:
raise Exception("Could not bring up RMDA interface: {0}".format(if_name))
logger.info("RDMA: configuring IPv4 addr and netmask on interface")
addr = '{0}/{1}'.format(ipv4_addr, netmask)
if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0:
raise Exception("Could set addr to {1} on {0}".format(if_name, addr))
logger.info("RDMA: network address and netmask configured on interface")
@staticmethod
def get_interface_by_mac(mac):
ret, output = shellutil.run_get_output("ifconfig -a")
if ret != 0:
raise Exception("Failed to list network interfaces")
output = output.replace('\n', '')
match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac),
output, re.IGNORECASE)
if match is None:
raise Exception("Failed to get ifname with mac: {0}".format(mac))
output = match.group(0)
eths = re.findall(r"eth\d", output)
if eths is None or len(eths) == 0: # pylint: disable=len-as-condition
raise Exception("ifname with mac: {0} not found".format(mac))
return eths[-1]
| 41.050725
| 125
| 0.631774
|
eeb8c4f7582fd393e47374292984d42057da74de
| 8,492
|
py
|
Python
|
FGG/dataset/graph_builder.py
|
RunOrVeith/FGG
|
ff761a82692bd13f19add3b0478cc32f71b63a33
|
[
"Apache-2.0"
] | 3
|
2019-09-06T00:27:06.000Z
|
2019-10-28T08:51:18.000Z
|
FGG/dataset/graph_builder.py
|
RunOrVeith/FGG
|
ff761a82692bd13f19add3b0478cc32f71b63a33
|
[
"Apache-2.0"
] | null | null | null |
FGG/dataset/graph_builder.py
|
RunOrVeith/FGG
|
ff761a82692bd13f19add3b0478cc32f71b63a33
|
[
"Apache-2.0"
] | null | null | null |
import enum
from typing import Union, Optional
import itertools
import warnings
import numpy as np
import networkx as nx
from scipy.spatial import distance
from FGG.dataset.tracks import TrackCollection
from FGG.dataset.split_strategy import SplitStrategy
from FGG.metrics.evaluation import GraphMetrics
@enum.unique
class EdgeTypes(enum.IntEnum):
# Warning: Some other parts of the code base rely on the order!
must_link = 1
cannot_link = 2
class GraphBuilder(object):
def __init__(self, split_strategy: SplitStrategy, pos_edge_dropout: float = None, neg_edge_dropout: float = None,
pair_sample_fraction=0.4, edge_between_top_fraction=0.03, weighted_edges=True,
isolates_similarity_only=False, add_wrong_edges=None,
rng: Union[Optional[int], np.random.RandomState] = None):
self.split_strategy = split_strategy
self._original_rng = rng
self.rng = None
self.reset()
self.add_wrong_edges = add_wrong_edges
self.pos_edge_dropout = pos_edge_dropout
self.neg_edge_dropout = neg_edge_dropout
self.pair_sample_fraction = pair_sample_fraction
self.edge_between_top_fraction = edge_between_top_fraction
self.weighted_edges = weighted_edges
self.isolates_similarity_only = isolates_similarity_only
def reset(self):
if isinstance(self._original_rng, int) or self._original_rng is None:
self.rng = np.random.RandomState(seed=self._original_rng)
else:
self.rng = self._original_rng
self.split_strategy.reset()
@staticmethod
def unconnected_graph(tracks: TrackCollection):
graph = nx.Graph()
for track in tracks:
graph.add_node(track)
return graph
@staticmethod
def cannot_link_from_temporal_overlap(graph):
for track_a, track_b in itertools.combinations(graph.nodes, 2):
if track_a.overlaps(track_b):
graph.add_edge(track_a, track_b, type=EdgeTypes.cannot_link, weight=1)
return graph
@staticmethod
def must_link_from_tracker_label(graph):
for track_a, track_b in itertools.combinations(graph.nodes, 2):
if track_a.tracker_id == track_b.tracker_id:
graph.add_edge(track_a, track_b, type=EdgeTypes.must_link, weight=1)
return graph
def both_types_from_sample_distance(self, graph: nx.Graph, tracks, distance_func=distance.cosine):
if self.edge_between_top_fraction is None or self.pair_sample_fraction is None:
return graph
if self.isolates_similarity_only:
sample_from = list(nx.isolates(graph))
else:
sample_from = graph.nodes
graph_size = len(sample_from)
if graph_size <= 1:
return graph
num_samples = int(self.pair_sample_fraction * graph_size)
selected_nodes = self.rng.choice(sample_from, num_samples, replace=False)
assert len(selected_nodes) == num_samples
samples = list(itertools.combinations(selected_nodes, 2))
assert len(samples) == num_samples * (num_samples - 1) / 2
samples = [(track_a, track_b) for track_a, track_b in samples if not graph.has_edge(track_a, track_b)]
distances = np.array([distance_func(tracks[track_a].mean(axis=0), tracks[track_b].mean(axis=0))
for track_a, track_b in samples])
num_samples_to_connect = int(self.edge_between_top_fraction * len(samples) / 2)
most_similar = np.argpartition(distances, num_samples_to_connect)[:num_samples_to_connect]
least_similar = np.argpartition(-distances, num_samples_to_connect)[:num_samples_to_connect]
for same_idx, different_idx in zip(most_similar, least_similar):
# Use 1-distance in both cases because the negation is already present in the edge type
pos_weight, neg_weight = 1, 1
if self.weighted_edges:
pos_weight, neg_weight = 1 - distances[same_idx], 1 - distances[different_idx]
graph.add_edge(*samples[same_idx], type=EdgeTypes.must_link, weight=pos_weight)
graph.add_edge(*samples[different_idx], type=EdgeTypes.cannot_link, weight=neg_weight)
return graph
@staticmethod
def split(graph, split_strategy: SplitStrategy):
marked_for_deletion = []
split_graph = graph.copy(as_view=False)
for track in graph.nodes:
into = split_strategy(track)
neighbors = list(nx.all_neighbors(split_graph, track))
subtracks = track.split(into=into)
for subtrack in subtracks:
split_graph.add_node(subtrack)
for neighbor in neighbors:
split_graph.add_edge(subtrack, neighbor,
type=split_graph[track][neighbor]["type"],
weight=split_graph[track][neighbor]["weight"])
for subtrack_a, subtrack_b in itertools.combinations(subtracks, 2):
split_graph.add_edge(subtrack_a, subtrack_b, type=EdgeTypes.must_link, weight=1)
marked_for_deletion.append(track)
split_graph.remove_nodes_from(marked_for_deletion)
return split_graph
@staticmethod
def graph_to_track_collection(graph, tracks: TrackCollection):
graph_tracks = sorted(graph.nodes)
return TrackCollection(tracks=graph_tracks, features=tracks.features,
person_id_handler=tracks.person_id_handler)
def edge_dropout(self, graph: nx.Graph, edge_type, p):
drop_edges = [(u, v) for u, v, data in graph.edges(data=True)
if data["type"] == edge_type and self.rng.random_sample() <= p]
graph.remove_edges_from(drop_edges)
return graph
def add_random_wrong_edges(self, graph):
graph_size = nx.number_of_nodes(graph)
num_samples = int(self.add_wrong_edges * graph_size)
sample_from = graph.nodes
selected_nodes = self.rng.choice(sample_from, num_samples, replace=False)
for track_a, track_b in itertools.combinations(selected_nodes, 2):
if graph.has_edge(track_a, track_b):
continue
elif track_a.label == track_b.label:
graph.add_edge(track_a, track_b, type=EdgeTypes.cannot_link, weight=1)
else:
graph.add_edge(track_a, track_b, type=EdgeTypes.must_link, weight=1)
return graph
def constraints_to_graph(self, tracks: TrackCollection, split_disconnected_components=False):
graph = self.unconnected_graph(tracks=tracks)
graph = self.cannot_link_from_temporal_overlap(graph)
graph = self.split(graph, split_strategy=self.split_strategy)
graph = self.must_link_from_tracker_label(graph)
if self.pos_edge_dropout is not None:
graph = self.edge_dropout(graph=graph, edge_type=EdgeTypes.must_link, p=self.pos_edge_dropout)
if self.neg_edge_dropout is not None:
graph = self.edge_dropout(graph=graph, edge_type=EdgeTypes.cannot_link, p=self.neg_edge_dropout)
if self.add_wrong_edges is not None:
graph = self.add_random_wrong_edges(graph)
graph = self.both_types_from_sample_distance(graph, tracks=tracks)
if not split_disconnected_components:
print(GraphMetrics(graph))
yield graph, self.graph_to_track_collection(graph=graph, tracks=tracks)
else:
# Need to merge single node components into one because batch norm does not work otherwise
single_node_components = []
for component in nx.connected_components(graph):
if len(component) == 1:
single_node_components.extend(component)
continue
subgraph = graph.subgraph(component)
print(GraphMetrics(subgraph))
yield subgraph, self.graph_to_track_collection(graph=subgraph, tracks=tracks)
if len(single_node_components) == 1:
warnings.warn("Found one single-node component, skipping!")
else:
merged_single_nodes = graph.subgraph(single_node_components)
print(GraphMetrics(merged_single_nodes))
yield merged_single_nodes, self.graph_to_track_collection(graph=merged_single_nodes, tracks=tracks)
| 44.694737
| 117
| 0.670631
|
f823793de9fb82f2d5c6e39b5cc640746ebe1bd9
| 16,607
|
py
|
Python
|
tabun_stat/tabun_stat/datasource/base.py
|
andreymal/stuff
|
33f9c7c80efc29b29fb0e14116eb641f53e45ccc
|
[
"MIT"
] | 9
|
2017-12-07T21:28:07.000Z
|
2020-10-11T17:29:55.000Z
|
tabun_stat/tabun_stat/datasource/base.py
|
andreymal/stuff
|
33f9c7c80efc29b29fb0e14116eb641f53e45ccc
|
[
"MIT"
] | null | null | null |
tabun_stat/tabun_stat/datasource/base.py
|
andreymal/stuff
|
33f9c7c80efc29b29fb0e14116eb641f53e45ccc
|
[
"MIT"
] | 3
|
2019-10-11T10:07:05.000Z
|
2020-01-27T21:05:15.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Optional, Iterable, List, Dict, Any
__all__ = ['DataNotFound', 'BaseDataSource']
class DataNotFound(Exception):
pass
class BaseDataSource:
def destroy(self) -> None:
pass
def _filter_object(self, type: str, obj: Dict[str, Any], filters: Optional[Dict[str, Any]] = None) -> bool:
# pylint: disable=W0622,C0325
if type not in ('user', 'blog', 'post', 'comment'):
raise ValueError('Invalid filter type: {!r}'.format(type))
if not filters:
return True
ok = True
for k, v in filters.items():
if '__' in k:
key, act = k.rsplit('__', 1)
else:
key, act = k, '?'
if type == 'user' and key not in ('user_id',):
raise ValueError('Invalid {} filter: {!r}'.format(type, k))
if type == 'blog' and key not in ('blog_id',):
raise ValueError('Invalid {} filter: {!r}'.format(type, k))
if type == 'post' and key not in ('post_id', 'time'):
raise ValueError('Invalid {} filter: {!r}'.format(type, k))
if type == 'comment' and key not in ('comment_id', 'post_id', 'time'):
raise ValueError('Invalid {} filter: {!r}'.format(type, k))
if act == 'lt':
if not (obj[key] < v):
ok = False
elif act == 'lte':
if not (obj[key] <= v):
ok = False
elif act == 'gt':
if not (obj[key] > v):
ok = False
elif act == 'gte':
if not (obj[key] >= v):
ok = False
else:
raise ValueError('Invalid {} filter: {!r}'.format(type, k))
return ok
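    # Illustrative sketch (not part of the original datasource): a filters dict is a flat
    # mapping of "<field>__<op>" keys; the values below are hypothetical:
    #
    #   filters = {"post_id__gte": 100, "created_at__lt": datetime(2017, 1, 1)}
    #
    # _filter_object("post", post, filters) then returns True only for posts whose
    # post_id >= 100 and created_at < 2017-01-01; iter_posts() applies the same semantics
    # while scanning the id range.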
    # Tabun users
def get_user_by_id(self, user_id: int) -> Dict[str, Any]:
"""Возвращает словарь с данными пользователя по его id.
Если пользователь не найден, выбрасывает ошибку DataNotFound.
Поля: user_id (int), username (str), realname (str),
skill (float), rating (float), gender ('M', 'F' или None),
birthday (date или None), registered_at (datetime или None),
description (str)
"""
raise NotImplementedError
def get_user_by_name(self, username: str) -> Dict[str, Any]:
"""Возвращает словарь с данными пользователя по его нику.
Если пользователь не найден, выбрасывает ошибку DataNotFound.
"""
raise NotImplementedError
def iter_users(self, filters: Optional[Dict[str, Any]] = None) -> Iterable[List[Dict[str, Any]]]:
"""По очереди yield'ит всех существующих пользователей. Если указаны
фильтры, то с учётом их ограничений. Должны быть реализованы следующие
фильтры:
* ``user_id__lt``: id меньше указанного;
* ``user_id__lte``: id меньше указанного или равен ему;
* ``user_id__gt``: id больше указанного;
* ``user_id__gte``: id больше указанного или равен ему.
"""
stat = self.get_users_limits(filters=filters)
for user_id in range(stat['first_id'], stat['last_id'] + 1):
try:
user = self.get_user_by_id(user_id)
except DataNotFound:
continue
if filters and 'user_id__lt' in filters:
if user['user_id'] >= filters['user_id__lt']:
continue
if filters and 'user_id__lte' in filters:
if user['user_id'] > filters['user_id__lte']:
continue
if filters and 'user_id__gt' in filters:
if user['user_id'] <= filters['user_id__gt']:
continue
if filters and 'user_id__gte' in filters:
if user['user_id'] < filters['user_id__gte']:
continue
yield [user]
def get_username_by_user_id(self, user_id: int) -> str:
"""Возвращает имя пользователя по его id."""
return self.get_user_by_id(user_id)['username']
def get_users_limits(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Возвращает статистику о всех существующих пользователях. Если
указаны фильтры, то с учётом их ограничений. Результат — вот такой
словарь:
* ``count`` (int) — общее число пользователей;
* ``first_id`` (int или None) — самый маленький id (если есть);
* ``last_id`` (int или None) — самый большой id (если есть).
"""
raise NotImplementedError
    # Blogs
def get_blog_by_id(self, blog_id: int) -> Dict[str, Any]:
"""Возвращает словарь с данными блога по его id.
Если пост не найден, выбрасывает ошибку DataNotFound.
Поля: blog_id (int), slug (str), name (str),
creator_id (int, id юзера), rating (float), status (int, 0 - открытый,
1 - закрытый, 2 - полузакрытый), description (str), vote_count (int),
created_at (datetime), deleted (bool)
"""
raise NotImplementedError
def get_blog_by_slug(self, slug: str) -> Dict[str, Any]:
"""Возвращает словарь с данными блога по его slug (url-имя блога,
которое используется в ссылках).
"""
raise NotImplementedError
def get_blog_status_by_id(self, blog_id: Optional[int]) -> int:
"""Возвращает статус блога по его id. 0 - открытый блог,
1 - закрытый, 2 - полузакрытый.
blog_id = None означает личный блог, и для него всегда должнен
возвращаться ноль (личные блоги всегда открытые).
"""
if blog_id is None:
return 0
return self.get_blog_by_id(blog_id)['status']
def get_blog_status_by_slug(self, slug: Optional[str]) -> int:
"""Возвращает статус блога по его slug. 0 - открытый блог,
1 - закрытый, 2 - полузакрытый.
Пустой slug означает личный блог, и для него всегда должнен
возвращаться ноль (личные блоги всегда открытые).
"""
if not slug:
return 0
return self.get_blog_by_slug(slug)['status']
def get_blog_id_by_slug(self, slug: str) -> int:
"""Возвращает id блога по его slug."""
return self.get_blog_by_slug(slug)['blog_id']
def get_blog_id_of_post(self, post_id: int) -> Optional[int]:
"""Возвращает id блога, в котором находится указанный пост.
Если в личном блоге, то None.
"""
return self.get_post_by_id(post_id)['blog_id']
def iter_blogs(self, filters: Optional[Dict[str, Any]] = None) -> Iterable[List[Dict[str, Any]]]:
"""По очереди yield'ит все существующие блоги. Если указаны фильтры,
то с учётом их ограничений. Должны быть реализованы следующие фильтры:
* ``blog_id__lt``: id меньше указанного;
* ``blog_id__lte``: id меньше указанного или равен ему;
* ``blog_id__gt``: id больше указанного;
* ``blog_id__gte``: id больше указанного или равен ему.
"""
stat = self.get_blogs_limits(filters=filters)
for blog_id in range(stat['first_id'], stat['last_id'] + 1):
try:
blog = self.get_blog_by_id(blog_id)
except DataNotFound:
continue
if filters and 'blog_id__lt' in filters:
if blog['blog_id'] >= filters['blog_id__lt']:
continue
if filters and 'blog_id__lte' in filters:
if blog['blog_id'] > filters['blog_id__lte']:
continue
if filters and 'blog_id__gt' in filters:
if blog['blog_id'] <= filters['blog_id__gt']:
continue
if filters and 'blog_id__gte' in filters:
if blog['blog_id'] < filters['blog_id__gte']:
continue
yield [blog]
def get_blogs_limits(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Возвращает статистику о всех существующих блогах. Если указаны
фильтры, то с учётом их ограничений. Результат — вот такой словарь:
* ``count`` (int) — общее число блогов;
* ``first_id`` (int или None) — самый маленький id (если есть);
* ``last_id`` (int или None) — самый большой id (если есть).
"""
raise NotImplementedError
    # Posts (optionally with comments)
def get_post_by_id(self, post_id: int, with_comments: bool = False) -> Dict[str, Any]:
"""Возвращает словарь с данными поста по его id.
Если пост не найден, выбрасывает ошибку DataNotFound.
Если указано with_comments=True, то должно ещё присутствовать
поле comments со списком комментариев (сортировка не определена);
формат комментария см. в справке get_comment_by_id.
Поля: post_id (int), created_at (datetime), blog_id (int; для личного
блога None), blog_status (0, 1 или 2; для личных блогов всегда 0),
author_id (int, id пользователя), title (str), vote_count (int),
vote_value (int или None, если неизвестно), body (str),
favorites_count (int), deleted (bool), draft (bool)
"""
raise NotImplementedError
def iter_posts(self, with_comments: bool = False, filters: Optional[Dict[str, Any]] = None) -> Iterable[List[Dict[str, Any]]]:
"""По очереди yield'ит все существующие посты. Если указаны фильтры,
то с учётом их ограничений. Должны быть реализованы следующие фильтры:
* ``post_id__lt``: id меньше указанного;
* ``post_id__lte``: id меньше указанного или равен ему;
* ``post_id__gt``: id больше указанного;
* ``post_id__gte``: id больше указанного или равен ему;
* ``created_at__lt``, ``created_at__lte``, ``created_at__gt``,
``created_at__gte``: аналогично для времени создания поста (datetime)
"""
stat = self.get_posts_limits(filters=filters)
for post_id in range(stat['first_id'], stat['last_id'] + 1):
try:
post = self.get_post_by_id(post_id, with_comments)
except DataNotFound:
continue
if filters and 'post_id__lt' in filters:
if post['post_id'] >= filters['post_id__lt']:
continue
if filters and 'post_id__lte' in filters:
if post['post_id'] > filters['post_id__lte']:
continue
if filters and 'post_id__gt' in filters:
if post['post_id'] <= filters['post_id__gt']:
continue
if filters and 'post_id__gte' in filters:
if post['post_id'] < filters['post_id__gte']:
continue
if filters and 'created_at__lt' in filters:
if not post['created_at'] or post['created_at'] >= filters['created_at__lt']:
continue
if filters and 'created_at__lte' in filters:
if not post['created_at'] or post['created_at'] > filters['created_at__lte']:
continue
if filters and 'created_at__gt' in filters:
if not post['created_at'] or post['created_at'] <= filters['created_at__gt']:
continue
if filters and 'created_at__gte' in filters:
if not post['created_at'] or post['created_at'] < filters['created_at__gte']:
continue
yield [post]
def get_posts_limits(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Возвращает статистику о всех существующих постах. Если указаны
фильтры, то с учётом их ограничений. Результат — вот такой словарь:
* ``count`` (int) — общее число постов;
* ``first_id`` (int или None) — самый маленький id;
* ``last_id`` (int или None) — самый большой id;
* ``first_created_at`` (datetime или None) — самая ранняя дата поста;
* ``last_created_at`` (datetime или None) — самая поздняя дата поста.
"""
raise NotImplementedError
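# Usage sketch (illustrative, not part of the interface): consuming
# ``iter_posts`` with the documented date filters, where ``source`` is an
# instance of a concrete subclass of this class:
#
#     from datetime import datetime
#     filters = {'created_at__gte': datetime(2015, 1, 1),
#                'created_at__lt': datetime(2016, 1, 1)}
#     for batch in source.iter_posts(with_comments=True, filters=filters):
#         for post in batch:
#             print(post['post_id'], post['title'], len(post.get('comments', [])))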
# Comments
def get_comment_by_id(self, comment_id: int) -> Dict[str, Any]:
"""Возвращает словарь с данными комментария по его id.
Если комментарий не найден, выбрасывает ошибку DataNotFound.
Поля: comment_id (int), post_id (int или None для комментов-сирот),
parent_id (int или None), author_id (int), created_at (datetime),
vote_value (int), body (str), deleted (bool), favorites_count (int).
"""
raise NotImplementedError
def get_post_comments(self, post_id: int) -> List[Dict[str, Any]]:
"""Возвращает список комментариев для данного поста.
Если пост не найден, выбрасывает ошибку DataNotFound.
"""
raise NotImplementedError
def iter_comments(self, filters: Optional[Dict[str, Any]] = None) -> Iterable[List[Dict[str, Any]]]:
"""По очереди yield'ит все существующие комменты. Если указаны фильтры,
то с учётом их ограничений. Должны быть реализованы следующие фильтры:
* ``comment_id__lt``: id меньше указанного;
* ``comment_id__lte``: id меньше указанного или равен ему;
* ``comment_id__gt``: id больше указанного;
* ``comment_id__gte``: id больше указанного или равен ему;
* ``post_id__lt``, ``post_id__lte``, ``post_id__gt``, ``post_id__gte``:
аналогично, но для id поста, которому принадлежит коммент;
* ``created_at__lt``, ``created_at__lte``, ``created_at__gt``,
``created_at__gte``: аналогично для времени создания коммента
(datetime)
"""
stat = self.get_comments_limits(filters=filters)
# guard against an empty storage, where first_id/last_id are None
if stat['first_id'] is None or stat['last_id'] is None:
    return
for comment_id in range(stat['first_id'], stat['last_id'] + 1):
try:
comment = self.get_comment_by_id(comment_id)
except DataNotFound:
continue
if filters and 'comment_id__lt' in filters:
if comment['comment_id'] >= filters['comment_id__lt']:
continue
if filters and 'comment_id__lte' in filters:
if comment['comment_id'] > filters['comment_id__lte']:
continue
if filters and 'comment_id__gt' in filters:
if comment['comment_id'] <= filters['comment_id__gt']:
continue
if filters and 'comment_id__gte' in filters:
if comment['comment_id'] < filters['comment_id__gte']:
continue
if filters and 'post_id__lt' in filters:
if comment['post_id'] >= filters['post_id__lt']:
continue
if filters and 'post_id__lte' in filters:
if comment['post_id'] > filters['post_id__lte']:
continue
if filters and 'post_id__gt' in filters:
if comment['post_id'] <= filters['post_id__gt']:
continue
if filters and 'post_id__gte' in filters:
if comment['post_id'] < filters['post_id__gte']:
continue
if filters and 'created_at__lt' in filters:
if not comment['created_at'] or comment['created_at'] >= filters['created_at__lt']:
continue
if filters and 'created_at__lte' in filters:
if not comment['created_at'] or comment['created_at'] > filters['created_at__lte']:
continue
if filters and 'created_at__gt' in filters:
if not comment['created_at'] or comment['created_at'] <= filters['created_at__gt']:
continue
if filters and 'created_at__gte' in filters:
if not comment['created_at'] or comment['created_at'] < filters['created_at__gte']:
continue
yield [comment]
def get_comments_limits(self, filters: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Возвращает статистику о всех существующих комментах. Если указаны
фильтры, то с учётом их ограничений. Результат — вот такой словарь:
* ``count`` (int) — общее число комментов;
* ``first_id`` (int или None) — самый маленький id;
* ``last_id`` (int или None) — самый большой id;
* ``first_created_at`` (datetime или None) — самая ранняя дата
коммента;
* ``last_created_at`` (datetime или None) — самая поздняя дата
коммента.
"""
raise NotImplementedError
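# Minimal sketch of a concrete in-memory implementation of the comment
# accessors (class name, storage layout and the DataNotFound constructor
# arguments are assumptions for illustration only; filters are ignored):
#
#     class InMemorySource(...):  # "..." = this abstract base class
#         def __init__(self, comments):
#             self._comments = {c['comment_id']: c for c in comments}
#
#         def get_comment_by_id(self, comment_id):
#             try:
#                 return dict(self._comments[comment_id])
#             except KeyError:
#                 raise DataNotFound(comment_id)
#
#         def get_comments_limits(self, filters=None):
#             ids = sorted(self._comments)
#             dates = [c['created_at'] for c in self._comments.values()]
#             return {
#                 'count': len(ids),
#                 'first_id': ids[0] if ids else None,
#                 'last_id': ids[-1] if ids else None,
#                 'first_created_at': min(dates) if dates else None,
#                 'last_created_at': max(dates) if dates else None,
#             }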
| 42.256997
| 130
| 0.584151
|
982e6e1f317079970922f9ec03b3dc2961b54346
| 21,102
|
py
|
Python
|
vsts/vsts/service_hooks/v4_1/service_hooks_client.py
|
dhilmathy/azure-devops-python-api
|
d16026911f93361becb52d2f1c124d5c3e8a82e7
|
[
"MIT"
] | null | null | null |
vsts/vsts/service_hooks/v4_1/service_hooks_client.py
|
dhilmathy/azure-devops-python-api
|
d16026911f93361becb52d2f1c124d5c3e8a82e7
|
[
"MIT"
] | 37
|
2020-04-27T07:45:19.000Z
|
2021-04-05T07:27:15.000Z
|
vsts/vsts/service_hooks/v4_1/service_hooks_client.py
|
dhilmathy/azure-devops-python-api
|
d16026911f93361becb52d2f1c124d5c3e8a82e7
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class ServiceHooksClient(VssClient):
"""ServiceHooks
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(ServiceHooksClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = None
def get_consumer_action(self, consumer_id, consumer_action_id, publisher_id=None):
"""GetConsumerAction.
Get details about a specific consumer action.
:param str consumer_id: ID for a consumer.
:param str consumer_action_id: ID for a consumerActionId.
:param str publisher_id:
:rtype: :class:`<ConsumerAction> <service-hooks.v4_1.models.ConsumerAction>`
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
if consumer_action_id is not None:
route_values['consumerActionId'] = self._serialize.url('consumer_action_id', consumer_action_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='c3428e90-7a69-4194-8ed8-0f153185ee0d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ConsumerAction', response)
def list_consumer_actions(self, consumer_id, publisher_id=None):
"""ListConsumerActions.
Get a list of consumer actions for a specific consumer.
:param str consumer_id: ID for a consumer.
:param str publisher_id:
:rtype: [ConsumerAction]
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='c3428e90-7a69-4194-8ed8-0f153185ee0d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ConsumerAction]', self._unwrap_collection(response))
def get_consumer(self, consumer_id, publisher_id=None):
"""GetConsumer.
Get a specific consumer service. Optionally filter out consumer actions that do not support any event types for the specified publisher.
:param str consumer_id: ID for a consumer.
:param str publisher_id:
:rtype: :class:`<Consumer> <service-hooks.v4_1.models.Consumer>`
"""
route_values = {}
if consumer_id is not None:
route_values['consumerId'] = self._serialize.url('consumer_id', consumer_id, 'str')
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Consumer', response)
def list_consumers(self, publisher_id=None):
"""ListConsumers.
Get a list of available service hook consumer services. Optionally filter by consumers that support at least one event type from the specific publisher.
:param str publisher_id:
:rtype: [Consumer]
"""
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19',
version='4.1',
query_parameters=query_parameters)
return self._deserialize('[Consumer]', self._unwrap_collection(response))
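# Sketch: list only the consumers that support at least one event type from a
# given publisher (the 'tfs' publisher id is illustrative):
#
#     consumers = client.list_consumers(publisher_id='tfs')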
def get_subscription_diagnostics(self, subscription_id):
"""GetSubscriptionDiagnostics.
[Preview API]
:param str subscription_id:
:rtype: :class:`<SubscriptionDiagnostics> <service-hooks.v4_1.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
response = self._send(http_method='GET',
location_id='3b36bcb5-02ad-43c6-bbfa-6dfc6f8e9d68',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('SubscriptionDiagnostics', response)
def update_subscription_diagnostics(self, update_parameters, subscription_id):
"""UpdateSubscriptionDiagnostics.
[Preview API]
:param :class:`<UpdateSubscripitonDiagnosticsParameters> <service-hooks.v4_1.models.UpdateSubscripitonDiagnosticsParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<SubscriptionDiagnostics> <service-hooks.v4_1.models.SubscriptionDiagnostics>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(update_parameters, 'UpdateSubscripitonDiagnosticsParameters')
response = self._send(http_method='PUT',
location_id='3b36bcb5-02ad-43c6-bbfa-6dfc6f8e9d68',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('SubscriptionDiagnostics', response)
def get_event_type(self, publisher_id, event_type_id):
"""GetEventType.
Get a specific event type.
:param str publisher_id: ID for a publisher.
:param str event_type_id:
:rtype: :class:`<EventTypeDescriptor> <service-hooks.v4_1.models.EventTypeDescriptor>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
if event_type_id is not None:
route_values['eventTypeId'] = self._serialize.url('event_type_id', event_type_id, 'str')
response = self._send(http_method='GET',
location_id='db4777cd-8e08-4a84-8ba3-c974ea033718',
version='4.1',
route_values=route_values)
return self._deserialize('EventTypeDescriptor', response)
def list_event_types(self, publisher_id):
"""ListEventTypes.
Get the event types for a specific publisher.
:param str publisher_id: ID for a publisher.
:rtype: [EventTypeDescriptor]
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='db4777cd-8e08-4a84-8ba3-c974ea033718',
version='4.1',
route_values=route_values)
return self._deserialize('[EventTypeDescriptor]', self._unwrap_collection(response))
def get_notification(self, subscription_id, notification_id):
"""GetNotification.
Get a specific notification for a subscription.
:param str subscription_id: ID for a subscription.
:param int notification_id:
:rtype: :class:`<Notification> <service-hooks.v4_1.models.Notification>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
if notification_id is not None:
route_values['notificationId'] = self._serialize.url('notification_id', notification_id, 'int')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='4.1',
route_values=route_values)
return self._deserialize('Notification', response)
def get_notifications(self, subscription_id, max_results=None, status=None, result=None):
"""GetNotifications.
Get a list of notifications for a specific subscription. A notification includes details about the event, the request to and the response from the consumer service.
:param str subscription_id: ID for a subscription.
:param int max_results: Maximum number of notifications to return. Default is **100**.
:param str status: Get only notifications with this status.
:param str result: Get only notifications with this result type.
:rtype: [Notification]
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
query_parameters = {}
if max_results is not None:
query_parameters['maxResults'] = self._serialize.query('max_results', max_results, 'int')
if status is not None:
query_parameters['status'] = self._serialize.query('status', status, 'str')
if result is not None:
query_parameters['result'] = self._serialize.query('result', result, 'str')
response = self._send(http_method='GET',
location_id='0c62d343-21b0-4732-997b-017fde84dc28',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Notification]', self._unwrap_collection(response))
def query_notifications(self, query):
"""QueryNotifications.
Query for notifications. A notification includes details about the event, the request to and the response from the consumer service.
:param :class:`<NotificationsQuery> <service-hooks.v4_1.models.NotificationsQuery>` query:
:rtype: :class:`<NotificationsQuery> <service-hooks.v4_1.models.NotificationsQuery>`
"""
content = self._serialize.body(query, 'NotificationsQuery')
response = self._send(http_method='POST',
location_id='1a57562f-160a-4b5c-9185-905e95b39d36',
version='4.1',
content=content)
return self._deserialize('NotificationsQuery', response)
def query_input_values(self, input_values_query, publisher_id):
"""QueryInputValues.
:param :class:`<InputValuesQuery> <service-hooks.v4_1.models.InputValuesQuery>` input_values_query:
:param str publisher_id:
:rtype: :class:`<InputValuesQuery> <service-hooks.v4_1.models.InputValuesQuery>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
content = self._serialize.body(input_values_query, 'InputValuesQuery')
response = self._send(http_method='POST',
location_id='d815d352-a566-4dc1-a3e3-fd245acf688c',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('InputValuesQuery', response)
def get_publisher(self, publisher_id):
"""GetPublisher.
Get a specific service hooks publisher.
:param str publisher_id: ID for a publisher.
:rtype: :class:`<Publisher> <service-hooks.v4_1.models.Publisher>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731',
version='4.1',
route_values=route_values)
return self._deserialize('Publisher', response)
def list_publishers(self):
"""ListPublishers.
Get a list of publishers.
:rtype: [Publisher]
"""
response = self._send(http_method='GET',
location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731',
version='4.1')
return self._deserialize('[Publisher]', self._unwrap_collection(response))
def query_publishers(self, query):
"""QueryPublishers.
Query for service hook publishers.
:param :class:`<PublishersQuery> <service-hooks.v4_1.models.PublishersQuery>` query:
:rtype: :class:`<PublishersQuery> <service-hooks.v4_1.models.PublishersQuery>`
"""
content = self._serialize.body(query, 'PublishersQuery')
response = self._send(http_method='POST',
location_id='99b44a8a-65a8-4670-8f3e-e7f7842cce64',
version='4.1',
content=content)
return self._deserialize('PublishersQuery', response)
def create_subscription(self, subscription):
"""CreateSubscription.
Create a subscription.
:param :class:`<Subscription> <service-hooks.v4_1.models.Subscription>` subscription: Subscription to be created.
:rtype: :class:`<Subscription> <service-hooks.v4_1.models.Subscription>`
"""
content = self._serialize.body(subscription, 'Subscription')
response = self._send(http_method='POST',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='4.1',
content=content)
return self._deserialize('Subscription', response)
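# Sketch: creating a web hook subscription. The publisher/consumer ids, event
# type and input keys below are illustrative and must match what
# list_publishers/list_consumers report for your organization:
#
#     subscription = models.Subscription(
#         publisher_id='tfs',
#         event_type='git.push',
#         consumer_id='webHooks',
#         consumer_action_id='httpRequest',
#         publisher_inputs={'projectId': project_id},
#         consumer_inputs={'url': 'https://example.com/hook'})
#     created = client.create_subscription(subscription)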
def delete_subscription(self, subscription_id):
"""DeleteSubscription.
Delete a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
self._send(http_method='DELETE',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='4.1',
route_values=route_values)
def get_subscription(self, subscription_id):
"""GetSubscription.
Get a specific service hooks subscription.
:param str subscription_id: ID for a subscription.
:rtype: :class:`<Subscription> <service-hooks.v4_1.models.Subscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
response = self._send(http_method='GET',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='4.1',
route_values=route_values)
return self._deserialize('Subscription', response)
def list_subscriptions(self, publisher_id=None, event_type=None, consumer_id=None, consumer_action_id=None):
"""ListSubscriptions.
Get a list of subscriptions.
:param str publisher_id: ID for a publisher.
:param str event_type: The event type to filter by.
:param str consumer_id: ID for a consumer.
:param str consumer_action_id: ID for a consumerActionId.
:rtype: [Subscription]
"""
query_parameters = {}
if publisher_id is not None:
query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str')
if event_type is not None:
query_parameters['eventType'] = self._serialize.query('event_type', event_type, 'str')
if consumer_id is not None:
query_parameters['consumerId'] = self._serialize.query('consumer_id', consumer_id, 'str')
if consumer_action_id is not None:
query_parameters['consumerActionId'] = self._serialize.query('consumer_action_id', consumer_action_id, 'str')
response = self._send(http_method='GET',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='4.1',
query_parameters=query_parameters)
return self._deserialize('[Subscription]', self._unwrap_collection(response))
def replace_subscription(self, subscription, subscription_id=None):
"""ReplaceSubscription.
Update a subscription.
:param :class:`<Subscription> <service-hooks.v4_1.models.Subscription>` subscription:
:param str subscription_id: ID for a subscription that you wish to update.
:rtype: :class:`<Subscription> <service-hooks.v4_1.models.Subscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(subscription, 'Subscription')
response = self._send(http_method='PUT',
location_id='fc50d02a-849f-41fb-8af1-0a5216103269',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('Subscription', response)
def create_subscriptions_query(self, query):
"""CreateSubscriptionsQuery.
Query for service hook subscriptions.
:param :class:`<SubscriptionsQuery> <service-hooks.v4_1.models.SubscriptionsQuery>` query:
:rtype: :class:`<SubscriptionsQuery> <service-hooks.v4_1.models.SubscriptionsQuery>`
"""
content = self._serialize.body(query, 'SubscriptionsQuery')
response = self._send(http_method='POST',
location_id='c7c3c1cf-9e05-4c0d-a425-a0f922c2c6ed',
version='4.1',
content=content)
return self._deserialize('SubscriptionsQuery', response)
def create_test_notification(self, test_notification, use_real_data=None):
"""CreateTestNotification.
Sends a test notification. This is useful for verifying the configuration of an updated or new service hooks subscription.
:param :class:`<Notification> <service-hooks.v4_1.models.Notification>` test_notification:
:param bool use_real_data: Only allow testing with real data in existing subscriptions.
:rtype: :class:`<Notification> <service-hooks.v4_1.models.Notification>`
"""
query_parameters = {}
if use_real_data is not None:
query_parameters['useRealData'] = self._serialize.query('use_real_data', use_real_data, 'bool')
content = self._serialize.body(test_notification, 'Notification')
response = self._send(http_method='POST',
location_id='1139462c-7e27-4524-a997-31b9b73551fe',
version='4.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('Notification', response)
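# Overall usage sketch (the organization URL and token are placeholders; in
# practice clients are often obtained via this package's VssConnection helper
# rather than constructed directly):
#
#     from msrest.authentication import BasicAuthentication
#
#     creds = BasicAuthentication('', personal_access_token)
#     client = ServiceHooksClient(base_url='https://myorg.visualstudio.com', creds=creds)
#     for publisher in client.list_publishers():
#         print(publisher.id, publisher.name)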
| 53.020101
| 172
| 0.616198
|
a3406189699b31e88ee4145d075ade91efa85aec
| 77,182
|
py
|
Python
|
stage/test_directory_origin.py
|
anubandhan/datacollector-tests
|
301c024c66d68353735256b262b681dd05ba16cc
|
[
"Apache-2.0"
] | null | null | null |
stage/test_directory_origin.py
|
anubandhan/datacollector-tests
|
301c024c66d68353735256b262b681dd05ba16cc
|
[
"Apache-2.0"
] | 1
|
2019-04-24T11:06:38.000Z
|
2019-04-24T11:06:38.000Z
|
stage/test_directory_origin.py
|
anubandhan/datacollector-tests
|
301c024c66d68353735256b262b681dd05ba16cc
|
[
"Apache-2.0"
] | 2
|
2019-05-24T06:34:37.000Z
|
2020-03-30T11:48:18.000Z
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pytest
import random
import string
import tempfile
import time
import csv
import textwrap
from streamsets.testframework.markers import sdc_min_version
from streamsets.sdk.models import Configuration  # used by the shell_executor fixture below (import path assumed)
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
FILE_WRITER_SCRIPT = """
file_contents = '''{file_contents}'''
for record in records:
with open('{filepath}', 'w') as f:
f.write(file_contents.decode('utf8').encode('{encoding}'))
"""
FILE_WRITER_SCRIPT_BINARY = """
with open('{filepath}', 'wb') as f:
f.write({file_contents})
"""
@pytest.fixture(scope='module')
def sdc_common_hook():
def hook(data_collector):
data_collector.add_stage_lib('streamsets-datacollector-jython_2_7-lib')
return hook
@pytest.fixture
def file_writer(sdc_executor):
"""Writes a file to SDC's local FS.
Args:
filepath (:obj:`str`): The absolute path to which to write the file.
file_contents (:obj:`str`): The file contents.
encoding (:obj:`str`, optional): The file encoding. Default: ``'utf8'``
file_data_type (:obj:`str`, optional): The type of data the file contains. Default: ``'NOT_BINARY'``
"""
def file_writer_(filepath, file_contents, encoding='utf8', file_data_type='NOT_BINARY'):
write_file_with_pipeline(sdc_executor, filepath, file_contents, encoding, file_data_type)
return file_writer_
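# Usage sketch (hypothetical test): the fixture writes a file on the SDC host by
# running a short-lived Jython pipeline, so it can be called directly from a test:
#
#     def test_reads_generated_file(sdc_builder, sdc_executor, file_writer):
#         file_writer('/tmp/input/sample.txt', 'hello\nworld\n')
#         ...  # then build a Directory origin pipeline that reads /tmp/input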
def write_file_with_pipeline(sdc_executor, filepath, file_contents, encoding='utf8', file_data_type='NOT_BINARY'):
builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='noop', stop_after_first_batch=True)
jython_evaluator = builder.add_stage('Jython Evaluator')
file_writer_script = FILE_WRITER_SCRIPT_BINARY if file_data_type == 'BINARY' else FILE_WRITER_SCRIPT
jython_evaluator.script = textwrap.dedent(file_writer_script).format(filepath=str(filepath),
file_contents=file_contents,
encoding=encoding)
trash = builder.add_stage('Trash')
dev_raw_data_source >> jython_evaluator >> trash
pipeline = builder.build('File writer pipeline')
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
sdc_executor.remove_pipeline(pipeline)
@pytest.fixture
def shell_executor(sdc_executor):
def shell_executor_(script, environment_variables=None):
builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='noop', stop_after_first_batch=True)
shell = builder.add_stage('Shell')
shell.set_attributes(script=script,
environment_variables=(Configuration(**environment_variables)._data
if environment_variables
else []))
trash = builder.add_stage('Trash')
dev_raw_data_source >> [trash, shell]
pipeline = builder.build('Shell executor pipeline')
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
sdc_executor.remove_pipeline(pipeline)
return shell_executor_
@pytest.fixture
def list_dir(sdc_executor):
def list_dir_(data_format, files_directory, file_name_pattern, recursive=True, batches=1, batch_size=10):
builder = sdc_executor.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format=data_format,
file_name_pattern=file_name_pattern,
file_name_pattern_mode='GLOB',
files_directory=files_directory,
process_subdirectories=recursive)
trash = builder.add_stage('Trash')
pipeline_finisher = builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher.set_attributes(preconditions=['${record:eventType() == \'no-more-data\'}'],
on_record_error='DISCARD')
directory >> trash
directory >= pipeline_finisher
pipeline = builder.build('List dir pipeline')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
batches=batches,
batch_size=batch_size,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
files = [str(record.field['filepath']) for b in range(len(snapshot.snapshot_batches))
for record in snapshot.snapshot_batches[b][directory.instance_name].event_records
if record.header.values['sdc.event.type'] == 'new-file']
sdc_executor.remove_pipeline(pipeline)
return files
return list_dir_
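# Usage sketch (hypothetical test): list every *.txt file found under a
# directory tree using the fixture above:
#
#     def test_lists_files(list_dir):
#         files = list_dir('TEXT', '/tmp/input', '*.txt')
#         assert all(f.endswith('.txt') for f in files)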
# pylint: disable=pointless-statement, too-many-locals
def test_directory_origin(sdc_builder, sdc_executor):
"""Test Directory Origin. We test by making sure files are pre-created using Local FS destination stage pipeline
and then have the Directory Origin read those files. The pipelines looks like:
dev_raw_data_source >> local_fs
directory >> trash
"""
raw_data = 'Hello!'
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
# 1st pipeline which generates the required files for Directory Origin
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=os.path.join(tmp_directory, '${YYYY()}-${MM()}-${DD()}-${hh()}'),
files_prefix='sdc-${sdc:id()}', files_suffix='txt', max_records_in_file=100)
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# generate some batches/files
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(10)
sdc_executor.stop_pipeline(files_pipeline)
# 2nd pipeline which reads the files using Directory Origin stage
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='TEXT', file_name_pattern='sdc*.txt', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build('Directory Origin pipeline')
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
for record in snapshot.snapshot_batches[0][directory.instance_name].output:
assert raw_data == record.field['text'].value
assert record.header['sourceId'] is not None
assert record.header['stageCreator'] is not None
@pytest.mark.parametrize('no_of_threads', [1, 5])
@sdc_min_version('3.1.0.0')
def test_directory_origin_order_by_timestamp(sdc_builder, sdc_executor, no_of_threads):
"""Test Directory Origin. We make sure we covered race condition
when directory origin is configured order by last modified timestamp.
The default wait time for directory spooler is 5 seconds,
when the files are modified between 5 seconds make sure all files are processed.
The pipelines looks like:
dev_raw_data_source >> local_fs
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
# 1st pipeline which writes one record per file with a short delay between batches
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.set_attributes(batch_size=1,
delay_between_batches=10)
dev_data_generator.fields_to_generate = [{'field': 'text', 'precision': 10, 'scale': 2, 'type': 'STRING'}]
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=os.path.join(tmp_directory),
files_prefix='sdc-${sdc:id()}', files_suffix='txt', max_records_in_file=1)
dev_data_generator >> local_fs
# run the 1st pipeline to create the directory and starting files
files_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(files_pipeline)
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(1)
sdc_executor.stop_pipeline(files_pipeline)
# 2nd pipeline which reads the files using Directory Origin stage
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(batch_wait_time_in_secs=1,
data_format='TEXT', file_name_pattern='sdc*.txt',
file_name_pattern_mode='GLOB', file_post_processing='DELETE',
files_directory=tmp_directory, process_subdirectories=True,
read_order='TIMESTAMP', number_of_threads=no_of_threads)
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
sdc_executor.start_pipeline(directory_pipeline)
# re-run the 1st pipeline
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(10)
sdc_executor.stop_pipeline(files_pipeline)
# wait till 2nd pipeline reads all files
time.sleep(10)
sdc_executor.stop_pipeline(directory_pipeline)
# Validate history is as expected
file_pipeline_history = sdc_executor.get_pipeline_history(files_pipeline)
msgs_sent_count1 = file_pipeline_history.entries[4].metrics.counter('pipeline.batchOutputRecords.counter').count
msgs_sent_count2 = file_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
directory_pipeline_history = sdc_executor.get_pipeline_history(directory_pipeline)
msgs_result_count = directory_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
assert msgs_result_count == msgs_sent_count1 + msgs_sent_count2
@pytest.mark.parametrize('no_of_threads', [10])
@sdc_min_version('3.2.0.0')
def test_directory_origin_in_whole_file_dataformat(sdc_builder, sdc_executor, no_of_threads):
"""Test Directory Origin. We make sure multiple threads on whole data format works correct.
The pipelines looks like:
dev_raw_data_source >> local_fs
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
# 1st pipeline which writes records to files (10 records per file) with a short delay between batches
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
batch_size = 100
dev_data_generator.set_attributes(batch_size=batch_size,
delay_between_batches=10,
number_of_threads=no_of_threads)
dev_data_generator.fields_to_generate = [{'field': 'text', 'precision': 10, 'scale': 2, 'type': 'STRING'}]
max_records_in_file = 10
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=os.path.join(tmp_directory),
files_prefix='sdc-${sdc:id()}',
files_suffix='txt',
max_records_in_file=max_records_in_file)
dev_data_generator >> local_fs
number_of_batches = 5
# run the 1st pipeline to create the directory and starting files
files_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(files_pipeline)
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(number_of_batches)
sdc_executor.stop_pipeline(files_pipeline)
# get the how many records are sent
file_pipeline_history = sdc_executor.get_pipeline_history(files_pipeline)
msgs_sent_count = file_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
# compute the expected number of batches to process all files
no_of_input_files = (msgs_sent_count / max_records_in_file)
# 2nd pipeline which reads the files using Directory Origin stage in whole data format
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(batch_wait_time_in_secs=1,
data_format='WHOLE_FILE',
max_files_in_directory=1000,
files_directory=tmp_directory,
file_name_pattern='*',
file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
number_of_threads=no_of_threads,
process_subdirectories=True,
read_order='TIMESTAMP')
localfs = pipeline_builder.add_stage('Local FS', type='destination')
localfs.set_attributes(data_format='WHOLE_FILE',
file_name_expression='${record:attribute(\'filename\')}')
directory >> localfs
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
sdc_executor.start_pipeline(directory_pipeline).wait_for_pipeline_batch_count(no_of_input_files)
sdc_executor.stop_pipeline(directory_pipeline)
directory_pipeline_history = sdc_executor.get_pipeline_history(directory_pipeline)
msgs_result_count = directory_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
assert msgs_result_count == no_of_input_files
@pytest.mark.parametrize('no_of_threads', [10])
@sdc_min_version('3.2.0.0')
def test_directory_origin_multiple_batches_no_initial_file(sdc_builder, sdc_executor, no_of_threads):
"""Test Directory Origin. We use the directory origin to read a batch of 100 files,
after some times we will read a new batch of 100 files. No initial file configured.
This test has been written to avoid regression, especially of issues raised in ESC-371
The pipelines look like:
Pipeline 1 (Local FS Target 1 in SDC UI): dev_data_generator >> local_fs_3 (in files_pipeline in the test)
Pipeline 2 (Local FS Target 2 in SDC UI): dev_data_generator_2 >> local_fs_4 (in files_pipeline_2 in the test)
Pipeline 3 (Directory Origin in SDC UI): directory >> local_fs
Pipeline 4 (tmp_directory to tmp_directory_2 in SDC UI): directory_2 >> local_fs_2
The test works as follows:
1) Pipeline 1 writes files with prefix SDC1 to directory tmp_directory and then it is stopped
2) Pipeline 3 is started and directory origin read files from directory tmp_directory. Pipeline is NOT
stopped
3) Pipeline 2 writes files with prefix SDC2 to directory tmp_directory_2 and then it is stopped
4) Pipeline 4 reads files from directory tmp_directory_2 and writes them to directory tmp_directory, then
it is stopped
5) Pipeline 3 will read files Pipeline 4 writes to directory tmp_directory
6) Test checks that all the corresponding files from directory tmp_directory are read and then test ends
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
tmp_directory_2 = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
number_of_batches = 5
max_records_in_file = 10
# run the 1st pipeline to create the directory and starting files
files_pipeline = get_localfs_writer_pipeline(sdc_builder, no_of_threads, tmp_directory, max_records_in_file, 1)
sdc_executor.add_pipeline(files_pipeline)
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(number_of_batches)
sdc_executor.stop_pipeline(files_pipeline)
# get how many records are sent
file_pipeline_history = sdc_executor.get_pipeline_history(files_pipeline)
msgs_sent_count = file_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
# compute the expected number of batches to process all files
no_of_input_files = (msgs_sent_count / max_records_in_file)
# 2nd pipeline which reads the files using Directory Origin stage in whole data format
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(batch_wait_time_in_secs=1,
data_format='WHOLE_FILE',
max_files_in_directory=1000,
files_directory=tmp_directory,
file_name_pattern='*',
file_name_pattern_mode='GLOB',
number_of_threads=no_of_threads,
process_subdirectories=True,
read_order='LEXICOGRAPHICAL',
file_post_processing='DELETE')
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='WHOLE_FILE',
file_name_expression='${record:attribute(\'filename\')}')
directory >> local_fs
directory_pipeline = pipeline_builder.build(title='Directory Origin')
sdc_executor.add_pipeline(directory_pipeline)
pipeline_start_command = sdc_executor.start_pipeline(directory_pipeline)
pipeline_start_command.wait_for_pipeline_batch_count(no_of_input_files)
# Send another round of records while the reading pipeline is running
files_pipeline_2 = get_localfs_writer_pipeline(sdc_builder, no_of_threads, tmp_directory_2, max_records_in_file, 2)
sdc_executor.add_pipeline(files_pipeline_2)
sdc_executor.start_pipeline(files_pipeline_2).wait_for_pipeline_batch_count(number_of_batches)
sdc_executor.stop_pipeline(files_pipeline_2)
file_pipeline_2_history = sdc_executor.get_pipeline_history(files_pipeline_2)
msgs_sent_count_2 = file_pipeline_2_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
no_of_input_files_2 = (msgs_sent_count_2 / max_records_in_file)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory_2 = pipeline_builder.add_stage('Directory', type='origin')
directory_2.set_attributes(batch_wait_time_in_secs=1,
data_format='WHOLE_FILE',
max_files_in_directory=1000,
files_directory=tmp_directory_2,
file_name_pattern='*',
file_name_pattern_mode='GLOB',
number_of_threads=no_of_threads,
process_subdirectories=True,
read_order='LEXICOGRAPHICAL',
file_post_processing='DELETE')
local_fs_2 = pipeline_builder.add_stage('Local FS', type='destination')
local_fs_2.set_attributes(data_format='WHOLE_FILE',
file_name_expression='${record:attribute(\'filename\')}',
directory_template=tmp_directory,
files_prefix='')
directory_2 >> local_fs_2
directory_pipeline_2 = pipeline_builder.build(title='tmp_directory to tmp_directory_2')
sdc_executor.add_pipeline(directory_pipeline_2)
pipeline_start_command_2 = sdc_executor.start_pipeline(directory_pipeline_2)
pipeline_start_command_2.wait_for_pipeline_batch_count(no_of_input_files_2)
sdc_executor.stop_pipeline(directory_pipeline_2)
# Wait until the pipeline reads all the expected files
pipeline_start_command.wait_for_pipeline_batch_count(no_of_input_files + no_of_input_files_2)
sdc_executor.stop_pipeline(directory_pipeline)
directory_pipeline_history = sdc_executor.get_pipeline_history(directory_pipeline)
msgs_result_count = directory_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
assert msgs_result_count == (no_of_input_files + no_of_input_files_2)
def get_localfs_writer_pipeline(sdc_builder, no_of_threads, tmp_directory, max_records_in_file, index,
delay_between_batches=10):
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
batch_size = 100
dev_data_generator.set_attributes(batch_size=batch_size,
delay_between_batches=delay_between_batches,
number_of_threads=no_of_threads)
dev_data_generator.fields_to_generate = [{'field': 'text', 'precision': 10, 'scale': 2, 'type': 'STRING'}]
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=os.path.join(tmp_directory),
files_prefix=f'sdc{index}-${{sdc:id()}}',
files_suffix='txt',
max_records_in_file=max_records_in_file)
dev_data_generator >> local_fs
files_pipeline = pipeline_builder.build(title=f'Local FS Target {index}')
return files_pipeline
def test_directory_timestamp_ordering(sdc_builder, sdc_executor):
"""This test is mainly for SDC-10019. The bug that was fixed there involves a race condition. It only manifests if
the files are ordered in increasing timestamp order and reverse alphabetical order AND the processing time required
for a batch is sufficiently high. That's why the pipeline is configured to write relatively large files (200k
records, gzipped).
Functionally, the test simply ensures that the second pipeline (with the directory origin) reads the same number of
batches as was written by the first pipeline, and hence all data is read. If the test times out, that essentially
means that bug has occurred.
"""
max_records_per_file = random.randint(100000, 300000)
# randomize the batch size
batch_size = random.randint(100, 5000)
# generate enough batches to have 20 or so files
num_batches = random.randint(15, 25) * max_records_per_file/batch_size
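# e.g. a run with 20 * 200,000 records per file / 1,000 records per batch gives
# 4,000 batches (4,000,000 records), i.e. 20 files of 200,000 records each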
random_str = get_random_string(string.ascii_letters, 10)
tmp_directory = os.path.join(tempfile.gettempdir(), 'directory_timestamp_ordering', random_str, 'data')
scratch_directory = os.path.join(tempfile.gettempdir(), 'directory_timestamp_ordering', random_str, 'scratch')
logger.info('Test run information: num_batches=%d, batch_size=%d, max_records_per_file=%d, tmp_directory=%s, scratch_directory=%s',
num_batches, batch_size, max_records_per_file, tmp_directory, scratch_directory)
# use one pipeline to generate the .txt.gz files to be consumed by the directory pipeline
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'text', 'precision': 10, 'scale': 2, 'type': 'STRING'}]
dev_data_generator.set_attributes(delay_between_batches=0, batch_size=batch_size)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}',
files_suffix='txt',
compression_codec='GZIP',
max_records_in_file=max_records_per_file)
dev_data_generator >> local_fs
shell_executor = pipeline_builder.add_stage('Shell')
shell_executor.set_attributes(stage_record_preconditions=["${record:eventType() == 'file-closed'}"])
shell_executor.set_attributes(environment_variables=[{'key': 'FILENAME', 'value': '${record:value(\'/filename\')}'},
{'key': 'FILEPATH', 'value': '${record:value(\'/filepath\')}'}])
# this script will rename the completed txt.gz file to be of the form WORD_COUNT.txt.gz, where WORD is chosen from
# a reverse-alphabetical list of cycling words and COUNT is an increasing counter, and also moves it into a random
# subdirectory; this ensures that newer files (i.e. those written later in the pipeline execution) will sometimes
# have earlier lexicographical orderings to trigger SDC-10091
shell_executor.set_attributes(script=f'''\
#!/bin/bash
if [[ ! -s {scratch_directory}/count.txt ]]; then
echo '0' > {scratch_directory}/count.txt
fi
COUNT=$(cat {scratch_directory}/count.txt)
echo $(($COUNT+1)) > {scratch_directory}/count.txt
if [[ ! -s {scratch_directory}/words.txt ]]; then
mkdir -p {scratch_directory}
echo 'eggplant
dill
cucumber
broccoli
apple' > {scratch_directory}/words.txt
WORD=fig
else
WORD=$(head -1 {scratch_directory}/words.txt)
grep -v $WORD {scratch_directory}/words.txt > {scratch_directory}/words_new.txt
mv {scratch_directory}/words_new.txt {scratch_directory}/words.txt
fi
RAND_NUM=$(($RANDOM % 10))
SUBDIR="subdir${{RAND_NUM}}"
cd $(dirname $FILEPATH)
mkdir -p $SUBDIR
mv $FILENAME $SUBDIR/${{WORD}}_$COUNT.txt.gz
''')
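# e.g. the renamed files end up looking like subdir3/fig_0.txt.gz,
# subdir7/eggplant_1.txt.gz, subdir1/dill_2.txt.gz (word, counter and subdir
# values depend on the run), so lexicographical order no longer follows
# modification time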
local_fs >= shell_executor
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# generate the input files
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(num_batches)
sdc_executor.stop_pipeline(files_pipeline)
# create the actual directory origin pipeline, which will read the generated *.txt.gz files (across subdirectories)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='TEXT',
file_name_pattern='*.txt.gz',
file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
files_directory=tmp_directory,
process_subdirectories=True,
read_order='TIMESTAMP',
compression_format='COMPRESSED_FILE',
batch_size_in_recs=batch_size)
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build('Directory Origin pipeline')
sdc_executor.add_pipeline(directory_pipeline)
# if we set the batch size to the same value in the directory origin pipeline, it should read exactly as many batches
# as were written by the first pipeline
sdc_executor.start_pipeline(directory_pipeline).wait_for_pipeline_batch_count(num_batches)
sdc_executor.stop_pipeline(directory_pipeline)
@sdc_min_version('3.0.0.0')
def test_directory_origin_avro_produce_less_file(sdc_builder, sdc_executor):
"""Test Directory Origin in Avro data format. The sample Avro file has 5 lines and
the batch size is 1. The pipeline should produce the event, "new-file" and 1 record
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
avro_records = setup_avro_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='AVRO', file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=1).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
output_records = snapshot[directory.instance_name].output
event_records = snapshot[directory.instance_name].event_records
assert 1 == len(event_records)
assert 1 == len(output_records)
assert 'new-file' == event_records[0].header['values']['sdc.event.type']
assert output_records[0].get_field_data('/name') == avro_records[0].get('name')
assert output_records[0].get_field_data('/age') == avro_records[0].get('age')
assert output_records[0].get_field_data('/emails') == avro_records[0].get('emails')
assert output_records[0].get_field_data('/boss') == avro_records[0].get('boss')
@sdc_min_version('3.8.0')
def test_directory_origin_multiple_threads_no_more_data_sent_after_all_data_read(sdc_builder, sdc_executor):
"""Test that directory origin with more than one threads read all data from all the files in a folder before
sending no more data event.
The pipelines looks like:
directory >> trash
directory >= pipeline finisher executor
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED', header_line='WITH_HEADER', file_name_pattern='test*.csv',
file_name_pattern_mode='GLOB', file_post_processing='NONE',
files_directory='/resources/resources/directory_origin', read_order='LEXICOGRAPHICAL',
batch_size_in_recs=10, batch_wait_time_in_secs=60,
number_of_threads=3, on_record_error='STOP_PIPELINE')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher_executor.set_attributes(preconditions=['${record:eventType() == \'no-more-data\'}'],
on_record_error='DISCARD')
directory >= pipeline_finisher_executor
directory_pipeline = pipeline_builder.build(
title='test_directory_origin_multiple_threads_no_more_data_sent_after_all_data_read')
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10,
batches=14, wait_for_statuses=['FINISHED'], timeout_sec=120).snapshot
# assert all the data captured have the same raw_data
output_records = [record for i in range(len(snapshot.snapshot_batches)) for record in
snapshot.snapshot_batches[i][directory.instance_name].output]
output_records_text_fields = [f'{record.field["Name"]},{record.field["Job"]},{record.field["Salary"]}' for record in
output_records]
temp_data_from_csv_file = (read_csv_file('./resources/directory_origin/test4.csv', ',', True))
data_from_csv_files = [f'{row[0]},{row[1]},{row[2]}' for row in temp_data_from_csv_file]
temp_data_from_csv_file = (read_csv_file('./resources/directory_origin/test5.csv', ',', True))
for row in temp_data_from_csv_file:
data_from_csv_files.append(f'{row[0]},{row[1]},{row[2]}')
temp_data_from_csv_file = (read_csv_file('./resources/directory_origin/test6.csv', ',', True))
for row in temp_data_from_csv_file:
data_from_csv_files.append(f'{row[0]},{row[1]},{row[2]}')
assert len(data_from_csv_files) == len(output_records_text_fields)
assert sorted(data_from_csv_files) == sorted(output_records_text_fields)
@sdc_min_version('3.0.0.0')
def test_directory_origin_avro_produce_full_file(sdc_builder, sdc_executor):
""" Test Directory Origin in Avro data format. The sample Avro file has 5 lines and
the batch size is 10. The pipeline should produce the event, "new-file" and "finished-file"
and 5 records
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
avro_records = setup_avro_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='AVRO', file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
output_records = snapshot[directory.instance_name].output
event_records = snapshot[directory.instance_name].event_records
assert 2 == len(event_records)
assert 5 == len(output_records)
assert 'new-file' == event_records[0].header['values']['sdc.event.type']
assert 'finished-file' == event_records[1].header['values']['sdc.event.type']
for i in range(0, 5):
assert output_records[i].get_field_data('/name') == avro_records[i].get('name')
assert output_records[i].get_field_data('/age') == avro_records[i].get('age')
assert output_records[i].get_field_data('/emails') == avro_records[i].get('emails')
assert output_records[i].get_field_data('/boss') == avro_records[i].get('boss')
@sdc_min_version('3.12.0')
@pytest.mark.parametrize('csv_record_type', ['LIST_MAP','LIST'])
def test_directory_origin_bom_file(sdc_builder, sdc_executor, csv_record_type):
""" Test Directory Origin with file in CSV data format and containing BOM.
The file(file_with_bom.csv) is present in resources/directory_origin. To view the
BOM bytes, we can use "hexdump -C file_with_bom.csv". The first 3 bytes(ef bb bf)
are BOM.
The pipeline looks like:
directory >> trash
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='file_with_bom.csv',
file_name_pattern_mode='GLOB',
files_directory='/resources/resources/directory_origin',
process_subdirectories=True,
read_order='TIMESTAMP',
root_field_type=csv_record_type)
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
output_records = snapshot[directory.instance_name].output
# contents of file_with_bom.csv: <BOM>abc,123,xyz
if csv_record_type == 'LIST_MAP':
assert 'abc' == output_records[0].get_field_data('/0')
assert '123' == output_records[0].get_field_data('/1')
assert 'xyz' == output_records[0].get_field_data('/2')
else:
assert 'abc' == output_records[0].get_field_data('/0').get('value')
assert '123' == output_records[0].get_field_data('/1').get('value')
assert 'xyz' == output_records[0].get_field_data('/2').get('value')
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('csv_record_type', ['LIST_MAP', 'LIST'])
def test_directory_origin_csv_produce_full_file(sdc_builder, sdc_executor, csv_record_type):
""" Test Directory Origin in CSV data format. The sample CSV file has 3 lines and
the batch size is 10. The pipeline should produce the event, "new-file" and "finished-file"
and 3 records
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_basic_dilimited_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP',
root_field_type=csv_record_type)
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same csv_records
output_records = snapshot[directory.instance_name].output
event_records = snapshot[directory.instance_name].event_records
assert 2 == len(event_records)
assert 3 == len(output_records)
assert 'new-file' == event_records[0].header['values']['sdc.event.type']
assert 'finished-file' == event_records[1].header['values']['sdc.event.type']
for i in range(0, 3):
csv_record_fields = csv_records[i].split(',')
for j in range(0, len(csv_record_fields)):
if type(output_records[i].get_field_data(f'/{j}')) is dict:
    assert output_records[i].get_field_data(f'/{j}').get('value') == csv_record_fields[j]
else:
    assert output_records[i].get_field_data(f'/{j}') == csv_record_fields[j]
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('csv_record_type', ['LIST_MAP', 'LIST'])
@pytest.mark.parametrize('header_line', ['WITH_HEADER', 'IGNORE_HEADER', 'NO_HEADER'])
def test_directory_origin_csv_produce_less_file(sdc_builder, sdc_executor, csv_record_type, header_line):
""" Test Directory Origin in CSV data format. The sample CSV file has 3 lines and
the batch size is 1. The pipeline should produce the event, "new-file"
and 1 record
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_basic_dilimited_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
header_line=header_line, process_subdirectories=True,
read_order='TIMESTAMP', root_field_type=csv_record_type)
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=1).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same csv_records
output_records = snapshot[directory.instance_name].output
event_records = snapshot[directory.instance_name].event_records
assert 1 == len(event_records)
assert 1 == len(output_records)
assert 'new-file' == event_records[0].header['values']['sdc.event.type']
    # The header line is consumed (or skipped) unless NO_HEADER is configured, so the first
    # output record corresponds to the first data line in those cases.
    header_fields = csv_records[0].split(',')
    expected_fields = header_fields if header_line == 'NO_HEADER' else csv_records[1].split(',')
    for j in range(0, len(expected_fields)):
        name = header_fields[j] if header_line == 'WITH_HEADER' and csv_record_type == 'LIST_MAP' else j
        if type(output_records[0].get_field_data(f'/{name}')) is dict:
            assert output_records[0].get_field_data(f'/{name}').get('value') == expected_fields[j]
        else:
            assert output_records[0].get_field_data(f'/{name}') == expected_fields[j]
@sdc_min_version('3.0.0.0')
def test_directory_origin_csv_custom_file(sdc_builder, sdc_executor):
""" Test Directory Origin in custom CSV data format. The sample CSV file has 1 custom CSV
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_custom_delimited_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED', delimiter_format_type='CUSTOM',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=1).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
output_records = snapshot[directory.instance_name].output
assert 1 == len(output_records)
assert output_records[0].get_field_data('/0') == ' '.join(csv_records)
@sdc_min_version('3.8.0')
def test_directory_origin_multi_char_delimited(sdc_builder, sdc_executor):
"""
Test Directory Origin with multi-character delimited format. This will generate a sample file with the custom
multi-char delimiter then read it with the test pipeline.
The pipeline looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
# crazy delimiter
delim = '_/-\\_'
custom_delimited_lines = [
f"first{delim}second{delim}third",
f"1{delim}11{delim}111",
f"2{delim}22{delim}222",
f"31{delim}3,3{delim}3,_/-_3,3"
]
setup_dilimited_file(sdc_executor, tmp_directory, custom_delimited_lines)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED', delimiter_format_type='MULTI_CHARACTER',
multi_character_field_delimiter=delim,
header_line='WITH_HEADER',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build('Multi Char Delimited Directory')
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=3).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
output_records = snapshot[directory.instance_name].output
assert 3 == len(output_records)
assert output_records[0].get_field_data('/first') == '1'
assert output_records[0].get_field_data('/second') == '11'
assert output_records[0].get_field_data('/third') == '111'
assert output_records[1].get_field_data('/first') == '2'
assert output_records[1].get_field_data('/second') == '22'
assert output_records[1].get_field_data('/third') == '222'
assert output_records[2].get_field_data('/first') == '31'
assert output_records[2].get_field_data('/second') == '3,3'
assert output_records[2].get_field_data('/third') == '3,_/-_3,3'
@sdc_min_version('3.0.0.0')
def test_directory_origin_csv_custom_comment_file(sdc_builder, sdc_executor):
""" Test Directory Origin in custom CSV data format with comment enabled. The sample CSV file have
1 delimited line follow by 1 comment line and 1 delimited line
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_dilimited_with_comment_file(sdc_executor, tmp_directory)
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED', delimiter_format_type='CUSTOM',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
enable_comments = True,
file_post_processing='DELETE',
files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
output_records = snapshot[directory.instance_name].output
assert 2 == len(output_records)
assert output_records[0].get_field_data('/0') == csv_records[0]
assert output_records[1].get_field_data('/0') == csv_records[2]
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('ignore_empty_line', [True, False])
def test_directory_origin_custom_csv_empty_line_file(sdc_builder, sdc_executor, ignore_empty_line):
""" Test Directory Origin in custom CSV data format with empty line enabled and disabled.
The sample CSV file has 2 CSV records and 1 empty line.
The pipeline should produce 2 when empty line is enabled and 3 when empty line is disabled
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_dilimited_with_empty_line_file(sdc_executor, tmp_directory)
empty_line_position = [1]
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED', delimiter_format_type='CUSTOM',
ignore_empty_lines = ignore_empty_line,
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
files_directory=tmp_directory,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=10).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
output_records = snapshot[directory.instance_name].output
expected_record_size = len(csv_records)
if ignore_empty_line:
expected_record_size = 2
assert expected_record_size == len(output_records)
assert output_records[0].get_field_data('/0') == csv_records[0]
if ignore_empty_line:
assert output_records[1].get_field_data('/0') == csv_records[2]
else:
assert output_records[2].get_field_data('/0') == csv_records[2]
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('batch_size', [3,4,5,6])
def test_directory_origin_csv_record_overrun_on_batch_boundary(sdc_builder, sdc_executor, batch_size):
""" Test Directory Origin in Delimited data format. The long delimited record in [2,4,5,8,9]th in the file
the long delimited record should be ignored in the batch
The pipelines looks like:
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = setup_long_dilimited_file(sdc_executor, tmp_directory)
long_dilimited_record_position = [2,4,5,8,9]
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
max_record_length_in_chars=10,
process_subdirectories=True, read_order='TIMESTAMP')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(directory_pipeline)
snapshot = sdc_executor.capture_snapshot(directory_pipeline, start_pipeline=True, batch_size=batch_size).snapshot
sdc_executor.stop_pipeline(directory_pipeline)
# assert all the data captured have the same raw_data
output_records = snapshot[directory.instance_name].output
expected_batch_size = batch_size
for i in range(0, batch_size):
if i in long_dilimited_record_position:
expected_batch_size = expected_batch_size - 1
assert expected_batch_size == len(output_records)
    out_idx = 0
    for i in range(0, batch_size):
        if i not in long_dilimited_record_position:
            csv_record_fields = csv_records[i].split(',')
            for k in range(0, len(csv_record_fields)):
                assert output_records[out_idx].get_field_data(f'/{k}') == csv_record_fields[k]
            out_idx += 1
# SDC-10424
@sdc_min_version('3.5.3')
def test_directory_post_delete_on_batch_failure(sdc_builder, sdc_executor):
"""Make sure that post-actions are not executed on batch failure."""
raw_data = '1\n2\n3\n4\n5'
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
# 1st pipeline which generates the required files for Directory Origin
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('Dev Raw Data Source')
origin.stop_after_first_batch = True
origin.set_attributes(data_format='TEXT', raw_data=raw_data)
local_fs = builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=os.path.join(tmp_directory, '${YYYY()}-${MM()}-${DD()}-${hh()}'),
files_prefix='sdc-${sdc:id()}',
files_suffix='txt',
max_records_in_file=100)
origin >> local_fs
files_pipeline = builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# Generate exactly one input file
sdc_executor.start_pipeline(files_pipeline).wait_for_finished()
# 2nd pipeline which reads the files using Directory Origin stage
builder = sdc_builder.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='TEXT',
file_name_pattern='sdc*.txt',
file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
files_directory=tmp_directory,
process_subdirectories=True,
read_order='TIMESTAMP')
shell = builder.add_stage('Shell')
shell.script = "return -1"
shell.on_record_error = "STOP_PIPELINE"
directory >> shell
directory_pipeline = builder.build('Directory Origin pipeline')
sdc_executor.add_pipeline(directory_pipeline)
sdc_executor.start_pipeline(directory_pipeline, wait=False).wait_for_status(status='RUN_ERROR', ignore_errors=True)
# The main check is now - the pipeline should not drop the input file
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('Directory')
origin.set_attributes(data_format='WHOLE_FILE',
file_name_pattern='sdc*.txt',
file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
files_directory=tmp_directory,
process_subdirectories=True,
read_order='TIMESTAMP')
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build('Validation')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert 1 == len(snapshot[origin.instance_name].output)
# SDC-13559: Directory origin fires one batch after another when Allow Late directories is in effect
def test_directory_allow_late_directory_wait_time(sdc_builder, sdc_executor):
"""Test to ensure that when user explicitly enables "Allow Late Directory" and the directory doesn't exists,
the origin won't go into a mode where it will generate one batch after another, ignoring the option Batch Wait
Time completely."""
builder = sdc_builder.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.data_format = 'TEXT'
directory.file_name_pattern = 'sdc*.txt'
directory.files_directory = '/i/do/not/exists'
directory.allow_late_directory = True
trash = builder.add_stage('Trash')
directory >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
# We let the pipeline run for ~10 seconds - enough time to validate whether the origin is creating one batch
# after another or not.
time.sleep(10)
sdc_executor.stop_pipeline(pipeline)
    # The origin and/or pipeline can still generate some batches, so we don't test a precise number, just that it is
    # really small (less than 1 batch/second).
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count < 5
# Test for SDC-13476
def test_directory_origin_read_different_file_type(sdc_builder, sdc_executor):
"""Test Directory Origin. We make sure we covered race condition
when directory origin is configured with JSON data format but files directory have txt files.
It shows the relative stage errors depending on the type of file we try to read from files directory.
The pipelines looks like:
dev_raw_data_source >> local_fs
directory >> trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
generate_files(sdc_builder, sdc_executor, tmp_directory)
# 2nd pipeline which reads the files using Directory Origin stage
builder = sdc_builder.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='JSON',
file_name_pattern='*',
number_of_threads=10,
file_name_pattern_mode='GLOB',
file_post_processing='DELETE',
files_directory=tmp_directory,
error_directory=tmp_directory,
read_order='LEXICOGRAPHICAL')
trash = builder.add_stage('Trash')
directory >> trash
pipeline = builder.build('Validation')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
assert 10 == len(sdc_executor.get_stage_errors(pipeline, directory))
sdc_executor.stop_pipeline(pipeline)
output_records = snapshot[directory.instance_name].output
assert 0 == len(output_records)
@pytest.mark.parametrize('no_of_threads', [4])
@sdc_min_version('3.2.0.0')
def test_directory_origin_multiple_threads_timestamp_ordering(sdc_builder, sdc_executor, no_of_threads):
"""Test Directory Origin. We test that we read the same amount of files that we write with no reprocessing
of files and no NoSuchFileException in the sdc logs
Pipeline looks like:
Dev Data Generator >> Local FS (files_pipeline in the test)
Directory Origin >> Trash
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
number_of_batches = 100
max_records_in_file = 10
# Start files_pipeline
files_pipeline = get_localfs_writer_pipeline(sdc_builder, no_of_threads, tmp_directory, max_records_in_file, 1,
2000)
sdc_executor.add_pipeline(files_pipeline)
start_pipeline_command = sdc_executor.start_pipeline(files_pipeline)
# 2nd pipeline which reads the files using Directory Origin stage in whole data format
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(batch_wait_time_in_secs=1,
data_format='WHOLE_FILE',
max_files_in_directory=1000,
files_directory=tmp_directory,
file_name_pattern='*',
file_name_pattern_mode='GLOB',
number_of_threads=no_of_threads,
process_subdirectories=True,
read_order='TIMESTAMP',
file_post_processing='DELETE')
trash = pipeline_builder.add_stage('Trash')
directory >> trash
directory_pipeline = pipeline_builder.build(title='Directory Origin')
sdc_executor.add_pipeline(directory_pipeline)
pipeline_start_command = sdc_executor.start_pipeline(directory_pipeline)
# Stop files_pipeline after number_of_batches or more
start_pipeline_command.wait_for_pipeline_batch_count(number_of_batches)
sdc_executor.stop_pipeline(files_pipeline)
# Get how many records are sent
file_pipeline_history = sdc_executor.get_pipeline_history(files_pipeline)
msgs_sent_count = file_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
# Compute the expected number of batches to process all files
no_of_input_files = (msgs_sent_count / max_records_in_file)
pipeline_start_command.wait_for_pipeline_batch_count(no_of_input_files)
assert 0 == len(sdc_executor.get_stage_errors(directory_pipeline, directory))
sdc_executor.stop_pipeline(directory_pipeline)
directory_pipeline_history = sdc_executor.get_pipeline_history(directory_pipeline)
msgs_result_count = directory_pipeline_history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count
assert msgs_result_count == no_of_input_files
# Test for SDC-13486
def test_directory_origin_error_file_to_error_dir(sdc_builder, sdc_executor):
""" Test Directory Origin. Create two files in tmp_directory file1.txt which is correctly parsed by directory
origin and file2.txt which is not correctly parsed by directory origin and hence it is sent to tmp_error_directory
by that directory origin. After that we check with another directory origin reading from tmp_error_directory that
we get an error_record specifying that file2.txt cannot be parsed again so we have checked that file2.txt was moved
to tmp_error_directory by the first directory origin.
Pipelines look like:
dev_raw_data_source >> local_fs (called Custom Generate file1.txt pipeline)
dev_raw_data_source >> local_fs (called Custom Generate file2.txt pipeline)
dev_raw_data_source >= shell (events lane for the same pipeline as in above comment)
directory >> trash (called Directory Read file1.txt and file2.txt)
directory >> trash (called Directory Read file2.txt from error directory)
"""
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
tmp_error_directory = os.path.join(tempfile.mkdtemp(prefix="err_dir_", dir=tempfile.gettempdir()))
headers = "publication_title print_identifier online_identifier\n"
# Generate file1.txt with good data.
raw_data = headers + "abcd efgh ijkl\n"
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT',
raw_data=raw_data,
stop_after_first_batch=True,
event_data='create-directory')
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='file1', files_suffix='txt')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Custom Generate file1.txt pipeline')
sdc_executor.add_pipeline(files_pipeline)
logger.debug("Creating file1.txt")
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
# Generate file2.txt with bad data and create error directory.
raw_data = headers + f'''ab " "' ''abcd efgh\n'''
dev_raw_data_source.set_attributes(raw_data=raw_data)
local_fs.set_attributes(files_prefix='file2')
shell = pipeline_builder.add_stage('Shell')
shell.set_attributes(preconditions=["${record:value('/text') == 'create-directory'}"],
script=f'''mkdir {tmp_error_directory}''')
dev_raw_data_source >= shell
files_pipeline_2 = pipeline_builder.build('Custom Generate file2.txt pipeline')
sdc_executor.add_pipeline(files_pipeline_2)
logger.debug("Creating file2.txt")
sdc_executor.start_pipeline(files_pipeline_2).wait_for_finished(timeout_sec=5)
# 1st Directory pipeline which tries to read both file1.txt and file2.txt.
builder = sdc_builder.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.set_attributes(file_name_pattern='*.txt',
number_of_threads=2,
file_name_pattern_mode='GLOB',
file_post_processing='NONE',
files_directory=tmp_directory,
error_directory=tmp_error_directory,
read_order='LEXICOGRAPHICAL',
data_format='DELIMITED',
header_line='WITH_HEADER',
delimiter_format_type='TDF') # Tab separated values.
trash = builder.add_stage('Trash')
directory >> trash
pipeline_dir = builder.build('Directory Read file1.txt and file2.txt')
sdc_executor.add_pipeline(pipeline_dir)
sdc_executor.start_pipeline(pipeline_dir)
assert 1 == len(sdc_executor.get_stage_errors(pipeline_dir, directory))
assert "file2" in sdc_executor.get_stage_errors(pipeline_dir, directory)[0].error_message
sdc_executor.stop_pipeline(pipeline_dir)
# 2nd Directory pipeline which will read from error directory to check file2.txt is there.
builder = sdc_builder.get_pipeline_builder()
directory_error = builder.add_stage('Directory', type='origin')
directory_error.set_attributes(file_name_pattern='*.txt',
number_of_threads=2,
file_name_pattern_mode='GLOB',
file_post_processing='NONE',
files_directory=tmp_error_directory,
error_directory=tmp_error_directory,
read_order='LEXICOGRAPHICAL',
data_format='DELIMITED',
header_line='WITH_HEADER',
delimiter_format_type='TDF') # Tab separated values.
trash_2 = builder.add_stage('Trash')
directory_error >> trash_2
pipeline_error_dir = builder.build('Directory Read file2.txt from error directory')
sdc_executor.add_pipeline(pipeline_error_dir)
sdc_executor.start_pipeline(pipeline_error_dir)
    assert 1 == len(sdc_executor.get_stage_errors(pipeline_error_dir, directory_error))
    assert "file2" in sdc_executor.get_stage_errors(pipeline_error_dir, directory_error)[0].error_message
sdc_executor.stop_pipeline(pipeline_error_dir)
def generate_files(sdc_builder, sdc_executor, tmp_directory):
raw_data = 'Hello!'
# pipeline which generates the required files for Directory Origin
builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data)
local_fs = builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}',
files_suffix='txt',
max_records_in_file=1)
dev_raw_data_source >> local_fs
files_pipeline = builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# generate some batches/files
sdc_executor.start_pipeline(files_pipeline).wait_for_pipeline_batch_count(10)
sdc_executor.stop_pipeline(files_pipeline)
@pytest.mark.parametrize('read_order', ['TIMESTAMP', 'LEXICOGRAPHICAL'])
@pytest.mark.parametrize('file_post_processing', ['DELETE', 'ARCHIVE'])
def test_directory_no_post_process_older_files(sdc_builder, sdc_executor, file_writer, shell_executor, list_dir,
read_order, file_post_processing):
"""
    Test that only files that have actually been processed by the origin are post-processed; older files
    before 'first_file_to_process' must be left untouched.
"""
FILES_DIRECTORY = '/tmp'
random_str = get_random_string(string.ascii_letters, 10)
file_path = os.path.join(FILES_DIRECTORY, random_str)
archive_path = os.path.join(FILES_DIRECTORY, random_str + '_archive')
# Create files and archive directories
shell_executor(f"""
mkdir {file_path}
mkdir {archive_path}
""")
# Create files
for i in range(4):
file_writer(os.path.join(file_path, f'file-{i}.txt'), f'{i}')
builder = sdc_builder.get_pipeline_builder()
directory = builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='TEXT',
file_name_pattern='file-*.txt',
file_name_pattern_mode='GLOB',
file_post_processing=file_post_processing,
archive_directory=archive_path,
files_directory=file_path,
process_subdirectories=True,
read_order=read_order,
first_file_to_process='file-2.txt')
trash = builder.add_stage('Trash')
pipeline_finisher = builder.add_stage('Pipeline Finisher Executor')
pipeline_finisher.set_attributes(preconditions=['${record:eventType() == \'no-more-data\'}'],
on_record_error='DISCARD')
directory >> trash
directory >= pipeline_finisher
pipeline = builder.build(f'Test directory origin no postprocess older files {read_order} {file_post_processing}')
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
unprocessed_files = [os.path.join(file_path, f'file-{i}.txt') for i in range(2)]
assert sorted(list_dir('TEXT', file_path, 'file-*.txt', batches=2)) == unprocessed_files
if file_post_processing == 'ARCHIVE':
archived_files = [os.path.join(archive_path, f'file-{i}.txt') for i in range(2, 4)]
assert sorted(list_dir('TEXT', archive_path, 'file-*.txt', batches=2)) == archived_files
def setup_avro_file(sdc_executor, tmp_directory):
"""Setup 5 avro records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
avro_records = [
{
"name": "sdc1",
"age": 3,
"emails": ["sdc1@streamsets.com", "sdc@company.com"],
"boss": {
"name": "sdc0",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc1@apache.org"],
"boss": None
}
},
{
"name": "sdc2",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc@gmail.com"],
"boss": {
"name": "sdc0",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc1@apache.org"],
"boss": None
}
},
{
"name": "sdc3",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc@gmail.com"],
"boss": {
"name": "sdc0",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc1@apache.org"],
"boss": None
}
},
{
"name": "sdc4",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc@gmail.com"],
"boss": {
"name": "sdc0",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc1@apache.org"],
"boss": None
}
},
{
"name": "sdc5",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc@gmail.com"],
"boss": {
"name": "sdc0",
"age": 3,
"emails": ["sdc0@streamsets.com", "sdc1@apache.org"],
"boss": None
}
}]
avro_schema = {
"type": "record",
"name": "Employee",
"fields": [
{"name": "name", "type": "string"},
{"name": "age", "type": "int"},
{"name": "emails", "type": {"type": "array", "items": "string"}},
{"name": "boss" ,"type": ["Employee", "null"]}
]
}
raw_data = ''.join(json.dumps(avro_record) for avro_record in avro_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='AVRO',
avro_schema_location='INLINE',
avro_schema=json.dumps(avro_schema),
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='txt', max_records_in_file=5)
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# generate some batches/files
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return avro_records
def setup_basic_dilimited_file(sdc_executor, tmp_directory):
"""Setup simple 3 csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
csv_records = ["A,B", "c,d", "e,f"]
return setup_dilimited_file(sdc_executor, tmp_directory, csv_records)
def setup_custom_delimited_file(sdc_executor, tmp_directory):
"""Setup 1 custom csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
csv_records = ["A^!B !^$^A"]
return setup_dilimited_file(sdc_executor, tmp_directory, csv_records)
def setup_long_dilimited_file(sdc_executor, tmp_directory):
"""Setup 10 csv records and some records contain long charsets
and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
csv_records = [
"a,b,c,d",
"e,f,g,h",
"aaa,bbb,ccc,ddd",
"i,j,k,l",
"aa1,bb1,cc1,dd1",
"aa2,bb2,cc2,dd2",
"m,n,o,p",
"q,r,s,t",
"aa3,bb3,cc3,dd3",
"aa4,bb5,cc5,dd5"
]
return setup_dilimited_file(sdc_executor, tmp_directory, csv_records)
def setup_dilimited_with_comment_file(sdc_executor, tmp_directory):
"""Setup 3 csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
csv_records = [
"a,b",
"# This is comment",
"c,d"
]
return setup_dilimited_file(sdc_executor, tmp_directory, csv_records)
def setup_dilimited_with_empty_line_file(sdc_executor, tmp_directory):
"""Setup 3 csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
csv_records = [
"a,b",
"",
"c,d"
]
return setup_dilimited_file(sdc_executor, tmp_directory, csv_records)
def setup_dilimited_file(sdc_executor, tmp_directory, csv_records):
"""Setup csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
raw_data = "\n".join(csv_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='csv')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# generate some batches/files
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return csv_records
def read_csv_file(file_path, delimiter, remove_header=False):
""" Reads a csv file with records separated by delimiter"""
rows = []
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delimiter)
for row in csv_reader:
rows.append(row)
if remove_header:
rows = rows[1:]
return rows
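

# Illustrative usage of read_csv_file; '/tmp/sample.csv' is a placeholder path, not a file
# created by this module:
#
#     rows = read_csv_file('/tmp/sample.csv', ',', remove_header=True)
#     # rows is a list of rows, each row being a list of column strings, e.g. [['c', 'd'], ['e', 'f']]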
| 45.294601
| 135
| 0.687453
|
d376bfb81eb157e7ba39f5837d94123798a0a29b
| 4,716
|
py
|
Python
|
src/solarpi/solarpi.py
|
dprossel/solarPi
|
865e3db70500bab19293f222cd0c7f085e73dcf2
|
[
"MIT"
] | null | null | null |
src/solarpi/solarpi.py
|
dprossel/solarPi
|
865e3db70500bab19293f222cd0c7f085e73dcf2
|
[
"MIT"
] | null | null | null |
src/solarpi/solarpi.py
|
dprossel/solarPi
|
865e3db70500bab19293f222cd0c7f085e73dcf2
|
[
"MIT"
] | null | null | null |
import abc
import datetime
import rx
from rx.core.typing import Observable
import rx.operators as ops
from influxdb_client import Point, InfluxDBClient, WriteOptions
import sdm_modbus
from dataclasses import dataclass
from abc import ABC
import serial
import threading
@dataclass
class InfluxDbParams:
"""Contains the necessary parameters to communicate with an InfluxDb instance.
"""
url: str
token: str
organisation: str
bucket: str
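    # A minimal construction sketch; the URL, token, organisation and bucket values below
    # are placeholders that must be replaced with real InfluxDB credentials:
    #
    #     params = InfluxDbParams(url="http://localhost:8086", token="my-token",
    #                             organisation="my-org", bucket="solarpi")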
class Inverter(ABC):
name: str
@abc.abstractmethod
def read_values(self):
pass
class KacoPowadorRs485(Inverter):
RESPONSE_LENGTH = 66
GET_ALL_CMD = 0
bus_address: int
serialPort: serial.Serial
def __init__(self, serial: serial.Serial, bus_address: int, name=None):
self.bus_address = bus_address
self.serialPort = serial
        self.name = name
if name is None:
self.name = "Kaco Powador ({}:{})".format(serial.port, bus_address)
def read_values(self, lock: threading.Lock = None):
if lock is not None:
with lock:
result = self._do_read_values(1)
else:
result = self._do_read_values(1)
if result is None:
return {"status": -1,
"generatorspannung": -1.0,
"generatorstrom": -1.0,
"generatorleistung": -1.0,
"netzspannung": -1.0,
"einspeisestrom": -1.0,
"einspeiseleistung": -1.0,
"temperatur": -1.0,
"tagesertrag": -1.0}
values = result.split()[1:10]
return {"status": int(values[0]),
"generatorspannung": float(values[1]),
"generatorstrom": float(values[2])*1000,
"generatorleistung": float(values[3]),
"netzspannung": float(values[4]),
"einspeisestrom": float(values[5])*1000,
"einspeiseleistung": float(values[6]),
"temperatur": float(values[7]),
"tagesertrag": float(values[8])}
def _do_read_values(self, retries):
if not self.serialPort.is_open:
self.serialPort.open()
self.write_command(self.GET_ALL_CMD)
result = self.serialPort.read(self.RESPONSE_LENGTH)
if len(result) != self.RESPONSE_LENGTH:
print("Wrong response length", len(result))
if retries > 0:
return self._do_read_values(retries - 1)
return None
return result
def write_command(self, command: int):
return self.serialPort.write(str.encode("#{:02d}{}\r".format(self.bus_address, command)))
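    # Illustrative wire format: with bus_address=1, write_command(self.GET_ALL_CMD) sends
    # b"#010\r" -- '#', the bus address zero-padded to two digits, the command number and
    # a carriage return.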
def read_sdm_energy_values(device: sdm_modbus.SDM630, values: list, lock: threading.Lock = None):
"""Read relevant energy values from SDM.
"""
if lock is not None:
with lock:
results = {register: device.read(register) for register in values}
#results = device.read_all(sdm_modbus.registerType.INPUT)
else:
results = {register: device.read(register) for register in values}
#results = device.read_all(sdm_modbus.registerType.INPUT)
return results
def convert_measurements_to_influxdb_point(name: str, measurements: dict):
point = Point(name)
point.time(datetime.datetime.now(datetime.timezone.utc))
for key, val in measurements.items():
point.field(key, val)
return point
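
# Example (illustrative): convert_measurements_to_influxdb_point(
#     "kaco", {"netzspannung": 230.0, "einspeiseleistung": 1500.0})
# returns a Point named "kaco" carrying the two dict entries as fields and the current UTC
# time as its timestamp; this is exactly the shape of dict produced by
# read_sdm_energy_values and Inverter.read_values above.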
def get_sdm_energy_values_observable(
device: sdm_modbus.SDM630, interval: float, values: list, lock: threading.Lock = None, scheduler = None):
return rx.interval(period=datetime.timedelta(seconds=interval), scheduler=scheduler) \
.pipe(ops.map(lambda _: read_sdm_energy_values(device, values, lock)),
ops.map(lambda meas: convert_measurements_to_influxdb_point("sdm630", meas)))
def get_inverter_values_observable(device: Inverter, interval: float, lock: threading.Lock = None, scheduler = None):
return rx.interval(period=datetime.timedelta(seconds=interval), scheduler=scheduler) \
.pipe(ops.map(lambda _: device.read_values(lock)),
ops.map(lambda meas: convert_measurements_to_influxdb_point(device.name, meas)))
def get_combined_observable(observables: list):
return rx.merge(*observables)
def log_observable_to_influx_db(data: Observable, params: InfluxDbParams):
with InfluxDBClient(url=params.url, token=params.token,
org=params.organisation) as db_client:
with db_client.write_api(write_options=WriteOptions(batch_size=1)) as write_api:
write_api.write(bucket=params.bucket, record=data)
data.run()
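

def example_main(sdm_device: sdm_modbus.SDM630, inverter: Inverter, params: InfluxDbParams):
    """Minimal wiring sketch (not invoked anywhere in this module): merge an SDM630
    observable and an inverter observable and stream the resulting points into InfluxDB.

    The register names and the 10 s / 30 s polling intervals below are illustrative
    assumptions; the caller is expected to provide an already configured SDM630 instance,
    inverter and InfluxDbParams.
    """
    lock = threading.Lock()  # serialise access to the shared serial/Modbus bus
    sdm_values = ["voltage_l1", "power_active_l1"]  # hypothetical register names
    observables = [
        get_sdm_energy_values_observable(sdm_device, 10, sdm_values, lock),
        get_inverter_values_observable(inverter, 30, lock),
    ]
    log_observable_to_influx_db(get_combined_observable(observables), params)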
| 34.933333
| 117
| 0.640797
|
79c4d1f8738ba3c7207c5f5d11c8052d3a1bfcd6
| 9,621
|
py
|
Python
|
run.py
|
isabellaenriquez/pokemon-battle-logic
|
0a003c72d436f9215b9353216f9b562643a9e27b
|
[
"MIT"
] | null | null | null |
run.py
|
isabellaenriquez/pokemon-battle-logic
|
0a003c72d436f9215b9353216f9b562643a9e27b
|
[
"MIT"
] | null | null | null |
run.py
|
isabellaenriquez/pokemon-battle-logic
|
0a003c72d436f9215b9353216f9b562643a9e27b
|
[
"MIT"
] | 1
|
2021-01-07T00:07:20.000Z
|
2021-01-07T00:07:20.000Z
|
from nnf import Var, true, false
from lib204 import Encoding
from random import randint
num_pokemon = 3 # number of pokemon each party has
foe = [] # the foe's party of pokemon
player = [] # the player's party of pokemon
# different pokemon types
types = ["fire", "water", "grass", "electric", "ice", "ground", "flying", "rock"]
# initializing variables
""" each pokemon is a dictionary of types, whether the key is true if the pokemon
is of that type, and the key is false if the pokemon is not of that type
"""
for i in range(num_pokemon):
foe_mon = {}
player_mon = {}
for j in range(len(types)): #initialize each type of each pokemon
foe_mon[types[j]] = false
# our variables are the types of the player's pokemon
# taking the format '#number_type'
player_mon[types[j]] = Var(str(i) + '_' + types[j])
# add pokemon to the party
foe.append(foe_mon)
player.append(player_mon)
"""function for making implications
"""
def make_implication(left, right):
return (left.negate() | right)
"""
This function returns 'exclusion,' a disjunction of all
types that aren't in included_types
"""
def exclude_types(pokemon, included_types):
exclusion = false
for t in types: # iterate through all types
if t not in included_types: # check if current type should be excluded
exclusion |= pokemon[t]
return exclusion
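
# For example (illustrative), exclude_types(player[0], ["fire", "water"]) returns the
# disjunction player[0]["grass"] | player[0]["electric"] | ... | player[0]["rock"], i.e. a
# formula that is satisfied exactly when the pokemon has at least one type other than fire
# or water; negating it therefore asserts "no types other than fire or water".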
# our theory
def optimal_battle_strategy():
E = Encoding()
for i in range(num_pokemon):
# whether dual type or not
dual_foe = false
dual_player = false
# all the possible dual types for both foe and player pokemon
#if a pokemon is type A and type B, it can't be any other type
for t in range(len(types)):
for y in range(t+1, len(types)):
if y != t: # check to see if the types being compared aren't the same
dual_foe |= (foe[i][types[t]] & foe[i][types[y]] & (exclude_types(foe[i], [types[t], types[y]])).negate()) # exclude types that aren't t or y
dual_player |= player[i][types[t]] & player[i][types[y]] & (exclude_types(player[i], [types[t], types[y]])).negate()
# all type possibilities (can have 2 and nothing else OR 1 and nothing else)
# if single typed, a pokemon is type A and can't be any other type
foe_type = dual_foe
player_type = dual_player
for t in range(len(types)):
foe_type |= (foe[i][types[t]] & (exclude_types(foe[i], [types[t]])).negate())
player_type |= (player[i][types[t]] & (exclude_types(player[i], [types[t]])).negate())
E.add_constraint(foe_type)
E.add_constraint(player_type)
# nonexisting type combination (fire-grass)
E.add_constraint((player[i]["fire"] & player[i]["grass"]).negate())
E.add_constraint((foe[i]["fire"] & foe[i]["grass"]).negate())
"""if a foe is type A, then player type B is strong against it and
player type C is weak against it (or that type C would be less effective on type A)
thus we say foe type A implies player type B and not player type C
"""
#FIRE is weak against water, ground, and rock; strong against grass and ice; fire is not very effective on fire
E.add_constraint(make_implication(foe[i]["fire"], ((player[i]["water"] | player[i]["ground"] | player[i]["rock"]) & (player[i]["grass"] | player[i]["ice"] | player[i]["fire"]).negate())))
# WATER is weak against grass and electric; strong against fire, ground, and rock; ice is weak against it; water is not very effective on water
E.add_constraint(make_implication(foe[i]["water"], ((player[i]["grass"] | player[i]["electric"]) & (player[i]["fire"] | player[i]["ground"] | player[i]["rock"] | player[i]["ice"] | player[i]["water"]).negate())))
# GRASS is weak against fire, ice, and flying; strong against water, ground, and rock; grass is not very effective on grass
E.add_constraint(make_implication(foe[i]["grass"], ((player[i]["fire"] | player[i]["ice"] | player[i]["flying"]) & (player[i]["water"] | player[i]["ground"] | player[i]["rock"] | player[i]["grass"]).negate())))
# ELECTRIC is ineffective against ground; strong against water and flying; electric is not very effective on electric
E.add_constraint(make_implication(foe[i]["electric"], ((player[i]["ground"]) & (player[i]["water"] | player[i]["flying"] | player[i]["electric"]).negate())))
# ICE is weak against fire, rock, and water; strong against grass, ground, and flying; ice is not very effective on ice
E.add_constraint(make_implication(foe[i]["ice"], ((player[i]["fire"] | player[i]["rock"] | player[i]["water"]) & (player[i]["grass"] | player[i]["ground"] | player[i]["flying"] | player[i]["ice"]).negate())))
# GROUND is weak against water, grass, and ice; ineffective against flying; strong against fire, and rock; immune to electric
E.add_constraint(make_implication(foe[i]["ground"], ((player[i]["water"] | player[i]["grass"] | player[i]["ice"] | player[i]["flying"]) & (player[i]["fire"] | player[i]["electric"] | player[i]["rock"]).negate())))
# FLYING is weak against electric, ice, and rock; strong against grass; immune to ground
E.add_constraint(make_implication(foe[i]["flying"], ((player[i]["electric"] | player[i]["ice"] | player[i]["rock"]) & (player[i]["grass"] | player[i]["ground"]).negate())))
# ROCK is weak against water, grass, and ground; strong against fire, ice, and flying
E.add_constraint(make_implication(foe[i]["rock"], ((player[i]["water"] | player[i]["grass"] | player[i]["ground"]) & (player[i]["fire"] | player[i]["ice"] | player[i]["flying"]).negate())))
return E
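
# Typical use of the encoding (this mirrors test_all_combos and the __main__ block below):
#
#     T = optimal_battle_strategy()
#     sol = T.solve()
#     display_solution(sol)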
"""
Displaying the solution in an easy, readable way
"""
def display_solution(sol):
print('Your foe\'s Pokémon:')
for i in range (len(foe)): # iterate through foe's party
print('Foe Pokémon %s' % str(i + 1) + '.', end=" ") # number in party
full_type = ''
for type_key in foe[i]: # iterate through types of that pokemon
if foe[i][type_key] == true:
if full_type == '':
full_type += type_key
else: # one type has already been displayed
full_type += '-' + type_key
print(full_type)
print('Your Pokémon:')
if not sol: # no solution was found; see our documentation
print('No optimal Pokémon typing found. Give up now.')
else: # valid solution
for i in range(len(player)): # iterate through player's party
print('Player Pokémon %s' % str(i + 1) + '.', end=" ")
full_type = ''
for type_key in sol:
"""first letter in each Var is the number describing the
Pokémon's place in the player's party. for aesthetic purposes,
we will display just the type name without the #_ in front of it
"""
if (type_key[0] == str(i)):
if sol[type_key]:
if full_type == '':
full_type += type_key[2:]
else: # one type has already been displayed
full_type += '-' + type_key[2:]
print(full_type)
# randomize the foe's party
def randomize_foe():
for pokemon in foe:
# choose random number of types (1 or 2)
num_types = randint(1, 2)
# choose random type
type1 = types[randint(0, len(types) - 1)]
pokemon[type1] = true
if num_types == 2: # choose 2nd random type if 2 types were selected
type2 = types[randint(0, len(types) - 1)]
if type1 == types[0]: # fire grass is not an existing type combination
while type2 == types[2]:
type2 = types[randint(0, len(types) - 1)]
elif type1 == types[2]:
while type2 == types[0]:
type2 = types[randint(0, len(types) - 1)]
pokemon[type2] = true
def test_all_combos():
# this assumes num_pokemon = 1
for t in types:
foe[0][t] = true
T = optimal_battle_strategy()
sol = T.solve()
display_solution(sol)
print('\n')
foe[0][t] = false
for i in range(len(types)-1):
for j in range(i+1, len(types)):
if (types[i] == "fire" and types[j] == "grass") or (types[i] == "grass" and types[j] == "fire"):
print('invalid type combination\n')
else:
foe[0][types[i]] = true
foe[0][types[j]] = true
T = optimal_battle_strategy()
sol = T.solve()
display_solution(sol)
print('\n')
foe[0][types[i]] = false
foe[0][types[j]] = false
if __name__ == "__main__":
    choice = input('Enter 0 to choose the foe\'s party, 1 to test all combos, or any other key to randomize the foe\'s party: ')
    if choice == '0':  # the user picks the foe's party manually
for p in range(num_pokemon): # choose the foe pokemon
print(f'Foe #{p+1}')
type1 = None
type2 = None
while (type1 not in types):
type1 = (input('Choose type 1: ')).lower()
while (type2 not in types and type2 != 'single'):
type2 = (input('Choose type 2 (type \'single\' for a single-type Pokémon): ')).lower()
if (type1 == 'fire' and type2 == 'grass') or (type1 == 'grass' and type2 == 'fire'):
print('This type combination does not exist in the world of Pokémon.')
break
foe[p][type1] = true
if type2 != 'single':
foe[p][type2] = true
elif choice == '1':
if num_pokemon == 1:
test_all_combos()
else:
print('Please set number of Pokémon to 1.')
else:
randomize_foe()
if choice != '1':
T = optimal_battle_strategy()
sol = T.solve()
display_solution(sol)
| 45.814286
| 219
| 0.619166
|
20752292af252943638dadd582a081ca7db87fa1
| 2,125
|
py
|
Python
|
addons/syscalls/syscallinfo.py
|
Kileak/OTA-Challenge-Bot
|
aa3663be856e45a6969c9d3b9c141a514d66752d
|
[
"MIT"
] | null | null | null |
addons/syscalls/syscallinfo.py
|
Kileak/OTA-Challenge-Bot
|
aa3663be856e45a6969c9d3b9c141a514d66752d
|
[
"MIT"
] | null | null | null |
addons/syscalls/syscallinfo.py
|
Kileak/OTA-Challenge-Bot
|
aa3663be856e45a6969c9d3b9c141a514d66752d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import collections
import os
class SyscallTable:
def __init__(self, filename):
self.source = filename
self.entries = collections.OrderedDict()
self.parse_table(filename)
def getEntryDict(self, parts, identifiers):
entry = collections.OrderedDict()
for i in range(len(parts)):
if identifiers[i] == "Definition":
parts[i] = parts[i].split(":")[0]
entry[identifiers[i]] = parts[i]
return entry
def parse_table(self, filename):
lines = []
with open(filename) as f:
lines = f.readlines()
# retrieve identifiers from first line
identifiers = lines[0].strip().split("\t")
        for line in lines[1:]:
            parts = line.split("\t")
            self.entries[parts[1]] = self.getEntryDict(parts, identifiers)
def getEntryByID(self, idx):
for entry in self.entries:
if self.entries[entry]["#"] == str(idx):
return self.entries[entry]
return None
def getEntryByName(self, name):
if name in self.entries:
return self.entries[name]
return None
def getInfoMessage(self, entry):
if entry:
msg = ""
for part in entry:
msg += "{0:15} : {1}\n".format(part, entry[part])
return msg
return None
def getInfoMessageByID(self, idx):
entry = self.getEntryByID(idx)
return self.getInfoMessage(entry)
def getInfoMessageByName(self, name):
entry = self.getEntryByName(name)
return self.getInfoMessage(entry)
class SyscallInfo:
def __init__(self, basedir):
self.tables = {}
for table in os.listdir(basedir):
filename = os.path.join(basedir, table)
self.tables[table] = SyscallTable(filename)
def getAvailableArchitectures(self):
return self.tables.keys()
def getArch(self, arch):
if arch in self.tables:
return self.tables[arch]
return None
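

def _example_lookup(basedir="addons/syscalls/tables"):
    """Minimal usage sketch; 'basedir' is an assumed location of the per-architecture
    syscall table files, and the architecture name below may differ in a real setup."""
    info = SyscallInfo(basedir)
    print("available architectures: " + ", ".join(info.getAvailableArchitectures()))
    table = info.getArch("x86_64")  # assumes a table file named 'x86_64' exists
    if table:
        print(table.getInfoMessageByName("read"))
        print(table.getInfoMessageByID(0))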
| 23.611111
| 65
| 0.571765
|
7f310d2031fcc6eb7bc45815610a08b9d46f41cd
| 3,820
|
py
|
Python
|
runtime/perf.py
|
LinuxGit/tidb-insight
|
95b685e543127eda6a29016553cc00f1473e4f4b
|
[
"Apache-2.0"
] | null | null | null |
runtime/perf.py
|
LinuxGit/tidb-insight
|
95b685e543127eda6a29016553cc00f1473e4f4b
|
[
"Apache-2.0"
] | null | null | null |
runtime/perf.py
|
LinuxGit/tidb-insight
|
95b685e543127eda6a29016553cc00f1473e4f4b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Collect stack trace with `perf`
#
# TODO: - switch to perf entire system or only given process(es)
# - set time of perf record, default to 10s
import logging
from os import path
from utils import util
from utils import fileopt
from utils.measurement import MeasurementBase
class Perf(MeasurementBase):
def __init__(self, args, basedir=None, subdir=None, process=None):
# init self.options and prepare self.outdir
super(Perf, self).__init__(args, basedir, subdir)
# the process name and PID of processes(es) to run perf on,
# perf entire system if empty, in format of {"PID": "name"}
self.process_info = process if process else {}
# set params of perf
def build_record_cmd(self, pid=None, outfile=None):
cmd = ["perf", # default executable name
"record", # default action of perf
"-g",
"--call-graph",
"dwarf"]
cmd.append("-F")
try:
cmd.append("%d", self.options["freq"])
except (KeyError, TypeError):
cmd.append("120") # default to 120Hz
if pid:
cmd.append("-p")
cmd.append("%d" % pid)
else:
cmd.append("-a") # default to whole system
# default will be perf.data if neither pid nor outfile is specified
if outfile:
cmd.append("-o")
cmd.append("%s/%s.data" % (self.outdir, outfile))
elif not outfile and pid:
cmd.append("-o")
cmd.append("%s/%d.data" % (self.outdir, pid))
cmd.append("sleep")
try:
cmd.append("%d", self.options["time"])
except (KeyError, TypeError):
cmd.append("10") # default to 10s
return cmd
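    # For example (assuming no 'freq'/'time' options are set, so the defaults above apply),
    # build_record_cmd(pid=1234) yields:
    #   perf record -g --call-graph dwarf -F 120 -p 1234 -o <outdir>/1234.data sleep 10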
def build_archive_cmd(self, pid=None, outfile=None):
cmd = ["perf",
"archive"]
# default will be perf.data if nothing specified
if outfile:
cmd.append("%s/%s.data" % (self.outdir, outfile))
elif not outfile and pid:
cmd.append("%s/%d.data" % (self.outdir, pid))
else:
cmd.append("%s/perf.data" % self.outdir)
return cmd
def run_collecting(self):
if len(self.process_info) > 0:
# perf on given process(es)
for pid, pname in self.process_info.items():
cmd = self.build_record_cmd(pid, pname)
# TODO: unified output: "Now perf recording %s(%d)..." % (pname, pid)
stdout, stderr = util.run_cmd(cmd)
if stdout:
fileopt.write_file(
path.join(self.outdir, "%s.stdout" % pname), stdout)
if stderr:
logging.warn(
"Command '%s' returned error: %s" % (cmd, stderr))
if self.options.archive:
cmd = self.build_archive_cmd(pid, pname)
stdout, stderr = util.run_cmd(cmd)
if stderr:
logging.warn(
"Command '%s' returned error: %s" % (cmd, stderr))
else:
# perf the entire system
cmd = self.build_record_cmd()
stdout, stderr = util.run_cmd(cmd)
if stdout:
fileopt.write_file(
path.join(self.outdir, "perf.stdout"), stdout)
if stderr:
logging.warn("Command '%s' returned error: %s" % (cmd, stderr))
if self.options.archive:
cmd = self.build_archive_cmd()
stdout, stderr = util.run_cmd(cmd)
if stderr:
logging.warn(
"Command '%s' returned error: %s" % (cmd, stderr))
| 35.045872
| 85
| 0.520419
|
8c10cd0e9922859bf3bad2015587fc0a6b2ba5da
| 3,743
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_auto_checkpoint_multiple.py
|
Huangheyl/Paddle
|
a1b640bc66a5cc9583de503e7406aeba67565e8d
|
[
"Apache-2.0"
] | 8
|
2019-06-16T12:36:11.000Z
|
2021-03-05T05:33:21.000Z
|
python/paddle/fluid/tests/unittests/test_auto_checkpoint_multiple.py
|
zlsh80826/Paddle
|
c560a7d57aad990f374ebadd330351f18e2ca65f
|
[
"Apache-2.0"
] | 1
|
2020-09-10T09:05:52.000Z
|
2020-09-10T09:06:22.000Z
|
python/paddle/fluid/tests/unittests/test_auto_checkpoint_multiple.py
|
zlsh80826/Paddle
|
c560a7d57aad990f374ebadd330351f18e2ca65f
|
[
"Apache-2.0"
] | 25
|
2019-12-07T02:14:14.000Z
|
2021-12-30T06:16:30.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
from paddle.fluid.framework import program_guard
from paddle.fluid import unique_name
import numpy as np
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.fluid.tests.unittests.auto_checkpoint_utils import AutoCheckpointBase, get_logger
from paddle.fluid.tests.unittests.test_auto_checkpoint import AutoCheckPointACLBase
logger = get_logger()
class AutoCheckpointTestMul(AutoCheckPointACLBase):
def setUp(self):
get_logger()
logger.info("enter tests")
self._old_environ = dict(os.environ)
proc_env = {
"PADDLE_RUNNING_ENV": "PADDLE_EDL_AUTO_CHECKPOINT",
"PADDLE_TRAINER_ID": "0",
"PADDLE_RUNNING_PLATFORM": "PADDLE_CLOUD",
"PADDLE_JOB_ID": "test_job_auto_dist_multiple",
"PADDLE_EDL_HDFS_HOME": "/usr/local/hadoop-2.7.7",
"PADDLE_EDL_HDFS_NAME": "",
"PADDLE_EDL_HDFS_UGI": "",
"PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_dist_multiple",
"PADDLE_EDL_ONLY_FOR_CE_TEST": "1",
"PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_dist_multiple",
"PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0"
}
os.environ.update(proc_env)
def test_multiple(self):
checker = acp._get_checker()
fs = HDFSClient(checker.hdfs_home, None)
fs.delete(checker.hdfs_checkpoint_path)
self._reset_generator()
logger.info("begin test_multiple")
fs = LocalFS()
save_dir = "./run_save_0"
fs.delete(save_dir)
exe, main_prog1, startup_prog1 = self._generate()
_, main_prog2, startup_prog2 = self._generate()
compiled1, data_loader1, optimizer1, loss1, image1, label1 = \
self._init_env(exe, main_prog1, startup_prog1)
compiled2, data_loader2, optimizer2, loss2, image2, label2 = \
self._init_env(exe, main_prog2, startup_prog2)
o = None
epochs = []
for i in acp.train_epoch_range(3, 0):
for data in data_loader1():
fetch = exe.run(compiled1, feed=data, fetch_list=[loss1])
for data in data_loader2():
fetch = exe.run(compiled2, feed=data, fetch_list=[loss2])
o = acp._get_train_epoch_range()
self.assertEqual(len(o._exe_status), 2)
print(o._exe_status)
epochs.append(i)
o = acp._get_train_epoch_range()
        self.assertTrue(o is None, "the train epoch range must not exist now")
self.assertEqual(i, 2)
self.assertEqual(epochs, [0, 1, 2])
fs.delete(save_dir)
logger.info("end test_multiple")
if __name__ == '__main__':
unittest.main()
| 35.990385
| 93
| 0.687417
|
a6b67bc2f60bc3709558c9310839bb812f0c1362
| 20,116
|
py
|
Python
|
reco_utils/recommender/deeprec/models/dkn.py
|
myknotruby/recommenders
|
d4a41de4e10cf0fa4db7f4e5c4d6bccc6629e201
|
[
"MIT"
] | 3
|
2021-06-22T02:12:38.000Z
|
2021-11-25T02:39:52.000Z
|
reco_utils/recommender/deeprec/models/dkn.py
|
myknotruby/recommenders
|
d4a41de4e10cf0fa4db7f4e5c4d6bccc6629e201
|
[
"MIT"
] | null | null | null |
reco_utils/recommender/deeprec/models/dkn.py
|
myknotruby/recommenders
|
d4a41de4e10cf0fa4db7f4e5c4d6bccc6629e201
|
[
"MIT"
] | 2
|
2021-11-14T13:36:48.000Z
|
2022-03-02T18:09:20.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import tensorflow as tf
from reco_utils.recommender.deeprec.models.base_model import BaseModel
__all__ = ["DKN"]
class DKN(BaseModel):
"""DKN model (Deep Knowledge-Aware Network)
:Citation:
H. Wang, F. Zhang, X. Xie and M. Guo, "DKN: Deep Knowledge-Aware Network for News
Recommendation", in Proceedings of the 2018 World Wide Web Conference on World
Wide Web, 2018.
"""
def __init__(self, hparams, iterator_creator):
"""Initialization steps for DKN.
Compared with the BaseModel, DKN requires two different pre-computed embeddings,
i.e. word embedding and entity embedding.
After creating these two embedding variables, BaseModel's `__init__` method will be called.
Args:
hparams (object): Global hyper-parameters.
iterator_creator (object): DKN data loader class.
"""
self.graph = tf.Graph()
with self.graph.as_default():
with tf.name_scope("embedding"):
word2vec_embedding = self._init_embedding(hparams.wordEmb_file)
self.embedding = tf.Variable(
word2vec_embedding, trainable=True, name="word"
)
if hparams.use_entity:
e_embedding = self._init_embedding(hparams.entityEmb_file)
W = tf.Variable(
tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1)
)
b = tf.Variable(tf.zeros([hparams.dim]))
e_embedding_transformed = tf.nn.tanh(tf.matmul(e_embedding, W) + b)
self.entity_embedding = tf.Variable(
e_embedding_transformed, trainable=True, name="entity"
)
else:
self.entity_embedding = tf.Variable(
tf.constant(
0.0,
shape=[hparams.entity_size, hparams.dim],
dtype=tf.float32,
),
trainable=True,
name="entity",
)
if hparams.use_context:
c_embedding = self._init_embedding(hparams.contextEmb_file)
W = tf.Variable(
tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1)
)
b = tf.Variable(tf.zeros([hparams.dim]))
c_embedding_transformed = tf.nn.tanh(tf.matmul(c_embedding, W) + b)
self.context_embedding = tf.Variable(
c_embedding_transformed, trainable=True, name="context"
)
else:
self.context_embedding = tf.Variable(
tf.constant(
0.0,
shape=[hparams.entity_size, hparams.dim],
dtype=tf.float32,
),
trainable=True,
name="context",
)
super().__init__(hparams, iterator_creator, graph=self.graph)
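
    # In summary (as implemented above): when use_entity / use_context are enabled, the
    # pre-trained entity/context embeddings E (of width entity_dim) are projected into the
    # word-embedding space with a learned affine map followed by tanh, i.e.
    # E' = tanh(E @ W + b) with W of shape [entity_dim, dim]; otherwise zero embeddings of
    # shape [entity_size, dim] are used.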
def _init_embedding(self, file_path):
"""Load pre-trained embeddings as a constant tensor.
Args:
file_path (str): the pre-trained embeddings filename.
Returns:
object: A constant tensor.
"""
return tf.constant(np.load(file_path).astype(np.float32))
def _l2_loss(self):
hparams = self.hparams
l2_loss = tf.zeros([1], dtype=tf.float32)
# embedding_layer l2 loss
l2_loss = tf.add(
l2_loss, tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.embedding))
)
if hparams.use_entity:
l2_loss = tf.add(
l2_loss,
tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.entity_embedding)),
)
if hparams.use_entity and hparams.use_context:
l2_loss = tf.add(
l2_loss,
tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.context_embedding)),
)
params = self.layer_params
for param in params:
l2_loss = tf.add(
l2_loss, tf.multiply(hparams.layer_l2, tf.nn.l2_loss(param))
)
return l2_loss
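
    # In effect (as computed above): l2_loss = embed_l2 * (L2(word_emb) + L2(entity_emb)
    # + L2(context_emb), where applicable) + layer_l2 * sum_i L2(layer_param_i),
    # with tf.nn.l2_loss(t) = sum(t ** 2) / 2.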
def _l1_loss(self):
hparams = self.hparams
l1_loss = tf.zeros([1], dtype=tf.float32)
# embedding_layer l2 loss
l1_loss = tf.add(
l1_loss, tf.multiply(hparams.embed_l1, tf.norm(self.embedding, ord=1))
)
if hparams.use_entity:
l1_loss = tf.add(
l1_loss,
tf.multiply(hparams.embed_l1, tf.norm(self.entity_embedding, ord=1)),
)
if hparams.use_entity and hparams.use_context:
l1_loss = tf.add(
l1_loss,
tf.multiply(hparams.embed_l1, tf.norm(self.context_embedding, ord=1)),
)
params = self.layer_params
for param in params:
l1_loss = tf.add(
l1_loss, tf.multiply(hparams.layer_l1, tf.norm(param, ord=1))
)
return l1_loss
def _build_graph(self):
hparams = self.hparams
self.keep_prob_train = 1 - np.array(hparams.dropout)
self.keep_prob_test = np.ones_like(hparams.dropout)
with tf.compat.v1.variable_scope("DKN") as scope:
logit = self._build_dkn()
return logit
def _build_dkn(self):
"""The main function to create DKN's logic.
Returns:
object: Prediction score made by the DKN model.
"""
hparams = self.hparams
# build attention model for clicked news and candidate news
click_news_embed_batch, candidate_news_embed_batch = self._build_pair_attention(
self.iterator.candidate_news_index_batch,
self.iterator.candidate_news_entity_index_batch,
self.iterator.click_news_index_batch,
self.iterator.click_news_entity_index_batch,
hparams,
)
nn_input = tf.concat(
[click_news_embed_batch, candidate_news_embed_batch], axis=1
)
dnn_channel_part = 2
last_layer_size = dnn_channel_part * self.num_filters_total
layer_idx = 0
hidden_nn_layers = []
hidden_nn_layers.append(nn_input)
with tf.compat.v1.variable_scope(
"nn_part", initializer=self.initializer
) as scope:
for idx, layer_size in enumerate(hparams.layer_sizes):
curr_w_nn_layer = tf.compat.v1.get_variable(
name="w_nn_layer" + str(layer_idx),
shape=[last_layer_size, layer_size],
dtype=tf.float32,
)
curr_b_nn_layer = tf.compat.v1.get_variable(
name="b_nn_layer" + str(layer_idx),
shape=[layer_size],
dtype=tf.float32,
)
curr_hidden_nn_layer = tf.compat.v1.nn.xw_plus_b(
hidden_nn_layers[layer_idx], curr_w_nn_layer, curr_b_nn_layer
)
if hparams.enable_BN is True:
curr_hidden_nn_layer = tf.layers.batch_normalization(
curr_hidden_nn_layer,
momentum=0.95,
epsilon=0.0001,
training=self.is_train_stage,
)
activation = hparams.activation[idx]
curr_hidden_nn_layer = self._active_layer(
logit=curr_hidden_nn_layer, activation=activation
)
hidden_nn_layers.append(curr_hidden_nn_layer)
layer_idx += 1
last_layer_size = layer_size
self.layer_params.append(curr_w_nn_layer)
self.layer_params.append(curr_b_nn_layer)
w_nn_output = tf.compat.v1.get_variable(
name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32
)
b_nn_output = tf.compat.v1.get_variable(
name="b_nn_output", shape=[1], dtype=tf.float32
)
self.layer_params.append(w_nn_output)
self.layer_params.append(b_nn_output)
nn_output = tf.compat.v1.nn.xw_plus_b(
hidden_nn_layers[-1], w_nn_output, b_nn_output
)
return nn_output
def _build_pair_attention(
self,
candidate_word_batch,
candidate_entity_batch,
click_word_batch,
click_entity_batch,
hparams,
):
"""This function learns the candidate news article's embedding and user embedding.
        User embedding is generated from click history and also depends on the candidate news article via an attention mechanism.
Article embedding is generated via KCNN module.
Args:
candidate_word_batch (object): tensor word indices for constructing news article
            candidate_entity_batch (object): tensor entity indices for constructing news article
click_word_batch (object): tensor word indices for constructing user clicked history
click_entity_batch (object): tensor entity indices for constructing user clicked history
hparams (object): global hyper-parameters
Returns:
click_field_embed_final_batch: user embedding
news_field_embed_final_batch: candidate news article embedding
"""
doc_size = hparams.doc_size
attention_hidden_sizes = hparams.attention_layer_sizes
clicked_words = tf.reshape(click_word_batch, shape=[-1, doc_size])
clicked_entities = tf.reshape(click_entity_batch, shape=[-1, doc_size])
with tf.compat.v1.variable_scope(
"attention_net", initializer=self.initializer
) as scope:
            # use Kim's CNN to get the convolutional embeddings
with tf.compat.v1.variable_scope(
"kcnn", initializer=self.initializer, reuse=tf.compat.v1.AUTO_REUSE
) as cnn_scope:
news_field_embed = self._kims_cnn(
candidate_word_batch, candidate_entity_batch, hparams
)
click_field_embed = self._kims_cnn(
clicked_words, clicked_entities, hparams
)
click_field_embed = tf.reshape(
click_field_embed,
shape=[
-1,
hparams.history_size,
hparams.num_filters * len(hparams.filter_sizes),
],
)
avg_strategy = False
if avg_strategy:
click_field_embed_final = tf.reduce_mean(
click_field_embed, axis=1, keepdims=True
)
else:
news_field_embed = tf.expand_dims(news_field_embed, 1)
news_field_embed_repeat = tf.add(
tf.zeros_like(click_field_embed), news_field_embed
)
attention_x = tf.concat(
axis=-1, values=[click_field_embed, news_field_embed_repeat]
)
attention_x = tf.reshape(
attention_x, shape=[-1, self.num_filters_total * 2]
)
attention_w = tf.compat.v1.get_variable(
name="attention_hidden_w",
shape=[self.num_filters_total * 2, attention_hidden_sizes],
dtype=tf.float32,
)
attention_b = tf.compat.v1.get_variable(
name="attention_hidden_b",
shape=[attention_hidden_sizes],
dtype=tf.float32,
)
curr_attention_layer = tf.compat.v1.nn.xw_plus_b(
attention_x, attention_w, attention_b
)
if hparams.enable_BN is True:
curr_attention_layer = tf.layers.batch_normalization(
curr_attention_layer,
momentum=0.95,
epsilon=0.0001,
training=self.is_train_stage,
)
activation = hparams.attention_activation
curr_attention_layer = self._active_layer(
logit=curr_attention_layer, activation=activation
)
attention_output_w = tf.compat.v1.get_variable(
name="attention_output_w",
shape=[attention_hidden_sizes, 1],
dtype=tf.float32,
)
attention_output_b = tf.compat.v1.get_variable(
name="attention_output_b", shape=[1], dtype=tf.float32
)
attention_weight = tf.compat.v1.nn.xw_plus_b(
curr_attention_layer, attention_output_w, attention_output_b
)
attention_weight = tf.reshape(
attention_weight, shape=[-1, hparams.history_size, 1]
)
norm_attention_weight = tf.nn.softmax(attention_weight, axis=1)
click_field_embed_final = tf.reduce_sum(
tf.multiply(click_field_embed, norm_attention_weight),
axis=1,
keepdims=True,
)
if attention_w not in self.layer_params:
self.layer_params.append(attention_w)
if attention_b not in self.layer_params:
self.layer_params.append(attention_b)
if attention_output_w not in self.layer_params:
self.layer_params.append(attention_output_w)
if attention_output_b not in self.layer_params:
self.layer_params.append(attention_output_b)
self.news_field_embed_final_batch = tf.squeeze(news_field_embed)
click_field_embed_final_batch = tf.squeeze(click_field_embed_final)
return click_field_embed_final_batch, self.news_field_embed_final_batch
def _kims_cnn(self, word, entity, hparams):
"""The KCNN module. KCNN is an extension of traditional CNN that incorporates symbolic knowledge from
a knowledge graph into sentence representation learning.
Args:
word (object): word indices for the sentence.
entity (object): entity indices for the sentence. Entities are aligned with words in the sentence.
hparams (object): global hyper-parameters.
Returns:
object: Sentence representation.
"""
        # Kim's CNN parameters
filter_sizes = hparams.filter_sizes
num_filters = hparams.num_filters
dim = hparams.dim
embedded_chars = tf.nn.embedding_lookup(self.embedding, word)
if hparams.use_entity and hparams.use_context:
entity_embedded_chars = tf.nn.embedding_lookup(
self.entity_embedding, entity
)
context_embedded_chars = tf.nn.embedding_lookup(
self.context_embedding, entity
)
concat = tf.concat(
[embedded_chars, entity_embedded_chars, context_embedded_chars], axis=-1
)
elif hparams.use_entity:
entity_embedded_chars = tf.nn.embedding_lookup(
self.entity_embedding, entity
)
concat = tf.concat([embedded_chars, entity_embedded_chars], axis=-1)
else:
concat = embedded_chars
concat_expanded = tf.expand_dims(concat, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.compat.v1.variable_scope(
"conv-maxpool-%s" % filter_size, initializer=self.initializer
):
# Convolution Layer
if hparams.use_entity and hparams.use_context:
filter_shape = [filter_size, dim * 3, 1, num_filters]
elif hparams.use_entity:
filter_shape = [filter_size, dim * 2, 1, num_filters]
else:
filter_shape = [filter_size, dim, 1, num_filters]
W = tf.compat.v1.get_variable(
name="W" + "_filter_size_" + str(filter_size),
shape=filter_shape,
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer(uniform=False),
)
b = tf.compat.v1.get_variable(
name="b" + "_filter_size_" + str(filter_size),
shape=[num_filters],
dtype=tf.float32,
)
if W not in self.layer_params:
self.layer_params.append(W)
if b not in self.layer_params:
self.layer_params.append(b)
conv = tf.nn.conv2d(
concat_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv",
)
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool2d(
h,
ksize=[1, hparams.doc_size - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding="VALID",
name="pool",
)
pooled_outputs.append(pooled)
# Combine all the pooled features
        # self.num_filters_total is the KCNN output dimension
self.num_filters_total = num_filters * len(filter_sizes)
h_pool = tf.concat(pooled_outputs, axis=-1)
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
return h_pool_flat
def infer_embedding(self, sess, feed_dict):
"""Infer document embedding in feed_dict with current model.
Args:
sess (object): The model session object.
feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values.
Returns:
list: News embedding in a batch.
"""
feed_dict[self.layer_keeps] = self.keep_prob_test
feed_dict[self.is_train_stage] = False
return sess.run([self.news_field_embed_final_batch], feed_dict=feed_dict)
def run_get_embedding(self, infile_name, outfile_name):
"""infer document embedding with current model.
Args:
infile_name (str): Input file name, format is [Newsid] [w1,w2,w3...] [e1,e2,e3...]
outfile_name (str): Output file name, format is [Newsid] [embedding]
Returns:
object: An instance of self.
"""
load_sess = self.sess
with tf.io.gfile.GFile(outfile_name, "w") as wt:
for (
batch_data_input,
newsid_list,
data_size,
) in self.iterator.load_infer_data_from_file(infile_name):
news_embedding = self.infer_embedding(load_sess, batch_data_input)[0]
for i in range(data_size):
wt.write(
newsid_list[i]
+ " "
+ ",".join(
[
str(embedding_value)
for embedding_value in news_embedding[i]
]
)
+ "\n"
)
return self
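# A minimal usage sketch for the class above, assuming the reco_utils layout
# this module ships with (prepare_hparams, DKNTextIterator); the yaml path,
# embedding files and epoch count below are illustrative placeholders only.
if __name__ == "__main__":
    from reco_utils.recommender.deeprec.deeprec_utils import prepare_hparams
    from reco_utils.recommender.deeprec.io.dkn_iterator import DKNTextIterator
    # wordEmb_file / entityEmb_file / contextEmb_file are the pre-computed .npy
    # matrices loaded by _init_embedding() above
    hparams = prepare_hparams(
        "dkn.yaml",
        wordEmb_file="word_embeddings.npy",
        entityEmb_file="entity_embeddings.npy",
        contextEmb_file="context_embeddings.npy",
        epochs=5,
    )
    model = DKN(hparams, DKNTextIterator)  # builds the graph defined above
    model.fit("train.txt", "valid.txt")  # training loop from BaseModel
    model.run_get_embedding("news.txt", "news_embeddings.txt")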
| 41.561983
| 126
| 0.547673
|
7f319b40dd14137b81f9f1182826d67df37d6c09
| 5,437
|
py
|
Python
|
google-datacatalog-apache-atlas-connector/tests/google/datacatalog_connectors/apache_atlas/apache_atlas2datacatalog_cli_test.py
|
skadinyo/datacatalog-connectors-hive
|
4239faf72f5c9a7d55b8538c8ddc556cd6071c6d
|
[
"Apache-2.0"
] | 19
|
2020-04-27T21:55:47.000Z
|
2022-03-22T19:45:14.000Z
|
google-datacatalog-apache-atlas-connector/tests/google/datacatalog_connectors/apache_atlas/apache_atlas2datacatalog_cli_test.py
|
skadinyo/datacatalog-connectors-hive
|
4239faf72f5c9a7d55b8538c8ddc556cd6071c6d
|
[
"Apache-2.0"
] | 12
|
2020-05-28T14:48:29.000Z
|
2022-01-15T17:52:09.000Z
|
google-datacatalog-apache-atlas-connector/tests/google/datacatalog_connectors/apache_atlas/apache_atlas2datacatalog_cli_test.py
|
mesmacosta/datacatalog-connectors-hive
|
ab7e49fbef8599dd9053c2260b261ce01f510a47
|
[
"Apache-2.0"
] | 15
|
2020-05-03T17:25:51.000Z
|
2022-01-11T22:10:35.000Z
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from google.datacatalog_connectors import apache_atlas
from google.datacatalog_connectors.apache_atlas import \
apache_atlas2datacatalog_cli
class ApacheAtlas2DataCatalogCliTest(unittest.TestCase):
def test_parse_args_missing_project_id_should_raise_system_exit(self):
self.assertRaises(
SystemExit, apache_atlas2datacatalog_cli.
ApacheAtlas2DataCatalogCli._parse_args, [
'sync', '--atlas-host', 'my-host', '--atlas-port', 'my-port',
'--atlas-user', 'my-user', '--atlas-passsword', 'my-pass'
])
def test_parse_args_missing_atlas_credentials_should_raise_system_exit(
self):
self.assertRaises(
SystemExit, apache_atlas2datacatalog_cli.
ApacheAtlas2DataCatalogCli._parse_args, [
'sync', '--datacatalog-project-id', 'dc-project_id',
'--atlas-host', 'my-host', '--atlas-port', 'my-port'
])
@mock.patch('google.datacatalog_connectors.apache_atlas.sync'
'.MetadataSynchronizer')
def test_run_should_call_synchronizer(self, mock_metadata_synchonizer):
apache_atlas2datacatalog_cli.ApacheAtlas2DataCatalogCli.run([
'sync', '--datacatalog-project-id', 'dc-project_id',
'--atlas-host', 'my-host', '--atlas-port', 'my-port',
'--atlas-user', 'my-user', '--atlas-passsword', 'my-pass'
])
mock_metadata_synchonizer.assert_called_once_with(
atlas_entity_types=None,
atlas_connection_args={
'host': 'my-host',
'port': 'my-port',
'user': 'my-user',
'pass': 'my-pass'
},
datacatalog_location_id='us-central1',
datacatalog_project_id='dc-project_id',
enable_monitoring=None)
synchonizer = mock_metadata_synchonizer.return_value
synchonizer.run.assert_called_once()
@mock.patch('google.datacatalog_connectors.apache_atlas.sync'
'.MetadataSynchronizer')
def test_run_with_entity_types_should_call_synchronizer(
self, mock_metadata_synchonizer):
apache_atlas2datacatalog_cli.ApacheAtlas2DataCatalogCli.run([
'sync', '--datacatalog-project-id', 'dc-project_id',
'--atlas-host', 'my-host', '--atlas-port', 'my-port',
'--atlas-user', 'my-user', '--atlas-passsword', 'my-pass',
'--atlas-entity-types', 'Tables,Columns'
])
mock_metadata_synchonizer.assert_called_once_with(
atlas_entity_types=['Tables', 'Columns'],
atlas_connection_args={
'host': 'my-host',
'port': 'my-port',
'user': 'my-user',
'pass': 'my-pass'
},
datacatalog_location_id='us-central1',
datacatalog_project_id='dc-project_id',
enable_monitoring=None)
synchonizer = mock_metadata_synchonizer.return_value
synchonizer.run.assert_called_once()
@mock.patch('google.datacatalog_connectors.apache_atlas.sync'
'.MetadataEventSynchronizer')
def test_run_should_call_event_synchronizer(self,
mock_metadata_synchonizer):
apache_atlas2datacatalog_cli.ApacheAtlas2DataCatalogCli.run([
'sync-event-hook',
'--datacatalog-project-id',
'dc-project_id',
'--atlas-host',
'my-host',
'--atlas-port',
'my-port',
'--atlas-user',
'my-user',
'--atlas-passsword',
'my-pass',
'--event-servers',
'my-host:port',
'--event-consumer-group-id',
'my_consumer_group',
])
mock_metadata_synchonizer.assert_called_once_with(
atlas_entity_types=None,
atlas_connection_args={
'host': 'my-host',
'port': 'my-port',
'user': 'my-user',
'pass': 'my-pass',
'event_servers': ['my-host:port'],
'event_consumer_group_id': 'my_consumer_group',
'event_hook': True
},
datacatalog_location_id='us-central1',
datacatalog_project_id='dc-project_id',
enable_monitoring=None)
synchonizer = mock_metadata_synchonizer.return_value
synchonizer.run.assert_called_once()
@mock.patch('google.datacatalog_connectors.apache_atlas.'
'apache_atlas2datacatalog_cli'
'.ApacheAtlas2DataCatalogCli')
def test_main_should_call_cli_run(self, mock_cli):
apache_atlas.main()
mock_cli.run.assert_called_once()
| 38.835714
| 77
| 0.608792
|
ff5aef2e9933f0776b7e2c9889a40eb4b4da5712
| 1,012
|
py
|
Python
|
r2s/extensions/abstract.py
|
Kokan/Rest2Syslog
|
69838495e5eb0049ca3dc679dfc12d8026f3e567
|
[
"Apache-2.0"
] | null | null | null |
r2s/extensions/abstract.py
|
Kokan/Rest2Syslog
|
69838495e5eb0049ca3dc679dfc12d8026f3e567
|
[
"Apache-2.0"
] | null | null | null |
r2s/extensions/abstract.py
|
Kokan/Rest2Syslog
|
69838495e5eb0049ca3dc679dfc12d8026f3e567
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC,abstractmethod
class R2SAPIAdaptor(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def fetchItems(self, page_num):
"""Implement a call to a specific API to fetch items (single page)
Return value should be a touple of: isFull, response_json where:
is
"""
pass
class R2SItemFormatter(ABC):
"""Abstract Item formatter - wraps a single json item. handles json parsing"""
options = {}
def __init__(self, item):
self.item = item
super().__init__()
@abstractmethod
def buildMessage(self):
"""Return a string representation of an item - this will be sent to syslog"""
pass
@staticmethod
def wrapItems(items_as_json_array):
"""Input is json array of items. output is list of item formatters (each formatter wraps a single item)"""
pass
@abstractmethod
def getID(self):
"""each item should have a unique identifier"""
| 28.111111
| 114
| 0.634387
|
6fed6f525271440d0df9186e8aba3045c12d2c67
| 2,457
|
py
|
Python
|
backend/pyrogram/types/input_media/input_media_photo.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/types/input_media/input_media_photo.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/types/input_media/input_media_photo.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from typing import Optional, List
from .input_media import InputMedia
from ..messages_and_media import MessageEntity
class InputMediaPhoto(InputMedia):
"""A photo to be sent inside an album.
It is intended to be used with :obj:`~pyrogram.Client.send_media_group`.
Parameters:
media (``str``):
Photo to send.
Pass a file_id as string to send a photo that exists on the Telegram servers or
pass a file path as string to upload a new photo that exists on your local machine.
Sending photo by a URL is currently unsupported.
caption (``str``, *optional*):
Caption of the photo to be sent, 0-1024 characters.
If not specified, the original caption is kept. Pass "" (empty string) to remove the caption.
parse_mode (``str``, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
Pass "markdown" or "md" to enable Markdown-style parsing only.
Pass "html" to enable HTML-style parsing only.
Pass None to completely disable style parsing.
caption_entities (List of :obj:`~pyrogram.types.MessageEntity`):
List of special entities that appear in the caption, which can be specified instead of *parse_mode*.
"""
def __init__(
self,
media: str,
caption: str = None,
parse_mode: Optional[str] = object,
caption_entities: List[MessageEntity] = None
):
super().__init__(media, caption, parse_mode, caption_entities)
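# A hedged usage sketch of the album workflow this class is intended for (see
# the docstring above): the session name and local file paths are illustrative
# placeholders, not real credentials or media.
if __name__ == "__main__":
    from pyrogram import Client
    app = Client("my_account")  # assumes an existing Pyrogram session
    with app:
        app.send_media_group(
            "me",
            [
                InputMediaPhoto("photo1.jpg", caption="**first** photo"),
                InputMediaPhoto("photo2.jpg"),  # second photo, no caption
            ],
        )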
| 41.644068
| 112
| 0.678877
|
b5667b6e99a25d8b9bc10f31e57185e963d2735f
| 798
|
py
|
Python
|
app/routes/options.py
|
PaperDevil/bottle-skill-template
|
69ca2f4e946e1cb1b748fa4b0d3fd78299ce1ed8
|
[
"Apache-2.0"
] | null | null | null |
app/routes/options.py
|
PaperDevil/bottle-skill-template
|
69ca2f4e946e1cb1b748fa4b0d3fd78299ce1ed8
|
[
"Apache-2.0"
] | null | null | null |
app/routes/options.py
|
PaperDevil/bottle-skill-template
|
69ca2f4e946e1cb1b748fa4b0d3fd78299ce1ed8
|
[
"Apache-2.0"
] | null | null | null |
"""
options.py - Configuring CORS and headers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from os import getenv
from bottle import hook, route, response
_allow_origin = getenv('ALLOW_ORIGIN', '*')
_allow_methods = getenv('ALLOW_METHODS', 'GET, POST, PATCH, DELETE')
_allow_headers = getenv('ALLOW_HEADERS', '*')
@hook('after_request')
def enable_cors():
"""Add headers to enable CORS"""
response.headers['Access-Control-Allow-Origin'] = _allow_origin
response.headers['Access-Control-Allow-Methods'] = _allow_methods
response.headers['Access-Control-Allow-Headers'] = _allow_headers
@route('/', method='OPTIONS')
@route('/<path:path>', method='OPTIONS')
def options_handler(path=None):
"""Returns basic application settings to the request client"""
return
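# A hedged usage sketch. The ALLOW_* values above are read from the process
# environment at import time, so they are exported before the app starts, e.g.
#     ALLOW_ORIGIN="https://app.example.com" python app.py
# The entry-point name, host and port here are illustrative assumptions.
if __name__ == "__main__":
    from bottle import run
    # A preflight such as `curl -i -X OPTIONS http://localhost:8080/anything`
    # now returns the Access-Control-Allow-* headers set by enable_cors()
    run(host="localhost", port=8080)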
| 29.555556
| 69
| 0.674185
|
e38168ab8934b42719a7e97f579f66389fda0ce3
| 3,585
|
py
|
Python
|
pretrain_data.py
|
BUAAw-ML/KE-OPT
|
d957798ccd62b8e98ab2bbed9562c50bd5f772f9
|
[
"MIT"
] | null | null | null |
pretrain_data.py
|
BUAAw-ML/KE-OPT
|
d957798ccd62b8e98ab2bbed9562c50bd5f772f9
|
[
"MIT"
] | null | null | null |
pretrain_data.py
|
BUAAw-ML/KE-OPT
|
d957798ccd62b8e98ab2bbed9562c50bd5f772f9
|
[
"MIT"
] | null | null | null |
import argparse
from collections import defaultdict
import json
import math
import os
from os.path import exists, join
from time import time, sleep
import lmdb
import torch
from torch.utils.data import DataLoader, ConcatDataset
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from tqdm import tqdm
import random
from utils.distributed import all_gather_list
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from horovod import torch as hvd
from data import PrefetchLoader
from data.data import TxtVideoAudioDataset, txtvideoaudio_collate
import ipdb
def build_dataloader(dataset, collate_fn, is_train, opts, batch_size):
    loader = DataLoader(dataset, batch_size=batch_size,
                        num_workers=opts.n_workers, pin_memory=opts.pin_mem,
                        collate_fn=collate_fn, shuffle=is_train)
return loader
def build_dataset(ids_path, txt_mapper, video_mapper, audio_mapper):
dataset = TxtVideoAudioDataset(ids_path, txt_mapper, video_mapper, audio_mapper)
collate_fn = txtvideoaudio_collate
return dataset, collate_fn
def create_train_dataloaders(data_cfg, opts, mapper_group):
data_cfg = data_cfg['train']
dataloaders = {}
for d_cfg in data_cfg:
concate_name = ''
dataset_ls = []
for dset in d_cfg['datasets']:
name = dset['name']
concate_name = concate_name + name if concate_name == '' else concate_name + '_' + name
data_type = dset['datatype'] + '_' + name
ids_path = dset['ids_path']
video_mapper = mapper_group.set_video_mapper(dset['video'], opts.video_cfg, data_type)
txt_mapper = mapper_group.set_txt_mapper(dset['txt'], opts.max_txt_len, data_type)
audio_path = dset.get('audio','')
audio_mapper = mapper_group.set_audio_mapper(audio_path, opts.audio_cfg, data_type)
dataset, collate_fn = build_dataset(ids_path, txt_mapper, video_mapper, audio_mapper)
LOGGER.info("Create Dataset {} Success".format(name))
dataset_ls.append(dataset)
dataset = ConcatDataset(dataset_ls)
LOGGER.info("Create Dataset {} Success".format(concate_name))
ratio = d_cfg['mix_ratio']
task = d_cfg['task']
batch_size = d_cfg['batch_size']
loader = build_dataloader(dataset, collate_fn, True, opts, batch_size)
task_name = f'{task}--{concate_name}'
dataloaders[task_name] = (loader, ratio)
return dataloaders, mapper_group
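# A hedged sketch of the config structure consumed by create_train_dataloaders()
# above and create_val_dataloaders() below, reconstructed from the keys they
# access; every name, path and value here is an illustrative placeholder.
EXAMPLE_DATA_CFG = {
    'train': [
        {
            'datasets': [
                {
                    'name': 'webvid',
                    'datatype': 'video',  # combined with name into data_type
                    'ids_path': 'datasets/webvid/train_ids.json',
                    'video': 'datasets/webvid/videos',
                    'txt': 'datasets/webvid/txt_db',
                    # 'audio' is optional; it defaults to '' when absent
                },
            ],
            'mix_ratio': 1,
            'task': 'mlm',
            'batch_size': 64,
        },
    ],
    'val': [
        {
            'name': 'webvid_val',
            'datatype': 'video',
            'ids_path': 'datasets/webvid/val_ids.json',
            'video': 'datasets/webvid/videos',
            'txt': 'datasets/webvid/txt_db',
            'task': 'mlm',
            'batch_size': 64,
        },
    ],
}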
def create_val_dataloaders(data_cfg, opts, mapper_group):
data_cfg = data_cfg['val']
dataloaders = {}
for d_cfg in data_cfg:
name = d_cfg['name']
data_type = d_cfg['datatype']
ids_path = d_cfg['ids_path']
video_mapper = mapper_group.set_video_mapper(d_cfg['video'], opts.video_cfg, data_type)
txt_mapper = mapper_group.set_txt_mapper(d_cfg['txt'], opts.max_txt_len, data_type)
audio_path = d_cfg.get('audio','')
audio_mapper = mapper_group.set_audio_mapper(audio_path, opts.audio_cfg, data_type)
dataset, collate_fn = build_dataset(ids_path, txt_mapper, video_mapper, audio_mapper)
LOGGER.info("Create Dataset {} Success".format(name))
task = d_cfg['task']
batch_size = d_cfg['batch_size']
loader = build_dataloader(dataset, collate_fn, False, opts, batch_size)
task_name = f'{task}--{name}'
dataloaders[task_name] = PrefetchLoader(loader)
return dataloaders, mapper_group
| 38.138298
| 99
| 0.692887
|
1a36169f796d511ef8b177da249a907d38b8ef23
| 4,551
|
py
|
Python
|
app/user/tests/test_user_api.py
|
vivek28111992/python-project
|
80cce4c1ca42c7ad5cabeedd4d4d2cf37d03e83a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
vivek28111992/python-project
|
80cce4c1ca42c7ad5cabeedd4d4d2cf37d03e83a
|
[
"MIT"
] | null | null | null |
app/user/tests/test_user_api.py
|
vivek28111992/python-project
|
80cce4c1ca42c7ad5cabeedd4d4d2cf37d03e83a
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self) -> None:
self.client = APIClient()
def test_create_valid_user_success(self) -> None:
"""Test creating user with valid payload is successful"""
payload = {
'email': 'test@demo.com',
'password': 'pass@123',
'name': 'Test Name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
"""Test creating a user that already exists fall"""
payload = {'email': 'test@demo.com', 'password': 'pass@1234', 'name': 'Test'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that the password must be more than 5 characters"""
payload = {'email': 'test@demo.com', 'password': 'pass@', 'name': 'Test'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
payload = {'email': 'test@demo.com', 'password': 'pass@123', 'name': 'Test'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
"""Test that token is not created if invalid credentials are given"""
create_user(email='test@demo.com', password='pass@123')
payload = {'email': 'test@demo.com', 'password': 'wrongpass'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doesn't exists"""
payload = {'email': 'test@demo.com', 'password': 'pass@123'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email & password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
"""Test that authentication is required for users"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTest(TestCase):
"""Test API requests that require authentication"""
def setUp(self) -> None:
self.user = create_user(
email='test@demo.com',
password='pass@123',
name='Vivek Pawar'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
"""Test retrieving profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the url"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""test updating the user profile for authentication user"""
payload = {'name': 'Vivek Pawar', 'password': 'pass@123'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 33.463235
| 81
| 0.70468
|
8c790f0dbb4d81b408276df7fbdf4f5924a798a9
| 7,868
|
py
|
Python
|
pyon/core/governance/conversation/test/test_fsm_processing.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | 2
|
2015-06-09T16:07:09.000Z
|
2015-07-28T10:06:31.000Z
|
pyon/core/governance/conversation/test/test_fsm_processing.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | 3
|
2020-07-22T15:14:55.000Z
|
2021-12-13T19:35:06.000Z
|
pyon/core/governance/conversation/test/test_fsm_processing.py
|
ooici/pyon
|
122c629290d27f32f2f41dafd5c12469295e8acf
|
[
"BSD-2-Clause"
] | null | null | null |
__author__ = 'rn710'
import os, inspect
from pyon.core.governance.conversation.core.transition import TransitionFactory
from pyon.core.governance.conversation.core.local_type import LocalType
from pyon.core.governance.conversation.core.fsm import ExceptionFSM, ExceptionFailAssertion
from pyon.core.governance.conversation.parsing.base_parser import ANTLRScribbleParser
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.log import log
from nose.plugins.attrib import attr
def purchasingAtBuyer_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
return events
def locateChoiceAtBuyer_events():
events = []
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'OK', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'OutOfStock', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'OutOfStock', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'OutOfStock', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Finish', 'Seller'))
return events
def recAtBuyer_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Invoice', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Invoice', 'Seller'))
return events
def recAndChoice_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Stop', 'Seller'))
return events
def parallelAtSeller1_events():
events = []
events.append(TransitionFactory.create(LocalType.RESV, 'Order', 'Buyer'))
events.append(TransitionFactory.create(LocalType.SEND, 'Confirmation', 'Buyer'))
events.append(TransitionFactory.create(LocalType.SEND, 'OK', 'Buyer'))
return events
def Interrupt_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Help', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'MoreHelp', 'Seller'))
return events
def main_auction_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Invoice', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'OK', 'Buyer'))
return events
def logic_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
return events
def recAsRepeat_events():
events = []
events.append(TransitionFactory.create(LocalType.SEND, 'Order', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'OK', 'Seller'))
events.append(TransitionFactory.create(LocalType.RESV, 'Confirmation', 'Seller'))
events.append(TransitionFactory.create(LocalType.SEND, 'OK', 'Seller'))
return events
@attr('INT')
class TestFSM(IonIntegrationTestCase):
def setUp(self):
cur_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
self.path = '%s/specs/'%cur_dir
def base(self, lt_filename, events):
try:
myparser = ANTLRScribbleParser()
res = myparser.parse(self.path + lt_filename)
builder = myparser.walk(res)
log.debug(builder.memory)
log.debug(builder.main_fsm.fsm.memory)
log.debug(builder.main_fsm.recursions_states)
log.debug(builder.current_fsm.fsm.state_transitions)
builder.main_fsm.fsm.process_list(events)
except ExceptionFSM: raise
def base_logic(self, lt_filename, events, payloads):
try:
myparser = ANTLRScribbleParser()
res = myparser.parse(self.path + lt_filename)
builder = myparser.walk(res)
log.debug(builder.current_fsm.fsm.state_transitions)
builder.main_fsm.fsm.set_assertion_check_on()
builder.main_fsm.fsm.process_list(events, payloads)
log.debug(builder.main_fsm.fsm.interrupt_transition)
log.debug(builder.main_fsm.fsm.interrupt_start_state)
except ExceptionFSM:
raise
def test_rec_as_repeat(self):
self.base('RecAsRepeat.spr', recAsRepeat_events())
self.assertEqual(1, 1)
def test_simpleInteraction(self):
self.base('PurchasingAtBuyer.spr', purchasingAtBuyer_events())
self.assertEqual(1, 1)
def test_choice(self):
# Test The First branch
self.base('LocateChoiceAtBuyer.spr', locateChoiceAtBuyer_events()[0:2])
# Test The Second branch
self.base('LocateChoiceAtBuyer.spr', locateChoiceAtBuyer_events()[2:6])
self.assertEqual(1, 1)
def test_choice_wrong(self):
# Test The First branch
self.base('LocateChoiceAtBuyer.spr', locateChoiceAtBuyer_events()[0:2])
# Test The Second branch
self.assertRaises(ExceptionFSM, self.base, 'LocateChoiceAtBuyer.spr', locateChoiceAtBuyer_events()[1:4])
def test_parallel(self):
self.base('ParallelAtSeller1.spr', parallelAtSeller1_events())
self.assertEqual(1, 1)
def test_parallel_wrong(self):
self.assertRaises(ExceptionFSM, self.base, 'ParallelAtSeller1.spr', recAtBuyer_events()[1:])
def test_logic(self):
payloads = [[1], ["a"], [5], [4]]
self.base_logic('logic.spr', logic_events(), payloads)
self.assertEqual(1, 1)
def test_logic_fail(self):
payloads = [[1], ["Hello"], [1], [4]]
self.assertRaises(ExceptionFailAssertion, self.base_logic, 'logic.spr',logic_events(), payloads)
self.assertEqual(1, 1)
def test_interrupt(self):
self.base('Interrupt.spr', Interrupt_events()[0:3])
self.assertEqual(1, 1)
def test_interrupt_execute_do_and_interrupt(self):
self.assertRaises(ExceptionFSM, self.base, 'Interrupt.spr', Interrupt_events()[0:6])
self.assertEqual(1, 1)
def test_interrupt_when_interrupt_occur(self):
self.base('Interrupt.spr', (Interrupt_events()[0:2]+Interrupt_events()[4:6]))
self.assertEqual(1, 1)
| 47.113772
| 572
| 0.665735
|
31bbc5d2a06d56d7d7e4e82b0cdc2dfd37b3166a
| 19,200
|
py
|
Python
|
fiftyone/utils/eval/detection.py
|
williamcorsel/fiftyone
|
22e34e91deb1d2e2fe6316ec81714e0c55015523
|
[
"Apache-2.0"
] | null | null | null |
fiftyone/utils/eval/detection.py
|
williamcorsel/fiftyone
|
22e34e91deb1d2e2fe6316ec81714e0c55015523
|
[
"Apache-2.0"
] | null | null | null |
fiftyone/utils/eval/detection.py
|
williamcorsel/fiftyone
|
22e34e91deb1d2e2fe6316ec81714e0c55015523
|
[
"Apache-2.0"
] | null | null | null |
"""
Detection evaluation.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import itertools
import logging
import numpy as np
import fiftyone.core.evaluation as foe
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.utils as fou
import fiftyone.core.validation as fov
import fiftyone.utils.iou as foui
from .base import BaseEvaluationResults
logger = logging.getLogger(__name__)
def evaluate_detections(
samples,
pred_field,
gt_field="ground_truth",
eval_key=None,
classes=None,
missing=None,
method=None,
iou=0.50,
use_masks=False,
use_boxes=False,
classwise=True,
**kwargs,
):
"""Evaluates the predicted detections in the given samples with respect to
the specified ground truth detections.
This method supports evaluating the following spatial data types:
- Object detections in :class:`fiftyone.core.labels.Detections` format
- Instance segmentations in :class:`fiftyone.core.labels.Detections`
format with their ``mask`` attributes populated
- Polygons in :class:`fiftyone.core.labels.Polylines` format
- Temporal detections in :class:`fiftyone.core.labels.TemporalDetections`
format
For spatial object detection evaluation, this method uses COCO-style
evaluation by default.
For temporal segment detection, this method uses ActivityNet-style
evaluation by default.
You can use the ``method`` parameter to select a different method, and you
can optionally customize the method by passing additional parameters for
the method's config class as ``kwargs``.
The supported ``method`` values and their associated configs are:
- ``"coco"``: :class:`fiftyone.utils.eval.coco.COCOEvaluationConfig`
- ``"open-images"``: :class:`fiftyone.utils.eval.openimages.OpenImagesEvaluationConfig`
- ``"activitynet"``: :class:`fiftyone.utils.eval.activitynet.ActivityNetEvaluationConfig`
If an ``eval_key`` is provided, a number of fields are populated at the
object- and sample-level recording the results of the evaluation:
    - True positive (TP), false positive (FP), and false negative (FN) counts
      for each sample are saved in top-level fields of each sample::
TP: sample.<eval_key>_tp
FP: sample.<eval_key>_fp
FN: sample.<eval_key>_fn
In addition, when evaluating frame-level objects, TP/FP/FN counts are
recorded for each frame::
TP: frame.<eval_key>_tp
FP: frame.<eval_key>_fp
FN: frame.<eval_key>_fn
- The fields listed below are populated on each individual object; these
fields tabulate the TP/FP/FN status of the object, the ID of the
matching object (if any), and the matching IoU::
TP/FP/FN: object.<eval_key>
ID: object.<eval_key>_id
IoU: object.<eval_key>_iou
Args:
samples: a :class:`fiftyone.core.collections.SampleCollection`
pred_field: the name of the field containing the predicted
:class:`fiftyone.core.labels.Detections`,
:class:`fiftyone.core.labels.Polylines`,
or :class:`fiftyone.core.labels.TemporalDetections`
gt_field ("ground_truth"): the name of the field containing the ground
truth :class:`fiftyone.core.labels.Detections`,
:class:`fiftyone.core.labels.Polylines`,
or :class:`fiftyone.core.labels.TemporalDetections`
eval_key (None): an evaluation key to use to refer to this evaluation
classes (None): the list of possible classes. If not provided, classes
are loaded from :meth:`fiftyone.core.dataset.Dataset.classes` or
:meth:`fiftyone.core.dataset.Dataset.default_classes` if possible,
or else the observed ground truth/predicted labels are used
missing (None): a missing label string. Any unmatched objects are given
this label for results purposes
method (None): a string specifying the evaluation method to use.
For spatial object detection, the supported values are
``("coco", "open-images")`` and the default is ``"coco"``. For
temporal segment detection, the supported values are
``("activitynet")`` and the default is ``"activitynet"``
iou (0.50): the IoU threshold to use to determine matches
use_masks (False): whether to compute IoUs using the instances masks in
the ``mask`` attribute of the provided objects, which must be
:class:`fiftyone.core.labels.Detection` instances
use_boxes (False): whether to compute IoUs using the bounding boxes
of the provided :class:`fiftyone.core.labels.Polyline` instances
rather than using their actual geometries
classwise (True): whether to only match objects with the same class
label (True) or allow matches between classes (False)
**kwargs: optional keyword arguments for the constructor of the
:class:`DetectionEvaluationConfig` being used
Returns:
a :class:`DetectionResults`
"""
fov.validate_collection_label_fields(
samples,
(pred_field, gt_field),
(fol.Detections, fol.Polylines, fol.TemporalDetections),
same_type=True,
)
label_type = samples._get_label_field_type(gt_field)
is_temporal = issubclass(label_type, fol.TemporalDetections)
if is_temporal:
fov.validate_video_collection(samples)
else:
kwargs.update(dict(use_masks=use_masks, use_boxes=use_boxes))
config = _parse_config(
pred_field,
gt_field,
method,
is_temporal,
iou=iou,
classwise=classwise,
**kwargs,
)
if classes is None:
if pred_field in samples.classes:
classes = samples.classes[pred_field]
elif gt_field in samples.classes:
classes = samples.classes[gt_field]
elif samples.default_classes:
classes = samples.default_classes
eval_method = config.build()
eval_method.ensure_requirements()
eval_method.register_run(samples, eval_key)
eval_method.register_samples(samples)
if config.requires_additional_fields:
_samples = samples
else:
_samples = samples.select_fields([gt_field, pred_field])
processing_frames = samples._is_frame_field(pred_field)
if eval_key is not None:
tp_field = "%s_tp" % eval_key
fp_field = "%s_fp" % eval_key
fn_field = "%s_fn" % eval_key
# note: fields are manually declared so they'll exist even when
# `samples` is empty
dataset = samples._dataset
dataset._add_sample_field_if_necessary(tp_field, fof.IntField)
dataset._add_sample_field_if_necessary(fp_field, fof.IntField)
dataset._add_sample_field_if_necessary(fn_field, fof.IntField)
if processing_frames:
dataset._add_frame_field_if_necessary(tp_field, fof.IntField)
dataset._add_frame_field_if_necessary(fp_field, fof.IntField)
dataset._add_frame_field_if_necessary(fn_field, fof.IntField)
matches = []
logger.info("Evaluating detections...")
for sample in _samples.iter_samples(progress=True):
if processing_frames:
docs = sample.frames.values()
else:
docs = [sample]
sample_tp = 0
sample_fp = 0
sample_fn = 0
for doc in docs:
doc_matches = eval_method.evaluate(doc, eval_key=eval_key)
matches.extend(doc_matches)
tp, fp, fn = _tally_matches(doc_matches)
sample_tp += tp
sample_fp += fp
sample_fn += fn
if processing_frames and eval_key is not None:
doc[tp_field] = tp
doc[fp_field] = fp
doc[fn_field] = fn
if eval_key is not None:
sample[tp_field] = sample_tp
sample[fp_field] = sample_fp
sample[fn_field] = sample_fn
sample.save()
results = eval_method.generate_results(
samples, matches, eval_key=eval_key, classes=classes, missing=missing
)
eval_method.save_run_results(samples, eval_key, results)
return results
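# A hedged usage sketch of the call above, in the spirit of FiftyOne's
# quickstart; the helper name, the zoo dataset and the "predictions" /
# "ground_truth" field names are assumptions, not requirements of this module.
def _example_evaluate_detections():
    import fiftyone.zoo as foz
    dataset = foz.load_zoo_dataset("quickstart")  # ships with both label fields
    results = evaluate_detections(
        dataset,
        "predictions",
        gt_field="ground_truth",
        eval_key="eval",  # populates eval_tp / eval_fp / eval_fn fields
        iou=0.5,
    )
    results.print_report()  # per-class precision/recall over the matches
    return results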
class DetectionEvaluationConfig(foe.EvaluationMethodConfig):
"""Base class for configuring :class:`DetectionEvaluation` instances.
Args:
pred_field: the name of the field containing the predicted
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
gt_field: the name of the field containing the ground truth
:class:`fiftyone.core.labels.Detections` or
:class:`fiftyone.core.labels.Polylines`
iou (None): the IoU threshold to use to determine matches
classwise (None): whether to only match objects with the same class
label (True) or allow matches between classes (False)
"""
def __init__(
self, pred_field, gt_field, iou=None, classwise=None, **kwargs
):
super().__init__(**kwargs)
self.pred_field = pred_field
self.gt_field = gt_field
self.iou = iou
self.classwise = classwise
@property
def requires_additional_fields(self):
"""Whether fields besides ``pred_field`` and ``gt_field`` are required
in order to perform evaluation.
        If True, then the full samples will be loaded rather than using
:meth:`select_fields() <fiftyone.core.collections.SampleCollection.select_fields>`
to optimize.
"""
return False
class DetectionEvaluation(foe.EvaluationMethod):
"""Base class for detection evaluation methods.
Args:
config: a :class:`DetectionEvaluationConfig`
"""
def __init__(self, config):
super().__init__(config)
self.gt_field = None
self.pred_field = None
def register_samples(self, samples):
"""Registers the sample collection on which evaluation will be
performed.
This method will be called before the first call to
:meth:`evaluate`. Subclasses can extend this method to perform
any setup required for an evaluation run.
Args:
samples: a :class:`fiftyone.core.collections.SampleCollection`
"""
self.gt_field, _ = samples._handle_frame_field(self.config.gt_field)
self.pred_field, _ = samples._handle_frame_field(
self.config.pred_field
)
def evaluate(self, doc, eval_key=None):
"""Evaluates the ground truth and predictions in the given document.
Args:
doc: a :class:`fiftyone.core.document.Document`
eval_key (None): the evaluation key for this evaluation
Returns:
            a list of matched
            ``(gt_label, pred_label, iou, pred_confidence, gt_id, pred_id)``
            tuples
"""
raise NotImplementedError("subclass must implement evaluate()")
def generate_results(
self, samples, matches, eval_key=None, classes=None, missing=None
):
"""Generates aggregate evaluation results for the samples.
Subclasses may perform additional computations here such as IoU sweeps
in order to generate mAP, PR curves, etc.
Args:
samples: a :class:`fiftyone.core.collections.SampleCollection`
matches: a list of
``(gt_label, pred_label, iou, pred_confidence, gt_id, pred_id)``
matches. Either label can be ``None`` to indicate an unmatched
object
eval_key (None): the evaluation key for this evaluation
classes (None): the list of possible classes. If not provided, the
observed ground truth/predicted labels are used for results
purposes
missing (None): a missing label string. Any unmatched objects are
given this label for results purposes
Returns:
a :class:`DetectionResults`
"""
return DetectionResults(
matches,
eval_key=eval_key,
gt_field=self.config.gt_field,
pred_field=self.config.pred_field,
classes=classes,
missing=missing,
samples=samples,
)
def get_fields(self, samples, eval_key):
pred_field = self.config.pred_field
pred_type = samples._get_label_field_type(pred_field)
pred_key = "%s.%s.%s" % (
pred_field,
pred_type._LABEL_LIST_FIELD,
eval_key,
)
gt_field = self.config.gt_field
gt_type = samples._get_label_field_type(gt_field)
gt_key = "%s.%s.%s" % (gt_field, gt_type._LABEL_LIST_FIELD, eval_key)
fields = [
"%s_tp" % eval_key,
"%s_fp" % eval_key,
"%s_fn" % eval_key,
pred_key,
"%s_id" % pred_key,
"%s_iou" % pred_key,
gt_key,
"%s_id" % gt_key,
"%s_iou" % gt_key,
]
if samples._is_frame_field(gt_field):
prefix = samples._FRAMES_PREFIX + eval_key
fields.extend(
["%s_tp" % prefix, "%s_fp" % prefix, "%s_fn" % prefix]
)
return fields
def cleanup(self, samples, eval_key):
fields = [
"%s_tp" % eval_key,
"%s_fp" % eval_key,
"%s_fn" % eval_key,
]
try:
pred_field, _ = samples._handle_frame_field(self.config.pred_field)
pred_type = samples._get_label_field_type(self.config.pred_field)
pred_key = "%s.%s.%s" % (
pred_field,
pred_type._LABEL_LIST_FIELD,
eval_key,
)
fields.extend([pred_key, "%s_id" % pred_key, "%s_iou" % pred_key])
except ValueError:
# Field no longer exists, nothing to cleanup
pass
try:
gt_field, _ = samples._handle_frame_field(self.config.gt_field)
gt_type = samples._get_label_field_type(self.config.gt_field)
gt_key = "%s.%s.%s" % (
gt_field,
gt_type._LABEL_LIST_FIELD,
eval_key,
)
fields.extend([gt_key, "%s_id" % gt_key, "%s_iou" % gt_key])
except ValueError:
# Field no longer exists, nothing to cleanup
pass
if samples._is_frame_field(self.config.pred_field):
samples._dataset.delete_sample_fields(
["%s_tp" % eval_key, "%s_fp" % eval_key, "%s_fn" % eval_key],
error_level=1,
)
samples._dataset.delete_frame_fields(fields, error_level=1)
else:
samples._dataset.delete_sample_fields(fields, error_level=1)
def _validate_run(self, samples, eval_key, existing_info):
self._validate_fields_match(eval_key, "pred_field", existing_info)
self._validate_fields_match(eval_key, "gt_field", existing_info)
class DetectionResults(BaseEvaluationResults):
"""Class that stores the results of a detection evaluation.
Args:
matches: a list of
``(gt_label, pred_label, iou, pred_confidence, gt_id, pred_id)``
matches. Either label can be ``None`` to indicate an unmatched
object
eval_key (None): the evaluation key for this evaluation
gt_field (None): the name of the ground truth field
pred_field (None): the name of the predictions field
classes (None): the list of possible classes. If not provided, the
observed ground truth/predicted labels are used
missing (None): a missing label string. Any unmatched objects are given
this label for evaluation purposes
samples (None): the :class:`fiftyone.core.collections.SampleCollection`
for which the results were computed
"""
def __init__(
self,
matches,
eval_key=None,
gt_field=None,
pred_field=None,
classes=None,
missing=None,
samples=None,
):
if matches:
ytrue, ypred, ious, confs, ytrue_ids, ypred_ids = zip(*matches)
else:
ytrue, ypred, ious, confs, ytrue_ids, ypred_ids = (
[],
[],
[],
[],
[],
[],
)
super().__init__(
ytrue,
ypred,
confs=confs,
eval_key=eval_key,
gt_field=gt_field,
pred_field=pred_field,
ytrue_ids=ytrue_ids,
ypred_ids=ypred_ids,
classes=classes,
missing=missing,
samples=samples,
)
self.ious = np.array(ious)
@classmethod
def _from_dict(cls, d, samples, config, **kwargs):
ytrue = d["ytrue"]
ypred = d["ypred"]
ious = d["ious"]
confs = d.get("confs", None)
if confs is None:
confs = itertools.repeat(None)
ytrue_ids = d.get("ytrue_ids", None)
if ytrue_ids is None:
ytrue_ids = itertools.repeat(None)
ypred_ids = d.get("ypred_ids", None)
if ypred_ids is None:
ypred_ids = itertools.repeat(None)
eval_key = d.get("eval_key", None)
gt_field = d.get("gt_field", None)
pred_field = d.get("pred_field", None)
classes = d.get("classes", None)
missing = d.get("missing", None)
matches = list(zip(ytrue, ypred, ious, confs, ytrue_ids, ypred_ids))
return cls(
matches,
eval_key=eval_key,
gt_field=gt_field,
pred_field=pred_field,
classes=classes,
missing=missing,
samples=samples,
**kwargs,
)
def _parse_config(pred_field, gt_field, method, is_temporal, **kwargs):
if method is None:
if is_temporal:
method = "activitynet"
else:
method = "coco"
if method == "activitynet":
from .activitynet import ActivityNetEvaluationConfig
return ActivityNetEvaluationConfig(pred_field, gt_field, **kwargs)
if method == "coco":
from .coco import COCOEvaluationConfig
return COCOEvaluationConfig(pred_field, gt_field, **kwargs)
if method == "open-images":
from .openimages import OpenImagesEvaluationConfig
return OpenImagesEvaluationConfig(pred_field, gt_field, **kwargs)
raise ValueError("Unsupported evaluation method '%s'" % method)
def _tally_matches(matches):
tp = 0
fp = 0
fn = 0
for match in matches:
gt_label = match[0]
pred_label = match[1]
if gt_label is None:
fp += 1
elif pred_label is None:
fn += 1
elif gt_label != pred_label:
fp += 1
fn += 1
else:
tp += 1
return tp, fp, fn
| 34.408602
| 95
| 0.618698
|
1ed88c180e74ace750556227c7b1bc3b265c6193
| 776
|
py
|
Python
|
src/sagemaker/sklearn/__init__.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | 1
|
2019-10-06T14:03:07.000Z
|
2019-10-06T14:03:07.000Z
|
src/sagemaker/sklearn/__init__.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/sklearn/__init__.py
|
pengk19/sagemaker-python-sdk
|
0866a304fea44522fd1e3b6c4509cd05dda064dd
|
[
"Apache-2.0"
] | 1
|
2019-10-06T10:53:30.000Z
|
2019-10-06T10:53:30.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
from sagemaker.sklearn.estimator import SKLearn # noqa: F401
from sagemaker.sklearn.model import SKLearnModel, SKLearnPredictor # noqa: F401
| 43.111111
| 80
| 0.76933
|
ec1df24b7a0d0bf3e8517810be8b0590ad82c19f
| 580
|
py
|
Python
|
migrations/versions/eba13c37bbf0_add_comments.py
|
mutalisk999/Flog
|
5d836e26967b39faebdf2d5a2c558316bf93221b
|
[
"MIT"
] | 1
|
2020-08-24T03:39:52.000Z
|
2020-08-24T03:39:52.000Z
|
migrations/versions/eba13c37bbf0_add_comments.py
|
mutalisk999/Flog
|
5d836e26967b39faebdf2d5a2c558316bf93221b
|
[
"MIT"
] | null | null | null |
migrations/versions/eba13c37bbf0_add_comments.py
|
mutalisk999/Flog
|
5d836e26967b39faebdf2d5a2c558316bf93221b
|
[
"MIT"
] | null | null | null |
"""Add Feedback
Revision ID: eba13c37bbf0
Revises: 597e79657a6d
Create Date: 2020-08-04 16:23:24.102806
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "eba13c37bbf0"
down_revision = "597e79657a6d"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 20
| 66
| 0.653448
|
369cf76a5e2bf56b72977ee58de394b71b271d97
| 52,885
|
py
|
Python
|
tests/components/mqtt/test_init.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | null | null | null |
tests/components/mqtt/test_init.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | null | null | null |
tests/components/mqtt/test_init.py
|
SmarthomeNinja/core
|
f4b8a95205ea7d4126fc5e704da532cd8eed937e
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the MQTT component."""
from datetime import datetime, timedelta
import json
import ssl
import unittest
import pytest
import voluptuous as vol
from homeassistant.components import mqtt, websocket_api
from homeassistant.components.mqtt import debug_info
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, MagicMock, call, mock_open, patch
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_fire_time_changed,
async_mock_mqtt_component,
fire_mqtt_message,
get_test_home_assistant,
mock_device_registry,
mock_mqtt_component,
mock_registry,
threadsafe_coroutine_factory,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture(autouse=True)
def mock_storage(hass_storage):
"""Autouse hass_storage for the TestCase tests."""
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def mock_mqtt():
"""Make sure connection is established."""
with patch("homeassistant.components.mqtt.MQTT") as mock_mqtt:
mock_mqtt.return_value.async_connect = AsyncMock(return_value=True)
mock_mqtt.return_value.async_disconnect = AsyncMock(return_value=True)
yield mock_mqtt
async def async_mock_mqtt_client(hass, config=None):
"""Mock the MQTT paho client."""
if config is None:
config = {mqtt.CONF_BROKER: "mock-broker"}
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().unsubscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = await async_setup_component(hass, mqtt.DOMAIN, {mqtt.DOMAIN: config})
assert result
await hass.async_block_till_done()
return mock_client()
mock_mqtt_client = threadsafe_coroutine_factory(async_mock_mqtt_client)
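# Why the (0, 0) return values in async_mock_mqtt_client above: paho-mqtt's
# subscribe()/unsubscribe() report a (result_code, message_id) pair and publish()
# historically returned the same shape (newer versions wrap it in MQTTMessageInfo);
# 0 is MQTT_ERR_SUCCESS. connect() returns just the result code, so it is mocked as 0.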
# pylint: disable=invalid-name
class TestMQTTComponent(unittest.TestCase):
"""Test the MQTT component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
self.calls = []
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
@callback
def record_calls(self, *args):
"""Record calls."""
self.calls.append(args)
def test_client_stops_on_home_assistant_stop(self):
"""Test if client stops on HA stop."""
self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
self.hass.block_till_done()
assert self.hass.data["mqtt"].async_disconnect.called
def test_publish_calls_service(self):
"""Test the publishing of call to services."""
self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)
mqtt.publish(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].data["service_data"][mqtt.ATTR_TOPIC] == "test-topic"
assert (
self.calls[0][0].data["service_data"][mqtt.ATTR_PAYLOAD] == "test-payload"
)
def test_service_call_without_topic_does_not_publish(self):
"""Test the service call if topic is missing."""
self.hass.bus.fire(
EVENT_CALL_SERVICE,
{ATTR_DOMAIN: mqtt.DOMAIN, ATTR_SERVICE: mqtt.SERVICE_PUBLISH},
)
self.hass.block_till_done()
assert not self.hass.data["mqtt"].async_publish.called
def test_service_call_with_template_payload_renders_template(self):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
self.hass.block_till_done()
assert self.hass.data["mqtt"].async_publish.called
assert self.hass.data["mqtt"].async_publish.call_args[0][1] == "2"
def test_service_call_with_payload_doesnt_render_template(self):
"""Test the service call with unrendered template.
If both 'payload' and 'payload_template' are provided then fail.
"""
payload = "not a template"
payload_template = "a template"
with pytest.raises(vol.Invalid):
self.hass.services.call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template,
},
blocking=True,
)
assert not self.hass.data["mqtt"].async_publish.called
def test_service_call_with_ascii_qos_retain_flags(self):
"""Test the service call with args that can be misinterpreted.
Empty payload message and ascii formatted qos and retain flags.
"""
self.hass.services.call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: "",
mqtt.ATTR_QOS: "2",
mqtt.ATTR_RETAIN: "no",
},
blocking=True,
)
assert self.hass.data["mqtt"].async_publish.called
assert self.hass.data["mqtt"].async_publish.call_args[0][2] == 2
assert not self.hass.data["mqtt"].async_publish.call_args[0][3]
def test_validate_topic(self):
"""Test topic name/filter validation."""
# Invalid UTF-8, must not contain U+D800 to U+DFFF.
with pytest.raises(vol.Invalid):
mqtt.valid_topic("\ud800")
with pytest.raises(vol.Invalid):
mqtt.valid_topic("\udfff")
# Topic MUST NOT be empty
with pytest.raises(vol.Invalid):
mqtt.valid_topic("")
# Topic MUST NOT be longer than 65535 encoded bytes.
with pytest.raises(vol.Invalid):
mqtt.valid_topic("ü" * 32768)
# UTF-8 MUST NOT include null character
with pytest.raises(vol.Invalid):
mqtt.valid_topic("bad\0one")
# Topics "SHOULD NOT" include these special characters
# (not MUST NOT, RFC2119). The receiver MAY close the connection.
mqtt.valid_topic("\u0001")
mqtt.valid_topic("\u001F")
mqtt.valid_topic("\u009F")
mqtt.valid_topic("\u009F")
mqtt.valid_topic("\uffff")
def test_validate_subscribe_topic(self):
"""Test invalid subscribe topics."""
mqtt.valid_subscribe_topic("#")
mqtt.valid_subscribe_topic("sport/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/#/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/bar#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/#/bar")
mqtt.valid_subscribe_topic("+")
mqtt.valid_subscribe_topic("+/tennis/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+1")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("bad+topic")
mqtt.valid_subscribe_topic("sport/+/player1")
mqtt.valid_subscribe_topic("/finance")
mqtt.valid_subscribe_topic("+/+")
mqtt.valid_subscribe_topic("$SYS/#")
def test_validate_publish_topic(self):
"""Test invalid publish topics."""
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub/+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("1#")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("bad+topic")
mqtt.valid_publish_topic("//")
# Topic names beginning with $ SHOULD NOT be used, but can
mqtt.valid_publish_topic("$SYS/")
def test_entity_device_info_schema(self):
"""Test MQTT entity device info validation."""
# just identifier
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": ["abcd"]})
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": "abcd"})
# just connection
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{"connections": [["mac", "02:5b:26:a8:dc:12"]]}
)
# full device info
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [["mac", "02:5b:26:a8:dc:12"], ["zigbee", "zigbee_id"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# full device info with via_device
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [["mac", "02:5b:26:a8:dc:12"], ["zigbee", "zigbee_id"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"via_device": "test-hub",
}
)
# no identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# empty identifiers
with pytest.raises(vol.Invalid):
mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{"identifiers": [], "connections": [], "name": "Beer"}
)
# pylint: disable=invalid-name
class TestMQTTCallbacks(unittest.TestCase):
"""Test the MQTT callbacks."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_client(self.hass)
self.calls = []
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
@callback
def record_calls(self, *args):
"""Record calls."""
self.calls.append(args)
def test_client_starts_on_home_assistant_mqtt_setup(self):
"""Test if client is connected after mqtt init on bootstrap."""
assert self.hass.data["mqtt"]._mqttc.connect.call_count == 1
def test_receiving_non_utf8_message_gets_logged(self):
"""Test receiving a non utf8 encoded message."""
mqtt.subscribe(self.hass, "test-topic", self.record_calls)
with self.assertLogs(level="WARNING") as test_handle:
fire_mqtt_message(self.hass, "test-topic", b"\x9a")
self.hass.block_till_done()
assert (
"WARNING:homeassistant.components.mqtt:Can't decode payload "
"b'\\x9a' on test-topic with encoding utf-8" in test_handle.output[0]
)
def test_all_subscriptions_run_when_decode_fails(self):
"""Test all other subscriptions still run when decode fails for one."""
mqtt.subscribe(self.hass, "test-topic", self.record_calls, encoding="ascii")
mqtt.subscribe(self.hass, "test-topic", self.record_calls)
fire_mqtt_message(self.hass, "test-topic", TEMP_CELSIUS)
self.hass.block_till_done()
assert len(self.calls) == 1
def test_subscribe_topic(self):
"""Test the subscription of a topic."""
unsub = mqtt.subscribe(self.hass, "test-topic", self.record_calls)
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "test-topic"
assert self.calls[0][0].payload == "test-payload"
unsub()
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
def test_subscribe_deprecated(self):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
@callback
def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = mqtt.subscribe(self.hass, "test-topic", record_calls)
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(calls) == 1
def test_subscribe_deprecated_async(self):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
@callback
async def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = mqtt.subscribe(self.hass, "test-topic", record_calls)
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(calls) == 1
def test_subscribe_topic_not_match(self):
"""Test if subscribed topic is not a match."""
mqtt.subscribe(self.hass, "test-topic", self.record_calls)
fire_mqtt_message(self.hass, "another-test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
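# The wildcard tests below rely on standard MQTT filter semantics:
# '+' matches exactly one topic level (so "test-topic/+/on" matches "test-topic/bier/on"
# but not "test-topic/bier"), while '#' matches the whole remaining subtree, including
# the parent topic itself (so "test-topic/#" matches both "test-topic" and "test-topic/bier/on").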
def test_subscribe_topic_level_wildcard(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/+/on", self.record_calls)
fire_mqtt_message(self.hass, "test-topic/bier/on", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "test-topic/bier/on"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_level_wildcard_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/+/on", self.record_calls)
fire_mqtt_message(self.hass, "test-topic/bier", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
def test_subscribe_topic_level_wildcard_root_topic_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "test-topic-123", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "test-topic/bier/on", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "test-topic/bier/on"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_subtree_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "test-topic"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_subtree_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "another-test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
def test_subscribe_topic_level_wildcard_and_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "+/test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "hi/test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "hi/test-topic"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_level_wildcard_and_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "+/test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "hi/test-topic/here-iam", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "hi/test-topic/here-iam"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_level_wildcard_and_wildcard_level_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "+/test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "hi/here-iam/test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
def test_subscribe_topic_level_wildcard_and_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, "+/test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "hi/another-test-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 0
def test_subscribe_topic_sys_root(self):
"""Test the subscription of $ root topics."""
mqtt.subscribe(self.hass, "$test-topic/subtree/on", self.record_calls)
fire_mqtt_message(self.hass, "$test-topic/subtree/on", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "$test-topic/subtree/on"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_sys_root_and_wildcard_topic(self):
"""Test the subscription of $ root and wildcard topics."""
mqtt.subscribe(self.hass, "$test-topic/#", self.record_calls)
fire_mqtt_message(self.hass, "$test-topic/some-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "$test-topic/some-topic"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_topic_sys_root_and_wildcard_subtree_topic(self):
"""Test the subscription of $ root and wildcard subtree topics."""
mqtt.subscribe(self.hass, "$test-topic/subtree/#", self.record_calls)
fire_mqtt_message(self.hass, "$test-topic/subtree/some-topic", "test-payload")
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == "$test-topic/subtree/some-topic"
assert self.calls[0][0].payload == "test-payload"
def test_subscribe_special_characters(self):
"""Test the subscription to topics with special characters."""
topic = "/test-topic/$(.)[^]{-}"
payload = "p4y.l[]a|> ?"
mqtt.subscribe(self.hass, topic, self.record_calls)
fire_mqtt_message(self.hass, topic, payload)
self.hass.block_till_done()
assert len(self.calls) == 1
assert self.calls[0][0].topic == topic
assert self.calls[0][0].payload == payload
def test_retained_message_on_subscribe_received(self):
"""Test every subscriber receives retained message on subscribe."""
def side_effect(*args):
async_fire_mqtt_message(self.hass, "test/state", "online")
return 0, 0
self.hass.data["mqtt"]._mqttc.subscribe.side_effect = side_effect
# Fake that the client is connected
self.hass.data["mqtt"].connected = True
calls_a = MagicMock()
mqtt.subscribe(self.hass, "test/state", calls_a)
self.hass.block_till_done()
assert calls_a.called
calls_b = MagicMock()
mqtt.subscribe(self.hass, "test/state", calls_b)
self.hass.block_till_done()
assert calls_b.called
def test_not_calling_unsubscribe_with_active_subscribers(self):
"""Test not calling unsubscribe() when other subscribers are active."""
# Fake that the client is connected
self.hass.data["mqtt"].connected = True
unsub = mqtt.subscribe(self.hass, "test/state", None)
mqtt.subscribe(self.hass, "test/state", None)
self.hass.block_till_done()
assert self.hass.data["mqtt"]._mqttc.subscribe.called
unsub()
self.hass.block_till_done()
assert not self.hass.data["mqtt"]._mqttc.unsubscribe.called
def test_restore_subscriptions_on_reconnect(self):
"""Test subscriptions are restored on reconnect."""
# Fake that the client is connected
self.hass.data["mqtt"].connected = True
mqtt.subscribe(self.hass, "test/state", None)
self.hass.block_till_done()
assert self.hass.data["mqtt"]._mqttc.subscribe.call_count == 1
self.hass.data["mqtt"]._mqtt_on_disconnect(None, None, 0)
self.hass.data["mqtt"]._mqtt_on_connect(None, None, None, 0)
self.hass.block_till_done()
assert self.hass.data["mqtt"]._mqttc.subscribe.call_count == 2
def test_restore_all_active_subscriptions_on_reconnect(self):
"""Test active subscriptions are restored correctly on reconnect."""
# Fake that the client is connected
self.hass.data["mqtt"].connected = True
self.hass.data["mqtt"]._mqttc.subscribe.side_effect = (
(0, 1),
(0, 2),
(0, 3),
(0, 4),
)
unsub = mqtt.subscribe(self.hass, "test/state", None, qos=2)
mqtt.subscribe(self.hass, "test/state", None)
mqtt.subscribe(self.hass, "test/state", None, qos=1)
self.hass.block_till_done()
expected = [
call("test/state", 2),
call("test/state", 0),
call("test/state", 1),
]
assert self.hass.data["mqtt"]._mqttc.subscribe.mock_calls == expected
unsub()
self.hass.block_till_done()
assert self.hass.data["mqtt"]._mqttc.unsubscribe.call_count == 0
self.hass.data["mqtt"]._mqtt_on_disconnect(None, None, 0)
self.hass.data["mqtt"]._mqtt_on_connect(None, None, None, 0)
self.hass.block_till_done()
expected.append(call("test/state", 1))
assert self.hass.data["mqtt"]._mqttc.subscribe.mock_calls == expected
async def test_setup_embedded_starts_with_no_config(hass):
"""Test setting up embedded server with no config."""
client_config = ("localhost", 1883, "user", "pass", None, "3.1.1")
with patch(
"homeassistant.components.mqtt.server.async_start",
return_value=(True, client_config),
) as _start:
await async_mock_mqtt_client(hass, {})
assert _start.call_count == 1
async def test_setup_embedded_with_embedded(hass):
"""Test setting up embedded server with no config."""
client_config = ("localhost", 1883, "user", "pass", None, "3.1.1")
with patch(
"homeassistant.components.mqtt.server.async_start",
return_value=(True, client_config),
) as _start:
await async_mock_mqtt_client(hass, {"embedded": None})
assert _start.call_count == 1
async def test_setup_logs_error_if_no_connect_broker(hass, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = lambda *args: 1
assert await mqtt.async_setup_entry(hass, entry)
assert "Failed to connect to MQTT server:" in caplog.text
async def test_setup_raises_ConfigEntryNotReady_if_no_connect_broker(hass, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = MagicMock(side_effect=OSError("Connection error"))
assert await mqtt.async_setup_entry(hass, entry)
assert "Failed to connect to MQTT server due to exception:" in caplog.text
async def test_setup_uses_certificate_on_certificate_set_to_auto(hass, mock_mqtt):
"""Test setup uses bundled certs when certificate is set to auto."""
entry = MockConfigEntry(
domain=mqtt.DOMAIN,
data={mqtt.CONF_BROKER: "test-broker", "certificate": "auto"},
)
assert await mqtt.async_setup_entry(hass, entry)
assert mock_mqtt.called
import requests.certs
expectedCertificate = requests.certs.where()
assert mock_mqtt.mock_calls[0][2]["certificate"] == expectedCertificate
async def test_setup_does_not_use_certificate_on_mqtts_port(hass, mock_mqtt):
"""Test setup doesn't use bundled certs when ssl set."""
entry = MockConfigEntry(
domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker", "port": 8883}
)
assert await mqtt.async_setup_entry(hass, entry)
assert mock_mqtt.called
assert mock_mqtt.mock_calls[0][2]["port"] == 8883
import requests.certs
mqttsCertificateBundle = requests.certs.where()
assert mock_mqtt.mock_calls[0][2]["port"] != mqttsCertificateBundle
async def test_setup_without_tls_config_uses_tlsv1_under_python36(hass, mock_mqtt):
"""Test setup defaults to TLSv1 under python3.6."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
assert await mqtt.async_setup_entry(hass, entry)
assert mock_mqtt.called
import sys
if sys.hexversion >= 0x03060000:
expectedTlsVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
expectedTlsVersion = ssl.PROTOCOL_TLSv1
assert mock_mqtt.mock_calls[0][2]["tls_version"] == expectedTlsVersion
async def test_setup_with_tls_config_uses_tls_version1_2(hass, mock_mqtt):
"""Test setup uses specified TLS version."""
entry = MockConfigEntry(
domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker", "tls_version": "1.2"}
)
assert await mqtt.async_setup_entry(hass, entry)
assert mock_mqtt.called
assert mock_mqtt.mock_calls[0][2]["tls_version"] == ssl.PROTOCOL_TLSv1_2
async def test_setup_with_tls_config_of_v1_under_python36_only_uses_v1(hass, mock_mqtt):
"""Test setup uses TLSv1.0 if explicitly chosen."""
entry = MockConfigEntry(
domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker", "tls_version": "1.0"}
)
assert await mqtt.async_setup_entry(hass, entry)
assert mock_mqtt.called
assert mock_mqtt.mock_calls[0][2]["tls_version"] == ssl.PROTOCOL_TLSv1
async def test_birth_message(hass):
"""Test sending birth message."""
mqtt_client = await async_mock_mqtt_client(
hass,
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "birth",
mqtt.ATTR_PAYLOAD: "birth",
},
},
)
calls = []
mqtt_client.publish.side_effect = lambda *args: calls.append(args)
hass.data["mqtt"]._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
assert calls[-1] == ("birth", "birth", 0, False)
async def test_mqtt_subscribes_topics_on_connect(hass):
"""Test subscription to topic on connect."""
mqtt_client = await async_mock_mqtt_client(hass)
hass.data["mqtt"].subscriptions = [
mqtt.Subscription("topic/test", None),
mqtt.Subscription("home/sensor", None, 2),
mqtt.Subscription("still/pending", None),
mqtt.Subscription("still/pending", None, 1),
]
hass.add_job = MagicMock()
hass.data["mqtt"]._mqtt_on_connect(None, None, 0, 0)
await hass.async_block_till_done()
assert mqtt_client.disconnect.call_count == 0
expected = {"topic/test": 0, "home/sensor": 2, "still/pending": 1}
calls = {call[1][1]: call[1][2] for call in hass.add_job.mock_calls}
assert calls == expected
async def test_setup_fails_without_config(hass):
"""Test if the MQTT component fails to load with no config."""
assert not await async_setup_component(hass, mqtt.DOMAIN, {})
@pytest.mark.no_fail_on_log_exception
async def test_message_callback_exception_gets_logged(hass, caplog):
"""Test exception raised by message handler."""
await async_mock_mqtt_component(hass)
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception("This is a bad message callback")
await mqtt.async_subscribe(hass, "test-topic", bad_handler)
async_fire_mqtt_message(hass, "test-topic", "test")
await hass.async_block_till_done()
assert (
"Exception in bad_handler when handling msg on 'test-topic':"
" 'test'" in caplog.text
)
async def test_mqtt_ws_subscription(hass, hass_ws_client):
"""Test MQTT websocket subscription."""
await async_mock_mqtt_component(hass)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "mqtt/subscribe", "topic": "test-topic"})
response = await client.receive_json()
assert response["success"]
async_fire_mqtt_message(hass, "test-topic", "test1")
async_fire_mqtt_message(hass, "test-topic", "test2")
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test1"
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test2"
# Unsubscribe
await client.send_json({"id": 8, "type": "unsubscribe_events", "subscription": 5})
response = await client.receive_json()
assert response["success"]
async def test_dump_service(hass):
"""Test that we can dump a topic."""
await async_mock_mqtt_component(hass)
mopen = mock_open()
await hass.services.async_call(
"mqtt", "dump", {"topic": "bla/#", "duration": 3}, blocking=True
)
async_fire_mqtt_message(hass, "bla/1", "test1")
async_fire_mqtt_message(hass, "bla/2", "test2")
with patch("homeassistant.components.mqtt.open", mopen):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=3))
await hass.async_block_till_done()
writes = mopen.return_value.write.mock_calls
assert len(writes) == 2
assert writes[0][1][0] == "bla/1,test1\n"
assert writes[1][1][0] == "bla/2,test2\n"
async def test_mqtt_ws_remove_discovered_device(
hass, device_reg, entity_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", config_entry)
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
# Verify device entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is None
async def test_mqtt_ws_remove_discovered_device_twice(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", config_entry)
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_discovered_device_same_topic(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", config_entry)
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "availability_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_non_mqtt_device(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal of device belonging to other domain."""
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_get_device_debug_info(
hass, device_reg, hass_ws_client, mqtt_mock
):
"""Test MQTT websocket device debug info."""
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", config_entry)
config = {
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"state_topic": "foobar/sensor",
"unique_id": "unique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}, set())
assert device_entry is not None
client = await hass_ws_client(hass)
await client.send_json(
{"id": 5, "type": "mqtt/device/debug_info", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
expected_result = {
"entities": [
{
"entity_id": "sensor.mqtt_sensor",
"subscriptions": [{"topic": "foobar/sensor", "messages": []}],
"discovery_data": {
"payload": config,
"topic": "homeassistant/sensor/bla/config",
},
}
],
"triggers": [],
}
assert response["result"] == expected_result
async def test_debug_info_multiple_devices(hass, mqtt_mock):
"""Test we get correct debug_info when multiple devices are present."""
devices = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD1"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD3"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
for d in devices:
data = json.dumps(d["config"])
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{id}/config", data)
await hass.async_block_till_done()
for d in devices:
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", id)}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
if d["domain"] != "device_automation":
assert len(debug_info_data["entities"]) == 1
assert len(debug_info_data["triggers"]) == 0
discovery_data = debug_info_data["entities"][0]["discovery_data"]
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
topic = d["config"]["state_topic"]
assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
else:
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 1
discovery_data = debug_info_data["triggers"][0]["discovery_data"]
assert discovery_data["topic"] == f"homeassistant/{domain}/{id}/config"
assert discovery_data["payload"] == d["config"]
async def test_debug_info_multiple_entities_triggers(hass, mqtt_mock):
"""Test we get correct debug_info for a device with multiple entities and triggers."""
config = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
for c in config:
data = json.dumps(c["config"])
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
async_fire_mqtt_message(hass, f"homeassistant/{domain}/{id}/config", data)
await hass.async_block_till_done()
device_id = config[0]["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", device_id)}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"]) == 2
assert len(debug_info_data["triggers"]) == 2
for c in config:
# Test we get debug info for each entity and trigger
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
if c["domain"] != "device_automation":
discovery_data = [e["discovery_data"] for e in debug_info_data["entities"]]
topic = c["config"]["state_topic"]
assert {"topic": topic, "messages": []} in [
t for e in debug_info_data["entities"] for t in e["subscriptions"]
]
else:
discovery_data = [e["discovery_data"] for e in debug_info_data["triggers"]]
assert {
"topic": f"homeassistant/{domain}/{id}/config",
"payload": c["config"],
} in discovery_data
async def test_debug_info_non_mqtt(hass, device_reg, entity_reg):
"""Test we get empty debug_info for a device with non MQTT entities."""
DOMAIN = "sensor"
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {"platform": "test"}})
debug_info_data = await debug_info.info_for_device(hass, device_entry.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 0
async def test_debug_info_wildcard(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
}
],
} in debug_info_data["entities"][0]["subscriptions"]
async def test_debug_info_filter_same(hass, mqtt_mock):
"""Test debug info removes messages with same timestamp."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
dt1 = datetime(2019, 1, 1, 0, 0, 0)
dt2 = datetime(2019, 1, 1, 0, 0, 1)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = dt1
async_fire_mqtt_message(hass, "sensor/abc", "123")
async_fire_mqtt_message(hass, "sensor/abc", "123")
dt_utcnow.return_value = dt2
async_fire_mqtt_message(hass, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert len(debug_info_data["entities"][0]["subscriptions"][0]["messages"]) == 2
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt1,
"topic": "sensor/abc",
},
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt2,
"topic": "sensor/abc",
},
],
} == debug_info_data["entities"][0]["subscriptions"][0]
async def test_debug_info_same_topic(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/status",
"availability_topic": "sensor/status",
"unique_id": "veryunique",
}
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/status", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/status", "123", qos=0, retain=False)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/status",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
config["availability_topic"] = "sensor/availability"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/status", "123", qos=0, retain=False)
async def test_debug_info_qos_retain(hass, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("homeassistant.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=0, retain=False)
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=1, retain=True)
async_fire_mqtt_message(hass, "sensor/abc", "123", qos=2, retain=False)
debug_info_data = await debug_info.info_for_device(hass, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 1,
"retain": True,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 2,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
| 35.902919
| 90
| 0.629063
|
62c24eb52672c79548e7f48dc4d653a9e8bcf90f
| 3,549
|
py
|
Python
|
cogs/inventory.py
|
xPolar/WumpusHack
|
370be4c1cf47a61a298e789fc596a3a08721fe10
|
[
"MIT"
] | 3
|
2021-03-03T23:50:07.000Z
|
2022-03-01T02:46:04.000Z
|
cogs/inventory.py
|
xPolar/WumpusHack
|
370be4c1cf47a61a298e789fc596a3a08721fe10
|
[
"MIT"
] | null | null | null |
cogs/inventory.py
|
xPolar/WumpusHack
|
370be4c1cf47a61a298e789fc596a3a08721fe10
|
[
"MIT"
] | 1
|
2022-03-01T02:46:07.000Z
|
2022-03-01T02:46:07.000Z
|
"""
MIT License
Copyright (c) 2019 xPolar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Packages.
## Packages that come with Python by default.
from bson.int64 import Int64
## Packages that have to be installed through the package manager.
import discord
from discord.ext import commands
## Packages on this machine.
import config
class Inventory(commands.Cog):
def __init__(self, bot : commands.AutoShardedBot) -> None:
"""Whenever the class gets initialized the following function will be executed.
Args:
bot (commands.AutoShardedBot): Our bot.
"""
self.bot = bot
async def get_prefix(self, message : discord.Message) -> str:
"""Get the prefix for a message.
Args:
message (discord.Message): The message to get the prefix for.
Returns:
str: The prefix that works for the server.
"""
# DMs have no guild-specific prefix, so fall back to the global default.
if not message.guild:
return config.prefix
document = config.prefix.clusters.find_one({"_id": Int64(message.guild.id)})
return document['prefix'] if document else config.prefix
@commands.command(aliases = ["i", "inv"])
async def inventory(self, ctx : commands.Context) -> None:
"""View all of the items in your inventory.
Args:
ctx (commands.Context): Discord's context object.
"""
if not ctx.invoked_subcommand:
# Delete the invoking message only in guild channels; a bot cannot delete a user's messages in DMs.
if ctx.guild:
await ctx.message.delete()
document = config.cluster.data.users.find_one({"_id": Int64(ctx.author.id)})
if not document:
return await ctx.author.send(f"You don't have a computer, please do `{await self.get_prefix(ctx.message)}login` to start your adventure!")
elif document["online"] == False:
return await ctx.author.send(f"Your computer is currently offline, please do `{await self.get_prefix(ctx.message)}login` to turn it on!")
else:
inventory = []
index = 0
for item in document["inventory"]:
inventory.append(f"**{item['type'].upper()}** - `{item['name']}`\n{item['system']} GHz | {item['cost']} MSRP\nID: `{index}`")
index += 1
embed = discord.Embed(
title = "Inventory",
description = "\n\n".join(inventory) if inventory != [] else "**You have no items in your inventory!**",
color = config.maincolor
)
await ctx.author.send(embed = embed)
def setup(bot):
bot.add_cog(Inventory(bot))
| 38.576087
| 150
| 0.656241
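As a usage note for the cog above: a minimal launcher sketch in the discord.py 1.x style that matches the synchronous setup() entry point. The prefix, extension path, and token are placeholders, not values taken from the repository.

# Minimal, hypothetical launcher for the Inventory cog (discord.py 1.x style).
import discord
from discord.ext import commands

bot = commands.AutoShardedBot(command_prefix="!", intents=discord.Intents.default())
bot.load_extension("cogs.inventory")  # runs setup(bot), which registers the Inventory cog
bot.run("YOUR_TOKEN")  # placeholder token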
|
f2d9093fe68a004f0aedc1501c24fc0a055f3a78
| 293
|
py
|
Python
|
src/main.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/main.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
src/main.py
|
Truta446/cardapio-digital-python-printer
|
5e69e445e5fb1b5a73837f27ef9e7f88c2c4efa9
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from resources import Printing
app = Flask(__name__)
CORS(app)
api = Api(app)
api.add_resource(Printing, '/print')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| 19.533333
| 51
| 0.703072
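A hedged client-side sketch for the service above: the endpoint comes from api.add_resource(Printing, '/print'), but the request body is a made-up placeholder because the Printing resource itself lives in resources.py and is not shown here.

# Example call against a locally running instance (port 5000 as configured above).
import requests

response = requests.post(
    "http://localhost:5000/print",
    json={"order_id": 123, "items": ["Burger", "Fries"]},  # hypothetical payload shape
)
print(response.status_code, response.text)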
|
c889a41616e5b74bd0aed4dd7147a5acbee57635
| 7,765
|
py
|
Python
|
hccpy/misc_scripts/data_validation.py
|
gma-coretechs/hccpy
|
59d23e91a927f8c90ffadb067e9d804b5505d503
|
[
"Apache-2.0"
] | null | null | null |
hccpy/misc_scripts/data_validation.py
|
gma-coretechs/hccpy
|
59d23e91a927f8c90ffadb067e9d804b5505d503
|
[
"Apache-2.0"
] | null | null | null |
hccpy/misc_scripts/data_validation.py
|
gma-coretechs/hccpy
|
59d23e91a927f8c90ffadb067e9d804b5505d503
|
[
"Apache-2.0"
] | null | null | null |
from itertools import chain
import logging
import sys
from pyspark.sql import functions as f
from pyspark.sql.session import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.types import ArrayType, StringType
spark = SparkSession.builder.getOrCreate()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
logger = logging.getLogger(__name__)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
log_handler.setLevel(logging.DEBUG)
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
input_path = '/data/raw/'
output_path = '/data/data_science/powerBI/'
def write_output(df):
logger.info("CREATING MASTER DATASET")
logger.info("WRITING: {}".format(output_path + "data_validation_with_diag.parquet"))
df.write.mode('overwrite').parquet(output_path + 'data_validation_with_diag.parquet')
return df
def main():
dfRAF = spark.read.csv('wasbs://rdp-uploads@coretechsnmdev.blob.core.windows.net/NW_diag_HCC_raf_new_V22.csv',header=True, sep='|')
dfT1 = spark.read.csv('wasbs://rdp-uploads@coretechsnmdev.blob.core.windows.net/GMA_T1_Template_2017_2021.csv',header=True)
diab = spark.read.csv('wasbs://rdp-uploads@coretechsnmdev.blob.core.windows.net/NW_diab_cmd_memb_level.csv', header=True)
dfRAF = dfRAF.drop('BENE_SEX_CD')
dfT1_2020 = dfT1.filter(f.col('file_source')== 'P.A2620.ACO.QALR.D200210.T1200012_1-1')
dfRAF_2020= dfRAF.filter(f.col('claim_year')=='2018')
df20 = dfT1_2020.join(dfRAF_2020, on='BENE_MBI_ID', how='left')
# df20 = spark.read.csv('wasbs://rdp-uploads@coretechsnmdev.blob.core.windows.net/PY2019.csv', header=True)
cols_list = [['HCC_COL_1', 'HCC1'], ['HCC_COL_2', 'HCC2'], ['HCC_COL_3', 'HCC6'], ['HCC_COL_4', 'HCC8'], ['HCC_COL_5', 'HCC9'], ['HCC_COL_6', 'HCC10'],
['HCC_COL_7', 'HCC11'],['HCC_COL_8', 'HCC12'],['HCC_COL_9', 'HCC17'],['HCC_COL_10', 'HCC18'],['HCC_COL_11', 'HCC19'],['HCC_COL_12', 'HCC21'],
['HCC_COL_13', 'HCC22'],['HCC_COL_14', 'HCC23'],['HCC_COL_15', 'HCC27'],['HCC_COL_16', 'HCC28'],['HCC_COL_17', 'HCC29'], ['HCC_COL_18', 'HCC33'],
['HCC_COL_19', 'HCC34'],['HCC_COL_20', 'HCC35'],['HCC_COL_21', 'HCC39'],['HCC_COL_22', 'HCC40'],['HCC_COL_23', 'HCC46'],['HCC_COL_24', 'HCC47'],['HCC_COL_25', 'HCC48'],['HCC_COL_26', 'HCC54'],
['HCC_COL_27', 'HCC55'],['HCC_COL_28', 'HCC57'],['HCC_COL_29', 'HCC58'],['HCC_COL_30', 'HCC70'],['HCC_COL_31', 'HCC71'],['HCC_COL_32', 'HCC72'],['HCC_COL_33', 'HCC73'],
['HCC_COL_34', 'HCC74'],['HCC_COL_35', 'HCC75'],['HCC_COL_36', 'HCC76'],['HCC_COL_37', 'HCC77'],['HCC_COL_38', 'HCC78'],['HCC_COL_39', 'HCC79'],['HCC_COL_40', 'HCC80'],['HCC_COL_41', 'HCC82'],
['HCC_COL_42', 'HCC83'],['HCC_COL_43', 'HCC84'],['HCC_COL_44', 'HCC85'],['HCC_COL_45', 'HCC86'],['HCC_COL_46', 'HCC87'],['HCC_COL_47', 'HCC88'],
['HCC_COL_48', 'HCC96'],['HCC_COL_49', 'HCC99'],['HCC_COL_50', 'HCC100'],['HCC_COL_51', 'HCC103'],['HCC_COL_52', 'HCC104'],['HCC_COL_53', 'HCC106'],['HCC_COL_54', 'HCC107'],['HCC_COL_55', 'HCC108'],
['HCC_COL_56', 'HCC110'],['HCC_COL_57', 'HCC111'],['HCC_COL_58', 'HCC112'],['HCC_COL_59', 'HCC114'],['HCC_COL_60', 'HCC115'],['HCC_COL_61', 'HCC122'],['HCC_COL_62', 'HCC124'],['HCC_COL_63', 'HCC134'],
['HCC_COL_64', 'HCC135'],['HCC_COL_65', 'HCC136'],['HCC_COL_66', 'HCC137'],['HCC_COL_67', 'HCC157'],['HCC_COL_68', 'HCC158'],['HCC_COL_69', 'HCC161'],['HCC_COL_70', 'HCC162'],['HCC_COL_71', 'HCC166'],
['HCC_COL_72', 'HCC167'],['HCC_COL_73', 'HCC169'],['HCC_COL_74', 'HCC170'],['HCC_COL_75', 'HCC173'],['HCC_COL_76', 'HCC176'],['HCC_COL_77', 'HCC186'],['HCC_COL_78', 'HCC188'],['HCC_COL_79', 'HCC189']]
old_name = [str(cols_list[i][0]) for i in range(len(cols_list))] + ['BENE_MBI_ID']
new_name = [str(cols_list[i][1]) for i in range(len(cols_list))] + ['BENE_MBI_ID']
df20hcc = df20.select(old_name).toDF(*new_name)
new_name.remove('BENE_MBI_ID')
df20hcc = df20hcc.select(*[f.when(f.col(x) == 1, f.lit(x)).otherwise(f.lit('')).alias(x) for x in new_name], 'BENE_MBI_ID')
df20hcc = df20hcc.withColumn('HCC_ALR', f.concat_ws(',', *new_name))
df20hcc = df20hcc.withColumn("HCC_ALR", f.split(f.col("HCC_ALR"), ",\s*").cast(ArrayType(StringType())).alias("HCC_ALR"))
df20hcc = df20hcc.withColumn("HCC_ALR", f.expr("filter(HCC_ALR, elem -> elem != '')"))
df20hcc = df20hcc.select('BENE_MBI_ID', 'HCC_ALR')
df20 = df20.join(df20hcc, on=['BENE_MBI_ID'], how='left')
df20 = df20.withColumn("hcc_nobrackets", f.regexp_replace('hcc_lst',"\\[", ""))
df20 = df20.withColumn('hcc_nobrackets', f.regexp_replace('hcc_nobrackets', '\\]', ''))
df20 = df20.withColumn('hcc_nobrackets', f.regexp_replace('hcc_nobrackets', "\'", ''))
df20 = df20.withColumn('hcc_nobrackets', f.regexp_replace('hcc_nobrackets', " ", ''))
df20 = df20.withColumn('hcc_lst', f.split('hcc_nobrackets', ','))
df20 = df20.withColumn('HCC_GMA', f.expr("filter(hcc_lst, x -> x not rlike '[_]')"))
df20 = df20.withColumn('HCC_DIFF', f.array_except('HCC_ALR','HCC_GMA'))
df20 = df20.filter(f.col('HCC_GMA').isNotNull())
df20 = df20.select('BENE_MBI_ID', 'BENE_1ST_NAME', 'BENE_LAST_NAME', 'BENE_SEX_CD', 'ESRD_SCORE', 'DIS_SCORE', 'AGDU_SCORE', 'AGND_SCORE', 'DEM_ESRD_SCORE', 'DEM_DIS_SCORE', 'DEM_AGDU_SCORE', 'DEM_AGND_SCORE', 'BENE_AGE', 'concat_elig', 'oerc', 'source_year', 'claim_year', 'hcc_map', 'risk_score', 'risk_score_diff', 'details', 'hcc_lst_diff', 'hcc_map_diff', 'details_diff', 'cum_hcc_diff', 'HCC_ALR', 'HCC_GMA', 'HCC_DIFF')
diab = diab.select('BENE_MBI_ID', 'diagnosis_list', 'source_year', 'claim_year')
diab = diab.filter(f.col('claim_year')=='2018').filter(f.col('source_year')=='2018')
diab = diab.drop('claim_year','source_year')
df20 = df20.join(diab, on='BENE_MBI_ID', how='left')
df20 = df20.withColumn('HCC_ALR', f.col('HCC_ALR').cast('string'))
df20 = df20.withColumn('HCC_GMA', f.col('HCC_GMA').cast('string'))
df20 = df20.withColumn('HCC_DIFF', f.col('HCC_DIFF').cast('string'))
df20 = df20.withColumn('hcc_lst_diff', f.col('hcc_lst_diff').cast('string'))
df20 = df20.withColumn("HCC_ALR", f.regexp_replace('HCC_ALR',"\\,", "|"))
df20 = df20.withColumn("HCC_GMA", f.regexp_replace('HCC_GMA',"\\,", "|"))
df20 = df20.withColumn("HCC_DIFF", f.regexp_replace('HCC_DIFF',"\\,", "|"))
df20 = df20.withColumn("hcc_lst_diff", f.regexp_replace('hcc_lst_diff',"\\,", "|"))
# print(master.show(3, truncate=False))
df20 = df20.withColumnRenamed('risk_score', 'PROXY_RAF_SCORE')
df20 = df20.withColumnRenamed('risk_score_diff', 'OPPORTUNITY_RAF_SCORE')
df20 = df20.withColumnRenamed('hcc_map', 'HCCs_MAPPED_FROM_CCLFs')
df20 = df20.withColumnRenamed('details', 'HCC_RAF_DETAILS_COEFFICIENTS')
df20 = df20.withColumnRenamed('hcc_lst_diff', 'OPPORTUNITYvsPROXY_HCC_DELTA')
df20 = df20.withColumnRenamed('hcc_map_diff', 'OPPORTUNITYvsPROXY_HCC_DETAILS_DELTA')
df20 = df20.withColumnRenamed('cum_hcc_diff', 'CUMULATIVE_OPPORTUNITY_HCC_DELTA')
df20 = df20.withColumnRenamed('HCC_ALR', 'HCCs_MAPPED_FROM_ALRs')
df20 = df20.withColumnRenamed('HCC_DIFF', 'CCLFvsALR_HCC_DELTA')
df20 = df20.withColumnRenamed('diagnosis_list', 'CCLF_DIAGNOSIS_LIST')
write_output(df20)
df20.coalesce(1).write.mode('overwrite').option("header", "true").csv('wasbs://rdp-uploads@coretechsnmdev.blob.core.windows.net/data_validation_with_diag.csv')
if __name__ == "__main__":
logger.info('START')
main()
logger.info('END')
| 64.173554
| 431
| 0.665551
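For readers following the HCC column handling above, here is a small self-contained sketch of the same indicator-column-to-array transformation on a toy DataFrame. The column names and values are illustrative, and it only assumes a local pyspark installation (2.4+ for the higher-order filter expression).

# Toy reproduction of the HCC flag -> list pattern used in main() above.
from pyspark.sql import SparkSession, functions as f

spark = SparkSession.builder.master("local[1]").getOrCreate()

toy = spark.createDataFrame(
    [("A1", "1", "0"), ("A2", "1", "1")],
    ["BENE_MBI_ID", "HCC1", "HCC2"],
)
hcc_cols = ["HCC1", "HCC2"]

# Replace each 0/1 indicator with its own column name (or an empty string) ...
toy = toy.select(
    "BENE_MBI_ID",
    *[f.when(f.col(c) == 1, f.lit(c)).otherwise(f.lit("")).alias(c) for c in hcc_cols],
)
# ... then concatenate, split back into an array and drop the empty placeholders.
toy = toy.withColumn("HCC_ALR", f.concat_ws(",", *hcc_cols))
toy = toy.withColumn("HCC_ALR", f.split(f.col("HCC_ALR"), ","))
toy = toy.withColumn("HCC_ALR", f.expr("filter(HCC_ALR, elem -> elem != '')"))
toy.select("BENE_MBI_ID", "HCC_ALR").show()  # A1 -> [HCC1], A2 -> [HCC1, HCC2]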
|
cfcaa48ee45f4dee82f7e96ecddeda1a6ec61c25
| 866
|
py
|
Python
|
expandAttr.py
|
mar-esther23/random-python
|
74bddf69aeb4efc3013181c397c71bebb01fb049
|
[
"MIT"
] | null | null | null |
expandAttr.py
|
mar-esther23/random-python
|
74bddf69aeb4efc3013181c397c71bebb01fb049
|
[
"MIT"
] | null | null | null |
expandAttr.py
|
mar-esther23/random-python
|
74bddf69aeb4efc3013181c397c71bebb01fb049
|
[
"MIT"
] | null | null | null |
def expandAttr(attr, wildcard='*'):
'''
attr is a list of '0', '1' and wildcard entries.
Every wildcard is expanded into both '0' and '1', so the result is the list
of all fully specified attractors covered by attr.
'''
n = attr.count(wildcard)
expanded = [attr]
while n > 0:
add = []
for e in expanded:
index = e.index(wildcard)
e0 = e[:]
e0[index] = '0'
e1 = e[:]
e1[index] = '1'
add.append(e0)
add.append(e1)
expanded += add
n-=1
expanded = [e for e in expanded if e.count(wildcard)==0 ]
return expanded
# with open('minThInsulin-attr-short.csv') as f: data=f.read()
# data = data.strip().split('\n')
# data = [d.strip().split(',') for d in data]
# f = open('minThInsulin-attr.csv','w')
# for d in data:
# # print d
# attr = expandAttr(d)
# for a in attr: f.write(','.join(a) +'\n')
# f.close()
| 24.055556
| 62
| 0.501155
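A brief usage sketch for expandAttr, separate from the commented-out CSV block above; it assumes the file is importable as a module named expandAttr.

from expandAttr import expandAttr  # assumption: the file above is on the import path

# Every '*' position is expanded into both '0' and '1'.
print(expandAttr(['0', '*', '1', '*']))
# -> [['0', '0', '1', '0'], ['0', '0', '1', '1'], ['0', '1', '1', '0'], ['0', '1', '1', '1']]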
|
77ea1042f49ebdacb699e348e37733e7cfec6add
| 4,217
|
py
|
Python
|
libcloud/__init__.py
|
r2ronoha/libcloud
|
1524a4c54d79284f1172b32e0d9598ec8b47eda1
|
[
"Apache-2.0"
] | null | null | null |
libcloud/__init__.py
|
r2ronoha/libcloud
|
1524a4c54d79284f1172b32e0d9598ec8b47eda1
|
[
"Apache-2.0"
] | null | null | null |
libcloud/__init__.py
|
r2ronoha/libcloud
|
1524a4c54d79284f1172b32e0d9598ec8b47eda1
|
[
"Apache-2.0"
] | 1
|
2019-08-05T10:12:02.000Z
|
2019-08-05T10:12:02.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud provides a unified interface to the cloud computing resources.
:var __version__: Current version of libcloud
"""
import os
import codecs
import atexit
from libcloud.base import DriverType # NOQA
from libcloud.base import DriverTypeFactoryMap # NOQA
from libcloud.base import get_driver # NOQA
try:
# TODO: This import is slow and adds overhead in situations when no
# requests are made but it's necessary for detecting bad version of
# requests
import requests # NOQA
have_requests = True
except ImportError:
have_requests = False
__all__ = ["__version__", "enable_debug"]
__version__ = "3.5.2-dev"
def enable_debug(fo):
"""
Enable library wide debugging to a file-like object.
:param fo: Where to append debugging information
:type fo: File like object, only write operations are used.
"""
from libcloud.common.base import Connection
from libcloud.utils.loggingconnection import LoggingConnection
LoggingConnection.log = fo
Connection.conn_class = LoggingConnection
# Ensure the file handle is closed on exit
def close_file(fd):
try:
fd.close()
except Exception:
pass
atexit.register(close_file, fo)
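# Usage sketch (hedged, illustrative only): enable_debug accepts any writable
# file-like object, for example an in-memory buffer.
#
#     import io
#     import libcloud
#
#     debug_log = io.StringIO()
#     libcloud.enable_debug(debug_log)  # HTTP requests/responses are appended here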
def _init_once():
"""
    Utility function that is run once on library import.
This checks for the LIBCLOUD_DEBUG environment variable, which if it exists
is where we will log debug information about the provider transports.
This also checks for known environment/dependency incompatibilities.
"""
path = os.getenv("LIBCLOUD_DEBUG")
if path:
mode = "a"
# Special case for /dev/stderr and /dev/stdout on Python 3.
from libcloud.utils.py3 import PY3
# Opening those files in append mode will throw "illegal seek"
# exception there.
        # Late import to avoid setup.py related side effects
if path in ["/dev/stderr", "/dev/stdout"] and PY3:
mode = "w"
fo = codecs.open(path, mode, encoding="utf8")
enable_debug(fo)
# NOTE: We use lazy import to avoid unnecessary import time overhead
try:
import paramiko # NOQA
have_paramiko = True
except ImportError:
have_paramiko = False
if have_paramiko and hasattr(paramiko.util, "log_to_file"):
import logging
# paramiko always tries to open file path in append mode which
# won't work with /dev/{stdout, stderr} so we just ignore those
# errors
try:
paramiko.util.log_to_file(filename=path, level=logging.DEBUG)
except OSError as e:
if "illegal seek" not in str(e).lower():
raise e
# check for broken `yum install python-requests`
if have_requests and requests.__version__ == "2.6.0":
chardet_version = requests.packages.chardet.__version__
required_chardet_version = "2.3.0"
assert chardet_version == required_chardet_version, (
"Known bad version of requests detected! This can happen when "
"requests was installed from a source other than PyPI, e.g. via "
"a package manager such as yum. Please either install requests "
"from PyPI or run `pip install chardet==%s` to resolve this "
"issue." % required_chardet_version
)
_init_once()
| 32.945313
| 79
| 0.677496
|
a2f67e18bf2888336c501d3871251ff063283ee4
| 19
|
py
|
Python
|
projects/apscheduler/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 25
|
2021-10-30T19:54:59.000Z
|
2022-03-29T06:11:02.000Z
|
projects/apscheduler/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 21
|
2021-10-19T01:09:38.000Z
|
2022-03-24T16:08:53.000Z
|
projects/apscheduler/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 3
|
2022-01-25T20:25:13.000Z
|
2022-03-08T02:58:50.000Z
|
import apscheduler
| 9.5
| 18
| 0.894737
|
c80b17aa616fde48d65581d00c4e8fa293aa1f96
| 1,063
|
py
|
Python
|
setup.py
|
KeepSafe/content-validator
|
30e59100f3251aee20b3165d42fceba15a3f5ede
|
[
"Apache-2.0"
] | 1
|
2018-04-25T19:42:47.000Z
|
2018-04-25T19:42:47.000Z
|
setup.py
|
KeepSafe/content-validator
|
30e59100f3251aee20b3165d42fceba15a3f5ede
|
[
"Apache-2.0"
] | 12
|
2015-07-21T11:01:53.000Z
|
2021-03-31T18:53:35.000Z
|
setup.py
|
KeepSafe/content-validator
|
30e59100f3251aee20b3165d42fceba15a3f5ede
|
[
"Apache-2.0"
] | 2
|
2016-11-05T04:25:35.000Z
|
2018-04-25T19:42:49.000Z
|
import os
from setuptools import setup, find_packages
version = '0.6.1'
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
install_requires = [
'aiohttp >=3, <3.4',
'Markdown',
'parse >=1, <2',
'beautifulsoup4 >=4, <5',
'lxml >=3',
]
setup(
name='content-validator',
version=version,
    description=('Content validator looks at text content and performs different validation tasks'),
long_description='\n\n'.join((read('README.md'), read('CHANGELOG'))),
classifiers=[
'License :: OSI Approved :: BSD License', 'Intended Audience :: Developers', 'Programming Language :: Python'
],
author='Keepsafe',
author_email='support@getkeepsafe.com',
url='https://github.com/KeepSafe/google-play-cmd/',
license='Apache',
packages=find_packages(exclude=['tests']),
package_data={},
namespace_packages=[],
install_requires=install_requires,
entry_points={'console_scripts': ['content-validator = validator:main']},
include_package_data=False)
| 27.973684
| 117
| 0.665099
|
3dd6ac5056f8367e3b6ab84e7e565251ba93f03e
| 5,086
|
py
|
Python
|
tests/test_note_formatter.py
|
RhetTbull/evernote-backup
|
0decb5aa0f7537817b10d7fa51cf7a279304e991
|
[
"MIT"
] | 54
|
2021-05-03T15:38:34.000Z
|
2022-03-31T13:08:13.000Z
|
tests/test_note_formatter.py
|
RhetTbull/evernote-backup
|
0decb5aa0f7537817b10d7fa51cf7a279304e991
|
[
"MIT"
] | 13
|
2021-05-19T00:02:47.000Z
|
2022-03-05T20:04:26.000Z
|
tests/test_note_formatter.py
|
RhetTbull/evernote-backup
|
0decb5aa0f7537817b10d7fa51cf7a279304e991
|
[
"MIT"
] | 10
|
2021-05-15T22:58:33.000Z
|
2022-03-27T15:05:00.000Z
|
from datetime import datetime
from evernote.edam.type.ttypes import (
Data,
Note,
NoteAttributes,
Resource,
ResourceAttributes,
)
from evernote_backup import note_formatter_util
from evernote_backup.note_formatter import NoteFormatter
test_note_data = Note(
guid="7473cb3f-411e-4545-9df4-5eb731de4358",
title="Test Title",
content="<test content>",
contentHash=b"1234",
contentLength=2706,
created=1612902877000,
updated=1617813805000,
active=True,
updateSequenceNum=6711,
notebookGuid="c2ab541f-b704-4051-a2fa-40805e0fbf74",
tagGuids=[
"a51d61c3-8ff6-475f-b7ac-d72caf2ec84d",
"9e7d0ea5-9ff8-46c7-9b43-ccc468ba1adb",
],
resources=[
Resource(
guid="fe747857-92ea-4633-b415-6b9946f67519",
noteGuid="7473cb3f-411e-4545-9df4-5eb731de4358",
mime="image/png",
width=403,
height=613,
active=True,
recognition=Data(bodyHash=b"1234", size=4332, body=b"1234"),
data=Data(bodyHash=b"1234", size=58387, body=b"1234"),
updateSequenceNum=6461,
attributes=ResourceAttributes(
fileName="test.png",
attachment=True,
),
)
],
attributes=NoteAttributes(
author="test@gmail.com",
source="desktop.win",
sourceURL="https://www.example.com/page?category=blog&post_id=123",
sourceApplication="evernote.win32",
),
tagNames=["test1", "test2"],
)
expected = """ <note>
<title>Test Title</title>
<created>20210209T203437Z</created>
<updated>20210407T164325Z</updated>
<tag>test1</tag>
<tag>test2</tag>
<note-attributes>
<author>test@gmail.com</author>
<source>desktop.win</source>
<source-url>https://www.example.com/page?category=blog&post_id=123</source-url>
<source-application>evernote.win32</source-application>
</note-attributes>
<content>
<![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<test content>]]>
</content>
<resource>
<data encoding="base64">
MTIzNA==
</data>
<mime>image/png</mime>
<width>403</width>
<height>613</height>
<resource-attributes>
<file-name>test.png</file-name>
<attachment>true</attachment>
</resource-attributes>
</resource>
</note>
"""
test_note_data_empty_tags = Note(
guid="7473cb3f-411e-4545-9df4-5eb731de4358",
title="Test Title",
content="<test content>",
contentHash=b"1234",
contentLength=2706,
created=1612902877000,
updated=1617813805000,
active=True,
updateSequenceNum=6711,
notebookGuid="c2ab541f-b704-4051-a2fa-40805e0fbf74",
attributes=NoteAttributes(
author="test@gmail.com",
source="desktop.win",
sourceApplication="evernote.win32",
),
)
expected_empty_tags = """ <note>
<title>Test Title</title>
<created>20210209T203437Z</created>
<updated>20210407T164325Z</updated>
<note-attributes>
<author>test@gmail.com</author>
<source>desktop.win</source>
<source-application>evernote.win32</source-application>
</note-attributes>
<content>
<![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<test content>]]>
</content>
</note>
"""
def test_formatter():
formatter = NoteFormatter()
formatted_note = formatter.format_note(test_note_data)
assert formatted_note == expected
def test_formatter_empty_tags_resources():
formatter = NoteFormatter()
formatted_note = formatter.format_note(test_note_data_empty_tags)
assert formatted_note == expected_empty_tags
def test_formatter_empty_note():
formatter = NoteFormatter()
test_empty_note = Note()
expected_empty_note = " <note>\n </note>\n"
formatted_note = formatter.format_note(test_empty_note)
assert formatted_note == expected_empty_note
def test_formatter_xml_note():
formatter = NoteFormatter()
test_xml_note = Note(content="<?xml test xml stuff ?>\ntest content")
expected_xml_note = (
" <note>\n"
" <content>\n"
' <![CDATA[<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
"test content]]>\n"
" </content>\n"
" </note>\n"
)
formatted_note = formatter.format_note(test_xml_note)
assert formatted_note == expected_xml_note
def test_note_from_future(mocker):
formatter = NoteFormatter()
# 9999-12-31 23:59:59
end_of_times = 253402300799999
# Emulate windows limit
mock_timestamp = mocker.patch(
"evernote_backup.note_formatter_util._get_max_timestamp"
)
mock_timestamp.return_value = 32503748400
note_from_future = Note(
title="test",
created=end_of_times,
updated=end_of_times,
)
formatted_note = formatter.format_note(note_from_future)
assert "<created>99991231T235959Z</created>" in formatted_note
assert "<updated>99991231T235959Z</updated>" in formatted_note
| 26.910053
| 89
| 0.651396
|
932792f01d89ecc02458c1d6989c0325db4352fb
| 6,123
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_account_commands.py
|
Rajwanshi/azure-cli
|
32ca1e31cf7ce3c92c68c64069365c697d10c981
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_account_commands.py
|
Rajwanshi/azure-cli
|
32ca1e31cf7ce3c92c68c64069365c697d10c981
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/netappfiles/tests/latest/test_account_commands.py
|
Rajwanshi/azure-cli
|
32ca1e31cf7ce3c92c68c64069365c697d10c981
|
[
"MIT"
] | 1
|
2020-12-22T00:28:33.000Z
|
2020-12-22T00:28:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
LOCATION = "eastus2"
# No tidy up of tests required. The resource group is automatically removed
# As a refactoring consideration for the future, consider use of the authoring patterns described here
# https://github.com/Azure/azure-cli/blob/dev/doc/authoring_tests.md#sample-5-get-more-from-resourcegrouppreparer
class AzureNetAppFilesAccountServiceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_tests_rg_')
def test_create_delete_account(self):
account_name = self.create_random_name(prefix='cli', length=24)
tags = 'Tag1=Value1 Tag2=Value2'
# create and check
# note : active directory checks are performed in their own subgroup test
account = self.cmd("az netappfiles account create --resource-group {rg} --account-name '%s' -l %s --tags %s" % (account_name, LOCATION, tags)).get_output_in_json()
assert account['name'] == account_name
assert account['tags']['Tag1'] == 'Value1'
assert account['tags']['Tag2'] == 'Value2'
account_list = self.cmd("netappfiles account list --resource-group {rg}").get_output_in_json()
assert len(account_list) > 0
# delete and recheck
self.cmd("az netappfiles account delete --resource-group {rg} --account-name '%s'" % account_name)
account_list = self.cmd("netappfiles account list --resource-group {rg}").get_output_in_json()
assert len(account_list) == 0
# and again with short forms and also unquoted
account = self.cmd("az netappfiles account create -g {rg} -a %s -l %s --tags %s" % (account_name, LOCATION, tags)).get_output_in_json()
assert account['name'] == account_name
# note: key case must match
assert account['activeDirectories'] is None
account_list = self.cmd("netappfiles account list --resource-group {rg}").get_output_in_json()
assert len(account_list) > 0
self.cmd("az netappfiles account delete --resource-group {rg} -a %s" % account_name)
account_list = self.cmd("netappfiles account list --resource-group {rg}").get_output_in_json()
assert len(account_list) == 0
@ResourceGroupPreparer(name_prefix='cli_tests_rg_')
def test_list_accounts(self):
accounts = [self.create_random_name(prefix='cli', length=24), self.create_random_name(prefix='cli', length=24)]
for account_name in accounts:
self.cmd("az netappfiles account create -g {rg} -a %s -l %s --tags Tag1=Value1" % (account_name, LOCATION)).get_output_in_json()
account_list = self.cmd("netappfiles account list -g {rg}").get_output_in_json()
assert len(account_list) == 2
for account_name in accounts:
self.cmd("az netappfiles account delete -g {rg} -a %s" % account_name)
account_list = self.cmd("netappfiles account list --resource-group {rg}").get_output_in_json()
assert len(account_list) == 0
@ResourceGroupPreparer(name_prefix='cli_tests_rg_')
def test_get_account_by_name(self):
account_name = self.create_random_name(prefix='cli', length=24)
account = self.cmd("az netappfiles account create -g {rg} -a %s -l %s" % (account_name, LOCATION)).get_output_in_json()
account = self.cmd("az netappfiles account show --resource-group {rg} -a %s" % account_name).get_output_in_json()
assert account['name'] == account_name
account_from_id = self.cmd("az netappfiles account show --ids %s" % account['id']).get_output_in_json()
assert account_from_id['name'] == account_name
@ResourceGroupPreparer(name_prefix='cli_tests_rg_')
def test_update_account(self):
# only tags are checked here due to complications of active directory in automated test
account_name = self.create_random_name(prefix='cli', length=24)
tag = "Tag1=Value1"
account = self.cmd("az netappfiles account create -g {rg} -a %s -l %s" % (account_name, LOCATION)).get_output_in_json()
account = self.cmd("az netappfiles account update --resource-group {rg} -a %s --tags %s" % (account_name, tag)).get_output_in_json()
assert account['name'] == account_name
assert account['tags']['Tag1'] == 'Value1'
assert account['activeDirectories'] is None
@ResourceGroupPreparer(name_prefix='cli_tests_rg_')
def test_active_directory(self):
account_name = self.create_random_name(prefix='cli', length=24)
# create an account as normal
account = self.cmd("az netappfiles account create -g {rg} -a %s -l %s --tags Tag1=Value1" % (account_name, LOCATION)).get_output_in_json()
assert account['name'] == account_name
# now add an active directory
acc_with_active_directory = self.cmd("netappfiles account ad add -g {rg} -n %s --username aduser --password aduser --smb-server-name SMBSERVER --dns '1.2.3.4' --domain westcentralus" % (account_name)).get_output_in_json()
assert acc_with_active_directory['name'] == account_name
assert acc_with_active_directory['activeDirectories'][0]['username'] == 'aduser'
        # now list the active directories
active_directory = self.cmd("netappfiles account ad list -g {rg} -n %s" % (account_name)).get_output_in_json()
assert account['name'] == account_name
assert active_directory[0]['username'] == 'aduser'
# now remove using the previously obtained details
acc_with_active_directory = self.cmd("netappfiles account ad remove -g {rg} -n %s --active-directory %s" % (account_name, active_directory[0]['activeDirectoryId'])).get_output_in_json()
assert account['name'] == account_name
assert account['activeDirectories'] is None
| 57.224299
| 229
| 0.67075
|
b648cfe2aaeb4858fe272c216b5f80755ad1b296
| 879
|
py
|
Python
|
test/test_mesh.py
|
pyeprog/shapely_ext
|
5f3712a37bf577711f2f1e794c53cd885eee2684
|
[
"MIT"
] | 3
|
2020-05-23T16:27:43.000Z
|
2020-11-17T02:13:15.000Z
|
test/test_mesh.py
|
pyeprog/shapely_ext
|
5f3712a37bf577711f2f1e794c53cd885eee2684
|
[
"MIT"
] | null | null | null |
test/test_mesh.py
|
pyeprog/shapely_ext
|
5f3712a37bf577711f2f1e794c53cd885eee2684
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from shapely.geometry import box, Polygon, Point
from shapely_ext.mesh import TriMesher
class Test(TestCase):
def setUp(self) -> None:
self.mesher_11 = TriMesher(interpolate_distance=11)
self.mesher_5 = TriMesher(interpolate_distance=5)
def test_generate_tri_mesh_of_polygon(self):
rect = box(0, 0, 10, 10)
faces = self.mesher_11.mesh(rect) # non interpolation
self.assertTrue(all(isinstance(face, Polygon) for face in faces))
        self.assertEqual(2, len(faces))
faces = self.mesher_5.mesh(rect)
self.assertTrue(all(isinstance(face, Polygon) for face in faces))
        self.assertEqual(8, len(faces))
def test_generate_tri_mesh_of_non_polygon(self):
point = Point(0, 0)
with self.assertRaises(NotImplementedError):
self.mesher_11.mesh(point)
| 32.555556
| 73
| 0.68942
|
72bf6d7b88c60f47618ad4997f3a28d2da917595
| 3,648
|
py
|
Python
|
utils.py
|
anschwa/foodprint
|
3acc5377053a9eb48912e4712517457c491950bb
|
[
"MIT"
] | null | null | null |
utils.py
|
anschwa/foodprint
|
3acc5377053a9eb48912e4712517457c491950bb
|
[
"MIT"
] | null | null | null |
utils.py
|
anschwa/foodprint
|
3acc5377053a9eb48912e4712517457c491950bb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sqlite3
class DataBase:
def __init__(self, db_file):
self.db_file = db_file
self.rows = [("lamb", "meat", 39.2, 10400, "red", "red"),
("beef", "meat", 27, 15400, "red", "red"),
("pork", "meat", 12.1, 6000, "yellow", "yellow"),
("turkey", "meat", 10.9, 4300, "green", "green"),
("chicken", "meat", 6.9, 4300, "green", "green"),
("eggs", "gps", 4.8, 3265, "green", "green"),
("rice", "gps", 2.7, 2500, "red", "green"),
("tofu", "gps", 2, 926, "green", "green"),
("beans", "gps", 2, 4055, "red", "green"),
("lentils", "gps", 0.9, 4055, "red", "green"),
("peanut butter", "gps", 2.9, 628, "green", "green"),
("potatoes", "gps", 2.9, 322, "green", "green"),
("bread", "gps", 0.75, 1608, "yellow", "green"),
("tomatoes", "fruitvegg", 1.1, 322, "green", "green"),
("nuts", "fruitvegg", 2.3, 9063, "red", "yellow"),
("broccoli", "fruitvegg", 2, 322, "green", "green"),
("strawberries", "fruitvegg", 0.3, 322, "green", "green"),
("apple", "fruitvegg", 0.55, 962, "green", "green"),
("milk", "dairy", 1.9, 3180, "yellow", "green"),
("cheese", "dairy", 13.5, 3178, "yellow", "green"),
("yogurt", "dairy", 2.2, 778.05, "green", "green"),
("butter", "dairy", 23.8, 5553, "red", "yellow")]
self.init_db()
def init_db(self):
conn = sqlite3.connect(self.db_file)
cursor = conn.cursor()
cursor.execute("drop table if exists data;")
sql = """create table if not exists data (
id integer primary key,
food text not null,
type text not null,
co2 decimal not null,
water integer not null,
local text check (local in ('red', 'yellow', 'green')),
global text check (global in ('red', 'yellow', 'green')));"""
cursor.execute(sql)
insert = """insert into data (food, type, co2, water, local, global)
values (?,?,?,?,?,?);"""
cursor.executemany(insert, self.rows)
conn.commit()
cursor.close()
conn.close()
def get_food(self, food):
conn = sqlite3.connect(self.db_file)
cursor = conn.cursor()
food = food.lower()
sql = "select * from data where food = ?"
cursor.execute(sql, (food,))
result = cursor.fetchone()
conn.commit()
cursor.close()
conn.close()
response = None
if result is not None:
response = {"id": result[0],
"food": result[1],
"type": result[2],
"carbon": result[3],
"water": result[4],
"local": result[5],
"global": result[6]}
if response["type"] == "gps":
response["type"] = "Grains, Proteins, and Starch"
elif response["type"] == "fruitvegg":
response["type"] = "Fruits and Vegetables"
else:
response["type"] = response["type"].title()
return response
def fetch(self):
conn = sqlite3.connect(self.db_file)
cursor = conn.cursor()
cursor.execute("select food from data;")
response = cursor.fetchall()
conn.commit()
cursor.close()
conn.close()
return response
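# A small, runnable sketch (hedged): the database file name is illustrative and the
# calls simply exercise the schema defined above.
if __name__ == "__main__":
    db = DataBase("foodprint_demo.db")
    print(db.get_food("apple"))            # footprint record for a single food
    print([row[0] for row in db.fetch()])  # names of all foods in the table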
| 39.225806
| 79
| 0.462445
|
2c95743bbcdb36cab8512651d64d1538d511aabd
| 11,247
|
py
|
Python
|
poetry/factory.py
|
hongquan/poetry
|
d12f6421b1c34067e3968ddec2d821ae7f316af7
|
[
"MIT"
] | null | null | null |
poetry/factory.py
|
hongquan/poetry
|
d12f6421b1c34067e3968ddec2d821ae7f316af7
|
[
"MIT"
] | null | null | null |
poetry/factory.py
|
hongquan/poetry
|
d12f6421b1c34067e3968ddec2d821ae7f316af7
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
import shutil
from typing import Dict
from typing import List
from typing import Optional
from clikit.api.io.io import IO
from .config.config import Config
from .config.file_config_source import FileConfigSource
from .io.null_io import NullIO
from .json import validate_object
from .locations import CONFIG_DIR
from .packages.dependency import Dependency
from .packages.locker import Locker
from .packages.project_package import ProjectPackage
from .poetry import Poetry
from .repositories.pypi_repository import PyPiRepository
from .spdx import license_by_id
from .utils._compat import Path
from .utils.toml_file import TomlFile
class Factory:
"""
Factory class to create various elements needed by Poetry.
"""
def create_poetry(
self, cwd=None, io=None
): # type: (Optional[Path], Optional[IO]) -> Poetry
if io is None:
io = NullIO()
poetry_file = self.locate(cwd)
local_config = TomlFile(poetry_file.as_posix()).read()
if "tool" not in local_config or "poetry" not in local_config["tool"]:
raise RuntimeError(
"[tool.poetry] section not found in {}".format(poetry_file.name)
)
local_config = local_config["tool"]["poetry"]
# Checking validity
check_result = self.validate(local_config)
if check_result["errors"]:
message = ""
for error in check_result["errors"]:
message += " - {}\n".format(error)
raise RuntimeError("The Poetry configuration is invalid:\n" + message)
# Load package
name = local_config["name"]
version = local_config["version"]
package = ProjectPackage(name, version, version)
package.root_dir = poetry_file.parent
for author in local_config["authors"]:
package.authors.append(author)
for maintainer in local_config.get("maintainers", []):
package.maintainers.append(maintainer)
package.description = local_config.get("description", "")
package.homepage = local_config.get("homepage")
package.repository_url = local_config.get("repository")
package.documentation_url = local_config.get("documentation")
try:
license_ = license_by_id(local_config.get("license", ""))
except ValueError:
license_ = None
package.license = license_
package.keywords = local_config.get("keywords", [])
package.classifiers = local_config.get("classifiers", [])
if "readme" in local_config:
package.readme = Path(poetry_file.parent) / local_config["readme"]
if "platform" in local_config:
package.platform = local_config["platform"]
if "dependencies" in local_config:
for name, constraint in local_config["dependencies"].items():
if name.lower() == "python":
package.python_versions = constraint
continue
if isinstance(constraint, list):
for _constraint in constraint:
package.add_dependency(name, _constraint)
continue
package.add_dependency(name, constraint)
if "dev-dependencies" in local_config:
for name, constraint in local_config["dev-dependencies"].items():
if isinstance(constraint, list):
for _constraint in constraint:
package.add_dependency(name, _constraint, category="dev")
continue
package.add_dependency(name, constraint, category="dev")
extras = local_config.get("extras", {})
for extra_name, requirements in extras.items():
package.extras[extra_name] = []
# Checking for dependency
for req in requirements:
req = Dependency(req, "*")
for dep in package.requires:
if dep.name == req.name:
dep.in_extras.append(extra_name)
package.extras[extra_name].append(dep)
break
if "build" in local_config:
package.build = local_config["build"]
if "include" in local_config:
package.include = local_config["include"]
if "exclude" in local_config:
package.exclude = local_config["exclude"]
if "packages" in local_config:
package.packages = local_config["packages"]
# Custom urls
if "urls" in local_config:
package.custom_urls = local_config["urls"]
# Moving lock if necessary (pyproject.lock -> poetry.lock)
lock = poetry_file.parent / "poetry.lock"
if not lock.exists():
# Checking for pyproject.lock
old_lock = poetry_file.with_suffix(".lock")
if old_lock.exists():
shutil.move(str(old_lock), str(lock))
locker = Locker(poetry_file.parent / "poetry.lock", local_config)
# Loading global configuration
config = self.create_config(io)
# Loading local configuration
local_config_file = TomlFile(poetry_file.parent / "poetry.toml")
if local_config_file.exists():
if io.is_debug():
io.write_line(
"Loading configuration file {}".format(local_config_file.path)
)
config.merge(local_config_file.read())
poetry = Poetry(poetry_file, local_config, package, locker, config)
# Configuring sources
for source in local_config.get("source", []):
repository = self.create_legacy_repository(source, config)
is_default = source.get("default", False)
is_secondary = source.get("secondary", False)
if io.is_debug():
message = "Adding repository {} ({})".format(
repository.name, repository.url
)
if is_default:
message += " and setting it as the default one"
elif is_secondary:
message += " and setting it as secondary"
io.write_line(message)
poetry.pool.add_repository(repository, is_default, secondary=is_secondary)
# Always put PyPI last to prefer private repositories
# but only if we have no other default source
if not poetry.pool.has_default():
poetry.pool.add_repository(PyPiRepository(), True)
else:
if io.is_debug():
io.write_line("Deactivating the PyPI repository")
return poetry
@classmethod
def create_config(cls, io=None): # type: (Optional[IO]) -> Config
if io is None:
io = NullIO()
config = Config()
# Load global config
config_file = TomlFile(Path(CONFIG_DIR) / "config.toml")
if config_file.exists():
if io.is_debug():
io.write_line(
"<debug>Loading configuration file {}</debug>".format(
config_file.path
)
)
config.merge(config_file.read())
config.set_config_source(FileConfigSource(config_file))
# Load global auth config
auth_config_file = TomlFile(Path(CONFIG_DIR) / "auth.toml")
if auth_config_file.exists():
if io.is_debug():
io.write_line(
"<debug>Loading configuration file {}</debug>".format(
auth_config_file.path
)
)
config.merge(auth_config_file.read())
config.set_auth_config_source(FileConfigSource(auth_config_file))
return config
def create_legacy_repository(
self, source, auth_config
): # type: (Dict[str, str], Config) -> LegacyRepository
from .repositories.auth import Auth
from .repositories.legacy_repository import LegacyRepository
from .utils.helpers import get_client_cert, get_cert, get_http_basic_auth
if "url" in source:
# PyPI-like repository
if "name" not in source:
raise RuntimeError("Missing [name] in source.")
else:
raise RuntimeError("Unsupported source specified")
name = source["name"]
url = source["url"]
credentials = get_http_basic_auth(auth_config, name)
if credentials:
auth = Auth(url, credentials[0], credentials[1])
else:
auth = None
return LegacyRepository(
name,
url,
auth=auth,
cert=get_cert(auth_config, name),
client_cert=get_client_cert(auth_config, name),
)
@classmethod
def validate(
cls, config, strict=False
): # type: (dict, bool) -> Dict[str, List[str]]
"""
Checks the validity of a configuration
"""
result = {"errors": [], "warnings": []}
# Schema validation errors
validation_errors = validate_object(config, "poetry-schema")
result["errors"] += validation_errors
if strict:
# If strict, check the file more thoroughly
# Checking license
license = config.get("license")
if license:
try:
license_by_id(license)
except ValueError:
result["errors"].append("{} is not a valid license".format(license))
if "dependencies" in config:
python_versions = config["dependencies"]["python"]
if python_versions == "*":
result["warnings"].append(
"A wildcard Python dependency is ambiguous. "
"Consider specifying a more explicit one."
)
# Checking for scripts with extras
if "scripts" in config:
scripts = config["scripts"]
for name, script in scripts.items():
if not isinstance(script, dict):
continue
extras = script["extras"]
for extra in extras:
if extra not in config["extras"]:
result["errors"].append(
'Script "{}" requires extra "{}" which is not defined.'.format(
name, extra
)
)
return result
@classmethod
def locate(cls, cwd): # type: (Path) -> Path
candidates = [Path(cwd)]
candidates.extend(Path(cwd).parents)
for path in candidates:
poetry_file = path / "pyproject.toml"
if poetry_file.exists():
return poetry_file
else:
raise RuntimeError(
"Poetry could not find a pyproject.toml file in {} or its parents".format(
cwd
)
)
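# Usage sketch (hedged): assumes the target directory already contains a valid
# pyproject.toml; the path below is illustrative.
#
#     from pathlib import Path
#     poetry = Factory().create_poetry(cwd=Path("/path/to/project"))
#     print(poetry.package.name, poetry.package.version)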
| 34.289634
| 95
| 0.56824
|
54a72313491ee8f177ce3cf8a0eba068c74bd2a8
| 25,488
|
py
|
Python
|
sklearn/manifold/_spectral_embedding.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 2
|
2022-03-16T17:33:38.000Z
|
2022-03-17T11:50:21.000Z
|
sklearn/manifold/_spectral_embedding.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 9
|
2022-03-12T22:36:34.000Z
|
2022-03-27T06:47:36.000Z
|
sklearn/manifold/_spectral_embedding.py
|
huzq/scikit-learn
|
f862129f36786acbae3d9f2d161bbb72d77b87ec
|
[
"BSD-3-Clause"
] | 1
|
2020-02-16T05:40:12.000Z
|
2020-02-16T05:40:12.000Z
|
"""Spectral Embedding."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from scipy.sparse.csgraph import connected_components
from scipy.sparse.csgraph import laplacian as csgraph_laplacian
from ..base import BaseEstimator
from ..utils import (
check_array,
check_random_state,
check_symmetric,
)
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.fixes import lobpcg
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph, NearestNeighbors
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
        An array of bool values indicating the indexes of the nodes
belonging to the largest connected components of the given query
node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
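# Illustrative sketch (hedged, not part of the upstream module): a three-node path
# graph is connected, while a graph with an isolated node is not.
#
#     import numpy as np
#     path_graph = np.array([[0, 1, 0],
#                            [1, 0, 1],
#                            [0, 1, 0]])
#     _graph_is_connected(path_graph)  # True
#     _graph_is_connected(np.array([[0, 1, 0],
#                                   [1, 0, 0],
#                                   [0, 0, 0]]))  # False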
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
        An array or sparse matrix in a form that is well suited to fast
        eigenvalue decomposition, depending on the bandwidth of the
        matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol=0.0,
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method",
Andrew V. Knyazev
<10.1137/S1064827500366124>`
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
elif eigen_solver not in ("arpack", "lobpcg", "amg"):
raise ValueError(
"Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'" % eigen_solver
)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if (
eigen_solver == "arpack"
or eigen_solver != "lobpcg"
and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=eigen_tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.standard_normal(size=(laplacian.shape[0], n_components + 1))
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension and create initial
# approximation X to eigenvectors
X = random_state.standard_normal(
size=(laplacian.shape[0], n_components + 1)
)
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
_, diffusion_map = lobpcg(
laplacian, X, tol=1e-5, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
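# Minimal sketch (hedged) of calling spectral_embedding directly on a dense RBF
# affinity built from random data; the shapes and parameter values are illustrative.
#
#     import numpy as np
#     from sklearn.metrics.pairwise import rbf_kernel
#
#     X = np.random.RandomState(0).rand(20, 4)
#     affinity = rbf_kernel(X, gamma=1.0)  # symmetric (20, 20) affinity matrix
#     maps = spectral_embedding(affinity, n_components=2, random_state=0)
#     maps.shape  # (20, 2)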
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_neighbors_ : int
Number of nearest neighbors effectively used.
See Also
--------
Isomap : Non-linear dimensionality reduction through Isometric Mapping.
References
----------
- :doi:`A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<10.1007/s11222-007-9033-z>`
- On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- :doi:`Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<10.1109/34.868688>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
def __init__(
self,
n_components=2,
*,
affinity="nearest_neighbors",
gamma=None,
random_state=None,
eigen_solver=None,
n_neighbors=None,
n_jobs=None,
):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def _more_tags(self):
return {
"pairwise": self.affinity
in ["precomputed", "precomputed_nearest_neighbors"]
}
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr", ensure_min_samples=2)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, str):
if self.affinity not in {
"nearest_neighbors",
"rbf",
"precomputed",
"precomputed_nearest_neighbors",
}:
raise ValueError(
"%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable."
% self.affinity
)
elif not callable(self.affinity):
raise ValueError(
"'affinity' is expected to be an affinity name or a callable. Got: %s"
% self.affinity
)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Spectral embedding of the training matrix.
"""
self.fit(X)
return self.embedding_
| 37.928571
| 88
| 0.630728
|
22b694fe859e0e5f21e418d0bc0a50c5c4defb9f
| 6,366
|
py
|
Python
|
tests/test_ops_binary.py
|
evelynfay/ngraph-onnx
|
a5ba2745ee6151c98255bb736ef5f116ce0c1e3c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ops_binary.py
|
evelynfay/ngraph-onnx
|
a5ba2745ee6151c98255bb736ef5f116ce0c1e3c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ops_binary.py
|
evelynfay/ngraph-onnx
|
a5ba2745ee6151c98255bb736ef5f116ce0c1e3c
|
[
"Apache-2.0"
] | null | null | null |
# ******************************************************************************
# Copyright 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import print_function, division
import onnx
import numpy as np
import pytest
from onnx.helper import make_tensor_value_info, make_graph, make_model
from tests.utils import run_model
def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes):
inputs = [np.array(input_data_left), np.array(input_data_right)]
onnx_node = onnx.helper.make_node(op_type, inputs=['x', 'y'], outputs=['z'], **node_attributes)
input_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, value.shape)
for name, value in zip(onnx_node.input, inputs)]
output_tensors = [make_tensor_value_info(name, onnx.TensorProto.FLOAT, ())
for name in onnx_node.output]
graph = make_graph([onnx_node], 'compute_graph', input_tensors, output_tensors)
model = make_model(graph, producer_name='NgraphBackend')
model.opset_import[0].version = opset
return run_model(model, inputs)[0]
def test_add_opset4():
assert np.array_equal(import_and_compute('Add', 1, 2, opset=4),
np.array(3, dtype=np.float32))
assert np.array_equal(import_and_compute('Add', [1], [2], opset=4),
np.array([3], dtype=np.float32))
assert np.array_equal(import_and_compute('Add', [1, 2], [3, 4], opset=4),
np.array([4, 6], dtype=np.float32))
assert np.array_equal(import_and_compute('Add', [1, 2, 3], [4, 5, 6], opset=4),
np.array([5, 7, 9], dtype=np.float32))
assert np.array_equal(import_and_compute('Add', [[1, 2, 3], [4, 5, 6]], [7, 8, 9], broadcast=1, opset=4),
np.array([[8, 10, 12], [11, 13, 15]], dtype=np.float32))
# shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar
left_operand = np.ones((2, 3, 4, 5)).astype(np.float32)
assert np.array_equal(import_and_compute('Add', left_operand, 8, broadcast=1, opset=4),
left_operand + 8)
# shape(A) = (2, 3, 4, 5), shape(B) = (5,)
left_operand = np.ones((2, 3, 4, 5), dtype=np.float32)
right_operand = np.random.rand(5).astype(np.float32)
import_and_compute('Add', left_operand, right_operand, broadcast=1, opset=4)
# shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
left_operand = np.ones((2, 3, 4, 5), dtype=np.float32)
right_operand = np.random.rand(4, 5).astype(np.float32)
assert np.array_equal(import_and_compute('Add', left_operand, right_operand, broadcast=1, opset=4),
left_operand + right_operand)
# shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
left_operand = np.ones((2, 3, 4, 5), dtype=np.float32)
right_operand = np.random.rand(3, 4).astype(np.float32)
assert np.array_equal(
import_and_compute('Add', left_operand, right_operand, broadcast=1, axis=1, opset=4),
left_operand + right_operand.reshape(1, 3, 4, 1))
# shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0
left_operand = np.ones((2, 3, 4, 5), dtype=np.float32)
right_operand = np.random.rand(2).astype(np.float32)
assert np.array_equal(
import_and_compute('Add', left_operand, right_operand, broadcast=1, axis=0, opset=4),
left_operand + right_operand.reshape(2, 1, 1, 1))
@pytest.mark.parametrize('left_shape,right_shape', [
((1,), (1,)),
((256, 256, 3), (3,)),
((5, 4), (1,)),
((5, 4), (4,)),
((15, 3, 5), (3, 5)),
((15, 3, 5), (15, 1, 5)),
((15, 3, 5), (3, 1)),
((8, 1, 6, 1), (7, 1, 5)),
])
def test_add_opset7(left_shape, right_shape):
"""Test Add-7 operator, which uses numpy-style broadcasting."""
left_input = np.ones(left_shape)
right_input = np.ones(right_shape)
assert np.array_equal(import_and_compute('Add', left_input, right_input),
left_input + right_input)
def test_sub():
assert np.array_equal(import_and_compute('Sub', 20, 1),
np.array(19, dtype=np.float32))
assert np.array_equal(import_and_compute('Sub', [20], [1]),
np.array([19], dtype=np.float32))
assert np.array_equal(import_and_compute('Sub', [20, 19], [1, 2]),
np.array([19, 17], dtype=np.float32))
assert np.array_equal(import_and_compute('Sub', [[1, 2, 3], [4, 5, 6]], [7, 8, 9], broadcast=1),
np.array([[-6, -6, -6], [-3, -3, -3]], dtype=np.float32))
def test_mul():
assert np.array_equal(import_and_compute('Mul', 2, 3),
np.array(6, dtype=np.float32))
assert np.array_equal(import_and_compute('Mul', [2], [3]),
np.array([6], dtype=np.float32))
assert np.array_equal(import_and_compute('Mul', [2, 3], [4, 5]),
np.array([8, 15], dtype=np.float32))
assert np.array_equal(import_and_compute('Mul', [[1, 2, 3], [4, 5, 6]], [7, 8, 9], broadcast=1),
np.array([[7, 16, 27], [28, 40, 54]], dtype=np.float32))
def test_div():
assert np.array_equal(import_and_compute('Div', 6, 3),
np.array(2, dtype=np.float32))
assert np.array_equal(import_and_compute('Div', [6], [3]),
np.array([2], dtype=np.float32))
assert np.array_equal(import_and_compute('Div', [6, 8], [3, 2]),
np.array([2, 4], dtype=np.float32))
assert np.array_equal(import_and_compute('Div', [[10, 20, 30], [40, 50, 60]], [2, 5, 6], broadcast=1),
np.array([[5, 4, 5], [20, 10, 10]], dtype=np.float32))
| 42.724832
| 109
| 0.587496
|
e42a5f908630b86bc60a162c009b1a054d8dbddc
| 6,962
|
py
|
Python
|
main.py
|
vaibhavkrkm/Cookie-Clicker-Remake
|
1438cd7a56defaa50af49b6ccbfc2e50f2b18df6
|
[
"MIT"
] | null | null | null |
main.py
|
vaibhavkrkm/Cookie-Clicker-Remake
|
1438cd7a56defaa50af49b6ccbfc2e50f2b18df6
|
[
"MIT"
] | null | null | null |
main.py
|
vaibhavkrkm/Cookie-Clicker-Remake
|
1438cd7a56defaa50af49b6ccbfc2e50f2b18df6
|
[
"MIT"
] | null | null | null |
import pygame
from sys import exit as EXIT
def QUIT():
pygame.quit()
EXIT()
def save_cookies(clicked_cookies, clicked_cookies_best):
if(clicked_cookies > clicked_cookies_best):
clicked_cookies_best = clicked_cookies
with open("cookies_highscore.data", "w") as f:
f.write(str(clicked_cookies_best))
def load_cookies():
    # fall back to 0 if no highscore file exists yet (e.g. on the very first run)
    try:
        with open("cookies_highscore.data", "r") as f:
            clicked_cookies_best = int(f.read())
    except (FileNotFoundError, ValueError):
        clicked_cookies_best = 0
    return clicked_cookies_best
pygame.mixer.pre_init(frequency=44100, size=16, channels=1, buffer=512)
pygame.init()
SCREENWIDTH = SCREENHEIGHT = 800
FPS = 60
CLOCK = pygame.time.Clock()
game_display = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption("Cookie Clicker Remake with Pygame")
pygame.mouse.set_visible(False)
# loading the font
game_font = pygame.font.Font("font.TTF", 75)
cookies_font = pygame.font.Font("font.TTF", 40)
# loading assets
background = pygame.transform.scale(pygame.image.load("background.png"), (SCREENWIDTH, SCREENHEIGHT)).convert_alpha()
cursor = pygame.image.load("cursor.png").convert_alpha()
# buttons
reset_button = pygame.image.load("reset_button.png").convert_alpha()
reset_button_clicked = pygame.image.load("reset_button_clicked.png").convert_alpha()
sound_button = pygame.image.load("sound_button.png").convert_alpha()
sound_button_clicked = pygame.image.load("sound_button_clicked.png").convert_alpha()
clicked_cookies_best = load_cookies()
cookie = pygame.transform.scale(pygame.image.load("cookie.png"), (350, 350)).convert_alpha()
cookie_rect = cookie.get_rect()
cookie_rect.x = SCREENWIDTH // 2 - cookie.get_width() // 2
cookie_rect.y = SCREENHEIGHT // 2 - cookie.get_height() // 2
cookie_clicked = pygame.transform.scale(pygame.image.load("cookie.png"), (365, 365)).convert_alpha()
cookie_clicked_rect = cookie_clicked.get_rect()
cookie_clicked_rect.x = SCREENWIDTH // 2 - cookie_clicked.get_width() // 2
cookie_clicked_rect.y = SCREENHEIGHT // 2 - cookie_clicked.get_height() // 2
cookie_sound = pygame.mixer.Sound("click.wav")
button_sound = pygame.mixer.Sound("button_sound.wav")
positive1_sound = pygame.mixer.Sound("positive1.wav")
positive2_sound = pygame.mixer.Sound("positive2.wav")
sound_on = True
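# the click timer keeps the enlarged "clicked" cookie sprite on screen for
# click_timer seconds after each click (see the drawing section in the main loop)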
click_timer = 0.1
click_timer_running = False
click_timer_initial_time = None
clicked_cookies = 0
# text(s)
title_text = game_font.render("Cookie Clicker", True, (233, 153, 51))
best_title_text = cookies_font.render("Best", True, (251, 206, 17))
best_cookies_text = cookies_font.render(f"{clicked_cookies_best} cookies", True, (251, 206, 17))
current_title_text = cookies_font.render("Current", True, (233, 153, 51))
current_cookies_text = cookies_font.render(f"{clicked_cookies} cookies", True, (233, 153, 51))
run = True
while run:
CLOCK.tick(FPS)
# event section start
for event in pygame.event.get():
if(event.type == pygame.QUIT):
save_cookies(clicked_cookies, clicked_cookies_best)
QUIT()
if(event.type == pygame.MOUSEBUTTONDOWN):
if(event.button == 1):
if(cookie_rect.collidepoint(event.pos)):
# cookie click
if(sound_on):
pygame.mixer.Sound.play(cookie_sound)
clicked_cookies += 1
current_cookies_text = cookies_font.render(f"{clicked_cookies} cookies", True, (233, 153, 51))
if(clicked_cookies != 0 and sound_on):
if(clicked_cookies % 1000 == 0):
# play positive2 sound
pygame.mixer.Sound.play(positive2_sound)
elif(clicked_cookies % 100 == 0):
# play positive1 sound
pygame.mixer.Sound.play(positive1_sound)
click_timer_running = True
click_timer_initial_time = pygame.time.get_ticks() / 1000
elif(event.pos[0] >= SCREENWIDTH - 40 - reset_button.get_width() and event.pos[0] <= SCREENWIDTH - 40 and event.pos[1] >= SCREENHEIGHT // 2 - reset_button.get_height() // 2 and event.pos[1] <= SCREENHEIGHT // 2 - reset_button.get_height() // 2 + reset_button.get_height()):
# reset button click
if(sound_on):
pygame.mixer.Sound.play(button_sound)
save_cookies(clicked_cookies, clicked_cookies_best)
clicked_cookies = 0
current_cookies_text = cookies_font.render(f"{clicked_cookies} cookies", True, (233, 153, 51))
clicked_cookies_best = load_cookies()
best_cookies_text = cookies_font.render(f"{clicked_cookies_best} cookies", True, (251, 206, 17))
elif(event.pos[0] >= 0 + 40 and event.pos[0] <= 0 + 40 + sound_button.get_width() and event.pos[1] >= SCREENHEIGHT // 2 - sound_button.get_height() // 2 and event.pos[1] <= SCREENHEIGHT // 2 - sound_button.get_height() // 2 + sound_button.get_height()):
# sound button click
if(sound_on):
pygame.mixer.Sound.play(button_sound)
sound_on = not sound_on
# event section end
if(click_timer_running):
if(pygame.time.get_ticks() / 1000 - click_timer_initial_time >= click_timer):
click_timer_initial_time = None
click_timer_running = False
# getting the current mouse position
mouse_pos = pygame.mouse.get_pos()
# filling the display surface
game_display.blit(background, (0, 0))
    # displaying the separator line(s)
pygame.draw.line(game_display, (233, 153, 51), (SCREENWIDTH // 2, 620), (SCREENWIDTH // 2, 760))
# displaying the text(s)
game_display.blit(title_text, (SCREENWIDTH // 2 - title_text.get_width() // 2, 75)) # title
game_display.blit(current_title_text, (SCREENWIDTH // 2 + 50, 620))
game_display.blit(current_cookies_text, (SCREENWIDTH // 2 + 50, 700))
game_display.blit(best_title_text, (SCREENWIDTH // 2 - 50 - best_title_text.get_width(), 620))
game_display.blit(best_cookies_text, (SCREENWIDTH // 2 - 50 - best_cookies_text.get_width(), 700))
# button(s)
if(mouse_pos[0] >= SCREENWIDTH - 40 - reset_button.get_width() and mouse_pos[0] <= SCREENWIDTH - 40 and mouse_pos[1] >= SCREENHEIGHT // 2 - reset_button.get_height() // 2 and mouse_pos[1] <= SCREENHEIGHT // 2 - reset_button.get_height() // 2 + reset_button.get_height()):
game_display.blit(reset_button_clicked, (SCREENWIDTH - 40 - reset_button.get_width(), SCREENHEIGHT // 2 - reset_button.get_height() // 2))
else:
game_display.blit(reset_button, (SCREENWIDTH - 40 - reset_button.get_width(), SCREENHEIGHT // 2 - reset_button.get_height() // 2))
if(mouse_pos[0] >= 0 + 40 and mouse_pos[0] <= 0 + 40 + sound_button.get_width() and mouse_pos[1] >= SCREENHEIGHT // 2 - sound_button.get_height() // 2 and mouse_pos[1] <= SCREENHEIGHT // 2 - sound_button.get_height() // 2 + sound_button.get_height()):
game_display.blit(sound_button_clicked, (0 + 40, SCREENHEIGHT // 2 - sound_button.get_height() // 2))
else:
game_display.blit(sound_button, (0 + 40, SCREENHEIGHT // 2 - sound_button.get_height() // 2))
# displaying the cookie
if(not click_timer_running):
game_display.blit(cookie, cookie_rect)
else:
game_display.blit(cookie_clicked, cookie_clicked_rect)
# displaying the custom mouse cursor
game_display.blit(cursor, mouse_pos)
# updating the display surface
pygame.display.update()
| 41.195266
| 277
| 0.729963
|
d0335851d1af28b114341c203539110722fa086b
| 6,376
|
py
|
Python
|
CalibTracker/SiStripChannelGain/test/testSSTGain_PCL_FromRECO_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
CalibTracker/SiStripChannelGain/test/testSSTGain_PCL_FromRECO_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
CalibTracker/SiStripChannelGain/test/testSSTGain_PCL_FromRECO_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
# Auto generated configuration file
# with command line options: stepALCA --datatier ALCARECO --conditions auto:run2_data -s ALCA:PromptCalibProdSiStripGains --eventcontent ALCARECO -n 1000 --dasquery=file dataset=/ZeroBias/Run2016C-SiStripCalMinBias-18Apr2017-v1/ALCARECO run=276243 --no_exec
import FWCore.ParameterSet.Config as cms
import os
from Configuration.StandardSequences.Eras import eras
import Utilities.General.cmssw_das_client as das_client
###################################################################
def getFileNames_das_client():
###################################################################
"""Return files for given DAS query via das_client"""
files = []
query = "dataset dataset=/ZeroBias/Run2*SiStripCalMinBias-*/ALCARECO site=T2_CH_CERN"
jsondict = das_client.get_data(query)
status = jsondict['status']
if status != 'ok':
print "DAS query status: %s"%(status)
return files
data = jsondict['data']
viableDS = []
for element in data:
viableDS.append(element['dataset'][0]['name'])
print "Using Dataset:",viableDS[-1]
query = "file dataset=%s site=T2_CH_CERN | grep file.name" % viableDS[-1]
jsondict = das_client.get_data(query)
status = jsondict['status']
if status != 'ok':
print "DAS query status: %s"%(status)
return files
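    # the '| grep file.name' clause of the query comes back as a filter entry;
    # use it with das_client.get_value to pull the file name field out of each row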
mongo_query = jsondict['mongo_query']
filters = mongo_query['filters']
data = jsondict['data']
files = []
for row in data:
the_file = [r for r in das_client.get_value(row, filters['grep'])][0]
if len(the_file) > 0 and not the_file in files:
files.append(the_file)
return files
###################################################################
process = cms.Process('testFromALCARECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
###################################################################
# Messages
###################################################################
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.categories.append("SiStripGainsPCLWorker")
process.MessageLogger.destinations = cms.untracked.vstring("cout")
process.MessageLogger.cout = cms.untracked.PSet(
threshold = cms.untracked.string("DEBUG"),
default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
reportEvery = cms.untracked.int32(1000)
),
SiStripGainsPCLWorker = cms.untracked.PSet( limit = cms.untracked.int32(-1)),
)
process.MessageLogger.statistics.append('cout')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.AlCaRecoStreams_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
INPUTFILES=getFileNames_das_client()
if len(INPUTFILES)==0:
print "** WARNING: ** According to a DAS query no suitable data for test is available. Skipping test"
os._exit(0)
myFiles = cms.untracked.vstring()
myFiles.extend([INPUTFILES[0][0].replace("\"","")])
# Input source
process.source = cms.Source("PoolSource",
fileNames = myFiles,
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet()
# Additional output definition
process.ALCARECOStreamPromptCalibProdSiStripGains = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('pathALCARECOPromptCalibProdSiStripGains')
),
dataset = cms.untracked.PSet(dataTier = cms.untracked.string('ALCARECO'),
filterName = cms.untracked.string('PromptCalibProdSiStripGains')
),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
fileName = cms.untracked.string('PromptCalibProdSiStripGains.root'),
outputCommands = cms.untracked.vstring('drop *',
'keep *_MEtoEDMConvertSiStripGains_*_*'
)
)
# Other statements
process.ALCARECOEventContent.outputCommands.extend(process.OutALCARECOPromptCalibProdSiStripGains_noDrop.outputCommands)
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
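# the 'auto:run2_data' key is resolved through Configuration.AlCa.autoCond to the
# global tag recommended for Run 2 data in this release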
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.ALCARECOStreamPromptCalibProdSiStripGainsOutPath = cms.EndPath(process.ALCARECOStreamPromptCalibProdSiStripGains)
# Schedule definition
process.schedule = cms.Schedule(process.pathALCARECOPromptCalibProdSiStripGains,process.endjob_step,process.ALCARECOStreamPromptCalibProdSiStripGainsOutPath)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| 47.93985
| 257
| 0.592378
|