hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
611a48e8877904f720ec695f79c97f5f0e9fd035 | 2,342 | py | Python | app/lib/pushkin/pushkin/database/migrations/env.py | krzyhook/pushkin-on-docker | 05d192d0b4c753bcd41aba0a66394ae39dd78fc6 | ["MIT"] | null | null | null | app/lib/pushkin/pushkin/database/migrations/env.py | krzyhook/pushkin-on-docker | 05d192d0b4c753bcd41aba0a66394ae39dd78fc6 | ["MIT"] | null | null | null | app/lib/pushkin/pushkin/database/migrations/env.py | krzyhook/pushkin-on-docker | 05d192d0b4c753bcd41aba0a66394ae39dd78fc6 | ["MIT"] | null | null | null |
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# this will overwrite the ini-file sqlalchemy.url path
# with the path given in the config of the main code
from pushkin import config as pushkin_config
config.set_main_option('sqlalchemy.url', pushkin_config.sqlalchemy_url)
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from pushkin.database import model
target_metadata = model.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 30.415584 | 72 | 0.713493 |
54fe8710c63aa1e310bd3a9f4285cadc8080cad2 | 9,917 | py | Python | catboost/benchmarks/training_speed/plot.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | ["Apache-2.0"] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | catboost/benchmarks/training_speed/plot.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | ["Apache-2.0"] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | catboost/benchmarks/training_speed/plot.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | ["Apache-2.0"] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z |
import argparse
import json
import os
import numpy as np
from matplotlib import pyplot as plt
from log_parser import read_results
FONT_DICT = {'fontsize': 20}
FIGURE_SIZE = (10, 5)
def plot_time_per_iter(tracks, figsize=FIGURE_SIZE, title=None, save_path='time_per_iter.png'):
fig = plt.figure(figsize=figsize)
time_per_iters = []
algs = tracks.keys()
for alg_name in algs:
time_per_iter_alg = []
for track in tracks[alg_name]:
# aggregating statistic over different tracks
time_per_iter = track.get_time_per_iter()
time_per_iter_alg.extend(time_per_iter)
time_per_iters.append(time_per_iter_alg)
if title is not None:
plt.title(title, FONT_DICT)
for i, alg_name in enumerate(algs):
print(alg_name)
print(np.median(time_per_iters[i]))
plt.ylabel('Seconds', FONT_DICT)
plt.boxplot(time_per_iters, labels=algs)
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def plot_quality(tracks, from_iter, to_iter, figsize=FIGURE_SIZE, title=None, save_path='quality.png'):
fig = plt.figure(figsize=figsize)
if title is not None:
plt.title(title, FONT_DICT)
flat_tracks = []
for alg in tracks.keys():
flat_tracks += tracks[alg]
first_track = flat_tracks[0]
task_type = first_track.task_type
metric = 'Error' if task_type == 'Classification' or task_type == 'Multiclass' else 'RMSE'
plt.xlabel('iteration', FONT_DICT)
plt.ylabel(metric, FONT_DICT)
lines = []
names = []
for track in flat_tracks:
_, values = track.get_series()
cur_to_iter = to_iter
if to_iter is None or to_iter > track.get_fit_iterations():
cur_to_iter = track.get_fit_iterations()
values = values[from_iter:cur_to_iter]
x_values = np.arange(from_iter, cur_to_iter)
line, = plt.plot(x_values, values)
lines.append(line)
names.append(str(track))
plt.legend(lines, names, prop={'size': 9})
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def plot_quality_vs_time(tracks, best_quality, low_percent=0.8, num_bins=100, only_min=False,
figsize=FIGURE_SIZE, title=None, save_path='time_distr.png'):
fig = plt.figure(figsize=figsize)
if title is not None:
plt.title(title, FONT_DICT)
plt.xlabel('Quality (%)', FONT_DICT)
plt.ylabel('Time to obtain (sec)', FONT_DICT)
algs = tracks.keys()
up_percent = 1. - low_percent
for i, alg_name in enumerate(algs):
bins = [[] for j in range(num_bins + 1)]
for track in tracks[alg_name]:
time_series, values = track.get_series()
time_series = time_series - time_series[0]
for time, value in zip(time_series, values):
percent = value / best_quality - 1.
if percent > up_percent:
continue
idx = int(np.round(num_bins * percent / up_percent))
bins[idx].append(time)
time_median = []
time_q2 = []
time_min = []
x_values = []
for k, times in enumerate(bins):
if len(times) > 0:
time_median.append(np.median(times))
time_q2.append(np.quantile(times, 0.75))
time_min.append(np.min(times))
x_values.append(float(k) / num_bins * up_percent)
cur_min = time_min[0]
for t in range(1, len(time_min)):
if time_min[t] > cur_min:
time_min[t] = cur_min
else:
cur_min = time_min[t]
error_plus = np.array(time_q2) - np.array(time_median)
error_minus = np.array(time_median) - np.array(time_min)
x_values = np.array(x_values) - (float(i) - 1.) * up_percent / num_bins / 4.
x_values = 1. - x_values
if only_min:
plt.plot(x_values, time_min, label=alg_name)
else:
plt.errorbar(x=x_values, y=time_median, yerr=[error_minus, error_plus], fmt='o-', barsabove=True,
capsize=2, linewidth=2, label=alg_name)
plt.legend(fontsize='large')
if os.path.exists(save_path):
print('WARNING: file ' + save_path + ' already exists')
plt.savefig(save_path, dpi=100)
plt.close(fig)
def params_to_str(params):
    return ''.join('{}{}'.format(key, value) for key, value in params.items())
def get_best(tracks, top=1):
algorithms = tracks.keys()
best_tracks = {}
for algorithm_name in algorithms:
        best_scores = [track.get_best_score() for track in tracks[algorithm_name]]
        idx_best = np.argsort(best_scores)[:top]
        best_tracks[algorithm_name] = [tracks[algorithm_name][idx] for idx in idx_best]
return best_tracks
def filter_tracks(tracks, params_cases):
filtered_tracks = {}
for alg in tracks.keys():
filtered_tracks[alg] = []
for track in tracks[alg]:
if all([track.params_dict[param_name] in params_cases[param_name] for param_name in params_cases.keys()]):
filtered_tracks[alg].append(track)
return filtered_tracks
ONLY_TYPES = {
'cat-cpu': ['catboost-CPU'],
'xgb-cpu': ['xgboost-CPU'],
'lgb-cpu': ['lightgbm-CPU'],
'cat-gpu': ['catboost-GPU'],
'xgb-gpu': ['xgboost-GPU'],
'lgb-gpu': ['lightgbm-GPU'],
'cat': ['catboost-CPU', 'catboost-GPU'],
'xgb': ['xgboost-CPU', 'xgboost-GPU'],
'lgb': ['lightgbm-CPU', 'lightgbm-GPU'],
'cpu': ['catboost-CPU', 'xgboost-CPU', 'lightgbm-CPU'],
'gpu': ['catboost-GPU', 'xgboost-GPU', 'lightgbm-GPU']
}
def get_default_file_name(plot_type, params):
default_file_names = {
'best': 'best_quality.png',
'quality-vs-time': 'quality_vs_time.png',
'time-per-iter': 'time_per_iter.png'
}
if plot_type in default_file_names.keys():
return default_file_names[plot_type]
if plot_type == 'custom':
return params_to_str(params) + '.png'
def plot_experiment(tracks, experiment_name, args):
file_name = args.file_name if args.file_name else get_default_file_name(args.type, args.params_cases)
save_dir = os.path.join(args.out_dir, experiment_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, file_name)
if args.only:
filtered_tracks = {}
for only_type in args.only:
for alg_name in ONLY_TYPES[only_type]:
filtered_tracks[alg_name] = tracks[alg_name]
tracks = filtered_tracks
if args.params_cases:
with open(args.params_cases) as f:
params_cases = json.load(f)
tracks = filter_tracks(tracks, params_cases)
if args.type == 'quality-vs-time':
best_tracks = get_best(tracks)
best_quality = min(map(lambda tracks: tracks[0].get_best_score(), best_tracks.values()))
print(best_quality)
if args.top:
tracks = get_best(tracks, top=args.top)
plot_quality_vs_time(tracks, best_quality=best_quality, low_percent=args.low_percent, only_min=args.only_min,
figsize=args.fig_size, num_bins=args.num_bins, save_path=save_path)
if args.type == 'best':
best_tracks = get_best(tracks, top=args.top)
for alg in best_tracks.keys():
for track in best_tracks[alg]:
print(track)
print(track.get_best_score())
plot_quality(best_tracks, args.from_iter, args.to_iter, figsize=args.fig_size,
title=args.title, save_path=save_path)
if args.type == 'custom':
plot_quality(tracks, args.from_iter, args.to_iter,
figsize=args.fig_size, title=args.title, save_path=save_path)
if args.type == 'time-per-iter':
plot_time_per_iter(tracks, figsize=args.fig_size, title=args.title, save_path=save_path)
def main():
plot_functions = {
'time-per-iter': plot_time_per_iter,
'best': plot_quality,
'quality-vs-time': plot_quality_vs_time,
'custom': plot_quality
}
parser = argparse.ArgumentParser()
parser.add_argument('--type', choices=plot_functions.keys(), required=True)
parser.add_argument('--only', nargs='+', choices=ONLY_TYPES.keys(), required=False)
parser.add_argument('-i', '--results-file', required=True)
parser.add_argument('-t', '--title')
parser.add_argument('-f', '--fig-size', nargs=2, type=int, default=FIGURE_SIZE)
parser.add_argument('-o', '--out-dir', default='plots')
parser.add_argument('--params-cases', help='draw plots only with those params (tracks filtering)'
' path to json file, each line corresponds to learner '
'parameter (e.g. max_depth) and list of its values')
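    # Illustrative --params-cases file contents (hypothetical values; the keys
    # must match entries in each track's params_dict):
    #   {"max_depth": [6, 10], "learning_rate": [0.03, 0.1]}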
parser.add_argument('--from-iter', type=int, default=0, help='only custom, best modes')
parser.add_argument('--to-iter', type=int, default=None, help='only custom, best modes')
parser.add_argument('--low-percent', type=float, default=0.9, help='only quality-vs-time mode')
parser.add_argument('--num-bins', type=int, default=200, help='only quality-vs-time mode')
parser.add_argument('--only-min', action='store_true', help='only quality-vs-time mode')
parser.add_argument('--top', type=int, default=3, help='only best mode')
args = parser.parse_args()
tracks = read_results(args.results_file)
for experiment_name in tracks:
plot_experiment(tracks[experiment_name], experiment_name, args)
if __name__ == '__main__':
main()
| 32.729373 | 118 | 0.629223 |
7fcf227cf47da39e0ac00f9de2dc74f384627ed1 | 12,575 | py | Python | src/tests/file_download_feature_test.py | andrewheberle/script-server | 5d744179a2ab65f6d375b9b697c60732d9e5957f | ["Apache-2.0", "CC0-1.0"] | null | null | null | src/tests/file_download_feature_test.py | andrewheberle/script-server | 5d744179a2ab65f6d375b9b697c60732d9e5957f | ["Apache-2.0", "CC0-1.0"] | null | null | null | src/tests/file_download_feature_test.py | andrewheberle/script-server | 5d744179a2ab65f6d375b9b697c60732d9e5957f | ["Apache-2.0", "CC0-1.0"] | null | null | null |
import os
import unittest
from features import file_download_feature
from model.script_configs import Parameter
from tests import test_utils
from utils import file_utils
class TestFileMatching(unittest.TestCase):
def test_simple_match(self):
files = file_download_feature.find_matching_files('/home/user/test.txt', None)
self.assertEqual(files, ['/home/user/test.txt'])
def test_single_asterisk_1_match(self):
test_utils.create_file('test.txt')
files = file_download_feature.find_matching_files('*/test.txt', None)
self.assertEqual(files, [os.path.join(test_utils.temp_folder, 'test.txt')])
def test_single_asterisk_2_matches(self):
test_utils.create_file('test1.txt')
test_utils.create_file('test2.txt')
files = file_download_feature.find_matching_files('*/test*.txt', None)
self.assertCountEqual(files, [
os.path.join(test_utils.temp_folder, 'test1.txt'),
os.path.join(test_utils.temp_folder, 'test2.txt')
])
def test_double_asterisk_match(self):
test_utils.create_file(os.path.join('test', 'test.txt'))
files = set(file_download_feature.find_matching_files(test_utils.temp_folder + '/**', None))
self.assertCountEqual(files, {
os.path.join(test_utils.temp_folder, ''),
os.path.join(test_utils.temp_folder, 'test'),
os.path.join(test_utils.temp_folder, 'test', 'test.txt')
})
def test_double_asterisk_match_multiple_files(self):
test_utils.create_file(os.path.join('f1', 'test1.txt'))
test_utils.create_file(os.path.join('f1', 'test2.txt'))
test_utils.create_file(os.path.join('f2', 'test3.txt'))
files = set(file_download_feature.find_matching_files(test_utils.temp_folder + '/**', None))
self.assertCountEqual(files, {
os.path.join(test_utils.temp_folder, ''),
os.path.join(test_utils.temp_folder, 'f1'),
os.path.join(test_utils.temp_folder, 'f1', 'test1.txt'),
os.path.join(test_utils.temp_folder, 'f1', 'test2.txt'),
os.path.join(test_utils.temp_folder, 'f2'),
os.path.join(test_utils.temp_folder, 'f2', 'test3.txt')
})
def test_double_asterisk_match_multiple_files_when_complex(self):
test_utils.create_file(os.path.join('f1', 'test1.txt'))
test_utils.create_file(os.path.join('f1', 'test2.txt'))
test_utils.create_file(os.path.join('d2', 'test3.txt'))
test_utils.create_file(os.path.join('d2', 'd3', 'test4.txt'))
test_utils.create_file(os.path.join('d3', 'd4', 'd5', 'test5.png'))
test_utils.create_file(os.path.join('d3', 'd6', 'd7', 'test6.txt'))
temp_folder = file_utils.normalize_path(test_utils.temp_folder)
files = set(file_download_feature.find_matching_files(temp_folder + '/d*/**/*.txt', None))
self.assertCountEqual(files, {
os.path.join(temp_folder, 'd2', 'test3.txt'),
os.path.join(temp_folder, 'd2', 'd3', 'test4.txt'),
os.path.join(temp_folder, 'd3', 'd6', 'd7', 'test6.txt')
})
def test_regex_only_0_matches(self):
files = file_download_feature.find_matching_files('#\d+#', 'some text without numbers')
self.assertEqual(files, [])
def test_regex_only_1_match(self):
files = file_download_feature.find_matching_files('#(\/[^\/]+)+#', 'the text is in /home/username/text.txt')
self.assertEqual(files, ['/home/username/text.txt'])
def test_regex_only_3_matches(self):
files = file_download_feature.find_matching_files('#(\/([\w.\-]|(\\\ ))+)+#', 'found files: '
'/home/username/text.txt, '
'/tmp/data.dat, '
'/opt/software/script\ server/read_me.md')
self.assertEqual(files, ['/home/username/text.txt', '/tmp/data.dat', '/opt/software/script\ server/read_me.md'])
def test_regex_only_any_path_linux_3_matches(self):
test_utils.set_linux()
files = file_download_feature.find_matching_files('##any_path#', 'found files: '
'/home/username/text.txt, '
'/tmp/data.dat, '
'/opt/software/script\ server/read_me.md')
self.assertEqual(files, ['/home/username/text.txt', '/tmp/data.dat', '/opt/software/script\ server/read_me.md'])
def test_regex_only_any_path_win_3_matches(self):
test_utils.set_win()
files = file_download_feature.find_matching_files('##any_path#', 'found files: '
'C:\\Users\\username\\text.txt, '
'D:\\windows\\System32, '
'C:\\Program\ Files\\script\ server\\read_me.md')
self.assertEqual(files, ['C:\\Users\\username\\text.txt',
'D:\\windows\\System32',
'C:\\Program\ Files\\script\ server\\read_me.md'])
def test_regex_only_search_user_home_win(self):
test_utils.set_win()
files = file_download_feature.find_matching_files('##any_path#', 'found files: '
'~\\text.txt')
self.assertEqual(files, ['~\\text.txt'])
def test_1_regex_and_text_no_matches(self):
files = file_download_feature.find_matching_files('/home/username/#\d+#', 'username=some_name\n '
'folder=some_folder\n '
'time=now')
self.assertEqual(files, [])
def test_1_regex_and_text_1_match(self):
files = file_download_feature.find_matching_files('/home/username/#\d+#', 'username=some_name\n '
'folder=some_folder\n '
'time=153514')
self.assertEqual(files, ['/home/username/153514'])
def test_1_regex_and_text_3_matches(self):
files = file_download_feature.find_matching_files('/home/username/#\d+#', 'username=some_name\n '
'folder=some_folder\n '
'time=153514\n '
'age=18, size=256Mb')
self.assertEqual(files, ['/home/username/153514', '/home/username/18', '/home/username/256'])
def test_1_regex_with_first_group_and_text_1_match(self):
files = file_download_feature.find_matching_files('/home/#1#username=(\w+)#/file.txt', 'username=some_name\n '
'folder=some_folder\n '
'time=153514\n '
'age=18, size=256Mb')
self.assertEqual(files, ['/home/some_name/file.txt'])
def test_1_regex_with_second_group_and_text_2_matches(self):
files = file_download_feature.find_matching_files('/home/username/#2#=(some_(\w+))#.txt',
'username=some_name\n '
'folder=some_folder\n '
'time=153514\n '
'age=18, size=256Mb')
self.assertEqual(files, ['/home/username/name.txt', '/home/username/folder.txt'])
def test_2_regexes_1_match(self):
files = file_download_feature.find_matching_files('/home/#2#username=((\w+))#/#1#time=(\d+)#.txt',
'username=some_name\n '
'folder=some_folder\n '
'time=153514\n '
'age=18, size=256Mb')
self.assertEqual(files, ['/home/some_name/153514.txt'])
def test_1_regex_and_asterisk(self):
test_utils.create_file(os.path.join('some_folder', 'file.txt'))
files = file_download_feature.find_matching_files('*/#1#folder=(\w+)#/*.txt', 'username=some_name\n '
'folder=some_folder\n '
'time=153514\n '
'age=18, size=256Mb')
self.assertEqual(files, [os.path.join(test_utils.temp_folder, 'some_folder', 'file.txt')])
def create_file(self, filepath):
if not os.path.exists(test_utils.temp_folder):
os.makedirs(test_utils.temp_folder)
filename = os.path.basename(filepath)
folder = os.path.join(test_utils.temp_folder, os.path.dirname(filepath))
if not os.path.exists(folder):
os.makedirs(folder)
file_utils.write_file(os.path.join(folder, filename), 'test text')
def setUp(self):
test_utils.setup()
def tearDown(self):
test_utils.cleanup()
class TestParametersSubstitute(unittest.TestCase):
def test_no_parameters(self):
files = file_download_feature.substitute_parameter_values([], ['/home/user/test.txt'], [])
self.assertEqual(files, ['/home/user/test.txt'])
def test_single_replace(self):
parameter = Parameter()
parameter.name = 'param1'
files = file_download_feature.substitute_parameter_values(
[parameter],
['/home/user/${param1}.txt'],
{'param1': 'val1'})
self.assertEqual(files, ['/home/user/val1.txt'])
def test_two_replaces(self):
param1 = Parameter()
param1.name = 'param1'
param2 = Parameter()
param2.name = 'param2'
files = file_download_feature.substitute_parameter_values(
[param1, param2],
['/home/${param2}/${param1}.txt'],
{'param1': 'val1', 'param2': 'val2'})
self.assertEqual(files, ['/home/val2/val1.txt'])
def test_two_replaces_in_two_files(self):
param1 = Parameter()
param1.name = 'param1'
param2 = Parameter()
param2.name = 'param2'
files = file_download_feature.substitute_parameter_values(
[param1, param2],
['/home/${param2}/${param1}.txt', '/tmp/${param2}.txt', '/${param1}'],
{'param1': 'val1', 'param2': 'val2'})
self.assertEqual(files, ['/home/val2/val1.txt', '/tmp/val2.txt', '/val1'])
def test_no_pattern_match(self):
param1 = Parameter()
param1.name = 'param1'
files = file_download_feature.substitute_parameter_values(
[param1],
['/home/user/${paramX}.txt'],
{'param1': 'val1'})
self.assertEqual(files, ['/home/user/${paramX}.txt'])
def test_skip_secure_replace(self):
param1 = Parameter()
param1.name = 'param1'
param1.secure = True
files = file_download_feature.substitute_parameter_values(
[param1],
['/home/user/${param1}.txt'],
{'param1': 'val1'})
self.assertEqual(files, ['/home/user/${param1}.txt'])
def test_skip_flag_replace(self):
param1 = Parameter()
param1.name = 'param1'
param1.no_value = True
files = file_download_feature.substitute_parameter_values(
[param1],
['/home/user/${param1}.txt'],
{'param1': 'val1'})
self.assertEqual(files, ['/home/user/${param1}.txt'])
if __name__ == '__main__':
unittest.main()
| 44.122807 | 128 | 0.523101 |
d5064dc29e70472b7ba954f03a2137da70c3dd8a | 7,604 | py | Python | lib/utils/voxelizer.py | Wei2624/domain-adaptation | 8b1c241bb64451a8a5adc34132c75f41bed9bb7f | ["MIT"] | null | null | null | lib/utils/voxelizer.py | Wei2624/domain-adaptation | 8b1c241bb64451a8a5adc34132c75f41bed9bb7f | ["MIT"] | null | null | null | lib/utils/voxelizer.py | Wei2624/domain-adaptation | 8b1c241bb64451a8a5adc34132c75f41bed9bb7f | ["MIT"] | null | null | null |
from fcn.config import cfg
import numpy as np
class Voxelizer(object):
def __init__(self, grid_size, num_classes):
self.grid_size = grid_size
self.num_classes = num_classes
self.margin = 0.3
self.min_x = 0
self.min_y = 0
self.min_z = 0
self.max_x = 0
self.max_y = 0
self.max_z = 0
self.step_x = 0
self.step_y = 0
self.step_z = 0
self.voxelized = False
self.height = 0
self.width = 0
def setup(self, min_x, min_y, min_z, max_x, max_y, max_z):
self.min_x = min_x
self.min_y = min_y
self.min_z = min_z
self.max_x = max_x
self.max_y = max_y
self.max_z = max_z
# step size
self.step_x = (max_x - min_x) / self.grid_size
self.step_y = (max_y - min_y) / self.grid_size
self.step_z = (max_z - min_z) / self.grid_size
self.voxelized = True
def draw(self, labels, colors, ax):
for i in range(1, len(colors)):
index = np.where(labels == i)
X = index[0] * self.step_x + self.min_x
Y = index[1] * self.step_y + self.min_y
Z = index[2] * self.step_z + self.min_z
ax.scatter(X, Y, Z, c=colors[i], marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
set_axes_equal(ax)
def reset(self):
self.min_x = 0
self.min_y = 0
self.min_z = 0
self.max_x = 0
self.max_y = 0
self.max_z = 0
self.step_x = 0
self.step_y = 0
self.step_z = 0
self.voxelized = False
def voxelize(self, points):
if not self.voxelized:
# compute the boundary of the 3D points
Xmin = np.nanmin(points[0,:]) - self.margin
Xmax = np.nanmax(points[0,:]) + self.margin
Ymin = np.nanmin(points[1,:]) - self.margin
Ymax = np.nanmax(points[1,:]) + self.margin
Zmin = np.nanmin(points[2,:]) - self.margin
Zmax = np.nanmax(points[2,:]) + self.margin
self.min_x = Xmin
self.min_y = Ymin
self.min_z = Zmin
self.max_x = Xmax
self.max_y = Ymax
self.max_z = Zmax
# step size
self.step_x = (Xmax-Xmin) / self.grid_size
self.step_y = (Ymax-Ymin) / self.grid_size
self.step_z = (Zmax-Zmin) / self.grid_size
self.voxelized = True
# compute grid indexes
indexes = np.zeros_like(points, dtype=np.float32)
indexes[0,:] = np.floor((points[0,:] - self.min_x) / self.step_x)
indexes[1,:] = np.floor((points[1,:] - self.min_y) / self.step_y)
indexes[2,:] = np.floor((points[2,:] - self.min_z) / self.step_z)
# crash the grid indexes
# grid_indexes = indexes[0,:] * self.grid_size * self.grid_size + indexes[1,:] * self.grid_size + indexes[2,:]
# I = np.isnan(grid_indexes)
# grid_indexes[I] = -1
# grid_indexes = grid_indexes.reshape(self.height, self.width).astype(np.int32)
return indexes
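    # Note (editorial assumption, not from the original repo): `points` is a
    # 3xN array, so `voxelize` yields per-axis grid coordinates in
    # [0, grid_size) for each point; NaN points propagate NaN indexes.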
# backproject pixels into 3D points
def backproject(self, im_depth, meta_data):
depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth']
# compute projection matrix
P = meta_data['projection_matrix']
P = np.matrix(P)
Pinv = np.linalg.pinv(P)
# compute the 3D points
height = depth.shape[0]
width = depth.shape[1]
self.height = height
self.width = width
# camera location
C = meta_data['camera_location']
C = np.matrix(C).transpose()
Cmat = np.tile(C, (1, width*height))
# construct the 2D points matrix
x, y = np.meshgrid(np.arange(width), np.arange(height))
ones = np.ones((height, width), dtype=np.float32)
x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3)
# backprojection
x3d = Pinv * x2d.transpose()
x3d[0,:] = x3d[0,:] / x3d[3,:]
x3d[1,:] = x3d[1,:] / x3d[3,:]
x3d[2,:] = x3d[2,:] / x3d[3,:]
x3d = x3d[:3,:]
# compute the ray
R = x3d - Cmat
# compute the norm
N = np.linalg.norm(R, axis=0)
# normalization
R = np.divide(R, np.tile(N, (3,1)))
# compute the 3D points
X = Cmat + np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R)
# mask
index = np.where(im_depth.flatten() == 0)
X[:,index] = np.nan
return np.array(X)
# backproject pixels into 3D points in camera's coordinate system
def backproject_camera(self, im_depth, meta_data):
depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth']
# get intrinsic matrix
K = meta_data['intrinsic_matrix']
K = np.matrix(K)
Kinv = np.linalg.inv(K)
if cfg.FLIP_X:
Kinv[0, 0] = -1 * Kinv[0, 0]
Kinv[0, 2] = -1 * Kinv[0, 2]
# compute the 3D points
width = depth.shape[1]
height = depth.shape[0]
# construct the 2D points matrix
x, y = np.meshgrid(np.arange(width), np.arange(height))
ones = np.ones((height, width), dtype=np.float32)
x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3)
# backprojection
R = Kinv * x2d.transpose()
# compute the 3D points
X = np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R)
# mask
index = np.where(im_depth.flatten() == 0)
X[:,index] = np.nan
return np.array(X)
def check_points(self, points, pose):
# transform the points
R = pose[0:3, 0:3]
T = pose[0:3, 3].reshape((3,1))
points = np.dot(R, points) + np.tile(T, (1, points.shape[1]))
Xmin = np.nanmin(points[0,:])
Xmax = np.nanmax(points[0,:])
Ymin = np.nanmin(points[1,:])
Ymax = np.nanmax(points[1,:])
Zmin = np.nanmin(points[2,:])
Zmax = np.nanmax(points[2,:])
if Xmin >= self.min_x and Xmax <= self.max_x and Ymin >= self.min_y and Ymax <= self.max_y and Zmin >= self.min_z and Zmax <= self.max_z:
return True
else:
            print('points x limit: {} {}'.format(Xmin, Xmax))
            print('points y limit: {} {}'.format(Ymin, Ymax))
            print('points z limit: {} {}'.format(Zmin, Zmax))
return False
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
| 33.06087 | 145 | 0.557207 |
f41ebeeda05d6501f0162739876b727e0f2cda1b | 739 | py | Python | gen_charmap.py | matikij/rotelhex | d505c7d6d0e1827b63bd4d8875ecbc39044be5ef | ["MIT"] | null | null | null | gen_charmap.py | matikij/rotelhex | d505c7d6d0e1827b63bd4d8875ecbc39044be5ef | ["MIT"] | null | null | null | gen_charmap.py | matikij/rotelhex | d505c7d6d0e1827b63bd4d8875ecbc39044be5ef | ["MIT"] | null | null | null |
import pprint
import rotelhex
import time
r=rotelhex.Rotel(port="/dev/serial0")
time.sleep(1)
r._display.label_change=True
r.label_change()
time.sleep(2) # just in case
last_char = r._display._current_char.decode('cp850')
charmap = [last_char]
while True:
print("outer: {}".format(last_char))
r.char_next()
while last_char == r._display._current_char.decode('cp850'):
print(r._display)
time.sleep(0.1)
last_char = r._display._current_char.decode('cp850')
if last_char == charmap[0]:
break
else:
charmap.append(last_char)
charmap[0] = ' ' # actually the first char is a space
with open("rotelhex/charmap.py","w") as f:
f.write("CHARMAP = ")
f.write(pprint.pformat(charmap))
| 26.392857 | 64 | 0.679296 |
a27b41ddaf38753c8e5476feef8a77a5aee14370 | 12,066 | py | Python | scripts/predict_uq.py | gmackall/deep-quant | 1c0081795c8be9b7513697b367e9a1381f7f742a | ["MIT"] | 122 | 2017-10-04T04:34:24.000Z | 2022-03-10T23:59:22.000Z | scripts/predict_uq.py | emrekesici/deep-quant | 19ae66d25924c4bc4b09879d82794013140d2a8b | ["MIT"] | 20 | 2018-02-07T16:34:10.000Z | 2020-07-21T08:45:59.000Z | scripts/predict_uq.py | Henrywcj/MLII | bf639e82f64b11b4af973570bccdfe5e2ed25533 | ["MIT"] | 51 | 2017-11-16T15:42:13.000Z | 2022-03-19T00:46:57.000Z |
# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import copy
import math
import numpy as np
import tensorflow as tf
import regex as re
import pandas as pd
from tensorflow.python.platform import gfile
from batch_generator import BatchGenerator
from utils import model_utils,data_utils
import utils
def print_vector(name,v):
print("%s: "%name,end='')
for i in range(len(v)):
print("%.2f "%v[i],end=' ')
print()
def predict(config):
if config.UQ_model_type == 'MVE':
predict_mve(config)
elif config.UQ_model_type == 'PIE':
predict_pie(config)
def predict_mve(config):
datafile = config.datafile
if config.predict_datafile is not None:
datafile = config.predict_datafile
print("Loading data from %s ..."%datafile)
path = utils.data_utils.get_data_path(config.data_dir,datafile)
config.batch_size = 1
batches = BatchGenerator(path,
config,
require_targets=config.require_targets,
verbose=True)
batches.cache(verbose=True)
tf_config = tf.ConfigProto( allow_soft_placement=True ,
log_device_placement=False )
tf_config.gpu_options.allow_growth = True
# Initialize DataFrames
df_target = pd.DataFrame()
df_output = pd.DataFrame()
df_variance = pd.DataFrame()
df_mse = pd.DataFrame()
df_mse_var = pd.DataFrame()
df_list = [df_target, df_output, df_variance, df_mse, df_mse_var]
with tf.Graph().as_default(), tf.Session(config=tf_config) as session:
model = model_utils.get_model(session, config, verbose=True)
perfs = dict()
perfs_p = dict()
for i in range(batches.num_batches):
batch = batches.next_batch()
(mse, mse_var, preds, preds_variance) = model.step(session, batch, keep_prob=config.keep_prob_pred,
uq=config.UQ, UQ_model_type='MVE')
# (mse, preds) = model.debug_step(session, batch)
date = batch_to_date(batch)
key = batch_to_key(batch)
if math.isnan(mse) is False:
if date not in perfs:
perfs[date] = list()
perfs_p[date] = list()
perfs[date].append(mse)
perfs_p[date].append(mse_var)
# Print according to the options
if config.pretty_print_preds:
pretty_print_predictions(batches, batch, preds, preds_variance, mse, mse_var)
elif config.print_preds:
print_predictions(config, batches, batch, preds, preds_variance, mse, mse_var)
# Get values and update DataFrames if df_dirname is provided in config
if config.df_dirname is not None:
# Get all the values
target_val = get_value(batches, batch, 'target')
output_val = get_value(batches, batch, 'output', preds)
variance_val = get_value(batches, batch, 'variance', preds_variance)
mse_val = mse
mse_var_val = mse_var
values_list = [target_val, output_val, variance_val, mse_val, mse_var_val]
# Update DataFrames
for j in range(len(df_list)):
assert(len(df_list) == len(values_list))
df_list[j] = update_df(df_list[j], date, key, values_list[j])
# Save the DataFrames
if config.df_dirname:
if not os.path.isdir(config.df_dirname):
os.makedirs(config.df_dirname)
save_names = ['target-df.pkl', 'output-df.pkl', 'variance-df.pkl', 'mse-df.pkl', 'mse-var-df.pkl']
for j in range(len(df_list)):
assert(len(df_list) == len(save_names))
df_list[j].to_pickle(os.path.join(config.df_dirname, save_names[j]))
# MSE Outfile
if config.mse_outfile is not None:
with open(config.mse_outfile, "w") as f:
for date in sorted(perfs):
mean = np.mean(perfs[date])
print("%s %.6f %d"%(date, mean, len(perfs[date])), file=f)
total_mean = np.mean( [x for v in perfs.values() for x in v] )
print("Total %.6f" % total_mean, file=f)
f.closed
else:
exit()
# MSE with variance outfile
if config.mse_var_outfile is not None:
with open(config.mse_var_outfile, "w") as f:
for date in sorted(perfs_p):
mean = np.mean(perfs_p[date])
print("%s %.6f %d"%(date, mean, len(perfs_p[date])), file=f)
total_mean = np.mean( [x for v in perfs_p.values() for x in v] )
print("Total %.6f" % total_mean,file=f)
f.closed
else:
exit()
def predict_pie(config):
""" Doesn't use print options. Only outputs dataframes"""
datafile = config.datafile
if config.predict_datafile is not None:
datafile = config.predict_datafile
print("Loading data from %s ..."%datafile)
path = utils.data_utils.get_data_path(config.data_dir,datafile)
config.batch_size = 1
batches = BatchGenerator(path,
config,
require_targets=config.require_targets,
verbose=True)
batches.cache(verbose=True)
tf_config = tf.ConfigProto( allow_soft_placement=True ,
log_device_placement=False )
# Initialize DataFrames
df_target = pd.DataFrame()
df_output_lb = pd.DataFrame()
df_output_ub = pd.DataFrame()
df_list = [df_target, df_output_lb, df_output_ub]
with tf.Graph().as_default(), tf.Session(config=tf_config) as session:
model = model_utils.get_model(session, config, verbose=True)
for i in range(batches.num_batches):
batch = batches.next_batch()
(mpiw, _, _, preds_lb, preds_ub) = model.step(session, batch, keep_prob=config.keep_prob_pred,
uq=config.UQ, UQ_model_type='PIE')
# (mse, preds) = model.debug_step(session, batch)
date = batch_to_date(batch)
key = batch_to_key(batch)
# Dummy input to be consistent with the rest of the predictions printing options. MSE = 0.0. It is not
# evaluated in PIE case
mse_dummy = mse_var_dummy = 0.0
# Print every n iterations to check the progress for monitoring
if i % 10000 == 0:
pretty_print_predictions( batches, batch, preds_lb, preds_ub, mse_dummy, mse_var_dummy)
# Get values and update DataFrames if df_dirname is provided in config
if config.df_dirname is not None:
# Get all values
target_val = get_value(batches, batch, 'target')
output_lb_val = get_value(batches, batch, 'output_lb', preds_lb)
output_ub_val = get_value(batches, batch, 'output_ub', preds_ub)
values_list = [target_val, output_lb_val, output_ub_val]
# Update DataFrames
for j in range(len(df_list)):
assert(len(df_list) == len(values_list))
df_list[j] = update_df(df_list[j], date, key, values_list[j])
# Save the DataFrames
if not os.path.isdir(config.df_dirname):
os.makedirs(config.df_dirname)
save_names = ['target-df.pkl', 'output-lb-df.pkl', 'output-ub-df.pkl']
for j in range(len(df_list)):
assert(len(df_list) == len(save_names))
df_list[j].to_pickle(os.path.join(config.df_dirname, save_names[j]))
return
def batch_to_key(batch):
idx = batch.seq_lengths[0]-1
assert(0 <= idx)
assert(idx < len(batch.attribs))
return batch.attribs[idx][0][0]
def batch_to_date(batch):
idx = batch.seq_lengths[0]-1
assert(0 <= idx)
assert(idx < len(batch.attribs))
if (batch.attribs[idx][0] is None):
print(idx)
exit()
return batch.attribs[idx][0][1]
def pretty_print_predictions(batches, batch, preds, preds_variances, mse, mse_var):
key = batch_to_key(batch)
date = batch_to_date(batch)
L = batch.seq_lengths[0]
targets = batch.targets[L-1][0]
outputs = preds[0]
variances = preds_variances[0]
# variances = np.exp(-1*variances) # for precision formulation
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
print("%s %s mse=%.8f mse_var=%.8f"%(date, key, mse, mse_var))
inputs = batch.inputs
for i in range(L):
print_vector("input[t-%d]"%(L-i-1), batches.get_raw_inputs(batch, 0, inputs[i][0]))
print_vector("output[t+1]", batches.get_raw_outputs(batch, 0, outputs))
print_vector("target[t+1]", batches.get_raw_outputs(batch, 0, targets))
print_vector("variance[t+1]", batches.get_raw_outputs(batch, 0, variances))
print("--------------------------------")
sys.stdout.flush()
def print_predictions(config, batches, batch, preds, preds_variances, mse, mse_var):
key = batch_to_key(batch)
date = batch_to_date(batch)
inputs = batch.inputs[-1][0]
outputs = preds[0]
variances = preds_variances[0]
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
# Raw outputs
out = batches.get_raw_outputs(batch, 0, outputs)
prec = batches.get_raw_outputs(batch, 0, variances)
if config.print_normalized_outputs:
out_str = 'out ' + ' '.join(["%.3f" % outputs[i] for i in range(len(outputs))])
prec_str = 'var ' + ' '.join(["%.3f" % variances[i] for i in range(len(variances))])
else:
out_str = 'out ' + ' '.join(["%.3f"%out[i] for i in range(len(out))])
prec_str = 'var ' + ' '.join(["%.3f" % prec[i] for i in range(len(prec))])
print("%s %s %s %s"%(date, key, out_str, str(mse)))
print("%s %s %s %s" % (date, key, prec_str, str(mse_var)))
sys.stdout.flush()
def update_df(df, date, key, value):
"""
Updates the dataframe with key as column, date as index
:param df: Dataframe to be updated
:param date: date
:param key: gvkey
:param value: value to be inserted
:return: updated df
"""
date = pd.to_datetime(date, format="%Y%m")
df.loc[date, key] = value
return df
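# Example with illustrative values only:
#   update_df(pd.DataFrame(), '201712', 'GVKEY001', 0.42)
# returns a 1x1 frame indexed by Timestamp('2017-12-01') with column 'GVKEY001'.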
def get_value(batches, batch, field, predictions=None, output_field=3):
"""
Extracts the appropriate field value from batch or predictions
:param batches:
:param batch: batch
:param field: field
:param predictions: predictions eg outputs, variances
:param output_field: field to be extracted
:return: value from batch or mse value
"""
assert(field in ['target', 'output', 'variance', 'output_lb', 'output_ub'])
if field == 'target':
l = batch.seq_lengths[0]
targets = batch.targets[l - 1][0]
value = batches.get_raw_outputs(batch, 0, targets)[output_field]
else:
value = batches.get_raw_outputs(batch, 0, predictions[0])[output_field]
return value
| 35.488235 | 114 | 0.605254 |
8d600939b0c880477bd78a83c3d30205361e9b03 | 309 | py | Python | demo_cli/cli.py | rstms/demo_cli | 7539f74e8cf378e4068d60e8f53e094e6400cb70 | ["MIT"] | null | null | null | demo_cli/cli.py | rstms/demo_cli | 7539f74e8cf378e4068d60e8f53e094e6400cb70 | ["MIT"] | null | null | null | demo_cli/cli.py | rstms/demo_cli | 7539f74e8cf378e4068d60e8f53e094e6400cb70 | ["MIT"] | null | null | null |
"""Console script for demo_cli."""
import sys
import click
@click.command()
@click.option('-t', '--test', type=str, default=None)
def main(test):
"""Console script for demo_cli."""
click.echo(f"Test mode {test}.")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 17.166667 | 53 | 0.634304 |
ed095c6032f08644f66c087b819912cb562a2747 | 636 | py | Python | runlmc/approx/ski.py | vlad17/run-lmc | bddb7df2aa3dcec17380d858325bf50502fffb4e | ["BSD-3-Clause"] | 29 | 2017-06-13T21:45:54.000Z | 2021-12-15T02:03:10.000Z | runlmc/approx/ski.py | vlad17/run-lmc | bddb7df2aa3dcec17380d858325bf50502fffb4e | ["BSD-3-Clause"] | 3 | 2020-06-11T02:34:23.000Z | 2022-02-26T18:03:45.000Z | runlmc/approx/ski.py | vlad17/run-lmc | bddb7df2aa3dcec17380d858325bf50502fffb4e | ["BSD-3-Clause"] | 9 | 2017-03-12T16:36:56.000Z | 2021-03-02T05:10:36.000Z |
# Copyright (c) 2016, Vladimir Feinberg
# Licensed under the BSD 3-clause license (see LICENSE)
from ..linalg.composition import Composition
from ..linalg.matrix import Matrix
# TODO(test)
class SKI(Composition):
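    # Editorial summary (assumption, not in the original source): this wraps
    # the structured-kernel-interpolation product W · K · Wᵀ, i.e. an
    # interpolation matrix applied on both sides of a grid kernel K.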
def __init__(self, K, W, WT):
self.W = W
self.K = K
self.WT = WT
super().__init__([
Matrix.wrap(W.shape, W.dot),
K,
Matrix.wrap(WT.shape, WT.dot)])
def as_numpy(self):
WKT = self.W.dot(self.K.as_numpy().T)
return self.W.dot(WKT.T)
def upper_eig_bound(self):
return self.K.upper_eig_bound() * self.shape[0] / self.m
| 26.5 | 64 | 0.600629 |
991a1d6a8ac2bf46b81fd4aacf4df10a97c62b1b | 352 | py | Python | web-frameworks/django/corey_schafer_youtube_tutorial/users/signals.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | ["MIT"] | null | null | null | web-frameworks/django/corey_schafer_youtube_tutorial/users/signals.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | ["MIT"] | null | null | null | web-frameworks/django/corey_schafer_youtube_tutorial/users/signals.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | ["MIT"] | null | null | null |
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from .models import Profile
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
new_profile = Profile.objects.create(user=instance)
new_profile.save()
| 29.333333 | 59 | 0.767045 |
c369139bd85b919581de9ddd9ab6a0d857f10626 | 6,466 | py | Python | tests/hogwarts/fatlady.py | alixryu/getpost1.0 | c3bb7f15261a5a50fe79103c1fa1008817250ae1 | ["MIT"] | 1 | 2019-08-29T00:03:08.000Z | 2019-08-29T00:03:08.000Z | tests/hogwarts/fatlady.py | alixryu/getpost1.0 | c3bb7f15261a5a50fe79103c1fa1008817250ae1 | ["MIT"] | null | null | null | tests/hogwarts/fatlady.py | alixryu/getpost1.0 | c3bb7f15261a5a50fe79103c1fa1008817250ae1 | ["MIT"] | null | null | null |
""":mod:`tests.hogwarts.fatlady` --- Test module of getpost.hogwarts.fatlady
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from nose.tools import eq_, ok_
from flask import session as user_session
from getpost.hogwarts.fatlady import authenticate_user, deauthenticate_user
from getpost.hogwarts.fatlady import create_account
from getpost.models import Account, Student
from getpost.orm import Session
from .. import app, driver
email_address = 'asquirrel@oberlin.edu'
password = 'tappansquare'
remember_me = True
t_number = 'T01123456'
db_session = Session()
def test_authenticate_user():
# test with non-existing account
account = authenticate_user(
'zzz@oberlin.edu', password, remember_me
)
ok_(account is None)
ok_('user_id' not in user_session)
ok_('email_address' not in user_session)
# test with existing account, wrong password
account = authenticate_user(email_address, 'zzzz', remember_me)
ok_(account is None)
ok_('user_id' not in user_session)
ok_('email_address' not in user_session)
# test with existing account, correct password
account = authenticate_user(email_address, password, remember_me)
ok_(account is not None)
eq_(int(user_session['user_id']), account.id)
eq_(user_session['email_address'], email_address)
def test_login_selenium():
# TODO: test remember_me
# test with non-existing account
driver.get('http://localhost:5000/auth/login/')
driver.find_element_by_name('email').send_keys('zzzz@oberlin.edu')
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('submit').click()
ok_('Hello '+email_address not in driver.page_source)
ok_('Log Out' not in driver.page_source)
# test with existing account, wrong password
driver.get('http://localhost:5000/auth/login/')
driver.find_element_by_name('email').send_keys(email_address)
driver.find_element_by_name('password').send_keys('zzzz')
driver.find_element_by_name('submit').click()
ok_('Hello '+email_address not in driver.page_source)
ok_('Log Out' not in driver.page_source)
# test with existing account, correct password
driver.get('http://localhost:5000/auth/login/')
driver.find_element_by_name('email').send_keys(email_address)
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('submit').click()
ok_('Hello '+email_address in driver.page_source)
ok_('Log Out' in driver.page_source)
def test_deauthenticate_user():
deauthenticate_user()
ok_('user_id' not in user_session)
ok_('email_address' not in user_session)
def test_logout_selenium():
driver.find_element_by_link_text('Log Out').click()
ok_('Hello '+email_address not in driver.page_source)
ok_('Log Out' not in driver.page_source)
eq_(driver.current_url, 'http://localhost:5000/')
def test_create_account():
# test with a new, match account
eq_(create_account(email_address, t_number, password), 1)
# test existence of new Account, StudentRole object
account = db_session.query(
Account
).filter_by(email_address=email_address).first()
ok_(account)
ok_(account.student)
ok_(account.student.student)
# test with already existing account
eq_(create_account(email_address, t_number, password), -1)
# test with no match account
eq_(create_account('zzzz@oberlin.edu', t_number, password), 0)
# remove account
db_session.delete(account)
db_session.commit()
account = db_session.query(
Account
).filter_by(email_address=email_address).first()
ok_(account is None)
def test_signup_selenium():
# test correct values
driver.get('http://localhost:5000/auth/signup/student/')
driver.find_element_by_name('email').send_keys(email_address)
driver.find_element_by_name('t_number').send_keys(t_number)
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('password2').send_keys(password)
driver.find_element_by_name('submit').click()
ok_('Login' in driver.title)
# test with different password
driver.get('http://localhost:5000/auth/signup/student/')
driver.find_element_by_name('email').send_keys(email_address)
driver.find_element_by_name('t_number').send_keys(t_number)
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('password2').send_keys(password+'a')
driver.find_element_by_name('submit').click()
ok_('Signup' in driver.title)
# test with existing account
driver.get('http://localhost:5000/auth/signup/student/')
driver.find_element_by_name('email').send_keys(email_address)
driver.find_element_by_name('t_number').send_keys(t_number)
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('password2').send_keys(password)
driver.find_element_by_name('submit').click()
ok_('Signup' in driver.title)
# test with no match
driver.get('http://localhost:5000/auth/signup/student/')
driver.find_element_by_name('email').send_keys('zzzz@oberlin.edu')
driver.find_element_by_name('t_number').send_keys(t_number)
driver.find_element_by_name('password').send_keys(password)
driver.find_element_by_name('password2').send_keys(password+'a')
driver.find_element_by_name('submit').click()
ok_('Signup' in driver.title)
def test_authentication_integration():
# create dummy student instance
student = Student(
first_name='Albino',
last_name='Squirrel',
ocmr='0000',
t_number=t_number,
email_address=email_address
)
db_session.add(student)
db_session.commit()
# sign up
test_create_account()
test_signup_selenium()
# controller function tests
with app.test_request_context():
test_authenticate_user()
test_deauthenticate_user()
# selenium tests
test_login_selenium()
test_logout_selenium()
# delete dummy account
account = db_session.query(
Account
).filter_by(email_address=email_address).first()
db_session.delete(account)
db_session.commit()
# delete dummy student instance
student = db_session.query(
Student
).filter_by(email_address=email_address).first()
db_session.delete(student)
db_session.commit()
| 34.21164 | 77 | 0.717754 |
9bf720efff5ed169791dc4e37d95963dfc772dac | 4,043 | py | Python | Model.py | Mohsen-code/music-player-python | ba797ef3d79e7e427ff73b699ebf00e6274548fd | ["MIT"] | null | null | null | Model.py | Mohsen-code/music-player-python | ba797ef3d79e7e427ff73b699ebf00e6274548fd | ["MIT"] | null | null | null | Model.py | Mohsen-code/music-player-python | ba797ef3d79e7e427ff73b699ebf00e6274548fd | ["MIT"] | null | null | null |
import json
FILE_NAME = "play_lists.json"
MUSIC_FILE_NAME = "music_of_play_lists.json"
def get_play_lists():
play_lists_json_content = []
try:
play_lists_content = open(FILE_NAME, "r")
play_lists_json_content = json.loads(play_lists_content.read())
except Exception:
file = open(FILE_NAME, "w")
file.write("[]")
file.close()
return play_lists_json_content
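# Assumed on-disk shape of FILE_NAME (illustrative only): a JSON array of
# playlist objects, e.g. [{"id": 1, "title": "Favorites"}].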
def is_play_list_exist(play_list_id):
play_lists = get_play_lists()
for (index, play_list) in enumerate(play_lists):
if play_list['id'] == play_list_id:
return True
return False
def add_play_list(play_list_object):
play_lists = get_play_lists()
play_lists.append(play_list_object)
file = open(FILE_NAME, "w")
file.write(json.dumps(play_lists))
file.close()
def get_play_list_by_title(title):
play_lists = get_play_lists()
    filtered_play_lists = list(filter(lambda play_list: play_list['title'] == title, play_lists))
return filtered_play_lists
def update_play_list(updated_play_list):
play_lists = get_play_lists()
play_list_index = None
for (index, play_list) in enumerate(play_lists):
if play_list['id'] == updated_play_list['id']:
play_list_index = index
break
if play_list_index == None:
return False
play_lists[play_list_index] = updated_play_list
file = open(FILE_NAME, "w")
file.write(json.dumps(play_lists))
file.close()
return True
def remove_play_list(play_list_id):
if is_play_list_exist(play_list_id):
play_lists = get_play_lists()
filtered_play_list = list(filter(lambda play_list: play_list['id'] != play_list_id, play_lists))
file = open(FILE_NAME, "w")
file.write(json.dumps(filtered_play_list))
file.close()
return True
return False
def get_music_of_play_list(play_list_id):
if is_play_list_exist(play_list_id):
music_json_content = []
try:
music_content = open(MUSIC_FILE_NAME, "r")
music_json_content = json.loads(music_content.read())
except Exception:
file = open(MUSIC_FILE_NAME, "w")
file.write("[]")
file.close()
return music_json_content
return None
def add_music_to_play_list(play_list_id, music_data):
musics = get_music_of_play_list(play_list_id)
if musics != None:
musics.append(music_data)
file = open(MUSIC_FILE_NAME, "w")
file.write(json.dumps(musics))
file.close()
return True
return False
def get_music_by_title(play_list_id, title):
musics = get_music_of_play_list(play_list_id)
filtered_musics = []
if musics != None:
filtered_musics = list(filter(lambda music: music['title'] == title, musics))
return filtered_musics
def get_music_by_id(play_list_id, music_id):
musics = get_music_of_play_list(play_list_id)
filtered_musics = []
if musics != None:
filtered_musics = list(filter(lambda music: music['id'] == music_id, musics))
return filtered_musics[0] if len(filtered_musics) > 0 else None
def update_music(play_list_id, music_data):
musics = get_music_of_play_list(play_list_id)
if musics != None:
music_index = None
for (index, music) in enumerate(musics):
if music['id'] == music_data['id']:
music_index = index
break
musics[music_index] = music_data
file = open(MUSIC_FILE_NAME, "w")
file.write(json.dumps(musics))
file.close()
return True
return False
def remove_music(play_list_id, music_id):
musics = get_music_of_play_list(play_list_id)
if musics != None:
filtered_musics = list(filter(lambda music: music['id'] != music_id, musics))
file = open(MUSIC_FILE_NAME, "w")
file.write(json.dumps(filtered_musics))
file.close()
return True
return False
| 28.076389 | 104 | 0.653228 |
99866689a62b6c932c902e473d99ee05aad33f7a | 6,324 | py | Python | pyglet/libs/win32/winkey.py | swipswaps/pyglet | 2bfd7ee52482b805ae076cf5036c5628e8a72224 | ["BSD-3-Clause"] | 1 | 2020-04-12T15:20:34.000Z | 2020-04-12T15:20:34.000Z | pyglet/libs/win32/winkey.py | swipswaps/pyglet | 2bfd7ee52482b805ae076cf5036c5628e8a72224 | ["BSD-3-Clause"] | null | null | null | pyglet/libs/win32/winkey.py | swipswaps/pyglet | 2bfd7ee52482b805ae076cf5036c5628e8a72224 | ["BSD-3-Clause"] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2019 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from pyglet.window import key
from .constants import *
keymap = {
ord('A'): key.A,
ord('B'): key.B,
ord('C'): key.C,
ord('D'): key.D,
ord('E'): key.E,
ord('F'): key.F,
ord('G'): key.G,
ord('H'): key.H,
ord('I'): key.I,
ord('J'): key.J,
ord('K'): key.K,
ord('L'): key.L,
ord('M'): key.M,
ord('N'): key.N,
ord('O'): key.O,
ord('P'): key.P,
ord('Q'): key.Q,
ord('R'): key.R,
ord('S'): key.S,
ord('T'): key.T,
ord('U'): key.U,
ord('V'): key.V,
ord('W'): key.W,
ord('X'): key.X,
ord('Y'): key.Y,
ord('Z'): key.Z,
ord('0'): key._0,
ord('1'): key._1,
ord('2'): key._2,
ord('3'): key._3,
ord('4'): key._4,
ord('5'): key._5,
ord('6'): key._6,
ord('7'): key._7,
ord('8'): key._8,
ord('9'): key._9,
ord('\b'): key.BACKSPACE,
# By experiment:
0x14: key.CAPSLOCK,
0x5d: key.MENU,
# VK_LBUTTON: ,
# VK_RBUTTON: ,
VK_CANCEL: key.CANCEL,
# VK_MBUTTON: ,
# VK_BACK: ,
VK_TAB: key.TAB,
# VK_CLEAR: ,
VK_RETURN: key.RETURN,
VK_SHIFT: key.LSHIFT,
VK_CONTROL: key.LCTRL,
VK_MENU: key.LALT,
VK_PAUSE: key.PAUSE,
# VK_CAPITAL: ,
# VK_KANA: ,
# VK_HANGEUL: ,
# VK_HANGUL: ,
# VK_JUNJA: ,
# VK_FINAL: ,
# VK_HANJA: ,
# VK_KANJI: ,
VK_ESCAPE: key.ESCAPE,
# VK_CONVERT: ,
# VK_NONCONVERT: ,
# VK_ACCEPT: ,
# VK_MODECHANGE: ,
VK_SPACE: key.SPACE,
VK_PRIOR: key.PAGEUP,
VK_NEXT: key.PAGEDOWN,
VK_END: key.END,
VK_HOME: key.HOME,
VK_LEFT: key.LEFT,
VK_UP: key.UP,
VK_RIGHT: key.RIGHT,
VK_DOWN: key.DOWN,
# VK_SELECT: ,
VK_PRINT: key.PRINT,
# VK_EXECUTE: ,
# VK_SNAPSHOT: ,
VK_INSERT: key.INSERT,
VK_DELETE: key.DELETE,
VK_HELP: key.HELP,
VK_LWIN: key.LWINDOWS,
VK_RWIN: key.RWINDOWS,
# VK_APPS: ,
VK_NUMPAD0: key.NUM_0,
VK_NUMPAD1: key.NUM_1,
VK_NUMPAD2: key.NUM_2,
VK_NUMPAD3: key.NUM_3,
VK_NUMPAD4: key.NUM_4,
VK_NUMPAD5: key.NUM_5,
VK_NUMPAD6: key.NUM_6,
VK_NUMPAD7: key.NUM_7,
VK_NUMPAD8: key.NUM_8,
VK_NUMPAD9: key.NUM_9,
VK_MULTIPLY: key.NUM_MULTIPLY,
VK_ADD: key.NUM_ADD,
# VK_SEPARATOR: ,
VK_SUBTRACT: key.NUM_SUBTRACT,
VK_DECIMAL: key.NUM_DECIMAL,
VK_DIVIDE: key.NUM_DIVIDE,
VK_F1: key.F1,
VK_F2: key.F2,
VK_F3: key.F3,
VK_F4: key.F4,
VK_F5: key.F5,
VK_F6: key.F6,
VK_F7: key.F7,
VK_F8: key.F8,
VK_F9: key.F9,
VK_F10: key.F10,
VK_F11: key.F11,
VK_F12: key.F12,
VK_F13: key.F13,
VK_F14: key.F14,
VK_F15: key.F15,
VK_F16: key.F16,
# VK_F17: ,
# VK_F18: ,
# VK_F19: ,
# VK_F20: ,
# VK_F21: ,
# VK_F22: ,
# VK_F23: ,
# VK_F24: ,
VK_NUMLOCK: key.NUMLOCK,
VK_SCROLL: key.SCROLLLOCK,
VK_LSHIFT: key.LSHIFT,
VK_RSHIFT: key.RSHIFT,
VK_LCONTROL: key.LCTRL,
VK_RCONTROL: key.RCTRL,
VK_LMENU: key.LALT,
VK_RMENU: key.RALT,
# VK_PROCESSKEY: ,
# VK_ATTN: ,
# VK_CRSEL: ,
# VK_EXSEL: ,
# VK_EREOF: ,
# VK_PLAY: ,
# VK_ZOOM: ,
# VK_NONAME: ,
# VK_PA1: ,
# VK_OEM_CLEAR: ,
# VK_XBUTTON1: ,
# VK_XBUTTON2: ,
# VK_VOLUME_MUTE: ,
# VK_VOLUME_DOWN: ,
# VK_VOLUME_UP: ,
# VK_MEDIA_NEXT_TRACK: ,
# VK_MEDIA_PREV_TRACK: ,
# VK_MEDIA_PLAY_PAUSE: ,
# VK_BROWSER_BACK: ,
# VK_BROWSER_FORWARD: ,
}
# Keys that must be translated via MapVirtualKey, as the virtual key code
# is language and keyboard dependent.
chmap = {
ord('!'): key.EXCLAMATION,
ord('"'): key.DOUBLEQUOTE,
ord('#'): key.HASH,
ord('$'): key.DOLLAR,
ord('%'): key.PERCENT,
ord('&'): key.AMPERSAND,
ord("'"): key.APOSTROPHE,
ord('('): key.PARENLEFT,
ord(')'): key.PARENRIGHT,
ord('*'): key.ASTERISK,
ord('+'): key.PLUS,
ord(','): key.COMMA,
ord('-'): key.MINUS,
ord('.'): key.PERIOD,
ord('/'): key.SLASH,
ord(':'): key.COLON,
ord(';'): key.SEMICOLON,
ord('<'): key.LESS,
ord('='): key.EQUAL,
ord('>'): key.GREATER,
ord('?'): key.QUESTION,
ord('@'): key.AT,
ord('['): key.BRACKETLEFT,
ord('\\'): key.BACKSLASH,
ord(']'): key.BRACKETRIGHT,
ord('\x5e'): key.ASCIICIRCUM,
ord('_'): key.UNDERSCORE,
ord('\x60'): key.GRAVE,
ord('`'): key.QUOTELEFT,
ord('{'): key.BRACELEFT,
ord('|'): key.BAR,
ord('}'): key.BRACERIGHT,
ord('~'): key.ASCIITILDE,
}
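# --- Illustrative sketch (not part of the original module) ---
# Rough idea of how the two tables above could be combined: consult the
# virtual-key table first, then fall back to the character table after
# translating the key with the Win32 call MapVirtualKeyW (uMapType 2 is
# MAPVK_VK_TO_CHAR). The helper below is a hypothetical example only, not
# pyglet's actual translation routine.
def _example_symbol_for_vk(vk):
    import ctypes  # Windows only
    symbol = keymap.get(vk)
    if symbol is None:
        ch = ctypes.windll.user32.MapVirtualKeyW(vk, 2) & 0xffff
        symbol = chmap.get(ch)
    return symbol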
| 27.258621
| 78
| 0.579064
|
9e2365cfc8f49f3ad21270dfa99c546acdd8b50b
| 9,475
|
py
|
Python
|
test/mitmproxy/addons/test_proxyauth.py
|
luzpaz/mitmproxy
|
6b5b71aefaffebeea9eb0003a25f039686c5b785
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_proxyauth.py
|
luzpaz/mitmproxy
|
6b5b71aefaffebeea9eb0003a25f039686c5b785
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_proxyauth.py
|
luzpaz/mitmproxy
|
6b5b71aefaffebeea9eb0003a25f039686c5b785
|
[
"MIT"
] | null | null | null |
import binascii
import pytest
from unittest import mock
from mitmproxy import exceptions
from mitmproxy.addons import proxyauth
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
class TestMkauth:
def test_mkauth_scheme(self):
assert proxyauth.mkauth('username', 'password') == 'basic dXNlcm5hbWU6cGFzc3dvcmQ=\n'
@pytest.mark.parametrize('scheme, expected', [
('', ' dXNlcm5hbWU6cGFzc3dvcmQ=\n'),
('basic', 'basic dXNlcm5hbWU6cGFzc3dvcmQ=\n'),
('foobar', 'foobar dXNlcm5hbWU6cGFzc3dvcmQ=\n'),
])
def test_mkauth(self, scheme, expected):
assert proxyauth.mkauth('username', 'password', scheme) == expected
class TestParseHttpBasicAuth:
@pytest.mark.parametrize('input', [
'',
'foo bar',
'basic abc',
'basic ' + binascii.b2a_base64(b"foo").decode("ascii"),
])
def test_parse_http_basic_auth_error(self, input):
with pytest.raises(ValueError):
proxyauth.parse_http_basic_auth(input)
def test_parse_http_basic_auth(self):
input = proxyauth.mkauth("test", "test")
assert proxyauth.parse_http_basic_auth(input) == ("basic", "test", "test")
class TestProxyAuth:
@pytest.mark.parametrize('mode, expected', [
('', False),
('foobar', False),
('regular', True),
('upstream:', True),
('upstream:foobar', True),
])
def test_is_proxy_auth(self, mode, expected):
up = proxyauth.ProxyAuth()
with taddons.context(up) as ctx:
ctx.options.mode = mode
assert up.is_proxy_auth() is expected
@pytest.mark.parametrize('is_proxy_auth, expected', [
(True, 'Proxy-Authorization'),
(False, 'Authorization'),
])
def test_which_auth_header(self, is_proxy_auth, expected):
up = proxyauth.ProxyAuth()
with mock.patch('mitmproxy.addons.proxyauth.ProxyAuth.is_proxy_auth', return_value=is_proxy_auth):
assert up.which_auth_header() == expected
@pytest.mark.parametrize('is_proxy_auth, expected_status_code, expected_header', [
(True, 407, 'Proxy-Authenticate'),
(False, 401, 'WWW-Authenticate'),
])
def test_auth_required_response(self, is_proxy_auth, expected_status_code, expected_header):
up = proxyauth.ProxyAuth()
with mock.patch('mitmproxy.addons.proxyauth.ProxyAuth.is_proxy_auth', return_value=is_proxy_auth):
resp = up.auth_required_response()
assert resp.status_code == expected_status_code
assert expected_header in resp.headers.keys()
def test_check(self):
up = proxyauth.ProxyAuth()
with taddons.context(up) as ctx:
ctx.configure(up, proxyauth="any", mode="regular")
f = tflow.tflow()
assert not up.check(f)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
assert up.check(f)
f.request.headers["Proxy-Authorization"] = "invalid"
assert not up.check(f)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test", scheme="unknown"
)
assert not up.check(f)
ctx.configure(up, proxyauth="test:test")
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
assert up.check(f)
ctx.configure(up, proxyauth="test:foo")
assert not up.check(f)
ctx.configure(
up,
proxyauth="@" + tutils.test_data.path(
"mitmproxy/net/data/htpasswd"
)
)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
assert up.check(f)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "foo"
)
assert not up.check(f)
with mock.patch('ldap3.Server', return_value="ldap://fake_server:389 - cleartext"):
with mock.patch('ldap3.Connection', search="test"):
with mock.patch('ldap3.Connection.search', return_value="test"):
ctx.configure(
up,
proxyauth="ldap:localhost:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com"
)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
assert up.check(f)
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"", ""
)
assert not up.check(f)
def test_authenticate(self):
up = proxyauth.ProxyAuth()
with taddons.context(up) as ctx:
ctx.configure(up, proxyauth="any", mode="regular")
f = tflow.tflow()
assert not f.response
up.authenticate(f)
assert f.response.status_code == 407
f = tflow.tflow()
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
up.authenticate(f)
assert not f.response
assert not f.request.headers.get("Proxy-Authorization")
f = tflow.tflow()
ctx.configure(up, mode="reverse")
assert not f.response
up.authenticate(f)
assert f.response.status_code == 401
f = tflow.tflow()
f.request.headers["Authorization"] = proxyauth.mkauth(
"test", "test"
)
up.authenticate(f)
assert not f.response
assert not f.request.headers.get("Authorization")
def test_configure(self):
up = proxyauth.ProxyAuth()
with taddons.context(up) as ctx:
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="foo")
ctx.configure(up, proxyauth="foo:bar")
assert up.singleuser == ["foo", "bar"]
ctx.configure(up, proxyauth=None)
assert up.singleuser is None
ctx.configure(up, proxyauth="any")
assert up.nonanonymous
ctx.configure(up, proxyauth=None)
assert not up.nonanonymous
with mock.patch('ldap3.Server', return_value="ldap://fake_server:389 - cleartext"):
with mock.patch('ldap3.Connection', return_value="test"):
ctx.configure(up, proxyauth="ldap:localhost:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com")
assert up.ldapserver
ctx.configure(up, proxyauth="ldaps:localhost:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com")
assert up.ldapserver
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="ldap:test:test:test")
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="ldap:fake_serveruid=?dc=example,dc=com:person")
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="ldapssssssss:fake_server:dn:password:tree")
with pytest.raises(exceptions.OptionsError):
ctx.configure(
up,
proxyauth= "@" + tutils.test_data.path("mitmproxy/net/data/server.crt")
)
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="@nonexistent")
ctx.configure(
up,
proxyauth= "@" + tutils.test_data.path(
"mitmproxy/net/data/htpasswd"
)
)
assert up.htpasswd
assert up.htpasswd.check_password("test", "test")
assert not up.htpasswd.check_password("test", "foo")
ctx.configure(up, proxyauth=None)
assert not up.htpasswd
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="any", mode="transparent")
with pytest.raises(exceptions.OptionsError):
ctx.configure(up, proxyauth="any", mode="socks5")
def test_handlers(self):
up = proxyauth.ProxyAuth()
with taddons.context(up) as ctx:
ctx.configure(up, proxyauth="any", mode="regular")
f = tflow.tflow()
assert not f.response
up.requestheaders(f)
assert f.response.status_code == 407
f = tflow.tflow()
f.request.method = "CONNECT"
assert not f.response
up.http_connect(f)
assert f.response.status_code == 407
f = tflow.tflow()
f.request.method = "CONNECT"
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth(
"test", "test"
)
up.http_connect(f)
assert not f.response
f2 = tflow.tflow(client_conn=f.client_conn)
up.requestheaders(f2)
assert not f2.response
assert f2.metadata["proxyauth"] == ('test', 'test')
| 37.599206
| 133
| 0.564433
|
dbc908d24b4fafa7266400c9cdda5d0858cc4a9c
| 2,382
|
py
|
Python
|
libs/numpy/core/tests/test_abc.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
libs/numpy/core/tests/test_abc.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
libs/numpy/core/tests/test_abc.py
|
rocketbot-cl/recognition
|
cca8a87070ccaca3a26e37345c36ab1bf836e258
|
[
"MIT"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
from numpy.testing import assert_
import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
class TestABC:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
| 43.309091
| 74
| 0.585642
|
f440b3dfb878daa427c71f6b56879c882bee2420
| 412
|
py
|
Python
|
test/hummingbot/logger/test_logger_util_functions.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 542
|
2021-12-17T22:34:31.000Z
|
2022-03-31T14:36:23.000Z
|
test/hummingbot/logger/test_logger_util_functions.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 291
|
2021-12-17T20:07:53.000Z
|
2022-03-31T11:07:23.000Z
|
test/hummingbot/logger/test_logger_util_functions.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 220
|
2021-12-17T12:41:23.000Z
|
2022-03-31T23:03:22.000Z
|
import unittest
from dataclasses import dataclass
from hummingbot.logger import log_encoder
class LoggerUtilFunctionsTest(unittest.TestCase):
def test_log_encoder_encodes_dataclasses(self):
@dataclass
class DummyDataClass:
one: int
two: float
encoded = log_encoder(DummyDataClass(one=1, two=2.0))
self.assertEqual({"one": 1, "two": 2.0}, encoded)
| 24.235294
| 61
| 0.684466
|
14c23434c700772b53d94d7c4d9915772529715a
| 9,665
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
xdevrepo/XREPCoin
|
dacc88443b7f6285595f69feadaea99b9c7f1a86
|
[
"MIT"
] | 1
|
2018-11-04T21:39:53.000Z
|
2018-11-04T21:39:53.000Z
|
contrib/bitrpc/bitrpc.py
|
xdevrepo/XREPCoin
|
dacc88443b7f6285595f69feadaea99b9c7f1a86
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
xdevrepo/XREPCoin
|
dacc88443b7f6285595f69feadaea99b9c7f1a86
|
[
"MIT"
] | null | null | null |
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:15505")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:15505")
cmd = sys.argv[1].lower()
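# Example invocations (hypothetical values), based on the dispatch below:
#   python bitrpc.py getinfo
#   python bitrpc.py getbalance
# The first command-line argument selects the RPC call; any further input is
# collected interactively via raw_input()/getpass() prompts.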
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 28.594675
| 101
| 0.573513
|
aa7e3f576a126e92732fd46580b18cee6c6cf4d4
| 28,974
|
py
|
Python
|
txdav/caldav/datastore/scheduling/imip/test/test_outbound.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 462
|
2016-08-14T17:43:24.000Z
|
2022-03-17T07:38:16.000Z
|
txdav/caldav/datastore/scheduling/imip/test/test_outbound.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 72
|
2016-09-01T23:19:35.000Z
|
2020-02-05T02:09:26.000Z
|
txdav/caldav/datastore/scheduling/imip/test/test_outbound.py
|
backwardn/ccs-calendarserver
|
13c706b985fb728b9aab42dc0fef85aae21921c3
|
[
"Apache-2.0"
] | 171
|
2016-08-16T03:50:30.000Z
|
2022-03-26T11:49:55.000Z
|
##
# Copyright (c) 2008-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from cStringIO import StringIO
from pycalendar.datetime import DateTime
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.trial import unittest
from twisted.web.template import Element, renderer, flattenString
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.imip.outbound import IMIPInvitationWork
from txdav.caldav.datastore.scheduling.imip.outbound import MailSender
from txdav.caldav.datastore.scheduling.imip.outbound import StringFormatTemplateLoader
from txdav.common.datastore.test.util import buildStore
from twext.enterprise.jobs.jobitem import JobItem
import email
from email.iterators import typed_subpart_iterator
import os
initialInviteText = u"""BEGIN:VCALENDAR
VERSION:2.0
METHOD:REQUEST
BEGIN:VEVENT
UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
DTSTART:20200325T154500Z
DTEND:20200325T164500Z
ATTENDEE;CN=Th\xe9 Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRU
E:mailto:attendee@example.com
ATTENDEE;CN=Th\xe9 Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer@example.com;P
ARTSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
ATTENDEE;CN=An Attendee without CUTYPE;EMAIL=nocutype@example.com;PARTSTAT=A
CCEPTED:urn:uuid:4DB528DC-3E60-44FA-9546-2A00FCDCFFAB
ATTENDEE;EMAIL=nocn@example.com;PARTSTAT=ACCEPTED:urn:uuid:A592CF8B-4FC8-4E4
F-B543-B2F29A7EEB0B
ORGANIZER;CN=Th\xe9 Organizer;EMAIL=organizer@example.com:urn:uuid:C3B38B00-
4166-11DD-B22C-A07C87E02F6A
SUMMARY:testing outbound( )\\nEmbedded: Header
DESCRIPTION:awesome description with "<" and "&"
END:VEVENT
END:VCALENDAR
""".encode("utf-8")
inviteTextNoTimezone = u"""BEGIN:VCALENDAR
VERSION:2.0
METHOD:REQUEST
BEGIN:VEVENT
UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
DTSTART;TZID=America/New_York:20200325T154500
DTEND;TZID=America/New_York:20200325T164500Z
ATTENDEE;CN=Th\xe9 Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRU
E:mailto:attendee@example.com
ATTENDEE;CN=Th\xe9 Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer@example.com;P
ARTSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
ATTENDEE;CN=An Attendee without CUTYPE;EMAIL=nocutype@example.com;PARTSTAT=A
CCEPTED:urn:uuid:4DB528DC-3E60-44FA-9546-2A00FCDCFFAB
ATTENDEE;EMAIL=nocn@example.com;PARTSTAT=ACCEPTED:urn:uuid:A592CF8B-4FC8-4E4
F-B543-B2F29A7EEB0B
ORGANIZER;CN=Th\xe9 Organizer;EMAIL=organizer@example.com:urn:uuid:C3B38B00-
4166-11DD-B22C-A07C87E02F6A
SUMMARY:t\xe9sting outbound( )
DESCRIPTION:awesome description with "<" and "&"
END:VEVENT
END:VCALENDAR
""".encode("utf-8")
inviteTextWithTimezone = u"""BEGIN:VCALENDAR
VERSION:2.0
METHOD:REQUEST
BEGIN:VTIMEZONE
TZID:America/New_York
X-LIC-LOCATION:America/New_York
BEGIN:STANDARD
DTSTART:18831118T120358
RDATE:18831118T120358
TZNAME:EST
TZOFFSETFROM:-045602
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19180331T020000
RRULE:FREQ=YEARLY;UNTIL=19190330T070000Z;BYDAY=-1SU;BYMONTH=3
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:19181027T020000
RRULE:FREQ=YEARLY;UNTIL=19191026T060000Z;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:STANDARD
DTSTART:19200101T000000
RDATE:19200101T000000
RDATE:19420101T000000
RDATE:19460101T000000
RDATE:19670101T000000
TZNAME:EST
TZOFFSETFROM:-0500
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19200328T020000
RDATE:19200328T020000
RDATE:19740106T020000
RDATE:19750223T020000
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:19201031T020000
RDATE:19201031T020000
RDATE:19450930T020000
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19210424T020000
RRULE:FREQ=YEARLY;UNTIL=19410427T070000Z;BYDAY=-1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:19210925T020000
RRULE:FREQ=YEARLY;UNTIL=19410928T060000Z;BYDAY=-1SU;BYMONTH=9
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19420209T020000
RDATE:19420209T020000
TZNAME:EWT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:DAYLIGHT
DTSTART:19450814T190000
RDATE:19450814T190000
TZNAME:EPT
TZOFFSETFROM:-0400
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:DAYLIGHT
DTSTART:19460428T020000
RRULE:FREQ=YEARLY;UNTIL=19660424T070000Z;BYDAY=-1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:19460929T020000
RRULE:FREQ=YEARLY;UNTIL=19540926T060000Z;BYDAY=-1SU;BYMONTH=9
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:STANDARD
DTSTART:19551030T020000
RRULE:FREQ=YEARLY;UNTIL=19661030T060000Z;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19670430T020000
RRULE:FREQ=YEARLY;UNTIL=19730429T070000Z;BYDAY=-1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:19671029T020000
RRULE:FREQ=YEARLY;UNTIL=20061029T060000Z;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:19760425T020000
RRULE:FREQ=YEARLY;UNTIL=19860427T070000Z;BYDAY=-1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:DAYLIGHT
DTSTART:19870405T020000
RRULE:FREQ=YEARLY;UNTIL=20060402T070000Z;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
DTSTART;TZID=America/New_York:20200325T154500
DTEND;TZID=America/New_York:20200325T164500Z
ATTENDEE;CN=Th\xe9 Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRU
E:mailto:attendee@example.com
ATTENDEE;CN=Th\xe9 Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer@example.com;P
ARTSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
ATTENDEE;CN=An Attendee without CUTYPE;EMAIL=nocutype@example.com;PARTSTAT=A
CCEPTED:urn:uuid:4DB528DC-3E60-44FA-9546-2A00FCDCFFAB
ATTENDEE;EMAIL=nocn@example.com;PARTSTAT=ACCEPTED:urn:uuid:A592CF8B-4FC8-4E4
F-B543-B2F29A7EEB0B
ORGANIZER;CN=Th\xe9 Organizer;EMAIL=organizer@example.com:urn:uuid:C3B38B00-
4166-11DD-B22C-A07C87E02F6A
SUMMARY:t\xe9sting outbound( )
DESCRIPTION:awesome description with "<" and "&"
END:VEVENT
END:VCALENDAR
""".encode("utf-8")
ORGANIZER = "urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A"
ATTENDEE = "mailto:attendee@example.com"
ICALUID = "CFDD5E46-4F74-478A-9311-B3FF905449C3"
class DummySMTPSender(object):
def __init__(self):
self.reset()
self.shouldSucceed = True
def reset(self):
self.sendMessageCalled = False
self.fromAddr = None
self.toAddr = None
self.msgId = None
self.message = None
def sendMessage(self, fromAddr, toAddr, msgId, message):
self.sendMessageCalled = True
self.fromAddr = fromAddr
self.toAddr = toAddr
self.msgId = msgId
self.message = message
return succeed(self.shouldSucceed)
class OutboundTests(unittest.TestCase):
@inlineCallbacks
def setUp(self):
self.store = yield buildStore(self, None)
self.directory = self.store.directoryService()
self.sender = MailSender(
"server@example.com", 7, DummySMTPSender(),
language="en")
def _getSender(ignored):
return self.sender
self.patch(IMIPInvitationWork, "getMailSender", _getSender)
@inlineCallbacks
def test_work(self):
txn = self.store.newTransaction()
yield txn.enqueue(
IMIPInvitationWork,
fromAddr=ORGANIZER,
toAddr=ATTENDEE,
icalendarText=initialInviteText.replace("\n", "\r\n"),
)
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
txn = self.store.newTransaction()
record = (yield txn.imipGetToken(
ORGANIZER,
ATTENDEE,
ICALUID
))
self.assertTrue(record is not None)
record = (yield txn.imipLookupByToken(record.token))[0]
yield txn.commit()
self.assertEquals(record.organizer, ORGANIZER)
self.assertEquals(record.attendee, ATTENDEE)
self.assertEquals(record.icaluid, ICALUID)
@inlineCallbacks
def test_workFailure(self):
self.sender.smtpSender.shouldSucceed = False
txn = self.store.newTransaction()
yield txn.enqueue(
IMIPInvitationWork,
fromAddr=ORGANIZER,
toAddr=ATTENDEE,
icalendarText=initialInviteText.replace("\n", "\r\n"),
)
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def _interceptEmail(
self, inviteState, calendar, orgEmail, orgCn,
attendees, fromAddress, replyToAddress, toAddress, language="en"
):
self.inviteState = inviteState
self.calendar = calendar
self.orgEmail = orgEmail
self.orgCn = orgCn
self.attendees = attendees
self.fromAddress = fromAddress
self.replyToAddress = replyToAddress
self.toAddress = toAddress
self.language = language
self.results = self._actualGenerateEmail(
inviteState, calendar,
orgEmail, orgCn, attendees, fromAddress, replyToAddress, toAddress,
language=language)
return self.results
@inlineCallbacks
def test_outbound(self):
"""
Make sure outbound( ) stores tokens properly so they can be looked up
"""
config.Scheduling.iMIP.Sending.Address = "server@example.com"
self.patch(config.Localization, "LocalesDirectory", os.path.join(os.path.dirname(__file__), "locales"))
self._actualGenerateEmail = self.sender.generateEmail
self.patch(self.sender, "generateEmail", self._interceptEmail)
data = (
# Initial invite
(
initialInviteText,
"CFDD5E46-4F74-478A-9311-B3FF905449C3",
"urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A",
"mailto:attendee@example.com",
"new",
"organizer@example.com",
u"Th\xe9 Organizer",
[
(u'Th\xe9 Attendee', u'attendee@example.com'),
(u'Th\xe9 Organizer', u'organizer@example.com'),
(u'An Attendee without CUTYPE', u'nocutype@example.com'),
(None, u'nocn@example.com'),
],
u"Th\xe9 Organizer <organizer@example.com>",
"=?utf-8?q?Th=C3=A9_Organizer_=3Corganizer=40example=2Ecom=3E?=",
"attendee@example.com",
"Event invitation: testing outbound( ) Embedded: Header",
),
# Update
(
u"""BEGIN:VCALENDAR
VERSION:2.0
METHOD:REQUEST
BEGIN:VEVENT
UID:CFDD5E46-4F74-478A-9311-B3FF905449C3
DTSTART:20100325T154500Z
DTEND:20100325T164500Z
ATTENDEE;CN=Th\xe9 Attendee;CUTYPE=INDIVIDUAL;PARTSTAT=NEEDS-ACTION;RSVP=TRUE:
mailto:attendee@example.com
ATTENDEE;CN=Th\xe9 Organizer;CUTYPE=INDIVIDUAL;EMAIL=organizer@example.com;PAR
TSTAT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
ORGANIZER;CN=Th\xe9 Organizer;EMAIL=organizer@example.com:urn:uuid:C3B38B00-41
66-11DD-B22C-A07C87E02F6A
SUMMARY:t\xe9sting outbound( ) *update*
END:VEVENT
END:VCALENDAR
""".encode("utf-8"),
"CFDD5E46-4F74-478A-9311-B3FF905449C3",
"urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A",
"mailto:attendee@example.com",
"update",
"organizer@example.com",
u"Th\xe9 Organizer",
[
(u'Th\xe9 Attendee', u'attendee@example.com'),
(u'Th\xe9 Organizer', u'organizer@example.com')
],
u"Th\xe9 Organizer <organizer@example.com>",
"=?utf-8?q?Th=C3=A9_Organizer_=3Corganizer=40example=2Ecom=3E?=",
"attendee@example.com",
"=?utf-8?q?Event_update=3A_t=C3=A9sting_outbound=28_=29_*update*?=",
),
# Reply
(
u"""BEGIN:VCALENDAR
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:DFDD5E46-4F74-478A-9311-B3FF905449C4
DTSTART:20100325T154500Z
DTEND:20100325T164500Z
ATTENDEE;CN=Th\xe9 Attendee;CUTYPE=INDIVIDUAL;EMAIL=attendee@example.com;PARTST
AT=ACCEPTED:urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A
ORGANIZER;CN=Th\xe9 Organizer;EMAIL=organizer@example.com:mailto:organizer@exam
ple.com
SUMMARY:t\xe9sting outbound( ) *reply*
END:VEVENT
END:VCALENDAR
""".encode("utf-8"),
None,
"urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A",
"mailto:organizer@example.com",
"reply",
"organizer@example.com",
u"Th\xe9 Organizer",
[
(u'Th\xe9 Attendee', u'attendee@example.com'),
],
"attendee@example.com",
"attendee@example.com",
"organizer@example.com",
"=?utf-8?q?Event_reply=3A_t=C3=A9sting_outbound=28_=29_*reply*?=",
),
)
for (
inputCalendar, UID, inputOriginator, inputRecipient, inviteState,
outputOrganizerEmail, outputOrganizerName, outputAttendeeList,
outputFrom, encodedFrom, outputRecipient, outputSubject
) in data:
txn = self.store.newTransaction()
yield self.sender.outbound(
txn,
inputOriginator,
inputRecipient,
Component.fromString(inputCalendar.replace("\n", "\r\n")),
onlyAfter=DateTime(2010, 1, 1, 0, 0, 0)
)
yield txn.commit()
msg = email.message_from_string(self.sender.smtpSender.message)
self.assertEquals(msg["From"], encodedFrom)
self.assertEquals(self.inviteState, inviteState)
self.assertEquals(self.orgEmail, outputOrganizerEmail)
self.assertEquals(self.orgCn, outputOrganizerName)
self.assertEquals(self.attendees, outputAttendeeList)
self.assertEquals(self.fromAddress, outputFrom)
self.assertEquals(self.toAddress, outputRecipient)
self.assertEquals(msg["Subject"], outputSubject)
if UID: # The organizer is local, and server is sending to remote
# attendee
txn = self.store.newTransaction()
record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
yield txn.commit()
self.assertNotEquals(record, None)
self.assertEquals(
msg["Reply-To"],
"server+%s@example.com" % (record.token,))
# Make sure attendee property for organizer exists and matches
# the CUA of the organizer property
orgValue = self.calendar.getOrganizerProperty().value()
self.assertEquals(
orgValue,
self.calendar.getAttendeeProperty([orgValue]).value()
)
else: # Reply only -- the attendee is local, and server is sending reply to remote organizer
self.assertEquals(msg["Reply-To"], self.fromAddress)
# Check that we don't send any messages for events completely in
# the past.
self.sender.smtpSender.reset()
txn = self.store.newTransaction()
yield self.sender.outbound(
txn,
inputOriginator,
inputRecipient,
Component.fromString(inputCalendar.replace("\n", "\r\n")),
onlyAfter=DateTime(2021, 1, 1, 0, 0, 0)
)
yield txn.commit()
self.assertFalse(self.sender.smtpSender.sendMessageCalled)
@inlineCallbacks
def test_tokens(self):
txn = self.store.newTransaction()
self.assertEquals((yield txn.imipLookupByToken("xyzzy")), [])
yield txn.commit()
txn = self.store.newTransaction()
record1 = (yield txn.imipCreateToken("organizer", "attendee", "icaluid"))
yield txn.commit()
txn = self.store.newTransaction()
record2 = (yield txn.imipGetToken("organizer", "attendee", "icaluid"))
yield txn.commit()
self.assertEquals(record1.token, record2.token)
txn = self.store.newTransaction()
record = (yield txn.imipLookupByToken(record1.token))[0]
self.assertEquals(
[record.organizer, record.attendee, record.icaluid],
["organizer", "attendee", "icaluid"])
yield txn.commit()
txn = self.store.newTransaction()
yield txn.imipRemoveToken(record1.token)
yield txn.commit()
txn = self.store.newTransaction()
self.assertEquals((yield txn.imipLookupByToken(record1.token)), [])
yield txn.commit()
@inlineCallbacks
def test_mailtoTokens(self):
"""
Make sure old mailto tokens are still honored
"""
organizerEmail = "mailto:organizer@example.com"
# Explicitly store a token with mailto: CUA for organizer
# (something that doesn't happen any more, but did in the past)
txn = self.store.newTransaction()
origRecord = (yield txn.imipCreateToken(
organizerEmail,
"mailto:attendee@example.com",
"CFDD5E46-4F74-478A-9311-B3FF905449C3"
))
yield txn.commit()
inputCalendar = initialInviteText
UID = "CFDD5E46-4F74-478A-9311-B3FF905449C3"
inputOriginator = "urn:uuid:C3B38B00-4166-11DD-B22C-A07C87E02F6A"
inputRecipient = "mailto:attendee@example.com"
txn = self.store.newTransaction()
yield self.sender.outbound(
txn, inputOriginator, inputRecipient,
Component.fromString(inputCalendar.replace("\n", "\r\n")),
onlyAfter=DateTime(2010, 1, 1, 0, 0, 0))
yield txn.commit()
# Verify we didn't create a new token...
txn = self.store.newTransaction()
record = (yield txn.imipGetToken(inputOriginator, inputRecipient, UID))
yield txn.commit()
self.assertEquals(record, None)
# But instead kept the old one...
txn = self.store.newTransaction()
record = (yield txn.imipGetToken(organizerEmail, inputRecipient, UID))
yield txn.commit()
self.assertEquals(record.token, origRecord.token)
def generateSampleEmail(self, caltext=initialInviteText):
"""
Invoke L{MailHandler.generateEmail} and parse the result.
"""
calendar = Component.fromString(caltext)
msgID, msgTxt = self.sender.generateEmail(
inviteState='new',
calendar=calendar,
orgEmail=u"user01@localhost",
orgCN=u"User Z\xe9ro One",
attendees=[(u"Us\xe9r One", "user01@localhost"),
(u"User 2", "user02@localhost")],
fromAddress="user01@localhost",
replyToAddress="imip-system@localhost",
toAddress="user03@localhost",
)
message = email.message_from_string(msgTxt)
return msgID, message
def test_generateEmail(self):
"""
L{MailHandler.generateEmail} generates a MIME-formatted email with a
text/plain part, a text/html part, and a text/calendar part.
"""
msgID, message = self.generateSampleEmail()
self.assertEquals(message['Message-ID'], msgID)
expectedTypes = set(["text/plain", "text/html", "text/calendar"])
actualTypes = set([
part.get_content_type() for part in message.walk()
if part.get_content_type().startswith("text/")
])
self.assertEquals(actualTypes, expectedTypes)
def test_generateEmail_noOrganizerCN(self):
"""
L{MailHandler.generateEmail} generates a MIME-formatted email when
the organizer property has no CN parameter.
"""
calendar = Component.fromString(initialInviteText)
_ignore_msgID, msgTxt = self.sender.generateEmail(
inviteState='new',
calendar=calendar,
orgEmail=u"user01@localhost",
orgCN=None,
attendees=[(u"Us\xe9r One", "user01@localhost"),
(u"User 2", "user02@localhost")],
fromAddress="user01@localhost",
replyToAddress="imip-system@localhost",
toAddress="user03@localhost",
)
message = email.message_from_string(msgTxt)
self.assertTrue(message is not None)
def test_generateEmail_noAttendeeCN(self):
"""
L{MailHandler.generateEmail} generates a MIME-formatted email when
the attendee property has no CN parameter.
"""
calendar = Component.fromString(initialInviteText)
_ignore_msgID, msgTxt = self.sender.generateEmail(
inviteState='new',
calendar=calendar,
orgEmail=u"user01@localhost",
orgCN=u"User Z\xe9ro One",
attendees=[(None, "user01@localhost"),
(None, "user02@localhost")],
fromAddress="user01@localhost",
replyToAddress="imip-system@localhost",
toAddress="user03@localhost",
)
message = email.message_from_string(msgTxt)
self.assertTrue(message is not None)
def test_messageID(self):
"""
L{SMTPSender.betterMessageID} generates a Message-ID domain matching
the L{config.ServerHostName} value.
"""
self.patch(config, "ServerHostName", "calendar.example.com")
msgID, message = self.generateSampleEmail()
self.assertEquals(message['Message-ID'], msgID)
self.assertEqual(msgID[:-1].split("@")[1], config.ServerHostName)
def test_alwaysIncludeTimezones(self):
"""
L{MailHandler.generateEmail} generates a MIME-formatted email with a
text/plain part, a text/html part, and a text/calendar part.
"""
_ignore, message = self.generateSampleEmail(inviteTextWithTimezone)
calparts = tuple(typed_subpart_iterator(message, "text", "calendar"))
self.assertEqual(len(calparts), 1)
caldata = calparts[0].get_payload(decode=True)
self.assertTrue("BEGIN:VTIMEZONE" in caldata)
self.assertTrue("TZID:America/New_York" in caldata)
_ignore, message = self.generateSampleEmail(inviteTextNoTimezone)
calparts = tuple(typed_subpart_iterator(message, "text", "calendar"))
self.assertEqual(len(calparts), 1)
caldata = calparts[0].get_payload(decode=True)
self.assertTrue("BEGIN:VTIMEZONE" in caldata)
self.assertTrue("TZID:America/New_York" in caldata)
def test_emailEncoding(self):
"""
L{MailHandler.generateEmail} will preserve any non-ASCII characters
present in the fields that it formats in the message body.
"""
_ignore_msgID, message = self.generateSampleEmail()
textPart = partByType(message, "text/plain")
htmlPart = partByType(message, "text/html")
plainText = textPart.get_payload(decode=True).decode(
textPart.get_content_charset()
)
htmlText = htmlPart.get_payload(decode=True).decode(
htmlPart.get_content_charset()
)
self.assertIn(u"Us\u00e9r One", plainText)
self.assertIn(u'<a href="mailto:user01@localhost">Us\u00e9r One</a>',
htmlText)
# The same assertion, but with the organizer's form.
self.assertIn(
u'<a href="mailto:user01@localhost">User Z\u00e9ro One</a>',
htmlText)
def test_emailQuoting(self):
"""
L{MailHandler.generateEmail} will HTML-quote all relevant fields in the
HTML part, but not the text/plain part.
"""
_ignore_msgID, message = self.generateSampleEmail()
htmlPart = partByType(message, "text/html").get_payload(decode=True)
plainPart = partByType(message, "text/plain").get_payload(decode=True)
expectedPlain = 'awesome description with "<" and "&"'
expectedHTML = expectedPlain.replace("&", "&amp;").replace("<", "&lt;")
self.assertIn(expectedPlain, plainPart)
self.assertIn(expectedHTML, htmlPart)
def test_stringFormatTemplateLoader(self):
"""
L{StringFormatTemplateLoader.load} will convert a template with
C{%(x)s}-format slots by converting it to a template with C{<t:slot
name="x" />} slots, and a renderer on the document element named
according to the constructor argument.
"""
class StubElement(Element):
loader = StringFormatTemplateLoader(
lambda: StringIO(
"<test><alpha>%(slot1)s</alpha>%(other)s</test>"
),
"testRenderHere"
)
@renderer
def testRenderHere(self, request, tag):
return tag.fillSlots(slot1="hello",
other="world")
result = []
flattenString(None, StubElement()).addCallback(result.append)
self.assertEquals(
list(result),
["<test><alpha>hello</alpha>world</test>"]
)
def test_templateLoaderWithAttributes(self):
"""
L{StringFormatTemplateLoader.load} will convert a template with
C{%(x)s}-format slots inside attributes into t:attr elements containing
t:slot slots.
"""
class StubElement(Element):
loader = StringFormatTemplateLoader(
lambda: StringIO(
'<test><alpha beta="before %(slot1)s after">inner</alpha>'
'%(other)s</test>'
),
"testRenderHere"
)
@renderer
def testRenderHere(self, request, tag):
return tag.fillSlots(slot1="hello",
other="world")
result = []
flattenString(None, StubElement()).addCallback(result.append)
self.assertEquals(
result,
[
'<test><alpha beta="before hello after">'
'inner</alpha>world</test>'
]
)
def test_templateLoaderTagSoup(self):
"""
L{StringFormatTemplateLoader.load} will convert a template with
C{%(x)s}-format slots into t:slot slots, and render a well-formed output
document, even if the input is malformed (i.e. missing necessary closing
tags).
"""
class StubElement(Element):
loader = StringFormatTemplateLoader(
lambda: StringIO(
'<test><alpha beta="before %(slot1)s after">inner</alpha>'
'%(other)s'
),
"testRenderHere"
)
@renderer
def testRenderHere(self, request, tag):
return tag.fillSlots(slot1="hello",
other="world")
result = []
flattenString(None, StubElement()).addCallback(result.append)
self.assertEquals(result,
['<test><alpha beta="before hello after">'
'inner</alpha>world</test>'])
def test_scrubHeader(self):
self.assertEquals(self.sender._scrubHeader("ABC"), "ABC")
self.assertEquals(self.sender._scrubHeader("ABC: 123\nXYZ: 456"), "ABC: 123 XYZ: 456")
def partByType(message, contentType):
"""
Retrieve a MIME part from an L{email.message.Message} based on a content
type.
"""
for part in message.walk():
if part.get_content_type() == contentType:
return part
raise KeyError(contentType)
| 35.12
| 111
| 0.655312
|
e348301d55217d9c0911a9f9d197fcc801a557b8
| 5,526
|
py
|
Python
|
versions/live_ready_versions/version_axle/core/query.py
|
di2ag/bkb-pathway-provider
|
42824f22868c5c5d777da3facb4209744bcc6f96
|
[
"MIT"
] | null | null | null |
versions/live_ready_versions/version_axle/core/query.py
|
di2ag/bkb-pathway-provider
|
42824f22868c5c5d777da3facb4209744bcc6f96
|
[
"MIT"
] | 7
|
2021-01-13T22:25:46.000Z
|
2021-07-29T15:26:06.000Z
|
versions/live_ready_versions/version_axle/core/query.py
|
NCATSTranslator/chp
|
00668fd3d50a48fdd75abbeacaf173a3ad41942d
|
[
"Apache-2.0"
] | 2
|
2021-01-14T19:06:24.000Z
|
2021-01-26T15:02:12.000Z
|
import os
import sys
import pickle
import json
class Query:
def __init__(self, evidence=None, targets=None, marginal_evidence=None, type='updating', name='query0', meta_evidence=None, meta_targets=None):
# Use None defaults so a single mutable dict/list is not shared between instances.
self.evidence = evidence if evidence is not None else dict()
self.targets = targets if targets is not None else list()
self.marginal_evidence = marginal_evidence
self.type = type
self.name = name
self.meta_evidence = meta_evidence
self.meta_targets = meta_targets
self.result = None
self.bkb = None
self.independ_queries = None
self.independ_result = None
self.compute_time = -1
def save(self, directory, only_json=False):
if not only_json:
#-- Save a pickle file
pickle_filename = os.path.join(directory, '{}.pk'.format(self.name))
with open(pickle_filename, 'wb') as pickle_file:
pickle.dump(file=pickle_file, obj=self)
#-- Save out each piece in separate files
with open(os.path.join(directory, '{}.evid'.format(self.name)), 'w') as f_:
for comp_name, state_name in self.evidence.items():
f_.write('{},{}\n'.format(comp_name, state_name))
with open(os.path.join(directory, '{}.targ'.format(self.name)), 'w') as f_:
for comp_name in self.targets:
f_.write('{}\n'.format(comp_name))
#-- Save out bkb
if self.bkb is not None:
self.bkb.save('{}.bkb'.format(self.name))
#-- Save out JSON query info
json_dict = {'evidence': self.evidence,
'targets': self.targets,
'type': self.type,
'meta_evidence': self.meta_evidence,
'meta_targets': self.meta_targets}
#-- Potentially save out JSON Results
if self.result is not None:
inode_contrib = self.result.process_inode_contributions()
result_dict = {'result': {'Updates': self.result.process_updates(),
'Contributions': {' '.join(target): df.to_dict()
for target, df in self.result.contribs_to_dataframes(inode_contrib).items()},
'Explanations': self.getExplanations()}}
json_dict.update(result_dict)
json_file = os.path.join(directory, '{}.json'.format(self.name))
with open(json_file, 'w') as f_:
json.dump(json_dict, f_)
if only_json:
return json_file
else:
return pickle_filename, json_file
def read(self, query_file, file_format='pickle'):
if file_format == 'pickle':
with open(query_file, 'rb') as qf_:
return pickle.load(qf_)
elif file_format == 'json':
with open(query_file, 'r') as qf_:
query_dict = json.load(qf_)
return Query(**query_dict)
else:
raise ValueError('Unrecognized file format: {}'.format(file_format))
def getExplanations(self):
explain_dict = dict()
if self.independ_result is not None:
explain_dict['Assumptions'] = 'Query assumes independence between genetic evidence.'
else:
explain_dict['Assumptions'] = 'Query does not assume independence between genetic evidence.'
inode_dict = self.result.process_inode_contributions()
explain_dict['Sensitivity'] = list()
for target, contrib_dict in inode_dict.items():
target_str = ' '.join(target)
most_sig_inodes = list()
max_contrib = -1
for inode, contrib in contrib_dict.items():
inode_str = ' '.join(inode)
if contrib > max_contrib:
most_sig_inodes = [inode_str]
max_contrib = contrib
elif contrib == max_contrib:
most_sig_inodes.append(inode_str)
else:
continue
contrib_explain = 'The most sensitive variables for {} are {}'.format(target_str,
', '.join(most_sig_inodes))
explain_dict['Sensitivity'].append(contrib_explain)
explain_dict['MostSignificantPatients'] = ['Unknown']
return explain_dict
def getReport(self):
string = '---- Query Details -----\n'
string += 'Demographic Evidence:\n'
if self.meta_evidence is not None:
for evid in self.meta_evidence:
string += '\t{} {} {}\n'.format(evid[0], evid[1], evid[2])
string += 'Evidence:\n'
for rvName, stateName in self.evidence.items():
string += '\t{} = {}\n'.format(rvName, stateName)
string += 'Targets:\n'
for target in self.targets:
string += '\t{}\n'.format(target)
print(string)
if self.result is not None:
self.result.summary()
print('Computed in {} sec.'.format(self.compute_time))
elif self.independ_result is not None:
print('---- Results Using Independence Assumption -----')
for update, state_dict in self.independ_result.items():
print('\t{}'.format(update))
for state, prob in state_dict.items():
print('\t\t{} = {}'.format(state, prob))
else:
print('No results found.')
| 43.511811
| 151
| 0.552479
|
f52a667be222b3211b704a9908c51d3c74c37f50
| 3,610
|
py
|
Python
|
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py
|
souterjk/dagster
|
8b744a4959bb04ff9587cfee82a796404fcbc89e
|
[
"Apache-2.0"
] | 1
|
2019-09-11T03:02:27.000Z
|
2019-09-11T03:02:27.000Z
|
import io
import uuid
from contextlib import contextmanager
from dagster import check, usable_as_dagster_type
from dagster.core.storage.file_manager import (
FileHandle,
FileManager,
TempfileManager,
check_file_like_obj,
)
from google.cloud import storage # type: ignore
@usable_as_dagster_type
class GCSFileHandle(FileHandle):
"""A reference to a file on GCS."""
def __init__(self, gcs_bucket: str, gcs_key: str):
self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")
self._gcs_key = check.str_param(gcs_key, "gcs_key")
@property
def gcs_bucket(self) -> str:
"""str: The name of the GCS bucket."""
return self._gcs_bucket
@property
def gcs_key(self) -> str:
"""str: The GCS key."""
return self._gcs_key
@property
def path_desc(self) -> str:
"""str: The file's GCS URL."""
return self.gcs_path
@property
def gcs_path(self) -> str:
"""str: The file's GCS URL."""
return "gs://{bucket}/{key}".format(bucket=self.gcs_bucket, key=self.gcs_key)
class GCSFileManager(FileManager):
def __init__(self, client, gcs_bucket, gcs_base_key):
self._client = check.inst_param(client, "client", storage.client.Client)
self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")
self._gcs_base_key = check.str_param(gcs_base_key, "gcs_base_key")
self._local_handle_cache = {}
self._temp_file_manager = TempfileManager()
def copy_handle_to_local_temp(self, file_handle):
self._download_if_not_cached(file_handle)
return self._get_local_path(file_handle)
def _download_if_not_cached(self, file_handle):
if not self._file_handle_cached(file_handle):
# instigate download
temp_file_obj = self._temp_file_manager.tempfile()
temp_name = temp_file_obj.name
bucket_obj = self._client.bucket(file_handle.gcs_bucket)
bucket_obj.blob(file_handle.gcs_key).download_to_file(temp_file_obj)
self._local_handle_cache[file_handle.gcs_path] = temp_name
return file_handle
@contextmanager
def read(self, file_handle, mode="rb"):
check.inst_param(file_handle, "file_handle", GCSFileHandle)
check.str_param(mode, "mode")
check.param_invariant(mode in {"r", "rb"}, "mode")
self._download_if_not_cached(file_handle)
with open(self._get_local_path(file_handle), mode) as file_obj:
yield file_obj
def _file_handle_cached(self, file_handle):
return file_handle.gcs_path in self._local_handle_cache
def _get_local_path(self, file_handle):
return self._local_handle_cache[file_handle.gcs_path]
def read_data(self, file_handle):
with self.read(file_handle, mode="rb") as file_obj:
return file_obj.read()
def write_data(self, data, ext=None):
check.inst_param(data, "data", bytes)
return self.write(io.BytesIO(data), mode="wb", ext=ext)
def write(self, file_obj, mode="wb", ext=None):
check_file_like_obj(file_obj)
gcs_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))
bucket_obj = self._client.bucket(self._gcs_bucket)
bucket_obj.blob(gcs_key).upload_from_file(file_obj)
return GCSFileHandle(self._gcs_bucket, gcs_key)
def get_full_key(self, file_key):
return "{base_key}/{file_key}".format(base_key=self._gcs_base_key, file_key=file_key)
def delete_local_temp(self):
self._temp_file_manager.close()
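# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical wiring of the manager; the bucket and base key names below
# are made up for illustration.
#
#   client = storage.Client()
#   manager = GCSFileManager(client, gcs_bucket="example-bucket",
#                            gcs_base_key="dagster/storage")
#   handle = manager.write_data(b"hello world", ext="txt")
#   assert manager.read_data(handle) == b"hello world"
#   manager.delete_local_temp()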
| 34.711538
| 97
| 0.681717
|
c518e36bfcee36d7f7bf258c2dd6a4493cf25629
| 10,812
|
py
|
Python
|
docs/source/conf.py
|
itbabu/cmsplugin-googleplus
|
084782bf8f9c01081563895fcc90f5390d8b127b
|
[
"MIT"
] | 1
|
2015-03-14T14:34:33.000Z
|
2015-03-14T14:34:33.000Z
|
docs/source/conf.py
|
itbabu/cmsplugin-googleplus
|
084782bf8f9c01081563895fcc90f5390d8b127b
|
[
"MIT"
] | 4
|
2015-02-08T13:13:45.000Z
|
2021-06-08T20:09:49.000Z
|
docs/source/conf.py
|
itbabu/cmsplugin-googleplus
|
084782bf8f9c01081563895fcc90f5390d8b127b
|
[
"MIT"
] | 1
|
2015-03-24T15:06:02.000Z
|
2015-03-24T15:06:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cmsplugin-googleplus documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 8 17:28:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cmsplugin-googleplus'
copyright = '2015, Marco Badan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.2'
# The full version, including alpha/beta/rc tags.
release = '0.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Use RTD theme locally
html_theme = 'default'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cmsplugin-googleplusdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'cmsplugin-googleplus.tex', 'cmsplugin-googleplus Documentation',
'Marco Badan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cmsplugin-googleplus', 'cmsplugin-googleplus Documentation',
['Marco Badan'], 1)
]
# Autodoc settings
autoclass_content = 'class'
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cmsplugin-googleplus', 'cmsplugin-googleplus Documentation',
'Marco Badan', 'cmsplugin-googleplus', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'cmsplugin-googleplus'
epub_author = 'Marco Badan'
epub_publisher = 'Marco Badan'
epub_copyright = '2015, Marco Badan'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'cmsplugin-googleplus'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 31.158501
| 80
| 0.720681
|
38fa0ff5c3e18f0f0d790ca35f5055d7fc548098
| 530
|
py
|
Python
|
3]. Competitive Programming/03]. LeetCode/1]. Problems/Python/0043)_Multiply_Strings.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 6,969
|
2021-05-29T11:38:30.000Z
|
2022-03-31T19:31:49.000Z
|
3]. Competitive Programming/03]. LeetCode/1]. Problems/Python/0043)_Multiply_Strings.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 75
|
2021-06-15T07:59:43.000Z
|
2022-02-22T14:21:52.000Z
|
3]. Competitive Programming/03]. LeetCode/1]. Problems/Python/0043)_Multiply_Strings.py
|
MLinesCode/The-Complete-FAANG-Preparation
|
2d0c7e8940eb2a58caaf4e978e548c08dd1f9a52
|
[
"MIT"
] | 1,524
|
2021-05-29T16:03:36.000Z
|
2022-03-31T17:46:13.000Z
|
class Solution:
def multiply(self, num1: str, num2: str) -> str:
res = [0] * (len(num1)+len(num2))
for i in range(len(num1)-1, -1, -1):
carry = 0
for j in range(len(num2)-1, -1, -1):
tmp = (ord(num1[i])-ord('0'))*(ord(num2[j])-ord('0')) + carry
carry = (res[i+j+1]+tmp) // 10
res[i+j+1] = (res[i+j+1]+tmp) % 10
res[i] += carry
res = ''.join(map(str, res))
return '0' if not res.lstrip('0') else res.lstrip('0')
| 44.166667
| 77
| 0.44717
|
20b15fd11283f07e48df3be4ec7ee21ec58143d2
| 607
|
py
|
Python
|
1019-next-greater-node-in-linked-list/1019-next-greater-node-in-linked-list.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
1019-next-greater-node-in-linked-list/1019-next-greater-node-in-linked-list.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
1019-next-greater-node-in-linked-list/1019-next-greater-node-in-linked-list.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
from typing import List, Optional


# Definition for singly-linked list (normally provided by LeetCode; defined
# here so the file runs on its own).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def nextLargerNodes(self, head: Optional[ListNode]) -> List[int]:
nums, curr = [], head
while curr:
nums.append(curr.val)
curr = curr.next
stack = []
result = [0] * len(nums)
for idx in range(len(nums)):
while stack and (nums[stack[len(stack)-1]] < nums[idx]):
result[stack.pop()] = nums[idx]
stack.append(idx)
return result
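# --- Illustrative driver (not part of the original solution) ---------------
# Builds the example list 2 -> 1 -> 5 and prints the next-greater values.
if __name__ == "__main__":
    head = ListNode(2, ListNode(1, ListNode(5)))
    print(Solution().nextLargerNodes(head))  # expected output: [5, 5, 0]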
| 33.722222
| 69
| 0.538715
|
8b473ac992a8deb0345735ce6b22c26ec32bdd88
| 793
|
py
|
Python
|
experiences/serializers.py
|
jordifierro/abidria-api
|
d7689783bf23fbe43c395b07572a1380654652cd
|
[
"MIT"
] | 93
|
2017-08-12T09:41:21.000Z
|
2022-03-19T20:04:41.000Z
|
experiences/serializers.py
|
jordifierro/abidria-api
|
d7689783bf23fbe43c395b07572a1380654652cd
|
[
"MIT"
] | 1
|
2017-10-09T16:49:10.000Z
|
2017-10-13T18:07:29.000Z
|
experiences/serializers.py
|
jordifierro/abidria-api
|
d7689783bf23fbe43c395b07572a1380654652cd
|
[
"MIT"
] | 25
|
2017-08-18T04:31:23.000Z
|
2022-02-20T20:31:47.000Z
|
from abidria.serializers import PictureSerializer
class MultipleExperiencesSerializer:
@staticmethod
def serialize(experiences):
return [ExperienceSerializer.serialize(experience) for experience in experiences]
class ExperienceSerializer:
@staticmethod
def serialize(experience):
return {
'id': str(experience.id),
'title': experience.title,
'description': experience.description,
'picture': PictureSerializer.serialize(experience.picture),
'author_id': experience.author_id,
'author_username': experience.author_username,
'is_mine': experience.is_mine,
'is_saved': experience.is_saved
}
| 31.72
| 89
| 0.614124
|
b7ce47f12e7582ce6d2e1e2169963a7b443b9328
| 16,508
|
py
|
Python
|
composer/algorithms/hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/algorithms/hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/algorithms/hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
import textwrap
from dataclasses import asdict, dataclass
from typing import Optional
import yahp as hp
from composer.algorithms.algorithm_hparams import AlgorithmHparams
from composer.algorithms.alibi import Alibi
from composer.algorithms.augmix import AugMix
from composer.algorithms.blurpool import BlurPool
from composer.algorithms.channels_last import ChannelsLast
from composer.algorithms.colout import ColOut
from composer.algorithms.cutmix import CutMix
from composer.algorithms.cutout import CutOut
from composer.algorithms.factorize import Factorize
from composer.algorithms.ghost_batchnorm import GhostBatchNorm
from composer.algorithms.label_smoothing import LabelSmoothing
from composer.algorithms.layer_freezing import LayerFreezing
from composer.algorithms.mixup import MixUp
from composer.algorithms.no_op_model import NoOpModel
from composer.algorithms.progressive_resizing import ProgressiveResizing
from composer.algorithms.randaugment import RandAugment
from composer.algorithms.sam import SAM
from composer.algorithms.scale_schedule import ScaleSchedule
from composer.algorithms.selective_backprop import SelectiveBackprop
from composer.algorithms.seq_length_warmup import SeqLengthWarmup
from composer.algorithms.squeeze_excite import SqueezeExcite
from composer.algorithms.stochastic_depth import StochasticDepth
from composer.algorithms.stochastic_depth.stochastic_depth import (_STOCHASTIC_LAYER_MAPPING,
_validate_stochastic_hparams)
from composer.algorithms.swa import SWA
@dataclass
class AlibiHparams(AlgorithmHparams):
"""See :class:`Alibi`"""
position_embedding_attribute: str = hp.required("attribute name of position embeddings within the model. "
"For example in HuggingFace's GPT2, the position "
"embeddings are 'transformer.wpe'")
attention_module_name: str = hp.required("module/class that will have its self-attention "
"function replaced. For example, in HuggingFace's "
"GPT, the self-attention module is "
"'transformers.models.gpt2.modeling_gpt2.GPT2Attention'")
attr_to_replace: str = hp.required("model attribute that self-attention function will "
"replace. For example, in HuggingFace's "
"GPT2, the self-attention function is '_attn'")
alibi_attention: str = hp.required("new self-attention function in which ALiBi is "
"implemented. Used to replace "
"'{attention_module}.{attr_to_replace}'")
mask_replacement_function: Optional[str] = hp.optional(
"function to replace model's attention mask. This is "
"sometimes necessary for evaluating on sequence "
" lengths longer than the model was initialized to accommodate.",
default=None)
heads_per_layer: Optional[int] = hp.optional(
'Number of attention heads per layer. If '
'"None", will attempt to determine from model.config.n_head.',
default=None)
max_sequence_length: int = hp.optional('Maximum allowable sequence length', default=8192)
train_sequence_length_scaling: float = hp.optional(
'Amount by which to scale training sequence length. One batch of training data '
'will be reshaped from size (sequence_length, batch) to '
'(sequence_length*train_sequence_length_scaling, batch/train_sequence_length_scaling)',
default=0.25)
def initialize_object(self) -> "Alibi":
return Alibi(**asdict(self))
@dataclass
class AugMixHparams(AlgorithmHparams):
"""See :class:`AugMix`"""
severity: int = hp.optional(doc="Intensity of each augmentation. Ranges from 0 (none) to 10 (maximum)", default=3)
depth: int = hp.optional(doc="Number of augmentations to compose in a row", default=-1)
width: int = hp.optional(doc="Number of parallel augmentation sequences to combine", default=3)
alpha: float = hp.optional(doc="Mixing parameter for clean vs. augmented images.", default=1.0)
augmentation_set: str = hp.optional(
doc=
"Set of augmentations to sample from. 'all', 'safe' (only augmentations that don't appear on CIFAR10C/ImageNet10C), or 'original'",
default="all")
def initialize_object(self) -> AugMix:
return AugMix(**asdict(self))
@dataclass
class BlurPoolHparams(AlgorithmHparams):
"""See :class:`BlurPool`"""
replace_convs: bool = hp.optional('Replace Conv2d with BlurConv2d if stride > 1', default=True)
replace_maxpools: bool = hp.optional('Replace MaxPool2d with BlurMaxPool2d', default=True)
blur_first: bool = hp.optional('Blur input before convolution', default=True)
def initialize_object(self) -> "BlurPool":
return BlurPool(**asdict(self))
@dataclass
class ChannelsLastHparams(AlgorithmHparams):
"""ChannelsLast has no hyperparameters, so this class has no member variables."""
def initialize_object(self) -> ChannelsLast:
return ChannelsLast()
@dataclass
class ColOutHparams(AlgorithmHparams):
"""See :class:`ColOut`"""
p_row: float = hp.optional(doc="Fraction of rows to drop", default=0.15)
p_col: float = hp.optional(doc="Fraction of cols to drop", default=0.15)
batch: bool = hp.optional(doc="Run ColOut at the batch level", default=True)
def initialize_object(self) -> ColOut:
return ColOut(**asdict(self))
@dataclass
class CutMixHparams(AlgorithmHparams):
"""See :class:`CutMix`"""
num_classes: int = hp.required('Number of classes in the task labels.')
alpha: float = hp.optional('Strength of interpolation, should be >= 0. No interpolation if alpha=0.', default=1.0)
def initialize_object(self) -> CutMix:
return CutMix(**asdict(self))
@dataclass
class CutOutHparams(AlgorithmHparams):
"""See :class:`CutOut`"""
n_holes: int = hp.optional('Number of holes to cut out', default=1)
length: float = hp.optional('Relative or absolute side length of the square hole to cut out', default=0.5)
def initialize_object(self) -> CutOut:
return CutOut(**asdict(self))
@dataclass
class FactorizeHparams(AlgorithmHparams):
"""See :class:`Factorize`"""
factorize_convs: bool = hp.optional(
doc='Whether to factorize convolutional layers',
default=True,
)
factorize_linears: bool = hp.optional(
doc='Whether to factorize linear layers',
default=True,
)
min_channels: int = hp.optional(
doc=('Minimum number of channels in a Conv2d module' + ' for it to be factorized.'),
default=512,
)
latent_channels: float = hp.optional(
doc='Number or relative fraction of channels in factorized convolution latent representations',
default=0.25,
)
min_features: int = hp.optional(
doc=('Minimum number of features in a Linear module' + ' for it to be factorized.'),
default=512,
)
latent_features: float = hp.optional(
doc='Number or relative fraction of features in factorized linear latent representations',
default=0.25,
)
def initialize_object(self) -> Factorize:
return Factorize(**asdict(self))
@dataclass
class GhostBatchNormHparams(AlgorithmHparams):
"""See :class:`GhostBatchNorm`"""
ghost_batch_size: int = hp.optional(doc='Size of sub-batches to normalize over', default=32)
def initialize_object(self) -> GhostBatchNorm:
return GhostBatchNorm(**asdict(self))
@dataclass
class LabelSmoothingHparams(AlgorithmHparams):
"""See :class:`LabelSmoothing`"""
alpha: float = hp.optional(doc='smoothing factor', default=0.1)
def initialize_object(self) -> LabelSmoothing:
return LabelSmoothing(**asdict(self))
@dataclass
class LayerFreezingHparams(AlgorithmHparams):
"""See :class:`LayerFreezing`"""
freeze_start: float = hp.optional(doc='The percentage of epochs to run before freezing begins.', default=0.5)
freeze_level: float = hp.optional(doc='Scale factor for the percentage of the network to freeze.', default=1.0)
def initialize_object(self) -> LayerFreezing:
return LayerFreezing(**asdict(self))
@dataclass
class MixUpHparams(AlgorithmHparams):
"""See :class:`MixUp`"""
num_classes: int = hp.required('Number of classes in the task labels.')
alpha: float = hp.optional('Strength of interpolation, should be >= 0. No interpolation if alpha=0.', default=0.2)
def initialize_object(self) -> MixUp:
return MixUp(**asdict(self))
@dataclass
class NoOpModelHparams(AlgorithmHparams):
def initialize_object(self) -> NoOpModel:
return NoOpModel()
@dataclass
class ProgressiveResizingHparams(AlgorithmHparams):
"""See :class:`ProgressiveResizing`"""
mode: str = hp.optional(doc="Type of scaling to perform", default="resize")
initial_scale: float = hp.optional(doc="Initial scale factor", default=0.5)
finetune_fraction: float = hp.optional(doc="Fraction of training to reserve for finetuning on full-sized inputs",
default=0.2)
resize_targets: bool = hp.optional(doc="Also resize targets", default=False)
def initialize_object(self) -> ProgressiveResizing:
return ProgressiveResizing(**asdict(self))
@dataclass
class RandAugmentHparams(AlgorithmHparams):
"""See :class:`RandAugment`"""
severity: int = hp.optional(doc="Intensity of each augmentation. Ranges from 0 (none) to 10 (maximum)", default=9)
depth: int = hp.optional(doc="Number of augmentations to compose in a row", default=2)
augmentation_set: str = hp.optional(
doc=
"Set of augmentations to sample from. 'all', 'safe' (only augmentations that don't appear on CIFAR10C/ImageNet10C), or 'original'",
default="all")
def initialize_object(self) -> "RandAugment":
return RandAugment(**asdict(self))
@dataclass
class SAMHparams(AlgorithmHparams):
"""See :class:`SAM`"""
rho: float = hp.optional(doc='The neighborhood size parameter of SAM. Must be greater than 0.', default=0.05)
epsilon: float = hp.optional(doc='A small value added to gradient norm for numerical stability.', default=1.0e-12)
    interval: int = hp.optional(doc='SAM will run once per `interval` steps. A value of 1 will cause '
                                'SAM to run every step. Steps on which SAM runs take roughly twice '
                                'as much time to complete.',
default=1)
def initialize_object(self) -> SAM:
return SAM(**asdict(self))
@dataclass
class ScaleScheduleHparams(AlgorithmHparams):
"""See :class:`ScaleSchedule`"""
ratio: float = hp.optional('Ratio to scale the schedule.', default=1.0)
def initialize_object(self) -> "ScaleSchedule":
return ScaleSchedule(**asdict(self))
@dataclass
class SelectiveBackpropHparams(AlgorithmHparams):
"""See :class:`SelectiveBackprop`"""
start: float = hp.optional(doc="SB interval start, as fraction of training duration", default=0.5)
end: float = hp.optional(doc="SB interval end, as fraction of training duration", default=0.9)
keep: float = hp.optional(doc="fraction of minibatch to select and keep for gradient computation", default=0.5)
scale_factor: float = hp.optional(doc="scale for downsampling input for selection forward pass", default=0.5)
interrupt: int = hp.optional(doc="interrupt SB with a vanilla minibatch step every 'interrupt' batches", default=2)
def initialize_object(self) -> SelectiveBackprop:
return SelectiveBackprop(**asdict(self))
@dataclass
class SeqLengthWarmupHparams(AlgorithmHparams):
duration: float = hp.optional("Fraction of total training time to apply sequential length warmup learning.",
default=0.3)
min_seq_length: int = hp.optional("Starting sequence length.", default=8)
max_seq_length: int = hp.optional("End sequence length", default=1024)
step_size: int = hp.optional("Sequence length step size", default=8)
truncate: bool = hp.optional("Truncate tensors or reshape extra tokens to new examples.", default=True)
def initialize_object(self) -> "SeqLengthWarmup":
return SeqLengthWarmup(**asdict(self))
@dataclass
class StochasticDepthHparams(AlgorithmHparams):
"""See :class:`StochasticDepth`"""
target_layer_name: str = hp.required(
f'Reference name of layer to replace. "block" method can be {list(_STOCHASTIC_LAYER_MAPPING["block"].keys())}.'
f' "sample" method can be {list(_STOCHASTIC_LAYER_MAPPING["sample"].keys())}.')
stochastic_method: str = hp.optional('The version of stochastic depth to use. One of ["sample", "block"].',
default='block')
drop_rate: float = hp.optional('The probability of dropping a block or sample.', default=0.2)
drop_distribution: str = hp.optional(
'"Uniform" keeps the drop rate the same across blocks. "linear" linearly'
' increases the drop rate with block depth until it reaches `drop_rate`.',
default='linear')
use_same_gpu_seed: bool = hp.optional(
'Whether or not to drop the same blocks across GPUs. Only used with "block" method.', default=True)
drop_warmup: str = hp.optional(textwrap.dedent("""\
Time string to represent the amount of training to warmup the `drop_rate`.
Only use with "block" stochastic method."""),
default="0dur")
def initialize_object(self) -> StochasticDepth:
return StochasticDepth(**asdict(self))
def validate(self):
super().validate()
_validate_stochastic_hparams(target_layer_name=self.target_layer_name,
stochastic_method=self.stochastic_method,
drop_rate=self.drop_rate,
drop_distribution=self.drop_distribution,
drop_warmup=self.drop_warmup)
@dataclass
class SqueezeExciteHparams(AlgorithmHparams):
"""See :class:`SqueezeExcite`"""
latent_channels: float = hp.optional(
doc='Dimensionality of hidden layer within the added MLP.',
default=64,
)
min_channels: int = hp.optional(
doc='Minimum number of channels in a Conv2d layer'
' for a squeeze-excite block to be placed after it.',
default=128,
)
def initialize_object(self) -> SqueezeExcite:
return SqueezeExcite(**asdict(self))
@dataclass
class SWAHparams(AlgorithmHparams):
"""See :class:`~.composer.algorithms.swa.SWA`"""
swa_start: str = hp.optional(
doc='Time string denoting the amount of training '
'completed before stochastic weight averaging begins. Currently only units of '
'duration (e.g. "0.7dur") and epoch (e.g "50ep") are supported.',
default="0.7dur",
)
swa_end: str = hp.optional(
doc='Time string denoting amount of training completed before the baseline '
'(non-averaged) model is replaced with the stochastic weight averaged model. '
'Currently only units of duration (e.g. "0.97dur") and epoch (e.g "88ep") are supported.',
default="0.97dur")
schedule_swa_lr: bool = hp.optional(doc='Flag to determine whether to apply an SWA-specific LR schedule during the '
'period in which SWA is active.',
default=False)
anneal_strategy: str = hp.optional(doc='SWA learning rate annealing schedule strategy. '
'"linear" for linear annealing, "cos" for cosine annealing.',
default='linear')
anneal_epochs: int = hp.optional(
doc='Number of epochs over which to anneal SWA learning rate.',
default=10,
)
swa_lr: Optional[float] = hp.optional(
doc='The final learning rate to anneal towards with this scheduler. '
'Set to None for no annealing.',
default=None,
)
def initialize_object(self):
return SWA(**asdict(self))
| 42.437018
| 139
| 0.675733
|
216c673e2166f5b04d87910e5861565daa8ad925
| 6,759
|
py
|
Python
|
roles/bf_arch_desktop/files/config/ranger/devicons.py
|
Blackfell/ansible-hax
|
4c421c20a6c3462134ab0b24a09eddc0f84b27fd
|
[
"BSD-2-Clause"
] | 22
|
2020-05-17T04:48:19.000Z
|
2021-06-05T02:05:56.000Z
|
roles/bf_arch_desktop/files/config/ranger/devicons.py
|
Blackfell/ansible-hax
|
4c421c20a6c3462134ab0b24a09eddc0f84b27fd
|
[
"BSD-2-Clause"
] | 84
|
2020-01-03T07:28:16.000Z
|
2022-03-09T08:02:47.000Z
|
roles/bf_arch_desktop/files/config/ranger/devicons.py
|
Blackfell/ansible-hax
|
4c421c20a6c3462134ab0b24a09eddc0f84b27fd
|
[
"BSD-2-Clause"
] | 3
|
2020-12-28T06:35:27.000Z
|
2021-04-02T22:02:06.000Z
|
#!/usr/bin/python
# coding=UTF-8
# These glyphs, and the mapping of file extensions to glyphs,
# have been copied from the vimscript code that is present in
# https://github.com/ryanoasis/vim-devicons
import re
import os
# all those glyphs will show as weird squares if you don't have the correct patched font
# My advice is to use NerdFonts which can be found here:
# https://github.com/ryanoasis/nerd-fonts
file_node_extensions = {
'7z' : '',
'ai' : '',
'apk' : '',
'avi' : '',
'bat' : '',
'bmp' : '',
'bz2' : '',
'c' : '',
'c++' : '',
'cab' : '',
'cc' : '',
'clj' : '',
'cljc' : '',
'cljs' : '',
'coffee' : '',
'conf' : '',
'cp' : '',
'cpio' : '',
'cpp' : '',
'css' : '',
'cxx' : '',
'd' : '',
'dart' : '',
'db' : '',
'deb' : '',
'diff' : '',
'dump' : '',
'edn' : '',
'ejs' : '',
'epub' : '',
'erl' : '',
'f#' : '',
'fish' : '',
'flac' : '',
'flv' : '',
'fs' : '',
'fsi' : '',
'fsscript' : '',
'fsx' : '',
'gem' : '',
'gif' : '',
'go' : '',
'gz' : '',
'gzip' : '',
'hbs' : '',
'hrl' : '',
'hs' : '',
'htm' : '',
'html' : '',
'ico' : '',
'ini' : '',
'java' : '',
'jl' : '',
'jpeg' : '',
'jpg' : '',
'js' : '',
'json' : '',
'jsx' : '',
'less' : '',
'lha' : '',
'lhs' : '',
'log' : '',
'lua' : '',
'lzh' : '',
'lzma' : '',
'markdown' : '',
'md' : '',
'mkv' : '',
'ml' : 'λ',
'mli' : 'λ',
'mov' : '',
'mp3' : '',
'mp4' : '',
'mpeg' : '',
'mpg' : '',
'mustache' : '',
'ogg' : '',
'pdf' : '',
'php' : '',
'pl' : '',
'pm' : '',
'png' : '',
'psb' : '',
'psd' : '',
'py' : '',
'pyc' : '',
'pyd' : '',
'pyo' : '',
'rar' : '',
'rb' : '',
'rc' : '',
'rlib' : '',
'rpm' : '',
'rs' : '',
'rss' : '',
'scala' : '',
'scss' : '',
'sh' : '',
'slim' : '',
'sln' : '',
'sql' : '',
'styl' : '',
'suo' : '',
't' : '',
'tar' : '',
'tgz' : '',
'ts' : '',
'twig' : '',
'vim' : '',
'vimrc' : '',
'wav' : '',
'xml' : '',
'xul' : '',
'xz' : '',
'yml' : '',
'zip' : '',
}
dir_node_exact_matches = {
# English
'.git' : '',
'Desktop' : '',
'Documents' : '',
'Downloads' : '',
'Dropbox' : '',
'Music' : '',
'Pictures' : '',
'Public' : '',
'Templates' : '',
'Videos' : '',
# French
'Bureau' : '',
'Documents' : '',
'Images' : '',
'Musique' : '',
'Publique' : '',
'Téléchargements' : '',
'Vidéos' : '',
# Portuguese
'Documentos' : '',
'Imagens' : '',
'Modelos' : '',
'Música' : '',
'Público' : '',
'Vídeos' : '',
'Área de trabalho' : '',
# Italian
'Documenti' : '',
'Immagini' : '',
'Modelli' : '',
'Musica' : '',
'Pubblici' : '',
'Scaricati' : '',
'Scrivania' : '',
'Video' : '',
# German
'Bilder' : '',
'Dokumente' : '',
'Musik' : '',
'Schreibtisch' : '',
'Vorlagen' : '',
'Öffentlich' : '',
}
file_node_exact_matches = {
'.Xdefaults' : '',
'.Xresources' : '',
'.bashprofile' : '',
'.bashrc' : '',
'.dmrc' : '',
'.ds_store' : '',
'.fasd' : '',
'.gitconfig' : '',
'.gitignore' : '',
'.jack-settings' : '',
'.mime.types' : '',
'.nvidia-settings-rc' : '',
'.pam_environment' : '',
'.profile' : '',
'.recently-used' : '',
'.selected_editor' : '',
'.vimrc' : '',
'.xinputrc' : '',
'config' : '',
'dropbox' : '',
'exact-match-case-sensitive-1.txt' : 'X1',
'exact-match-case-sensitive-2' : 'X2',
'favicon.ico' : '',
'gruntfile.coffee' : '',
'gruntfile.js' : '',
'gruntfile.ls' : '',
'gulpfile.coffee' : '',
'gulpfile.js' : '',
'gulpfile.ls' : '',
'ini' : '',
'ledger' : '',
'license' : '',
'mimeapps.list' : '',
'node_modules' : '',
'procfile' : '',
'react.jsx' : '',
'user-dirs.dirs' : '',
}
def devicon(file):
if file.is_directory: return dir_node_exact_matches.get(file.relative_path, '')
return file_node_exact_matches.get(file.relative_path, file_node_extensions.get(file.extension, ''))
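# --- Illustrative usage (not part of the original module) -------------------
# ranger normally calls devicon() with its own file objects; the stand-in
# below only mimics the three attributes the function actually reads.
if __name__ == "__main__":
    from collections import namedtuple

    FakeFile = namedtuple("FakeFile", "is_directory relative_path extension")
    print(devicon(FakeFile(False, "notes.md", "md")))   # extension-based glyph
    print(devicon(FakeFile(True, "Downloads", None)))   # directory glyph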
| 30.863014
| 103
| 0.239976
|
6abd273ce7ba90ef72a4ed5718593c5c1f8b4388
| 7,554
|
py
|
Python
|
fluent_contents/tests/test_admin.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
fluent_contents/tests/test_admin.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
fluent_contents/tests/test_admin.py
|
francofuji/django-fluent-contents
|
03da447ef0854b0e6a6f8ff39d9281d11efc8587
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pformat
from django.contrib.admin import AdminSite
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.contrib.auth.models import User
from django.contrib.messages.middleware import MessageMiddleware
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from django.test.utils import override_settings # import location for Django 1.6-
from fluent_contents.models import Placeholder
from fluent_contents.tests import factories
from fluent_contents.tests.testapp.admin import PlaceholderFieldTestPageAdmin
from fluent_contents.tests.testapp.models import PlaceholderFieldTestPage, RawHtmlTestItem
from fluent_contents.tests.utils import AppTestCase
class AdminTest(AppTestCase):
"""
Test the admin functions.
"""
def setUp(self):
# Admin objects for all tests.
self.factory = RequestFactory()
self.admin_site = AdminSite()
self.admin_user = User.objects.get(is_superuser=True)
self.settings = override_settings(
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
)
self.settings.enable()
def tearDown(self):
self.settings.disable()
def test_add_page(self):
"""
Test adding an object with placeholder field via the admin.
"""
self.admin_site.register(PlaceholderFieldTestPage, PlaceholderFieldTestPageAdmin)
modeladmin = self.admin_site._registry[PlaceholderFieldTestPage]
# Get all post data.
# Includes all inlines, so all inline formsets of other plugins will be added (with TOTAL_FORMS 0)
contents_slot = PlaceholderFieldTestPage.contents.slot
formdata = self._get_management_form_data(modeladmin)
formdata.update({
'title': 'TEST1',
'placeholder-fs-TOTAL_FORMS': '1',
'placeholder-fs-MAX_NUM_FORMS': '', # Needed for Django <= 1.4.3
'placeholder-fs-INITIAL_FORMS': '0', # Needed for Django 1.3
'placeholder-fs-0-slot': contents_slot,
'placeholder-fs-0-role': Placeholder.MAIN,
'rawhtmltestitem-TOTAL_FORMS': '1',
'rawhtmltestitem-MAX_NUM_FORMS': '',
'rawhtmltestitem-INITIAL_FORMS': '0',
'rawhtmltestitem-0-placeholder': '', # The placeholder is not defined yet, as item is not yet created.
'rawhtmltestitem-0-placeholder_slot': contents_slot, # BaseContentItemFormSet resolves the placeholder after it's created
'rawhtmltestitem-0-sort_order': '1',
'rawhtmltestitem-0-html': u'<b>foo</b>',
})
# Make a POST to the admin page.
response = self._post_add(modeladmin, formdata)
self.assertEqual(response.status_code, 302, "No redirect, received:\n\n{0}".format(self._render_response(response)))
# Check that the page exists.
page = PlaceholderFieldTestPage.objects.get(title='TEST1')
# Check that the placeholder is created,
        # and properly links back to its parent.
placeholder = page.contents
self.assertEqual(placeholder.slot, contents_slot)
self.assertEqual(placeholder.role, Placeholder.MAIN)
self.assertEqual(placeholder.parent, page)
# Check that the ContentItem is created,
        # and properly links back to its parent.
rawhtmltestitem = RawHtmlTestItem.objects.get(html=u'<b>foo</b>')
self.assertEqual(rawhtmltestitem.placeholder, placeholder)
self.assertEqual(rawhtmltestitem.parent, page)
# Also check reverse relation of placeholder
rawhtmltestitem = placeholder.contentitems.all()[0]
self.assertEqual(rawhtmltestitem.html, u'<b>foo</b>')
def test_copy_language_backend(self):
"""
Testing how the copy button works.
It calls the ``get_placeholder_data_view`` function.
"""
self.admin_site.register(PlaceholderFieldTestPage, PlaceholderFieldTestPageAdmin)
modeladmin = self.admin_site._registry[PlaceholderFieldTestPage]
page = factories.create_page()
placeholder = factories.create_placeholder(page=page)
item1 = factories.create_content_item(RawHtmlTestItem, placeholder=placeholder, html='AA')
item2 = factories.create_content_item(RawHtmlTestItem, placeholder=placeholder, html='BB')
request = self._get_request(admin_urlname(page._meta, 'get_placeholder_data'))
data = modeladmin.get_placeholder_data_view(request, page.pk).jsondata
self.assertEqual(len(data['formset_forms']), 2)
self.assertEqual(data['formset_forms'][0]['model'], 'RawHtmlTestItem')
self.assertEqual(data['formset_forms'][0]['contentitem_id'], item1.pk)
self.assertEqual(data['formset_forms'][1]['contentitem_id'], item2.pk)
self.assertTrue('AA' in data['formset_forms'][0]['html'])
def _get_request(self, url, data=None):
request = self.factory.post(url, data=data or {})
request.csrf_processing_done = True
request.session = {}
request.user = self.admin_user
MessageMiddleware().process_request(request)
return request
def _post_add(self, modeladmin, formdata):
opts = modeladmin.opts
url = reverse('admin:{0}_{1}_add'.format(*_get_url_format(opts)))
# Build request
# Add properties which middleware would typically do
request = self._get_request(url, data=formdata)
# Make a direct call, circumvents login page.
return modeladmin.add_view(request)
def _get_management_form_data(self, modeladmin):
"""
Return the formdata that the management forms need.
"""
opts = modeladmin.opts
url = reverse('admin:{0}_{1}_add'.format(*_get_url_format(opts)))
request = self.factory.get(url)
request.user = self.admin_user
if hasattr(modeladmin, 'get_inline_instances'):
inline_instances = modeladmin.get_inline_instances(request) # Django 1.4
else:
inline_instances = [inline_class(modeladmin.model, self.admin_site) for inline_class in modeladmin.inlines]
forms = []
for inline_instance in inline_instances:
FormSet = inline_instance.get_formset(request)
formset = FormSet(instance=modeladmin.model())
forms.append(formset.management_form)
# In a primitive way, get the form fields.
# This is not exactly the same as a POST, since that runs through clean()
formdata = {}
for form in forms:
for boundfield in form:
formdata[boundfield.html_name] = boundfield.value()
return formdata
def _render_response(self, response):
if hasattr(response, 'render'):
# TemplateResponse
return u"== Context ==\n{0}\n\n== Response ==\n{1}".format(pformat(response.context_data), response.render().content)
else:
return response.content
def _get_url_format(opts):
try:
return opts.app_label, opts.model_name # Django 1.7 format
except AttributeError:
return opts.app_label, opts.module_name
| 43.413793
| 135
| 0.674609
|
ad4a6d64d10adb4deb58f8e0c67c56b4f96afadf
| 10,077
|
py
|
Python
|
python/paddle/__init__.py
|
CaiZixin/Paddle
|
63da846de0d9a717dae36939608aacb28baba2e7
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/__init__.py
|
CaiZixin/Paddle
|
63da846de0d9a717dae36939608aacb28baba2e7
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/__init__.py
|
CaiZixin/Paddle
|
63da846de0d9a717dae36939608aacb28baba2e7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from paddle.check_import_scipy import check_import_scipy
check_import_scipy(os.name)
try:
from paddle.version import full_version as __version__
from paddle.version import commit as __git_commit__
except ImportError:
import sys
sys.stderr.write('''Warning with import paddle: you should not
import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
)
import paddle.reader
import paddle.dataset
import paddle.batch
batch = batch.batch
import paddle.compat
import paddle.distributed
import paddle.sysconfig
import paddle.tensor
import paddle.nn
import paddle.framework
import paddle.imperative
import paddle.incubate.complex as complex
# TODO: define alias in tensor and framework directory
from .tensor.random import randperm
from .tensor.attribute import rank #DEFINE_ALIAS
from .tensor.attribute import shape #DEFINE_ALIAS
# from .tensor.creation import create_tensor #DEFINE_ALIAS
# from .tensor.creation import create_lod_tensor #DEFINE_ALIAS
# from .tensor.creation import create_random_int_lodtensor #DEFINE_ALIAS
from .tensor.creation import crop_tensor #DEFINE_ALIAS
from .tensor.creation import diag #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import fill_constant #DEFINE_ALIAS
# from .tensor.creation import get_tensor_from_selected_rows #DEFINE_ALIAS
from .tensor.creation import linspace #DEFINE_ALIAS
from .tensor.creation import ones #DEFINE_ALIAS
from .tensor.creation import ones_like #DEFINE_ALIAS
from .tensor.creation import zeros #DEFINE_ALIAS
from .tensor.creation import zeros_like #DEFINE_ALIAS
from .tensor.creation import arange #DEFINE_ALIAS
from .tensor.creation import eye #DEFINE_ALIAS
from .tensor.creation import full #DEFINE_ALIAS
from .tensor.creation import full_like #DEFINE_ALIAS
from .tensor.creation import triu #DEFINE_ALIAS
from .tensor.creation import tril #DEFINE_ALIAS
from .tensor.creation import meshgrid #DEFINE_ALIAS
from .tensor.io import save #DEFINE_ALIAS
from .tensor.io import load #DEFINE_ALIAS
from .tensor.linalg import matmul #DEFINE_ALIAS
from .tensor.linalg import dot #DEFINE_ALIAS
# from .tensor.linalg import einsum #DEFINE_ALIAS
from .tensor.linalg import norm #DEFINE_ALIAS
from .tensor.linalg import transpose #DEFINE_ALIAS
from .tensor.linalg import dist #DEFINE_ALIAS
from .tensor.linalg import t #DEFINE_ALIAS
from .tensor.linalg import cross #DEFINE_ALIAS
from .tensor.linalg import cholesky #DEFINE_ALIAS
# from .tensor.linalg import tensordot #DEFINE_ALIAS
from .tensor.linalg import bmm #DEFINE_ALIAS
from .tensor.logic import equal #DEFINE_ALIAS
from .tensor.logic import greater_equal #DEFINE_ALIAS
from .tensor.logic import greater_than #DEFINE_ALIAS
from .tensor.logic import is_empty #DEFINE_ALIAS
from .tensor.logic import isfinite #DEFINE_ALIAS
from .tensor.logic import less_equal #DEFINE_ALIAS
from .tensor.logic import less_than #DEFINE_ALIAS
from .tensor.logic import logical_and #DEFINE_ALIAS
from .tensor.logic import logical_not #DEFINE_ALIAS
from .tensor.logic import logical_or #DEFINE_ALIAS
from .tensor.logic import logical_xor #DEFINE_ALIAS
from .tensor.logic import not_equal #DEFINE_ALIAS
from .tensor.logic import reduce_all #DEFINE_ALIAS
from .tensor.logic import reduce_any #DEFINE_ALIAS
from .tensor.logic import allclose #DEFINE_ALIAS
from .tensor.logic import elementwise_equal #DEFINE_ALIAS
# from .tensor.logic import isnan #DEFINE_ALIAS
from .tensor.manipulation import cast #DEFINE_ALIAS
from .tensor.manipulation import concat #DEFINE_ALIAS
from .tensor.manipulation import expand #DEFINE_ALIAS
from .tensor.manipulation import expand_as #DEFINE_ALIAS
from .tensor.manipulation import flatten #DEFINE_ALIAS
from .tensor.manipulation import gather #DEFINE_ALIAS
# from .tensor.manipulation import gather_nd #DEFINE_ALIAS
from .tensor.manipulation import reshape #DEFINE_ALIAS
from .tensor.manipulation import reverse #DEFINE_ALIAS
from .tensor.manipulation import scatter #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd_add #DEFINE_ALIAS
# from .tensor.manipulation import scatter_nd #DEFINE_ALIAS
# from .tensor.manipulation import shard_index #DEFINE_ALIAS
from .tensor.manipulation import slice #DEFINE_ALIAS
from .tensor.manipulation import split #DEFINE_ALIAS
from .tensor.manipulation import squeeze #DEFINE_ALIAS
from .tensor.manipulation import stack #DEFINE_ALIAS
from .tensor.manipulation import strided_slice #DEFINE_ALIAS
from .tensor.manipulation import transpose #DEFINE_ALIAS
from .tensor.manipulation import unique #DEFINE_ALIAS
# from .tensor.manipulation import unique_with_counts #DEFINE_ALIAS
from .tensor.manipulation import unsqueeze #DEFINE_ALIAS
from .tensor.manipulation import unstack #DEFINE_ALIAS
from .tensor.manipulation import flip #DEFINE_ALIAS
# from .tensor.manipulation import unbind #DEFINE_ALIAS
from .tensor.manipulation import roll #DEFINE_ALIAS
from .tensor.math import abs #DEFINE_ALIAS
from .tensor.math import acos #DEFINE_ALIAS
from .tensor.math import asin #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import ceil #DEFINE_ALIAS
from .tensor.math import cos #DEFINE_ALIAS
from .tensor.math import cumsum #DEFINE_ALIAS
from .tensor.math import elementwise_add #DEFINE_ALIAS
from .tensor.math import elementwise_div #DEFINE_ALIAS
from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
from .tensor.math import elementwise_max #DEFINE_ALIAS
from .tensor.math import elementwise_min #DEFINE_ALIAS
from .tensor.math import elementwise_mod #DEFINE_ALIAS
from .tensor.math import elementwise_mul #DEFINE_ALIAS
from .tensor.math import elementwise_pow #DEFINE_ALIAS
from .tensor.math import elementwise_sub #DEFINE_ALIAS
from .tensor.math import exp #DEFINE_ALIAS
from .tensor.math import floor #DEFINE_ALIAS
# from .tensor.math import increment #DEFINE_ALIAS
from .tensor.math import log #DEFINE_ALIAS
from .tensor.math import mul #DEFINE_ALIAS
# from .tensor.math import multiplex #DEFINE_ALIAS
from .tensor.math import pow #DEFINE_ALIAS
from .tensor.math import reciprocal #DEFINE_ALIAS
from .tensor.math import reduce_max #DEFINE_ALIAS
from .tensor.math import reduce_min #DEFINE_ALIAS
from .tensor.math import reduce_prod #DEFINE_ALIAS
from .tensor.math import reduce_sum #DEFINE_ALIAS
from .tensor.math import round #DEFINE_ALIAS
from .tensor.math import rsqrt #DEFINE_ALIAS
from .tensor.math import scale #DEFINE_ALIAS
from .tensor.math import sign #DEFINE_ALIAS
from .tensor.math import sin #DEFINE_ALIAS
from .tensor.math import sqrt #DEFINE_ALIAS
from .tensor.math import square #DEFINE_ALIAS
from .tensor.math import stanh #DEFINE_ALIAS
from .tensor.math import sum #DEFINE_ALIAS
# from .tensor.math import sums #DEFINE_ALIAS
from .tensor.math import tanh #DEFINE_ALIAS
from .tensor.math import elementwise_sum #DEFINE_ALIAS
from .tensor.math import max #DEFINE_ALIAS
from .tensor.math import min #DEFINE_ALIAS
from .tensor.math import mm #DEFINE_ALIAS
from .tensor.math import div #DEFINE_ALIAS
from .tensor.math import add #DEFINE_ALIAS
from .tensor.math import atan #DEFINE_ALIAS
from .tensor.math import logsumexp #DEFINE_ALIAS
from .tensor.math import inverse #DEFINE_ALIAS
from .tensor.math import log1p #DEFINE_ALIAS
from .tensor.math import erf #DEFINE_ALIAS
from .tensor.math import addcmul #DEFINE_ALIAS
from .tensor.math import addmm #DEFINE_ALIAS
from .tensor.math import clamp #DEFINE_ALIAS
from .tensor.math import trace #DEFINE_ALIAS
from .tensor.math import kron #DEFINE_ALIAS
# from .tensor.random import gaussin #DEFINE_ALIAS
# from .tensor.random import uniform #DEFINE_ALIAS
from .tensor.random import shuffle #DEFINE_ALIAS
from .tensor.random import randn #DEFINE_ALIAS
from .tensor.random import rand #DEFINE_ALIAS
from .tensor.random import randint #DEFINE_ALIAS
from .tensor.random import randperm #DEFINE_ALIAS
from .tensor.search import argmax #DEFINE_ALIAS
from .tensor.search import argmin #DEFINE_ALIAS
from .tensor.search import argsort #DEFINE_ALIAS
from .tensor.search import has_inf #DEFINE_ALIAS
from .tensor.search import has_nan #DEFINE_ALIAS
# from .tensor.search import masked_select #DEFINE_ALIAS
from .tensor.search import topk #DEFINE_ALIAS
from .tensor.search import where #DEFINE_ALIAS
from .tensor.search import index_select #DEFINE_ALIAS
from .tensor.search import nonzero #DEFINE_ALIAS
from .tensor.search import sort #DEFINE_ALIAS
from .framework.random import manual_seed #DEFINE_ALIAS
from .tensor.search import index_sample #DEFINE_ALIAS
from .tensor.stat import mean #DEFINE_ALIAS
from .tensor.stat import reduce_mean #DEFINE_ALIAS
from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS
from .fluid.dygraph.base import enable_dygraph #DEFINE_ALIAS
from .fluid.dygraph.base import disable_dygraph #DEFINE_ALIAS
from .fluid.framework import in_dygraph_mode #DEFINE_ALIAS
enable_imperative = enable_dygraph #DEFINE_ALIAS
disable_imperative = disable_dygraph #DEFINE_ALIAS
in_imperative_mode = in_dygraph_mode
| 47.758294
| 90
| 0.803811
|
d465495ba3ef02f9a5050408c04398c95a46e2e0
| 6,366
|
py
|
Python
|
utils/update_readme.py
|
ahti123/osxphotos
|
53c701cc0ebd38db255c1ce694391b38dbb5fe01
|
[
"MIT"
] | 656
|
2019-08-14T14:10:44.000Z
|
2022-03-28T15:25:42.000Z
|
utils/update_readme.py
|
ahti123/osxphotos
|
53c701cc0ebd38db255c1ce694391b38dbb5fe01
|
[
"MIT"
] | 557
|
2019-10-14T19:00:02.000Z
|
2022-03-28T00:48:30.000Z
|
utils/update_readme.py
|
ahti123/osxphotos
|
53c701cc0ebd38db255c1ce694391b38dbb5fe01
|
[
"MIT"
] | 58
|
2019-12-27T01:39:33.000Z
|
2022-02-26T22:18:49.000Z
|
""" Automatically update certain sections of README.md for osxphotos
Also updates osxphotos/phototemplate.md
"""
# This is a pretty "dumb" script that searches the README.md for
# certain tags, expressed as HTML comments, and replaces text between
# those tags. The following replacements are made:
# 1. the output of "osxphotos help export"
# 2. the template substitution table
# Running this script ensures the above sections of the README.md contain
# the most current information, updated directly from the code.
import re
from click.testing import CliRunner
from osxphotos.cli import help
from osxphotos.phototemplate import (
FILTER_VALUES,
TEMPLATE_SUBSTITUTIONS,
TEMPLATE_SUBSTITUTIONS_MULTI_VALUED,
)
TEMPLATE_HELP = "osxphotos/phototemplate.md"
TUTORIAL_HELP = "osxphotos/tutorial.md"
USAGE_START = (
"<!-- OSXPHOTOS-EXPORT-USAGE:START - Do not remove or modify this section -->"
)
USAGE_STOP = "<!-- OSXPHOTOS-EXPORT-USAGE:END -->"
TEMPLATE_TABLE_START = (
"<!-- OSXPHOTOS-TEMPLATE-TABLE:START - Do not remove or modify this section -->"
)
TEMPLATE_TABLE_STOP = "<!-- OSXPHOTOS-TEMPLATE-TABLE:END -->"
TEMPLATE_HELP_START = (
"<!-- OSXPHOTOS-TEMPLATE-HELP:START - Do not remove or modify this section -->"
)
TEMPLATE_HELP_STOP = "<!-- OSXPHOTOS-TEMPLATE-HELP:END -->"
TEMPLATE_FILTER_TABLE_START = (
"!-- OSXPHOTOS-FILTER-TABLE:START - Do not remove or modify this section -->"
)
TEMPLATE_FILTER_TABLE_STOP = "<!-- OSXPHOTOS-FILTER-TABLE:END -->"
TUTORIAL_START = "<!-- OSXPHOTOS-TUTORIAL:START -->"
TUTORIAL_STOP = "<!-- OSXPHOTOS-TUTORIAL:END -->"
TUTORIAL_HEADER_START = "<!-- OSXPHOTOS-TUTORIAL-HEADER:START -->"
TUTORIAL_HEADER_STOP = "<!-- OSXPHOTOS-TUTORIAL-HEADER:END -->"
TEMPLATE_SYSTEM_LINK_START = "<!-- OSXPHOTOS-TEMPLATE-SYSTEM-LINK:START -->"
TEMPLATE_SYSTEM_LINK_STOP = "<!-- OSXPHOTOS-TEMPLATE-SYSTEM-LINK:END -->"
def generate_template_table():
""" generate template substitution table for README.md """
template_table = "| Substitution | Description |"
template_table += "\n|--------------|-------------|"
for subst, descr in [
*TEMPLATE_SUBSTITUTIONS.items(),
*TEMPLATE_SUBSTITUTIONS_MULTI_VALUED.items(),
]:
template_table += f"\n|{subst}|{descr}|"
return template_table
def generate_help_text(command):
""" generate output of `osxphotos help command` """
runner = CliRunner()
# get current help text
with runner.isolated_filesystem():
result = runner.invoke(help, [command])
help_txt = result.output
# running the help command above doesn't output the full "Usage" line
help_txt = help_txt.replace(f"Usage: {command}", f"Usage: osxphotos {command}")
return help_txt
def replace_text(text, start_tag, stop_tag, replacement_text, prefix="", postfix=""):
""" replace text between start/stop tags with new text
Args:
text: str, original text
start_tag: str, tag to find at beginning of replacement
stop_tag: str, tag to find at end of replacement
prefix: optional prefix that will go between start_tag and replacement_text
postfix: optional postfix that will go between replacement_text and stop_tag
replacement_text: str, new text to place between start_tag, stop_tag
Returns:
str
"""
# sanity check to ensure tags are present
if start_tag not in text:
raise ValueError(f"start_tag {start_tag} not in text")
if stop_tag not in text:
raise ValueError(f"stop_tag {stop_tag} not in text")
begin = end = ""
try:
begin = text.split(start_tag)[0]
end = text.split(stop_tag)[1]
except IndexError as e:
# didn't find one of the delimiters
raise ValueError(f"Unable to parse input: {e}")
return begin + start_tag + prefix + replacement_text + postfix + stop_tag + end
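# Illustrative example (not part of the original script) of the tag-replacement
# mechanism described at the top of this file:
#
#   doc = "intro <!-- X:START -->old<!-- X:END --> outro"
#   replace_text(doc, "<!-- X:START -->", "<!-- X:END -->", "new")
#   => "intro <!-- X:START -->new<!-- X:END --> outro"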
def main():
""" update README.md """
# update phototemplate.md with info on filters
print(f"Updating {TEMPLATE_HELP}")
filter_help = "\n".join(f"- {f}: {descr}" for f, descr in FILTER_VALUES.items())
with open(TEMPLATE_HELP) as file:
template_help = file.read()
template_help = replace_text(
template_help,
TEMPLATE_FILTER_TABLE_START,
TEMPLATE_FILTER_TABLE_STOP,
filter_help,
prefix="\n",
postfix="\n",
)
with open(TEMPLATE_HELP, "w") as file:
file.write(template_help)
# update the help text for `osxphotos help export`
print("Updating help for `osxphotos help export`")
with open("README.md", "r") as file:
readme = file.read()
help_txt = generate_help_text("export")
new_readme = replace_text(
readme, USAGE_START, USAGE_STOP, help_txt, prefix="\n```\n", postfix="\n```\n"
)
# update the template substitution table
print("Updating template substitution table")
template_table = generate_template_table()
new_readme = replace_text(
new_readme,
TEMPLATE_TABLE_START,
TEMPLATE_TABLE_STOP,
template_table,
prefix="\n",
postfix="\n",
)
# update the template system docs
print("Updating template system help")
with open(TEMPLATE_HELP) as fd:
template_help = fd.read()
new_readme = replace_text(
new_readme,
TEMPLATE_HELP_START,
TEMPLATE_HELP_STOP,
template_help,
prefix="\n",
postfix="\n",
)
# update the tutorial
print("Updating tutorial")
with open(TUTORIAL_HELP) as fd:
tutorial_help = fd.read()
# indent all Markdown headers one more level
tutorial_help = re.sub(r"^([#]+)", r"\1#", tutorial_help, flags=re.MULTILINE)
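    # e.g. "# Overview" becomes "## Overview" and "## Details" becomes "### Details",
    # so the tutorial's headers nest one level below the README's own sections.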
# insert link for Template System
tutorial_help = replace_text(
tutorial_help,
TEMPLATE_SYSTEM_LINK_START,
TEMPLATE_SYSTEM_LINK_STOP,
"[Template System](#template-system)",
)
# remove Tutorial Header
tutorial_help = replace_text(
tutorial_help, TUTORIAL_HEADER_START, TUTORIAL_HEADER_STOP, ""
)
# insert tutorial text into readme
new_readme = replace_text(new_readme, TUTORIAL_START, TUTORIAL_STOP, tutorial_help)
print("Writing new README.md")
with open("README.md", "w") as file:
file.write(new_readme)
if __name__ == "__main__":
main()
| 31.514851
| 87
| 0.672793
|
634d576934cb27482e2a0175e523996f353783da
| 2,176
|
py
|
Python
|
coaching_sessions/anagrams/group_anagrams.py
|
Mrsteveson/Review
|
0dc401e9ba45efcc4cccfddfd425f72ced96e562
|
[
"MIT"
] | null | null | null |
coaching_sessions/anagrams/group_anagrams.py
|
Mrsteveson/Review
|
0dc401e9ba45efcc4cccfddfd425f72ced96e562
|
[
"MIT"
] | null | null | null |
coaching_sessions/anagrams/group_anagrams.py
|
Mrsteveson/Review
|
0dc401e9ba45efcc4cccfddfd425f72ced96e562
|
[
"MIT"
] | null | null | null |
# Problem: groupAnagrams(strs: List[str]) -> List[List[str]]
# Planning notes before the implementation:
# go with what we already know off the bat from the problem statement
# we're working with anagrams: a word formed from another by rearranging its letters
# the output needs to be in the form of an array of arrays
# we need to be able to tell if two words are anagrams
# we're working with strings
# work through the given examples with pencil and paper (or whatever scratch medium you
# like using)
# also expand on given examples or create new ones and work through them to broaden and
# reinforce your own understanding of the problem
# what data structure should we use to hold our groupings?
# linked list since we're working with a group of arrays
# expected output type is array of arrays
# would that be a suitable data structure for us to use?
# if we're using an array of arrays: for the current word, sort it and check it against the
# sorted version of the latest word in each group
# using an array of arrays, we end up performing more sorts than we need to
# idea: in order to reduce the number of redundant sorts, let's save the sorted version of
# each word and associate it with its grouping
# because we want to keep track of the association between the sorted word and its grouping,
# use a dictionary with the sorted word as key and array of words as value
# how do we determine if two words are anagrams?
# 1. if two words, after they are sorted, exhibit the same letters in the same order
# 2. use dictionaries to store the letters of one of the strings as keys with values as counts
# of each letter; go through the other string, decrementing the letter in the dictionary's count
# until we see that either the two words are not anagrams or they are
# how do we take our dictionary and convert it into the expected output type?
# return the values of our dictionary using dict.values()
def groupAnagrams(strs):
    groups = {}
    for word in strs:
        key = ''.join(sorted(word))
        if groups.get(key):
            groups[key].append(word)
        else:
            groups[key] = [word]
    return list(groups.values())  # list of grouped anagrams, as the problem expects
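# For comparison, a rough sketch of the second idea from the notes above
# (compare letter counts instead of sorting); names here are illustrative only:
from collections import Counter

def are_anagrams(word_a, word_b):
    # two words are anagrams exactly when their letter counts match
    return Counter(word_a) == Counter(word_b)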
| 51.809524
| 97
| 0.711397
|
1a0096c14acd135368bc0b0b34e6c22197557d93
| 2,469
|
py
|
Python
|
ppmessage/scripts/install_brews.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | 3
|
2018-07-22T10:56:42.000Z
|
2020-01-14T10:33:26.000Z
|
ppmessage/scripts/install_brews.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | null | null | null |
ppmessage/scripts/install_brews.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | 7
|
2018-03-22T05:27:47.000Z
|
2021-01-19T13:03:17.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
# All rights reserved
#
"""
install_brews.py uses Homebrew (brew) to install all the packages which ppmessage depends on.
"""
import os
import sys
import platform
import traceback
import subprocess
def _color_print(str, color="red"):
if color == "red":
print('\033[1;31;40m')
if color == "green":
print('\033[1;32;40m')
print(str)
print('\033[0m')
return
def _install():
basic_list = ["pcre", "dos2unix", "hg", "autoconf", "libtool", "automake", "redis", "libmagic", "mysql", "libjpeg", "libffi", "faac", "fdk-aac", "lame", "mercurial", "makedepend", "mp3val", "ossp-uuid", "readline", "watchman", "wget", "x264", "xvid", "xz", "yasm"]
    _missed = []
    for should_item in basic_list:
        install_cmd = "brew install " + should_item
        _color_print("%s" % install_cmd, "green")
        try:
            subprocess.check_output(install_cmd, shell=True)
        except:
            _missed.append(should_item)
if len(_missed) > 0:
_color_print("failed to run: `brew install %s`" % str(_missed))
sys.exit()
install_cmds = [
"brew tap homebrew/services",
"brew tap homebrew/nginx",
"brew install nginx-full --with-upload-module",
"brew install ffmpeg --with-fdk-aac --with-opencore-amr --with-libvorbis --with-opus"
]
for install_cmd in install_cmds:
_color_print("%s" % install_cmd, "green")
try:
subprocess.check_output(install_cmd, shell=True)
except:
_color_print("failed to run: %s" % install_cmd)
sys.exit()
return
def _check_brew():
which_cmd = "which brew"
no_brew = False
try:
subprocess.check_output(which_cmd, shell=True)
except:
_color_print("No brew install, auto download...")
no_brew = True
install_cmd = '/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"'
if no_brew:
try:
subprocess.check_output(install_cmd, shell=True)
except:
_color_print("Failed to execute: %s" % install_cmd)
sys.exit()
return
def _check_uid():
if 0 == os.getuid():
_color_print("Don't use root privilege")
sys.exit()
return
if __name__ == "__main__":
_check_uid()
_check_brew()
_install()
| 27.433333
| 268
| 0.600243
|
a799a8ba5591cd65e29980048ad090fb3211ad33
| 1,599
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_axis17.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_axis17.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_axis17.py
|
totdiao/XlsxWriter
|
3d65858d8933bddb8262d500bcc2005f28fde645
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_axis17.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812736, 45705088]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_y_axis({'log_base': 10})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 25.790323
| 79
| 0.565353
|
550680415f289eca66dc41d7092fae395cf62803
| 897
|
py
|
Python
|
test/test_ip.py
|
liaohongdong/novel-scrapy
|
f73afef5b5abcc961279f1cd30136a0257281cb7
|
[
"MIT"
] | null | null | null |
test/test_ip.py
|
liaohongdong/novel-scrapy
|
f73afef5b5abcc961279f1cd30136a0257281cb7
|
[
"MIT"
] | null | null | null |
test/test_ip.py
|
liaohongdong/novel-scrapy
|
f73afef5b5abcc961279f1cd30136a0257281cb7
|
[
"MIT"
] | null | null | null |
import requests
import pprint
def get_proxy():
return requests.get("http://39.108.115.177:5010/get/").content
def delete_proxy(proxy):
requests.get("http://39.108.115.177:5010/delete/?proxy={}".format(bytes.decode(proxy)))
def getHtml():
# ....
retry_count = 3
proxy = get_proxy()
print(proxy)
while retry_count > 0:
try:
html = requests.get('http://www.baidu.com', proxies={"http": "http://{}".format(bytes.decode(proxy))})
            # access the page through the proxy
return html
except Exception:
retry_count -= 1
            # after 3 failures, remove the proxy from the proxy pool
delete_proxy(bytes.decode(proxy))
return None
a = getHtml()
pp = pprint.PrettyPrinter(indent=4)
ddd = pp.pformat(a.__dict__)
print(ddd)
dst = requests.get(
'http://www.qidian.com/finish?action=hidden&orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=2&page=2')
print(dst)
| 23.605263
| 118
| 0.634337
|
6e4ff29b2b220f375e0459ad6c1424b3a78906c2
| 3,169
|
py
|
Python
|
sensu_handler_rocketchat/__init__.py
|
pdreker-pa/sensu-handler-rocketchat
|
50a11f25c085425a71d6b34adb5342506106b424
|
[
"Apache-2.0"
] | 1
|
2019-06-17T08:22:05.000Z
|
2019-06-17T08:22:05.000Z
|
sensu_handler_rocketchat/__init__.py
|
pdreker-pa/sensu-handler-rocketchat
|
50a11f25c085425a71d6b34adb5342506106b424
|
[
"Apache-2.0"
] | null | null | null |
sensu_handler_rocketchat/__init__.py
|
pdreker-pa/sensu-handler-rocketchat
|
50a11f25c085425a71d6b34adb5342506106b424
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Patrick Dreker <patrick@dreker.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sensu_plugin import SensuHandler
import pprint
import urllib2, json, os, sys, argparse
class RocketHandler(SensuHandler):
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'-c',
'--config',
required = False,
default = "rockethandler",
help = 'config section to use'
)
(self.options, self.remain) = self.parser.parse_known_args()
self.config_space = vars(self.options)["config"]
super(RocketHandler, self).__init__()
def handle(self):
message_payload = {}
if "channel" in self.settings[self.config_space]:
message_payload["channel"] = self.settings[self.config_space]["channel"]
if "nickname" in self.settings[self.config_space]:
message_payload["username"] = self.settings[self.config_space]["nickname"]
message_payload["attachments"] = []
att = {}
att["title"] = "%s (%s): %s" % (self.translate_status(self.event["check"]["status"]),
self.event["check"]["name"],
self.event["client"]["name"])
if "dashboard_url" in self.settings[self.config_space]:
att["title_link"] = self.settings[self.config_space]["dashboard_url"]
if "pretext" in self.settings[self.config_space]:
att["pretext"] = self.settings[self.config_space]["pretext"]
att["color"] = self.status_to_color(self.event["check"]["status"])
att["ts"] = self.event["timestamp"]
att_fields = []
if "thresholds" in self.event["check"]:
for key,value in self.event["check"]["thresholds"].iteritems():
att_fields.append({"title": key, "value": str(value), "short": True})
att["fields"] = att_fields
att["text"] = self.event["check"]["output"]
message_payload["attachments"].append(att)
req = urllib2.Request(self.settings[self.config_space]["hook_url"])
req.add_header('Content-Type', 'application/json')
payload = json.dumps(message_payload)
response = urllib2.urlopen(req, payload)
        if response.getcode() != 200:
print "Posting to Rocketchat failed!"
def translate_status(self, status):
return ["OK", "WARNING", "CRITICAL", "UNKNOWN"][status]
def status_to_color(self, status):
return ["#36a64f", "#FFCC00", "#FF0000", "#6600CC"][status]
if __name__ == "__main__":
f = RocketHandler()
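# For reference, the handler above reads a settings section shaped roughly like
# the sketch below (section name matches --config, default "rockethandler";
# every value here is a placeholder):
#
# {
#   "rockethandler": {
#     "hook_url": "https://chat.example.com/hooks/TOKEN",
#     "channel": "#alerts",
#     "nickname": "sensu",
#     "dashboard_url": "https://dashboard.example.com",
#     "pretext": "Sensu alert"
#   }
# }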
| 38.180723
| 93
| 0.625749
|
e2408ebef6330333fa367ba1b8aeeb1290293498
| 34,528
|
py
|
Python
|
tests/test_checkout.py
|
PanthraxDigital/PD_Anzee_BE
|
070998427db470044fdccb1cf75a5b0015677d73
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_checkout.py
|
PanthraxDigital/PD_Anzee_BE
|
070998427db470044fdccb1cf75a5b0015677d73
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_checkout.py
|
PanthraxDigital/PD_Anzee_BE
|
070998427db470044fdccb1cf75a5b0015677d73
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from unittest.mock import Mock, patch
import pytest
from django.urls import reverse
from django_countries.fields import Country
from freezegun import freeze_time
from prices import Money, TaxedMoney
from saleor.account.models import Address
from saleor.checkout import views
from saleor.checkout.forms import CartVoucherForm, CountryForm
from saleor.checkout.utils import (
add_variant_to_cart, change_billing_address_in_cart,
change_shipping_address_in_cart, clear_shipping_method, create_order,
get_cart_data_for_checkout,
get_prices_of_products_in_discounted_categories, get_taxes_for_cart,
get_voucher_discount_for_cart, get_voucher_for_cart,
is_valid_shipping_method, recalculate_cart_discount,
remove_voucher_from_cart)
from saleor.core.exceptions import InsufficientStock
from saleor.core.utils.taxes import (
ZERO_MONEY, ZERO_TAXED_MONEY, get_taxes_for_country)
from saleor.discount import DiscountValueType, VoucherType
from saleor.discount.models import NotApplicable, Voucher
from saleor.product.models import Category
from saleor.shipping.models import ShippingZone
from .utils import compare_taxes, get_redirect_location
def test_country_form_country_choices():
form = CountryForm(data={'csrf': '', 'country': 'PL'})
assert form.fields['country'].choices == []
zone = ShippingZone.objects.create(countries=['PL', 'DE'], name='Europe')
form = CountryForm(data={'csrf': '', 'country': 'PL'})
expected_choices = [
(country.code, country.name) for country in zone.countries]
expected_choices = sorted(
expected_choices, key=lambda choice: choice[1])
assert form.fields['country'].choices == expected_choices
def test_is_valid_shipping_method(
cart_with_item, address, shipping_zone, vatlayer):
cart = cart_with_item
cart.shipping_address = address
cart.save()
# no shipping method assigned
assert not is_valid_shipping_method(cart, vatlayer, None)
shipping_method = shipping_zone.shipping_methods.first()
cart.shipping_method = shipping_method
cart.save()
assert is_valid_shipping_method(cart, vatlayer, None)
zone = ShippingZone.objects.create(name='DE', countries=['DE'])
shipping_method.shipping_zone = zone
shipping_method.save()
assert not is_valid_shipping_method(cart, vatlayer, None)
def test_clear_shipping_method(cart, shipping_method):
cart.shipping_method = shipping_method
cart.save()
clear_shipping_method(cart)
cart.refresh_from_db()
assert not cart.shipping_method
@pytest.mark.parametrize('cart_length, is_shipping_required, redirect_url', [
(0, True, reverse('cart:index')),
(0, False, reverse('cart:index')),
(1, True, reverse('checkout:shipping-address')),
(1, False, reverse('checkout:summary'))])
def test_view_checkout_index(
monkeypatch, rf, cart_length, is_shipping_required, redirect_url):
cart = Mock(
__len__=Mock(return_value=cart_length),
is_shipping_required=Mock(return_value=is_shipping_required))
monkeypatch.setattr(
'saleor.checkout.utils.get_cart_from_request', lambda req, qs: cart)
url = reverse('checkout:index')
request = rf.get(url, follow=True)
response = views.checkout_index(request)
assert response.url == redirect_url
def test_view_checkout_index_authorized_user(
authorized_client, customer_user, request_cart_with_item):
request_cart_with_item.user = customer_user
request_cart_with_item.save()
url = reverse('checkout:index')
response = authorized_client.get(url, follow=True)
redirect_url = reverse('checkout:shipping-address')
assert response.request['PATH_INFO'] == redirect_url
def test_view_checkout_shipping_address(client, request_cart_with_item):
url = reverse('checkout:shipping-address')
data = {
'email': 'test@example.com',
'first_name': 'John',
'last_name': 'Doe',
'street_address_1': 'Aleje Jerozolimskie 2',
'street_address_2': '',
'city': 'Warszawa',
'city_area': '',
'country_area': '',
'postal_code': '00-374',
'phone': '+48536984008',
'country': 'PL'}
response = client.get(url)
assert response.request['PATH_INFO'] == url
response = client.post(url, data, follow=True)
redirect_url = reverse('checkout:shipping-method')
assert response.request['PATH_INFO'] == redirect_url
assert request_cart_with_item.email == 'test@example.com'
def test_view_checkout_shipping_address_with_invalid_data(
client, request_cart_with_item):
url = reverse('checkout:shipping-address')
data = {
'email': 'test@example.com',
'first_name': 'John',
'last_name': 'Doe',
'street_address_1': 'Aleje Jerozolimskie 2',
'street_address_2': '',
'city': 'Warszawa',
'city_area': '',
'country_area': '',
'postal_code': '00-37412',
'phone': '+48536984008',
'country': 'PL'}
response = client.post(url, data, follow=True)
assert response.request['PATH_INFO'] == url
def test_view_checkout_shipping_address_authorized_user(
authorized_client, customer_user, request_cart_with_item):
request_cart_with_item.user = customer_user
request_cart_with_item.save()
url = reverse('checkout:shipping-address')
data = {'address': customer_user.default_billing_address.pk}
response = authorized_client.post(url, data, follow=True)
redirect_url = reverse('checkout:shipping-method')
assert response.request['PATH_INFO'] == redirect_url
assert request_cart_with_item.email == customer_user.email
def test_view_checkout_shipping_address_without_shipping(
request_cart, product_without_shipping, client):
variant = product_without_shipping.variants.get()
add_variant_to_cart(request_cart, variant)
url = reverse('checkout:shipping-address')
response = client.get(url)
assert response.status_code == 302
assert get_redirect_location(response) == reverse('checkout:summary')
assert not request_cart.email
def test_view_checkout_shipping_method(
client, shipping_zone, address, request_cart_with_item):
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.save()
url = reverse('checkout:shipping-method')
data = {'shipping_method': shipping_zone.shipping_methods.first().pk}
response = client.get(url)
assert response.request['PATH_INFO'] == url
response = client.post(url, data, follow=True)
redirect_url = reverse('checkout:summary')
assert response.request['PATH_INFO'] == redirect_url
def test_view_checkout_shipping_method_authorized_user(
authorized_client, customer_user, shipping_zone, address,
request_cart_with_item):
request_cart_with_item.user = customer_user
request_cart_with_item.email = customer_user.email
request_cart_with_item.shipping_address = address
request_cart_with_item.save()
url = reverse('checkout:shipping-method')
data = {'shipping_method': shipping_zone.shipping_methods.first().pk}
response = authorized_client.get(url)
assert response.request['PATH_INFO'] == url
response = authorized_client.post(url, data, follow=True)
redirect_url = reverse('checkout:summary')
assert response.request['PATH_INFO'] == redirect_url
def test_view_checkout_shipping_method_without_shipping(
request_cart, product_without_shipping, client):
variant = product_without_shipping.variants.get()
add_variant_to_cart(request_cart, variant)
url = reverse('checkout:shipping-method')
response = client.get(url)
assert response.status_code == 302
assert get_redirect_location(response) == reverse('checkout:summary')
def test_view_checkout_shipping_method_without_address(
request_cart_with_item, client):
url = reverse('checkout:shipping-method')
response = client.get(url)
assert response.status_code == 302
redirect_url = reverse('checkout:shipping-address')
assert get_redirect_location(response) == redirect_url
@patch('saleor.checkout.views.summary.send_order_confirmation')
def test_view_checkout_summary(
mock_send_confirmation, client, shipping_zone, address,
request_cart_with_item):
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
url = reverse('checkout:summary')
data = {'address': 'shipping_address'}
response = client.get(url)
assert response.request['PATH_INFO'] == url
response = client.post(url, data, follow=True)
order = response.context['order']
assert order.user_email == 'test@example.com'
redirect_url = reverse('order:payment', kwargs={'token': order.token})
assert response.request['PATH_INFO'] == redirect_url
mock_send_confirmation.delay.assert_called_once_with(order.pk)
@patch('saleor.checkout.views.summary.send_order_confirmation')
def test_view_checkout_summary_authorized_user(
mock_send_confirmation, authorized_client, customer_user,
shipping_zone, address, request_cart_with_item):
request_cart_with_item.shipping_address = address
request_cart_with_item.user = customer_user
request_cart_with_item.email = customer_user.email
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
url = reverse('checkout:summary')
data = {'address': 'shipping_address'}
response = authorized_client.get(url)
assert response.request['PATH_INFO'] == url
response = authorized_client.post(url, data, follow=True)
order = response.context['order']
assert order.user_email == customer_user.email
redirect_url = reverse('order:payment', kwargs={'token': order.token})
assert response.request['PATH_INFO'] == redirect_url
mock_send_confirmation.delay.assert_called_once_with(order.pk)
@patch('saleor.checkout.views.summary.send_order_confirmation')
def test_view_checkout_summary_save_language(
mock_send_confirmation, authorized_client, customer_user,
shipping_zone, address, request_cart_with_item, settings):
settings.LANGUAGE_CODE = 'en'
user_language = 'fr'
authorized_client.cookies[settings.LANGUAGE_COOKIE_NAME] = user_language
url = reverse('set_language')
data = {'language': 'fr'}
authorized_client.post(url, data)
request_cart_with_item.shipping_address = address
request_cart_with_item.user = customer_user
request_cart_with_item.email = customer_user.email
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
url = reverse('checkout:summary')
data = {'address': 'shipping_address'}
response = authorized_client.get(url, HTTP_ACCEPT_LANGUAGE=user_language)
assert response.request['PATH_INFO'] == url
response = authorized_client.post(
url, data, follow=True, HTTP_ACCEPT_LANGUAGE=user_language)
order = response.context['order']
assert order.user_email == customer_user.email
assert order.language_code == user_language
redirect_url = reverse('order:payment', kwargs={'token': order.token})
assert response.request['PATH_INFO'] == redirect_url
mock_send_confirmation.delay.assert_called_once_with(order.pk)
def test_view_checkout_summary_without_address(request_cart_with_item, client):
url = reverse('checkout:summary')
response = client.get(url)
assert response.status_code == 302
redirect_url = reverse('checkout:shipping-address')
assert get_redirect_location(response) == redirect_url
def test_view_checkout_summary_without_shipping_zone(
request_cart_with_item, client, address):
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.save()
url = reverse('checkout:summary')
response = client.get(url)
assert response.status_code == 302
redirect_url = reverse('checkout:shipping-method')
assert get_redirect_location(response) == redirect_url
def test_view_checkout_summary_with_invalid_voucher(
client, request_cart_with_item, shipping_zone, address, voucher):
voucher.usage_limit = 3
voucher.save()
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
url = reverse('checkout:summary')
voucher_url = '{url}?next={url}'.format(url=url)
data = {'discount-voucher': voucher.code}
response = client.post(voucher_url, data, follow=True, HTTP_REFERER=url)
assert response.context['cart'].voucher_code == voucher.code
voucher.used = 3
voucher.save()
data = {'address': 'shipping_address'}
response = client.post(url, data, follow=True)
cart = response.context['cart']
assert not cart.voucher_code
assert not cart.discount_amount
assert not cart.discount_name
response = client.post(url, data, follow=True)
order = response.context['order']
assert not order.voucher
assert not order.discount_amount
assert not order.discount_name
def test_view_checkout_summary_with_invalid_voucher_code(
client, request_cart_with_item, shipping_zone, address):
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
url = reverse('checkout:summary')
voucher_url = '{url}?next={url}'.format(url=url)
data = {'discount-voucher': 'invalid-code'}
response = client.post(voucher_url, data, follow=True, HTTP_REFERER=url)
assert 'voucher' in response.context['voucher_form'].errors
assert response.context['cart'].voucher_code is None
def test_view_checkout_place_order_with_expired_voucher_code(
client, request_cart_with_item, shipping_zone, address, voucher):
cart = request_cart_with_item
# add shipping information to the cart
cart.shipping_address = address
cart.email = 'test@example.com'
cart.shipping_method = (
shipping_zone.shipping_methods.first())
# set voucher to be expired
yesterday = datetime.date.today() - datetime.timedelta(days=1)
voucher.end_date = yesterday
voucher.save()
# put the voucher code to cart
cart.voucher_code = voucher.code
# save the cart
cart.save()
checkout_url = reverse('checkout:summary')
# place order
data = {'address': 'shipping_address'}
response = client.post(checkout_url, data, follow=True)
# order should not have been placed
assert response.request['PATH_INFO'] == checkout_url
# ensure the voucher was removed
cart.refresh_from_db()
assert not cart.voucher_code
def test_view_checkout_place_order_with_item_out_of_stock(
client, request_cart_with_item,
shipping_zone, address, voucher, product):
cart = request_cart_with_item
variant = product.variants.get()
# add shipping information to the cart
cart.shipping_address = address
cart.email = 'test@example.com'
cart.shipping_method = shipping_zone.shipping_methods.first()
cart.save()
# make the variant be out of stock
variant.quantity = 0
variant.save()
checkout_url = reverse('checkout:summary')
redirect_url = reverse('cart:index')
# place order
data = {'address': 'shipping_address'}
response = client.post(checkout_url, data, follow=True)
# order should have been aborted,
# and user should have been redirected to its cart
assert response.request['PATH_INFO'] == redirect_url
def test_view_checkout_place_order_without_shipping_address(
client, request_cart_with_item, shipping_zone):
cart = request_cart_with_item
# add shipping information to the cart
cart.email = 'test@example.com'
cart.shipping_method = (
shipping_zone.shipping_methods.first())
# save the cart
cart.save()
checkout_url = reverse('checkout:summary')
redirect_url = reverse('checkout:shipping-address')
# place order
data = {'address': 'shipping_address'}
response = client.post(checkout_url, data, follow=True)
# order should have been aborted,
# and user should have been redirected to its cart
assert response.request['PATH_INFO'] == redirect_url
def test_view_checkout_summary_remove_voucher(
client, request_cart_with_item, shipping_zone, voucher, address):
request_cart_with_item.shipping_address = address
request_cart_with_item.email = 'test@example.com'
request_cart_with_item.shipping_method = (
shipping_zone.shipping_methods.first())
request_cart_with_item.save()
remove_voucher_url = reverse('checkout:summary')
voucher_url = '{url}?next={url}'.format(url=remove_voucher_url)
data = {'discount-voucher': voucher.code}
response = client.post(
voucher_url, data, follow=True, HTTP_REFERER=remove_voucher_url)
assert response.context['cart'].voucher_code == voucher.code
url = reverse('checkout:remove-voucher')
response = client.post(url, follow=True, HTTP_REFERER=remove_voucher_url)
assert not response.context['cart'].voucher_code
def test_create_order_insufficient_stock(
request_cart, customer_user, product_without_shipping):
variant = product_without_shipping.variants.get()
add_variant_to_cart(request_cart, variant, 10, check_quantity=False)
request_cart.user = customer_user
request_cart.billing_address = customer_user.default_billing_address
request_cart.save()
with pytest.raises(InsufficientStock):
create_order(
request_cart, 'tracking_code', discounts=None, taxes=None)
def test_note_in_created_order(request_cart_with_item, address):
request_cart_with_item.shipping_address = address
request_cart_with_item.note = 'test_note'
request_cart_with_item.save()
order = create_order(
request_cart_with_item, 'tracking_code', discounts=None, taxes=None)
assert order.customer_note == request_cart_with_item.note
@pytest.mark.parametrize(
'total, discount_value, discount_type, min_amount_spent, discount_amount', [
('100', 10, DiscountValueType.FIXED, None, 10),
('100.05', 10, DiscountValueType.PERCENTAGE, 100, 10)])
def test_get_discount_for_cart_value_voucher(
total, discount_value, discount_type, min_amount_spent,
discount_amount):
voucher = Voucher(
code='unique',
type=VoucherType.VALUE,
discount_value_type=discount_type,
discount_value=discount_value,
min_amount_spent=(
Money(min_amount_spent, 'USD')
if min_amount_spent is not None else None))
subtotal = TaxedMoney(net=Money(total, 'USD'), gross=Money(total, 'USD'))
cart = Mock(get_subtotal=Mock(return_value=subtotal))
discount = get_voucher_discount_for_cart(voucher, cart)
assert discount == Money(discount_amount, 'USD')
def test_get_discount_for_cart_value_voucher_not_applicable():
voucher = Voucher(
code='unique',
type=VoucherType.VALUE,
discount_value_type=DiscountValueType.FIXED,
discount_value=10,
min_amount_spent=Money(100, 'USD'))
subtotal = TaxedMoney(net=Money(10, 'USD'), gross=Money(10, 'USD'))
cart = Mock(get_subtotal=Mock(return_value=subtotal))
with pytest.raises(NotApplicable) as e:
get_voucher_discount_for_cart(voucher, cart)
assert e.value.min_amount_spent == Money(100, 'USD')
@pytest.mark.parametrize(
'shipping_cost, shipping_country_code, discount_value, discount_type,'
'countries, expected_value', [
(10, None, 50, DiscountValueType.PERCENTAGE, [], 5),
(10, None, 20, DiscountValueType.FIXED, [], 10),
(10, 'PL', 20, DiscountValueType.FIXED, [], 10),
(5, 'PL', 5, DiscountValueType.FIXED, ['PL'], 5)])
def test_get_discount_for_cart_shipping_voucher(
shipping_cost, shipping_country_code, discount_value,
discount_type, countries, expected_value):
subtotal = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
shipping_total = TaxedMoney(
net=Money(shipping_cost, 'USD'), gross=Money(shipping_cost, 'USD'))
cart = Mock(
get_subtotal=Mock(return_value=subtotal),
is_shipping_required=Mock(return_value=True),
shipping_method=Mock(
get_total=Mock(return_value=shipping_total)),
shipping_address=Mock(country=Country(shipping_country_code)))
voucher = Voucher(
code='unique', type=VoucherType.SHIPPING,
discount_value_type=discount_type,
discount_value=discount_value,
countries=countries)
discount = get_voucher_discount_for_cart(voucher, cart)
assert discount == Money(expected_value, 'USD')
def test_get_discount_for_cart_shipping_voucher_all_countries():
subtotal = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
shipping_total = TaxedMoney(net=Money(10, 'USD'), gross=Money(10, 'USD'))
cart = Mock(
get_subtotal=Mock(return_value=subtotal),
is_shipping_required=Mock(return_value=True),
shipping_method=Mock(get_total=Mock(return_value=shipping_total)),
shipping_address=Mock(country=Country('PL')))
voucher = Voucher(
code='unique', type=VoucherType.SHIPPING,
discount_value_type=DiscountValueType.PERCENTAGE,
discount_value=50, countries=[])
discount = get_voucher_discount_for_cart(voucher, cart)
assert discount == Money(5, 'USD')
def test_get_discount_for_cart_shipping_voucher_limited_countries():
subtotal = TaxedMoney(net=Money(100, 'USD'), gross=Money(100, 'USD'))
shipping_total = TaxedMoney(net=Money(10, 'USD'), gross=Money(10, 'USD'))
cart = Mock(
get_subtotal=Mock(return_value=subtotal),
is_shipping_required=Mock(return_value=True),
shipping_method=Mock(get_total=Mock(return_value=shipping_total)),
shipping_address=Mock(country=Country('PL')))
voucher = Voucher(
code='unique', type=VoucherType.SHIPPING,
discount_value_type=DiscountValueType.PERCENTAGE,
discount_value=50, countries=['UK', 'DE'])
with pytest.raises(NotApplicable):
get_voucher_discount_for_cart(voucher, cart)
@pytest.mark.parametrize(
'is_shipping_required, shipping_method, discount_value, discount_type,'
'countries, min_amount_spent, subtotal, error_msg', [
(True, Mock(shipping_zone=Mock(countries=['PL'])),
10, DiscountValueType.FIXED, ['US'], None, Money(10, 'USD'),
'This offer is not valid in your country.'),
(True, None, 10, DiscountValueType.FIXED,
[], None, Money(10, 'USD'),
'Please select a shipping method first.'),
(False, None, 10, DiscountValueType.FIXED,
[], None, Money(10, 'USD'),
'Your order does not require shipping.'),
(True, Mock(price=Money(10, 'USD')), 10,
DiscountValueType.FIXED, [], 5, Money(2, 'USD'),
'This offer is only valid for orders over $5.00.')])
def test_get_discount_for_cart_shipping_voucher_not_applicable(
is_shipping_required, shipping_method, discount_value,
discount_type, countries, min_amount_spent, subtotal, error_msg):
subtotal_price = TaxedMoney(net=subtotal, gross=subtotal)
cart = Mock(
get_subtotal=Mock(return_value=subtotal_price),
is_shipping_required=Mock(return_value=is_shipping_required),
shipping_method=shipping_method)
voucher = Voucher(
code='unique', type=VoucherType.SHIPPING,
discount_value_type=discount_type,
discount_value=discount_value,
min_amount_spent=(
Money(min_amount_spent, 'USD')
if min_amount_spent is not None else None),
countries=countries)
with pytest.raises(NotApplicable) as e:
get_voucher_discount_for_cart(voucher, cart)
assert str(e.value) == error_msg
def test_get_discount_for_cart_product_voucher_not_applicable(monkeypatch):
monkeypatch.setattr(
'saleor.checkout.utils.get_prices_of_discounted_products',
lambda cart, product: [])
voucher = Voucher(
code='unique', type=VoucherType.PRODUCT,
discount_value_type=DiscountValueType.FIXED,
discount_value=10)
voucher.save()
cart = Mock()
with pytest.raises(NotApplicable) as e:
get_voucher_discount_for_cart(voucher, cart)
assert str(e.value) == 'This offer is only valid for selected items.'
def test_get_discount_for_cart_collection_voucher_not_applicable(monkeypatch):
monkeypatch.setattr(
'saleor.checkout.utils.get_prices_of_products_in_discounted_collections', # noqa
lambda cart, product: [])
voucher = Voucher(
code='unique', type=VoucherType.COLLECTION,
discount_value_type=DiscountValueType.FIXED,
discount_value=10)
voucher.save()
cart = Mock()
with pytest.raises(NotApplicable) as e:
get_voucher_discount_for_cart(voucher, cart)
assert str(e.value) == 'This offer is only valid for selected items.'
def test_cart_voucher_form_invalid_voucher_code(
monkeypatch, request_cart_with_item):
form = CartVoucherForm(
{'voucher': 'invalid'}, instance=request_cart_with_item)
assert not form.is_valid()
assert 'voucher' in form.errors
def test_cart_voucher_form_voucher_not_applicable(
voucher, request_cart_with_item):
voucher.min_amount_spent = 200
voucher.save()
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
assert not form.is_valid()
assert 'voucher' in form.errors
def test_cart_voucher_form_active_queryset_voucher_not_active(
voucher, request_cart_with_item):
assert Voucher.objects.count() == 1
voucher.start_date = datetime.date.today() + datetime.timedelta(days=1)
voucher.save()
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
qs = form.fields['voucher'].queryset
assert qs.count() == 0
def test_cart_voucher_form_active_queryset_voucher_active(
voucher, request_cart_with_item):
assert Voucher.objects.count() == 1
voucher.start_date = datetime.date.today()
voucher.save()
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
qs = form.fields['voucher'].queryset
assert qs.count() == 1
def test_cart_voucher_form_active_queryset_after_some_time(
voucher, request_cart_with_item):
assert Voucher.objects.count() == 1
voucher.start_date = datetime.date(year=2016, month=6, day=1)
voucher.end_date = datetime.date(year=2016, month=6, day=2)
voucher.save()
with freeze_time('2016-05-31'):
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
assert form.fields['voucher'].queryset.count() == 0
with freeze_time('2016-06-01'):
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
assert form.fields['voucher'].queryset.count() == 1
with freeze_time('2016-06-03'):
form = CartVoucherForm(
{'voucher': voucher.code}, instance=request_cart_with_item)
assert form.fields['voucher'].queryset.count() == 0
def test_get_taxes_for_cart(cart, vatlayer):
taxes = get_taxes_for_cart(cart, vatlayer)
compare_taxes(taxes, vatlayer)
def test_get_taxes_for_cart_with_shipping_address(cart, address, vatlayer):
address.country = 'DE'
address.save()
cart.shipping_address = address
cart.save()
taxes = get_taxes_for_cart(cart, vatlayer)
compare_taxes(taxes, get_taxes_for_country(Country('DE')))
def test_get_taxes_for_cart_with_shipping_address_taxes_not_handled(
cart, settings, address, vatlayer):
settings.VATLAYER_ACCESS_KEY = ''
address.country = 'DE'
address.save()
cart.shipping_address = address
cart.save()
assert not get_taxes_for_cart(cart, None)
def test_get_voucher_for_cart(cart_with_voucher, voucher):
cart_voucher = get_voucher_for_cart(cart_with_voucher)
assert cart_voucher == voucher
def test_get_voucher_for_cart_expired_voucher(cart_with_voucher, voucher):
date_yesterday = datetime.date.today() - datetime.timedelta(days=1)
voucher.end_date = date_yesterday
voucher.save()
cart_voucher = get_voucher_for_cart(cart_with_voucher)
assert cart_voucher is None
def test_get_voucher_for_cart_no_voucher_code(cart):
cart_voucher = get_voucher_for_cart(cart)
assert cart_voucher is None
def test_remove_voucher_from_cart(cart_with_voucher, voucher_translation_fr):
cart = cart_with_voucher
remove_voucher_from_cart(cart)
assert not cart.voucher_code
assert not cart.discount_name
assert not cart.translated_discount_name
assert cart.discount_amount == ZERO_MONEY
def test_recalculate_cart_discount(
cart_with_voucher, voucher, voucher_translation_fr, settings):
settings.LANGUAGE_CODE = 'fr'
voucher.discount_value = 10
voucher.save()
recalculate_cart_discount(cart_with_voucher, None, None)
assert cart_with_voucher.translated_discount_name == voucher_translation_fr.name # noqa
assert cart_with_voucher.discount_amount == Money('10.00', 'USD')
def test_recalculate_cart_discount_voucher_not_applicable(
cart_with_voucher, voucher):
cart = cart_with_voucher
voucher.min_amount_spent = 100
voucher.save()
recalculate_cart_discount(cart_with_voucher, None, None)
assert not cart.voucher_code
assert not cart.discount_name
assert cart.discount_amount == ZERO_MONEY
def test_recalculate_cart_discount_expired_voucher(cart_with_voucher, voucher):
cart = cart_with_voucher
date_yesterday = datetime.date.today() - datetime.timedelta(days=1)
voucher.end_date = date_yesterday
voucher.save()
recalculate_cart_discount(cart_with_voucher, None, None)
assert not cart.voucher_code
assert not cart.discount_name
assert cart.discount_amount == ZERO_MONEY
def test_get_cart_data_for_checkout(cart_with_voucher, vatlayer):
line_price = TaxedMoney(
net=Money('24.39', 'USD'), gross=Money('30.00', 'USD'))
expected_data = {
'cart': cart_with_voucher,
'cart_are_taxes_handled': True,
'cart_lines': [(cart_with_voucher.lines.first(), line_price)],
'cart_shipping_price': ZERO_TAXED_MONEY,
'cart_subtotal': line_price,
'cart_total': line_price - cart_with_voucher.discount_amount}
data = get_cart_data_for_checkout(
cart_with_voucher, discounts=None, taxes=vatlayer)
assert data == expected_data
def test_change_address_in_cart(cart, address):
change_shipping_address_in_cart(cart, address)
change_billing_address_in_cart(cart, address)
cart.refresh_from_db()
assert cart.shipping_address == address
assert cart.billing_address == address
def test_change_address_in_cart_to_none(cart, address):
cart.shipping_address = address
cart.billing_address = address.get_copy()
cart.save()
change_shipping_address_in_cart(cart, None)
change_billing_address_in_cart(cart, None)
cart.refresh_from_db()
assert cart.shipping_address is None
assert cart.billing_address is None
def test_change_address_in_cart_to_same(cart, address):
cart.shipping_address = address
cart.billing_address = address.get_copy()
cart.save(update_fields=['shipping_address', 'billing_address'])
shipping_address_id = cart.shipping_address.id
billing_address_id = cart.billing_address.id
change_shipping_address_in_cart(cart, address)
change_billing_address_in_cart(cart, address)
cart.refresh_from_db()
assert cart.shipping_address.id == shipping_address_id
assert cart.billing_address.id == billing_address_id
def test_change_address_in_cart_to_other(cart, address):
address_id = address.id
cart.shipping_address = address
cart.billing_address = address.get_copy()
cart.save(update_fields=['shipping_address', 'billing_address'])
other_address = Address.objects.create(country=Country('DE'))
change_shipping_address_in_cart(cart, other_address)
change_billing_address_in_cart(cart, other_address)
cart.refresh_from_db()
assert cart.shipping_address == other_address
assert cart.billing_address == other_address
assert not Address.objects.filter(id=address_id).exists()
def test_change_address_in_cart_from_user_address_to_other(
cart, customer_user, address):
address_id = address.id
cart.user = customer_user
cart.shipping_address = address
cart.billing_address = address.get_copy()
cart.save(update_fields=['shipping_address', 'billing_address'])
other_address = Address.objects.create(country=Country('DE'))
change_shipping_address_in_cart(cart, other_address)
change_billing_address_in_cart(cart, other_address)
cart.refresh_from_db()
assert cart.shipping_address == other_address
assert cart.billing_address == other_address
assert Address.objects.filter(id=address_id).exists()
def test_get_prices_of_products_in_discounted_categories(cart_with_item):
lines = cart_with_item.lines.all()
    # There are no discounted categories, therefore all of them are discounted
discounted_lines = get_prices_of_products_in_discounted_categories(
lines, [])
assert [
line.variant.get_price()
for line in lines
for item in range(line.quantity)] == discounted_lines
discounted_category = Category.objects.create(
name='discounted', slug='discounted')
discounted_lines = get_prices_of_products_in_discounted_categories(
lines, [discounted_category])
    # None of the lines belong to the discounted category
assert not discounted_lines
| 35.891892
| 92
| 0.730306
|
dfef6d5443efebb622048e70bf6851c70ce72f65
| 1,468
|
py
|
Python
|
ASTTool.py
|
lxchx/SimpleShellParser
|
873cdd394b5c9faf02d442532e72c7b863f8aad7
|
[
"MIT"
] | null | null | null |
ASTTool.py
|
lxchx/SimpleShellParser
|
873cdd394b5c9faf02d442532e72c7b863f8aad7
|
[
"MIT"
] | null | null | null |
ASTTool.py
|
lxchx/SimpleShellParser
|
873cdd394b5c9faf02d442532e72c7b863f8aad7
|
[
"MIT"
] | null | null | null |
import copy
# Build a node in a unified format so it is easy to pass around
def make_node(name: str, attrs_dict: dict):
result = dict()
result['name'] = name
result['attrs'] = attrs_dict
return result
# Strip the single quotes surrounding the string, if present
def del_single_quo(string: str):
if len(string) >= 3 and string[0] == "\'" and string[-1] == "\'":
return string[1:-1]
else:
return string
# Build the AST node for a redirection token
def make_redirect_node(redirect_tok: str):
    node = make_node('redirect',
                     {
                         'type': int(),  # 0 = input redirection, 1 = output redirection, 2 = append output
                         'from': str(),
                         'to': str()
                     })
tok = copy.copy(redirect_tok)
    if tok[0].isnumeric():  # a source fd was given, which implies output redirection
        node['attrs']['from'] = copy.copy(tok[0])
        tok = tok[1:]  # strip the source fd so the rest can be handled uniformly
    # determine the redirection type
    if tok[0] == '>':
        re_type = 1 if (tok[1] != '>') else 2
        node['attrs']['type'] = re_type
        tok = tok[re_type:]  # strip the redirection operator
    else:
        node['attrs']['type'] = 0
        tok = tok[1:]  # strip the redirection operator
    # whatever remains of tok must be the redirection target
node['attrs']['to'] = copy.copy(del_single_quo(tok))
return node
if __name__ == '__main__':
a = make_node('a', {'a1': 'a11', 'a2': 'a22'})
print(a)
b = make_node('b', {'b1': 'b11', 'b2': 'b22'})
print(b)
print(a)
print(del_single_quo("'good '?' morning!'"))
redirect_tok = "2>>'file'"
print(redirect_tok)
print(make_redirect_node(redirect_tok))
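    # For the token "2>>'file'" above, the printed node is expected to be:
    #   {'name': 'redirect', 'attrs': {'type': 2, 'from': '2', 'to': 'file'}}
    # i.e. fd 2 is redirected in append mode (type 2) to the file named 'file'.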
| 25.754386
| 69
| 0.524523
|
4c40e6e3dddc9c73fda0b17ee27d7f0333aff01b
| 2,706
|
py
|
Python
|
Evolution/ES.py
|
Yangjiaxi/toys
|
e806cc14b1da9fb75b927b84c787801dc7e8a6bf
|
[
"MIT"
] | null | null | null |
Evolution/ES.py
|
Yangjiaxi/toys
|
e806cc14b1da9fb75b927b84c787801dc7e8a6bf
|
[
"MIT"
] | null | null | null |
Evolution/ES.py
|
Yangjiaxi/toys
|
e806cc14b1da9fb75b927b84c787801dc7e8a6bf
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from tqdm import tqdm
sb.set()
sb.set_style("whitegrid")
plt.rcParams['figure.figsize'] = [4, 4]
plt.rcParams['figure.dpi'] = 144
plt.rcParams['font.family'] = "Arial Unicode MS"
plt.rcParams['axes.unicode_minus'] = False
DNA_SIZE = 1 # DNA (real number)
DNA_BOUND = [0, 5] # solution upper and lower bounds
N_GENERATIONS = 150
POP_SIZE = 100 # population size
N_KID = 10 # n kids per generation
def F(x):
return np.sin(10 * x) * x + np.cos(4 * x) * x
# return np.sin(0.75*x**3)*x + np.cos(2*x) * x
# find non-zero fitness for selection
def get_fitness(pred):
return pred.flatten()
def make_kid(pop, n_kid):
# generate empty kid holder
kids = {'DNA': np.empty((n_kid, DNA_SIZE))}
kids['mut_strength'] = np.empty_like(kids['DNA'])
for kv, ks in zip(kids['DNA'], kids['mut_strength']):
# crossover (roughly half p1 and half p2)
p1, p2 = np.random.choice(np.arange(POP_SIZE), size=2, replace=False)
        cp = np.random.randint(
            0, 2, DNA_SIZE, dtype=bool)  # crossover points
kv[cp] = pop['DNA'][p1, cp]
kv[~cp] = pop['DNA'][p2, ~cp]
ks[cp] = pop['mut_strength'][p1, cp]
ks[~cp] = pop['mut_strength'][p2, ~cp]
# mutate (change DNA based on normal distribution)
ks[:] = np.maximum(ks + (np.random.rand(*ks.shape) - 0.5),
0.) # must > 0
kv += ks * np.random.randn(*kv.shape)
kv[:] = np.clip(kv, *DNA_BOUND) # clip the mutated value
return kids
def kill_bad(pop, kids):
for key in ['DNA', 'mut_strength']:
pop[key] = np.vstack((pop[key], kids[key]))
# pop = kids
fitness = get_fitness(F(pop['DNA'])) # calculate global fitness
idx = np.arange(pop['DNA'].shape[0])
# selected by fitness ranking (not value)
good_idx = idx[fitness.argsort()][-POP_SIZE:]
for key in ['DNA', 'mut_strength']:
pop[key] = pop[key][good_idx]
return pop
pop = dict(
DNA=5 * np.random.rand(1, DNA_SIZE).repeat(
POP_SIZE, axis=0), # initialize the pop DNA values
mut_strength=np.random.rand(
POP_SIZE, DNA_SIZE)) # initialize the pop mutation strength values
plt.ion() # something about plotting
x = np.linspace(*DNA_BOUND, 200)
plt.plot(x, F(x))
for gen in tqdm(range(N_GENERATIONS)):
# something about plotting
if 'sca' in globals():
sca.remove()
sca = plt.scatter(
pop['DNA'], F(pop['DNA']), s=50, lw=0, c='green', alpha=0.8)
plt.pause(0.1)
# ES part
kids = make_kid(pop, N_KID)
pop = kill_bad(pop, kids) # keep some good parent for elitism
plt.ioff()
plt.show()
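# If only the optimum matters (rather than the animation), the best individual
# can be read from the final population; a minimal sketch using only the
# functions and data defined above:
best_idx = get_fitness(F(pop['DNA'])).argmax()
print('best x:', pop['DNA'][best_idx], 'F(x):', F(pop['DNA'][best_idx]))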
| 30.404494
| 77
| 0.611604
|
cbc93fe3c60304d1d59f5c93dbaed170b4781e8a
| 774
|
py
|
Python
|
tests/conftest.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
iburinoc/aiogoogle
|
6f702c0d19599fce1bd36b7a32922e76aaa74008
|
[
"MIT"
] | null | null | null |
import os
import json
import pytest
from aiogoogle.models import Request
from aiogoogle import Aiogoogle
from aiogoogle.resource import GoogleAPI
@pytest.fixture('function')
def open_discovery_document():
def wrapped(name, version):
current_dir = os.getcwd()
file_name = current_dir + '/tests/data/' + name + '_' + version + '_discovery_doc.json'
with open(file_name, 'r') as discovery_doc:
discovery_doc_dict = json.loads(discovery_doc.read())
return discovery_doc_dict
return wrapped
@pytest.fixture('function')
def create_api(open_discovery_document):
def wrapped(name, version):
disc_doc = open_discovery_document(name, version)
return GoogleAPI(discovery_document=disc_doc)
return wrapped
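# Illustrative only: a test built on these fixtures might look like the sketch
# below (the API name/version must match a *_discovery_doc.json file under
# tests/data/, which is an assumption here):
#
# def test_create_api_returns_google_api(create_api):
#     api = create_api('youtube', 'v3')
#     assert isinstance(api, GoogleAPI)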
| 29.769231
| 95
| 0.723514
|
334310299ca3888b1ccd256a824dd89d719134e1
| 534
|
py
|
Python
|
build/lib/swaggerjmx/settings.py
|
cdlaimin/swaggerjmx
|
40001d5454d5bfef1cc6c8c2b5df9323c4da6143
|
[
"MIT"
] | 71
|
2020-07-01T02:09:20.000Z
|
2022-03-30T01:51:35.000Z
|
build/lib/swaggerjmx/settings.py
|
cdlaimin/swaggerjmx
|
40001d5454d5bfef1cc6c8c2b5df9323c4da6143
|
[
"MIT"
] | 6
|
2020-07-03T07:38:08.000Z
|
2021-06-23T03:05:21.000Z
|
build/lib/swaggerjmx/settings.py
|
cdlaimin/swaggerjmx
|
40001d5454d5bfef1cc6c8c2b5df9323c4da6143
|
[
"MIT"
] | 29
|
2020-07-03T07:19:28.000Z
|
2022-01-27T18:01:42.000Z
|
# -*- coding: utf-8 -*-
"""
Parameters are initialized here as module-level defaults. Before calling the
relevant API, inherit from the Settings class and set the corresponding parameters.
"""
class Settings(object):
"""
# swagger_url
ST.swagger_url = 'https://www.baidu.com/'
    # path where the generated report (jmx files) is written
ST.report_path = 'jmx'
    # optionally, the path of a local JSON file holding what swagger_url would return
swagger_url_json_path = None
"""
    report_path = 'jmx'  # by default a 'jmx' folder is created under the current path
swagger_url = None
swagger_url_json_path = None
swagger_json = None
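# A minimal usage sketch based on the docstring above; the import path is
# inferred from this file's location and the URL is a placeholder:
#
#   from swaggerjmx.settings import Settings as ST
#   ST.swagger_url = 'http://127.0.0.1:8080/v2/api-docs'
#   ST.report_path = 'jmx'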
| 23.217391
| 88
| 0.681648
|
c7e02eee39432072a5ff131be1108b7d29f1f393
| 3,714
|
py
|
Python
|
yolo-create-dataset.py
|
IASATTS/MLP-Face-Dataset
|
6a98b2342cb34003b4423d2424693decf55a0853
|
[
"MIT"
] | null | null | null |
yolo-create-dataset.py
|
IASATTS/MLP-Face-Dataset
|
6a98b2342cb34003b4423d2424693decf55a0853
|
[
"MIT"
] | null | null | null |
yolo-create-dataset.py
|
IASATTS/MLP-Face-Dataset
|
6a98b2342cb34003b4423d2424693decf55a0853
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os.path
import shutil
import random
import pathlib
import glob
create_dataset_folder = False
# Specify the required size of the dataset, equals to number of files in image folder if = None
datasetSize = None
pathImages = "./images/crop_ts_and_others/"
pathLabelsRoot = "./labels/ts_and_others/"
pathDataset = "./datasets/dataset/" # Warning : pathDataset will also set the path to the training+validation folder in the YAML file. That path will be used when training the final yolov5 model.
def create_dataset_folders():
create_folder(pathDataset + "train/images")
create_folder(pathDataset + "train/labels")
create_folder(pathDataset + "validation/images")
create_folder(pathDataset + "validation/labels")
create_folder(pathDataset + "test/images")
create_folder(pathDataset + "test/labels")
def create_folder(path):
os.makedirs(path, exist_ok=False)
# Find the image corresponding to the given label.
def getImageFromFilename(filename):
image_name = filename.split(".")[0]
    # Try to find a matching image for the label; return None if nothing matches.
matchingImages = glob.glob(pathImages + image_name + ".*")
if (len(matchingImages) == 0):
return None
return matchingImages[0]
# -------------------------
# Separate into 3 datasets
# -------------------------
# Create dataset folder
if (create_dataset_folder):
try:
shutil.rmtree(pathDataset)
create_dataset_folders()
except:
print("Dataset folder doesn't exist")
create_dataset_folders()
labels = []
# Go through all the labels in the given path, recursively
for dirpath, dirs, files in os.walk(pathLabelsRoot):
for filename in files:
file_path = os.path.join(dirpath, filename)
_, ext = os.path.splitext(file_path)
if (filename == "labels.txt" or ext == ".zip" or ext == ".rar"):
continue
labels.append((filename, dirpath + "/"))
# datasetSize = all images if not specified.
if(datasetSize is None):
datasetSize = len(labels)
validation_size = int(0.1 * datasetSize)
test_size = int(0.05 * datasetSize)
train_size = int(datasetSize - validation_size - test_size)
random.shuffle(labels)
labels = labels[:datasetSize]
x = 0
# Separate the dataset into folder using the chosen split
print("Separate into folders")
for labelFileName, labelPath in labels:
    # Find the image corresponding to the given label.
imageFilePath = getImageFromFilename(labelFileName)
if (imageFilePath is None):
continue
imageFileName = os.path.basename(imageFilePath)
if(x < train_size):
pathDest = pathDataset + "train/"
elif(x < (train_size + validation_size)):
pathDest = pathDataset + "validation/"
else:
pathDest = pathDataset + "test/"
# Copy the given label to the dataset labels folder
shutil.copyfile(labelPath + labelFileName, pathDest + "labels/" + labelFileName)
#original = PIL.Image.open(pathImages + filename)
#original.save(pathDest + "images/" + filename, format="png", quality=100)
# Copy the given image to the dataset images folder
shutil.copyfile(pathImages + imageFileName, pathDest + "images/" + imageFileName)
print("Processed image " + str(x))
x = x + 1
# Create data.yaml
with open(pathDataset + "data.yaml", "w+") as f:
path = os.path.abspath(pathDataset).replace("\\","/")
f.write("train: " + path + "/train/images\n")
f.write("val: " + path + "/validation/images\n")
f.write("\n")
f.write("nc: 2\n")
f.write("names: ['ts', 'other']")
print("Dataset creation done")
| 31.74359
| 195
| 0.669898
|
95d9db27a7dcb55fcb23ce20d7b11bca4417181d
| 2,449
|
py
|
Python
|
tests/integration/test_zookeeper_config/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 15,577
|
2019-09-23T11:57:53.000Z
|
2022-03-31T18:21:48.000Z
|
tests/integration/test_zookeeper_config/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 16,476
|
2019-09-23T11:47:00.000Z
|
2022-03-31T23:06:01.000Z
|
tests/integration/test_zookeeper_config/test.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 3,633
|
2019-09-23T12:18:28.000Z
|
2022-03-31T15:55:48.000Z
|
import time
import pytest
import logging
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
node1 = cluster.add_instance('node1', with_zookeeper=True,
main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"])
node2 = cluster.add_instance('node2', with_zookeeper=True,
main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_a.xml"])
node3 = cluster.add_instance('node3', with_zookeeper=True,
main_configs=["configs/remote_servers.xml", "configs/zookeeper_config_root_b.xml"])
def create_zk_roots(zk):
zk.ensure_path('/root_a')
zk.ensure_path('/root_b')
logging.debug(f"Create ZK roots:{zk.get_children('/')}")
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.add_zookeeper_startup_command(create_zk_roots)
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_chroot_with_same_root(started_cluster):
for i, node in enumerate([node1, node2]):
node.query('DROP TABLE IF EXISTS simple SYNC')
node.query('''
CREATE TABLE simple (date Date, id UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
'''.format(replica=node.name))
for j in range(2): # Second insert to test deduplication
node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))
time.sleep(1)
assert node1.query('select count() from simple').strip() == '2'
assert node2.query('select count() from simple').strip() == '2'
def test_chroot_with_different_root(started_cluster):
for i, node in [(1, node1), (3, node3)]:
node.query('DROP TABLE IF EXISTS simple_different SYNC')
node.query('''
CREATE TABLE simple_different (date Date, id UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple_different', '{replica}', date, id, 8192);
'''.format(replica=node.name))
for j in range(2): # Second insert to test deduplication
node.query("INSERT INTO simple_different VALUES ({0}, {0})".format(i))
assert node1.query('select count() from simple_different').strip() == '1'
assert node3.query('select count() from simple_different').strip() == '1'
| 42.224138
| 115
| 0.670069
|
cd1125ad7d55af2b550652f2132a1aee00f78b07
| 1,308
|
py
|
Python
|
libcloud/test/common/test_ovh.py
|
mgorny/libcloud
|
ea364c58cd954d6b076c4e30bb283f33381a1246
|
[
"Apache-2.0"
] | 1,435
|
2015-01-07T05:32:51.000Z
|
2022-03-25T19:39:34.000Z
|
libcloud/test/common/test_ovh.py
|
mgorny/libcloud
|
ea364c58cd954d6b076c4e30bb283f33381a1246
|
[
"Apache-2.0"
] | 1,158
|
2015-01-04T18:08:42.000Z
|
2022-03-24T14:34:57.000Z
|
libcloud/test/common/test_ovh.py
|
mgorny/libcloud
|
ea364c58cd954d6b076c4e30bb283f33381a1246
|
[
"Apache-2.0"
] | 832
|
2015-01-05T09:20:21.000Z
|
2022-03-24T19:22:19.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from libcloud.test import MockHttp
FORMAT_URL = re.compile(r'[./?=&-]')
class BaseOvhMockHttp(MockHttp):
def _get_method_name(self, type, use_param, qs, path):
if type:
meth_name = '_json%s_%s_%s' % (FORMAT_URL.sub('_', path), 'get', type)
return meth_name
return "_json"
def _json(self, method, url, body, headers):
meth_name = '_json%s_%s' % (FORMAT_URL.sub('_', url), method.lower())
return getattr(self, meth_name)(method, url, body, headers)
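    # For illustration (hypothetical URL): a request such as
    #   GET /1.0/cloud/project
    # is dispatched by _json() to a mock method named
    #   _json_1_0_cloud_project_get
    # because FORMAT_URL collapses '.', '/', '?', '=', '&' and '-' into underscores.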
| 39.636364
| 82
| 0.714067
|
538a81899a4ec8edab20b45901129caedca22290
| 3,841
|
py
|
Python
|
docs/conf.py
|
epics-containers/pvi
|
c4f1f59cb7aff851d2147f5a156a8d5cf65b63a9
|
[
"Apache-2.0"
] | 1
|
2022-01-31T15:29:59.000Z
|
2022-01-31T15:29:59.000Z
|
docs/conf.py
|
dls-controls/pvi
|
e4a2654edc78b5a4cbe4a976f0b28c1a3bf32964
|
[
"Apache-2.0"
] | 6
|
2021-12-17T16:48:15.000Z
|
2022-01-21T11:03:55.000Z
|
docs/conf.py
|
dls-controls/pvi
|
e4a2654edc78b5a4cbe4a976f0b28c1a3bf32964
|
[
"Apache-2.0"
] | 1
|
2022-01-31T15:30:03.000Z
|
2022-01-31T15:30:03.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import pvi
# -- General configuration ------------------------------------------------
# General information about the project.
project = "pvi"
# The full version, including alpha/beta/rc tags.
release = pvi.__version__
# The short X.Y version.
if "+" in release:
# Not on a tag
version = "master"
else:
version = release
extensions = [
# Use this for generating API docs
"sphinx.ext.autodoc",
# This can parse google style docstrings
"sphinx.ext.napoleon",
# For linking to external sphinx documentation
"sphinx.ext.intersphinx",
# Add links to source code in API docs
"sphinx.ext.viewcode",
# Adds the inheritance-diagram generation directive
"sphinx.ext.inheritance_diagram",
]
# If true, Sphinx will warn about all references where the target cannot
# be found.
nitpicky = True
# A list of (type, target) tuples (by default empty) that should be ignored when
# generating warnings in "nitpicky mode". Note that type should include the
# domain name if present. Example entries would be ('py:func', 'int') or
# ('envvar', 'LD_LIBRARY_PATH').
nitpick_ignore = [("py:func", "int")]
# Both the class’ and the __init__ method’s docstring are concatenated and
# inserted into the main body of the autoclass directive
autoclass_content = "both"
# Order the members by the order they appear in the source code
autodoc_member_order = "bysource"
# Don't inherit docstrings from baseclasses
autodoc_inherit_docstrings = False
# Output graphviz directive produced images in a scalable format
graphviz_output_format = "svg"
# The name of a reST role (builtin or Sphinx extension) to use as the default
# role, that is, for text marked up `like this`
default_role = "any"
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# This means you can link things like `str` and `asyncio` to the relevant
# docs in the python documentation.
intersphinx_mapping = dict(python=("https://docs.python.org/3/", None))
# A dictionary of graphviz graph attributes for inheritance diagrams.
inheritance_graph_attrs = dict(rankdir="TB")
# Common links that should be available on every page
rst_epilog = """
.. _Diamond Light Source:
http://www.diamond.ac.uk
"""
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme_github_versions"
# Options for the sphinx rtd theme, use DLS blue
html_theme_options = dict(style_nav_header_background="rgb(7, 43, 93)")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Add some CSS classes for columns and other tweaks in a custom css file
html_css_files = ["theme_overrides.css"]
# Logo
html_logo = "images/pvi-logo.svg"
html_favicon = "images/pvi-favicon.ico"
| 32.550847
| 80
| 0.727935
|
b7743d9d3fbfa389572fbb3532d503fb934e45a3
| 1,129
|
py
|
Python
|
tests/packagedcode/test_windows.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
tests/packagedcode/test_windows.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
tests/packagedcode/test_windows.py
|
sthagen/nexB-scancode-toolkit
|
12cc1286df78af898fae76fa339da2bb50ad51b9
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from packagedcode import windows
from packages_test_utils import PackageTester
from scancode_config import REGEN_TEST_FIXTURES
class TestWindows(PackageTester):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
    def test_mum_is_package_data_file(self):
test_file = self.get_test_loc('windows/mum/test.mum')
assert windows.MicrosoftUpdateManifestHandler.is_datafile(test_file)
def test_windows_mum_parse(self):
test_file = self.get_test_loc('windows/mum/test.mum')
expected_loc = self.get_test_loc('windows/mum/test.mum.expected')
package_data = windows.MicrosoftUpdateManifestHandler.parse(test_file)
self.check_packages_data(package_data, expected_loc, regen=REGEN_TEST_FIXTURES)
| 38.931034
| 87
| 0.769708
|
e608fc1878a29a316e382bd2014b285bfed3e204
| 6,875
|
py
|
Python
|
list/regnet200M.py
|
PHL22/Backbone
|
b3f103fdab657f2fca494c4cdf2f8195dc837105
|
[
"MIT"
] | null | null | null |
list/regnet200M.py
|
PHL22/Backbone
|
b3f103fdab657f2fca494c4cdf2f8195dc837105
|
[
"MIT"
] | null | null | null |
list/regnet200M.py
|
PHL22/Backbone
|
b3f103fdab657f2fca494c4cdf2f8195dc837105
|
[
"MIT"
] | null | null | null |
import numpy as np
from .reglayers200M import AnyNet, WrappedModel
import torch
from .build import BACKBONE_REGISTRY
from .backbone import Backbone
from detectron2.modeling import ShapeSpec
regnet_200M_config = {'WA': 36.44, 'W0': 24, 'WM': 2.49, 'DEPTH': 13, 'GROUP_W': 8, 'BOT_MUL': 1}
regnet_400M_config = {'WA': 24.48, 'W0': 24, 'WM': 2.54, 'DEPTH': 22, 'GROUP_W': 16, 'BOT_MUL': 1}
regnet_600M_config = {'WA': 36.97, 'W0': 48, 'WM': 2.24, 'DEPTH': 16, 'GROUP_W': 24, 'BOT_MUL': 1}
regnet_800M_config = {'WA': 35.73, 'W0': 56, 'WM': 2.28, 'DEPTH': 16, 'GROUP_W': 16, 'BOT_MUL': 1}
regnet_1600M_config = {'WA': 34.01, 'W0': 80, 'WM': 2.25, 'DEPTH': 18, 'GROUP_W': 24, 'BOT_MUL': 1}
regnet_3200M_config = {'WA': 26.31, 'W0': 88, 'WM': 2.25, 'DEPTH': 25, 'GROUP_W': 48, 'BOT_MUL': 1}
regnet_4000M_config = {'WA': 38.65, 'W0': 96, 'WM': 2.43, 'DEPTH': 23, 'GROUP_W': 40, 'BOT_MUL': 1}
regnet_6400M_config = {'WA': 60.83, 'W0': 184, 'WM': 2.07, 'DEPTH': 17, 'GROUP_W': 56, 'BOT_MUL': 1}
# model_paths = {
# 'regnet_200m': '../ckpts/regnet_200m.pth.tar',
# 'regnet_400m': '../ckpts/regnet_400m.pth.tar',
# 'regnet_600m': '../ckpts/regnet_600m.pth.tar',
# 'regnet_800m': '../ckpts/regnet_800m.pth.tar',
# 'regnet_1600m': '../ckpts/regnet_1600m.pth.tar',
# 'regnet_3200m': '../ckpts/regnet_3200m.pth.tar',
# }
model_paths = {
    'regnet_200m': 'regnet_200m.pth.tar',
    'regnet_400m': 'regnet_400m.pth.tar',
    'regnet_600m': '../ckpts/regnet_600m.pth.tar',
    'regnet_800m': '../ckpts/regnet_800m.pth.tar',
    'regnet_1600m': '../ckpts/regnet_1600m.pth.tar',
    'regnet_3200m': '../ckpts/regnet_3200m.pth.tar',
    # The two entries below are assumed to follow the same checkpoint naming scheme;
    # they are needed so regnet_4000M/regnet_6400M with pretrained=True do not raise KeyError.
    'regnet_4000m': '../ckpts/regnet_4000m.pth.tar',
    'regnet_6400m': '../ckpts/regnet_6400m.pth.tar',
}
def quantize_float(f, q):
"""Converts a float to closest non-zero int divisible by q."""
return int(round(f / q) * q)
def adjust_ws_gs_comp(ws, bms, gs):
"""Adjusts the compatibility of widths and groups."""
ws_bot = [int(w * b) for w, b in zip(ws, bms)]
gs = [min(g, w_bot) for g, w_bot in zip(gs, ws_bot)]
ws_bot = [quantize_float(w_bot, g) for w_bot, g in zip(ws_bot, gs)]
ws = [int(w_bot / b) for w_bot, b in zip(ws_bot, bms)]
return ws, gs
def get_stages_from_blocks(ws, rs):
"""Gets ws/ds of network at each stage from per block values."""
ts_temp = zip(ws + [0], [0] + ws, rs + [0], [0] + rs)
ts = [w != wp or r != rp for w, wp, r, rp in ts_temp]
s_ws = [w for w, t in zip(ws, ts[:-1]) if t]
s_ds = np.diff([d for d, t in zip(range(len(ts)), ts) if t]).tolist()
return s_ws, s_ds
def generate_regnet(w_a, w_0, w_m, d, q=8):
"""Generates per block ws from RegNet parameters."""
assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
ws_cont = np.arange(d) * w_a + w_0
    ks = np.round(np.log(ws_cont / w_0) / np.log(w_m))  # per-block stage index (exponent of w_m)
    ws = w_0 * np.power(w_m, ks)  # continuous widths snapped to powers of w_m
    ws = np.round(np.divide(ws, q)) * q  # quantize each width to a multiple of q
num_stages, max_stage = len(np.unique(ws)), ks.max() + 1
ws, ws_cont = ws.astype(int).tolist(), ws_cont.tolist()
    # ws: quantized per-block widths, num_stages: number of distinct widths,
    # max_stage: largest stage index + 1, ws_cont: continuous widths before rounding
return ws, num_stages, max_stage, ws_cont
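# Worked example (approximate): for the 200M config, generate_regnet(36.44, 24, 2.49, 13)
# yields per-block widths [24, 56, 152, 152, 152, 152, 368, 368, 368, 368, 368, 368, 368];
# get_stages_from_blocks collapses these to stage widths (24, 56, 152, 368) with stage
# depths (1, 1, 4, 7), and adjust_ws_gs_comp leaves them unchanged here because every
# width is already a multiple of the group width (8).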
class RegNet(AnyNet):
"""RegNet model."""
def __init__(self, cfg, **kwargs):
# Generate RegNet ws per block
b_ws, num_s, _, _ = generate_regnet(
cfg['WA'], cfg['W0'], cfg['WM'], cfg['DEPTH']
)
# Convert to per stage format
ws, ds = get_stages_from_blocks(b_ws, b_ws)
# Generate group widths and bot muls
gws = [cfg['GROUP_W'] for _ in range(num_s)]
bms = [cfg['BOT_MUL'] for _ in range(num_s)]
# Adjust the compatibility of ws and gws
ws, gws = adjust_ws_gs_comp(ws, bms, gws)
# Use the same stride for each stage, stride set to 2
ss = [2 for _ in range(num_s)]
# Use SE for RegNetY
se_r = None
# Construct the model
STEM_W = 32
kwargs = {
"stem_w": STEM_W,
"ss": ss,
"ds": ds,
"ws": ws,
"bms": bms,
"gws": gws,
"se_r": se_r,
"nc": 1000,
}
super(RegNet, self).__init__(**kwargs)
def regnet_200M(pretrained=False, **kwargs):
model = RegNet(regnet_200M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_200m'])
model.load_state_dict(state_dict)
return model
def regnet_400M(pretrained=False, **kwargs):
model = RegNet(regnet_400M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_400m'])
model.load_state_dict(state_dict)
return model
def regnet_600M(pretrained=False, **kwargs):
model = RegNet(regnet_600M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_600m'])
model.load_state_dict(state_dict)
return model
def regnet_800M(pretrained=False, **kwargs):
model = RegNet(regnet_800M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_800m'])
model.load_state_dict(state_dict)
return model
def regnet_1600M(pretrained=False, **kwargs):
model = RegNet(regnet_1600M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_1600m'])
model.load_state_dict(state_dict)
return model
def regnet_3200M(pretrained=False, **kwargs):
model = RegNet(regnet_3200M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_3200m'])
model.load_state_dict(state_dict)
return model
def regnet_4000M(pretrained=False, **kwargs):
model = RegNet(regnet_4000M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_4000m'])
model.load_state_dict(state_dict)
return model
def regnet_6400M(pretrained=False, **kwargs):
model = RegNet(regnet_6400M_config, **kwargs)
if pretrained:
model = WrappedModel(model)
state_dict = torch.load(model_paths['regnet_6400m'])
model.load_state_dict(state_dict)
return model
@BACKBONE_REGISTRY.register()
def build_regnet200M_backbone(cfg, input_shape):
return regnet_200M()
if __name__ == '__main__':
net=regnet_200M(pretrained=True)
from torchsummary import summary
summary(net, (3, 224, 224))
pass
pass
| 37.36413
| 101
| 0.6048
|
d532ee864144b2498fccb002ea7d8d1e76ec1b68
| 211
|
py
|
Python
|
oil_management/oil_management/doctype/branch/test_branch.py
|
ahishamali10/oil_management
|
74b9e8a29355cde7d2a309e088c9b8c376dbfa61
|
[
"MIT"
] | null | null | null |
oil_management/oil_management/doctype/branch/test_branch.py
|
ahishamali10/oil_management
|
74b9e8a29355cde7d2a309e088c9b8c376dbfa61
|
[
"MIT"
] | null | null | null |
oil_management/oil_management/doctype/branch/test_branch.py
|
ahishamali10/oil_management
|
74b9e8a29355cde7d2a309e088c9b8c376dbfa61
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, ayman nasser and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestBranch(unittest.TestCase):
pass
| 19.181818
| 51
| 0.758294
|
6f544bfaabaa08da570750a414c8bfa863ef4c3b
| 3,928
|
py
|
Python
|
workflow/scripts/plot_kmers_stats.py
|
akcorut/kGWASflow
|
5f82eb0a348354b2b80f8351a16ba2ff238e07c5
|
[
"MIT"
] | 2
|
2022-01-10T11:14:44.000Z
|
2022-01-22T15:30:51.000Z
|
workflow/scripts/plot_kmers_stats.py
|
akcorut/kGWASflow
|
5f82eb0a348354b2b80f8351a16ba2ff238e07c5
|
[
"MIT"
] | null | null | null |
workflow/scripts/plot_kmers_stats.py
|
akcorut/kGWASflow
|
5f82eb0a348354b2b80f8351a16ba2ff238e07c5
|
[
"MIT"
] | 1
|
2022-01-11T14:20:01.000Z
|
2022-01-11T14:20:01.000Z
|
import csv
import os, argparse, glob, shutil
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
from scipy import stats
## Add argument parsers
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i', '--in_dir', help='Input path for kmc log files', type=str, required=True)
parser.add_argument('-o1', '--out_table', help='Path for output tables', type=str, required=True)
parser.add_argument('-o2', '--out_plot', help='Path for output plots', type=str, required=True)
args = parser.parse_args()
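## Example invocation (hypothetical paths):
##   python plot_kmers_stats.py -i results/kmc_logs -o1 results/tables -o2 results/plots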
## Gather directory names (one directory per sample) in a list
path= args.in_dir
dir_names= os.listdir(path)
## Define a function to generate a k-mer count stats table
# for each accession/sample, with accession name, no. of total reads and
# no. of unique k-mers as columns and one row per sample.
def generate_kmers_stats_tab(dir_path, file_name, dir_names):
unique_kmers=[]
total_reads=[]
accessions=[]
    ## For each accession/sample, gather the accession name
    # from the directory name, and the no. of total reads and no. of
    # unique k-mers from the kmc log files.
for dir in dir_names:
if os.path.isfile(dir_path + '/' + dir + '/' + file_name):
df= pd.read_csv(dir_path + '/' + dir + '/' + file_name, delimiter = ":", header=None)
df= df.dropna()
unique_kmers.append(df.iloc[6][1])
total_reads.append(df.iloc[9][1])
accessions.append(dir)
## Create k-mers count stats table.
target_df = pd.DataFrame(
{'accessions': accessions,
'unique_kmers': unique_kmers,
'total_reads': total_reads
})
## Convert unique k-mers and total reads values to integer.
target_df["unique_kmers"] = pd.to_numeric(target_df["unique_kmers"])
target_df["total_reads"] = pd.to_numeric(target_df["total_reads"])
return target_df
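# Example usage (hypothetical layout, assuming one sub-directory per sample under
# results/kmc_logs, each containing a kmc_canon.log file):
#   stats = generate_kmers_stats_tab(dir_path="results/kmc_logs",
#                                    file_name="kmc_canon.log",
#                                    dir_names=os.listdir("results/kmc_logs"))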
## Define a function to generate plots from the k-mer
# count stats table (above). For a single KMC run (canonized
# or non-canonized) it produces two plots: a plain scatter
# plot and a seaborn joint plot with marginal distributions,
# each with a fitted regression line.
def plot_kmers_stats(target_df, out_path, plot_name):
    # Draw the single-run scatter plot
X=target_df["unique_kmers"]
Y=target_df["total_reads"]
plt.figure(figsize=(9, 9))
plt.scatter(X, Y, alpha=0.5)
slope, intercept = np.polyfit(X, Y, 1)
plt.plot(X, slope*X + intercept,color="red")
plt.xlabel("No. of unique kmers")
plt.ylabel("No. of total reads")
plt.title(plot_name)
plt.savefig(out_path + "/" + plot_name + '.scatter_plot.pdf')
# Plot the joint plot.
sns_plot_2= sns.jointplot(x="unique_kmers", y="total_reads",
data=target_df, kind="reg", height=9, joint_kws={'line_kws':{'color':'red'}})
r, p = stats.pearsonr(target_df["unique_kmers"], target_df["total_reads"])
phantom, = sns_plot_2.ax_joint.plot([], [], linestyle="", alpha=0)
sns_plot_2.ax_joint.legend([phantom],['r={:f}, p={:f}'.format(r,p)])
plt.suptitle(plot_name)
plt.savefig(out_path + "/" + plot_name + '.joint_plot.pdf')
## Generate stats tables for both canonized and non-canonized k-mers
kmc_canon_stats= generate_kmers_stats_tab(dir_path=path, file_name="kmc_canon.log", dir_names=dir_names)
kmc_non_canon_stats= generate_kmers_stats_tab(dir_path=path, file_name="kmc_all.log", dir_names=dir_names)
## Writing out KMC stats as a tsv table
kmc_canon_stats.to_csv(args.out_table + '/' + "kmc_canon.stats.tsv", index=False, sep="\t")
kmc_non_canon_stats.to_csv(args.out_table + '/' + "kmc_all.stats.tsv", index=False, sep="\t")
## Plot the stats
plot_kmers_stats(target_df=kmc_canon_stats, out_path=args.out_plot, plot_name="kmc_canon_total_reads_vs_unique_kmers")
plot_kmers_stats(target_df=kmc_non_canon_stats, out_path=args.out_plot, plot_name="kmc_all_total_reads_vs_unique_kmers")
| 44.134831
| 120
| 0.704939
|
31428d20ef4259856b861814df281ed25ae02e8b
| 2,055
|
py
|
Python
|
bann/b_test_train_prepare/pytorch/p_test/functions/crosstab.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/pytorch/p_test/functions/crosstab.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/pytorch/p_test/functions/crosstab.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
from typing import List, Tuple, Dict
from bann.b_test_train_prepare.container.test.rttff_c import merge_ttff_fun, RtTfF, \
check_ttff_merged
from bann.b_test_train_prepare.pytorch.p_test.functions.steps import C_STEP_SIZE, calc_step_f
def _one_class_cross_tab_str_erg(data: Dict[int, RtTfF], class_id: int, /) \
-> List[Tuple[str, str, str, str, str, str]]:
return [
(
str(class_id), str(tf_v.r_tp), str(tf_v.r_fp),
str(tf_v.r_fn), str(tf_v.r_tn), str(th_v / 100.)
)
for th_v, tf_v in data.items()
]
def merge_one_class_cross_tab(data: List[Tuple[Dict[int, RtTfF], ...]], class_num: int,
step_cnt: int, /) -> Tuple[str, ...]:
step_f = calc_step_f(step_cnt)
classes_list = tuple(
{num * step_f: RtTfF() for num in range(int(C_STEP_SIZE / step_f) + 1)}
for _ in range(class_num)
)
check_sum: List[Dict[int, int]] = [{} for _ in range(class_num)]
for data_el in data:
for index in range(class_num):
for key, value in data_el[index].items():
check_sum[index][key] = check_sum[index].get(key, 0) + \
merge_ttff_fun(classes_list[index][key], value)
check_ttff_merged(check_sum)
res = [
_one_class_cross_tab_str_erg(cl_el, cl_id) for cl_id, cl_el in enumerate(classes_list)
]
cross_tab = [
"\"OneClass_CrossTab\": {",
"\"ClassID\": [" + ','.join(cid[0] for re_t in res for cid in re_t) + "],",
"\"TP\": [" + ','.join(cid[1] for re_t in res for cid in re_t) + "],",
"\"FP\": [" + ','.join(cid[2] for re_t in res for cid in re_t) + "],",
"\"FN\": [" + ','.join(cid[3] for re_t in res for cid in re_t) + "],",
"\"TN\": [" + ','.join(cid[4] for re_t in res for cid in re_t) + "],",
"\"Threshold_in_%\": [" + ','.join(cid[5] for re_t in res for cid in re_t) + "]",
"}"
]
return tuple(cross_tab)
| 41.938776
| 94
| 0.567397
|
cbaa87c05e462bf61288d24c75be27e3e1d63f9f
| 1,119
|
py
|
Python
|
setup.py
|
PeterJCLaw/libproton
|
ed1e4b2e26c2ee39c343c2b46c981bb3b570f6a5
|
[
"MIT"
] | null | null | null |
setup.py
|
PeterJCLaw/libproton
|
ed1e4b2e26c2ee39c343c2b46c981bb3b570f6a5
|
[
"MIT"
] | null | null | null |
setup.py
|
PeterJCLaw/libproton
|
ed1e4b2e26c2ee39c343c2b46c981bb3b570f6a5
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open('README.md') as f:
long_description = f.read()
setup(
name='libproton',
version='3.1',
url='https://github.com/PeterJCLaw/libproton',
project_urls={
'Issue tracker': 'https://github.com/PeterJCLaw/libproton/issues',
},
packages=find_packages(),
description="Proton-compliant match scorer library.",
long_description=long_description,
long_description_content_type='text/markdown',
author="Peter Law",
author_email="PeterJCLaw@gmail.com",
python_requires='>=3.7',
install_requires=[
'PyYAML >=3.11, <6',
],
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
zip_safe=True,
)
| 26.023256
| 74
| 0.621984
|
824bc2b837887eeafa3f4e9b89aa006fc42bbffc
| 23,190
|
py
|
Python
|
sdk/python/pulumi_gcp/cloudscheduler/outputs.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/cloudscheduler/outputs.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/cloudscheduler/outputs.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'JobAppEngineHttpTarget',
'JobAppEngineHttpTargetAppEngineRouting',
'JobHttpTarget',
'JobHttpTargetOauthToken',
'JobHttpTargetOidcToken',
'JobPubsubTarget',
'JobRetryConfig',
]
@pulumi.output_type
class JobAppEngineHttpTarget(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "relativeUri":
suggest = "relative_uri"
elif key == "appEngineRouting":
suggest = "app_engine_routing"
elif key == "httpMethod":
suggest = "http_method"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobAppEngineHttpTarget. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobAppEngineHttpTarget.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobAppEngineHttpTarget.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
relative_uri: str,
app_engine_routing: Optional['outputs.JobAppEngineHttpTargetAppEngineRouting'] = None,
body: Optional[str] = None,
headers: Optional[Mapping[str, str]] = None,
http_method: Optional[str] = None):
"""
:param str relative_uri: The relative URI.
The relative URL must begin with "/" and must be a valid HTTP relative URL.
It can contain a path, query string arguments, and \# fragments.
If the relative URL is empty, then the root path "/" will be used.
No spaces are allowed, and the maximum length allowed is 2083 characters
:param 'JobAppEngineHttpTargetAppEngineRoutingArgs' app_engine_routing: App Engine Routing setting for the job.
Structure is documented below.
:param str body: HTTP request body.
A request body is allowed only if the HTTP method is POST, PUT, or PATCH.
It is an error to set body on a job with an incompatible HttpMethod.
A base64-encoded string.
:param Mapping[str, str] headers: This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain commas.
:param str http_method: Which HTTP method to use for the request.
"""
pulumi.set(__self__, "relative_uri", relative_uri)
if app_engine_routing is not None:
pulumi.set(__self__, "app_engine_routing", app_engine_routing)
if body is not None:
pulumi.set(__self__, "body", body)
if headers is not None:
pulumi.set(__self__, "headers", headers)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
@property
@pulumi.getter(name="relativeUri")
def relative_uri(self) -> str:
"""
The relative URI.
The relative URL must begin with "/" and must be a valid HTTP relative URL.
It can contain a path, query string arguments, and \# fragments.
If the relative URL is empty, then the root path "/" will be used.
No spaces are allowed, and the maximum length allowed is 2083 characters
"""
return pulumi.get(self, "relative_uri")
@property
@pulumi.getter(name="appEngineRouting")
def app_engine_routing(self) -> Optional['outputs.JobAppEngineHttpTargetAppEngineRouting']:
"""
App Engine Routing setting for the job.
Structure is documented below.
"""
return pulumi.get(self, "app_engine_routing")
@property
@pulumi.getter
def body(self) -> Optional[str]:
"""
HTTP request body.
A request body is allowed only if the HTTP method is POST, PUT, or PATCH.
It is an error to set body on a job with an incompatible HttpMethod.
A base64-encoded string.
"""
return pulumi.get(self, "body")
@property
@pulumi.getter
def headers(self) -> Optional[Mapping[str, str]]:
"""
This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain commas.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
Which HTTP method to use for the request.
"""
return pulumi.get(self, "http_method")
@pulumi.output_type
class JobAppEngineHttpTargetAppEngineRouting(dict):
def __init__(__self__, *,
instance: Optional[str] = None,
service: Optional[str] = None,
version: Optional[str] = None):
"""
:param str instance: App instance.
By default, the job is sent to an instance which is available when the job is attempted.
:param str service: App service.
By default, the job is sent to the service which is the default service when the job is attempted.
:param str version: App version.
By default, the job is sent to the version which is the default version when the job is attempted.
"""
if instance is not None:
pulumi.set(__self__, "instance", instance)
if service is not None:
pulumi.set(__self__, "service", service)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def instance(self) -> Optional[str]:
"""
App instance.
By default, the job is sent to an instance which is available when the job is attempted.
"""
return pulumi.get(self, "instance")
@property
@pulumi.getter
def service(self) -> Optional[str]:
"""
App service.
By default, the job is sent to the service which is the default service when the job is attempted.
"""
return pulumi.get(self, "service")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
App version.
By default, the job is sent to the version which is the default version when the job is attempted.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class JobHttpTarget(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "httpMethod":
suggest = "http_method"
elif key == "oauthToken":
suggest = "oauth_token"
elif key == "oidcToken":
suggest = "oidc_token"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobHttpTarget. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobHttpTarget.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobHttpTarget.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
uri: str,
body: Optional[str] = None,
headers: Optional[Mapping[str, str]] = None,
http_method: Optional[str] = None,
oauth_token: Optional['outputs.JobHttpTargetOauthToken'] = None,
oidc_token: Optional['outputs.JobHttpTargetOidcToken'] = None):
"""
:param str uri: The full URI path that the request will be sent to.
:param str body: HTTP request body.
A request body is allowed only if the HTTP method is POST, PUT, or PATCH.
It is an error to set body on a job with an incompatible HttpMethod.
A base64-encoded string.
:param Mapping[str, str] headers: This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain commas.
:param str http_method: Which HTTP method to use for the request.
:param 'JobHttpTargetOauthTokenArgs' oauth_token: Contains information needed for generating an OAuth token.
This type of authorization should be used when sending requests to a GCP endpoint.
Structure is documented below.
:param 'JobHttpTargetOidcTokenArgs' oidc_token: Contains information needed for generating an OpenID Connect token.
This type of authorization should be used when sending requests to third party endpoints or Cloud Run.
Structure is documented below.
"""
pulumi.set(__self__, "uri", uri)
if body is not None:
pulumi.set(__self__, "body", body)
if headers is not None:
pulumi.set(__self__, "headers", headers)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if oauth_token is not None:
pulumi.set(__self__, "oauth_token", oauth_token)
if oidc_token is not None:
pulumi.set(__self__, "oidc_token", oidc_token)
@property
@pulumi.getter
def uri(self) -> str:
"""
The full URI path that the request will be sent to.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter
def body(self) -> Optional[str]:
"""
HTTP request body.
A request body is allowed only if the HTTP method is POST, PUT, or PATCH.
It is an error to set body on a job with an incompatible HttpMethod.
A base64-encoded string.
"""
return pulumi.get(self, "body")
@property
@pulumi.getter
def headers(self) -> Optional[Mapping[str, str]]:
"""
This map contains the header field names and values.
Repeated headers are not supported, but a header value can contain commas.
"""
return pulumi.get(self, "headers")
@property
@pulumi.getter(name="httpMethod")
def http_method(self) -> Optional[str]:
"""
Which HTTP method to use for the request.
"""
return pulumi.get(self, "http_method")
@property
@pulumi.getter(name="oauthToken")
def oauth_token(self) -> Optional['outputs.JobHttpTargetOauthToken']:
"""
Contains information needed for generating an OAuth token.
This type of authorization should be used when sending requests to a GCP endpoint.
Structure is documented below.
"""
return pulumi.get(self, "oauth_token")
@property
@pulumi.getter(name="oidcToken")
def oidc_token(self) -> Optional['outputs.JobHttpTargetOidcToken']:
"""
Contains information needed for generating an OpenID Connect token.
This type of authorization should be used when sending requests to third party endpoints or Cloud Run.
Structure is documented below.
"""
return pulumi.get(self, "oidc_token")
@pulumi.output_type
class JobHttpTargetOauthToken(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "serviceAccountEmail":
suggest = "service_account_email"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobHttpTargetOauthToken. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobHttpTargetOauthToken.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobHttpTargetOauthToken.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
service_account_email: str,
scope: Optional[str] = None):
"""
:param str service_account_email: Service account email to be used for generating OAuth token.
The service account must be within the same project as the job.
:param str scope: OAuth scope to be used for generating OAuth access token. If not specified,
"https://www.googleapis.com/auth/cloud-platform" will be used.
"""
pulumi.set(__self__, "service_account_email", service_account_email)
if scope is not None:
pulumi.set(__self__, "scope", scope)
@property
@pulumi.getter(name="serviceAccountEmail")
def service_account_email(self) -> str:
"""
Service account email to be used for generating OAuth token.
The service account must be within the same project as the job.
"""
return pulumi.get(self, "service_account_email")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
"""
OAuth scope to be used for generating OAuth access token. If not specified,
"https://www.googleapis.com/auth/cloud-platform" will be used.
"""
return pulumi.get(self, "scope")
@pulumi.output_type
class JobHttpTargetOidcToken(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "serviceAccountEmail":
suggest = "service_account_email"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobHttpTargetOidcToken. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobHttpTargetOidcToken.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobHttpTargetOidcToken.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
service_account_email: str,
audience: Optional[str] = None):
"""
:param str service_account_email: Service account email to be used for generating OAuth token.
The service account must be within the same project as the job.
:param str audience: Audience to be used when generating OIDC token. If not specified,
the URI specified in target will be used.
"""
pulumi.set(__self__, "service_account_email", service_account_email)
if audience is not None:
pulumi.set(__self__, "audience", audience)
@property
@pulumi.getter(name="serviceAccountEmail")
def service_account_email(self) -> str:
"""
Service account email to be used for generating OAuth token.
The service account must be within the same project as the job.
"""
return pulumi.get(self, "service_account_email")
@property
@pulumi.getter
def audience(self) -> Optional[str]:
"""
Audience to be used when generating OIDC token. If not specified,
the URI specified in target will be used.
"""
return pulumi.get(self, "audience")
@pulumi.output_type
class JobPubsubTarget(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "topicName":
suggest = "topic_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobPubsubTarget. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobPubsubTarget.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobPubsubTarget.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
topic_name: str,
attributes: Optional[Mapping[str, str]] = None,
data: Optional[str] = None):
"""
:param str topic_name: The full resource name for the Cloud Pub/Sub topic to which
messages will be published when a job is delivered. ~>**NOTE:**
The topic name must be in the same format as required by PubSub's
PublishRequest.name, e.g. `projects/my-project/topics/my-topic`.
:param Mapping[str, str] attributes: Attributes for PubsubMessage.
Pubsub message must contain either non-empty data, or at least one attribute.
:param str data: The message payload for PubsubMessage.
Pubsub message must contain either non-empty data, or at least one attribute.
A base64-encoded string.
"""
pulumi.set(__self__, "topic_name", topic_name)
if attributes is not None:
pulumi.set(__self__, "attributes", attributes)
if data is not None:
pulumi.set(__self__, "data", data)
@property
@pulumi.getter(name="topicName")
def topic_name(self) -> str:
"""
The full resource name for the Cloud Pub/Sub topic to which
messages will be published when a job is delivered. ~>**NOTE:**
The topic name must be in the same format as required by PubSub's
PublishRequest.name, e.g. `projects/my-project/topics/my-topic`.
"""
return pulumi.get(self, "topic_name")
@property
@pulumi.getter
def attributes(self) -> Optional[Mapping[str, str]]:
"""
Attributes for PubsubMessage.
Pubsub message must contain either non-empty data, or at least one attribute.
"""
return pulumi.get(self, "attributes")
@property
@pulumi.getter
def data(self) -> Optional[str]:
"""
The message payload for PubsubMessage.
Pubsub message must contain either non-empty data, or at least one attribute.
A base64-encoded string.
"""
return pulumi.get(self, "data")
@pulumi.output_type
class JobRetryConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxBackoffDuration":
suggest = "max_backoff_duration"
elif key == "maxDoublings":
suggest = "max_doublings"
elif key == "maxRetryDuration":
suggest = "max_retry_duration"
elif key == "minBackoffDuration":
suggest = "min_backoff_duration"
elif key == "retryCount":
suggest = "retry_count"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobRetryConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobRetryConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobRetryConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_backoff_duration: Optional[str] = None,
max_doublings: Optional[int] = None,
max_retry_duration: Optional[str] = None,
min_backoff_duration: Optional[str] = None,
retry_count: Optional[int] = None):
"""
:param str max_backoff_duration: The maximum amount of time to wait before retrying a job after it fails.
A duration in seconds with up to nine fractional digits, terminated by 's'.
:param int max_doublings: The time between retries will double maxDoublings times.
A job's retry interval starts at minBackoffDuration,
then doubles maxDoublings times, then increases linearly,
               and finally retries at intervals of maxBackoffDuration up to retryCount times.
:param str max_retry_duration: The time limit for retrying a failed job, measured from time when an execution was first attempted.
If specified with retryCount, the job will be retried until both limits are reached.
A duration in seconds with up to nine fractional digits, terminated by 's'.
:param str min_backoff_duration: The minimum amount of time to wait before retrying a job after it fails.
A duration in seconds with up to nine fractional digits, terminated by 's'.
:param int retry_count: The number of attempts that the system will make to run a
job using the exponential backoff procedure described by maxDoublings.
Values greater than 5 and negative values are not allowed.
"""
if max_backoff_duration is not None:
pulumi.set(__self__, "max_backoff_duration", max_backoff_duration)
if max_doublings is not None:
pulumi.set(__self__, "max_doublings", max_doublings)
if max_retry_duration is not None:
pulumi.set(__self__, "max_retry_duration", max_retry_duration)
if min_backoff_duration is not None:
pulumi.set(__self__, "min_backoff_duration", min_backoff_duration)
if retry_count is not None:
pulumi.set(__self__, "retry_count", retry_count)
@property
@pulumi.getter(name="maxBackoffDuration")
def max_backoff_duration(self) -> Optional[str]:
"""
The maximum amount of time to wait before retrying a job after it fails.
A duration in seconds with up to nine fractional digits, terminated by 's'.
"""
return pulumi.get(self, "max_backoff_duration")
@property
@pulumi.getter(name="maxDoublings")
def max_doublings(self) -> Optional[int]:
"""
The time between retries will double maxDoublings times.
A job's retry interval starts at minBackoffDuration,
then doubles maxDoublings times, then increases linearly,
        and finally retries at intervals of maxBackoffDuration up to retryCount times.
"""
return pulumi.get(self, "max_doublings")
@property
@pulumi.getter(name="maxRetryDuration")
def max_retry_duration(self) -> Optional[str]:
"""
The time limit for retrying a failed job, measured from time when an execution was first attempted.
If specified with retryCount, the job will be retried until both limits are reached.
A duration in seconds with up to nine fractional digits, terminated by 's'.
"""
return pulumi.get(self, "max_retry_duration")
@property
@pulumi.getter(name="minBackoffDuration")
def min_backoff_duration(self) -> Optional[str]:
"""
The minimum amount of time to wait before retrying a job after it fails.
A duration in seconds with up to nine fractional digits, terminated by 's'.
"""
return pulumi.get(self, "min_backoff_duration")
@property
@pulumi.getter(name="retryCount")
def retry_count(self) -> Optional[int]:
"""
The number of attempts that the system will make to run a
job using the exponential backoff procedure described by maxDoublings.
Values greater than 5 and negative values are not allowed.
"""
return pulumi.get(self, "retry_count")
| 40.400697
| 143
| 0.634282
|
4bf6c53b980724e707b9e2795f00b294d692069d
| 2,468
|
py
|
Python
|
tchannel/messages/call_response.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
tchannel/messages/call_response.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
tchannel/messages/call_response.py
|
westover/tchannel-python
|
d9c16291f49b3b9dd1353c01179d4f4c3168c53a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from . import common
from .. import rw
from .call_response_continue import CallResponseContinueMessage
from .types import Types
class CallResponseMessage(CallResponseContinueMessage):
"""Respond to an RPC call."""
message_type = Types.CALL_RES
__slots__ = CallResponseContinueMessage.__slots__ + (
'code',
'tracing',
'headers',
)
def __init__(
self,
flags=0,
code=0,
tracing=None,
headers=None,
checksum=None,
args=None,
id=0,
):
args = args or ["", "", ""]
super(CallResponseMessage, self).__init__(flags, checksum, args, id)
self.code = code
self.tracing = tracing or common.Tracing(0, 0, 0, 0)
self.headers = dict(headers) if headers else {}
call_res_rw = rw.instance(
CallResponseMessage,
("flags", rw.number(1)), # flags:1
("code", rw.number(1)), # code:1
("tracing", common.tracing_rw), # tracing:24
# traceflags: 1
("headers", rw.headers( # nh:1 (hk~1 hv~1){nh}
rw.number(1),
rw.len_prefixed_string(rw.number(1))
)),
("checksum", common.checksum_rw), # csumtype:1 (csum:4){0, 1}
("args",
rw.args(rw.number(2))), # [arg1~2, arg2~2, arg3~2]
)
| 35.257143
| 79
| 0.664506
|
4d6985634a45489a4da25484a4330b1a7b189a55
| 15,100
|
py
|
Python
|
pyasn1/type/namedtype.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 32
|
2015-11-06T02:59:41.000Z
|
2021-02-12T02:44:42.000Z
|
pyasn1/type/namedtype.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 6
|
2017-04-26T02:30:16.000Z
|
2017-10-13T16:53:08.000Z
|
pyasn1/type/namedtype.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4
|
2016-02-01T09:15:05.000Z
|
2020-04-30T03:41:04.000Z
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import tag, tagmap
from pyasn1 import error
__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType', 'NamedTypes']
class NamedType(object):
"""Create named field object for a constructed ASN.1 type.
The |NamedType| object represents a single name and ASN.1 type of a constructed ASN.1 type.
|NamedType| objects are immutable and duck-type Python :class:`tuple` objects
holding *name* and *asn1Object* components.
Parameters
----------
name: :py:class:`str`
Field name
asn1Object:
ASN.1 type object
"""
isOptional = False
isDefaulted = False
def __init__(self, name, asn1Object):
self.__name = name
self.__type = asn1Object
self.__nameAndType = name, asn1Object
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.__name, self.__type)
def __eq__(self, other):
return self.__nameAndType == other
def __ne__(self, other):
return self.__nameAndType != other
def __lt__(self, other):
return self.__nameAndType < other
def __le__(self, other):
return self.__nameAndType <= other
def __gt__(self, other):
return self.__nameAndType > other
def __ge__(self, other):
return self.__nameAndType >= other
def __hash__(self):
return hash(self.__nameAndType)
def __getitem__(self, idx):
return self.__nameAndType[idx]
def __iter__(self):
return iter(self.__nameAndType)
@property
def name(self):
return self.__name
@property
def asn1Object(self):
return self.__type
# Backward compatibility
def getName(self):
return self.name
def getType(self):
return self.asn1Object
class OptionalNamedType(NamedType):
__doc__ = NamedType.__doc__
isOptional = True
class DefaultedNamedType(NamedType):
__doc__ = NamedType.__doc__
isDefaulted = True
class NamedTypes(object):
"""Create a collection of named fields for a constructed ASN.1 type.
The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
*NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
holding *name* as keys and ASN.1 type object as values.
Parameters
----------
*namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
"""
def __init__(self, *namedTypes, **kwargs):
self.__namedTypes = namedTypes
self.__namedTypesLen = len(self.__namedTypes)
self.__minTagSet = self.__computeMinTagSet()
self.__nameToPosMap = self.__computeNameToPosMap()
self.__tagToPosMap = self.__computeTagToPosMap()
self.__ambiguousTypes = 'terminal' not in kwargs and self.__computeAmbiguousTypes() or {}
self.__uniqueTagMap = self.__computeTagMaps(unique=True)
self.__nonUniqueTagMap = self.__computeTagMaps(unique=False)
self.__hasOptionalOrDefault = bool([True for namedType in self.__namedTypes
if namedType.isDefaulted or namedType.isOptional])
self.__requiredComponents = frozenset(
[idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
)
self.__keys = frozenset([namedType.name for namedType in self.__namedTypes])
self.__values = tuple([namedType.asn1Object for namedType in self.__namedTypes])
self.__items = tuple([(namedType.name, namedType.asn1Object) for namedType in self.__namedTypes])
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__, ', '.join([repr(x) for x in self.__namedTypes])
)
def __eq__(self, other):
return self.__namedTypes == other
def __ne__(self, other):
return self.__namedTypes != other
def __lt__(self, other):
return self.__namedTypes < other
def __le__(self, other):
return self.__namedTypes <= other
def __gt__(self, other):
return self.__namedTypes > other
def __ge__(self, other):
return self.__namedTypes >= other
def __hash__(self):
return hash(self.__namedTypes)
def __getitem__(self, idx):
try:
return self.__namedTypes[idx]
except TypeError:
return self.__namedTypes[self.__nameToPosMap[idx]]
def __contains__(self, key):
return key in self.__nameToPosMap
def __iter__(self):
return (x[0] for x in self.__namedTypes)
if sys.version_info[0] <= 2:
def __nonzero__(self):
return self.__namedTypesLen > 0
else:
def __bool__(self):
return self.__namedTypesLen > 0
def __len__(self):
return self.__namedTypesLen
# Python dict protocol
def values(self):
return self.__values
def keys(self):
return self.__keys
def items(self):
return self.__items
def clone(self):
return self.__class__(*self.__namedTypes)
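    # Example (sketch, assuming the usual pyasn1 universal types):
    #
    #   from pyasn1.type import univ, namedtype
    #   fields = namedtype.NamedTypes(
    #       namedtype.NamedType('id', univ.Integer()),
    #       namedtype.OptionalNamedType('label', univ.OctetString())
    #   )
    #   fields.getPositionByName('label')   # -> 1
    #   fields.getTypeByPosition(0)         # -> the Integer() object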
class PostponedError(object):
def __init__(self, errorMsg):
self.__errorMsg = errorMsg
def __getitem__(self, item):
raise error.PyAsn1Error(self.__errorMsg)
def __computeTagToPosMap(self):
tagToPosMap = {}
for idx, namedType in enumerate(self.__namedTypes):
tagMap = namedType.asn1Object.tagMap
if isinstance(tagMap, NamedTypes.PostponedError):
return tagMap
if not tagMap:
continue
for _tagSet in tagMap.presentTypes:
if _tagSet in tagToPosMap:
return NamedTypes.PostponedError('Duplicate component tag %s at %s' % (_tagSet, namedType))
tagToPosMap[_tagSet] = idx
return tagToPosMap
def __computeNameToPosMap(self):
nameToPosMap = {}
for idx, namedType in enumerate(self.__namedTypes):
if namedType.name in nameToPosMap:
return NamedTypes.PostponedError('Duplicate component name %s at %s' % (namedType.name, namedType))
nameToPosMap[namedType.name] = idx
return nameToPosMap
def __computeAmbiguousTypes(self):
        ambiguousTypes = {}
        partialAmbiguousTypes = ()
        for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
            if namedType.isOptional or namedType.isDefaulted:
                partialAmbiguousTypes = (namedType,) + partialAmbiguousTypes
            else:
                partialAmbiguousTypes = (namedType,)
            if len(partialAmbiguousTypes) == len(self.__namedTypes):
                ambiguousTypes[idx] = self
            else:
                ambiguousTypes[idx] = NamedTypes(*partialAmbiguousTypes, **dict(terminal=True))
        return ambiguousTypes
def getTypeByPosition(self, idx):
"""Return ASN.1 type object by its position in fields set.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
:
ASN.1 type
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__namedTypes[idx].asn1Object
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByType(self, tagSet):
"""Return field position by its ASN.1 type.
Parameters
----------
tagSet: :class:`~pysnmp.type.tag.TagSet`
ASN.1 tag set distinguishing one ASN.1 type from others.
Returns
-------
: :py:class:`int`
ASN.1 type position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
"""
try:
return self.__tagToPosMap[tagSet]
except KeyError:
raise error.PyAsn1Error('Type %s not found' % (tagSet,))
def getNameByPosition(self, idx):
"""Return field name by its position in fields set.
Parameters
----------
        idx: :py:class:`int`
Field index
Returns
-------
: :py:class:`str`
Field name
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
            If given position is out of fields range
"""
try:
return self.__namedTypes[idx].name
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByName(self, name):
"""Return field position by filed name.
Parameters
----------
name: :py:class:`str`
Field name
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *name* is not present or not unique within callee *NamedTypes*
"""
try:
return self.__nameToPosMap[name]
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
def getTagMapNearPosition(self, idx):
"""Return ASN.1 types that are allowed at or past given field position.
Some ASN.1 serialization allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know which types can possibly be
present at any given position in the field sets.
Parameters
----------
idx: :py:class:`int`
Field index
Returns
-------
: :class:`~pyasn1.type.tagmap.TagMap`
            Map of ASN.1 types allowed at given field position
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If given position is out of fields range
"""
try:
return self.__ambiguousTypes[idx].tagMap
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def getPositionNearType(self, tagSet, idx):
"""Return the closest field position where given ASN.1 type is allowed.
Some ASN.1 serialization allow for skipping optional and defaulted fields.
Some constructed ASN.1 types allow reordering of the fields. When recovering
such objects it may be important to know at which field position, in field set,
given *tagSet* is allowed at or past *idx* position.
Parameters
----------
tagSet: :class:`~pyasn1.type.tag.TagSet`
ASN.1 type which field position to look up
idx: :py:class:`int`
Field position at or past which to perform ASN.1 type look up
Returns
-------
: :py:class:`int`
Field position in fields set
Raises
------
: :class:`~pyasn1.error.PyAsn1Error`
If *tagSet* is not present or not unique within callee *NamedTypes*
or *idx* is out of fields range
"""
try:
return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def __computeMinTagSet(self):
minTagSet = None
for namedType in self.__namedTypes:
asn1Object = namedType.asn1Object
try:
tagSet = asn1Object.minTagSet
except AttributeError:
tagSet = asn1Object.tagSet
if minTagSet is None or tagSet < minTagSet:
minTagSet = tagSet
return minTagSet or tag.TagSet()
@property
def minTagSet(self):
"""Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
Some ASN.1 types/serialization protocols require ASN.1 types to be
arranged based on their numerical tag value. The *minTagSet* property
returns that.
Returns
-------
: :class:`~pyasn1.type.tagset.TagSet`
Minimal TagSet among ASN.1 types in callee *NamedTypes*
"""
return self.__minTagSet
def __computeTagMaps(self, unique):
presentTypes = {}
skipTypes = {}
defaultType = None
for namedType in self.__namedTypes:
tagMap = namedType.asn1Object.tagMap
if isinstance(tagMap, NamedTypes.PostponedError):
return tagMap
for tagSet in tagMap:
if unique and tagSet in presentTypes:
return NamedTypes.PostponedError('Non-unique tagSet %s of %s at %s' % (tagSet, namedType, self))
presentTypes[tagSet] = namedType.asn1Object
skipTypes.update(tagMap.skipTypes)
if defaultType is None:
defaultType = tagMap.defaultType
elif tagMap.defaultType is not None:
raise error.PyAsn1Error('Duplicate default ASN.1 type at %s' % (self,))
return tagmap.TagMap(presentTypes, skipTypes, defaultType)
@property
def tagMap(self):
"""Return a *TagMap* object from tags and types recursively.
Return a :class:`~pyasn1.type.tagmap.TagMap` object by
combining tags from *TagMap* objects of children types and
associating them with their immediate child type.
Example
-------
.. code-block:: python
OuterType ::= CHOICE {
innerType INTEGER
}
Calling *.tagMap* on *OuterType* will yield a map like this:
.. code-block:: python
Integer.tagSet -> Choice
"""
return self.__nonUniqueTagMap
@property
def tagMapUnique(self):
"""Return a *TagMap* object from unique tags and types recursively.
Return a :class:`~pyasn1.type.tagmap.TagMap` object by
combining tags from *TagMap* objects of children types and
associating them with their immediate child type.
Example
-------
.. code-block:: python
OuterType ::= CHOICE {
innerType INTEGER
}
Calling *.tagMapUnique* on *OuterType* will yield a map like this:
.. code-block:: python
Integer.tagSet -> Choice
Note
----
Duplicate *TagSet* objects found in the tree of children
types would cause error.
"""
return self.__uniqueTagMap
@property
def hasOptionalOrDefault(self):
return self.__hasOptionalOrDefault
@property
def namedTypes(self):
return iter(self.__namedTypes)
@property
def requiredComponents(self):
return self.__requiredComponents
| 29.492188
| 116
| 0.605232
|
6c5636a859756dc88d73234155f3e5cb260e0c8c
| 47
|
py
|
Python
|
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | null | null | null |
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | 4
|
2018-10-25T20:53:07.000Z
|
2018-10-30T16:20:50.000Z
|
Applications/price_GEF_14/top_level.py
|
nagadakos/online-learning
|
3be9a59b56d4b7147b7efa4175448e74731cd005
|
[
"Apache-2.0"
] | 1
|
2018-10-26T13:48:31.000Z
|
2018-10-26T13:48:31.000Z
|
import sys
print("Hello from price_GEF_14!")
| 9.4
| 33
| 0.744681
|
5dc444c29a3e98c4a4e65ae66afccc0225b3f5f3
| 2,737
|
py
|
Python
|
nuaal/discovery/Topology.py
|
mihudec/nuaal
|
c5c50c59e8a1f2b3f0f6a5266f4dd392befc13cd
|
[
"Apache-2.0"
] | null | null | null |
nuaal/discovery/Topology.py
|
mihudec/nuaal
|
c5c50c59e8a1f2b3f0f6a5266f4dd392befc13cd
|
[
"Apache-2.0"
] | null | null | null |
nuaal/discovery/Topology.py
|
mihudec/nuaal
|
c5c50c59e8a1f2b3f0f6a5266f4dd392befc13cd
|
[
"Apache-2.0"
] | null | null | null |
from nuaal.utils import get_logger
from nuaal.definitions import DATA_PATH
import json
class Topology(object):
def __init__(self, DEBUG=False):
self.DEBUG = DEBUG
self.logger = get_logger(name="Topology", DEBUG=DEBUG)
self.topology = {"nodes": [], "links": []}
def next_ui(self):
id_counter = 0
next_topo = {"nodes": [], "links": []}
id_map = {}
for node in self.topology["nodes"]:
id_map[node] = id_counter
next_topo["nodes"].append({"id": id_counter, "name": node})
id_counter += 1
for link in self.topology["links"]:
next_topo["links"].append({"source": id_map[link["sourceNode"]], "target": id_map[link["targetNode"]]})
return next_topo
class CliTopology(Topology):
def __init__(self, DEBUG=False):
super(CliTopology, self).__init__(DEBUG=DEBUG)
def build_topology(self, data):
if isinstance(data, list):
data = {x["hostname"]: x["neighbors"] for x in data}
elif isinstance(data, dict):
data = {k: data[k]["neighbors"] for k in data.keys()}
self.logger.info(msg="Building topology based on {} visited devices.".format(len(data)))
all_nodes = []
all_links = []
for device_id, neighbors in data.items():
links = self._get_links(device_id=device_id, neighbors=neighbors)
if device_id not in all_nodes:
all_nodes.append(device_id)
for neighbor_id in [x["targetNode"] for x in links]:
if neighbor_id not in all_nodes:
all_nodes.append(neighbor_id)
for link in links:
if link not in all_links and self._reverse_link(link) not in all_links:
all_links.append(link)
self.topology["links"] = all_links
self.topology["nodes"] = all_nodes
self.logger.info(msg="Discovered total of {} nodes and {} links".format(len(all_nodes), len(all_links)))
def _get_links(self, device_id, neighbors):
links = []
for neighbor in neighbors:
link = {
"sourceNode": device_id,
"sourceInterface": neighbor["localInterface"],
"targetNode": neighbor["hostname"],
"targetInterface": neighbor["remoteInterface"]
}
links.append(link)
return links
def _reverse_link(self, link):
reverse_link = {
"sourceNode": link["targetNode"],
"sourceInterface": link["targetInterface"],
"targetNode": link["sourceNode"],
"targetInterface": link["sourceInterface"]
}
return reverse_link
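# A minimal usage sketch of the classes above; the device names, interfaces
# and the DEBUG flag are illustrative placeholders, not taken from nuaal:
#
#   topo = CliTopology(DEBUG=True)
#   sample = [
#       {"hostname": "R1", "neighbors": [
#           {"hostname": "R2", "localInterface": "Gi0/1", "remoteInterface": "Gi0/2"}]},
#       {"hostname": "R2", "neighbors": [
#           {"hostname": "R1", "localInterface": "Gi0/2", "remoteInterface": "Gi0/1"}]},
#   ]
#   topo.build_topology(sample)
#   topo.next_ui()  # {"nodes": [{"id": 0, "name": "R1"}, ...], "links": [...]}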
| 38.549296
| 115
| 0.579832
|
9943b2746a61f64bd977c9cd37052a7974c4121c
| 4,145
|
py
|
Python
|
tests/gamegrids_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
tests/gamegrids_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
tests/gamegrids_tests.py
|
xenomorff/code-dot-org-python
|
7b35999dc35fa9ca13c683f43eca631dc92e5da4
|
[
"Unlicense"
] | null | null | null |
import sys
import math
sys.path.append('..')
import codestudio.gamegrids as gg
from nose.tools import eq_,ok_
class TestXYGrid():
def setup(self):
self.grid = gg.XYGrid()
def defaults_test(self):
eq_(self.grid,[[]])
def init_test(self):
eq_(self.grid.init(3,2,'~'), self.grid)
eq_(self.grid,[['','',''],['','~','~',],['','~','~'],['','~','~']])
self.grid[1][1] = '*'
eq_(self.grid,[['','',''],['','*','~',],['','~','~'],['','~','~']])
self.grid.init(10,10,0)
self.grid[4][7] = '*'
tenxten = [
['', '', '', '', '', '', '', '', '', '', ''],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, '*', 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
['', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
eq_(self.grid,tenxten)
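# XYGrid is 1-based: init(x, y) produces an (x+1)-by-(y+1) list of lists whose
# row 0 and column 0 stay '' as padding, which is why grid[1][1] above
# addresses the first real cell.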
def to_text_test(self):
self.grid.init(3,2,'~')
self.grid[1][1] = '*'
eq_(self.grid.to_text(),'* ~ ~ \n~ ~ ~ \n')
def ping_test(self):
self.grid.init(4,4)
eq_(self.grid.ping(1,1),False)
self.grid[2][2] = 1
print(self.grid.to_text())
print(self.grid.ping(1,2))
ok_(self.grid.ping(2,2))
ok_(self.grid.ping(1,2))
ok_(self.grid.ping(3,2))
ok_(self.grid.ping(2,1))
ok_(self.grid.ping(2,3))
ok_(self.grid.ping(3,1))
ok_(self.grid.ping(1,3))
ok_(self.grid.ping(1,1))
ok_(self.grid.ping(3,3))
ok_(not self.grid.ping(4,3))
ok_(not self.grid.ping(4,2))
def draw_line_1m_test(self):
self.grid.init(10,10,'~')
self.grid.draw_line((1,1,4,4),'*')
string = ("* ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ * ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ * ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ * ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ ~ \n")
eq_(self.grid.to_text(),string)
def draw_lines_test(self):
self.grid.init(10,10,'~')
self.grid.draw_lines([(1,1,4,4),(10,0,10,10)],'*')
string = ("* ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ * ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ * ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ * ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n"
"~ ~ ~ ~ ~ ~ ~ ~ ~ * \n")
print(self.grid.to_text())
eq_(self.grid.to_text(),string)
def length_test():
eq_(gg.length((0,0,1,3)),math.sqrt((1-2)**2+(3-0)**2))
def xy_test():
end = tuple(round(n) for n in gg.xy(1,3,30,2))
eq_(end,(2,5))
def slope_test():
lines = {
(0,0,1,1): 1.0,
(1,1,2,2): 1.0,
(1,1,1,1): None,
(1,2,2,2): 0.0,
(2,2,-1,-1): 1.0,
(0,0,2,3): 1.5,
(0,0,3,2): 2/3,
(5,0,10,0): 0.0,
}
for line in lines:
eq_(gg.slope(line),lines[line])
'''
def y_of_test():
arglist = [
[(0,0,1,1),1,1],
[(0,0,1,1),2,2],
[(0,0,0,0),2,None],
[(1,0,3,0),2,0],
]
for args in arglist:
line,x,y = args
eq_(gg.y_of(line,x),y)
def bounded_y_of_test():
arglist = [
[(0,0,1,1),1,1],
[(0,0,-1,-1),-0.5,-0.5],
[(0,0,1,1),2,1],
[(0,0,-1,-1),2,0],
[(0,0,-1,-1),-2,-1],
[(0,0,0,0),2,None],
[(1,0,3,0),2,0],
]
for args in arglist:
line,x,y = args
print(line,x)
eq_(gg.bounded_y_of(line,x),y)
'''
| 29.397163
| 75
| 0.337033
|
489f7850b809c249ee6eb923b633230e18fd1505
| 22,325
|
py
|
Python
|
core/controllers/story_editor_test.py
|
CodeGrammer02/oppia
|
3a861163aa15838b931ecc9ae39eab44e7e9073c
|
[
"Apache-2.0"
] | 5
|
2022-01-22T17:22:23.000Z
|
2022-02-04T09:21:24.000Z
|
core/controllers/story_editor_test.py
|
omprakash1999mina/oppia
|
00282e533b5832cb763100de1a5cc727644d64ef
|
[
"Apache-2.0"
] | null | null | null |
core/controllers/story_editor_test.py
|
omprakash1999mina/oppia
|
00282e533b5832cb763100de1a5cc727644d64ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the story editor page."""
from __future__ import annotations
from core import feconf
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_fetchers
from core.domain import user_services
from core.tests import test_utils
class BaseStoryEditorControllerTests(test_utils.GenericTestBase):
def setUp(self):
"""Completes the sign-up process for the various users."""
super(BaseStoryEditorControllerTests, self).setUp()
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
self.admin = user_services.get_user_actions_info(self.admin_id)
self.topic_id = topic_fetchers.get_new_topic_id()
self.story_id = story_services.get_new_story_id()
self.save_new_story(self.story_id, self.admin_id, self.topic_id)
self.save_new_topic(
self.topic_id, self.admin_id, name='Name',
abbreviated_name='name', url_fragment='name',
description='Description', canonical_story_ids=[self.story_id],
additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[],
next_subtopic_id=1)
class StoryPublicationTests(BaseStoryEditorControllerTests):
def test_put_can_not_publish_story_with_invalid_story_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, new_story_id),
{'new_story_status_is_public': True},
csrf_token=csrf_token, expected_status_int=404)
# Raises error 404 even when story is saved as the new story id is not
# associated with the topic.
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, new_story_id),
{'new_story_status_is_public': True}, csrf_token=csrf_token,
expected_status_int=404)
self.logout()
def test_put_can_not_publish_story_with_invalid_new_story_status_value(
self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, self.story_id),
{'new_story_status_is_public': 'Invalid value'},
csrf_token=csrf_token, expected_status_int=400)
self.logout()
def test_story_publish_and_unpublish(self):
# Check that admins can publish a story.
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, self.story_id),
{'new_story_status_is_public': True}, csrf_token=csrf_token)
topic = topic_fetchers.get_topic_by_id(self.topic_id)
for reference in topic.canonical_story_references:
if reference.story_id == self.story_id:
self.assertEqual(reference.story_is_published, True)
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, self.story_id),
{'new_story_status_is_public': False}, csrf_token=csrf_token)
topic = topic_fetchers.get_topic_by_id(self.topic_id)
for reference in topic.canonical_story_references:
if reference.story_id == self.story_id:
self.assertEqual(reference.story_is_published, False)
self.logout()
# Check that non-admins cannot publish a story.
self.put_json(
'%s/%s' % (
feconf.STORY_PUBLISH_HANDLER, self.story_id),
{'new_story_status_is_public': True}, csrf_token=csrf_token,
expected_status_int=401)
class ValidateExplorationsHandlerTests(BaseStoryEditorControllerTests):
def test_validation_error_messages(self):
# Check that admins can publish a story.
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.save_new_valid_exploration(
'0', self.admin_id, title='Title 1',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
json_response = self.get_json(
'%s/%s' % (
feconf.VALIDATE_STORY_EXPLORATIONS_URL_PREFIX, self.story_id),
params={
'comma_separated_exp_ids': '15,0'
})
error_messages = json_response['validation_error_messages']
message_1 = (
'Expected story to only reference valid explorations, but found '
'a reference to an invalid exploration with ID: 15')
message_2 = (
'Exploration with ID 0 is not public. Please publish '
'explorations before adding them to a story.'
)
self.assertEqual(error_messages, [message_1, message_2])
self.logout()
def test_invalid_input_exception_when_no_exp_ids_passed(self):
# Check that admins can publish a story.
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.get_json(
'%s/%s' % (
feconf.VALIDATE_STORY_EXPLORATIONS_URL_PREFIX, self.story_id),
expected_status_int=400)
self.logout()
class StoryEditorTests(BaseStoryEditorControllerTests):
def test_can_not_access_story_editor_page_with_invalid_story_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
self.get_html_response(
'%s/%s' % (
feconf.STORY_EDITOR_URL_PREFIX, new_story_id),
expected_status_int=404)
# Raises error 404 even when story is saved as the new story id is not
# associated with the topic.
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
self.get_html_response(
'%s/%s' % (
feconf.STORY_EDITOR_URL_PREFIX, new_story_id),
expected_status_int=404)
self.logout()
def test_can_not_get_access_story_handler_with_invalid_story_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
expected_status_int=404)
# Raises error 404 even when story is saved as the new story id is not
# associated with the topic.
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
expected_status_int=404)
self.logout()
def test_can_not_get_access_story_handler_with_invalid_topic_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
expected_status_int=404)
self.save_new_topic(
'topic_id_new', self.admin_id, name='Name 2',
abbreviated_name='name-two', url_fragment='name-two',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[],
next_subtopic_id=1)
# An error would be raised here also as the story is not in the given
# topic.
self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
expected_status_int=404)
self.logout()
def test_put_can_not_access_story_handler_with_invalid_story_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': 1,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
new_story_id = story_services.get_new_story_id()
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=404)
# Raises error 404 even when story is saved as the new story id is not
# associated with the topic.
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=404)
self.logout()
def test_put_can_not_access_story_handler_with_invalid_topic_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': 1,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
new_story_id = story_services.get_new_story_id()
self.save_new_story(new_story_id, self.admin_id, self.topic_id)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id), change_cmd,
csrf_token=csrf_token, expected_status_int=404)
# Raises error 404 even when topic is saved as the story id is not
# associated with the new topic.
self.save_new_topic(
'topic_id_new', self.admin_id, name='Name 2',
abbreviated_name='name-new', url_fragment='name-new',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[],
next_subtopic_id=1)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, new_story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=404)
self.logout()
def test_put_can_not_access_story_handler_with_no_commit_message(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': 1,
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
json_response['error'],
'Expected a commit message but received none.')
self.logout()
def test_put_fails_with_long_commit_message(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': 1,
'commit_message': 'a' * 1001,
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
json_response['error'],
'Commit messages must be at most 375 characters long.')
self.logout()
def test_delete_can_not_access_story_handler_with_invalid_story_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX,
story_services.get_new_story_id()),
expected_status_int=404)
self.logout()
def test_delete_can_not_access_story_handler_with_invalid_topic_id(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
self.save_new_story(new_story_id, self.admin_id, 'invalid_topic_id')
self.delete_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX,
new_story_id),
expected_status_int=404)
self.logout()
def test_access_story_editor_page(self):
"""Test access to editor pages for the sample story."""
# Check that non-admins cannot access the editor page.
self.login(self.NEW_USER_EMAIL)
self.get_html_response(
'%s/%s' % (
feconf.STORY_EDITOR_URL_PREFIX, self.story_id),
expected_status_int=401)
self.logout()
# Check that admins can access and edit in the editor
# page.
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.get_html_response(
'%s/%s' % (
feconf.STORY_EDITOR_URL_PREFIX, self.story_id))
self.logout()
def test_editable_story_handler_get(self):
# Check that non-admins cannot access the editable story data.
self.login(self.NEW_USER_EMAIL)
self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
expected_status_int=401)
self.logout()
# Check that admins can access the editable story data.
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.save_new_valid_exploration(
'0', self.admin_id, title='Title 1',
category='Mathematics', language_code='en',
correctness_feedback_enabled=True)
self.publish_exploration(self.admin_id, '0')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'node_id': 'node_1',
'old_value': None,
'new_value': '0'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS),
'node_id': 'node_1',
'old_value': [],
'new_value': ['skill_id_1']
})]
self.save_new_skill(
'skill_id_1', self.admin_id, description='Description 3')
story_services.update_story(
self.admin_id, self.story_id, change_list, 'Updated story node.')
json_response = self.get_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id))
self.assertEqual(self.story_id, json_response['story']['id'])
self.assertEqual('Name', json_response['topic_name'])
self.assertEqual(len(json_response['skill_summaries']), 1)
self.assertEqual(
json_response['skill_summaries'][0]['description'], 'Description 3')
self.logout()
def test_editable_story_handler_put(self):
# Check that admins can edit a story.
change_cmd = {
'version': 1,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token)
self.assertEqual(self.story_id, json_response['story']['id'])
self.assertEqual(
'New Description', json_response['story']['description'])
self.logout()
# Check that non-admins cannot edit a story.
self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=401)
def test_guest_can_not_delete_story(self):
response = self.delete_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_admins_can_delete_story(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
expected_status_int=200)
self.logout()
def test_non_admins_cannot_delete_story(self):
self.login(self.NEW_USER_EMAIL)
self.delete_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
expected_status_int=401)
self.logout()
def test_put_can_not_access_story_handler_with_no_payload_version(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': None,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
json_response['error'],
'Invalid POST request: a version must be specified.')
self.logout()
def test_put_can_not_access_story_handler_with_mismatch_of_story_versions(
self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
change_cmd = {
'version': 2,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 'New Description'
}]
}
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
json_response['error'],
'Trying to update version 1 of story from version 2, '
'which is too old. Please reload the page and try again.')
self.logout()
def test_handler_raises_validation_error_with_invalid_new_description(self):
change_cmd = {
'version': 1,
'commit_message': 'changed description',
'change_dicts': [{
'cmd': 'update_story_property',
'property_name': 'description',
'old_value': 'Description',
'new_value': 0
}]
}
self.login(self.CURRICULUM_ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
json_response = self.put_json(
'%s/%s' % (
feconf.STORY_EDITOR_DATA_URL_PREFIX, self.story_id),
change_cmd, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
json_response['error'],
'Expected description to be a string, received 0')
self.logout()
def test_check_url_fragment_exists_or_not(self):
self.login(self.CURRICULUM_ADMIN_EMAIL)
new_story_id = story_services.get_new_story_id()
story = self.save_new_story(new_story_id, self.admin_id, self.topic_id)
json_response = self.get_json(
'%s/%s' % (
feconf.STORY_URL_FRAGMENT_HANDLER, story.url_fragment))
url_fragment_exists = json_response['story_url_fragment_exists']
self.assertEqual(url_fragment_exists, True)
json_response = self.get_json(
'%s/%s' % (
feconf.STORY_URL_FRAGMENT_HANDLER, 'non-existent-url-fragment'))
url_fragment_exists = json_response['story_url_fragment_exists']
self.assertEqual(url_fragment_exists, False)
self.logout()
| 36.900826
| 80
| 0.621725
|
d9cf7c77a54ff31160afdb9743789284f66f5292
| 11,258
|
py
|
Python
|
snappy_pipeline/find_file.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 5
|
2021-02-26T10:39:56.000Z
|
2021-12-23T07:53:26.000Z
|
snappy_pipeline/find_file.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 93
|
2021-02-22T11:23:59.000Z
|
2022-03-31T09:58:39.000Z
|
snappy_pipeline/find_file.py
|
PotatoThrone/snappy-pipeline
|
31200eba84bff8e459e9e210d6d95e2984627f5c
|
[
"MIT"
] | 3
|
2021-02-22T11:44:59.000Z
|
2021-06-21T19:33:53.000Z
|
# -*- coding: utf-8 -*-
"""Code for crawling the file system and caching the results
"""
from collections import OrderedDict
from fnmatch import fnmatch
import json
import logging
import os
import sys
from fasteners import InterProcessLock
__author__ = "Manuel Holtgrewe <manuel.holtgrewe@bihealth.de>"
class FileNamesTooDifferent(Exception):
"""Raised when two file names are too different to be PE reads"""
class PatternSet:
"""Store named or unnamed list of patterns"""
def __init__(self, patterns, names=None):
#: Patterns to search for with names
self.patterns = tuple(patterns)
#: Optional names
self.names = tuple(names or [])
if self.names and len(self.names) != len(self.patterns):
raise ValueError( # pragma: no cover
"Names must be empty or have the same length as files ({} vs. {})".format(
self.names, self.patterns
)
)
#: Named patterns, if any, else ``None``
self.named_patterns = None
if self.names:
self.named_patterns = OrderedDict(zip(names, patterns))
def __str__(self):
return "PatternSet({}, {})".format(self.patterns, self.names)
def __repr__(self):
return str(self)
class FileSystemCrawlerResult:
"""n-tuple of optionally named files"""
def __init__(self, base_folder, files, names=None):
#: Folder to start crawling in
self.base_folder = base_folder
#: Patterns to search for
self.files = tuple(files)
#: Names for the file patterns, optional; if given has to have the same length as files
self.names = tuple(names or [])
if self.names and len(self.names) != len(self.files):
raise ValueError( # pragma: no cover
"Names must be empty or have the same length as files ({} vs. {})".format(
self.names, self.files
)
)
#: Dict with name-to-pattern mapping, ``None`` if ``names`` is not given
self.named_files = None
if self.names:
self.named_files = OrderedDict(zip(names, files))
def to_dict(self):
"""Convert to dict, can only work if self.names and self.files is given"""
# TODO: remove?
if not self.names:
raise ValueError("No names, cannot convert to dict")
return dict(zip(self.names, self.files))
def __str__(self):
tpl = "FileSystemCrawlerResult({})"
return tpl.format(
", ".join(map(repr, (self.base_folder, self.files, self.names, self.named_files)))
)
def __repr__(self):
return str(self)
class FileSystemCrawler:
"""Crawl the file system
- start crawling the file system from a given directory
- look for files matching a given ``PatternSet``
- that are below a directory with a given name
"""
cache_version = 1
def __init__(self, cache_path, invalidation_paths, lock_timeout=60):
#: The logger to use.
self.logger = logging.getLogger("file_crawler")
#: Path to cache (will be stored in JSON format)
self.cache_path = cache_path
#: Path to files to use for checking invalidation.
self.invalidation_paths = invalidation_paths
#: The actual dict with the cache, loaded from path to ``cache_path`` if the cache file
#: exists.
self.cache = None
#: Flag whether cache has been modified and needs saving
self.cache_dirty = False
#: Flag whether cache has been invalidated already.
self.cache_invalidated = False
#: Timeout for obtaining file system lock on the file system
self.lock_timeout = lock_timeout
if os.path.exists(self.cache_path):
self.cache_invalidated = False
self.cache = self._load_cache()
else:
self._set_fresh_cache()
def _set_fresh_cache(self):
"""Set cache to a fresh state."""
self.cache_invalidated = True
self.cache_dirty = True
self.cache = {"cache_version": self.__class__.cache_version, "root_dirs": {}}
def run(self, root_dir, dir_name, pattern_sets, allow_empty_right):
"""Perform the file system crawling from a root directory given a query pattern set
``allow_empty_right`` -- for mixed PE/SE read data sets (must be either SE or PE
for one library!)
"""
matches = {} # path => pattern set idx => pattern idx => [path]
returned = 0 # from {0, 1, 2}; how many patterns matched?
# Invalidate cache fully if the cache file is older than any one of self.invalidation_paths
self._perform_cache_invalidation()
# Ensure that cache entry with crawling results of all files exists
if root_dir not in self.cache["root_dirs"]:
self.cache_dirty = True
self.cache["root_dirs"][root_dir] = tuple(sorted(self._build_cache(root_dir)))
# Now, crawl over this structure and match against all pattern sets
self.logger.debug('Crawling "%s" for dir_name "%s"', root_dir, dir_name)
for i, pattern_set in enumerate(pattern_sets):
self.logger.debug(" patterns in pattern set #%d: %s", i, pattern_set.patterns)
for path in self.cache["root_dirs"][root_dir]:
if dir_name not in path:
continue
idx = path.index(dir_name)
left, right = path[: idx + 1], path[idx + 1 :]
for i, pattern_set in enumerate(pattern_sets):
for j, pattern in enumerate(pattern_set.patterns):
does_match = fnmatch("/".join(right), pattern)
self.logger.debug(
'does "%s" match "%s" match? => %s', "/".join(right), pattern, does_match
)
if does_match:
matches.setdefault("/".join(left), {}).setdefault(i, {}).setdefault(
j, []
).append("/".join(right))
# Go over results and check whether they are conforming.
for path, path_matches in matches.items():
for set_idx, set_matches in path_matches.items():
# Must have a match for each pattern
if not allow_empty_right and len(set_matches) != len(
pattern_sets[set_idx].patterns
):
print(
(
"WARNING: Skipping matches {} as the number of matches is not equal to "
"the number of patterns in {}"
).format(set_matches, pattern_sets[set_idx].patterns),
file=sys.stderr,
)
continue
# Guard against mixing SE and PE results for crawling
if returned:
if returned != len(set_matches):
raise ValueError( # pragma: no cover
"Found mixed SE and PE data for one library!"
)
else:
returned = len(set_matches)
# Must have the same number of matches for each pattern
lst_lens = [len(l) for l in set_matches.values()]
if len(set(lst_lens)) != 1:
raise ValueError( # pragma: no cover
"Must have the same number of matches per pattern, but found {}".format(
list(set_matches.values())
)
)
# Yield result, checking that file names are equal except for one character (e.g.,
# "R1" vs "R2")"
for i in range(0, lst_lens[0]):
files = [
os.path.join(root_dir, path, match[i]) for match in set_matches.values()
]
self._check_fname_mismatches(files)
base_path = os.path.join(root_dir, path)
yield FileSystemCrawlerResult(
base_path, files, pattern_sets[set_idx].names[:returned]
)
def _perform_cache_invalidation(self):
"""Check whether the cache needs to be invalidated and do so if necessary."""
if self.cache_invalidated:
return # Cache has been invalidated before
if not os.path.exists(self.cache_path):
return # No cache yet
self.logger.debug("Checking whether file crawler cache should be invalidated...")
cache_ctime = os.path.getctime(self.cache_path)
for path in self.invalidation_paths:
path_mtime = os.path.getmtime(path)
if path_mtime > cache_ctime:
self.logger.info("Invalidating cache because of %s", path)
self._set_fresh_cache()
return
self.logger.debug(" => no, not invalidating cache")
@classmethod
def _check_fname_mismatches(cls, file_names):
for i in range(0, len(file_names)):
for j in range(0, i):
a = file_names[i]
b = file_names[j]
mismatches = 0
if len(a) != len(b):
raise FileNamesTooDifferent( # pragma: no cover
"File names have different length {} vs {}".format(a, b)
)
for x, y in zip(a, b):
mismatches += int(x != y)
if mismatches > 1:
raise FileNamesTooDifferent( # pragma: no cover
"File names too different ({} mismatches) {} vs {}".format(mismatches, a, b)
)
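# For example, "sample_R1.fastq.gz" and "sample_R2.fastq.gz" differ in exactly
# one character and are accepted as a read pair, while names of different
# length or with two or more differing characters raise FileNamesTooDifferent.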
def _build_cache(self, root_dir):
self.logger.info("Building file system crawler cache from %s", root_dir)
for root, _, files in os.walk(root_dir, followlinks=True):
self.logger.debug("Caching for directory %s", root)
base = root[len(root_dir) + 1 :].split("/") or ["."]
yield from (tuple(base + [f]) for f in files)
def save_cache(self, cache_path=None):
"""Save cache, ``cache_path`` overriding ``self.cache_path``"""
if not (self.cache_dirty or self.cache_invalidated):
return # don't save if unchanged
cache_path = cache_path or self.cache_path
with InterProcessLock(self.cache_path + ".lock"):
self.logger.debug("Saving file system crawler cache to %s", cache_path)
with open(cache_path, "wt") as f:
json.dump(self.cache, f)
def _load_cache(self):
with InterProcessLock(self.cache_path + ".lock"):
self.logger.info("Loading file system crawler cache from %s", self.cache_path)
with open(self.cache_path, "rt") as f:
result = json.loads(f.read(), object_pairs_hook=OrderedDict)
if result["cache_version"] != self.__class__.cache_version:
raise ValueError( # pragma: no cover
"Invalid cache version {}".format(result["cache_version"])
)
return result
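# A minimal usage sketch of the two classes above; the cache path, root
# directory, folder name and patterns are illustrative placeholders only:
#
#   crawler = FileSystemCrawler("/tmp/crawl_cache.json", invalidation_paths=["samples.tsv"])
#   pe_reads = PatternSet(("*_R1.fastq.gz", "*_R2.fastq.gz"), names=("left", "right"))
#   for result in crawler.run("/data/fastq", "SAMPLE_001", [pe_reads], allow_empty_right=True):
#       print(result.base_folder, result.named_files)
#   crawler.save_cache()  # persist the directory listing for the next run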
| 43.3
| 100
| 0.568751
|
47902c3d4b2249b1046b378958eb93ba588dde1f
| 2,846
|
py
|
Python
|
dojo/tools/php_symfony_security_check/parser.py
|
brunoduruzup/django-DefectDojo
|
cd598b44f1c44ca2a05fdf95f99c0d526509f656
|
[
"BSD-3-Clause"
] | 2
|
2021-09-19T23:19:12.000Z
|
2022-03-26T10:20:49.000Z
|
dojo/tools/php_symfony_security_check/parser.py
|
brunoduruzup/django-DefectDojo
|
cd598b44f1c44ca2a05fdf95f99c0d526509f656
|
[
"BSD-3-Clause"
] | 167
|
2021-03-15T13:49:54.000Z
|
2022-03-31T09:10:30.000Z
|
dojo/tools/php_symfony_security_check/parser.py
|
Hijerboa/django-DefectDojo
|
3aea3bc3406f860c0842b0bf8800efe2c86bf81b
|
[
"BSD-3-Clause"
] | 4
|
2016-09-19T17:33:39.000Z
|
2018-12-10T07:55:45.000Z
|
import json
from dojo.models import Finding
class PhpSymfonySecurityCheckParser(object):
def get_scan_types(self):
return ["PHP Symfony Security Check"]
def get_label_for_scan_types(self, scan_type):
return scan_type # no custom label for now
def get_description_for_scan_types(self, scan_type):
return "Import results from the PHP Symfony Security Checker by Sensioslabs."
def get_findings(self, json_file, test):
tree = self.parse_json(json_file)
return self.get_items(tree, test)
def parse_json(self, json_file):
if json_file is None:
self.items = []
return
try:
data = json_file.read()
try:
tree = json.loads(str(data, 'utf-8'))
except:
tree = json.loads(data)
except:
raise Exception("Invalid format")
return tree
def get_items(self, tree, test):
items = {}
for dependency_name, dependency_data in list(tree.items()):
advisories = dependency_data.get('advisories')
dependency_version = dependency_data['version']
if dependency_version and dependency_version.startswith('v'):
dependency_version = dependency_version[1:]
for advisory in advisories:
item = get_item(dependency_name, dependency_version, advisory, test)
unique_key = str(dependency_name) + str(dependency_data['version'] + str(advisory['cve']))
items[unique_key] = item
return list(items.values())
def get_item(dependency_name, dependency_version, advisory, test):
finding = Finding(title=dependency_name + " - " + "(" + dependency_version + ", " + advisory['cve'] + ")",
test=test,
# TODO decide how to handle the fact we don't have a severity. None will lead to problems handling minimum severity on import
severity='Info',
description=advisory['title'],
# TODO Decide if the default '1035: vulnerable 3rd party component' is OK to use?
cwe=1035,
cve=advisory['cve'],
mitigation='upgrade',
references=advisory['link'],
active=False,
verified=False,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
impact="No impact provided",
static_finding=True,
dynamic_finding=False,
component_name=dependency_name,
component_version=dependency_version)
return finding
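# The JSON layout this parser expects (keys follow the lookups in get_items()
# above; the concrete values are illustrative only):
#
#   {
#     "symfony/http-foundation": {
#       "version": "v2.7.0",
#       "advisories": [
#         {"title": "Example advisory", "cve": "CVE-2015-2309", "link": "https://example.org/advisory"}
#       ]
#     }
#   }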
| 36.487179
| 147
| 0.562544
|
6ebd207bf321ac3833ba45fa5f32faa1144392ec
| 518
|
py
|
Python
|
tests/resize/downscale.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T16:30:05.000Z
|
2022-02-18T22:28:50.000Z
|
tests/resize/downscale.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/resize/downscale.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | null | null | null |
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import data
import matplotlib.pyplot as plt
image = data.coins()
image_downscaled = downscale_local_mean(image, (4,4))
image_resized = resize(image_downscaled, image.shape)
fig, ax = plt.subplots(nrows=1, ncols=3)
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original')
ax[1].imshow(image_downscaled, cmap='gray')
ax[1].set_title('downscaled')
ax[2].imshow(image_resized, cmap='gray')
ax[2].set_title('resized')
plt.show()
| 21.583333
| 67
| 0.752896
|
aa84bbdb57890083170db5ee8307b9f22f452103
| 42,259
|
py
|
Python
|
ice.py
|
Manojtuguru/Yamz
|
36ba15cd34e35f9b02266a375b4b135f0b15e97c
|
[
"BSD-3-Clause"
] | null | null | null |
ice.py
|
Manojtuguru/Yamz
|
36ba15cd34e35f9b02266a375b4b135f0b15e97c
|
[
"BSD-3-Clause"
] | null | null | null |
ice.py
|
Manojtuguru/Yamz
|
36ba15cd34e35f9b02266a375b4b135f0b15e97c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
#
# ice - web frontend for SeaIce, based on the Python-Flask framework.
#
# Copyright (c) 2013, Christopher Patton, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import seaice
import ConfigParser
from flask import Markup
from flask import render_template, render_template_string
from flask import url_for, redirect, flash
from flask import request, session, g
from flask.ext import login as l
from flask import request
from itertools import izip
from HTMLParser import HTMLParser
from urllib2 import Request, urlopen, URLError
import os, sys, optparse, re
import json, psycopg2 as pgdb
## Parse command line options. ##
parser = optparse.OptionParser()
parser.description="""\
This program is a Python/Flask-based web frontend for the SeaIce metadictionary.
SeaIce is a database comprised of a set of user-defined, crowd-sourced terms and
relations. The goal of SeaIce is to develop a succinct and complete set of
metadata terms to register just about any type of file or data set. 'ice' is
distributed under the terms of the BSD license with the hope that it will be
useful, but without warranty. You should have received a copy of the BSD
license with this program; otherwise, visit
http://opensource.org/licenses/BSD-3-Clause.
"""
parser.add_option("--config", dest="config_file", metavar="FILE",
help="User credentials for local PostgreSQL database. " +
"If 'heroku' is given, then a connection to a foreign host specified by " +
"DATABASE_URL is established.",
default='heroku')
parser.add_option('--credentials', dest='credentials_file', metavar='FILE',
help='File with OAuth-2.0 credentials. (Defaults to `.seaice_auth`.)',
default='.seaice_auth')
parser.add_option('--deploy', dest='deployment_mode',
help='Deployment mode, used to choose OAuth parameters in credentials file.',
default='heroku')
parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
help="Start flask in debug mode.")
parser.add_option("--role", dest="db_role", metavar="USER",
help="Specify the database role to use for the DB connector pool. These roles " +
"are specified in the configuration file (see --config).",
default="default")
(options, args) = parser.parse_args()
# Figure out if we're in production mode. Look in 'heroku' section only.
config = ConfigParser.ConfigParser()
config.read('.seaice_auth')
if config.has_option('heroku', 'prod_mode'):
prod_mode = config.getboolean('heroku', 'prod_mode')
else:
prod_mode = False # default
## Setup flask application ##
print "ice: starting ..."
db_config = None
try:
if options.config_file == "heroku":
app = seaice.SeaIceFlask(__name__)
else:
db_config = seaice.auth.get_config(options.config_file)
app = seaice.SeaIceFlask(__name__, db_user = db_config.get(options.db_role, 'user'),
db_password = db_config.get(options.db_role, 'password'),
db_name = db_config.get(options.db_role, 'dbname'))
except pgdb.DatabaseError, e:
print >>sys.stderr, "error: %s" % e
sys.exit(1)
try:
credentials = seaice.auth.get_config(options.credentials_file)
google = seaice.auth.get_google_auth(credentials.get(options.deployment_mode, 'google_client_id'),
credentials.get(options.deployment_mode, 'google_client_secret'))
except OSError:
print >>sys.stderr, "error: config file '%s' not found" % options.config_file
sys.exit(1)
app.debug = True
app.use_reloader = True
app.secret_key = credentials.get(options.deployment_mode, 'app_secret')
## Session logins ##
login_manager = l.LoginManager()
login_manager.init_app(app)
login_manager.anonymous_user = seaice.user.AnonymousUser
## Prescore terms ##
# This will be used to check for consistency errors in live scoring
# and isn't needed until I implement O(1) scoring.
#print "ice: checking term score consistnency (dev)" TODO
#for term in db_con.getAllTerms():
# if not db_con.checkTermConsistency(term['id']):
# print "warning: corrected inconsistent consensus score for term %d" % term['id']
# db_con.commit()
print "ice: setup complete."
@login_manager.user_loader
def load_user(id):
return app.SeaIceUsers.get(int(id))
## Request wrappers (may have use for these later) ##
@app.before_request
def before_request():
pass
@app.teardown_request
def teardown_request(exception):
pass
## HTTP request handlers ##
@app.errorhandler(404)
def pageNotFound(e):
return render_template('basic_page.html', user_name = l.current_user.name,
title = "Oops! - 404",
headline = "404",
content = "The page you requested doesn't exist."), 404
# home page
@app.route("/")
def index():
if l.current_user.id:
g.db = app.dbPool.getScoped()
# TODO Store these values in class User in order to prevent
# these queries every time the homepage is accessed.
my = seaice.pretty.printTermsAsLinks(g.db,
g.db.getTermsByUser(l.current_user.id))
star = seaice.pretty.printTermsAsLinks(g.db,
g.db.getTermsByTracking(l.current_user.id))
notify = l.current_user.getNotificationsAsHTML(g.db)
return render_template("index.html", user_name = l.current_user.name,
my = Markup(my.decode('utf-8')) if my else None,
star = Markup(star.decode('utf-8')) if star else None,
notify = Markup(notify.decode('utf-8')) if notify else None)
return render_template("index.html", user_name = l.current_user.name)
@app.route("/about")
def about():
return render_template("about.html", user_name = l.current_user.name)
@app.route("/guidelines")
def guidelines():
return render_template("guidelines.html", user_name = l.current_user.name)
@app.route("/api")
def api():
return redirect(url_for('static', filename='api/index.html'))
@app.route("/contact")
def contact():
return render_template("contact.html", user_name = l.current_user.name)
# Portal
#
portal = False
portalterm = ''
portalpath = ''
portalintro = {
'UCOP': '<p> Welcome to the UC Office of the President Portal',
'citsci': '''<p> Welcome to the first YAMZ Portal,
#citscitools is a subset of the full YAMZ Metadictionary, an open vocabulary of metadata
terms from all domains and from all parts of "metadata speech." The purpose of #citscitools is to help
the public produce and vet a specific type of metadata: the words used to describe the different tools and technologies that are commonly used in citizen science.
The metadata included in #citscitools will be used to inform the structure of a new database of citizen
science tools created by the online community <a href="http://scistarter.com"> SciStarter. </a> in coordination
with the <a href="https://wilsoncommonslab.org/"> Commons Lab</a> at the Woodrow Wilson International Center for Scholars.
While people are welcome to contribute to the #citscitools portal at any time, the portal will be debuted in real time at the <a href = "https://sfis.asu.edu/asu-citizen-science-maker-summit-2016"> ASU Citizen Science and Maker Summit </a>, on October 27-28, 2016. </p>
<hr>'''
}
#@app.route("/p")
#@app.route("/p/<term>")
#def hash (term=None):
# g.db = app.dbPool.getScoped()
# if not term:
# return render_template("browse.html", user_name = l.current_user.name,
# title = "No Term",
# headline = "No Term ",
# content = Markup("xxx"))
# prefix = "#{g: xq"
#prefix = ""
# result = False
#n, tag = g.db.getTermByInitialTermString(prefix+term)
#if n == 1:
# x = prefix + term + " "
# t = tag['term_string']
# v,s = t.split('|')
#a = str(v)
#x = prefix+term+" "
# if x == v:
#result = seaice.pretty.printTermAsHTML(g.db, term, l.current_user.id)
# if result:
# portal = True
# portalpath = 'p/' + term
# Here we need to restrict the browse just to portal terms. Prior art:
# return browse("stable", prefix+term)
# 1. < fill in name of browse routine >
# 2. <tag function>
#return render_template("browse.html", user_name = l.current_user.name,
# title = " Portal " + term,
# headline = "Welcome to portal: " + term, # "," + x + "," + v + ",", #+ tag['term_string'] + "," + term + "," + x,
# content = Markup(result))
# return render_template('basic_page.html', user_name = l.current_user.name,
# title = "Oops! - 404",
# headline = "404",
# content = "The page you requested doesn't exist."), 404
#if n == 2:
# result = seaice.pretty.getPrettyParagraph(g.db, term)
#return render_template("browse.html", user_name = l.current_user.name,
# title = "Browse",
# headline = "ambigious", #+ tag['term_string'] + "," + term + "," + x,
#content = Markup(result))
#return render_template("browse.html", user_name = l.current_user.name,
# title = "Browse",
# headline = "Browse Tags", #+ tag['term_string'] + "," + term + "," + x,
# content = Markup(n))
#if not result:
# return render_template('basic_page.html', user_name = l.current_user.name,
# title = "Oops! - 404",
# headline = "404",
# content = "The page you requested doesn't exist."), 404
#terms = g.db.search(seaice.pretty.ixuniq.tag)
#elif n==2:
# result = seaice.pretty.getPrettyParagraph(g.db, term)
#elif n==0:
#return render_template("browse.html", user_name = l.current_user.name,
#title = "Browse",
#headline = "Browse Tags" + tag['term_string'] + "," + term,
#content = Markup(result.decode('utf-8'))) */
#@app.route("/p/browse")
#def portalbrowse():
# return render_template("portalbrowse.html", user_name = l.current_user)
#@app.route("/p/contribute")
#def portaladd():
# return render_template("portalcontribute.html", user_name = l.current_user.name)
#@app.route("/p/about")
#def portalabout():
# return render_template("portalabout.html", user_name = l.current_user.name)
#@app.route("/p/contact")
#def portalcontact():
# return render_template("portalcontact.html", user_name = l.current_user.name)
#@app.route("/p/account")
#def portalaccount():
# return render_template("portalaccount.html", user_name = l.current_user.name)
## Login and logout ##
@app.route("/login")
def login():
if l.current_user.id:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = "You are already logged in!")
form = '''
<p>
In order to propose new terms or comment on others, you must first
sign in.
<li>Sign in with <a href="/login/google">Google</a>.</li>
</p>
'''
return render_template("basic_page.html", title = "Login page",
headline = "Login",
content = Markup(form.decode('utf-8')))
@app.route("/login/google")
def login_google():
callback=url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route(seaice.auth.REDIRECT_URI)
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
headers = {'Authorization': 'OAuth '+access_token}
req = Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)
try:
res = urlopen(req)
except URLError, e:
if e.code == 401: # Unauthorized - bad token
session.pop('access_token', None)
return 'l'
g_user = json.load(res)
g.db = app.dbPool.getScoped()
user = g.db.getUserByAuth('google', g_user['id'])
if not user: # not seen this person before, so create user
g_user['authority'] = 'google'
g_user['auth_id'] = g_user['id']
g_user['id'] = app.userIdPool.ConsumeId()
g_user['last_name'] = "nil"
g_user['first_name'] = "nil"
g_user['reputation'] = "30"
g.db.insertUser(g_user)
g.db.commit()
user = g.db.getUserByAuth('google', g_user['auth_id'])
app.SeaIceUsers[user['id']] = seaice.user.User(user['id'], user['first_name'])
l.login_user(app.SeaIceUsers.get(user['id']))
return render_template("account.html", user_name = l.current_user.name,
email = g_user['email'],
message = """
According to our records, this is the first time you've logged onto
SeaIce with this account. Please provide your first and last name as
you would like it to appear with your contributions. Thank you!""")
l.login_user(app.SeaIceUsers.get(user['id']))
flash("Logged in successfully")
return redirect(url_for('index'))
@google.tokengetter
def get_access_token():
return session.get('access_token')
@app.route('/logout')
@l.login_required
def logout():
l.logout_user()
return redirect(url_for('index'))
@login_manager.unauthorized_handler
def unauthorized():
return redirect(url_for('login'))
## Users ##
@app.route("/account", methods = ['POST', 'GET'])
@l.login_required
def settings():
g.db = app.dbPool.dequeue()
if request.method == "POST":
g.db.updateUser(l.current_user.id,
request.form['first_name'],
request.form['last_name'],
True if request.form.get('enotify') else False)
g.db.commit()
app.dbPool.enqueue(g.db)
l.current_user.name = request.form['first_name']
return getUser(str(l.current_user.id))
# method was GET
user = g.db.getUser(l.current_user.id)
app.dbPool.enqueue(g.db)
return render_template("account.html", user_name = l.current_user.name,
email = user['email'].decode('utf-8'),
last_name_edit = user['last_name'].decode('utf-8'),
first_name_edit = user['first_name'].decode('utf-8'),
reputation = user['reputation'] + ' *' if user['super_user'] else ' _',
enotify = 'yes' if user['enotify'] else 'no',
message = """
Here you can change how your name will appear to other users.
Navigating away from this page will safely discard any changes.""")
@app.route("/user=<int:user_id>")
def getUser(user_id = None):
g.db = app.dbPool.getScoped()
try:
user = g.db.getUser(int(user_id))
if user:
result = """<hr>
<table cellpadding=12>
<tr><td valign=top width="40%">First name:</td><td>{0}</td></tr>
<tr><td valign=top>Last name:</td><td>{1}</td></tr>
<tr><td valign=top>Email:</td><td>{2}</td></td>
<tr><td valign=top>Reputation:</td><td>{3}</td></td>
<tr><td valign=top>Receive email notifications:</td><td>{4}</td>
</table> """.format(user['first_name'], user['last_name'],
user['email'],
user['reputation'] + \
' *' if user['super_user'] else '',
user['enotify'])
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "User - %s" % user_id,
headline = "User",
content = Markup(result.decode('utf')))
except IndexError: pass
return render_template("basic_page.html", user_name = l.current_user.name,
title = "User not found",
headline = "User",
content = Markup("User <strong>#%s</strong> not found!" % user_id))
@app.route("/user=<int:user_id>/notif=<int:notif_index>/remove", methods=['GET'])
@l.login_required
def remNotification(user_id, notif_index):
try:
assert user_id == l.current_user.id
app.SeaIceUsers[user_id].remove(notif_index, app.dbPool.getScoped())
return redirect("/")
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = 'You may only delete your own notifications.')
except IndexError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content = 'Index out of range.')
## Look up terms ##
@app.route("/term/concept=<term_concept_id>")
@app.route("/term=<term_concept_id>")
def getTerm(term_concept_id = None, message = ""):
# NOTE: this getTerm is called with concept_id, the other getTerm with id
g.db = app.dbPool.getScoped()
term = g.db.getTermByConceptId(term_concept_id)
if not term:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term not found",
headline = "Term",
content = Markup("Term <strong>#%s</strong> not found!" \
% term_concept_id))
result = seaice.pretty.printTermAsHTML(g.db, term, l.current_user.id)
result = message + "<hr>" + result + "<hr>"
result += seaice.pretty.printCommentsAsHTML(g.db, g.db.getCommentHistory(term['id']),
l.current_user.id)
if l.current_user.id:
result += """
<form action="/term={0}/comment" method="post">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" name="comment_string" rows=3
style="width:100%; height:500%"
placeholder="Add comment"></textarea></td></tr>
<tr><td align=right><input type="submit" value="Comment"><td>
</td>
</table>
</form>""".format(term['id'])
else:
result += """
<form action="/login" method="get">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" rows=3
style="width:100%; height:100%"
placeholder="Log in to comment." readonly></textarea></td></tr>
<tr><td align=right><input type="submit" value="Login"><td>
</td>
</table>
</form>"""
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Term %s" % term['term_string'],
headline = "Term",
content = Markup(result.decode('utf-8')))
@app.route("/browse/")
@app.route("/browse/<listing>")
#@app.route("/p/browse")
@app.route("/p/<pterm>/")
@app.route("/p/<pterm>/browse/")
@app.route("/p/<pterm>/browse/<listing>")
def browse(listing = None, pterm = None):
g.db = app.dbPool.getScoped()
#terms = g.db.search(seaice.pretty.ixuniq + pterm)
#result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
# get SOME terms
#else:
letter = '~'
# xxx alpha ordering of tags is wrong (because they start '#{g: ')
if pterm: # make sure pterm is exact match for existing tag
prefix = "#{g: xq" # xxx use global tagstart
global tag
n,tag = g.db.getTermByInitialTermString(prefix+pterm)
if n != 1:
return render_template('basic_page.html', user_name = l.current_user.name,
title = "Oops! - 404",
headline = "Portal doesn't exist - 404",
content = "The portal you requested doesn't exist."), 404
# if we get here, we know n == 1
global x
x = tag['term_string']
global dpterm
#dpterm = tag2simple(x)
s,r = x.split('|')
u,t = s.split('xq')
v = s.lower()
global portal, portalpath
portal = True
portalpath = '/p/' + t
c = prefix + pterm + " "
c = c.lower()
if c == v:
terms = g.db.search(seaice.pretty.ixuniq + pterm)
#b = open('content.txt', 'r')
#f = b.read()#a = str(pt)
else:
return render_template('basic_page.html', user_name = l.current_user.name,
title = "Oops! - 404",
headline = "Portal ambiguous - 404",
content = "The portal you requested doesn't exist."), 404
else:
terms = g.db.getAllTerms(sortBy="term_string")
pterm = ''
global portalterm
portalterm = pterm
# if we get here, portalpath will be either empty or contain a valid portalpath
result = "<h5>{0} | {1} | {2} | {3} | {4}</h5><hr>".format(
# '<a href="' + portalpath + '/browse/score">high score</a>' if listing != "score" else 'high score',
'<a href="'+portalpath+'/browse/score">high score</a>' if listing != "score" else 'high score',
'<a href="'+portalpath+'/browse/recent">recent</a>' if listing != "recent" else 'recent',
'<a href="'+portalpath+'/browse/volatile">volatile</a>' if listing != "volatile" else 'volatile',
'<a href="'+portalpath+'/browse/stable">stable</a>' if listing != "stable" else 'stable',
'<a href="'+portalpath+'/browse/alphabetical">alphabetical</a>' if listing != "alphabetical" else 'alphabetical'
)
# if we get here, terms contains all the terms we're going to print, in either the general or the portal case
# result += seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
# return render_template("portalbrowse.html", user_name = l.current_user.name,
# title = "Browse " + pterm,
# headline = "Browse Portal " + pterm,
# b = open('content.txt', 'r'),
# content = Markup(result.decode('utf-8')))
#if listing == "recent" and pterm == True: # Most recently added listing
# result += seaice.pretty.printTermsAsBriefHTML(g.db,
# sorted(pt, key=lambda term: term['modified'], reverse=True),
# l.current_user.id)
if listing == "recent": # Most recently added listing
result += seaice.pretty.printTermsAsBriefHTML(g.db,
sorted(terms, key=lambda term: term['modified'], reverse=True),
l.current_user.id)
elif listing == "score": # Highest consensus
terms = sorted(terms, key=lambda term: term['consensus'], reverse=True)
result += seaice.pretty.printTermsAsBriefHTML(g.db,
sorted(terms, key=lambda term: term['up'] - term['down'], reverse=True), l.current_user.id)
elif listing == "volatile": # Least stable (Frequent updates, commenting, and voting)
terms = sorted(terms, key=lambda term: term['t_stable'] or term['t_last'], reverse=True)
result += seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
elif listing == "stable": # Most stable, highest consensus
terms = sorted(terms, key=lambda term: term['t_stable'] or term['t_last'])
result += seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
elif listing == "alphabetical": # Alphabetical listing
result += "<table>"
for term in terms:
firstc = term['term_string'][0].upper()
if firstc != '#' and firstc != letter:
letter = term['term_string'][0].upper()
result += "</td></tr><tr><td width=20% align=center valign=top><h4>{0}</h4></td><td width=80%>".format(letter)
result += "<p><a %s</a>" % seaice.pretty.innerAnchor(
g.db, term['term_string'], term['concept_id'], term['definition'],
tagAsTerm=True)
result += " <i>contributed by %s</i></p>" % g.db.getUserNameById(term['owner_id'])
result += "</table>"
else:
return redirect(portalpath + "/browse/recent")
hdline = "Browse "
hdline += t if pterm != '' else "dictionary"
#hdline += dpterm if pterm != '' else "dictionary"
tle = "Browse " + t
try:
pintro = Markup(portalintro[pterm])
  except Exception:
pintro = ''
return render_template("browse.html", user_name = l.current_user.name,
title = tle,
headline = hdline ,
portalintro = pintro,
content = Markup(result.decode('utf-8')))
#portal = False
hash2uniquerifier_regex = re.compile(r'(?<!#)#(\w[\w.-]+)')
# xxx is " the problem (use ' below)?
#token_ref_regex = re.compile("(?<!#\{g: )([#&]+)([\w.-]+)")
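# Illustrative note (assuming seaice.pretty.ixuniq is the tag uniquerifier prefix,
# e.g. "xq"): the substitution used below rewrites a community hashtag into a
# search token, so "find #pupil terms" would become "find xqpupil terms", while an
# escaped "##pupil" is left untouched by the negative lookbehind.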
@app.route("/search", methods = ['POST', 'GET'])
def returnQuery():
g.db = app.dbPool.getScoped()
if request.method == "POST":
# XXX whoa -- this use of term_string variable name (in all html forms)
# is totally different from term_string as used in the database!
#if len(portalterm) != 0:
termstr = request.form['term_string']
global portalterm
#if portalterm:
#termstr += ' & #' + portalterm
search_words = hash2uniquerifier_regex.sub(
seaice.pretty.ixuniq + '\\1',
termstr)
xterms = g.db.search(search_words)
Y = "true" if portal else "false"
if portal == True:
prefix = "#{g: xq"
xterms = g.db.search(search_words)
      xterms3 = [term['definition'] for term in xterms]
      portsearch = [d.split('g:', 1)[-1] for d in xterms3]
      portsearch2 = [d.split('|', 1)[0] for d in portsearch]
      u = ' xq' + portalterm + ' '
s = [i for i, x in enumerate(portsearch2) if x == u]
xtermsf = list(xterms[i] for i in s)
if len(xtermsf) == 0:
return render_template('basic_page.html', user_name = l.current_user.name,
title = "Oops! - 404",
headline = "404 - No term in portal found in " + portalterm + "Search in Yamz dictionary",
content = " The portal term you requested doesn't exist inside the portal."), 404
else:
result = seaice.pretty.printTermsAsBriefHTML(g.db, xtermsf, l.current_user.id)
return render_template("search.html", user_name = l.current_user.name,
term_string = request.form['term_string'],
result = Markup(Markup(result.decode('utf-8'))))
else:
terms = g.db.search(search_words)
      result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
return render_template("search.html", user_name = l.current_user.name,
term_string = request.form['term_string'],
result = Markup(Markup(result.decode('utf-8'))))
#search = 'definition'
#for sublist in xterms2:
# if sublist[1] == search:
# return sublist
#n,tag = g.db.getTermByInitialTermString(prefix+portalterm)
#xterms2 = request.form['persistent_id']
# search_words += ' & #' + portalterm
# terms = g.db.search(search_words)
# terms += g.db.search(request.form['term_string'])
# result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
#return render_template("search.html", user_name = l.current_user.name,
# term_string = request.form['term_string'],
# result = Markup(result.decode('utf-8')))
# for normal search route, assume search_words is a simple string
#terms = g.db.search(request.form['term_string'])
#return render_template("search.html", user_name = l.current_user.name,
#term_string = request.form['term_string'],
#result = Markup("search_words " + search_words + ", " + str(X)))
#result = Markup(result.decode('utf-8')))
else: # GET
return render_template("search.html", user_name = l.current_user.name)
# yyy to do: display tag definition at top of search results
# when user clicks on community tag (searches for all terms bearing the tag)
@app.route("/tag/<tag>")
def getTag(tag = None):
g.db = app.dbPool.getScoped()
terms = g.db.search(seaice.pretty.ixuniq + tag)
if len(terms) == 0:
return render_template("tag.html", user_name = l.current_user.name,
term_string = tag)
else:
result = seaice.pretty.printTermsAsBriefHTML(g.db, terms, l.current_user.id)
return render_template("tag.html", user_name = l.current_user.name,
term_string = tag, result = Markup(result.decode('utf-8')))
## Propose, edit, or remove a term ##
@app.route("/contribute", methods = ['POST', 'GET'])
@l.login_required
def addTerm():
global tag
if request.method == "POST":
g.db = app.dbPool.dequeue()
term = {
#'term_string' : request.form['term_string'],
'term_string' : seaice.pretty.refs_norm(g.db, request.form['term_string']),
'definition' : seaice.pretty.refs_norm(g.db, request.form['definition']),
'examples' : seaice.pretty.refs_norm(g.db, request.form['examples']),
'owner_id' : l.current_user.id,
'id' : app.termIdPool.ConsumeId() }
(id, concept_id) = g.db.insertTerm(term, prod_mode)
# Special handling is needed for brand new tags, which always return
# "(undefined/ambiguous)" qualifiers at the moment of definition.ghtbulb
#
G = term['term_string'].startswith('#{g:')
    if G:    # if defining a tag
#term['term_string'] = '#{g: %s | %s}' % ( # correct our initial
term['term_string'] = '%s%s | %s}' % ( # correct our initial
seaice.pretty.tagstart,
seaice.pretty.ixuniq + request.form['term_string'][1:],
concept_id) # guesses and update
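      # Illustrative (assumes seaice.pretty.tagstart == '#{g: ' and ixuniq == 'xq',
      # matching the prefix used elsewhere in this file): a submitted '#pupil'
      # would end up stored as '#{g: xqpupil | <concept_id>}'.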
#if portal == True:
# term['definition'] += ' ' + tag['term_string']
g.db.updateTerm(term['id'], term, None, prod_mode)
elif portal == True and G == False:
term['definition'] += '\n' + tag['term_string']
g.db.updateTerm(term['id'], term, None, prod_mode)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(concept_id,
message = "Your term has been added to the metadictionary!")
else: # GET
return render_template("contribute.html", user_name = l.current_user.name,
title = "Contribute", headline = "Add a dictionary term")
@app.route("/term=<term_concept_id>/edit", methods = ['POST', 'GET'])
@l.login_required
def editTerm(term_concept_id = None):
try:
g.db = app.dbPool.dequeue()
term = g.db.getTermByConceptId(term_concept_id)
#user = g.db.getUser(l.current_user.id)
# yyy not checking if term was found?
assert l.current_user.id and term['owner_id'] == l.current_user.id
if request.method == "POST":
assert request.form.get('examples') != None
updatedTerm = {
#'term_string' : request.form['term_string'],
'term_string' : seaice.pretty.refs_norm(g.db, request.form['term_string']),
'definition' : seaice.pretty.refs_norm(g.db, request.form['definition']),
'examples' : seaice.pretty.refs_norm(g.db, request.form['examples']),
'owner_id' : l.current_user.id }
g.db.updateTerm(term['id'], updatedTerm, term['persistent_id'], prod_mode)
# Notify tracking users
notify_update = seaice.notify.TermUpdate(
term['id'], l.current_user.id, term['modified'])
for user_id in g.db.getTrackingByTerm(term['id']):
app.SeaIceUsers[user_id].notify(notify_update, g.db)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(term_concept_id,
message = "Your term has been updated in the metadictionary.")
else: # GET
app.dbPool.enqueue(g.db)
if term:
return render_template("contribute.html",
user_name = l.current_user.name,
title = "Edit - %s" % term_concept_id,
headline = "Edit term",
edit_id = term_concept_id,
term_string_edit = term['term_string'].decode('utf-8'),
definition_edit = term['definition'].decode('utf-8'),
examples_edit = term['examples'].decode('utf-8'))
except ValueError:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term not found",
headline = "Term",
content = Markup("Term <strong>#%s</strong> not found!" % term_concept_id))
except AssertionError:
return render_template("basic_page.html",
user_name = l.current_user.name,
title = "Term - %s" % term_concept_id,
content =
"""Error! You may only edit or remove terms and definitions that
you've contributed. However, you may comment or vote on this term.
assert term['owner_id'] (%s) == l.current_user.id (%s)""" % (term['owner_id'], l.current_user.id))
@app.route("/term=<int:term_id>/remove", methods=["POST"])
@l.login_required
def remTerm(term_id):
try:
g.db = app.dbPool.getScoped()
term = g.db.getTerm(int(request.form['id']))
assert term and term['owner_id'] == l.current_user.id
assert term['class'] == 'vernacular'
tracking_users = g.db.getTrackingByTerm(term_id)
id = g.db.removeTerm(int(request.form['id']), term['persistent_id'],
prod_mode)
app.termIdPool.ReleaseId(id)
# Notify tracking users
notify_removed = seaice.notify.TermRemoved(l.current_user.id,
term['term_string'],
g.db.getTime())
for user_id in tracking_users:
app.SeaIceUsers[user_id].notify(notify_removed, g.db)
g.db.commit()
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Remove term",
content = Markup(
"Successfully removed term <b>%s (%s)</b> from the metadictionary." % (term['term_string'], term['concept_id'])))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Term - %s" % term_id,
content =
"""Error! You may only remove terms that are in the vernacular class and
that you've contributed. However, you may comment or vote on this term. """)
## Comments ##
@app.route("/term=<int:term_id>/comment", methods=['POST'])
@l.login_required
def addComment(term_id):
try:
assert l.current_user.id
term_id = int(term_id)
g.db = app.dbPool.getScoped()
comment = { 'comment_string' : seaice.pretty.refs_norm(g.db, request.form['comment_string']),
'term_id' : term_id,
'owner_id' : l.current_user.id,
'id' : app.commentIdPool.ConsumeId()}
comment_id = g.db.insertComment(comment)
# Notify owner and tracking users
notify_comment = seaice.notify.Comment(term_id, l.current_user.id, comment['comment_string'],
g.db.getComment(comment_id)['created'])
tracking_users = [ user_id for user_id in g.db.getTrackingByTerm(term_id) ]
tracking_users.append(g.db.getTerm(term_id)['owner_id'])
for user_id in tracking_users:
if user_id != l.current_user.id:
app.SeaIceUsers[user_id].notify(notify_comment, g.db)
g.db.commit()
return redirect("/term=%s" % g.db.getTermConceptId(term_id))
except AssertionError:
return redirect(url_for('login'))
@app.route("/comment=<int:comment_id>/edit", methods = ['POST', 'GET'])
@l.login_required
def editComment(comment_id = None):
try:
g.db = app.dbPool.dequeue()
comment = g.db.getComment(int(comment_id))
assert l.current_user.id and comment['owner_id'] == l.current_user.id
if request.method == "POST":
updatedComment = { 'comment_string' : seaice.pretty.refs_norm(g.db, request.form['comment_string']),
'owner_id' : l.current_user.id }
g.db.updateComment(int(comment_id), updatedComment)
g.db.commit()
app.dbPool.enqueue(g.db)
return getTerm(g.db.getTermConceptId(comment['term_id']), message = "Your comment has been updated.")
else: # GET
app.dbPool.enqueue(g.db)
if comment:
form = """
<form action="/comment={0}/edit" method="post">
<table cellpadding=16 width=60%>
<tr><td><textarea type="text" name="comment_string" rows=3
style="width:100%; height:100%"
placeholder="Add comment">{1}</textarea></td></tr>
          <tr><td align=right><input type="submit" value="Comment"></td></tr>
</table>
</form>""".format(comment_id, comment['comment_string'])
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Edit comment",
headline = "Edit your comment",
content = Markup(form.decode('utf-8')))
except ValueError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Comment not found",
content = Markup("Comment <strong>#%s</strong> not found!" % comment_id))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Term - %s" % term_id,
content =
"""Error! You may only edit or remove terms and definitions that
you've contributed. However, you may comment or vote on this term. """)
@app.route("/comment=<int:comment_id>/remove", methods=['POST'])
def remComment(comment_id):
try:
g.db = app.dbPool.getScoped()
comment = g.db.getComment(int(request.form['id']))
assert comment and comment['owner_id'] == l.current_user.id
g.db.removeComment(int(request.form['id']))
g.db.commit()
return redirect("/term=%s" % g.db.getTermConceptId(comment['term_id']))
except AssertionError:
return render_template("basic_page.html", user_name = l.current_user.name,
title = "Oops!",
content =
"""Error! You may only edit or remove your own comments.""")
## Voting! ##
@app.route("/term=<int:term_id>/vote", methods=['POST'])
@l.login_required
def voteOnTerm(term_id):
g.db = app.dbPool.getScoped()
p_vote = g.db.getVote(l.current_user.id, term_id)
if request.form['action'] == 'up':
if p_vote == 1:
g.db.castVote(l.current_user.id, term_id, 0)
else:
g.db.castVote(l.current_user.id, term_id, 1)
elif request.form['action'] == 'down':
if p_vote == -1:
g.db.castVote(l.current_user.id, term_id, 0)
else:
g.db.castVote(l.current_user.id, term_id, -1)
else:
g.db.castVote(l.current_user.id, term_id, 0)
g.db.commit()
print "User #%d voted %s term #%d" % (l.current_user.id, request.form['action'], term_id)
return redirect("/term=%s" % g.db.getTermConceptId(term_id))
@app.route("/term=<int:term_id>/track", methods=['POST'])
@l.login_required
def trackTerm(term_id):
g.db = app.dbPool.getScoped()
if request.form['action'] == "star":
g.db.trackTerm(l.current_user.id, term_id)
else:
g.db.untrackTerm(l.current_user.id, term_id)
g.db.commit()
print "User #%d %sed term #%d" % (l.current_user.id, request.form['action'], term_id)
return ("/term=%s" % g.db.getTermConceptId(term_id))
## Start HTTP server. (Not relevant on Heroku.) ##
if __name__ == '__main__':
app.debug = True
app.run('0.0.0.0', 5000, use_reloader = False)
| 40.017992
| 275
| 0.602286
|
4d4dd1ec9588fc39913403835beb5beeb902c309
| 2,258
|
py
|
Python
|
frigg/settings/base.py
|
gitter-badger/frigg
|
e49292ccae9eb24b255ed31eb1bdf7f56204b04b
|
[
"MIT"
] | null | null | null |
frigg/settings/base.py
|
gitter-badger/frigg
|
e49292ccae9eb24b255ed31eb1bdf7f56204b04b
|
[
"MIT"
] | null | null | null |
frigg/settings/base.py
|
gitter-badger/frigg
|
e49292ccae9eb24b255ed31eb1bdf7f56204b04b
|
[
"MIT"
] | null | null | null |
"""
Django settings for frigg project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v8aa$cb0knx6)vyo!%tn6k6_g($!n1yq_v+4bg9v4*n@&dpu0w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frigg.builds'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'frigg.urls'
WSGI_APPLICATION = 'frigg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'files/'),
)
PROJECT_TMP_DIRECTORY = "/home/ubuntu/builds/frigg_working_dir/"
SERVER_ADDRESS = '127.0.0.1:8000'
| 23.768421
| 71
| 0.728964
|
426afa62a6f3608936d63154ab2cdad97cbdde44
| 3,808
|
py
|
Python
|
blazar-3.0.0/blazar/tests/utils/test_plugins.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 24
|
2015-10-18T02:53:07.000Z
|
2022-01-04T12:01:07.000Z
|
blazar-3.0.0/blazar/tests/utils/test_plugins.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 19
|
2017-05-23T21:34:09.000Z
|
2022-02-21T19:15:29.000Z
|
blazar-3.0.0/blazar/tests/utils/test_plugins.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 22
|
2015-10-30T07:42:40.000Z
|
2021-05-31T14:33:38.000Z
|
# Copyright (c) 2017 NTT.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blazar.manager import exceptions as manager_exceptions
from blazar import tests
from blazar.utils import plugins as plugins_utils
class TestPluginsUtils(tests.TestCase):
def setUp(self):
super(TestPluginsUtils, self).setUp()
def test_convert_requirements_empty(self):
request = '[]'
result = plugins_utils.convert_requirements(request)
self.assertEqual([], result)
def test_convert_requirements_empty_string(self):
# NOTE(priteau): Currently, empty requirements can be persisted in the
# database as empty strings, which are not valid JSON objects.
request = ''
result = plugins_utils.convert_requirements(request)
self.assertEqual([], result)
def test_convert_requirements_small(self):
request = '["=", "$memory", "4096"]'
result = plugins_utils.convert_requirements(request)
self.assertEqual(['memory == 4096'], result)
def test_convert_requirements_with_incorrect_syntax_1(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements, '["a", "$memory", "4096"]')
def test_convert_requirements_with_incorrect_syntax_2(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements, '["=", "memory", "4096"]')
def test_convert_requirements_with_incorrect_syntax_3(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements, '["=", "$memory", 4096]')
def test_convert_requirements_complex(self):
request = '["and", [">", "$memory", "4096"], [">", "$disk", "40"]]'
result = plugins_utils.convert_requirements(request)
self.assertEqual(['memory > 4096', 'disk > 40'], result)
def test_convert_requirements_complex_with_incorrect_syntax_1(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements,
'["and", [">", "memory", "4096"], [">", "$disk", "40"]]')
def test_convert_requirements_complex_with_incorrect_syntax_2(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements,
'["fail", [">", "$memory", "4096"], [">", "$disk", "40"]]')
def test_convert_requirements_complex_with_not_json_value(self):
self.assertRaises(
manager_exceptions.MalformedRequirements,
plugins_utils.convert_requirements, 'something')
def test_list_difference(self):
old_list = [1, 1, 2, 3, 4, 4, 4, 5]
new_list = [1, 2, 3, 4, 7, 8, 8]
result = plugins_utils.list_difference(old_list, new_list)
to_remove = [1, 4, 4, 5]
to_add = [7, 8, 8]
self.assertEqual((to_remove, to_add), result)
def test_list_difference_empty(self):
old_list = []
new_list = [1, 2, 2, 2, 3, 4, 7, 8, 8]
result = plugins_utils.list_difference(old_list, new_list)
to_remove = []
to_add = [1, 2, 2, 2, 3, 4, 7, 8, 8]
self.assertEqual((to_remove, to_add), result)
| 37.333333
| 78
| 0.667017
|
72d7940da7899e68ed74be36486a63decca83d97
| 399
|
py
|
Python
|
code-example/ch10/quick_session.py
|
grahovsky/python-edu
|
15dad408d8f226b69362b073c30af5cf4079094a
|
[
"Unlicense"
] | null | null | null |
code-example/ch10/quick_session.py
|
grahovsky/python-edu
|
15dad408d8f226b69362b073c30af5cf4079094a
|
[
"Unlicense"
] | null | null | null |
code-example/ch10/quick_session.py
|
grahovsky/python-edu
|
15dad408d8f226b69362b073c30af5cf4079094a
|
[
"Unlicense"
] | null | null | null |
from flask import Flask, session
app = Flask(__name__)
app.secret_key = 'YouWillNeverGuess'
@app.route('/setuser/<user>')
def setuser(user: str) -> str:
session['user'] = user
return 'User value set to: ' + session['user']
@app.route('/getuser')
def getuser() -> str:
return 'User value is currently set to: ' + session['user']
if __name__ == '__main__':
app.run(debug=True)
| 19
| 63
| 0.654135
|
42ed32f33d406efc08bcdfa7968e213eec2913b6
| 8,431
|
py
|
Python
|
UnetTrainTest.py
|
isha31415/unet-eyetracking
|
d548da18fa46cd878a2084daa70192b55288aeef
|
[
"MIT"
] | null | null | null |
UnetTrainTest.py
|
isha31415/unet-eyetracking
|
d548da18fa46cd878a2084daa70192b55288aeef
|
[
"MIT"
] | null | null | null |
UnetTrainTest.py
|
isha31415/unet-eyetracking
|
d548da18fa46cd878a2084daa70192b55288aeef
|
[
"MIT"
] | null | null | null |
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import sys
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import *
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
import keras.backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
#-------------------------------------------------------
# PARSE Command Line Arguments
#-------------------------------------------------------
corneal_opt=0 # For Corneal Data
pupil_opt = 0 # For Pupil Data
train_opt=0 # For Training UNET Model
verbose_opt=0 # For Verbose predict on entire dataset in the end
prediction_opt=0 # For prediction on provided file only
prediction_file_name = './images-3.jpeg'
#python UnetTrainTest.py pupil -f file_name
#python UnetTrainTest.py train pupil
#python UnetTrainTest.py corneal -f file_name
#python UnetTrainTest.py train corneal
i=1
while i < len(sys.argv):
if sys.argv[i] == 'train':
train_opt = 1
elif sys.argv[i] == 'corneal':
corneal_opt = 1
elif sys.argv[i] == 'pupil':
pupil_opt = 1
elif sys.argv[i] == 'verbose':
verbose_opt = 1
elif sys.argv[i] == '-f':
prediction_opt = 1
#next field is the image name for prediction, skip an index
prediction_file_name = sys.argv[i+1]
i=i+1
else:
print("Argument ", sys.argv[i], " Not Recognized!")
i=i+1
#-------------------------------------------------------
# Load and Prep Train and Evaluate Data (images)
#-------------------------------------------------------
if corneal_opt :
print("Preparing Corneal Data.....")
IMAGE_LIB = './datasets/corneal/img/'
MASK_LIB = './datasets/corneal/masks/'
weights_file = 'original_corneal.h5'
elif pupil_opt :
    print("Preparing Pupil Data.....")
    IMAGE_LIB = './datasets/pupil/img/'
    MASK_LIB = './datasets/pupil/masks/'
    weights_file = 'original_pupil.h5'
else :
    # Neither dataset was chosen; IMAGE_LIB/MASK_LIB would be undefined below.
    print("Please pass either 'corneal' or 'pupil' on the command line.")
    sys.exit(1)
IMG_HEIGHT, IMG_WIDTH = 288, 432
SEED=42
all_images = [x for x in sorted(os.listdir(IMAGE_LIB)) if x[-1] == 'g']
x_data = np.empty((len(all_images), IMG_HEIGHT, IMG_WIDTH), dtype='float32')
for i, name in enumerate(all_images):
im = cv2.imread(IMAGE_LIB + name, 0).astype("int16").astype('float32')
im = cv2.resize(im, dsize=(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_LANCZOS4)
im = (im - np.min(im)) / (np.max(im) - np.min(im))
x_data[i] = im
y_data = np.empty((len(all_images), IMG_HEIGHT, IMG_WIDTH), dtype='float32')
for i, name in enumerate(all_images):
im = cv2.imread(MASK_LIB + name, 0).astype('float32')/255.
im = cv2.resize(im, dsize=(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_NEAREST)
y_data[i] = im
x_data = x_data[:,:,:,np.newaxis]
y_data = y_data[:,:,:,np.newaxis]
x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size = 0.5)
#-------------------------------------------------------
# Define UNET model
#-------------------------------------------------------
print("Compiling UNET Model.....")
#Optimization Cost Metrics
def dice_coef(y_true, y_pred, smooth=1.):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return ( (2. * intersection + smooth) /
(K.sum(y_true_f) + K.sum(y_pred_f) + smooth) )
def dice_coef_loss(y_true, y_pred):
return 1-dice_coef(y_true, y_pred)
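# Worked example (illustrative, not from the original source): for flattened masks
# y_true = [1, 1, 0, 0] and y_pred = [1, 0, 0, 0], the intersection is 1, so with
# smooth = 1 the Dice coefficient is (2*1 + 1) / (2 + 1 + 1) = 0.75 and the loss
# returned above is 0.25.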
'''
#Worked only for Pupil but not for Corneal Reflection
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())
'''
input_layer = Input(shape=x_train.shape[1:])
c1 = Conv2D(filters=8, kernel_size=(3,3), activation='relu', padding='same')(input_layer)
l = MaxPool2D(strides=(2,2))(c1)
c2 = Conv2D(filters=16, kernel_size=(3,3), activation='relu', padding='same')(l)
l = MaxPool2D(strides=(2,2))(c2)
c3 = Conv2D(filters=32, kernel_size=(3,3), activation='relu', padding='same')(l)
l = MaxPool2D(strides=(2,2))(c3)
c4 = Conv2D(filters=32, kernel_size=(1,1), activation='relu', padding='same')(l)
l = concatenate([UpSampling2D(size=(2,2))(c4), c3], axis=-1)
l = Conv2D(filters=32, kernel_size=(2,2), activation='relu', padding='same')(l)
l = concatenate([UpSampling2D(size=(2,2))(l), c2], axis=-1)
l = Conv2D(filters=24, kernel_size=(2,2), activation='relu', padding='same')(l)
l = concatenate([UpSampling2D(size=(2,2))(l), c1], axis=-1)
l = Conv2D(filters=16, kernel_size=(2,2), activation='relu', padding='same')(l)
l = Conv2D(filters=64, kernel_size=(1,1), activation='relu')(l)
l = Dropout(0.5)(l)
output_layer = Conv2D(filters=1, kernel_size=(1,1), activation='sigmoid')(l)
model = Model(input_layer, output_layer)
def my_generator(x_train, y_train, batch_size):
data_generator = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=10,
zoom_range=0.1,
fill_mode='constant',
cval=0.1).flow(x_train, x_train, batch_size, seed=SEED)
mask_generator = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
rotation_range=10,
zoom_range=0.1,
fill_mode='constant',
cval=0.0).flow(y_train, y_train, batch_size, seed=SEED)
while True:
x_batch, _ = data_generator.next()
y_batch, _ = mask_generator.next()
yield x_batch, y_batch
# By using the same RNG seed in both calls to ImageDataGenerator,
#we should get images and masks that correspond to each other.
#Let's check this, to be safe.
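# A hedged sanity check one could run here (kept commented out so the script's
# behavior is unchanged): pull a single augmented batch and confirm that image
# and mask stay aligned.
#   xb, yb = next(my_generator(x_train, y_train, 8))
#   fig, ax = plt.subplots(1, 2, figsize=(12, 6))
#   ax[0].imshow(xb[0, :, :, 0], cmap='gray')
#   ax[1].imshow(yb[0, :, :, 0], cmap='gray')
#   plt.show()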
#-------------------------------------------------------
# Train UNET MOdel
#-------------------------------------------------------
if train_opt:
print("Training UNET Model.....")
#Changed the loss function to new one dice_coef_loss
#model.compile(optimizer=Adam(1e-4), loss='binary_crossentropy', metrics=[dice_coef])
model.compile(optimizer=Adam(1e-4), loss=dice_coef_loss, metrics=[dice_coef])
    # save checkpoints to the dataset-specific weights file that is loaded below
    weight_saver = ModelCheckpoint(weights_file, monitor='val_dice_coef',
                               save_best_only=True, save_weights_only=True)
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.8 ** x)
hist = model.fit_generator(my_generator(x_train, y_train, 8),
steps_per_epoch = 200,
validation_data = (x_val, y_val),
epochs=2, verbose=1,
callbacks = [weight_saver, annealer])
#-------------------------------------------------------
# Evaluate UNET Models
#-------------------------------------------------------
print("Loading Weights file: ", weights_file)
model.load_weights(weights_file)
if prediction_opt :
test_image_input = np.empty((1, IMG_HEIGHT, IMG_WIDTH), dtype='float32')
im = cv2.imread(prediction_file_name, 0).astype("int16").astype('float32')
im = cv2.resize(im, dsize=(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_LANCZOS4)
test_image_input = (im - np.min(im)) / (np.max(im) - np.min(im))
test_image_output = model.predict(test_image_input.reshape(1,IMG_HEIGHT, IMG_WIDTH, 1))[0,:,:,0]
fig, ax = plt.subplots(1,2,figsize=(12,6))
ax[0].imshow(test_image_input, cmap='gray')
ax[1].imshow(test_image_output, cmap='gray')
plt.show()
if verbose_opt:
print("Predicting Results.....")
y_hat_train = model.predict(x_train)
#print Train image, corresponding mask image and also what model prediction does on them
rows = len(x_train)
for i in range(rows):
fig, ax = plt.subplots(1,2,figsize=(12,6))
print("Train Image number: ", i+1)
ax[0].imshow(x_train[i,:,:,0], cmap='gray')
ax[1].imshow(y_hat_train[i,:,:,0], cmap='gray')
plt.show()
#print Validate image, corresponding mask image, and also what model prediction does on them
y_hat_val = model.predict(x_val)
rows = len(x_val)
for i in range(rows):
fig, ax = plt.subplots(1,2,figsize=(12,6))
print("Validage Image number: ", i+1)
ax[0].imshow(x_val[i,:,:,0], cmap='gray')
ax[1].imshow(y_hat_val[i,:,:,0], cmap='gray')
plt.show()
| 36.656522
| 100
| 0.625311
|
2721e343717fe09e3b9084936bc8af0175b2429b
| 2,383
|
py
|
Python
|
archai/algos/darts/darts_model_desc_builder.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | 344
|
2020-06-12T22:12:56.000Z
|
2022-03-29T06:48:20.000Z
|
archai/algos/darts/darts_model_desc_builder.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | 29
|
2020-06-13T19:56:49.000Z
|
2022-03-30T20:26:48.000Z
|
archai/algos/darts/darts_model_desc_builder.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | 68
|
2020-06-12T19:32:43.000Z
|
2022-03-05T06:58:40.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Collection, Optional, Tuple, List
import copy
from overrides import overrides
from archai.nas.model_desc_builder import ModelDescBuilder
from archai.nas.operations import Op
from archai.nas.model_desc import ConvMacroParams, CellDesc, CellType, OpDesc, \
EdgeDesc, TensorShape, TensorShapes, NodeDesc
from archai.algos.darts.mixed_op import MixedOp
from archai.common.config import Config
class DartsModelDescBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('mixed_op',
lambda op_desc, arch_params, affine:
MixedOp(op_desc, arch_params, affine))
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
reduction = (cell_type==CellType.Reduction)
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
# add mixed op for each edge in each node
        # how does the stride work? For all ops connected to s0 and s1, we apply
# reduction in WxH. All ops connected elsewhere automatically gets
# reduced WxH (because all subsequent states are derived from s0 and s1).
# Note that channel is increased via conv_params for the cell
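        # Illustrative sketch (not from the original source): in a reduction cell,
        # node i has i+2 incoming edges (from s0, s1 and earlier nodes), so the
        # per-edge strides come out as
        #   node 0: [2, 2]
        #   node 1: [2, 2, 1]
        #   node 2: [2, 2, 1, 1]
        # i.e. only the j < 2 inputs coming from s0/s1 are spatially reduced.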
for i in range(node_count):
edges=[]
for j in range(i+2):
op_desc = OpDesc('mixed_op',
params={
'conv': conv_params,
'stride': 2 if reduction and j < 2 else 1
}, in_len=1, trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=[j])
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
| 39.716667
| 82
| 0.600504
|
447c82a046fe4a10194d8aa9ce31d540f6eb91fd
| 1,795
|
py
|
Python
|
config/urls.py
|
vinyasmusic/price-alert
|
8781bb567898ee9a60164994ceedbcf4372d6357
|
[
"MIT"
] | 5
|
2018-08-02T15:00:53.000Z
|
2022-01-15T07:56:27.000Z
|
config/urls.py
|
vinyasmusic/price-alert
|
8781bb567898ee9a60164994ceedbcf4372d6357
|
[
"MIT"
] | 8
|
2018-08-06T18:45:30.000Z
|
2018-08-15T17:28:44.000Z
|
config/urls.py
|
vinyasmusic/price-alert
|
8781bb567898ee9a60164994ceedbcf4372d6357
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/",
TemplateView.as_view(template_name="pages/about.html"),
name="about",
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path(
"users/",
include("stock_price_alert.users.urls", namespace="users"),
),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
path("alerts/", include("stock_price_alert.alerts.urls", namespace="alerts"))
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
if settings.DEBUG:
    # This allows the error pages to be debugged during development: just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 33.240741
| 85
| 0.636212
|
e48af641238353bfcde5e5fa904e733c97b8a6d0
| 21,449
|
py
|
Python
|
intersight/models/iam_account.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/iam_account.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/iam_account.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamAccount(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'domain_groups': 'list[IamDomainGroupRef]',
'end_point_roles': 'list[IamEndPointRoleRef]',
'idpreferences': 'list[IamIdpReferenceRef]',
'idps': 'list[IamIdpRef]',
'name': 'str',
'permissions': 'list[IamPermissionRef]',
'privilege_sets': 'list[IamPrivilegeSetRef]',
'privileges': 'list[IamPrivilegeRef]',
'resource_limits': 'IamResourceLimitsRef',
'roles': 'list[IamRoleRef]',
'session_limits': 'IamSessionLimitsRef',
'status': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'domain_groups': 'DomainGroups',
'end_point_roles': 'EndPointRoles',
'idpreferences': 'Idpreferences',
'idps': 'Idps',
'name': 'Name',
'permissions': 'Permissions',
'privilege_sets': 'PrivilegeSets',
'privileges': 'Privileges',
'resource_limits': 'ResourceLimits',
'roles': 'Roles',
'session_limits': 'SessionLimits',
'status': 'Status'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, domain_groups=None, end_point_roles=None, idpreferences=None, idps=None, name=None, permissions=None, privilege_sets=None, privileges=None, resource_limits=None, roles=None, session_limits=None, status=None):
"""
IamAccount - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._domain_groups = None
self._end_point_roles = None
self._idpreferences = None
self._idps = None
self._name = None
self._permissions = None
self._privilege_sets = None
self._privileges = None
self._resource_limits = None
self._roles = None
self._session_limits = None
self._status = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if domain_groups is not None:
self.domain_groups = domain_groups
if end_point_roles is not None:
self.end_point_roles = end_point_roles
if idpreferences is not None:
self.idpreferences = idpreferences
if idps is not None:
self.idps = idps
if name is not None:
self.name = name
if permissions is not None:
self.permissions = permissions
if privilege_sets is not None:
self.privilege_sets = privilege_sets
if privileges is not None:
self.privileges = privileges
if resource_limits is not None:
self.resource_limits = resource_limits
if roles is not None:
self.roles = roles
if session_limits is not None:
self.session_limits = session_limits
if status is not None:
self.status = status
@property
def account_moid(self):
"""
Gets the account_moid of this IamAccount.
The Account ID for this managed object.
:return: The account_moid of this IamAccount.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this IamAccount.
The Account ID for this managed object.
:param account_moid: The account_moid of this IamAccount.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this IamAccount.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this IamAccount.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this IamAccount.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this IamAccount.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this IamAccount.
The time when this managed object was created.
:return: The create_time of this IamAccount.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this IamAccount.
The time when this managed object was created.
:param create_time: The create_time of this IamAccount.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this IamAccount.
The time when this managed object was last modified.
:return: The mod_time of this IamAccount.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this IamAccount.
The time when this managed object was last modified.
:param mod_time: The mod_time of this IamAccount.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this IamAccount.
A unique identifier of this Managed Object instance.
:return: The moid of this IamAccount.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamAccount.
A unique identifier of this Managed Object instance.
:param moid: The moid of this IamAccount.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamAccount.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this IamAccount.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamAccount.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this IamAccount.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this IamAccount.
An array of owners which represent effective ownership of this object.
:return: The owners of this IamAccount.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this IamAccount.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this IamAccount.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this IamAccount.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this IamAccount.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this IamAccount.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this IamAccount.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this IamAccount.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this IamAccount.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this IamAccount.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IamAccount.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this IamAccount.
The versioning info for this managed object
:return: The version_context of this IamAccount.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this IamAccount.
The versioning info for this managed object
:param version_context: The version_context of this IamAccount.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def domain_groups(self):
"""
Gets the domain_groups of this IamAccount.
:return: The domain_groups of this IamAccount.
:rtype: list[IamDomainGroupRef]
"""
return self._domain_groups
@domain_groups.setter
def domain_groups(self, domain_groups):
"""
Sets the domain_groups of this IamAccount.
:param domain_groups: The domain_groups of this IamAccount.
:type: list[IamDomainGroupRef]
"""
self._domain_groups = domain_groups
@property
def end_point_roles(self):
"""
Gets the end_point_roles of this IamAccount.
User defined end point roles. These roles are assigned to Intersight users to perform end point operations such as GUI/CLI cross launch.
:return: The end_point_roles of this IamAccount.
:rtype: list[IamEndPointRoleRef]
"""
return self._end_point_roles
@end_point_roles.setter
def end_point_roles(self, end_point_roles):
"""
Sets the end_point_roles of this IamAccount.
User defined end point roles. These roles are assigned to Intersight users to perform end point operations such as GUI/CLI cross launch.
:param end_point_roles: The end_point_roles of this IamAccount.
:type: list[IamEndPointRoleRef]
"""
self._end_point_roles = end_point_roles
@property
def idpreferences(self):
"""
Gets the idpreferences of this IamAccount.
System created IdPs configured for authentication in an account. By default Cisco IdP is created upon account creation.
:return: The idpreferences of this IamAccount.
:rtype: list[IamIdpReferenceRef]
"""
return self._idpreferences
@idpreferences.setter
def idpreferences(self, idpreferences):
"""
Sets the idpreferences of this IamAccount.
System created IdPs configured for authentication in an account. By default Cisco IdP is created upon account creation.
:param idpreferences: The idpreferences of this IamAccount.
:type: list[IamIdpReferenceRef]
"""
self._idpreferences = idpreferences
@property
def idps(self):
"""
Gets the idps of this IamAccount.
IdPs configured for authentication in an account.
:return: The idps of this IamAccount.
:rtype: list[IamIdpRef]
"""
return self._idps
@idps.setter
def idps(self, idps):
"""
Sets the idps of this IamAccount.
IdPs configured for authentication in an account.
:param idps: The idps of this IamAccount.
:type: list[IamIdpRef]
"""
self._idps = idps
@property
def name(self):
"""
Gets the name of this IamAccount.
Name of the account. By default, name is same as the MoID of the account.
:return: The name of this IamAccount.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this IamAccount.
Name of the account. By default, name is same as the MoID of the account.
:param name: The name of this IamAccount.
:type: str
"""
self._name = name
@property
def permissions(self):
"""
Gets the permissions of this IamAccount.
System defined permissions within an account. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:return: The permissions of this IamAccount.
:rtype: list[IamPermissionRef]
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this IamAccount.
System defined permissions within an account. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:param permissions: The permissions of this IamAccount.
:type: list[IamPermissionRef]
"""
self._permissions = permissions
@property
def privilege_sets(self):
"""
Gets the privilege_sets of this IamAccount.
User defined privilege sets. Privilege set is a collection of privileges. Privilege sets are assigned to a user using roles.
:return: The privilege_sets of this IamAccount.
:rtype: list[IamPrivilegeSetRef]
"""
return self._privilege_sets
@privilege_sets.setter
def privilege_sets(self, privilege_sets):
"""
Sets the privilege_sets of this IamAccount.
User defined privilege sets. Privilege set is a collection of privileges. Privilege sets are assigned to a user using roles.
:param privilege_sets: The privilege_sets of this IamAccount.
:type: list[IamPrivilegeSetRef]
"""
self._privilege_sets = privilege_sets
@property
def privileges(self):
"""
Gets the privileges of this IamAccount.
Account specific privileges. Privilege represents an action which can be performed in Intersight such as creating server profile, deleting a user etc. Privileges are assigned to a user using privilege sets and roles.
:return: The privileges of this IamAccount.
:rtype: list[IamPrivilegeRef]
"""
return self._privileges
@privileges.setter
def privileges(self, privileges):
"""
Sets the privileges of this IamAccount.
Account specific privileges. Privilege represents an action which can be performed in Intersight such as creating server profile, deleting a user etc. Privileges are assigned to a user using privilege sets and roles.
:param privileges: The privileges of this IamAccount.
:type: list[IamPrivilegeRef]
"""
self._privileges = privileges
@property
def resource_limits(self):
"""
Gets the resource_limits of this IamAccount.
Represents user and user group related configuration limits.
:return: The resource_limits of this IamAccount.
:rtype: IamResourceLimitsRef
"""
return self._resource_limits
@resource_limits.setter
def resource_limits(self, resource_limits):
"""
Sets the resource_limits of this IamAccount.
Represents user and user group related configuration limits.
:param resource_limits: The resource_limits of this IamAccount.
:type: IamResourceLimitsRef
"""
self._resource_limits = resource_limits
@property
def roles(self):
"""
Gets the roles of this IamAccount.
User defined roles created within an account. Role is a collection of privilege sets. Roles are assigned to user using permission object.
:return: The roles of this IamAccount.
:rtype: list[IamRoleRef]
"""
return self._roles
@roles.setter
def roles(self, roles):
"""
Sets the roles of this IamAccount.
User defined roles created within an account. Role is a collection of privilege sets. Roles are assigned to user using permission object.
:param roles: The roles of this IamAccount.
:type: list[IamRoleRef]
"""
self._roles = roles
@property
def session_limits(self):
"""
Gets the session_limits of this IamAccount.
Represents session related configuration limits.
:return: The session_limits of this IamAccount.
:rtype: IamSessionLimitsRef
"""
return self._session_limits
@session_limits.setter
def session_limits(self, session_limits):
"""
Sets the session_limits of this IamAccount.
Represents session related configuration limits.
:param session_limits: The session_limits of this IamAccount.
:type: IamSessionLimitsRef
"""
self._session_limits = session_limits
@property
def status(self):
"""
Gets the status of this IamAccount.
Status of the account. This account remains inactive until a device is claimed to the account.
:return: The status of this IamAccount.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this IamAccount.
Status of the account. This account remains inactive until a device is claimed to the account.
:param status: The status of this IamAccount.
:type: str
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamAccount):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 30.125
| 387
| 0.612429
|
c6e940cd355e6fedbb871a61c9dd430a664dc9e8
| 1,937
|
py
|
Python
|
tareas/2/MataLuis/rond.py
|
Ricardo191998/sistop-2020-1
|
a37d2a880995a870ebfeebe7e098e4aefb3cd179
|
[
"CC-BY-4.0"
] | 13
|
2019-08-07T13:48:14.000Z
|
2021-08-31T23:23:35.000Z
|
tareas/2/MataLuis/rond.py
|
Ricardo191998/sistop-2020-1
|
a37d2a880995a870ebfeebe7e098e4aefb3cd179
|
[
"CC-BY-4.0"
] | 48
|
2019-08-07T03:15:54.000Z
|
2019-11-21T16:53:45.000Z
|
tareas/2/MataLuis/rond.py
|
Ricardo191998/sistop-2020-1
|
a37d2a880995a870ebfeebe7e098e4aefb3cd179
|
[
"CC-BY-4.0"
] | 47
|
2019-08-07T01:44:34.000Z
|
2021-11-05T02:31:25.000Z
|
import threading
import time
import random
resultados = []
procesos = []
ronda = []
mutexActivo = threading.Semaphore(1)
mutexActivo2 = threading.Semaphore(1)
def ejecucion(idProceso,quantum):
tiempo = 0
while(len(ronda)>0):
tiempo += 1
mutexActivo.acquire()
aux = ronda[0]
ronda.pop(0)
mutexActivo.release()
if(aux>0):
aux-=quantum
if(aux>0):
mutexActivo2.acquire()
ronda.append(aux)
mutexActivo2.release()
        if(aux>0):
            # equivalent to the original case chain for len(ronda) == 1..5
            if 1 <= len(ronda) <= 5:
                tiempo += len(ronda) - 1
time.sleep(quantum)
resultados[2][idProceso] = float(tiempo)
resultados[1][idProceso] = round(float(resultados[2][idProceso] + procesos[2][idProceso]),2)
resultados[3][idProceso] = round(float(procesos[2][idProceso]) / float(resultados[1][idProceso]),2)
resultados[4][idProceso] = round(float(resultados[1][idProceso]) / float(procesos[2][idProceso]),2)
def proceso(idProceso,quantum):
time.sleep(procesos[1][idProceso])
ronda.append(int(procesos[2][idProceso]))
ejecucion(idProceso,quantum)
def lanza_hilos(quantum):
for i in range(5):
threading.Thread(target=proceso, args=[i,quantum]).start()
def rr(pro, res, quantum):
global procesos
global resultados
resultados = res
procesos = pro
lanza_hilos(quantum)
time.sleep(15)
if (quantum == 1):
print("R1")
if (quantum == 4):
print("R4")
for i in range(len(resultados)):
print("Proceso: %s -> T:%d E:%d P:%d R:%d" %(resultados[0][i],resultados[1][i],resultados[2][i],resultados[3][i],resultados[4][i]))
| 28.910448
| 139
| 0.568405
|
2582d59db0f49fc22215e4902b0cf2fac50ba6f5
| 291
|
py
|
Python
|
mongo_test/setting.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
mongo_test/setting.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
mongo_test/setting.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
# settings.py
from dotenv import load_dotenv
import os
load_dotenv()
# OR, the same with increased verbosity
load_dotenv(verbose=True)
# OR, explicitly providing path to '.env'
from pathlib import Path # Python 3.6+ only
env_path = Path("..") / ".env"
load_dotenv(dotenv_path=env_path)
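# A loaded value can then be read through os (which is why it is imported above);
# the MONGO_URI name here is just an assumed example of an entry in the .env file:
#   mongo_uri = os.getenv("MONGO_URI")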
| 19.4
| 44
| 0.745704
|
bf312702c02dc3fe8efc4729b48290ed13dd30b8
| 3,525
|
py
|
Python
|
sdk/python/pulumi_aws_native/greengrass/get_core_definition.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/greengrass/get_core_definition.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/greengrass/get_core_definition.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetCoreDefinitionResult',
'AwaitableGetCoreDefinitionResult',
'get_core_definition',
'get_core_definition_output',
]
@pulumi.output_type
class GetCoreDefinitionResult:
def __init__(__self__, arn=None, id=None, latest_version_arn=None, name=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if latest_version_arn and not isinstance(latest_version_arn, str):
raise TypeError("Expected argument 'latest_version_arn' to be a str")
pulumi.set(__self__, "latest_version_arn", latest_version_arn)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="latestVersionArn")
def latest_version_arn(self) -> Optional[str]:
return pulumi.get(self, "latest_version_arn")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Optional[Any]:
return pulumi.get(self, "tags")
class AwaitableGetCoreDefinitionResult(GetCoreDefinitionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCoreDefinitionResult(
arn=self.arn,
id=self.id,
latest_version_arn=self.latest_version_arn,
name=self.name,
tags=self.tags)
def get_core_definition(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCoreDefinitionResult:
"""
Resource Type definition for AWS::Greengrass::CoreDefinition
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:greengrass:getCoreDefinition', __args__, opts=opts, typ=GetCoreDefinitionResult).value
return AwaitableGetCoreDefinitionResult(
arn=__ret__.arn,
id=__ret__.id,
latest_version_arn=__ret__.latest_version_arn,
name=__ret__.name,
tags=__ret__.tags)
@_utilities.lift_output_func(get_core_definition)
def get_core_definition_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCoreDefinitionResult]:
"""
Resource Type definition for AWS::Greengrass::CoreDefinition
"""
...
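# --- Illustrative usage sketch (not part of the generated module). Inside a Pulumi
# --- program the data source could be queried roughly as follows; the definition id
# --- below is a made-up placeholder.
#
#   import pulumi
#   import pulumi_aws_native as aws_native
#
#   core_def = aws_native.greengrass.get_core_definition(id="example-core-definition-id")
#   pulumi.export("coreDefinitionArn", core_def.arn)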
| 33.894231
| 134
| 0.660993
|
59054904727bc257bf1691d8b5fc19aafde79212
| 655
|
py
|
Python
|
ROS2_Examples/1-to-1/Object_Oriented/master.py
|
roboscienceorg/robolib
|
32a8d5e3ae4dd693cdf6c094050c9d19861a1331
|
[
"ECL-2.0"
] | 1
|
2019-02-27T01:12:44.000Z
|
2019-02-27T01:12:44.000Z
|
ROS2_Examples/1-to-1/Object_Oriented/master.py
|
roboscienceorg/robolib
|
32a8d5e3ae4dd693cdf6c094050c9d19861a1331
|
[
"ECL-2.0"
] | 31
|
2018-09-28T19:29:48.000Z
|
2019-02-24T15:03:52.000Z
|
ROS2_Examples/1-to-1/Object_Oriented/master.py
|
roboscienceorg/robolib
|
32a8d5e3ae4dd693cdf6c094050c9d19861a1331
|
[
"ECL-2.0"
] | null | null | null |
import rclpy
from subscriber import SubscriberExample
from publisher import PublisherExample
from rclpy.executors import SingleThreadedExecutor, MultiThreadedExecutor
# Initialize rclpy
rclpy.init(args=None)
# This establishes an executor which will run all the nodes
# In this case they will all be run on one thread
exec = SingleThreadedExecutor()
# This builds the nodes
pub = PublisherExample(topic_name="/Topic1")
sub = SubscriberExample(topic_name="/Topic1")
# Add them to the executor to be executed
exec.add_node(pub)
exec.add_node(sub)
# Actually run the nodes
exec.spin()
# Teardown
pub.destroy_node()
sub.destroy_node()
rclpy.shutdown()
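# A hedged variant (not part of the original example): MultiThreadedExecutor is imported
# above but unused; spinning the same nodes across several threads would look roughly
# like this (num_threads is an optional argument of rclpy's MultiThreadedExecutor).
#
#   exec = MultiThreadedExecutor(num_threads=2)
#   exec.add_node(pub)
#   exec.add_node(sub)
#   exec.spin()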
| 22.586207
| 73
| 0.796947
|
623ab1c8c6206a8c4696b47d2702071afc89692f
| 1,021
|
py
|
Python
|
tests/module/input_module/test_ultrasonic.py
|
yjm07/pymodi
|
207589d083d3a05061a2934cbc1c5c945bd0e108
|
[
"MIT"
] | 13
|
2020-05-19T02:34:05.000Z
|
2022-02-18T06:44:34.000Z
|
tests/module/input_module/test_ultrasonic.py
|
yjm07/pymodi
|
207589d083d3a05061a2934cbc1c5c945bd0e108
|
[
"MIT"
] | 43
|
2020-06-21T12:39:40.000Z
|
2022-03-14T03:29:37.000Z
|
tests/module/input_module/test_ultrasonic.py
|
yjm07/pymodi
|
207589d083d3a05061a2934cbc1c5c945bd0e108
|
[
"MIT"
] | 23
|
2020-05-18T06:34:54.000Z
|
2021-02-25T11:50:17.000Z
|
import unittest
from modi.module.input_module.ultrasonic import Ultrasonic
from modi.util.message_util import parse_message
from modi.util.miscellaneous import MockConn
class TestUltrasonic(unittest.TestCase):
"""Tests for 'Ultrasonic' class."""
def setUp(self):
"""Set up test fixtures, if any."""
self.conn = MockConn()
mock_args = (-1, -1, self.conn)
self.ultrasonic = Ultrasonic(*mock_args)
def tearDown(self):
"""Tear down test fixtures, if any."""
del self.ultrasonic
def test_get_distance(self):
"""Test get_distance method."""
_ = self.ultrasonic.distance
self.assertEqual(
self.conn.send_list[0],
parse_message(
0x03, 0, -1,
(
Ultrasonic.DISTANCE,
None,
self.ultrasonic.prop_samp_freq,
None
)
)
)
if __name__ == '__main__':
unittest.main()
| 25.525
| 58
| 0.558276
|
d3b770f53a84962d7fc7652acce75864486ad377
| 331
|
py
|
Python
|
monero_glue/messages/MoneroKeyImageSyncFinalRequest.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 20
|
2018-04-05T22:06:10.000Z
|
2021-09-18T10:43:44.000Z
|
monero_glue/messages/MoneroKeyImageSyncFinalRequest.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | null | null | null |
monero_glue/messages/MoneroKeyImageSyncFinalRequest.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 5
|
2018-08-06T15:06:04.000Z
|
2021-07-16T01:58:43.000Z
|
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class MoneroKeyImageSyncFinalRequest(p.MessageType):
MESSAGE_WIRE_TYPE = 534
| 22.066667
| 59
| 0.700906
|
f817d2de543bae6fa05573655513a12633e5bd22
| 1,065
|
py
|
Python
|
python-package/lets_plot/_kbridge.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
python-package/lets_plot/_kbridge.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
python-package/lets_plot/_kbridge.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
# noinspection PyUnresolvedReferences
from typing import Dict
import lets_plot_kotlin_bridge
from ._type_utils import standardize_dict
def _generate_dynamic_display_html(plot_spec: Dict) -> str:
plot_spec = _standardize_plot_spec(plot_spec)
return lets_plot_kotlin_bridge.generate_html(plot_spec)
def _generate_svg(plot_spec: Dict) -> str:
plot_spec = _standardize_plot_spec(plot_spec)
return lets_plot_kotlin_bridge.export_svg(plot_spec)
def _generate_static_html_page(plot_spec: Dict, version: str, iframe: bool) -> str:
plot_spec = _standardize_plot_spec(plot_spec)
return lets_plot_kotlin_bridge.export_html(plot_spec, version, iframe)
def _standardize_plot_spec(plot_spec: Dict) -> Dict:
"""
:param plot_spec: dict
"""
if not isinstance(plot_spec, dict):
raise ValueError("dict expected but was {}".format(type(plot_spec)))
return standardize_dict(plot_spec)
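# --- Illustrative sketch (not part of the original module): the helpers above all take a
# --- plot-spec dictionary; a minimal, purely hypothetical call could look like this.
#
#   spec = {'kind': 'plot', 'data': {}, 'mapping': {}, 'layers': []}
#   html = _generate_static_html_page(spec, version='x.y.z', iframe=False)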
| 30.428571
| 96
| 0.768075
|
c5be0b5a92a06737f6f5c790992547ae1c43ee99
| 918
|
py
|
Python
|
mi/dataset/driver/adcpt_acfgm/dcl/pd0/test/test_adcpt_acfgm_dcl_pd0_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 1
|
2018-09-14T23:28:29.000Z
|
2018-09-14T23:28:29.000Z
|
mi/dataset/driver/adcpt_acfgm/dcl/pd0/test/test_adcpt_acfgm_dcl_pd0_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 33
|
2017-04-25T19:53:45.000Z
|
2022-03-18T17:42:18.000Z
|
mi/dataset/driver/adcpt_acfgm/dcl/pd0/test/test_adcpt_acfgm_dcl_pd0_telemetered_driver.py
|
cdobs/mi-instrument
|
99f9322a4afabc5dff9b0fad12166075efce838c
|
[
"BSD-2-Clause"
] | 31
|
2015-03-04T01:01:09.000Z
|
2020-10-28T14:42:12.000Z
|
#!/usr/bin/env python
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.adcpt_acfgm.dcl.pd0.adcpt_acfgm_dcl_pd0_telemetered_driver import parse
from mi.dataset.driver.adcpt_acfgm.dcl.pd0.resource import RESOURCE_PATH
__author__ = 'Jeff Roy'
log = get_logger()
class SampleTest(unittest.TestCase):
def test_one(self):
source_file_path = os.path.join(RESOURCE_PATH, '20140424.adcpt.log')
particle_data_handler = ParticleDataHandler()
particle_data_handler = parse(None, source_file_path, particle_data_handler)
log.debug("SAMPLES: %s", particle_data_handler._samples)
log.debug("FAILURE: %s", particle_data_handler._failure)
self.assertEquals(particle_data_handler._failure, False)
if __name__ == '__main__':
test = SampleTest('test_one')
test.test_one()
| 27
| 94
| 0.75817
|
227c9d6cd693cf9184e599158c9a623766080129
| 3,837
|
py
|
Python
|
pyccel/parser/syntax/himi.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
pyccel/parser/syntax/himi.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
pyccel/parser/syntax/himi.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
This module contains the syntax associated with the types.tx grammar
"""
from os.path import join, dirname
from sympy.utilities.iterables import iterable
from pyccel.parser.syntax.basic import BasicStmt
from pyccel.ast import DataType, datatype
from pyccel.ast import Variable
from pyccel.ast import VariableType, FunctionType
def _construct_dtype(dtype):
"""."""
if isinstance(dtype, FunctionTypeStmt):
return dtype.expr
else:
return datatype(str(dtype))
class HiMi(object):
"""Class for HiMi syntax."""
def __init__(self, **kwargs):
"""
Constructor for HiMi.
"""
self.statements = kwargs.pop('statements', [])
class DeclareTypeStmt(BasicStmt):
"""."""
def __init__(self, **kwargs):
"""
"""
self.name = kwargs.pop('name')
self.dtype = kwargs.pop('dtype')
super(DeclareTypeStmt, self).__init__(**kwargs)
@property
def expr(self):
name = str(self.name)
dtype = _construct_dtype(self.dtype)
return VariableType(dtype, name)
class DeclareVariableStmt(BasicStmt):
"""."""
def __init__(self, **kwargs):
"""
"""
self.name = kwargs.pop('name')
self.dtype = kwargs.pop('dtype')
super(DeclareVariableStmt, self).__init__(**kwargs)
@property
def expr(self):
name = str(self.name)
dtype = datatype(str(self.dtype))
return Variable(dtype, name)
class DeclareFunctionStmt(BasicStmt):
"""."""
def __init__(self, **kwargs):
"""
"""
self.name = kwargs.pop('name')
self.dtype = kwargs.pop('dtype')
super(DeclareFunctionStmt, self).__init__(**kwargs)
@property
def expr(self):
name = str(self.name)
dtype = _construct_dtype(self.dtype)
# TODO must return a TypedFunction
return Variable(dtype, name)
class FunctionTypeStmt(BasicStmt):
"""."""
def __init__(self, **kwargs):
"""
"""
self.domains = kwargs.pop('domains')
super(FunctionTypeStmt, self).__init__(**kwargs)
@property
def expr(self):
domains = []
for d in self.domains:
domains.append(datatype(str(d)))
return FunctionType(domains)
#################################################
#################################################
# whenever a new rule is added in the grammar, we must update the following
# lists.
types_classes = [HiMi,
FunctionTypeStmt,
DeclareTypeStmt,
DeclareFunctionStmt,
DeclareVariableStmt]
def parse(filename=None, stmts=None, debug=False):
this_folder = dirname(__file__)
# Get meta-model from language description
grammar = join(this_folder, '../grammar/himi.tx')
from textx.metamodel import metamodel_from_file
meta = metamodel_from_file(grammar, debug=debug, classes=types_classes)
# Instantiate model
if filename:
model = meta.model_from_file(filename)
elif stmts:
model = meta.model_from_str(stmts)
else:
raise ValueError('Expecting a filename or a string')
stmts = []
for stmt in model.statements:
e = stmt.expr
stmts.append(e)
if len(stmts) == 1:
return stmts[0]
else:
return stmts
######################
if __name__ == '__main__':
# print (parse(stmts='T = int'))
# print (parse(stmts='x : int'))
# print (parse(stmts='f :: int -> double'))
# print (parse(stmts='T = int -> double'))
# print (parse(stmts='T = int -> double -> double'))
# print (parse(stmts='int -> double')) # TODO to be removed. only for testing
print (parse(stmts='int -> double -> double')) # TODO to be removed. only for testing
| 24.132075
| 89
| 0.589002
|
ae38d107b508e31383a907513fa74563ac3aa1e0
| 3,704
|
py
|
Python
|
venv/Lib/site-packages/networkx/algorithms/components/weakly_connected.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
venv/Lib/site-packages/networkx/algorithms/components/weakly_connected.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 25
|
2020-11-16T15:36:41.000Z
|
2021-06-01T05:15:31.000Z
|
venv/Lib/site-packages/networkx/algorithms/components/weakly_connected.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
"""Weakly connected components."""
import networkx as nx
from networkx.utils.decorators import not_implemented_for
__all__ = [
"number_weakly_connected_components",
"weakly_connected_components",
"is_weakly_connected",
]
@not_implemented_for("undirected")
def weakly_connected_components(G):
"""Generate weakly connected components of G.
Parameters
----------
G : NetworkX graph
A directed graph
Returns
-------
comp : generator of sets
A generator of sets of nodes, one for each weakly connected
component of G.
Raises
------
NetworkXNotImplemented
If G is undirected.
Examples
--------
Generate a sorted list of weakly connected components, largest first.
>>> G = nx.path_graph(4, create_using=nx.DiGraph())
>>> nx.add_path(G, [10, 11, 12])
>>> [
... len(c)
... for c in sorted(nx.weakly_connected_components(G), key=len, reverse=True)
... ]
[4, 3]
If you only want the largest component, it's more efficient to
use max instead of sort:
>>> largest_cc = max(nx.weakly_connected_components(G), key=len)
See Also
--------
connected_components
strongly_connected_components
Notes
-----
For directed graphs only.
"""
seen = set()
for v in G:
if v not in seen:
c = set(_plain_bfs(G, v))
yield c
seen.update(c)
@not_implemented_for("undirected")
def number_weakly_connected_components(G):
"""Returns the number of weakly connected components in G.
Parameters
----------
G : NetworkX graph
A directed graph.
Returns
-------
n : integer
Number of weakly connected components
Raises
------
NetworkXNotImplemented
If G is undirected.
See Also
--------
weakly_connected_components
number_connected_components
number_strongly_connected_components
Notes
-----
For directed graphs only.
"""
return sum(1 for wcc in weakly_connected_components(G))
@not_implemented_for("undirected")
def is_weakly_connected(G):
"""Test directed graph for weak connectivity.
A directed graph is weakly connected if and only if the graph
is connected when the direction of the edge between nodes is ignored.
Note that if a graph is strongly connected (i.e. the graph is connected
even when we account for directionality), it is by definition weakly
connected as well.
Parameters
----------
G : NetworkX Graph
A directed graph.
Returns
-------
connected : bool
True if the graph is weakly connected, False otherwise.
Raises
------
NetworkXNotImplemented
If G is undirected.
See Also
--------
is_strongly_connected
is_semiconnected
is_connected
is_biconnected
weakly_connected_components
Notes
-----
For directed graphs only.
"""
if len(G) == 0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph."""
)
return len(list(weakly_connected_components(G))[0]) == len(G)
def _plain_bfs(G, source):
"""A fast BFS node generator
The direction of the edge between nodes is ignored.
For directed graphs only.
"""
Gsucc = G.succ
Gpred = G.pred
seen = set()
nextlevel = {source}
while nextlevel:
thislevel = nextlevel
nextlevel = set()
for v in thislevel:
if v not in seen:
yield v
seen.add(v)
nextlevel.update(Gsucc[v])
nextlevel.update(Gpred[v])
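# Hedged usage note (added for clarity, not part of the library module): the public
# helpers above combine roughly as in this interactive sketch.
#
#   >>> G = nx.DiGraph([(1, 2), (3, 4)])
#   >>> nx.number_weakly_connected_components(G)
#   2
#   >>> nx.is_weakly_connected(G)
#   False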
| 21.91716
| 85
| 0.616631
|
fa0e1edcab64e49d443e456cd7a5b868636cea57
| 8,549
|
py
|
Python
|
django/db/backends/mysql/operations.py
|
jacinda/django
|
307acc745a4e655c35db96f96ceb4b87597dee49
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/backends/mysql/operations.py
|
jacinda/django
|
307acc745a4e655c35db96f96ceb4b87597dee49
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/backends/mysql/operations.py
|
jacinda/django
|
307acc745a4e655c35db96f96ceb4b87597dee49
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
PositiveSmallIntegerField=(0, 4294967295),
PositiveIntegerField=(0, 18446744073709551615),
)
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
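# Illustrative note (added for clarity, not in the original source): for
# lookup_type='month' the pieces join into '%%Y-%%m-01 00:00:00', so the generated SQL
# is roughly CAST(DATE_FORMAT(col, '%Y-%m-01 00:00:00') AS DATETIME) once the doubled
# percents are consumed by parameter substitution.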
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
sql = "DAYOFWEEK(%s)" % field_name
else:
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
params = [tzname]
else:
params = []
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql, params
def date_interval_sql(self, timedelta):
return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
timedelta.days, timedelta.seconds, timedelta.microseconds), []
def format_for_duration_arithmetic(self, sql):
if self.connection.features.supports_microsecond_precision:
return 'INTERVAL %s MICROSECOND' % sql
else:
return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return six.text_type(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return six.text_type(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def combine_expression(self, connector, sub_expressions):
"""
MySQL requires special cases for ^ operators in query expressions
"""
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_textfield_value(self, value, expression, connection, context):
if value is not None:
value = force_text(value)
return value
def convert_booleanfield_value(self, value, expression, connection, context):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
| 39.762791
| 113
| 0.611066
|
e17a2eb5f61184dcf12ca8dde78566c532e34122
| 1,332
|
py
|
Python
|
Smartphone/smartphone/scripts/rotationMatrix.py
|
marcodemutti/SofAr-project
|
6e807072f8567490cf5b4ebcf81d37ab5e8a4109
|
[
"Apache-2.0"
] | 1
|
2021-10-04T18:24:21.000Z
|
2021-10-04T18:24:21.000Z
|
Smartphone/smartphone/scripts/rotationMatrix.py
|
marcodemutti/SofAr-project
|
6e807072f8567490cf5b4ebcf81d37ab5e8a4109
|
[
"Apache-2.0"
] | 1
|
2020-09-16T19:57:43.000Z
|
2020-09-16T19:57:43.000Z
|
Smartphone/smartphone/scripts/rotationMatrix.py
|
andreabradpitto/SofAr-project
|
6e807072f8567490cf5b4ebcf81d37ab5e8a4109
|
[
"Apache-2.0"
] | 3
|
2020-10-29T14:07:58.000Z
|
2020-11-26T11:22:25.000Z
|
#!/usr/bin/env python
"""
Documentation for rotationMatrix.py
This file consists of a single function whose only aim is to provide rotation-matrix transformations
"""
import numpy as np
import math
# Compute the rotation matrix R, starting from the angles given in Euler representation
def eulerAnglesToRotationMatrix(angles): # angles [roll, pitch, yaw]
"""!
Function that transforms euler angle coordinates into the rotation matrix
@param angles euler angles, i.e. orientation with respect to X, Y, Z axes
@returns rotation matrix
"""
R_x = np.array([[1, 0, 0],
[0, math.cos(angles[0]), math.sin(angles[0])],
[0, -math.sin(angles[0]), math.cos(angles[0])]
])
R_y = np.array([[math.cos(angles[1]), 0, -math.sin(angles[1])],
[0, 1, 0],
[math.sin(angles[1]), 0, math.cos(angles[1])]
])
R_z = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0],
[-math.sin(angles[2]), math.cos(angles[2]), 0],
[0, 0, 1]
])
R = np.dot(R_x, np.dot(R_y, R_z))
return R
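# A hedged usage example (not part of the original script): build a rotation matrix for
# sample roll/pitch/yaw values and check that it is orthogonal (R dot R.T is the identity).
if __name__ == "__main__":
    R_demo = eulerAnglesToRotationMatrix([0.1, 0.2, 0.3])
    print(R_demo)
    print(np.allclose(np.dot(R_demo, R_demo.T), np.eye(3)))  # expected: True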
| 36
| 108
| 0.499249
|
a3aed40e5c1a79ed2b07f56c87bc3d9f57c4696b
| 6,831
|
py
|
Python
|
tests/test_xls2xml/test_XMLHandler.py
|
ProlificsICP/ICP
|
36a72c72b349cc7bdc62dd49236569619e952548
|
[
"Apache-2.0"
] | null | null | null |
tests/test_xls2xml/test_XMLHandler.py
|
ProlificsICP/ICP
|
36a72c72b349cc7bdc62dd49236569619e952548
|
[
"Apache-2.0"
] | null | null | null |
tests/test_xls2xml/test_XMLHandler.py
|
ProlificsICP/ICP
|
36a72c72b349cc7bdc62dd49236569619e952548
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import unittest
from scripts.XMLHandler import XMLHandler
import scripts.DialogData as Dialog
'''
Created on Jan 16, 2018
@author: alukes
'''
DOMAIN = u'G_TEST'
class XMLHandlerTest(unittest.TestCase):
def setUp(self):
self._handler = XMLHandler()
def tearDown(self):
self._handler = None
def test_positive_convertMainChannelOutputToXml(self):
""" Convert Dialog data with just channel outputs into XML. """
labels = {}
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_①', DOMAIN)
intentData.addIntentAlternative(u'I need help.')
intentData.addIntentAlternative(u'Can you help me?')
intentData.addRawOutput([u'Sure.①'], labels)
intentData.addRawOutput([u'Sorry, cannot help you.'], labels)
intentData.addRawOutput([u'Let us see, what is your problem?'], labels)
intents = [u'HELP_①']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_①"><condition>#HELP_①</condition><output><textValues><values>Sure.①</values>'
u'<values>Sorry, cannot help you.</values><values>Let us see, what is your problem?</values>'
u'</textValues></output></node></nodes>')
self.assertEquals(actual, expected)
def test_positive_convertMultipleChannelOutputsToXml(self):
""" Convert Dialog data with just channel outputs into XML. """
labels = {}
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_①', DOMAIN)
intentData.addIntentAlternative(u'I need help.')
intentData.addIntentAlternative(u'Can you help me?')
intentData.addRawOutput([u'Sure.①'], labels)
intentData.addRawOutput([u'Sorry, cannot help you.%%260 seconds'], labels)
intentData.addRawOutput([u'Let us see, what is your problem?%%7image.png%%8some URL'], labels)
intents = [u'HELP_①']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_①"><condition>#HELP_①</condition><output><textValues><values>Sure.①</values>'
u'<values>Sorry, cannot help you.</values><values>Let us see, what is your problem?</values>'
u'</textValues><url>some URL</url><timeout>60 seconds</timeout><graphics>image.png</graphics>'
u'</output></node></nodes>')
self.assertEquals(actual, expected)
def test_positive_convertContextToXml(self):
""" Convert Dialog data with just context into XML. """
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_①', DOMAIN)
intentData.addChannelOutput(u'1', u'Hi.')
intentData.addVariable(u'var1', u'①')
intentData.addVariable(u'var2', u'value')
intents = [u'HELP_①']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_①"><condition>#HELP_①</condition><output><textValues><values>Hi.</values></textValues></output><context><var1>①</var1><var2>value</var2>'
u'</context></node></nodes>')
self.assertEquals(actual, expected)
def test_positive_convertGotoToXml(self):
""" Convert Dialog data with Goto and channel into XML. """
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_①', DOMAIN)
intentData.addChannelOutput(u'1', u'Hi.')
intentData.setJumpTo(u'label①', u'condition')
intents = [u'HELP_①']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_①"><condition>#HELP_①</condition><output><textValues><values>Hi.</values></textValues></output><goto><target>label①</target>'
u'<selector>condition</selector></goto></node></nodes>')
self.assertEquals(actual, expected)
def test_positive_convertDialogDataToXml(self):
""" Convert Dialog data containing all types of segments into XML. """
labels = {}
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_①', DOMAIN)
intentData.addIntentAlternative(u'I need help.')
intentData.addIntentAlternative(u'Can you help me?')
intentData.addRawOutput([u'Sure.①'], labels)
intentData.addRawOutput([u'Sorry, cannot help you.%%360 seconds'], labels)
intentData.addRawOutput([u'Let us see, what is your problem?%%7image.jpg%%8my_URL'], labels)
intentData.addVariable(u'var1', u'some ①')
intentData.addVariable(u'var2', u'other value')
intentData.setJumpTo(u'label_①', u'user input')
intents = [u'HELP_①']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_①"><condition>#HELP_①</condition><output><textValues><values>Sure.①</values>'
u'<values>Sorry, cannot help you.</values><values>Let us see, what is your problem?</values>'
u'</textValues><url>my_URL</url><sound>60 seconds</sound><graphics>image.jpg</graphics></output>'
u'<context><var1>some ①</var1><var2>other value</var2></context><goto><target>label_①</target>'
u'<selector>user input</selector></goto></node></nodes>')
self.assertEquals(actual, expected)
def test_positive_convertDialogDataSelectedIntentsOnly(self):
""" Convert Dialog data with just channel outputs into XML. """
labels = {}
dialogData = Dialog.DialogData()
intentData = dialogData.getIntentData(u'HELP_1', DOMAIN)
intentData.addIntentAlternative(u'I need help.')
intentData.addRawOutput([u'Sure.'], labels)
intentData = dialogData.getIntentData(u'HELP_2', DOMAIN)
intentData.addIntentAlternative(u'Can you help me?')
intentData.addRawOutput([u'Sorry, cannot help you.'], labels)
intents = [u'HELP_2']
xmlDocument = self._handler.convertDialogData(dialogData, intents)
actual = self._handler.printXml(xmlDocument, False)
expected = (u'<nodes><node name="HELP_2"><condition>#HELP_2</condition><output><textValues>'
u'<values>Sorry, cannot help you.</values></textValues></output></node></nodes>')
print actual
print expected
self.assertEquals(actual, expected)
if __name__ == "__main__":
unittest.main()
| 47.4375
| 183
| 0.660957
|
97326c6db01c5acb62163475eb62b59d8f80bca4
| 250
|
py
|
Python
|
utils/__init__.py
|
woo1/CIHP_PGN
|
7867ca4224e4743ea9384bf24761fda3caddbce8
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
woo1/CIHP_PGN
|
7867ca4224e4743ea9384bf24761fda3caddbce8
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
woo1/CIHP_PGN
|
7867ca4224e4743ea9384bf24761fda3caddbce8
|
[
"MIT"
] | null | null | null |
from .model_pgn import PGNModel
from .utils import decode_labels, inv_preprocess, prepare_label, save, load
from .ops import conv2d, max_pool, linear
from .image_reader import ImageReader, ImageReaderInfer
from .image_reader_pgn import ImageReaderPGN
| 50
| 75
| 0.848
|
b7e7c62963b78889c5d9ad71c7e2be2c188c9d30
| 2,006
|
py
|
Python
|
profiles_api/models.py
|
homabakhtiarian/profiles-rest-api
|
0f05aae98cecd9f7ae8c78e794233133c6db1767
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
homabakhtiarian/profiles-rest-api
|
0f05aae98cecd9f7ae8c78e794233133c6db1767
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
homabakhtiarian/profiles-rest-api
|
0f05aae98cecd9f7ae8c78e794233133c6db1767
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('Users must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name',]
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text
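# --- Illustrative sketch (not part of the original app): with the custom manager above,
# --- users are created through it, e.g. from a Django shell or a test. The email, name
# --- and password values below are placeholders.
#
#   user = UserProfile.objects.create_user(
#       email='test@example.com', name='Test User', password='change-me')
#   admin = UserProfile.objects.create_superuser(
#       email='admin@example.com', name='Admin', password='change-me')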
| 29.072464
| 90
| 0.672981
|
33a0889d3743e2dc314b2b6bb173a50362e1079c
| 10,082
|
py
|
Python
|
sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_restores_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 2
|
2021-09-07T18:30:33.000Z
|
2021-11-23T02:50:57.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_restores_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | 4
|
2021-10-06T16:39:52.000Z
|
2021-11-18T18:33:37.000Z
|
sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/aio/operations/_restores_operations.py
|
praveenkuttappan/azure-sdk-for-python
|
4b79413667b7539750a6c7dde15737013a3d4bd5
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RestoresOperations:
"""RestoresOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _trigger_initial(
self,
vault_name: str,
resource_group_name: str,
fabric_name: str,
container_name: str,
protected_item_name: str,
recovery_point_id: str,
parameters: "_models.RestoreRequestResource",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._trigger_initial.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'recoveryPointId': self._serialize.url("recovery_point_id", recovery_point_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RestoreRequestResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_trigger_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/recoveryPoints/{recoveryPointId}/restore'} # type: ignore
async def begin_trigger(
self,
vault_name: str,
resource_group_name: str,
fabric_name: str,
container_name: str,
protected_item_name: str,
recovery_point_id: str,
parameters: "_models.RestoreRequestResource",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Restores the specified backed up data. This is an asynchronous operation. To know the status of
this API call, use
GetProtectedItemOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the backed up items.
:type fabric_name: str
:param container_name: Container name associated with the backed up items.
:type container_name: str
:param protected_item_name: Backed up item to be restored.
:type protected_item_name: str
:param recovery_point_id: Recovery point ID which represents the backed up data to be restored.
:type recovery_point_id: str
:param parameters: resource restore request.
:type parameters: ~azure.mgmt.recoveryservicesbackup.models.RestoreRequestResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._trigger_initial(
vault_name=vault_name,
resource_group_name=resource_group_name,
fabric_name=fabric_name,
container_name=container_name,
protected_item_name=protected_item_name,
recovery_point_id=recovery_point_id,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
'protectedItemName': self._serialize.url("protected_item_name", protected_item_name, 'str'),
'recoveryPointId': self._serialize.url("recovery_point_id", recovery_point_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_trigger.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/recoveryPoints/{recoveryPointId}/restore'} # type: ignore
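# --- Hedged usage sketch (not part of the generated client code): begin_trigger is an
# --- async long-running operation; called through an async management client it would
# --- look roughly like this. The client variable and all argument values are assumptions.
#
#   poller = await client.restores.begin_trigger(
#       vault_name="myVault",
#       resource_group_name="myResourceGroup",
#       fabric_name="Azure",
#       container_name="myContainer",
#       protected_item_name="myProtectedItem",
#       recovery_point_id="12345678901234",
#       parameters=restore_request,
#   )
#   await poller.result()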
| 52.238342
| 321
| 0.683991
|
0565b98d8e4505a8011cb36876cbbd6b55139f9b
| 42,239
|
py
|
Python
|
test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py
|
lujiefsi/cloudstack
|
74a7cbf753537928265c1f36afe086d69ad44e90
|
[
"Apache-2.0"
] | 1
|
2019-04-09T20:58:27.000Z
|
2019-04-09T20:58:27.000Z
|
test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py
|
lujiefsi/cloudstack
|
74a7cbf753537928265c1f36afe086d69ad44e90
|
[
"Apache-2.0"
] | 1
|
2020-12-16T12:07:40.000Z
|
2020-12-17T13:41:37.000Z
|
test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py
|
lujiefsi/cloudstack
|
74a7cbf753537928265c1f36afe086d69ad44e90
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests of acquiring IPs in multiple subnets for isolated network or vpc
"""
from nose.plugins.attrib import attr
from marvin.cloudstackAPI import rebootRouter
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
from marvin.lib.utils import (validateList,
get_host_credentials,
get_process_status,
cleanup_resources)
from marvin.lib.base import (Account,
Domain,
VirtualMachine,
ServiceOffering,
Zone,
Network,
NetworkOffering,
VPC,
VpcOffering,
StaticNATRule,
NATRule,
PublicIPAddress,
PublicIpRange)
from marvin.lib.common import (get_domain,
get_zone,
get_free_vlan,
get_template,
list_hosts,
list_routers)
import logging
import random
class TestMultiplePublicIpSubnets(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestMultiplePublicIpSubnets,
cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.zone = Zone(zone.__dict__)
cls.template = get_template(cls.apiclient, cls.zone.id)
cls._cleanup = []
cls.skip = False
if str(cls.zone.securitygroupsenabled) == "True":
cls.skip = True
return
cls.hypervisor = cls.testClient.getHypervisorInfo()
if cls.hypervisor.lower() not in ['kvm']:
cls.skip = True
return
cls.logger = logging.getLogger("TestMultiplePublicIpSubnets")
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
# Create small service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offerings"]["small"]
)
cls._cleanup.append(cls.service_offering)
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(cls):
if cls.skip:
cls.skipTest("Test can be run only on advanced zone and KVM hypervisor")
cls.apiclient = cls.testClient.getApiClient()
cls.cleanup = []
return
def tearDown(cls):
try:
cleanup_resources(cls.apiclient, cls.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def get_router(self, router_id):
routers = list_routers(
self.apiclient,
id=router_id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(
len(routers),
0,
"Check list router response"
)
return routers[0]
def get_routers(self, network_id):
routers = list_routers(
self.apiclient,
networkid=network_id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(
len(routers),
0,
"Check list router response"
)
return routers
def get_vpc_routers(self, vpc_id):
routers = list_routers(
self.apiclient,
vpcid=vpc_id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"Check for list routers response return valid data"
)
self.assertNotEqual(
len(routers),
0,
"Check list router response"
)
return routers
def get_router_host(self, router):
self.assertEqual(
router.state,
'Running',
"Check list router response for router state"
)
hosts = list_hosts(
self.apiclient,
id=router.hostid)
self.assertEqual(
isinstance(hosts, list),
True,
"Check for list hosts response return valid data")
host = hosts[0]
if host.hypervisor.lower() not in "kvm":
return
host.user, host.password = get_host_credentials(self.config, host.ipaddress)
host.port=22
return host
def get_router_ips(self, router):
guestIp = None
controlIp = None
sourcenatIp = None
for nic in router.nic:
if guestIp is None and nic.traffictype == "Guest":
guestIp = nic.ipaddress
elif nic.traffictype == "Control":
controlIp = nic.ipaddress
elif sourcenatIp is None and nic.traffictype == "Public":
sourcenatIp = nic.ipaddress
return guestIp, controlIp, sourcenatIp
def verify_router_publicnic_state(self, router, host, publicNics):
command = '/opt/cloud/bin/checkrouter.sh | cut -d ":" -f2 |tr -d " "'
self.logger.debug("Executing command '%s'" % command)
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertTrue(len(result) > 0, "Cannot get router %s redundant state" % router.name)
redundant_state = result[0]
self.logger.debug("router %s redudnant state is %s" % (router.name, redundant_state))
if redundant_state == "FAULT":
self.logger.debug("Skip as redundant_state is %s" % redundant_state)
return
elif redundant_state == "MASTER":
command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics
elif redundant_state == "BACKUP":
command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertTrue(len(result) > 0 and result[0] == "0", "Expected result is 0 but actual result is %s" % result[0])
def verify_network_interfaces_in_router(self, router, host, expectedNics):
command = 'ip link show |grep BROADCAST | cut -d ":" -f2 |tr -d " "|tr "\n" ","'
self.logger.debug("Executing command '%s'" % command)
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertTrue(len(result) > 0 and result[0] == expectedNics, "Expected nics are %s but actual nics are %s" %(expectedNics, result))
def verify_ip_address_in_router(self, router, host, ipaddress, device, isExist=True):
command = 'ip addr show %s |grep "inet "|cut -d " " -f6 |cut -d "/" -f1 |grep -w %s' % (device,ipaddress)
self.logger.debug("Executing command '%s'" % command)
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.password,
router.linklocalip,
command)
self.assertEqual(len(result) > 0 and result[0] == ipaddress, isExist, "ip %s verification failed" % ipaddress)
def get_free_ipaddress(self, vlanId):
ipaddresses = PublicIPAddress.list(
self.apiclient,
vlanid=vlanId,
state='Free'
)
self.assertEqual(
isinstance(ipaddresses, list),
True,
"List ipaddresses should return a valid response for Free ipaddresses"
)
random.shuffle(ipaddresses)
return ipaddresses[0].ipaddress
@attr(tags=["advanced"], required_hardware="false")
def test_02_acquire_public_ips_in_isolated_network_with_redundant_vrs(self):
""" Acquire IPs in multiple subnets in isolated networks with redundant VRs
# Steps
# 1. Create network offering with redundant VRs, and enable it
# 2. create isolated network with the network offering
# 3. create a vm in the network.
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
# 4. get a free public ip, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP and new ip
# 5. remove the port forwarding rule, and release the new ip
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic IP, eth2 -> source nat IP
# 6. create new public ip range 1
# 7. get a free ip 4 in new ip range 2, assign to network, and enable static nat to vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1
# 8. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2,
# 9. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2, new ip 3
# 10. release new ip 2
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 3
# 11. release new ip 1
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3
# 12. create new public ip range 2
# 13. get a free ip 4 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4
# 14. get a free ip 5 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5
# 15. get a free ip 6 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5/6
# 16. release new ip 5
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/6
# 17. release new ip 4
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 6
# 18. release new ip 3
# verify the available nics in VR should be "eth0,eth1,eth2,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6
# 19. restart network
# verify the available nics in VR should be "eth0,eth1,eth2,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6
# 20. reboot router
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
# 21. restart network with cleanup
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
# 22. restart network with cleanup, makeredundant=true
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
"""
# Create new domain1
self.domain1 = Domain.create(
self.apiclient,
services=self.services["acl"]["domain1"],
parentdomainid=self.domain.id)
# Create account1
self.account1 = Account.create(
self.apiclient,
self.services["acl"]["accountD1"],
domainid=self.domain1.id
)
self.cleanup.append(self.account1)
self.cleanup.append(self.domain1)
# 1. Create network offering with redundant VRs, and enable it
self.network_offering = NetworkOffering.create(
self.apiclient,
self.services["nw_off_isolated_RVR"],
)
self.network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(self.network_offering)
# 2. create isolated network with the network offering
self.services["network"]["zoneid"] = self.zone.id
self.services["network"]["networkoffering"] = self.network_offering.id
self.network1 = Network.create(
self.apiclient,
self.services["network"],
self.account1.name,
self.account1.domainid
)
# 3. create a vm in the network.
try:
self.virtual_machine1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account1.name,
domainid=self.account1.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
zoneid=self.zone.id,
networkids=self.network1.id
)
except Exception as e:
self.fail("Exception while deploying virtual machine: %s" % e)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_router_publicnic_state(router, host, "eth2")
# 4. get a free public ip, assign to network, and create port forwarding rules (ssh) to the vm
ipaddress = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress.ipaddress.id,
openfirewall=True
)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP/new ip
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, "eth2", True)
self.verify_router_publicnic_state(router, host, "eth2")
# 5. release the new ip
ipaddress.delete(self.apiclient)
# verify the available nics in VR should be "eth0,eth1,eth2"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, "eth2", False)
self.verify_router_publicnic_state(router, host, "eth2")
# 6. create new public ip range 1
self.services["publiciprange"]["zoneid"] = self.zone.id
self.services["publiciprange"]["forvirtualnetwork"] = "true"
random_subnet_number = random.randrange(10,50)
self.services["publiciprange"]["vlan"] = get_free_vlan(
self.apiclient,
self.zone.id)[1]
self.services["publiciprange"]["gateway"] = "172.16." + str(random_subnet_number) + ".1"
self.services["publiciprange"]["startip"] = "172.16." + str(random_subnet_number) + ".2"
self.services["publiciprange"]["endip"] = "172.16." + str(random_subnet_number) + ".10"
self.services["publiciprange"]["netmask"] = "255.255.255.0"
self.public_ip_range1 = PublicIpRange.create(
self.apiclient,
self.services["publiciprange"]
)
self.cleanup.append(self.public_ip_range1)
        # 7. get a free ip 1 in new ip range 1, assign to network, and enable static nat to vm
ip_address_1 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_1 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_1
)
StaticNATRule.enable(
self.apiclient,
virtualmachineid=self.virtual_machine1.id,
ipaddressid=ipaddress_1.ipaddress.id,
networkid=self.network1.id
)
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
        # 8. get a free ip 2 in new ip range 1, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2,
ip_address_2 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_2 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_2
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_2.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
        # 9. get a free ip 3 in new ip range 1, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2, new ip 3
ip_address_3 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)
ipaddress_3 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_3
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_3.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
# 10. release new ip 2
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 3
ipaddress_2.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
# 11. release new ip 1
# verify the available nics in VR should be "eth0,eth1,eth2,eth3"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3
ipaddress_1.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
# 12. create new public ip range 2
self.services["publiciprange"]["zoneid"] = self.zone.id
self.services["publiciprange"]["forvirtualnetwork"] = "true"
self.services["publiciprange"]["vlan"] = get_free_vlan(
self.apiclient,
self.zone.id)[1]
self.services["publiciprange"]["gateway"] = "172.16." + str(random_subnet_number + 1) + ".1"
self.services["publiciprange"]["startip"] = "172.16." + str(random_subnet_number + 1) + ".2"
self.services["publiciprange"]["endip"] = "172.16." + str(random_subnet_number + 1) + ".10"
self.services["publiciprange"]["netmask"] = "255.255.255.0"
self.public_ip_range2 = PublicIpRange.create(
self.apiclient,
self.services["publiciprange"]
)
self.cleanup.append(self.public_ip_range2)
# 13. get a free ip 4 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4
ip_address_4 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_4 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_4
)
StaticNATRule.enable(
self.apiclient,
virtualmachineid=self.virtual_machine1.id,
ipaddressid=ipaddress_4.ipaddress.id,
networkid=self.network1.id
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3|eth4")
# 14. get a free ip 5 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5
ip_address_5 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_5 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_5
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_5.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3|eth4")
# 15. get a free ip 6 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5/6
ip_address_6 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)
ipaddress_6 = PublicIPAddress.create(
self.apiclient,
zoneid=self.zone.id,
networkid=self.network1.id,
ipaddress=ip_address_6
)
nat_rule = NATRule.create(
self.apiclient,
self.virtual_machine1,
self.services["natrule"],
ipaddressid=ipaddress_6.ipaddress.id,
openfirewall=True
)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3|eth4")
# 16. release new ip 5
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/6
ipaddress_5.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", True)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3|eth4")
# 17. release new ip 4
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 6
ipaddress_4.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, "eth3", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3|eth4")
# 18. release new ip 3
# verify the available nics in VR should be "eth0,eth1,eth2,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6
ipaddress_3.delete(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth4")
# 19. restart network
# verify the available nics in VR should be "eth0,eth1,eth2,eth4,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6
self.network1.restart(self.apiclient)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth4,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth4", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth4", True)
self.verify_router_publicnic_state(router, host, "eth2|eth4")
# 20. reboot router
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
if len(routers) > 0:
router = routers[0]
cmd = rebootRouter.rebootRouterCmd()
cmd.id = router.id
self.apiclient.rebootRouter(cmd)
router = self.get_router(router.id)
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
# 21. restart network with cleanup
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
self.network1.restart(self.apiclient, cleanup=True)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
# 22. restart network with cleanup, makeredundant=true
# verify the available nics in VR should be "eth0,eth1,eth2,eth3,"
# verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6
self.network1.restart(self.apiclient, cleanup=True, makeredundant=True)
routers = self.get_routers(self.network1.id)
for router in routers:
host = self.get_router_host(router)
self.verify_network_interfaces_in_router(router, host, "eth0,eth1,eth2,eth3,")
guestIp, controlIp, sourcenatIp = self.get_router_ips(router)
self.verify_ip_address_in_router(router, host, guestIp, "eth0", True)
self.verify_ip_address_in_router(router, host, controlIp, "eth1", True)
self.verify_ip_address_in_router(router, host, sourcenatIp, "eth2", True)
self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, "eth3", False)
self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, "eth3", True)
self.verify_router_publicnic_state(router, host, "eth2|eth3")
avg_line_length: 52.340768 | max_line_length: 141 | alphanum_fraction: 0.633703

hexsha: 82b3456578c1d03b0fec4ecc90c18c4bb05e681d | size: 1,654 | ext: py | lang: Python
max_stars_repo:  component/scripts/predict_tree.py | luforestal/Street | 222c5d829506c37ddc4e8d3a0f9a41c79daf819b | ["MIT"] | count: null | min: null | max: null
max_issues_repo: component/scripts/predict_tree.py | luforestal/Street | 222c5d829506c37ddc4e8d3a0f9a41c79daf819b | ["MIT"] | count: null | min: null | max: null
max_forks_repo:  component/scripts/predict_tree.py | luforestal/Street | 222c5d829506c37ddc4e8d3a0f9a41c79daf819b | ["MIT"] | count: null | min: null | max: null
content:
import torch
from pathlib import Path
from PIL import Image
import pandas as pd
from deepforest import main
from component.scripts.processing import *
model_gsv = torch.hub.load(
"/home/lvelasquez/YoloV5/yolov5",
"custom",
path="/home/lvelasquez/YoloV5/yolov5/best.pt",
source="local",
)
m_aer = main.deepforest()
m_aer.use_release()
def tree_detection(folder_dwl_name):
image_path = list(
filter(not_hidden, list(img for img in Path(folder_dwl_name).glob("**/*.jpg")))
)
    bbox1 = []
    boxes = pd.DataFrame()  # aerial detections; stays empty if no aerial image is found
    for image in image_path:
        if "aerial" in str(image):
            # Detect trees in the aerial image with the DeepForest model;
            # return_plot=False so only the detection table is returned.
            boxes = m_aer.predict_image(path=str(image), return_plot=False)
            # pred = m_aer.predict_image(path='test_1.png', return_plot=True)
            # Activate this line if a plotted image is required.
            boxes["xcenter"] = (boxes["xmax"] + boxes["xmin"]) / 2
            boxes["ycenter"] = (boxes["ymax"] + boxes["ymin"]) / 2
        else:
            # Street-level (GSV) image: detect trees with the YOLOv5 model.
            im = Image.open(image)
            name = image.name[:-4]
            results = model_gsv(im)
            bbox = results.pandas().xyxy[0]
            bbox["image"] = name
            bbox["xcenter"] = (bbox["xmax"] + bbox["xmin"]) / 2
            bbox["ycenter"] = (bbox["ymax"] + bbox["ymin"]) / 2
            bbox1.append(bbox)
    bx_gsv = pd.concat(bbox1) if bbox1 else pd.DataFrame()
    return bx_gsv, boxes
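# Hypothetical usage sketch (the folder name and output file below are
# placeholders, not part of the original script): run both detectors over a
# downloaded image folder and keep the street-level detections.
#   gsv_boxes, aerial_boxes = tree_detection("downloads/session_01/")
#   gsv_boxes.to_csv("gsv_detections.csv", index=False)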
avg_line_length: 28.033898 | max_line_length: 87 | alphanum_fraction: 0.599758

hexsha: 97776fa8ec6057937457ffcf0fe51b76db134665 | size: 2,965 | ext: py | lang: Python
max_stars_repo:  examples/detect_landmarks_in_image.py | spacejake/face-alignment | 5c3acb5ff649de0ee9820bb595856cf2229c5db4 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_issues_repo: examples/detect_landmarks_in_image.py | spacejake/face-alignment | 5c3acb5ff649de0ee9820bb595856cf2229c5db4 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_forks_repo:  examples/detect_landmarks_in_image.py | spacejake/face-alignment | 5c3acb5ff649de0ee9820bb595856cf2229c5db4 | ["BSD-3-Clause"] | count: null | min: null | max: null
content:
import sys
import os
sys.path.append(os.path.abspath('..'))
import matplotlib
matplotlib.use("TkAgg")
import face_alignment
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.animation as animation
import tkinter
from skimage import io
# Run the 3D face alignment on a test image, with CUDA enabled.
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cuda:0', flip_input=True, remote=False)
input = io.imread('../test/assets/aflw-test.jpg')
preds = fa.get_landmarks(input)[-1]
#TODO: Make this nice
fig = plt.figure(figsize=plt.figaspect(.5))
ax = fig.add_subplot(1, 2, 1)
ax.imshow(input)
ax.plot(preds[0:17,0],preds[0:17,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[17:22,0],preds[17:22,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[22:27,0],preds[22:27,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[27:31,0],preds[27:31,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[31:36,0],preds[31:36,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[36:42,0],preds[36:42,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[42:48,0],preds[42:48,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[48:60,0],preds[48:60,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.plot(preds[60:68,0],preds[60:68,1],marker='o',markersize=6,linestyle='-',color='w',lw=2)
ax.axis('off')
ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.scatter(preds[:,0]*1.2,preds[:,1],preds[:,2],c="cyan", alpha=1.0, edgecolor='b')
ax.plot3D(preds[:17,0]*1.2,preds[:17,1], preds[:17,2], color='blue' )
ax.plot3D(preds[17:22,0]*1.2,preds[17:22,1],preds[17:22,2], color='blue')
ax.plot3D(preds[22:27,0]*1.2,preds[22:27,1],preds[22:27,2], color='blue')
ax.plot3D(preds[27:31,0]*1.2,preds[27:31,1],preds[27:31,2], color='blue')
ax.plot3D(preds[31:36,0]*1.2,preds[31:36,1],preds[31:36,2], color='blue')
ax.plot3D(preds[36:42,0]*1.2,preds[36:42,1],preds[36:42,2], color='blue')
ax.plot3D(preds[42:48,0]*1.2,preds[42:48,1],preds[42:48,2], color='blue')
ax.plot3D(preds[48:,0]*1.2,preds[48:,1],preds[48:,2], color='blue' )
ax.view_init(elev=90., azim=90.)
ax.set_xlim(ax.get_xlim()[::-1])
# plt.show()
root = tkinter.Tk()
root.wm_title("Embedding in Tk")
canvas = FigureCanvasTkAgg(fig, root)
ax.mouse_init()
canvas.draw()
canvas.get_tk_widget().grid()
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = tkinter.Button(master=root, text="Quit", command=_quit)
# button.pack(side=tkinter.BOTTOM)
tkinter.mainloop()
avg_line_length: 39.533333 | max_line_length: 116 | alphanum_fraction: 0.681956

hexsha: 0585b92e4ddae4e82ab19a06993b5420a19f68da | size: 905 | ext: py | lang: Python
max_stars_repo:  setup.py | jcrafts/dragon-radar | 247ac5035b85f3a5c6ff60885841616b1c9f58ed | ["MIT"] | count: 12 | min: 2017-03-04T01:28:18.000Z | max: 2021-02-23T16:07:29.000Z
max_issues_repo: setup.py | jcrafts/dragon-radar | 247ac5035b85f3a5c6ff60885841616b1c9f58ed | ["MIT"] | count: 18 | min: 2016-06-19T19:31:32.000Z | max: 2021-02-17T14:40:01.000Z
max_forks_repo:  setup.py | gravitypriest/dragon-radar | 247ac5035b85f3a5c6ff60885841616b1c9f58ed | ["MIT"] | count: null | min: null | max: null
content:
import py2exe
from distutils.core import setup
from constants import VERSION
setup(zipfile=None,
version=VERSION,
console=[
{'script': '__main__.py',
'dest_base': 'dragon-radar',
'icon_resources': [(0, 'icon.ico')]
}],
data_files=[('params', ['params/demux.json',
'params/episodes.json',
'params/offsets.json',
'params/titles.json',
'params/title-times.json',
'params/valid.json']),
('ac3files', ['ac3files/blank_20_192.ac3',
'ac3files/blank_20_384.ac3',
'ac3files/blank_51_384.ac3',
'ac3files/blank_51_448.ac3']),
('', ['dragon-radar.ini', 'changelog.txt'])])
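# Typical build invocation for a py2exe-based setup script like this one
# (an assumption about the intended workflow; run on Windows):
#   python setup.py py2exe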
avg_line_length: 39.347826 | max_line_length: 63 | alphanum_fraction: 0.446409

hexsha: 03a724e17a5c74cb421f9b9ce133c884be7b9124 | size: 10,084 | ext: py | lang: Python
max_stars_repo:  scripts/lonelypages.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | ["MIT"] | count: null | min: null | max: null
max_issues_repo: scripts/lonelypages.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | ["MIT"] | count: null | min: null | max: null
max_forks_repo:  scripts/lonelypages.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | ["MIT"] | count: null | min: null | max: null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is a script written to add the template "orphan" to pages.
These command line parameters can be used to specify which pages to work on:
&params;
Furthermore, the following command line parameters are supported:
-enable: Enable or disable the bot via a Wiki Page.
-disambig: Set a page where the bot saves the name of the disambig
pages found (default: skip the pages)
-always Always say yes, won't ask
Example:
python pwb.py lonelypages -enable:User:Bot/CheckBot -always
"""
#
# (C) Pywikibot team, 2006-2020
#
# Distributed under the terms of the MIT license.
#
import re
import sys
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.bot import suggest_help, SingleSiteBot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {'&params;': pagegenerators.parameterHelp}  # noqa: N816
class OrphanTemplate:
"""The orphan template configuration."""
def __init__(self, site, name, parameters, aliases=None, subst=False):
"""Initializer."""
self._name = name
if not aliases:
aliases = []
elif not subst:
aliases = list(aliases) + [name]
else:
name = 'subst:' + name
if parameters:
name += '|' + parameters
self.template = '{{' + name + '}}'
self._names = frozenset(aliases)
template_ns = site.namespaces[10]
# TODO: Add redirects to self.names too
if not pywikibot.Page(site, self._name, template_ns.id).exists():
raise ValueError('Orphan template "{0}" does not exist on '
'"{1}".'.format(self._name, site))
for name in self._names:
if not pywikibot.Page(site, name, template_ns.id).exists():
pywikibot.warning('Orphan template alias "{0}" does not exist '
'on "{1}"'.format(name, site))
self.regex = re.compile(
r'\{\{(?:'
+ ':|'.join(template_ns) + '|)('
+ '|'.join(re.escape(name) for name in self._names)
+ r')[\|\}]', re.I)
# The orphan template names in the different languages.
_templates = {
'af': ('Weesbladsy', 'datum={{subst:CURRENTMONTHNAME}} '
'{{subst:CURRENTYEAR}}', ['wi']),
'ar': ('يتيمة', 'تاريخ={{نسخ:اسم_شهر}} {{نسخ:عام}}'),
'arz': ('يتيمه', 'تاريخ={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}'),
'ca': ('Orfe', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}'),
'en': ('Orphan', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}',
['wi']),
'kn': ('Orphan', 'date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}'),
'it': ('O', '||mese={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}',
['a']),
'ja': ('孤立', '{{subst:DATE}}'),
'ko': ('외톨이', '{{{{{|안전풀기:}}}#timel:Y-m-d|now}}'),
'test': ('Orphan', ''),
'zh': ('Orphan/auto', '', ['orphan'], True),
}
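# For example, with the English configuration above OrphanTemplate builds
# self.template as '{{Orphan|date={{subst:CURRENTMONTHNAME}} {{subst:CURRENTYEAR}}}}',
# which the bot prepends to each orphaned article it tags (derived from the
# class above; shown here only as an illustration).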
class LonelyPagesBot(SingleSiteBot):
"""Orphan page tagging bot."""
def __init__(self, generator, **kwargs):
"""Initializer."""
self.available_options.update({
'enablePage': None, # Check if someone set an enablePage or not
'disambigPage': None, # If no disambigPage given, not use it.
})
super().__init__(**kwargs)
self.generator = generator
# Take the configurations according to our project
if self.opt.enablePage:
self.opt.enablePage = pywikibot.Page(
self.site, self.opt.enablePage)
self.comment = i18n.twtranslate(
self.site, 'lonelypages-comment-add-template')
self.commentdisambig = i18n.twtranslate(
self.site, 'lonelypages-comment-add-disambig-template')
orphan_template = i18n.translate(self.site, _templates)
if orphan_template is not None:
try:
orphan_template = OrphanTemplate(self.site, *orphan_template)
except ValueError as e:
orphan_template = e
if orphan_template is None or isinstance(orphan_template, ValueError):
err_message = 'Missing configuration for site {}'.format(self.site)
suggest_help(
exception=orphan_template, additional_text=err_message)
sys.exit(err_message)
else:
self._settings = orphan_template
# DisambigPage part
if self.opt.disambigPage is not None:
self.disambigpage = pywikibot.Page(
self.site, self.opt.disambigPage)
try:
self.disambigtext = self.disambigpage.get()
except pywikibot.NoPage:
pywikibot.output("{0} doesn't exist, skip!"
.format(self.disambigpage.title()))
self.disambigtext = ''
except pywikibot.IsRedirectPage:
pywikibot.output("{0} is a redirect, don't use it!"
.format(self.disambigpage.title()))
self.opt.disambigPage = None
@property
def settings(self):
"""Return the settings for the configured site."""
return self._settings
def enable_page(self):
"""Enable or disable bot via wiki page."""
enable = self.opt.enablePage
if enable is not None:
try:
getenable = enable.get()
except pywikibot.NoPage:
pywikibot.output(
"{0} doesn't exist, I use the page as if it was blank!"
.format(enable.title()))
getenable = ''
except pywikibot.IsRedirectPage:
pywikibot.output('{0} is a redirect, skip!'
.format(enable.title()))
getenable = ''
return getenable == 'enable'
return True
def setup(self):
"""Setup the bot.
If the enable page is set to disable, set an empty generator which
turns off the bot (useful when the bot is run on a server).
"""
if not self.enable_page():
pywikibot.output('The bot is disabled')
self.generator = ()
def treat(self, page):
"""Check if page is applicable and not marked and add template then."""
pywikibot.output('Checking {0}...'.format(page.title()))
if page.isRedirectPage(): # If redirect, skip!
pywikibot.output('{0} is a redirect! Skip...'
.format(page.title()))
return
refs = list(page.getReferences(total=1))
if len(refs) > 0:
pywikibot.output("{0} isn't orphan! Skip..."
.format(page.title()))
return
else:
# no refs, no redirect; check if there's already the template
try:
oldtxt = page.get()
except pywikibot.NoPage:
pywikibot.output("{0} doesn't exist! Skip..."
.format(page.title()))
return
except pywikibot.IsRedirectPage:
pywikibot.output('{0} is a redirect! Skip...'
.format(page.title()))
return
if self.settings.regex.search(oldtxt):
pywikibot.output(
'Your regex has found something in {0}, skipping...'
.format(page.title()))
return
if (page.isDisambig()
and self.opt.disambigPage is not None):
pywikibot.output('{0} is a disambig page, report..'
.format(page.title()))
if not page.title().lower() in self.disambigtext.lower():
self.disambigtext = '{0}\n*[[{1}]]'.format(
self.disambigtext, page.title())
self.disambigpage.text = self.disambigtext
self.disambigpage.save(self.commentdisambig)
return
        # Is the page a disambig but there's no disambigPage? Skip!
elif page.isDisambig():
pywikibot.output('{0} is a disambig page, skip...'
.format(page.title()))
return
else:
            # Ok, the page needs the template. Let's put it there!
# Adding the template in the text
newtxt = '{0}\n{1}'.format(self.settings.template, oldtxt)
self.userPut(page, oldtxt, newtxt, summary=self.comment)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
options = {}
local_args = pywikibot.handle_args(args)
gen_factory = pagegenerators.GeneratorFactory()
site = pywikibot.Site()
for arg in local_args:
if arg.startswith('-enable'):
if len(arg) == 7:
options['enablePage'] = pywikibot.input(
'Would you like to check if the bot should run or not?')
else:
options['enablePage'] = arg[8:]
elif arg.startswith('-disambig'):
if len(arg) == 9:
options['disambigPage'] = pywikibot.input(
'In which page should the bot save the disambig pages?')
else:
options['disambigPage'] = arg[10:]
elif arg == '-always':
options['always'] = True
else:
gen_factory.handleArg(arg)
generator = gen_factory.getCombinedGenerator()
# If the generator is not given, use the default one
if not generator:
generator = site.lonelypages(total=gen_factory.limit)
bot = LonelyPagesBot(generator, **options)
bot.run()
if __name__ == '__main__':
main()
avg_line_length: 37.073529 | max_line_length: 79 | alphanum_fraction: 0.549583

hexsha: b68cfca431622049f6479b101306e0f81c1425a2 | size: 2,230 | ext: py | lang: Python
max_stars_repo:  src/sage/server/introspect.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | ["BSL-1.0"] | count: 2 | min: 2021-08-20T00:30:35.000Z | max: 2021-11-17T10:54:00.000Z
max_issues_repo: src/sage/server/introspect.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | ["BSL-1.0"] | count: null | min: null | max: null
max_forks_repo:  src/sage/server/introspect.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | ["BSL-1.0"] | count: null | min: null | max: null
content:
"""
Sage Notebook Introspection
TODO: - add support for grabbing source code from Pyrex functions
(even if not perfect is better than nothing). - PNG or MathML
output format for docstring
"""
###########################################################################
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
###########################################################################
def introspect(S, query, format='html'):
"""
Return introspection from a given query string.
INPUT:
- ``S`` - a Sage0 object, i.e., an interface to a
running instance of Python with the Sage libraries loaded
    - ``query`` - a string:
      - if it has no '?' then return a completion list
      - if it begins or ends in one '?' return the docstring
      - if it begins or ends in '??' return the source code
- ``format`` - (string) 'html', 'png', 'none' (only
html is implemented right now!)
"""
if format != 'html':
raise NotImplementedError
query = query.replace('\n','').strip()
if query[:9] == '?__last__':
return get_docstring_last(S, int(query[9:])/15)
if len(query) > 1:
if query[:2] == '??':
return get_source_code(S, query[2:])
elif query[-2:] == '??':
return get_source_code(S, query[:-2])
if len(query) > 0:
if query[0] == '?':
return get_docstring(S, query[1:])
elif query[-1] == '?':
return get_docstring(S, query[:-1])
return get_completions(S, query)
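# Illustrative queries, following the dispatch rules documented above
# (assumes `S` is a running Sage0 interface; `factor` is just an example name):
#   introspect(S, 'factor?')   -> HTML-formatted docstring of `factor`
#   introspect(S, 'factor??')  -> source code of `factor`
#   introspect(S, 'fact')      -> completion list for the prefix 'fact'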
def _get_docstring(S, query):
cmd = '_support_.docstring("%s", globals())'%query
z = S.eval(cmd)
z = z.replace('\\n','\n').replace('\\t',' ')[1:-1]
z = word_wrap(z, ncols=numcols)
return z
def _get_source_code(S, query):
cmd = '"".join(_support_.source_code("%s", globals()))'%query
z = S.eval(cmd)
z = z.replace('\\n','\n').replace("\\","").replace('\\t',' ')[1:-1]
return z
def _get_completions(S, query):
cmd = '"<br>".join(_support_.completions("%s", globals()))'%query
z = S.eval(cmd)
_last_ = z
return z[1:-1]
avg_line_length: 30.972222 | max_line_length: 78 | alphanum_fraction: 0.537668

hexsha: 8f1662b538705f3c409356746484a8eab16fd5a3 | size: 11,476 | ext: py | lang: Python
max_stars_repo:  Audio_CrisNet.py | BioSIP/countcrowd_TFG | 0ba2c7d4e4db4710f3e345565081d499a2d62cc0 | ["MIT"] | count: null | min: null | max: null
max_issues_repo: Audio_CrisNet.py | BioSIP/countcrowd_TFG | 0ba2c7d4e4db4710f3e345565081d499a2d62cc0 | ["MIT"] | count: null | min: null | max: null
max_forks_repo:  Audio_CrisNet.py | BioSIP/countcrowd_TFG | 0ba2c7d4e4db4710f3e345565081d499a2d62cc0 | ["MIT"] | count: null | min: null | max: null
content:
from losses import LogCoshLoss
import os
from scipy.io import loadmat
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torch
import torchaudio
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import pickle
# https://pytorch.org/tutorials/beginner/audio_preprocessing_tutorial.html
SAVE_FILENAME = 'AudioCrisNet_prueba.pickle'
# Check whether we have GPUs available to use or not:
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class AudioDataset(Dataset):
def __init__(self, audio_path, density_path, transform=None):
        # 3 options for density_path:
#density_path = '/Volumes/Cristina /TFG/Data/density/train'
#density_path = '/Volumes/Cristina /TFG/Data/density/test'
#density_path = '/Volumes/Cristina /TFG/Data/density/val'
self.density_path = density_path
self.audio_path = audio_path
self.transform = transform
self.mapfiles = os.listdir(self.density_path)
        # Exclude the files starting with '._':
self.mapfiles = [
el for el in self.mapfiles if el.startswith('._') == False]
self.mapfiles_wo_ext = [el[:-4] for el in self.mapfiles]
# list comprehension
#audio_path = '/Volumes/Cristina /TFG/Data/auds/'
self.audiofiles = os.listdir(audio_path)
self.audiofiles_wo_ext = [el[:-4] for el in self.audiofiles]
self.audiofiles = [
el + '.wav' for el in self.audiofiles_wo_ext if el in self.mapfiles_wo_ext]
self.audiofiles = sorted(self.audiofiles)
self.mapfiles = sorted(self.mapfiles)
        # Add extensions to the audio files:
        # for i in range(len(self.audiofiles)):
        # Add the extension to the name of the file I want to import:
        #self.audiofiles[i] = [self.audiofiles[i] + '.wav']
def __len__(self):
return len(self.audiofiles)
def __getitem__(self, idx):
# DENSITY MAP
map_path = self.density_path + self.mapfiles[idx]
mapa = loadmat(map_path)
y = torch.as_tensor(mapa['map'].sum(), dtype=torch.float32)
# AUDIO
        # Find the path of the file:
        filename = str(self.audiofiles[idx])
        filename = filename.lstrip("['")
        filename = filename.rstrip("']")
        aud_path = self.audio_path + filename
        # Load the audio:
        waveform, sample_rate = torchaudio.load(
            aud_path)  # waveform is a tensor
        x = waveform.view((2, 1, -1))  # dimensions
if self.transform:
x = self.transform(x)
return x, y
# class SpectrogramDataset(Dataset):
# TO BE IMPLEMENTED LATER!!!
'''
audio_path = '/home/pakitochus/Descargas/propuestas_tfg_cristina/crowd/definitivo/DISCO_dataset/auds/'
train_density_path = '/home/pakitochus/Descargas/propuestas_tfg_cristina/crowd/definitivo/DISCO_dataset/density/train/'
val_density_path = '/home/pakitochus/Descargas/propuestas_tfg_cristina/crowd/definitivo/DISCO_dataset/density/val/'
test_density_path = '/home/pakitochus/Descargas/propuestas_tfg_cristina/crowd/definitivo/DISCO_dataset/density/test/'
'''
audio_path = '/media/NAS/home/cristfg/datasets/auds/'
train_density_path = '/media/NAS/home/cristfg/datasets/density/train/'
val_density_path = '/media/NAS/home/cristfg/datasets/density/val/'
test_density_path = '/media/NAS/home/cristfg/datasets/density/test/'
trainset = AudioDataset(audio_path, train_density_path)
valset = AudioDataset(audio_path, val_density_path)
testset = AudioDataset(audio_path, test_density_path)
# TEST to inspect the audio tensors and density maps of the train and val sets:
# print(trainset.__getitem__(20))
# print(valset.__getitem__(20))
# BATCH_SIZE: small (1-3)
batch_size = 48
# BATCH_SIZE: small (1-3)
train_loader = DataLoader(trainset, batch_size, shuffle=True)
val_loader = DataLoader(valset, 32, shuffle=False)
test_loader = DataLoader(testset, 32, shuffle=False)
# NETWORK:
'''
#In case I want to try LeNet later (CHANGE INPUTS!):
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__() # esta linea es siempre necesaria
self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
self.mp1 = nn.MaxPool2d(1,2)
self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
self.mp2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(16, 120, 3, padding=1)
        self.fc1 = nn.Linear(7*7*120, 256)  # hidden layer
        self.fc2 = nn.Linear(256, 10)  # output layer
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.mp1(x)
x = F.relu(self.conv2(x))
x = self.mp2(x)
x = F.relu(self.conv3(x))
x = x.view(-1, 7*7*120)
        x = F.relu(self.fc1(x))  # ReLU activation at the output of the hidden layer
        x = F.softmax(self.fc2(x), dim=1)  # softmax activation at the output layer
return x
'''
# MaxPool2d((1,2))
# torch.nn.Conv2d(in_channels, out_channels, kernel_size) -> kernel_size = (1, 61)
# in_channels ->2, out_channels -> [32,64].
# optim - > adam
class CrisNet(nn.Module):
def __init__(self):
        super(CrisNet, self).__init__()  # this line is always required
self.max_pool1 = nn.MaxPool2d((1,2))
self.conv1 = nn.Conv2d(2, 32, (1,5))
self.conv2 = nn.Conv2d(32, 64, (1,5))
self.conv3 = nn.Conv2d(64, 128, (1,5))
self.conv4 = nn.Conv2d(128, 256, (1,5))
self.conv5 = nn.Conv2d(256, 512, (1,5))
self.conv6 = nn.Conv2d(512, 1024, (1,5))
self.fc1 = nn.Linear(763904,1)
'''
self.conv2 = nn.Conv2d()
self.max_pool2 = nn.MaxPool2d((1,2))
self.fc2 = nn.Linear()
'''
def forward(self, x):
        # With ReLU activation
        # FIRST LAYER
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        # SECOND LAYER
        x = F.relu(self.conv2(x))
        x = self.max_pool1(x)
        # THIRD LAYER
        x = F.relu(self.conv3(x))
        x = self.max_pool1(x)
        # FOURTH LAYER
        x = F.relu(self.conv4(x))
        x = self.max_pool1(x)
        # FIFTH LAYER
        x = F.relu(self.conv5(x))
        x = self.max_pool1(x)
        # SIXTH LAYER
x = F.relu(self.conv6(x))
x = self.max_pool1(x)
x = x.view((x.size(0),-1))
x = self.fc1(x)
return x
modelo=CrisNet()
modelo = modelo.to(device)
criterion = nn.MSELoss().to(device)  # define the loss
# criterion = LogCoshLoss(reduction='sum')
optimizador = optim.Adam(modelo.parameters(), lr=1e-4)#, weight_decay=1e-4)
# optimizador = optim.SGD(modelo.parameters(), lr=1e-4)
# print(modelo)
# print(train_loader)
# print(type(train_loader))
# print(x)
# print(x.size())
# print(y)
# print(y.size())
# To predict y, we will normalize it. Always by the same value:
Y_NORM = 100
losses = {'train': list(), 'validacion': list(),
'val_mae': list(), 'val_mse': list()}
min_val_loss = float('Inf')
expcode = 'crisnet_adam_mse'
for epoch in range(30):
print("Entrenando... \n") # Esta será la parte de entrenamiento
training_loss = 0.0 # el loss en cada epoch de entrenamiento
total = 0
modelo.train() # Para preparar el modelo para el training
for x, y in train_loader:
total += 1
        # zero all the gradients in all neurons:
        optimizador.zero_grad()
        y = y/Y_NORM  # normalize
x = x.to(device)
y = y.to(device)
output = modelo(x) # forward
        loss = criterion(output.squeeze(), y.squeeze())  # evaluate the loss
        # print(f'loss: {loss:.4f}')
        loss.backward()  # backward pass
        optimizador.step()  # optimization step
        training_loss += loss.item()  # accumulate this batch's loss
training_loss *= Y_NORM/total
losses['train'].append(training_loss) # .item())
val_loss = 0.0
total = 0
    modelo.eval()  # Prepare the model for validation and/or test
    print("Validating... \n")
    for x, y in val_loader:
        total += 1
        y = y/Y_NORM  # normalize (HERE TOO?)
x = x.to(device)
y = y.to(device)
output = modelo(x)
loss = criterion(output.squeeze(), y.squeeze())
val_loss += loss.item()
val_loss *= Y_NORM/total
if val_loss <= min_val_loss:
min_val_loss = val_loss
filename = expcode+'.pt'
print(f'Saving as {filename}')
torch.save(modelo, filename)
losses['validacion'].append(val_loss) # .item())
print(
f'Epoch {epoch} \t\t Training Loss: {training_loss} \t\t Validation Loss: {val_loss}')
with open(SAVE_FILENAME, 'wb') as handle:
pickle.dump(losses, handle, protocol=pickle.HIGHEST_PROTOCOL)
# TRAINING
n_epochs = 170
modelo = torch.load(filename)
optimizador = optim.SGD(modelo.parameters(), lr=1e-7, momentum=0.9)
epoch_ni = 0 # epochs not improving.
MAX_ITER = 100
for epoch in range(n_epochs):
print("Entrenando... \n") # Esta será la parte de entrenamiento
training_loss = 0.0 # el loss en cada epoch de entrenamiento
total = 0
modelo.train() # Para preparar el modelo para el training
for x, y in train_loader:
total += 1
        # zero all the gradients in all neurons:
        optimizador.zero_grad()
        y = y/Y_NORM  # normalize
x = x.to(device)
y = y.to(device)
output = modelo(x) # forward
        loss = criterion(output.squeeze(), y.squeeze())  # evaluate the loss
        # print(f'loss: {loss}')
        loss.backward()  # backward pass
        optimizador.step()  # optimization step
        training_loss += loss.item()  # accumulate this batch's loss
training_loss *= Y_NORM/total
losses['train'].append(training_loss) # .item())
val_loss = 0.0
total = 0
    modelo.eval()  # Prepare the model for validation and/or test
    print("Validating... \n")
    for x, y in val_loader:
        total += 1
        y = y/Y_NORM  # normalize (HERE TOO?)
x = x.to(device)
y = y.to(device)
output = modelo(x) # forward
        loss = criterion(output.squeeze(), y.squeeze())  # evaluate the loss
val_loss += loss.item()
val_loss *= Y_NORM/total
if val_loss <= min_val_loss:
min_val_loss = val_loss
filename = expcode+'.pt'
print(f'Saving as {filename}')
torch.save(modelo, filename)
epoch_ni = 0
else:
epoch_ni +=1
if epoch_ni > MAX_ITER:
break
losses['validacion'].append(val_loss) # .item())
#losses['val_mae'].append(mae_accum) # .item())
#losses['val_mse'].append(mse_accum) # .item())
print(
f'Epoch {epoch} \t\t Training Loss: {training_loss} \t\t Validation Loss: {val_loss}')
with open(SAVE_FILENAME, 'wb') as handle:
pickle.dump(losses, handle, protocol=pickle.HIGHEST_PROTOCOL)
# TEST
modelo = torch.load(filename)
modelo.eval()  # Prepare the model for validation and/or test
print("Testing... \n")
total = 0
mse = nn.MSELoss(reduction='sum')  # define the loss
mae = nn.L1Loss(reduction='sum')
test_loss_mse = 0.0
test_loss_mae = 0.0
yreal = list()
ypredicha = list()
for x, y in test_loader:
    y = y/Y_NORM  # normalize
x = x.to(device)
y = y.to(device)
total += y.shape[0]
output = modelo(x)
output = output.squeeze()
mse_loss = mse(output, y)
test_loss_mse += mse_loss.cpu().item()
mae_loss = mae(output, y)
test_loss_mae += mae_loss.cpu().item()
    # store the labels.
yreal.append(y.detach().cpu().numpy())
ypredicha.append(output.detach().cpu().numpy())
# This holds whenever reduction='sum' -> comparable to a number of people.
test_loss_mse *= Y_NORM/total
# This holds whenever reduction='sum' -> comparable to a number of people.
test_loss_mae *= Y_NORM/total
yreal = np.hstack(yreal)  # flatten the per-batch arrays into one vector
ypredicha = np.hstack(ypredicha)
losses['yreal'] = yreal * Y_NORM
losses['ypredicha'] = ypredicha * Y_NORM
print(f'Test Loss (MSE): {test_loss_mse}')
losses['test_mse'] = test_loss_mse # .item())
print(f'Test Loss (MAE): {test_loss_mae}')
losses['test_mae'] = test_loss_mae # .item())
with open(SAVE_FILENAME, 'wb') as handle:
pickle.dump(losses, handle, protocol=pickle.HIGHEST_PROTOCOL)
#%%
'''
Testing...
Test Loss (MSE): 122.91883238156636
Test Loss (MAE): 74.75587590535481
'''
| 27.389021
| 119
| 0.700331
|
fd84008950777f158281496887c40f28eb3dc45c
| 831
|
py
|
Python
|
graphz/embedding.py
|
morriswmz/graphz
|
1b436ec80d2f8bf8b7fcda99abcfba127f7afea3
|
[
"MIT"
] | null | null | null |
graphz/embedding.py
|
morriswmz/graphz
|
1b436ec80d2f8bf8b7fcda99abcfba127f7afea3
|
[
"MIT"
] | null | null | null |
graphz/embedding.py
|
morriswmz/graphz
|
1b436ec80d2f8bf8b7fcda99abcfba127f7afea3
|
[
"MIT"
] | null | null | null |
import numpy as np
import warnings
def laplacian_eigenmaps(graph, n_components=2):
"""
    Computes the Laplacian eigenmap.
Ref: M. Belkin, P. Niyogi, Laplacian eigenmaps and spectral techniques for
embedding and clustering, in: NIPS, Vol. 14, 2001, pp. 585-591.
"""
if graph.n_nodes > 10000:
warnings.warn('The default implementation computes the full eigendecomposition, which is not efficient for large graphs.')
if graph.directed:
raise ValueError('Graph should be undirected.')
if n_components < 0 or n_components > graph.n_nodes:
raise ValueError('Number of components must be positive and less than the number of nodes.')
L = graph.get_laplacian(normalize=True)
l, v = np.linalg.eig(L)
ind = np.argsort(np.abs(l))
return v[:, ind[1:1+n_components]]
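# Minimal usage sketch (an illustration only: `_ToyGraph` below is a
# hypothetical stand-in exposing the attributes this function relies on,
# not part of the graphz API).
if __name__ == '__main__':
    class _ToyGraph:
        """Undirected toy graph defined by an adjacency matrix."""

        def __init__(self, adj):
            self._adj = np.asarray(adj, dtype=float)
            self.n_nodes = self._adj.shape[0]
            self.directed = False

        def get_laplacian(self, normalize=True):
            # Combinatorial Laplacian D - A, symmetrically normalized on request.
            deg = self._adj.sum(axis=1)
            lap = np.diag(deg) - self._adj
            if normalize:
                d_inv_sqrt = np.where(deg > 0, deg ** -0.5, 0.0)
                lap = d_inv_sqrt[:, None] * lap * d_inv_sqrt[None, :]
            return lap

    # 4-node cycle: embed each node into 2 spectral coordinates.
    cycle = _ToyGraph([[0, 1, 0, 1],
                       [1, 0, 1, 0],
                       [0, 1, 0, 1],
                       [1, 0, 1, 0]])
    print(laplacian_eigenmaps(cycle, n_components=2).shape)  # -> (4, 2)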
avg_line_length: 37.772727 | max_line_length: 130 | alphanum_fraction: 0.694344

hexsha: f3c4f3c95e48cf4caf5cac5176b35c0d7eb3760d | size: 3,484 | ext: py | lang: Python
max_stars_repo:  projects/pointnav_baselines/experiments/ithor/pointnav_ithor_rgbd_simpleconvgru_ddppo.py | prithv1/allenact | ee736e6a3aeed29b3661ee18fa0dc0a68a40201e | ["MIT"] | count: 1 | min: 2020-09-10T13:09:14.000Z | max: 2020-09-10T13:09:14.000Z
max_issues_repo: projects/pointnav_baselines/experiments/ithor/pointnav_ithor_rgbd_simpleconvgru_ddppo.py | andrlima/allenact | f29dd6f0ec62425b02ca07fee815b1a82627a28e | ["MIT"] | count: null | min: null | max: null
max_forks_repo:  projects/pointnav_baselines/experiments/ithor/pointnav_ithor_rgbd_simpleconvgru_ddppo.py | andrlima/allenact | f29dd6f0ec62425b02ca07fee815b1a82627a28e | ["MIT"] | count: null | min: null | max: null
content:
import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from core.algorithms.onpolicy_sync.losses import PPO
from core.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from projects.pointnav_baselines.experiments.ithor.pointnav_ithor_base import (
PointNaviThorBaseConfig,
)
from projects.pointnav_baselines.models.point_nav_models import (
PointNavActorCriticSimpleConvRNN,
)
from plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from plugins.robothor_plugin.robothor_sensors import (
DepthSensorRoboThor,
GPSCompassSensorRoboThor,
)
from plugins.robothor_plugin.robothor_tasks import PointNavTask
from utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay
class PointNaviThorRGBPPOExperimentConfig(PointNaviThorBaseConfig):
"""An Point Navigation experiment configuration in iThor with RGBD
input."""
def __init__(self):
super().__init__()
self.ENV_ARGS["renderDepthImage"] = True
self.SENSORS = [
RGBSensorThor(
height=self.SCREEN_SIZE,
width=self.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
DepthSensorRoboThor(
height=self.SCREEN_SIZE,
width=self.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="depth_lowres",
),
GPSCompassSensorRoboThor(),
]
self.PREPROCESSORS = []
self.OBSERVATIONS = [
"rgb_lowres",
"depth_lowres",
"target_coordinates_ind",
]
@classmethod
def tag(cls):
return "Pointnav-iTHOR-RGBD-SimpleConv-DDPPO"
@classmethod
def training_pipeline(cls, **kwargs):
ppo_steps = int(75000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 3
num_steps = 30
save_interval = 5000000
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"ppo_loss": Builder(PPO, kwargs={}, default=PPOConfig,)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return PointNavActorCriticSimpleConvRNN(
action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
observation_space=kwargs["observation_set"].observation_spaces,
goal_sensor_uuid="target_coordinates_ind",
hidden_size=512,
embed_coordinates=False,
coordinate_dims=2,
num_rnn_layers=1,
rnn_type="GRU",
)
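# Hypothetical sanity check of this configuration (not part of the original file):
#   cfg = PointNaviThorRGBPPOExperimentConfig()
#   print(cfg.tag())                    # -> "Pointnav-iTHOR-RGBD-SimpleConv-DDPPO"
#   pipeline = cfg.training_pipeline()  # builds the DD-PPO TrainingPipeline above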
avg_line_length: 32.867925 | max_line_length: 88 | alphanum_fraction: 0.641504

hexsha: 0bba7b6fac6ee93fd7b120a04df87b3dea7f8cde | size: 5,112 | ext: py | lang: Python
max_stars_repo:  tensorflow/python/saved_model/utils_impl.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | ["Apache-2.0"] | count: 4 | min: 2021-06-15T17:26:07.000Z | max: 2021-11-17T10:58:08.000Z
max_issues_repo: tensorflow/python/saved_model/utils_impl.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | ["Apache-2.0"] | count: 4 | min: 2020-09-26T00:55:50.000Z | max: 2022-02-10T01:53:06.000Z
max_forks_repo:  tensorflow/python/saved_model/utils_impl.py | knightvishal/tensorflow | 5d3dd19b7146d954fc1b4e9e44e9881e75d363c1 | ["Apache-2.0"] | count: 1 | min: 2019-12-20T01:12:47.000Z | max: 2019-12-20T01:12:47.000Z
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import constants
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# TensorInfo helpers.
@tf_export("saved_model.build_tensor_info",
"saved_model.utils.build_tensor_info")
@deprecation.deprecated_endpoints("saved_model.utils.build_tensor_info")
def build_tensor_info(tensor):
"""Utility function to build TensorInfo proto.
Args:
tensor: Tensor or SparseTensor whose name, dtype and shape are used to
build the TensorInfo. For SparseTensors, the names of the three
constituent Tensors are used.
Returns:
A TensorInfo protocol buffer constructed based on the supplied argument.
"""
tensor_info = meta_graph_pb2.TensorInfo(
dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum,
tensor_shape=tensor.get_shape().as_proto())
if isinstance(tensor, sparse_tensor.SparseTensor):
tensor_info.coo_sparse.values_tensor_name = tensor.values.name
tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name
tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name
else:
tensor_info.name = tensor.name
return tensor_info
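A small sketch, not from the TensorFlow source, showing build_tensor_info on a dense placeholder; the tensor name and shape are illustrative and a TF1-style graph context is assumed:

def _example_build_tensor_info():
  import tensorflow.compat.v1 as tf
  with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
    info = build_tensor_info(x)
    # info.name == "x:0"; info.dtype is DT_FLOAT; info.tensor_shape describes (?, 784)
    return info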
@tf_export("saved_model.get_tensor_from_tensor_info",
"saved_model.utils.get_tensor_from_tensor_info")
@deprecation.deprecated_endpoints(
"saved_model.utils.get_tensor_from_tensor_info")
def get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):
"""Returns the Tensor or SparseTensor described by a TensorInfo proto.
Args:
tensor_info: A TensorInfo proto describing a Tensor or SparseTensor.
graph: The tf.Graph in which tensors are looked up. If None, the
current default graph is used.
import_scope: If not None, names in `tensor_info` are prefixed with this
string before lookup.
Returns:
The Tensor or SparseTensor in `graph` described by `tensor_info`.
Raises:
KeyError: If `tensor_info` does not correspond to a tensor in `graph`.
ValueError: If `tensor_info` is malformed.
"""
graph = graph or ops.get_default_graph()
def _get_tensor(name):
return graph.get_tensor_by_name(
ops.prepend_name_scope(name, import_scope=import_scope))
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
return _get_tensor(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
_get_tensor(tensor_info.coo_sparse.indices_tensor_name),
_get_tensor(tensor_info.coo_sparse.values_tensor_name),
_get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
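A hedged round-trip sketch, again not from the original module: recover the tensor that a TensorInfo describes from the graph it was built in (the placeholder is illustrative):

def _example_get_tensor_from_tensor_info():
  import tensorflow.compat.v1 as tf
  graph = tf.Graph()
  with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
    info = build_tensor_info(x)
  recovered = get_tensor_from_tensor_info(info, graph=graph)  # looks up "x:0"
  assert recovered is x
  return recovered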
# Path helpers.
def get_or_create_variables_dir(export_dir):
"""Return variables sub-directory, or create one if it doesn't exist."""
variables_dir = get_variables_dir(export_dir)
if not file_io.file_exists(variables_dir):
file_io.recursive_create_dir(variables_dir)
return variables_dir
def get_variables_dir(export_dir):
"""Return variables sub-directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.VARIABLES_DIRECTORY))
def get_variables_path(export_dir):
"""Return the variables path, used as the prefix for checkpoint files."""
return os.path.join(
compat.as_text(get_variables_dir(export_dir)),
compat.as_text(constants.VARIABLES_FILENAME))
def get_or_create_assets_dir(export_dir):
"""Return assets sub-directory, or create one if it doesn't exist."""
assets_destination_dir = get_assets_dir(export_dir)
if not file_io.file_exists(assets_destination_dir):
file_io.recursive_create_dir(assets_destination_dir)
return assets_destination_dir
def get_assets_dir(export_dir):
"""Return path to asset directory in the SavedModel."""
return os.path.join(
compat.as_text(export_dir),
compat.as_text(constants.ASSETS_DIRECTORY))
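An illustrative helper, not part of the original module, showing what these path helpers resolve to for a hypothetical export directory, given the standard "variables"/"assets" SavedModel constants:

def _example_paths():
  export_dir = "/tmp/my_model"  # hypothetical export directory
  return (get_variables_dir(export_dir),   # /tmp/my_model/variables
          get_variables_path(export_dir),  # /tmp/my_model/variables/variables
          get_assets_dir(export_dir))      # /tmp/my_model/assets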
| 36.514286
| 80
| 0.761933
|
81f51b5d155f5349e76b0e5850ec0e416b16c908
| 4,948
|
py
|
Python
|
ui.py
|
psyphernix/CLI-Inventory-Management-System
|
8ae03ac7a8f6cad329089cd9e775b60ee7f5d7fd
|
[
"MIT"
] | null | null | null |
ui.py
|
psyphernix/CLI-Inventory-Management-System
|
8ae03ac7a8f6cad329089cd9e775b60ee7f5d7fd
|
[
"MIT"
] | null | null | null |
ui.py
|
psyphernix/CLI-Inventory-Management-System
|
8ae03ac7a8f6cad329089cd9e775b60ee7f5d7fd
|
[
"MIT"
] | null | null | null |
#------------------------------to take user input-----------------------------#
def user_input(fr_list):
''' function to take user input'''
list_out = [] # creating an empty list
wish = input("Does customer want to buy anything?(yes/no): ")
enter = "yes" # assigning "yes" to enter for future use
n = 0 # setting counter n
check = [] # creating an empty list to store the names of items the customer has already bought
if wish == "yes": # do given operations if wish equals to "yes"
while enter == "yes": # iterate through given operations when enter equals "yes"
list_input = [] # creating empty list
res = False # assigning False value to variable res for exception handling purpose
while res == False: # iterate through given operations as long as res equals False
try: # do given operations while res equals False
product = input("Enter the Product Name: ") # asking user to enter product name
for item in fr_list: # iterating in file read list
if item[0] == product and item[2] == 0: # print following if product is out of stock
print("Sorry! %s is out of stock." % product)
elif product in check and product in item: # print following message if product is already in customer's buying list
print("Sorry! %s has already bought %s." % (name_customer, product))
elif product in item: # assign True to res if product is available
res = True
except:
print("Product not available!") # message to print if all conditions in try failed
list_input.append(product) # append product to list named list_input
res1 = False # assigning False value to variable res1 for exception handling purpose
while res1 == False: # iterate through given operations as long as res1 equals False
try: # do given operations while res1 equals False
quantity = int(input("Enter the Quantity: ")) # asking user to input quantity and coverting to interger
for i in fr_list: # iterating in file read list
if i[0] == product: # do following if value at index 0 of i is equal to product
if quantity < 1 or quantity > i[2]: # message to display if quantity entered is less than 1 or greater than the quantity available
print("Quantity should be 1 to available quantity. Please enter properly!")
else: # assign True to res1 if above if conditions fail
res1 = True
except:
print("Invalid Input!") # message to print if all conditions in try failed
list_input.append(quantity) # append quantity to list named list_input
res2 = False # assigning False value to variable res2 for exception handling purpose
while res2 == False: # iterate through given operations as long as res2 equals False
try: # do given operations while res2 equals False
discount_amount = int(input("Enter the Discount Amount in Percentage: ")) # asking user to input discount in percent and converting it to an integer
if discount_amount < 0 or discount_amount > 100: # message to display if discount amount is negative or greater than 100
print("Percentage should be 0 to 100. Please enter properly!")
else: # assign True to res2 if above if conditions fail
res2 = True
except:
print("Invalid Input!") # message to print if all conditions in try failed
list_input.append(discount_amount) # append discount amount to list named list_input
if n == 0: # ask for the customer's name on the first purchase (when n equals 0)
name_customer = input("Enter the customer's name: ")
list_input.append(name_customer) # append customer's name to list named list_input
list_out.append(list_input) # append list named list_input to another list named list_out to create 2D list
enter = input("Does %s want to buy another product?(yes/no): " % name_customer) # to ask customer if he/she wants to buy another product
if enter == "yes": # do following operations if user want to buy another product
check.append(product) # appending product to list named check
n = n + 1 # increase n value by 1
else:
return list_out # to return list named list_out from function
else:
return False # if wish is not equal to "yes", return False from function
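A hypothetical driver sketch, not part of the original script, assuming each inventory row is laid out as [name, price, quantity]; the real file-reading code that builds fr_list lives elsewhere in this project:

if __name__ == "__main__":
    sample_inventory = [["Apple", 1.5, 10], ["Soap", 2.0, 0]]
    purchases = user_input(sample_inventory)  # 2D list of purchases, or False if nothing is bought
    print(purchases)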
| 78.539683
| 163
| 0.589935
|
03b91203efb8f7ba0ef9fd8dfd511a3223b52f74
| 316
|
py
|
Python
|
problemsets/Codeforces/Python/A987.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A987.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A987.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
a=['purple', 'green', 'blue', 'orange', 'red', 'yellow']
b=['Power', 'Time', 'Space', 'Soul', 'Reality', 'Mind']
n=int(input())
c=[input() for _ in [0]*n]
d=[b[x] for x,y in enumerate(a) if y not in c]
print(len(d),*d,sep='\n')
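A worked input/output pair for illustration: the lists a and b are index-aligned (purple->Power, green->Time, blue->Space, orange->Soul, red->Reality, yellow->Mind), so listing the gems that were seen leaves the missing stones to print. For the input

2
red
purple

the program prints

4
Time
Space
Soul
Mind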
| 26.333333
| 56
| 0.575949
|
b2ea50e72a90d57c0ce1639b9aa32bf39f8fb676
| 3,817
|
py
|
Python
|
coverage/src/distance.py
|
zhang-informatics/UMLS_iDISK
|
c3cbea8c87f23be35be678a863c932cb422794fb
|
[
"MIT"
] | null | null | null |
coverage/src/distance.py
|
zhang-informatics/UMLS_iDISK
|
c3cbea8c87f23be35be678a863c932cb422794fb
|
[
"MIT"
] | null | null | null |
coverage/src/distance.py
|
zhang-informatics/UMLS_iDISK
|
c3cbea8c87f23be35be678a863c932cb422794fb
|
[
"MIT"
] | null | null | null |
import argparse
import json
import editdistance
import pickle
import numpy as np
from tqdm import tqdm
"""
This script computes the Levenshtein and Jaccard distances between the
raw matched and unmatched terms. For each term it finds the minimum
distance in the other set.
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--infile", type=str, required=True,
help="JSON file containing matches.")
parser.add_argument("--outfile", type=str, required=True,
help="Where to save the matching summary.")
return parser.parse_args()
def main(infile, outfile):
matches = json.load(open(infile))
# We don't (as of now) compute distances between sets of normalized terms.
matched, unmatched = get_matched_unmatched_raw_terms(matches)
print("Computing Edit Distances")
edits_match, edits_unmatch = min_distances(matched, unmatched,
editdistance.eval)
pickle.dump(edits_match, open(f"{outfile}.edit_dist_match.pickle", "wb"))
pickle.dump(edits_unmatch, open(f"{outfile}.edit_dist_unmatch.pickle", "wb")) # noqa
print("Computing Jaccard Distances")
jaccs_match, jaccs_unmatch = min_distances(matched, unmatched, jaccard)
pickle.dump(jaccs_match, open(f"{outfile}.jaccard_dist_match.pickle", "wb")) # noqa
pickle.dump(jaccs_unmatch, open(f"{outfile}.jaccard_dist_unmatch.pickle", "wb")) # noqa
summarize(edits_match, outfile, write_mode='w',
name="Edit Distance (matched -> unmatched)")
summarize(edits_unmatch, outfile, write_mode='a',
name="Edit Distance (unmatched -> matched)")
summarize(jaccs_match, outfile, write_mode='a',
name="Jaccard Distance (matched -> unmatched)")
summarize(jaccs_unmatch, outfile, write_mode='a',
name="Jaccard Distance (unmatched -> matched)")
def get_matched_unmatched_raw_terms(matches):
matched_terms = set()
unmatched_terms = set()
for cui in matches:
for aui in matches[cui]:
match = matches[cui][aui]
if len(match["umls_cuis"]) > 0:
matched_terms.add(match["term"].lower())
else:
unmatched_terms.add(match["term"].lower())
return matched_terms, unmatched_terms
def min_distances(matched, unmatched, distance_func):
"""
Returns two lists of numbers:
1. The minimum distance of each matched term to the unmatched terms.
2. The minimum distance of each unmatched term to the matched terms.
where distance is measured by the supplied distance function.
"""
matched_distances = [np.inf] * len(matched)
unmatched_distances = [np.inf] * len(unmatched)
for (m, matched_term) in tqdm(list(enumerate(matched))):
for (u, unmatched_term) in enumerate(unmatched):
dist = distance_func(matched_term, unmatched_term)
if dist < matched_distances[m]:
matched_distances[m] = dist
if dist < unmatched_distances[u]:
unmatched_distances[u] = dist
return matched_distances, unmatched_distances
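A tiny sanity-check sketch, not in the original script, using the Levenshtein distance imported above:

def _example_min_distances():
    # "cat" vs {"cart", "dog"}: cat->cart costs 1; cart->cat costs 1, dog->cat costs 3
    return min_distances(["cat"], ["cart", "dog"], editdistance.eval)  # ([1], [1, 3])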
def jaccard(term1, term2):
st1 = set(term1)
st2 = set(term2)
num = len(st1.intersection(st2))
denom = len(st1.union(st2))
return 1 - (num / denom)
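A worked example of the character-set Jaccard distance above: "ginseng" gives {g,i,n,s,e} and "ginger" gives {g,i,n,e,r}, so the intersection has 4 characters and the union 6:

def _example_jaccard():
    return jaccard("ginseng", "ginger")  # 1 - 4/6 = 0.333...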
def summarize(distances, outfile, write_mode='w', name=""):
q1, q2, q3 = np.percentile(distances, [25, 50, 75])
minimum = min(distances)
maximum = max(distances)
with open(outfile, write_mode) as outF:
outF.write(name + '\n')
outF.write(f"min, max: {minimum:.2f}, {maximum:.2f}\n")
outF.write(f"quartiles: {q1:.2f}, {q2:.2f}, {q3:.2f}\n\n")
if __name__ == "__main__":
args = parse_args()
main(args.infile, args.outfile)
| 37.792079
| 92
| 0.657061
|
a7458f89ead9a3e5459284f8f9ed358203d433a4
| 4,872
|
py
|
Python
|
pypowerbi/client.py
|
lucasfcnunes/pypowerbi
|
4ab4c0f075bf1db43bb3bd8e691157fa459bd9e1
|
[
"MIT"
] | 1
|
2021-03-16T17:34:03.000Z
|
2021-03-16T17:34:03.000Z
|
pypowerbi/client.py
|
lucasfcnunes/pypowerbi
|
4ab4c0f075bf1db43bb3bd8e691157fa459bd9e1
|
[
"MIT"
] | null | null | null |
pypowerbi/client.py
|
lucasfcnunes/pypowerbi
|
4ab4c0f075bf1db43bb3bd8e691157fa459bd9e1
|
[
"MIT"
] | null | null | null |
import json
import datetime
import adal
from .features import Features
from .reports import Reports
from .datasets import Datasets
from .imports import Imports
from .groups import Groups
from .gateways import Gateways
from .activity_logs import ActivityLogs
class PowerBIClient:
default_resource_url = "https://analysis.windows.net/powerbi/api"
default_api_url = "https://api.powerbi.com"
default_authority_url = "https://login.windows.net/common"
api_version_snippet = "v1.0"
api_myorg_snippet = "myorg"
@staticmethod
def get_client_with_username_password(
client_id,
username,
password,
authority_url=None,
resource_url=None,
api_url=None,
):
"""
Constructs a client with the option of using common defaults.
:param client_id: The Power BI Client ID
:param username: Username
:param password: Password
:param authority_url: The authority_url; defaults to 'https://login.windows.net/common'
:param resource_url: The resource_url; defaults to 'https://analysis.windows.net/powerbi/api'
:param api_url: The api_url: defaults to 'https://api.powerbi.com'
:return:
"""
if authority_url is None:
authority_url = PowerBIClient.default_authority_url
if resource_url is None:
resource_url = PowerBIClient.default_resource_url
if api_url is None:
api_url = PowerBIClient.default_api_url
context = adal.AuthenticationContext(
authority=authority_url, validate_authority=True, api_version=None
)
# get your authentication token
token = context.acquire_token_with_username_password(
resource=resource_url,
client_id=client_id,
username=username,
password=password,
)
return PowerBIClient(api_url, token)
def __init__(self, api_url, token):
self.api_url = api_url
self.token = token
self.datasets = Datasets(self)
self.features = Features(self)
self.reports = Reports(self)
self.imports = Imports(self)
self.groups = Groups(self)
self.gateways = Gateways(self)
self.activity_logs = ActivityLogs(self)
@property
def auth_header(self):
if self._auth_header is None:
self._auth_header = {"Authorization": f'Bearer {self.token["accessToken"]}'}
return self._auth_header
_auth_header = None
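A hedged usage sketch, not part of the library; the client id and credentials below are placeholders:

def _example_client():
    client = PowerBIClient.get_client_with_username_password(
        client_id="00000000-0000-0000-0000-000000000000",
        username="user@example.com",
        password="not-a-real-password",
    )
    return client.reports  # Reports helper bound to this client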
class EffectiveIdentity:
username_key = "username"
roles_key = "roles"
datasets_key = "datasets"
def __init__(self, username, roles, datasets):
self.username = username
self.roles = roles
self.datasets = datasets
class EffectiveIdentityEncoder(json.JSONEncoder):
def default(self, o):
return {
EffectiveIdentity.username_key: o.username,
EffectiveIdentity.roles_key: o.roles,
EffectiveIdentity.datasets_key: o.datasets,
}
class TokenRequest:
access_level_key = "accessLevel"
dataset_id_key = "datasetId"
allow_saveas_key = "allowSaveAs"
identities_key = "identities"
def __init__(
self, access_level, dataset_id=None, allow_saveas=None, identities=None
):
self.access_level = access_level
self.dataset_id = dataset_id
self.allow_saveas = allow_saveas
self.identities = identities
class TokenRequestEncoder(json.JSONEncoder):
def default(self, o):
effective_identity_encoder = EffectiveIdentityEncoder()
json_dict = {TokenRequest.access_level_key: o.access_level}
if o.dataset_id is not None:
json_dict[TokenRequest.dataset_id_key] = o.dataset_id
if o.allow_saveas is not None:
json_dict[TokenRequest.allow_saveas_key] = o.allow_saveas
if o.identities is not None:
json_dict[TokenRequest.identities_key] = [
effective_identity_encoder.default(x) for x in o.identities
]
return json_dict
class EmbedToken:
token_key = "token"
token_id_key = "tokenId"
expiration_key = "expiration"
def __init__(self, token, token_id, expiration):
self.token = token
self.token_id = token_id
self.expiration = expiration
@classmethod
def from_dict(cls, dictionary):
if cls.token_key not in dictionary:
raise RuntimeError(f"Token dict has no {cls.token_key} key")
token = dictionary[cls.token_key]
token_id = dictionary[cls.token_id_key]
expiration = dictionary[cls.expiration_key]
return EmbedToken(token, token_id, expiration)
@property
def expiration_as_datetime(self):
return datetime.datetime.strptime(self.expiration, "%Y-%m-%dT%H:%M:%SZ")
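A small sketch, not part of the original module, rebuilding an EmbedToken from the dictionary shape used above; the values are placeholders:

def _example_embed_token():
    token = EmbedToken.from_dict({
        "token": "abc",
        "tokenId": "123",
        "expiration": "2030-01-01T00:00:00Z",
    })
    return token.expiration_as_datetime  # datetime.datetime(2030, 1, 1, 0, 0)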
| 29.527273
| 101
| 0.663383
|
453eeb17d4c4a5f7480679b63cd133b35461a43f
| 526
|
py
|
Python
|
inthemoment/utils.py
|
MeighenBergerS/inthemoment
|
6c2f27d952a80bd377cfa00fc4db8f3d1cdf0dad
|
[
"MIT"
] | null | null | null |
inthemoment/utils.py
|
MeighenBergerS/inthemoment
|
6c2f27d952a80bd377cfa00fc4db8f3d1cdf0dad
|
[
"MIT"
] | null | null | null |
inthemoment/utils.py
|
MeighenBergerS/inthemoment
|
6c2f27d952a80bd377cfa00fc4db8f3d1cdf0dad
|
[
"MIT"
] | null | null | null |
# utils.py
# Authors Stephan Meighen-Berger
# Utility functions for the inthemoment package
import numpy as np
def find_nearest(array, value):
""" Function to find the closest value in the array
Parameters
----------
array: iterable
Iterable object
value: float/int
The value to find the closest element for
Returns
-------
nearest: float/int
The value in array closest to the given value
"""
array = np.array(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
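A quick sanity check of find_nearest with made-up values (it returns the closest element itself, not its index):

if __name__ == "__main__":
    assert find_nearest([1, 4, 7], 5) == 4  # |4 - 5| = 1 is the smallest gap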
| 20.230769
| 55
| 0.627376
|
da30f52517b76b9a80811ed7645f72810f835d6d
| 29,145
|
py
|
Python
|
core/storage/question/gae_models_test.py
|
AbhinavGopal/oppiabackup
|
e5ae39b20623d4389885802d670b0142d82034ea
|
[
"Apache-2.0"
] | 1
|
2022-02-22T09:27:22.000Z
|
2022-02-22T09:27:22.000Z
|
core/storage/question/gae_models_test.py
|
IMADILKHAN/oppia
|
454bf732dfd0087bcc0b8b7cd65d80ba386f4929
|
[
"Apache-2.0"
] | null | null | null |
core/storage/question/gae_models_test.py
|
IMADILKHAN/oppia
|
454bf732dfd0087bcc0b8b7cd65d80ba386f4929
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.question.gae_models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import random
import types
from constants import constants
from core.domain import skill_services
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import python_utils
import utils
(base_models, question_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.question])
class QuestionModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionModel class."""
def test_get_deletion_policy(self):
self.assertEqual(
question_models.QuestionModel.get_deletion_policy(),
base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)
def test_has_reference_to_user_id(self):
question_state_data = self._create_valid_question_data('ABC')
linked_skill_ids = ['skill_id1', 'skill_id2']
self.save_new_question(
'question_id1', 'owner_id', question_state_data, linked_skill_ids)
self.assertTrue(
question_models.QuestionModel
.has_reference_to_user_id('owner_id'))
self.assertFalse(
question_models.QuestionModel
.has_reference_to_user_id('x_id'))
def test_create_question_empty_skill_id_list(self):
state = state_domain.State.create_default_state('ABC')
question_state_data = state.to_dict()
language_code = 'en'
version = 1
question_model = question_models.QuestionModel.create(
question_state_data, language_code, version, [])
self.assertEqual(
question_model.question_state_data, question_state_data)
self.assertEqual(question_model.language_code, language_code)
self.assertItemsEqual(question_model.linked_skill_ids, [])
def test_create_question_with_skill_ids(self):
state = state_domain.State.create_default_state('ABC')
question_state_data = state.to_dict()
linked_skill_ids = ['skill_id1', 'skill_id2']
language_code = 'en'
version = 1
question_model = question_models.QuestionModel.create(
question_state_data, language_code, version,
linked_skill_ids)
self.assertEqual(
question_model.question_state_data, question_state_data)
self.assertEqual(question_model.language_code, language_code)
self.assertItemsEqual(
question_model.linked_skill_ids, linked_skill_ids)
def test_put_multi_questions(self):
question_state_data = self._create_valid_question_data('ABC')
linked_skill_ids = ['skill_id1', 'skill_id2']
self.save_new_question(
'question_id1', 'owner_id',
question_state_data,
linked_skill_ids)
self.save_new_question(
'question_id2', 'owner_id',
question_state_data,
linked_skill_ids)
question_ids = ['question_id1', 'question_id2']
self.assertItemsEqual(
question_models.QuestionModel.get(question_ids[0]).linked_skill_ids,
['skill_id1', 'skill_id2'])
self.assertItemsEqual(
question_models.QuestionModel.get(question_ids[1]).linked_skill_ids,
['skill_id1', 'skill_id2'])
question_model1 = question_models.QuestionModel.get(question_ids[0])
question_model1.linked_skill_ids = ['skill_id3']
question_model2 = question_models.QuestionModel.get(question_ids[1])
question_model2.linked_skill_ids = ['skill_id3']
question_models.QuestionModel.put_multi_questions(
[question_model1, question_model2])
self.assertEqual(question_models.QuestionModel.get(
question_ids[0]).linked_skill_ids, ['skill_id3'])
self.assertEqual(question_models.QuestionModel.get(
question_ids[1]).linked_skill_ids, ['skill_id3'])
def test_raise_exception_by_mocking_collision(self):
state = state_domain.State.create_default_state('ABC')
question_state_data = state.to_dict()
language_code = 'en'
version = 1
with self.assertRaisesRegexp(
Exception, 'The id generator for QuestionModel is producing too '
'many collisions.'
):
# Swap dependent method get_by_id to simulate collision every time.
with self.swap(
question_models.QuestionModel, 'get_by_id',
types.MethodType(
lambda x, y: True,
question_models.QuestionModel)):
question_models.QuestionModel.create(
question_state_data, language_code, version, set([]))
class QuestionSkillLinkModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionSkillLinkModel class."""
def test_get_deletion_policy(self):
self.assertEqual(
question_models.QuestionSkillLinkModel.get_deletion_policy(),
base_models.DELETION_POLICY.KEEP)
def test_has_reference_to_user_id(self):
self.assertFalse(
question_models.QuestionSkillLinkModel
.has_reference_to_user_id('any_id'))
def test_create_question_skill_link(self):
question_id = 'A Test Question Id'
skill_id = 'A Test Skill Id'
skill_difficulty = 0.4
questionskilllink_model = question_models.QuestionSkillLinkModel.create(
question_id, skill_id, skill_difficulty)
self.assertEqual(questionskilllink_model.question_id, question_id)
self.assertEqual(questionskilllink_model.skill_id, skill_id)
self.assertEqual(
questionskilllink_model.skill_difficulty, skill_difficulty)
def test_put_multi_question_skill_link(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id3', 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_skill_id(
'skill_id1')
)
self.assertEqual(len(question_skill_links), 2)
question_ids = [question_skill.question_id for question_skill
in question_skill_links]
self.assertEqual(question_ids, ['question_id1', 'question_id2'])
def test_delete_multi_question_skill_link(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id3', 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_skill_id(
'skill_id1')
)
self.assertEqual(len(question_skill_links), 2)
question_ids = [question_skill.question_id for question_skill
in question_skill_links]
self.assertEqual(question_ids, ['question_id1', 'question_id2'])
question_models.QuestionSkillLinkModel.delete_multi_question_skill_links( # pylint: disable=line-too-long
[questionskilllink_model1, questionskilllink_model2])
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_skill_id(
'skill_id1')
)
self.assertEqual(len(question_skill_links), 0)
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_skill_id(
'skill_id3')
)
self.assertEqual(len(question_skill_links), 1)
self.assertEqual(question_skill_links[0].question_id, 'question_id3')
def test_get_models_by_question_id(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id3', 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_question_id(
'question_id2')
)
self.assertEqual(len(question_skill_links), 2)
question_skill_links = (
question_models.QuestionSkillLinkModel.get_models_by_question_id(
'question_id3')
)
self.assertEqual(len(question_skill_links), 0)
def test_get_question_skill_links_by_skill_ids(self):
skill_id_1 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_1, 'user', description='Description 1')
skill_id_2 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_2, 'user', description='Description 2')
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', skill_id_1, 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', skill_id_1, 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', skill_id_2, 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
question_skill_link_models, next_cursor_str = (
question_models.QuestionSkillLinkModel.get_question_skill_links_by_skill_ids( # pylint: disable=line-too-long
1, [skill_id_1, skill_id_2], ''
)
)
self.assertEqual(len(question_skill_link_models), 2)
self.assertEqual(question_skill_link_models[0].skill_id, skill_id_2)
self.assertEqual(question_skill_link_models[1].skill_id, skill_id_1)
question_skill_link_models_2, next_cursor_str = (
question_models.QuestionSkillLinkModel.get_question_skill_links_by_skill_ids( # pylint: disable=line-too-long
1, [skill_id_1, skill_id_2], next_cursor_str
)
)
self.assertEqual(len(question_skill_link_models_2), 1)
self.assertEqual(question_skill_link_models_2[0].skill_id, skill_id_1)
self.assertNotEqual(
question_skill_link_models[0], question_skill_link_models_2[0])
def test_get_question_skill_links_by_skill_ids_many_skills(self):
# Test the case when len(skill_ids) > constants.MAX_SKILLS_PER_QUESTION.
skill_id_1 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_1, 'user', description='Description 1')
skill_id_2 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_2, 'user', description='Description 2')
skill_id_3 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_3, 'user', description='Description 3')
skill_id_4 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_4, 'user', description='Description 4')
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', skill_id_1, 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', skill_id_2, 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', skill_id_3, 0.8)
)
questionskilllink_model4 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', skill_id_4, 0.3)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3, questionskilllink_model4])
question_skill_link_models, _ = (
question_models.QuestionSkillLinkModel.get_question_skill_links_by_skill_ids( # pylint: disable=line-too-long
1, [skill_id_1, skill_id_2, skill_id_3, skill_id_4], ''
)
)
self.assertEqual(len(question_skill_link_models), 3)
self.assertEqual(question_skill_link_models[0].skill_id, skill_id_4)
self.assertEqual(question_skill_link_models[1].skill_id, skill_id_3)
self.assertEqual(question_skill_link_models[2].skill_id, skill_id_2)
def test_get_question_skill_links_based_on_difficulty(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.7)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.6)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id2', 0.5)
)
questionskilllink_model4 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id2', 0.9)
)
questionskilllink_model5 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id3', 0.9)
)
questionskilllink_model6 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id3', 0.6)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3, questionskilllink_model4,
questionskilllink_model5, questionskilllink_model6])
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
3, ['skill_id1', 'skill_id2', 'skill_id3'], 0.6
)
)
self.assertEqual(len(question_skill_links), 2)
self.assertTrue(questionskilllink_model2 in question_skill_links)
self.assertTrue(questionskilllink_model4 in question_skill_links)
def test_get_random_question_skill_links_based_on_difficulty(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.6)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.6)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id1', 0.6)
)
questionskilllink_model4 = (
question_models.QuestionSkillLinkModel.create(
'question_id4', 'skill_id1', 0.6)
)
questionskilllink_model5 = (
question_models.QuestionSkillLinkModel.create(
'question_id5', 'skill_id1', 0.6)
)
questionskilllink_model6 = (
question_models.QuestionSkillLinkModel.create(
'question_id6', 'skill_id1', 0.6)
)
questionskilllink_model7 = (
question_models.QuestionSkillLinkModel.create(
'question_id7', 'skill_id1', 0.6)
)
questionskilllink_model8 = (
question_models.QuestionSkillLinkModel.create(
'question_id8', 'skill_id1', 0.6)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3, questionskilllink_model4,
questionskilllink_model5, questionskilllink_model6,
questionskilllink_model7, questionskilllink_model8])
def mock_random_sample(alist, num):
if num >= len(alist):
return alist
alist.sort(key=lambda x: x.question_id)
return alist[:num]
sample_swap = self.swap(random, 'sample', mock_random_sample)
def mock_random_int(upper_bound):
return 1 if upper_bound > 1 else 0
random_int_swap = self.swap(utils, 'get_random_int', mock_random_int)
with sample_swap, random_int_swap:
question_skill_links_1 = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
3, ['skill_id1'], 0.6
)
)
self.assertEqual(len(question_skill_links_1), 3)
self.assertEqual(
question_skill_links_1,
[questionskilllink_model2, questionskilllink_model3,
questionskilllink_model4])
def test_request_too_many_skills_raises_error_when_fetch_by_difficulty(
self):
skill_ids = ['skill_id%s' % number for number in python_utils.RANGE(25)]
with self.assertRaisesRegexp(
Exception, 'Please keep the number of skill IDs below 20.'):
(question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
3, skill_ids, 0.6
))
def test_get_questions_with_no_skills(self):
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
1, [], 0.6
)
)
self.assertEqual(question_skill_links, [])
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_equidistributed_by_skill(1, []))
self.assertEqual(question_skill_links, [])
def test_get_more_question_skill_links_than_available(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id2', 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
# Testing for queries that retrieve more questions than available.
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
4, ['skill_id1', 'skill_id2'], 0.5
)
)
self.assertEqual(len(question_skill_links), 3)
self.assertTrue(questionskilllink_model1 in question_skill_links)
self.assertTrue(questionskilllink_model2 in question_skill_links)
self.assertTrue(questionskilllink_model3 in question_skill_links)
def test_get_question_skill_links_when_count_not_evenly_divisible(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id2', 0.8)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3])
# Testing for queries with not evenly divisible total_question_count.
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_based_on_difficulty_equidistributed_by_skill( # pylint: disable=line-too-long
3, ['skill_id1', 'skill_id2'], 0.5
)
)
self.assertEqual(len(question_skill_links), 3)
self.assertTrue(questionskilllink_model1 in question_skill_links)
self.assertTrue(questionskilllink_model2 in question_skill_links)
self.assertTrue(questionskilllink_model3 in question_skill_links)
def test_get_question_skill_links_equidistributed_by_skill(
self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id2', 0.8)
)
questionskilllink_model4 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id2', 0.9)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3, questionskilllink_model4])
question_skill_links = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_equidistributed_by_skill(
4, ['skill_id1', 'skill_id2']
)
)
self.assertEqual(len(question_skill_links), 3)
self.assertEqual(question_skill_links[0].skill_id, 'skill_id1')
self.assertEqual(question_skill_links[1].skill_id, 'skill_id1')
self.assertEqual(question_skill_links[2].skill_id, 'skill_id2')
# Test questions with multiple linked skills are deduplicated.
question_ids = [link.question_id for link in question_skill_links]
self.assertEqual(question_ids.count('question_id2'), 1)
def test_get_random_question_skill_links_equidistributed_by_skill(self):
questionskilllink_model1 = (
question_models.QuestionSkillLinkModel.create(
'question_id1', 'skill_id1', 0.1)
)
questionskilllink_model2 = (
question_models.QuestionSkillLinkModel.create(
'question_id2', 'skill_id1', 0.5)
)
questionskilllink_model3 = (
question_models.QuestionSkillLinkModel.create(
'question_id3', 'skill_id1', 0.8)
)
questionskilllink_model4 = (
question_models.QuestionSkillLinkModel.create(
'question_id4', 'skill_id1', 0.9)
)
questionskilllink_model5 = (
question_models.QuestionSkillLinkModel.create(
'question_id5', 'skill_id1', 0.6)
)
questionskilllink_model6 = (
question_models.QuestionSkillLinkModel.create(
'question_id6', 'skill_id1', 0.6)
)
questionskilllink_model7 = (
question_models.QuestionSkillLinkModel.create(
'question_id7', 'skill_id1', 0.6)
)
questionskilllink_model8 = (
question_models.QuestionSkillLinkModel.create(
'question_id8', 'skill_id1', 0.6)
)
question_models.QuestionSkillLinkModel.put_multi_question_skill_links(
[questionskilllink_model1, questionskilllink_model2,
questionskilllink_model3, questionskilllink_model4,
questionskilllink_model5, questionskilllink_model6,
questionskilllink_model7, questionskilllink_model8])
def mock_random_sample(alist, num):
if num >= len(alist):
return alist
alist.sort(key=lambda x: x.question_id)
return alist[:num]
sample_swap = self.swap(random, 'sample', mock_random_sample)
def mock_random_int(upper_bound):
return 1 if upper_bound > 1 else 0
random_int_swap = self.swap(utils, 'get_random_int', mock_random_int)
with sample_swap, random_int_swap:
question_skill_links_1 = (
question_models.QuestionSkillLinkModel.
get_question_skill_links_equidistributed_by_skill(
3, ['skill_id1']
)
)
self.assertEqual(len(question_skill_links_1), 3)
self.assertEqual(
question_skill_links_1,
[questionskilllink_model2, questionskilllink_model3,
questionskilllink_model4])
def test_request_too_many_skills_raises_error(self):
skill_ids = ['skill_id%s' % number for number in python_utils.RANGE(25)]
with self.assertRaisesRegexp(
Exception, 'Please keep the number of skill IDs below 20.'):
(question_models.QuestionSkillLinkModel.
get_question_skill_links_equidistributed_by_skill(
3, skill_ids))
class QuestionCommitLogEntryModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionCommitLogEntryModel class."""
def test_get_deletion_policy(self):
self.assertEqual(
question_models.QuestionCommitLogEntryModel.get_deletion_policy(),
base_models.DELETION_POLICY.KEEP_IF_PUBLIC)
def test_has_reference_to_user_id(self):
commit = question_models.QuestionCommitLogEntryModel.create(
'b', 0, 'committer_id', 'msg', 'create', [{}],
constants.ACTIVITY_STATUS_PUBLIC, False)
commit.question_id = 'b'
commit.put()
self.assertTrue(
question_models.QuestionCommitLogEntryModel
.has_reference_to_user_id('committer_id'))
self.assertFalse(
question_models.QuestionCommitLogEntryModel
.has_reference_to_user_id('x_id'))
class QuestionSummaryModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionSummaryModel class."""
def test_get_deletion_policy(self):
self.assertEqual(
question_models.QuestionSummaryModel.get_deletion_policy(),
base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE)
def test_has_reference_to_user_id(self):
question_summary_model = question_models.QuestionSummaryModel(
id='question',
question_content='Question',
question_model_created_on=datetime.datetime.utcnow(),
question_model_last_updated=datetime.datetime.utcnow()
)
question_summary_model.put()
self.assertFalse(
question_models.QuestionSummaryModel
.has_reference_to_user_id('user_id_x'))
| 43.24184
| 121
| 0.662275
|
ca463018ed81e1ecb04bd3044b01c99b910c1699
| 1,227
|
py
|
Python
|
Tests/notion_block_test.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | 1
|
2022-02-10T15:35:22.000Z
|
2022-02-10T15:35:22.000Z
|
Tests/notion_block_test.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | null | null | null |
Tests/notion_block_test.py
|
GatherStar/notion-dump-kernel
|
8ae9a53dfd8ad7beddbe53433ae1c44b58fdc606
|
[
"MIT"
] | null | null | null |
import logging
import NotionDump
from NotionDump.Dump.dump import Dump
from NotionDump.Notion.Notion import NotionQuery
TOKEN_TEST = "secret_WRLJ9xyEawNxzRhVHVWfciTl9FAyNCd29GMUvr2hQD4"
TABLE_ID = "13b914160ef740dcb64e55c5393762fa"
RER_LIST_ID = "d32db4693409464b9981caec9ef11974"
# Page table test
def test_get_table_block(query):
block_handle = Dump(
dump_id=TABLE_ID,
query_handle=query,
export_child_pages=True,
dump_type=NotionDump.DUMP_TYPE_BLOCK
)
page_detail_json = block_handle.dump_to_file()
# Sample output
print("page table test")
print(page_detail_json)
# Recursive list test
def test_get_rer_list(query):
print("page rer list test")
block_handle = Dump(
dump_id=RER_LIST_ID,
query_handle=query,
export_child_pages=True,
dump_type=NotionDump.DUMP_TYPE_BLOCK
)
page_detail_json = block_handle.dump_to_file()
# Sample output
print(page_detail_json)
if __name__ == '__main__':
query_handle = NotionQuery(token=TOKEN_TEST)
if query_handle is None:
logging.exception("query handle init error")
exit(-1)
# Test fetching the raw database data
test_get_table_block(query_handle)
# Test parsing the database content
test_get_rer_list(query_handle)
| 23.596154
| 65
| 0.724531
|
1091aca97f6bd8e497daf05179e65d0bec72043f
| 2,328
|
py
|
Python
|
legacy/extract_chief.py
|
Archaeoraptor/tools
|
36d09a66b552454a9bdfdb8aea32cd2b3822a899
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T08:03:55.000Z
|
2021-06-09T08:03:55.000Z
|
legacy/extract_chief.py
|
Archaeoraptor/tools
|
36d09a66b552454a9bdfdb8aea32cd2b3822a899
|
[
"BSD-3-Clause"
] | null | null | null |
legacy/extract_chief.py
|
Archaeoraptor/tools
|
36d09a66b552454a9bdfdb8aea32cd2b3822a899
|
[
"BSD-3-Clause"
] | 1
|
2021-07-01T07:51:30.000Z
|
2021-07-01T07:51:30.000Z
|
#coding=utf-8
# from bs4 import BeautifulSoup
# import xml.etree.ElementTree as ET
import regex
import os
import sys
regex.purge()
# path = 'C:/Users/zjk/Desktop/xml/422.html'
path = 'C:/Users/zjk/Desktop/xml/Data1/00090036.xml'
# htmlfile = open(path, 'r', encoding='utf-8')
xmlread = open(path, 'r', encoding='utf-8')
xmlfile = xmlread.read()
# un-escape XML entities back to literal characters
xmlfile1 = regex.sub('&amp;', '&', xmlfile)
xmlfile2 = regex.sub('&lt;', '<', xmlfile1)
xmlfile3 = regex.sub('&gt;', '>', xmlfile2)
htmlhandle = xmlfile3
f = open('./Data1/test.xml','w',encoding='UTF-8-sig')
f.write(xmlfile3)
f.close()
## process the html file
# chief = r'<FONT.*?>.*?</FONT>'
chief_compliant = r'(?<=<STRONG>主诉:</STRONG>).*?(?=</FONT>)|(?<=<STRONG>主诉:</STRONG>).*?(?=</FONT>)'
past_history = r'(?<=<STRONG>既往史:</STRONG>).*?(?=</FONT>)|(?<=<STRONG>既往史:</STRONG>).*?(?=</FONT>)'
personal_history = r'(?<=<STRONG> 个人史:</STRONG>).*?(?=</FONT>)|(?<=<STRONG> 个人史:</STRONG>).*?(?=</FONT>)'
current_medical_history = r'(?<=<STRONG>现病史:</STRONG>).*?(?=</FONT>)|(?<=<STRONG>现病史:</STRONG>).*?(?=</FONT>)'
# family_medical_history = r'(?<=<STRONG> 家族史:</STRONG>).*?(?=</FONT>)|(?<=<STRONG> 家族史:</STRONG>).*?(?=</FONT>)'
family_medical_history = r'(?<=家族史:</STRONG>).*?(?=</FONT>)|(?<=家族史:</STRONG>).*?(?=</FONT>)'
menstruation_and_marriage_history = r'<STRONG>月经及婚育史.*?</STRONG>.*?(?=</FONT>)'
medical_record_number = r'<病案号.*?>.*?</病案号>'
medical_record_number_temp = regex.findall(medical_record_number, htmlhandle)
if medical_record_number_temp:
modified_number = r'\d{6}(?=</病案号>)'
print("病案号:", regex.findall(modified_number, medical_record_number_temp[0]))
else:
print("病案号:", "Null")
print("主诉:", regex.findall(chief_compliant, htmlhandle))
print("既往史:", regex.findall(past_history, htmlhandle))
print("个人史:", regex.findall(personal_history, htmlhandle))
print("现病史:", regex.findall(current_medical_history, htmlhandle))
print("家族史:", regex.findall(family_medical_history, htmlhandle))
# print("月经及婚史1:", regex.findall(menstruation_and_marriage_history, htmlhandle))
modified = regex.findall(menstruation_and_marriage_history, htmlhandle)
# print(modified)
if modified:
modified_reg = r'(?<=</STRONG>).*?(?<=</STRONG>).*'
print("月经及婚史:", regex.findall(modified_reg, modified[0]))
else:
print("月经及婚史:", "Null")
| 35.272727
| 123
| 0.658076
|