blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
49c8129426eeffff345ae304991e2b9e7e5cf774 | 147b94f148dcaf10dbc3dfbcf571c1fa6d47a115 | /code/enclosure_test.py | 26a9cf737d3f2212aab46b39dd207bbf3572cbeb | [] | no_license | indra-n/env-context-detection | 4ad608af9a7b32920d57339d7b9e450862622b46 | 433d377448c3e807ac0ff833a4f1733e12e6931a | refs/heads/master | 2021-01-22T01:38:33.787931 | 2017-09-02T21:27:39 | 2017-09-02T21:27:39 | 102,221,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,074 | py | import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import random
import csv
# Seed both RNGs. random.seed only covers the stdlib RNG; pandas'
# DataFrame.sample (used for every train/test split below) draws from
# numpy's global RNG, so numpy must be seeded too or the splits are
# not reproducible despite the seed call.
random.seed(42)
np.random.seed(42)
# Tests if location is surrounded by walls
#Load the data
# Windows-style paths to the per-location GNSS observation feature files.
# Raw strings are used so backslashes are literal: the previous plain
# strings relied on Python leaving invalid escape sequences (e.g. '\d',
# '\i') untouched, which is a DeprecationWarning and will become a
# SyntaxError. Values are byte-identical to the originals.
fp_indoor_cutsark_in1_sp = r'..\data_Greenwich\indoor\CuttySark_front\CuttySark_front_P2_(inside)\Obs_features_1'
fp_indoor_cutsark_in2_sp = r'..\data_Greenwich\indoor\CuttySark_front\CuttySark_front_P2_(inside)\Obs_features_2'
fp_indoor_market_gr1_sp = r'..\data_Greenwich\indoor\GreenwichMarket\under_glass_roof_P2\Obs_features_1'
fp_indoor_market_gr2_sp = r'..\data_Greenwich\indoor\GreenwichMarket\under_glass_roof_P2\Obs_features_2'
fp_indoor_museum_gr1_sp = r'..\data_Greenwich\indoor\MaritimeMuseum\hall_underGlassRoof\Obs_features_1'
fp_indoor_museum_gr2_sp = r'..\data_Greenwich\indoor\MaritimeMuseum\hall_underGlassRoof\Obs_features_2'
fp_indoor_museum_lw1_sp = r'..\data_Greenwich\indoor\MaritimeMuseum\under_light_well\Obs_features_1'
fp_indoor_museum_lw2_sp = r'..\data_Greenwich\indoor\MaritimeMuseum\under_light_well\Obs_features_2'
fp_inter_path1_sp = r'..\data_Greenwich\intermediate\covered_path_byGym\Obs_features_1'
fp_inter_path2_sp = r'..\data_Greenwich\intermediate\covered_path_byGym\Obs_features_2'
fp_inter_dept3_sp = r'..\data_Greenwich\intermediate\Deptford_TrainStation\P3\Obs_features'
fp_inter_GreenTS_p1_1_sp = r'..\data_Greenwich\intermediate\Greenwich_TrainStation\P1\Obs_features_1'
fp_inter_GreenTS_p1_2_sp = r'..\data_Greenwich\intermediate\Greenwich_TrainStation\P1\Obs_features_2'
fp_inter_GreenTS_p2_1_sp = r'..\data_Greenwich\intermediate\Greenwich_TrainStation\P2\Obs_features_1'
fp_inter_GreenTS_p2_2_sp = r'..\data_Greenwich\intermediate\Greenwich_TrainStation\P2\Obs_features_2'
fp_inter_market_aw1_sp = r'..\data_Greenwich\intermediate\GreenwichMarket\entrance_archway_P1\Obs_features_1'
fp_inter_market_aw2_sp = r'..\data_Greenwich\intermediate\GreenwichMarket\entrance_archway_P1\Obs_features_2'
fp_inter_park_dark1_sp = r'..\data_Greenwich\intermediate\GreenwichPark\tree_cover_dark\Obs_features_1'
fp_inter_park_dark2_sp = r'..\data_Greenwich\intermediate\GreenwichPark\tree_cover_dark\Obs_features_2'
fp_inter_park_light1_sp = r'..\data_Greenwich\intermediate\GreenwichPark\tree_cover_lighter\Obs_features_1'
fp_inter_park_light2_sp = r'..\data_Greenwich\intermediate\GreenwichPark\tree_cover_lighter\Obs_features_2'
fp_inter_queens_arch_sp = r'..\data_Greenwich\intermediate\QueensHouse\archway\Obs_features'
fp_inter_queens_col1_sp = r'..\data_Greenwich\intermediate\QueensHouse\colonnade\Obs_features_1'
fp_inter_queens_col2_sp = r'..\data_Greenwich\intermediate\QueensHouse\colonnade\Obs_features_2'
fp_open_park1_sp = r'..\data_Greenwich\open_sky\GreenwichPark\open\Obs_features_1'
fp_open_park2_sp = r'..\data_Greenwich\open_sky\GreenwichPark\open\Obs_features_2'
fp_urban_sl1_sp = r'..\data_Greenwich\urban\behind_SailLoftPub\Obs_features_1'
fp_urban_sl2_sp = r'..\data_Greenwich\urban\behind_SailLoftPub\Obs_features_2'
fp_urban_cutsark_out1_sp = r'..\data_Greenwich\urban\CuttySark_front\CuttySark_front_P1_(outside)\Obs_features_1'
fp_urban_cutsark_out2_sp = r'..\data_Greenwich\urban\CuttySark_front\CuttySark_front_P1_(outside)\Obs_features_2'
fp_urban_dept1_sp = r'..\data_Greenwich\urban\Deptford_TrainStation\P1\Obs_features'
fp_urban_dept2_sp = r'..\data_Greenwich\urban\Deptford_TrainStation\P2\Obs_features'
fp_urban_GreenTS_p3_1_sp = r'..\data_Greenwich\urban\Greenwich_TrainStation\P3\Obs_features_1'
fp_urban_GreenTS_p3_2_sp = r'..\data_Greenwich\urban\Greenwich_TrainStation\P3\Obs_features_2'
fp_urban_queens_court_sp = r'..\data_Greenwich\urban\QueensHouse\courtyard\Obs_features'
# Load in dataframe
#######
# Enclosure labels
# 0 - no enclosure
# 1 - light enclosure (glass walls, open side etc.)
# 2 - enclosing walls
df_indoor_cutsark_in1 = pd.read_csv(fp_indoor_cutsark_in1_sp)
df_indoor_cutsark_in2 = pd.read_csv(fp_indoor_cutsark_in2_sp)
df_indoor_market_gr1 = pd.read_csv(fp_indoor_market_gr1_sp)
df_indoor_market_gr2 = pd.read_csv(fp_indoor_market_gr2_sp)
df_indoor_museum_gr1 = pd.read_csv(fp_indoor_museum_gr1_sp)
df_indoor_museum_gr2 = pd.read_csv(fp_indoor_museum_gr2_sp)
df_indoor_museum_lw1 = pd.read_csv(fp_indoor_museum_lw1_sp)
df_indoor_museum_lw2 = pd.read_csv(fp_indoor_museum_lw2_sp)
# Ground-truth enclosure label per recording session (see legend above).
df_indoor_cutsark_in1['true_class'] = 1
df_indoor_cutsark_in2['true_class'] = 1
df_indoor_market_gr1['true_class'] = 2
df_indoor_market_gr2['true_class'] = 2
df_indoor_museum_gr1['true_class'] = 2
df_indoor_museum_gr2['true_class'] = 2
df_indoor_museum_lw1['true_class'] = 1
df_indoor_museum_lw2['true_class'] = 1
df_inter_path1 = pd.read_csv(fp_inter_path1_sp)
df_inter_path2 = pd.read_csv(fp_inter_path2_sp)
df_inter_dept3 = pd.read_csv(fp_inter_dept3_sp)
df_inter_GreenTS_p1_1 = pd.read_csv(fp_inter_GreenTS_p1_1_sp)
df_inter_GreenTS_p1_2 = pd.read_csv(fp_inter_GreenTS_p1_2_sp)
df_inter_GreenTS_p2_1 = pd.read_csv(fp_inter_GreenTS_p2_1_sp)
df_inter_GreenTS_p2_2 = pd.read_csv(fp_inter_GreenTS_p2_2_sp)
df_inter_market_aw1 = pd.read_csv(fp_inter_market_aw1_sp)
df_inter_market_aw2 = pd.read_csv(fp_inter_market_aw2_sp)
df_inter_park_dark1 = pd.read_csv(fp_inter_park_dark1_sp)
df_inter_park_dark2 = pd.read_csv(fp_inter_park_dark2_sp)
df_inter_park_light1 = pd.read_csv(fp_inter_park_light1_sp)
df_inter_park_light2 = pd.read_csv(fp_inter_park_light2_sp)
df_inter_queens_arch = pd.read_csv(fp_inter_queens_arch_sp)
df_inter_queens_col1 = pd.read_csv(fp_inter_queens_col1_sp)
# Only the first 67 rows of this session are kept — presumably trims
# observations recorded outside the location; TODO confirm against the log.
df_inter_queens_col2 = pd.read_csv(fp_inter_queens_col2_sp).iloc[:67]
df_inter_path1['true_class'] = 1
df_inter_path2['true_class'] = 1
df_inter_dept3['true_class'] = 1
df_inter_GreenTS_p1_1['true_class'] = 1
df_inter_GreenTS_p1_2['true_class'] = 1
df_inter_GreenTS_p2_1['true_class'] = 1
df_inter_GreenTS_p2_2['true_class'] = 1
df_inter_market_aw1['true_class'] = 1
df_inter_market_aw2['true_class'] = 1
df_inter_park_dark1['true_class'] = 1
df_inter_park_dark2['true_class'] = 1
df_inter_park_light1['true_class'] = 0
df_inter_park_light2['true_class'] = 0
df_inter_queens_arch['true_class'] = 2
df_inter_queens_col1['true_class'] = 1
df_inter_queens_col2['true_class'] = 1
df_open_park1 = pd.read_csv(fp_open_park1_sp)
df_open_park2 = pd.read_csv(fp_open_park2_sp)
df_open_park1['true_class'] = 0
df_open_park2['true_class'] = 0
df_urban_sl1 = pd.read_csv(fp_urban_sl1_sp)
df_urban_sl2 = pd.read_csv(fp_urban_sl2_sp)
# Only rows 0..37 kept for this session — presumably the same kind of
# trailing-rows trim as above; TODO confirm.
df_urban_cutsark_out1 = pd.read_csv(fp_urban_cutsark_out1_sp).iloc[0:38]
df_urban_cutsark_out2 = pd.read_csv(fp_urban_cutsark_out2_sp)
df_urban_dept1 = pd.read_csv(fp_urban_dept1_sp)
df_urban_dept2 = pd.read_csv(fp_urban_dept2_sp)
df_urban_GreenTS_p3_1 = pd.read_csv(fp_urban_GreenTS_p3_1_sp)
df_urban_GreenTS_p3_2 = pd.read_csv(fp_urban_GreenTS_p3_2_sp)
df_urban_queens_court = pd.read_csv(fp_urban_queens_court_sp)
df_urban_sl1['true_class'] = 1
df_urban_sl2['true_class'] = 1
df_urban_cutsark_out1['true_class'] = 0
df_urban_cutsark_out2['true_class'] = 0
df_urban_dept1['true_class'] = 0
df_urban_dept2['true_class'] = 0
df_urban_GreenTS_p3_1['true_class'] = 0
df_urban_GreenTS_p3_2['true_class'] = 0
df_urban_queens_court['true_class'] = 2
# Earlier feature-set experiments, kept for reference:
#cols = ['obs_id', 'e_id', 'sv_prn', 'constell_id', 'azimuth', 'elevation', 'CN0']
# cols = ['sv_prn', 'constell_id', 'azimuth', 'elevation', 'CN0']
# cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25', 'elev_0_30', 'elev_30_60', 'elev_60_90',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
# cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
# cols=['num_sat_25', 'sum_snr_25',
#       'elev_0_30_25', 'elev_30_60_25', 'elev_60_90_25']
# Feature columns used to train the classifier below.
cols=['num_sat', 'sum_snr', 'num_sat_25', 'sum_snr_25', 'elev_0_30',
      'elev_0_30_25']
#######
# Location values
# NOTE(review): the codes appear to encode <site><spot><session> as three
# digits — confirm with the data-collection notes.
df_indoor_cutsark_in1['location'] = 321
df_indoor_cutsark_in2['location'] = 322
df_indoor_market_gr1['location'] = 421
df_indoor_market_gr2['location'] = 422
df_indoor_museum_gr1['location'] = 511
df_indoor_museum_gr2['location'] = 512
df_indoor_museum_lw1['location'] = 521
df_indoor_museum_lw2['location'] = 522
df_inter_path1['location'] = 211
df_inter_path2['location'] = 212
df_inter_dept3['location'] = 931
df_inter_GreenTS_p1_1['location'] = 811
df_inter_GreenTS_p1_2['location'] = 812
df_inter_GreenTS_p2_1['location'] = 821
df_inter_GreenTS_p2_2['location'] = 822
df_inter_market_aw1['location'] = 411
df_inter_market_aw2['location'] = 412
df_inter_park_dark1['location'] = 721
df_inter_park_dark2['location'] = 722
df_inter_park_light1['location'] = 731
df_inter_park_light2['location'] = 732
df_inter_queens_arch['location'] = 631
df_inter_queens_col1['location'] = 611
df_inter_queens_col2['location'] = 612
df_open_park1['location'] = 711
df_open_park2['location'] = 712
df_urban_sl1['location'] = 111
df_urban_sl2['location'] = 112
df_urban_cutsark_out1['location'] = 311
df_urban_cutsark_out2['location'] = 312
df_urban_dept1['location'] = 911
df_urban_dept2['location'] = 921
df_urban_GreenTS_p3_1['location'] = 831
df_urban_GreenTS_p3_2['location'] = 832
df_urban_queens_court['location'] = 621
#######
# Alternative assignments
# 1- indoor
# 2- inbetween
# 3- urban
# 4- open sky
# 5- i don't know
# df_indoor_cutsark_in1['true_class'] = 1
# df_indoor_cutsark_in2['true_class'] = 1
# df_indoor_market_gr1['true_class'] = 1
# df_indoor_market_gr2['true_class'] = 1
# df_indoor_museum_gr1['true_class'] = 1
# df_indoor_museum_gr2['true_class'] = 1
# df_indoor_museum_lw1['true_class'] = 2
# df_indoor_museum_lw2['true_class'] = 2
# # df_indoor_cutsark_in1['true_class'] = 5
# # df_indoor_cutsark_in2['true_class'] = 5
# # df_indoor_market_gr1['true_class'] = 5
# # df_indoor_market_gr2['true_class'] = 5
# # df_indoor_museum_gr1['true_class'] = 5
# # df_indoor_museum_gr2['true_class'] = 5
# # df_indoor_museum_lw1['true_class'] = 5
# # df_indoor_museum_lw2['true_class'] = 5
#
# df_inter_path1['true_class'] = 3
# df_inter_path2['true_class'] = 3
# df_inter_dept3['true_class'] = 3
# df_inter_GreenTS_p1_1['true_class'] = 3
# df_inter_GreenTS_p1_2['true_class'] = 3
# df_inter_GreenTS_p2_1['true_class'] = 3
# df_inter_GreenTS_p2_2['true_class'] = 3
# df_inter_market_aw1['true_class'] = 2
# df_inter_market_aw2['true_class'] = 2
# # df_inter_market_aw1['true_class'] = 5
# # df_inter_market_aw2['true_class'] = 5
#
# df_inter_park_dark1['true_class'] = 2
# df_inter_park_dark2['true_class'] = 2
# # df_inter_park_dark1['true_class'] = 5
# # df_inter_park_dark2['true_class'] = 5
#
# df_inter_park_light1['true_class'] = 3
# df_inter_park_light2['true_class'] = 3
#
# df_inter_queens_arch['true_class'] = 2
# #df_inter_queens_arch['true_class'] = 5
#
# df_inter_queens_col1['true_class'] = 3
# df_inter_queens_col2['true_class'] = 3
#
# df_urban_sl1['true_class'] = 3
# df_urban_sl2['true_class'] = 3
# df_urban_cutsark_out1['true_class'] = 3
# df_urban_cutsark_out2['true_class'] = 3
# df_urban_dept1['true_class'] = 4
# df_urban_dept2['true_class'] = 3
# df_urban_GreenTS_p3_1['true_class'] = 3
# df_urban_GreenTS_p3_2['true_class'] = 3
# df_urban_queens_court['true_class'] = 2
#
# df_open_park1['true_class'] = 4
# df_open_park2['true_class'] = 4
# Split training and test data.
# NOTE: pd.concat of the two per-session frames keeps duplicate index
# labels, so .drop(train.index) removes every row sharing a label with a
# sampled training row (possibly from both sessions). That guarantees the
# test sample never overlaps the training sample, at the cost of dropping
# a few extra candidate test rows.
df_indoor_cutsark_in = pd.concat([df_indoor_cutsark_in1, df_indoor_cutsark_in2])
train_indoor_1 = df_indoor_cutsark_in.sample(60)
test_indoor_1 = df_indoor_cutsark_in.drop(train_indoor_1.index).sample(60)
df_indoor_market_gr = pd.concat([df_indoor_market_gr1, df_indoor_market_gr2])
train_indoor_2 = df_indoor_market_gr.sample(40)
test_indoor_2 = df_indoor_market_gr.drop(train_indoor_2.index).sample(60)
df_indoor_museum_gr = pd.concat([df_indoor_museum_gr1, df_indoor_museum_gr2])
train_indoor_3 = df_indoor_museum_gr.sample(60)
# BUG FIX: previously dropped the training indices from df_indoor_museum_gr2
# only, so test rows could overlap training rows drawn from session 1 (and
# drop could KeyError on labels absent from session 2). Drop from the
# combined frame, matching every other location.
test_indoor_3 = df_indoor_museum_gr.drop(train_indoor_3.index).sample(60)
train_indoor_4 = df_indoor_museum_lw1.sample(30)
test_indoor_4 = df_indoor_museum_lw2.sample(15)
df_inter_path = pd.concat([df_inter_path1, df_inter_path2])
train_inter_1 = df_inter_path.sample(40)
test_inter_1 = df_inter_path.drop(train_inter_1.index).sample(60)
test_inter_2 = df_inter_dept3.sample(60)
df_inter_GreenTS_p1 = pd.concat([df_inter_GreenTS_p1_1, df_inter_GreenTS_p1_2])
train_inter_2 = df_inter_GreenTS_p1.sample(60)
test_inter_3 = df_inter_GreenTS_p1.drop(train_inter_2.index).sample(60)
train_inter_3 = df_inter_GreenTS_p2_1.sample(60)
test_inter_4 = df_inter_GreenTS_p2_2.sample(60)
train_inter_4 = df_inter_market_aw1.sample(40)
test_inter_5 = df_inter_market_aw2.sample(60)
train_inter_5 = df_inter_park_dark1.sample(40)
test_inter_6 = df_inter_park_dark2.sample(60)
train_inter_6 = df_inter_park_light1.sample(60)
test_inter_9 = df_inter_park_light2.sample(60)
test_inter_7 = df_inter_queens_arch.sample(60)
train_inter_7 = df_inter_queens_col1.sample(60)
test_inter_8 = df_inter_queens_col2.sample(60)
df_urban_sl = pd.concat([df_urban_sl1, df_urban_sl2])
train_urban_1 = df_urban_sl.sample(60)
test_urban_1 = df_urban_sl.drop(train_urban_1.index).sample(60)
df_urban_cutsark_out = pd.concat([df_urban_cutsark_out1, df_urban_cutsark_out2])
train_urban_2 = df_urban_cutsark_out.sample(50)
# BUG FIX: same as above — drop the training indices from the combined
# cutsark frame, not from session 2 alone.
test_urban_2 = df_urban_cutsark_out.drop(train_urban_2.index).sample(50)
train_urban_3 = df_urban_dept1.sample(60)
test_urban_3 = df_urban_dept2.sample(60)
train_urban_4 = df_urban_GreenTS_p3_1.sample(40)
test_urban_4 = df_urban_GreenTS_p3_2.sample(60)
train_urban_5 = df_urban_queens_court.sample(60)
train_open = df_open_park1.sample(60)
test_open = df_open_park2.sample(60)
#########
# train_indoor_bm = df_indoor_bm.sample(100)
# train_indoor_ch2221 = df_indoor_ch2221.sample(100)
# train_indoor_ch103a = df_indoor_ch103a.sample(100)
# train_indoor_jah = df_indoor_jah.sample(100)
#
# test_indoor_bm = df_indoor_bm.drop(train_indoor_bm.index).sample(100)
# test_indoor_ch2221 = df_indoor_ch2221.drop(train_indoor_ch2221.index).sample(100)
# test_indoor_ch103a = df_indoor_ch103a.drop(train_indoor_ch103a.index).sample(100)
# test_indoor_ch103b = df_indoor_ch103b.sample(100)
# test_indoor_jah = df_indoor_jah.drop(train_indoor_jah.index).sample(100)
#
# train_inter = df_inter.sample(100)
# test_inter = df_inter.drop(train_inter.index).sample(100)
#
# train_urban_p1b = df_urban_p1b.sample(100)
# train_urban_p2b = df_urban_p2b.sample(100)
# train_urban_p4b = df_urban_p4b.sample(100)
#
# test_urban_p1b = df_urban_p1b.drop(train_urban_p1b.index).sample(100)
# test_urban_p2b = df_urban_p2b.drop(train_urban_p2b.index).sample(100)
# test_urban_p3b = df_urban_p3b.sample(100)
# test_urban_p4b = df_urban_p4b.drop(train_urban_p4b.index).sample(100)
#
# train_open_reg = df_open_reg.sample(100)
# test_open_hyde = df_open_hyde.sample(100)
# train_df = [train_indoor_bm, train_indoor_ch2221, train_indoor_ch103a, train_indoor_jah, train_inter, train_urban_p1b,
#             train_urban_p2b, train_urban_p4b, train_open_reg]
# NOTE(review): test_urban_3 is included in train_df here AND in test_df
# below — the same sampled rows end up in both training and evaluation,
# which leaks test data into the model. Confirm whether this was a
# deliberate experiment; otherwise remove it from train_df.
train_df = [train_indoor_2, train_indoor_3, train_indoor_4, train_inter_1, train_inter_2, train_inter_3,
            train_inter_4, train_inter_5, train_inter_6, train_inter_7, train_urban_1, train_urban_2, train_urban_3,
            train_urban_4, train_urban_5, train_open, test_urban_3]
# train_df = [train_indoor_1, train_indoor_2, train_indoor_3, train_indoor_4, train_inter_1, train_inter_2, train_inter_3,
#             train_inter_4, train_inter_5, train_inter_6, train_inter_7, train_urban_1, train_urban_2, train_urban_3,
#             train_urban_4, train_urban_5, train_open]
# Shuffle the combined training rows (sample(frac=1)) and renumber them.
train_data = pd.concat(train_df).sample(frac=1).reset_index(drop=True)
# test_df = [test_indoor_bm, test_indoor_ch2221, test_indoor_ch103a, test_indoor_ch103b, test_indoor_jah, test_inter,
#            test_urban_p1b, test_urban_p2b, test_urban_p3b, test_urban_p4b, test_open_hyde]
test_df = [test_indoor_2, test_indoor_3, test_indoor_4, test_inter_1, test_inter_2, test_inter_3,
           test_inter_4, test_inter_5, test_inter_6, test_inter_7, test_inter_8, test_inter_9, test_urban_1,
           test_urban_2, test_urban_3, test_urban_4, test_open]
# test_df = [test_indoor_1, test_indoor_2, test_indoor_3, test_indoor_4, test_inter_1, test_inter_2, test_inter_3,
#            test_inter_4, test_inter_5, test_inter_6, test_inter_7, test_inter_8, test_inter_9, test_urban_1,
#            test_urban_2, test_urban_3, test_urban_4, test_open]
test_data = pd.concat(test_df).sample(frac=1).reset_index(drop=True)
# Train a random forest on the selected feature columns and evaluate it.
forest = ensemble.RandomForestClassifier(n_estimators=100)
forest.fit(train_data[cols], train_data['true_class'])
pred = forest.predict(test_data[cols])
pred_probas = forest.predict_proba(test_data[cols])
# Separate evaluation on the Queen's House archway session (test_inter_7).
pred_probas_dept = forest.predict_proba(test_inter_7[cols])
pred_dept = forest.predict(test_inter_7[cols])
# Labels are numeric (0/1/2), so |pred - truth| is nonzero exactly for
# misclassified rows; accuracy = 1 - error rate.
differ_dept = abs(pred_dept - test_inter_7['true_class'])
accu_dept = 1 - np.count_nonzero(differ_dept) / test_inter_7.shape[0]
differ = abs(pred - test_data['true_class'])
accu = 1 - np.count_nonzero(differ) / test_data.shape[0]
print(accu)
print(accu_dept)
# Which locations produced the misclassified rows?
wrong_pred = test_data[differ != 0]
print(wrong_pred.shape)
print(wrong_pred['location'].value_counts())
cm = confusion_matrix(test_data['true_class'], pred)
print(cm)
# Row-normalise the confusion matrix; the (3, 1) reshape assumes exactly
# three classes are present in the test labels.
cm_proc = cm / np.sum(cm, axis=1).reshape((3, 1))
print(cm_proc)
# print(pred_probas_dept)
# for i in range(1000):
#     if differ[i] != 0:
#         print(test_data['true_class'][i])
#         print(pred_probas[i])
# Feature importances with inter-tree standard deviation as the error bar.
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(test_data[cols].shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(test_data[cols].shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(test_data[cols].shape[1]), indices)
plt.xlim([-1, test_data[cols].shape[1]])
plt.show() | [
"indra.niedre@gmail.com"
] | indra.niedre@gmail.com |
2f827b2603f3b2e2da3faa274a618d5620244e37 | 6b2794ac7ee275654f753659c83e9c6f115b4bbc | /budget/migrations/0008_auto_20190311_1818.py | d6e2d7ac6e8893eb63a9eb2da9d501d480441d49 | [] | no_license | mtmbutler/simplefi | 5ae667b93a69b77070652ecf6d1808badc68cc46 | e1afd06c525a1231a01dd4760d2aa145c9862be9 | refs/heads/main | 2021-06-25T01:27:32.008217 | 2020-12-22T18:48:30 | 2020-12-22T18:48:30 | 183,545,508 | 1 | 1 | null | 2020-12-24T17:21:16 | 2019-04-26T02:51:31 | Python | UTF-8 | Python | false | false | 1,214 | py | # Generated by Django 2.1.7 on 2019-03-12 01:18
from django.db import migrations, models
# Auto-generated Django schema migration for the `budget` app: adds
# ordering and a uniqueness constraint to Statement, and tweaks Account
# field metadata. Do not hand-edit the operations; generate a follow-up
# migration instead.
class Migration(migrations.Migration):
    dependencies = [
        ('budget', '0007_auto_20190311_0740'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='statement',
            options={'ordering': ['-date']},
        ),
        migrations.AlterField(
            model_name='account',
            name='annual_fee',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9, verbose_name='Annual Fee ($)'),
        ),
        migrations.AlterField(
            model_name='account',
            name='interest_rate',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=9, verbose_name='Interest Rate (%)'),
        ),
        migrations.AlterField(
            model_name='account',
            name='statement_date',
            field=models.PositiveSmallIntegerField(default=1, help_text='The numbered day of each month that your statement posts.', verbose_name='Statement Date'),
        ),
        migrations.AlterUniqueTogether(
            name='statement',
            unique_together={('account', 'date')},
        ),
    ]
| [
"mtmbutler@icloud.com"
] | mtmbutler@icloud.com |
0ab0e12de59845ee780146749d355e57acb66977 | 1783102b22fae04b4c32552d9b6191cc18ef6eee | /app/map_maker_app.py | 51fbfc331ab15d54e43e89c3068c288cbbe14ab8 | [] | no_license | ryanbeales/photo_library | 61b3f13db19ccd3e4cd45441aceced9793ce2b5f | 41295b26d7fde5a1783fce72e932472ec704bd3b | refs/heads/main | 2023-08-16T22:00:24.539270 | 2021-10-15T14:58:09 | 2021-10-15T14:58:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | from config import config
from processed_images.processed_images import LockingProcessedImages
from progress.bar import Bar
from datetime import datetime
import folium
import folium.plugins as folium_plugins
import os
import base64
import io
from PIL import Image
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import logging
logger = logging.getLogger(__name__)
def make_popup(imagedata):
    """Build a folium Popup containing a 128x128 JPEG thumbnail.

    `imagedata` is a base64-encoded image; it is decoded, thumbnailed with
    PIL, re-encoded as base64 JPEG, and embedded as an <img> tag inside an
    IFrame sized slightly larger than the thumbnail.
    """
    thumb_w = thumb_h = 128
    raw = base64.b64decode(imagedata)
    thumb = Image.open(io.BytesIO(raw))
    # thumbnail() resizes in place, preserving aspect ratio.
    thumb.thumbnail((thumb_w, thumb_h))
    jpeg_buf = io.BytesIO()
    thumb.save(jpeg_buf, format="JPEG")
    encoded = base64.b64encode(jpeg_buf.getvalue()).decode('utf-8')
    img_tag = '<img src="data:image/jpeg;base64,{}">'.format(encoded)
    frame = folium.IFrame(img_tag, width=thumb_w + 20, height=thumb_h + 20)
    return folium.Popup(frame, max_width=thumb_w + 20)
def single_image_process(photos, photo, progress_callback):
    """Fetch one photo record and build its marker components.

    Returns a ([lat, lon], Popup, Icon) tuple for the photo and invokes
    progress_callback once to advance the shared progress bar.
    """
    record = photos.retrieve(photo)
    marker_location = [record.latitude, record.longitude]
    marker_popup = make_popup(record.thumbnail)
    marker_icon = folium.Icon(color='red', icon='ok')
    progress_callback()
    return marker_location, marker_popup, marker_icon
def date_range_map(photos, start_date, end_date):
    """Build a marker-cluster HTML map of all photos in [start_date, end_date].

    Markers are built concurrently in a thread pool and the result is saved
    to <output_dir>/marker_cluster.html.
    """
    print(f'Generating marker cluster map for date range: {start_date} - {end_date}')
    photodaterange = photos.get_file_list_date_range(start_date, end_date)
    mapdata = []
    mappopups = []
    mapicons = []
    print('Launching threads to process markers')
    # NOTE(review): progress.next is invoked from worker threads — assumes
    # Bar.next is safe to call concurrently; confirm.
    progress = Bar('Making markers', width=110, max=len(photodaterange), suffix='%(index)d/%(max)d - %(eta)ds')
    with ThreadPoolExecutor() as executor:
        results = [
            executor.submit(
                single_image_process,
                photos,
                photo,
                progress.next
            )
            for photo in photodaterange
        ]
        wait(results, return_when=ALL_COMPLETED)
    print('Threads completed, getting results')
    for result in results:
        # result.result() re-raises any exception from the worker here.
        # The tuple returned by single_image_process is always truthy, so
        # this guard only matters if a worker somehow returned None.
        if result.result():
            location, popup, icon = result.result()
            mapdata.append(location)
            mappopups.append(popup)
            mapicons.append(icon)
    progress.finish()
    print('Adding points to map...')
    mc = folium_plugins.MarkerCluster(
        locations = mapdata,
        popups = mappopups,
        icons = mapicons
    )
    m = folium.Map(control_scale=True)
    m.add_child(mc)
    m.save(config['DEFAULT']['output_dir'] + os.sep + 'marker_cluster.html')
    print('Marker cluster map generated!')
def heatmap(photos):
    """Render a heat map of every photo location and save it as HTML."""
    print('Generating heat map')
    fmap = folium.Map(control_scale=True)
    # Each location row holds (id, latitude, longitude, ...); HeatMap wants
    # [lat, lon] pairs.
    points = []
    for row in photos.get_locations():
        points.append([row[1], row[2]])
    fmap.add_child(folium_plugins.HeatMap(points))
    out_path = config['DEFAULT']['output_dir'] + os.sep + 'heatmap.html'
    fmap.save(out_path)
    print('Done generating heat map')
if __name__ == '__main__':
    # Open the processed-images database and run whichever map generators
    # are enabled in the config file.
    photos = LockingProcessedImages(db_dir=config['photo_database']['database_dir'])
    photos.load()
    if config['map_maker'].getboolean('heatmap'):
        heatmap(photos)
    if config['map_maker'].getboolean('date_range_map'):
        # Config dates are day-first, e.g. 31-12-2020.
        start_date = datetime.strptime(config['map_maker']['date_range_start'], '%d-%m-%Y')
        end_date = datetime.strptime(config['map_maker']['date_range_end'], '%d-%m-%Y')
        date_range_map(photos, start_date, end_date)
photos.close() | [
"ryanbeales@gmail.com"
] | ryanbeales@gmail.com |
6e6ab7d02971a974af8aa461a3f148f5f4eb1a9a | d0ee0d199ee5595ba38615ecd89ada5be334a0f9 | /button.py | 886e4b542c33024d533ee064950f15c00beb24ca | [] | no_license | davidChibueze/Alien-Force-Invasion | ab045388aee61a0e9c3c6ced726a83cb8e50531a | 68fe0634273a1a019a0a3dfe06c34b9a7e0603de | refs/heads/main | 2023-04-22T06:06:52.932349 | 2021-05-14T12:06:19 | 2021-05-14T12:06:19 | 367,349,070 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import pygame.font
class Button:
    """A simple clickable rectangle with centered text, drawn with pygame."""
    def __init__(self, ai_game, msg):
        """Initialize button attributes.

        ai_game: the game instance (only its screen is used here).
        msg: the text to display on the button.
        """
        self.screen = ai_game.screen
        self.screen_rect = self.screen.get_rect()
        # Set the dimensions and properties of the button.
        self.width, self.height = 200, 50
        self.button_color = (0, 255, 0)
        self.text_color = (255, 255, 255)
        self.font = pygame.font.SysFont(None, 48)
        # Build the button's rect object and center it.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center
        # The button message needs to be prepped only once.
        self._prep_msg(msg)
    def _prep_msg(self, msg):
        """Turn msg into a rendered image and center text on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center
    def draw_button(self):
        # Draw blank button and then draw message.
        self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect) | [
"noreply@github.com"
] | davidChibueze.noreply@github.com |
d42d3fd35bef84a4bc8c882075bcd8e35c62b2e5 | 3d2d7c223314acf338d9e1aedb9463ac780ed8aa | /fujiblog/urls.py | 49f160e2124df99f58dd2dfb8b7729694d1f2342 | [] | no_license | fuji97/fujiblog | 79b4d751c466e8fd6a82d99385f6bf3d7248258d | d00b6652cad3673a9b4ecc6ca37c0e17ecebc58c | refs/heads/master | 2021-01-20T10:05:12.595966 | 2017-06-08T22:54:54 | 2017-06-08T22:54:54 | 90,153,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | """fujiblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route admin, the Zinnia weblog at the site root, and django_comments;
# static() appends media-file serving for development use.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^', include('zinnia.urls')),
    #url(r'^weblog/', include('zinnia.urls')),
    url(r'^comments/', include('django_comments.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"fuji1097@gmail.com"
] | fuji1097@gmail.com |
e8e564dd8a81a7204c2c1219c8828de5d75a5b39 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsexpression.py | 10aca71722b9813074d199da83ce3d260fed8d3b | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | from PyObjCTools.TestSupport import *
from Foundation import *
class TestNSExpression(TestCase):
    """Regression tests pinning the NSExpression constants and selected
    method metadata exposed by the PyObjC Foundation bindings."""
    def testConstants(self):
        # NSExpressionType values available on all supported macOS versions.
        self.assertEqual(NSConstantValueExpressionType, 0)
        self.assertEqual(NSEvaluatedObjectExpressionType, 1)
        self.assertEqual(NSVariableExpressionType, 2)
        self.assertEqual(NSKeyPathExpressionType, 3)
        self.assertEqual(NSFunctionExpressionType, 4)
        self.assertEqual(NSUnionSetExpressionType, 5)
        self.assertEqual(NSIntersectSetExpressionType, 6)
        self.assertEqual(NSMinusSetExpressionType, 7)
        self.assertEqual(NSSubqueryExpressionType, 13)
        self.assertEqual(NSAggregateExpressionType, 14)
    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertEqual(NSBlockExpressionType, 19)
    @min_os_level("10.9")
    def testConstants10_9(self):
        self.assertEqual(NSAnyKeyExpressionType, 15)
    @min_os_level("10.11")
    def testConstants10_11(self):
        self.assertEqual(NSConditionalExpressionType, 20)
    @min_os_level("10.6")
    def testMethods10_6(self):
        # Block arguments/results use the Objective-C type encoding "@@@@".
        self.assertArgIsBlock(NSExpression.expressionForBlock_arguments_, 0, b"@@@@")
        self.assertResultIsBlock(NSExpression.expressionBlock, b"@@@@")
    @min_os_level("10.6")
    def testMethod10_6_unsupported(self):
        # expressionWithFormat_ takes a printf-style format string.
        self.assertArgIsPrintf(NSExpression.expressionWithFormat_, 0)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
1f3ff6b0e0ffd238536bc4cba66923da5ef896f3 | b8be27aa871f298e9b9a53f417219ebb080378d6 | /deep-dive-convolutional-neural-networks/vgg/vgg.py | b4985404ee4a610b23290e35edbb125bbbe411be | [] | no_license | Bayesian4042/computer-vision | f0d9e010ecf043b72b49a8118cf334310200f031 | afe969a1be2e8f396f2fe6282d0027534f88281d | refs/heads/master | 2023-02-15T20:03:34.237416 | 2021-01-10T16:30:35 | 2021-01-10T16:30:35 | 135,942,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,644 | py |
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from models.model import Model
from preprocessing.imagenet.bgr import resize_crop
from weight_loading.numpyfile import load_weights
from helper.layer import fc, conv
class VGG(Model):
    """
    VGG16 model definition for Tensorflow.

    Inputs are expected at 224x224, preprocessed with the ImageNet BGR
    resize/crop helper; pretrained weights are loaded from a numpy file.
    """
    # Input resolution the network expects.
    image_size = 224
    # Preprocessing function applied to input images.
    image_prep = resize_crop
def __init__(self, tensor, keep_prob=1.0, num_classes=1000, retrain_layer=[], weights_path='./weights/vgg16.npy'):
# Call the parent class, which will create the graph
Model.__init__(self, tensor, keep_prob, num_classes, retrain_layer, weights_path)
# Call the create function to build the computational graph
self.final, self.endpoints = self.create()
    def get_final_op(self):
        """Return the network's final output tensor (built by create())."""
        return self.final
    def get_endpoints(self):
        """Return the per-layer endpoint tensors collected by create()."""
        return self.endpoints
def get_restore_vars(self):
return [v for v in tf.global_variables() if not v.name.split('/')[0] in self.retrain_layer]
    def get_retrain_vars(self):
        """Return the variables to be trained — all trainable variables."""
        return tf.trainable_variables()
    def load_initial_weights(self, session):
        """Load pretrained weights from the .npy file into `session`.

        self.retrain_layer is forwarded to the loader — presumably so those
        layers are skipped and keep their fresh initialization; confirm in
        weight_loading.numpyfile.load_weights.
        """
        load_weights(session, self.weights_path, self.retrain_layer)
def create(self):
# 1st Layer: Conv -> Conv -> Pool
# conv(tensor, filter_height, filter_width, num_filters, stride_y, stride_x, name, padding)
conv1_1 = conv(self.tensor, 3, 3, 64, 1, 1, padding='SAME', name='conv1_1', trainable=self.is_layer_trainable('conv1_1'))
conv1_2 = conv(conv1_1 , 3, 3, 64, 1, 1, padding='SAME', name='conv1_2', trainable=self.is_layer_trainable('conv1_2'))
pool1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# 2nd Layer: Conv -> Conv -> Pool
conv2_1 = conv(pool1 , 3, 3, 128, 1, 1, padding='SAME', name='conv2_1', trainable=self.is_layer_trainable('conv2_1'))
conv2_2 = conv(conv2_1, 3, 3, 128, 1, 1, padding='SAME', name='conv2_2', trainable=self.is_layer_trainable('conv2_2'))
pool2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# 3rd Layer: Conv -> Conv -> Conv -> Pool
conv3_1 = conv(pool2 , 3, 3, 256, 1, 1, padding='SAME', name='conv3_1', trainable=self.is_layer_trainable('conv3_1'))
conv3_2 = conv(conv3_1, 3, 3, 256, 1, 1, padding='SAME', name='conv3_2', trainable=self.is_layer_trainable('conv3_2'))
conv3_3 = conv(conv3_2, 3, 3, 256, 1, 1, padding='SAME', name='conv3_3', trainable=self.is_layer_trainable('conv3_3'))
pool3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
# 4th Layer: Conv -> Conv -> Conv -> Pool
conv4_1 = conv(pool3 , 3, 3, 512, 1, 1, padding='SAME', name='conv4_1', trainable=self.is_layer_trainable('conv4_1'))
conv4_2 = conv(conv4_1, 3, 3, 512, 1, 1, padding='SAME', name='conv4_2', trainable=self.is_layer_trainable('conv4_2'))
conv4_3 = conv(conv4_2, 3, 3, 512, 1, 1, padding='SAME', name='conv4_3', trainable=self.is_layer_trainable('conv4_3'))
pool4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')
# 5th Layer: Conv -> Conv -> Conv -> Pool
conv5_1 = conv(pool4 , 3, 3, 512, 1, 1, padding='SAME', name='conv5_1', trainable=self.is_layer_trainable('conv5_1'))
conv5_2 = conv(conv5_1, 3, 3, 512, 1, 1, padding='SAME', name='conv5_2', trainable=self.is_layer_trainable('conv5_2'))
conv5_3 = conv(conv5_2, 3, 3, 512, 1, 1, padding='SAME', name='conv5_3', trainable=self.is_layer_trainable('conv5_3'))
pool5 = tf.nn.max_pool(conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool5')
# 6th Layer: FC -> DropOut
# [1:] cuts away the first element
pool5_out = int(np.prod(pool5.get_shape()[1:])) # 7 * 7 * 512 = 25088
pool5_flat = tf.reshape(pool5, [-1, pool5_out]) # shape=(image count, 7, 7, 512) -> shape=(image count, 25088)
fc6 = fc(pool5_flat, num_out=4096, name='fc6', relu=True, trainable=self.is_layer_trainable('fc6'))
dropout1 = tf.nn.dropout(fc6, self.keep_prob)
# 7th Layer: FC
fc7 = fc(dropout1, num_out=4096, name='fc7', relu=True, trainable=self.is_layer_trainable('fc7'))
dropout2 = tf.nn.dropout(fc7, self.keep_prob)
# 8th Layer: FC
fc8 = fc(dropout2, num_out=self.num_classes, name='fc8', relu=False, trainable=self.is_layer_trainable('fc8'))
# add layers to the endpoints dict
endpoints = OrderedDict()
endpoints['conv1/conv1_1'] = conv1_1
endpoints['conv1/conv1_2'] = conv1_2
endpoints['pool1'] = pool1
endpoints['conv2/conv2_1'] = conv2_1
endpoints['conv2/conv2_2'] = conv2_2
endpoints['pool2'] = pool2
endpoints['conv3/conv3_1'] = conv3_1
endpoints['conv3/conv3_2'] = conv3_2
endpoints['conv3/conv3_3'] = conv3_3
endpoints['pool3'] = pool3
endpoints['conv4/conv4_1'] = conv4_1
endpoints['conv4/conv4_2'] = conv4_2
endpoints['conv4/conv4_3'] = conv4_3
endpoints['pool4'] = pool4
endpoints['conv5/conv5_1'] = conv5_1
endpoints['conv5/conv5_2'] = conv5_2
endpoints['conv5/conv5_3'] = conv5_3
endpoints['pool5'] = pool5
endpoints['pool5/flat'] = pool5_flat # 25088
endpoints['fc6'] = fc6 # 4096
endpoints['fc7'] = fc7 # 4096
endpoints['fc8'] = fc8 # number of output classes
return fc8, endpoints
| [
"singhabhilasha4042@gmail.com"
] | singhabhilasha4042@gmail.com |
1fceb20404030a5fec787e594da373dd6185278b | 20dc3427454e86c949e4d0e44c89a9f0ec0ff76a | /tests/watcher.py | 9ce156b16248902bfd948f5e076ea084b2367399 | [
"MIT"
] | permissive | simonwittber/fibra | a0d01e1b1a040ea8d8d84c6150c781bfa63b4ebe | 1761ba79cb643b0392bb82d7e80ce9e55bb75275 | refs/heads/master | 2021-01-23T13:16:53.985148 | 2011-05-31T02:21:14 | 2011-05-31T02:21:14 | 1,824,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import fibra
def task():
yield None
print 'raising'
raise Exception('ARGH')
def watcher(e):
print "watcher received:", type(e), e
schedule = fibra.schedule()
t = task()
schedule.install(t)
schedule.watch(t, watcher)
schedule.run()
| [
"simonwittber@gmail.com"
] | simonwittber@gmail.com |
580cbb2d0c363236cfddb8740feec72eacf3119a | e9db45dc23454e256decaabc697016b18cc79cd1 | /game.py | d06e0fa7b641346b8909d0745de62f4ebd0f241d | [] | no_license | siyan38000/WikiGame | 4d7eebe41546ac7d8e038933b9065c96e0950979 | 5bd3949cddca71c5add7923384caa010754a4490 | refs/heads/main | 2023-03-07T08:53:24.773947 | 2021-02-15T11:03:43 | 2021-02-15T11:03:43 | 326,699,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,233 | py | from bs4 import BeautifulSoup
import requests
import urllib.request
import random
import tkinter as tk
window = tk.Tk()
window.title('Wikigame')
var = tk.StringVar()
global start_links
global startURL
#Definition des deux pages aléatoires
def getRandomPage():
return requests.get('https://fr.wikipedia.org/wiki/Sp%C3%A9cial:Page_au_hasard').content
#Fonction que filtre les liens afin de ne garder que les liens vers d'autree articles
def linksFilter(url):
linksList = []
with urllib.request.urlopen(url) as page:
actualPage = BeautifulSoup(page.read(), 'html.parser')
for anchor in actualPage.find_all('div', {"class":"mw-parser-output"}):
for links in anchor.find_all('a'):
link = formatage(str(links.get('href')))
#On s'assure que le lien pointe bien vers un article et qu'il n'existe pas déja dans la liste
if not ('/w/') in link:
if not ('#') in link:
if not ('Fichier:') in link:
if not ('http:') in link:
if not ('https:') in link:
if not ('Modèle:') in link:
if not ('/API') in link:
if not ('Spécial:') in link:
if not ('Catégorie:') in link:
if not (':') in link:
if not ('None') in link:
if link not in linksList:
linksList.append(link)
return linksList
def formatage(arg):
return arg.replace("%20"," ").replace("%27","'").replace("%C3%A8","è").replace("%C3%A9","é").replace('%C3%AA','ê').replace("%C3%A2","â").replace("%C5%93","œ").replace("%C3%B",'ü').replace("%C3%AC","ì").replace('%C3%A7','ç').replace('%C3%A0','à').replace('%C3%B4','ô').replace('%C3%89','É').replace("%C3%AF","ï")
#Fonction qui s'execute au clic sur un bouton radio et recupere sa valeur
def askForChoice():
choice = var.get()
updateWindow(choice)
depart = BeautifulSoup(getRandomPage(), 'html.parser')
arrive = BeautifulSoup(getRandomPage(), 'html.parser')
url1 = depart.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
url2 = arrive.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
def wikigame(start, end):
startURL = start.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
global endURL
endURL = end.find('li', attrs={'id': 'ca-nstab-main'}).find('a')['href']
updateWindow(startURL)
#Met a jour l'affichage a chaque changement de page
#le paramètre cpt compte le nombre de fois que la fonction est appelée
def updateWindow(url, cpt=[0]):
#Suppression de tout les objets de la fenetre
for widget in window.winfo_children():
widget.destroy()
if url == endURL:
tk.Label(window, text="BRAVO !").pack()
tk.Label(window, text="Page trouvée en {} coups".format(cpt)).pack()
else:
tk.Label(window, text="Page actuelle : {}(URL = https://fr.wikipedia.org{})".format(url.replace("/wiki/",""), url)).pack()
tk.Label(window, text="Page d'arrivée :{} (URL : https://fr.wikipedia.org{})".format(arrive.find(id='firstHeading').text,url2)).pack()
#Ajout de la scrollbar pour la liste des liens
canvas = tk.Canvas(window)
scroll = tk.Scrollbar(window, orient='vertical', command=canvas.yview)
start_links = linksFilter('https://fr.wikipedia.org'+url)
i = 0
for link in start_links:
rb = tk.Radiobutton(canvas, text="{} - {}".format(i, link), variable=var, value = link, command=askForChoice)
canvas.create_window(0, i*50, anchor='nw', window=rb, height=50)
i = i + 1
canvas.configure(scrollregion=canvas.bbox('all'), yscrollcommand=scroll.set)
canvas.pack(fill='both', expand=True, side='left')
scroll.pack(fill='y', side='right')
cpt[0] += 1
wikigame(depart, arrive)
tk.mainloop()
| [
"yanis.petit@epsi.fr"
] | yanis.petit@epsi.fr |
15cfbda2f912b5560429a9729b5ac0d60497f097 | 97495220db95d0ba4a4a7e7ad1863f8a49fc97df | /feat_ext/nets/resnet_v1.py | ee92afabd43407f356494d1605f8b9191ecc746a | [
"Apache-2.0"
] | permissive | forwchen/HVTG | cfdbc7a774fb3b911519a0e83b9edf88f772653a | b800ea3e1b9067389db98e4a9f6de3ce702aa081 | refs/heads/master | 2022-12-07T03:48:45.334685 | 2020-08-25T03:01:24 | 2020-08-25T03:01:24 | 278,540,659 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,634 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None,
use_bounded_activations=False):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
use_bounded_activations: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=tf.nn.relu6 if use_bounded_activations else None,
scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
if use_bounded_activations:
# Use clip_by_value to simulate bandpass activation.
residual = tf.clip_by_value(residual, -6.0, 6.0)
output = tf.nn.relu6(shortcut + residual)
else:
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.name,
output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
store_non_strided_activations)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
end_points['pre_pool'] = net
end_points['pre_pool_7x7'] = slim.avg_pool2d(net, [7, 7], stride=1, scope='pool1')
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
store_non_strided_activations=False,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
store_non_strided_activations=store_non_strided_activations,
reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
| [
"forwchen@gmail.com"
] | forwchen@gmail.com |
d3a7bdf0bdf102bef366ae9b245a8a5f800eb96d | 8def256b361cb117e291d435f20ee9b4b27fe9f7 | /getpubmed.py | 50d5e9323f6ec327c212519d69d01d401f6dd248 | [] | no_license | dvdmrn/citation_scraper | 44011afc4cda515b512ce6aceb32fc7412e6c292 | ff5a81dca7e31463c1793755863dedc11c7f7215 | refs/heads/master | 2021-05-13T23:51:09.950508 | 2018-01-14T04:18:42 | 2018-01-14T04:18:42 | 116,526,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | import metapub
import confidence as c
import helpers
fetch = metapub.PubMedFetcher()
listOfCitations = []
def get_pmid(descriptor):
"""
gets the pmid of an article based off a descriptor
descriptor: name or doi
returns: pub med id, or False
"""
candidates = fetch.pmids_for_query(descriptor)
if len(candidates) == 1:
return candidates[0]
if len(candidates) > 1:
print "WARNING: multiple matches found, selecting first candidate"
return candidates[0]
# !!! determine most viable match
else:
# couldn't find anything
print "SAD: no results found! (TT-TT)"
return 0
def lookup_pmid(pmid):
"""
finds an article with a given pub med id --
pmid = an int
returns: a PubMedArticle
"""
try:
article = fetch.article_by_pmid(pmid)
except:
print(" SAD: could not fetch pubmed data! (TT-TT)")
return 0
return article
def create_citation(pm_article):
"""
Creates a NF friendly citation --
pm_article: a PubMedArticle
returns: a string
"""
title = pm_article.title
volume = pm_article.volume
issue = pm_article.issue
journal = pm_article.journal
pages = pm_article.pages
missingData = 0
if issue:
issue = "("+issue+")"
else:
missingData += 1
issue = ""
if not journal:
missingData += 1
journal = pm_article.book
if not journal:
missingData += 1
journal = "COULD_NOT_FIND_JOURNAL_SORRY_BUB"
if not volume:
missingData += 1
volume = ""
if not pages:
missingData += 1
pages = ""
citation = journal+" "+volume+issue+":"+pages
if missingData >= 2:
citation = citation+"!!! missing quite a bit of data"
print " WARNING: "+str(missingData)+" missing fields. Citation flagged."
return citation
def process_pubs(dois):
writeData = [] # list of Rows
"""
dict:
file
title
citation
confidence
"""
for pub in dois:
print("\n---")
title = ""
citation = ""
conf = 0
if pub["doi"]:
print "+ searching for doi: "+pub["doi"]+"; file: "+pub["file"]
pmid = get_pmid(pub["doi"])
if pmid:
article = lookup_pmid(pmid)
if article:
citation = create_citation(article)
title = article.title
conf = c.confidence_metric(article,"pdfs_to_analyze/"+pub["file"])
if conf < 0.6:
print("\!/ WARNING \!/ pubmed data below critical confidence levels")
citation = citation+"!!! VERIFY"
print "writing citation: "+citation
else:
print(" No doi found for: "+pub["file"]+"; ignoring file")
writeData.append({"file":pub["file"],"title":title,"citation":citation,"confidence":conf})
return writeData
# ==================================================
# id = get_pmid("10.1039/c4fo00570h")
# article = lookup_pmid(id)
| [
"damarino@cs.ubc.ca"
] | damarino@cs.ubc.ca |
e17345f6cf00a1d2eedbf04969bc6da4e66e9878 | cce1b624c5d41d8a5e832217a928225b45f62b15 | /mysite/polls/models.py | 93d1e0a3856579251dbc219036c60bdf13f0f39c | [] | no_license | SauravL3010/Django | 8b192c9d57606ebd3d1f0f310689b3c14ad6e2f6 | 021c80357e98ebafc2bbb6e60b1a5a473a79f20f | refs/heads/main | 2023-02-06T15:34:11.876571 | 2020-12-31T01:56:38 | 2020-12-31T01:56:38 | 324,661,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def recently_pub(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| [
"slgurhal@uwaterloo.ca"
] | slgurhal@uwaterloo.ca |
5574fa697c20b9926bc62f49277b71d1dcd3a57d | 672fa6128c88e43bf14b4168c7c08c60061477bd | /day5/page_object/loginPage.py | 89fcacd15f93afadc07573b5f125d363b627bcad | [] | no_license | zuhui940615/selenium7th | 80128efc75f58bfa1e296506e60c3102d871b8e0 | a752edd16311424360cef4d0b746259b4e424cc4 | refs/heads/master | 2020-03-21T10:42:49.519768 | 2018-06-24T08:46:12 | 2018-06-24T08:46:12 | 138,466,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | #这种框架的设计思想,叫做page—object设计模式,是一种高级框架设计思想
#这种思想的主旨是把业务逻辑和代码技术分离开
#测试用例的类,专门负责业务逻辑
#元素定位和操作交给 网页对象
#在pageObiect这个类中,把每个网页看成一个类
#其中,网页中的每个元素看成类中的一个属性
#针对这个元素的操作,看成类中的一个方法
#元素的信息,定位是名词性,所以可以看成属性(成员变量)
#元素的操作是动词性的,所以可以看成是方法
#那么,下面我们封装一下登录这个网页
#这个类主要做的就是把元素定位,改一个易于理解的名字
'''driver.get("http://localhost/index.php?m=user&c=public&a=login")
driver.find_element(By.NAME,"username").send_keys("huohuozu")
driver.find_element(By.NAME, "password").send_keys("123456")
old_title = driver.title
driver.find_element(By.CLASS_NAME, "login_btn").click()'''
#把上面的代码封装成下面的样子
from selenium import webdriver
from selenium.webdriver.common.by import By
class LoginPage:
#为这个网页创建一个构造函数
#在python中构造函数固定名字__init__()
def __init__(self,driver):
#因为setup方法中已经创建了一个浏览器,所以这里不需要新建浏览器,直接用setup建好的浏览器
#self.driver = webdriver.Chrome()
self.driver = driver
self.url = "http://localhost/index.php?m=user&c=public&a=login"
username_input_loc = (By.ID, "username")
password_input_loc = (By.ID, "password")
login_button_loc = (By.CLASS_NAME, "login_btn")
#声明一个变量username_input_loc,保存元素的定位需要的两个参数
#python的元组,类似于数组
#这句话的意思是,声明了一个数组叫username_input_loc
#这个数组中有两个元素,分别是 By.ID,"username"
def open(self):
self.driver.get(self.url)
#给参数设置默认值,如果调用方法时,传入一个新的用户名,那么使用新的
#如果调用方法时,不传参,那么使用默认值
def input_username(self,username="huohuozu"):
#这个类中涉及到三个元素定位,因为元素定位不太稳定,经常需要修改,所以应该把定位方式声明成类中的一个属性
#self.driver.find_element(By.ID,"username").send_keys("username")
#*表示find_element()这个方法传入的不是一个元组,
#而是把元组中的每个元素都分别传入find_element()这个方法,作为单独的参数
self.driver.find_element(*self.username_input_loc).send_keys(username)
def input_password(self,password='123456'):
self.driver.find_element( *self.password_input_loc).send_keys(password)
def click_login_button(self):
self.driver.find_element(*self.login_button_loc).click()
| [
"15032683126@163.com"
] | 15032683126@163.com |
9102058651fbf91cbac1b616a121c35f0eb0973e | 8ab173ee437170afd5e4179f4e44d46b829f3ab0 | /Validation/RecoTrack/python/plotting/html.py | 04c09289f18ce2745bf5d1c2b56af89db89b9cc2 | [
"Apache-2.0"
] | permissive | suchandradutta/cmssw | 6b085313fe15868bd3f7dfddfb850debe111410e | ed3aa96ca24548294076d466db17b0bca44d1359 | refs/heads/Phase2Digitizer_91X_170420 | 2023-06-25T09:47:56.111691 | 2017-04-20T09:59:31 | 2017-04-20T09:59:31 | 12,500,444 | 1 | 1 | null | 2018-11-06T10:34:46 | 2013-08-31T04:15:48 | C++ | UTF-8 | Python | false | false | 25,818 | py | import os
import collections
def _lowerFirst(s):
return s[0].lower()+s[1:]
# Map from RelVal dataset name to the human-readable sample title used
# in the generated pages.
_sampleName = {
    "RelValMinBias": "Min Bias",
    "RelValTTbar": "TTbar",
    "RelValQCD_Pt_600_800": "QCD Pt 600 to 800",
    "RelValQCD_Pt_3000_3500": "QCD Pt 3000 to 3500",
    "RelValQCD_FlatPt_15_3000": "QCD Flat Pt 15 to 3000",
    "RelValZMM": "ZMuMu",
    "RelValWjet_Pt_3000_3500": "Wjet Pt 3000 to 3500",
    "RelValH125GGgluonfusion": "Higgs to gamma gamma",
    "RelValSingleElectronPt35": "Single Electron Pt 35",
    "RelValSingleElectronPt35Extended": "Single Electron Pt 35 (extended eta)",
    "RelValSingleElectronPt10": "Single Electron Pt 10",
    "RelValSingleMuPt10": "Single Muon Pt 10",
    "RelValSingleMuPt10Extended": "Single Muon Pt 10 (extended eta)",
    "RelValSingleMuPt100": "Single Muon Pt 100",
    "RelValTenMuE_0_200": "Ten muon Pt 0-200",
}
# Map from RelVal dataset name to a short lower-case token; per the
# variable name these are presumably used when composing output file
# names (TODO confirm against the callers).
_sampleFileName = {
    "RelValMinBias": "minbias",
    "RelValTTbar": "ttbar",
    "RelValQCD_Pt_600_800": "qcd600",
    "RelValQCD_Pt_3000_3500": "qcd3000",
    "RelValQCD_FlatPt_15_3000": "qcdflat",
    "RelValZMM": "zmm",
    "RelValWjet_Pt_3000_3500": "wjet3000",
    "RelValH125GGgluonfusion": "hgg",
    "RelValSingleElectronPt35": "ele35",
    "RelValSingleElectronPt35Extended": "ele35ext",
    "RelValSingleElectronPt10": "ele10",
    "RelValSingleMuPt10": "mu10",
    "RelValSingleMuPt10Extended": "mu10ext",
    "RelValSingleMuPt100": "mu100",
    "RelValTenMuE_0_200": "tenmu200",
}
# Display names for the special track collections; reused below when
# composing the _trackQualityNameOrder entries.
_allTPEfficName = "All tracks (all TPs)"
_fromPVName = "Tracks from PV"
_fromPVAllTPName = "Tracks from PV (all TPs)"
_conversionName = "Tracks for conversions"
_gsfName = "Electron GSF tracks"
def _toHP(s):
    """Turn a collection label into its "High purity" variant.

    The original label's first letter is lowercased (via _lowerFirst) and
    the string is prefixed with "High purity ".
    """
    return "High purity %s" % _lowerFirst(s)
def _allToHP(s):
return s.replace("All", "High purity")
def _ptCut(s):
return s.replace("Tracks", "Tracks pT > 0.9 GeV").replace("tracks", "tracks pT > 0.9 GeV")
# Maps track-quality/section keys to display names; the OrderedDict order also
# defines the section ordering on a page (see TrackingIterPage._orderSets).
_trackQualityNameOrder = collections.OrderedDict([
    ("seeding_seeds", "Seeds"),
    ("seeding_seedsa", "Seeds A"),
    ("seeding_seedsb", "Seeds B"),
    ("seeding_seedstripl", "Seeds triplets"),
    ("seeding_seedspair", "Seeds pairs"),
    ("building_", "Built tracks"),
    ("", "All tracks"),
    ("highPurity", "High purity tracks"),
    ("Pt09", "Tracks pT > 0.9 GeV"),
    ("highPurityPt09", "High purity tracks pT > 0.9 GeV"),
    ("ByOriginalAlgo", "All tracks by originalAlgo"),
    ("highPurityByOriginalAlgo", "High purity tracks by originalAlgo"),
    ("ByAlgoMask", "All tracks by algoMask"),
    ("highPurityByAlgoMask", "High purity tracks by algoMask"),
    ("btvLike", "BTV-like"),
    ("ak4PFJets", "AK4 PF jets"),
    ("allTPEffic_", _allTPEfficName),
    ("allTPEffic_highPurity", _allToHP(_allTPEfficName)),
    ("fromPV_", _fromPVName),
    ("fromPV_highPurity", _toHP(_fromPVName)),
    ("fromPV_Pt09", _ptCut(_fromPVName)),
    ("fromPV_highPurityPt09", _toHP(_ptCut(_fromPVName))),
    ("fromPVAllTP_", _fromPVAllTPName),
    ("fromPVAllTP_highPurity", _toHP(_fromPVAllTPName)),
    ("fromPVAllTP_Pt09", _ptCut(_fromPVAllTPName)),
    ("fromPVAllTP_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName))),
    ("fromPVAllTP2_", _fromPVAllTPName.replace("PV", "PV v2")),
    ("fromPVAllTP2_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName).replace("PV", "PV v2")),
    ("fromPVAllTP2_Pt09", _fromPVAllTPName.replace("Tracks", "Tracks pT > 0.9 GeV").replace("PV", "PV v2")),
    ("fromPVAllTP2_highPurityPt09", _toHP(_ptCut(_fromPVAllTPName)).replace("PV", "PV v2")),
    ("conversion_", _conversionName),
    ("gsf_", _gsfName),
])
# Display names for tracking algorithm page keys; algorithms not listed here
# fall back to the raw key (see TrackingPageSet._mapPagesName).
_trackAlgoName = {
    "ootb": "Out of the box",
    "iter0" : "Iterative Step 0",
    "iter1" : "Iterative Step 1",
    "iter2" : "Iterative Step 2",
    "iter3" : "Iterative Step 3",
    "iter4" : "Iterative Step 4",
    "iter5" : "Iterative Step 5",
    "iter6" : "Iterative Step 6",
    "iter7" : "Iterative Step 7",
    "iter9" : "Iterative Step 9",
    "iter10": "Iterative Step 10",
}
# Ordering of algorithm pages in the report (see TrackingPageSet._orderPages).
_trackAlgoOrder = [
    'ootb',
    'initialStepPreSplitting',
    'initialStep',
    'highPtTripletStep',
    'detachedQuadStep',
    'detachedTripletStep',
    'lowPtQuadStep',
    'lowPtTripletStep',
    'pixelPairStep',
    'mixedTripletStep',
    'pixelLessStep',
    'tobTecStep',
    'jetCoreRegionalStep',
    'muonSeededStepInOut',
    'muonSeededStepOutIn',
    'duplicateMerge',
    'convStep',
    'conversionStep',
    'ckfInOutFromConversions',
    'ckfOutInFromConversions',
    'electronGsf',
    'iter0',
    'iter1',
    'iter2',
    'iter3',
    'iter4',
    'iter5',
    'iter6',
    'iter7',
    'iter9',
    'iter10',
]
# Display names for non-tracking pages (see PageSet._mapPagesName).
_pageNameMap = {
    "summary": "Summary",
    "vertex": "Vertex",
    "v0": "V0",
    "miniaod": "MiniAOD",
    "timing": "Timing",
    "hlt": "HLT",
}
# Maps section keys to display names for non-iteration pages; the insertion
# order also defines section ordering (see Page._orderSets).
_sectionNameMapOrder = collections.OrderedDict([
    # These are for the summary page
    ("seeding_seeds", "Seeds"),
    ("building", "Built tracks"),
    ("", "All tracks"),
    ("highPurity", "High purity tracks"),
    ("btvLike", "BTV-like"),
    ("ak4PFJets", "AK4 PF jets"),
    ("allTPEffic", _allTPEfficName),
    ("allTPEffic_highPurity", _allTPEfficName.replace("All", "High purity")),
    ("fromPV", _fromPVName),
    ("fromPV_highPurity", "High purity "+_lowerFirst(_fromPVName)),
    ("fromPVAllTP", _fromPVAllTPName),
    ("fromPVAllTP_highPurity", "High purity "+_lowerFirst(_fromPVAllTPName)),
    ("conversion", _conversionName),
    ("gsf", _gsfName),
    # These are for vertices
    ("genvertex", "Gen vertices"),
    ("pixelVertices", "Pixel vertices"),
    ("selectedPixelVertices", "Selected pixel vertices"),
    ("firstStepPrimaryVerticesPreSplitting", "firstStepPrimaryVerticesPreSplitting"),
    ("firstStepPrimaryVertices", "firstStepPrimaryVertices"),
    ("offlinePrimaryVertices", "All vertices (offlinePrimaryVertices)"),
    ("selectedOfflinePrimaryVertices", "Selected vertices (selectedOfflinePrimaryVertices)"),
    ("offlinePrimaryVerticesWithBS", "All vertices with BS constraint"),
    ("selectedOfflinePrimaryVerticesWithBS", "Selected vertices with BS constraint"),
    # These are for V0
    ("k0", "K0"),
    ("lambda", "Lambda"),
])
# Footnote legend texts explaining the efficiency/fake-rate definitions of the
# special track collections (rendered via _sectionNameLegend()).
_allTPEfficLegend = "All tracks, efficiency denominator contains all TrackingParticles"
_fromPVLegend = "Tracks from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVPtLegend = "Tracks (pT > 0.9 GeV) from reco PV vs. TrackingParticles from gen PV (fake rate includes pileup tracks)"
_fromPVAllTPLegend = "Tracks from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPtLegend = "Tracks (pT > 0.9 GeV) from reco PV, fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTP2Legend = "Tracks from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
_fromPVAllTPPt2Legend = "Tracks (pT > 0.9 GeV) from reco PV (another method), fake rate numerator contains all TrackingParticles (separates fake tracks from pileup tracks)"
def _sectionNameLegend():
    """Return the map from section key to the explanatory legend text that is
    rendered as a numbered footnote on the HTML pages (see Page._appendLegend)."""
    return {
        "btvLike": "BTV-like selected tracks",
        "ak4PFJets": "Tracks from AK4 PF jets (jet corrected pT > 10 GeV)",
        "allTPEffic": _allTPEfficLegend,
        "allTPEffic_": _allTPEfficLegend,
        "allTPEffic_highPurity": _allToHP(_allTPEfficLegend),
        "fromPV": _fromPVLegend,
        "fromPV_": _fromPVLegend,
        "fromPV_highPurity": _toHP(_fromPVLegend),
        "fromPV_Pt09": _fromPVPtLegend,
        "fromPV_highPurity_Pt09": _toHP(_fromPVPtLegend),
        "fromPVAllTP": _fromPVAllTPLegend,
        "fromPVAllTP_": _fromPVAllTPLegend,
        "fromPVAllTP_highPurity": _toHP(_fromPVAllTPLegend),
        "fromPVAllTP_Pt09": _fromPVAllTPPtLegend,
        "fromPVAllTP_highPurityPt09": _toHP(_fromPVAllTPPtLegend),
        "fromPVAllTP2_": _fromPVAllTP2Legend,
        "fromPVAllTP2_highPurity": _toHP(_fromPVAllTP2Legend),
        "fromPVAllTP2_Pt09": _fromPVAllTPPt2Legend,
        "fromPVAllTP2_highPurityPt09": _toHP(_fromPVAllTPPt2Legend),
    }
class Table:
    """A 2D table of values stored column-major (table[column][row]),
    with headers, validated for consistent dimensions on construction."""

    def __init__(self, columnHeaders, rowHeaders, table, purpose, page, section):
        """Validate that headers match the table dimensions and store everything.

        Raises Exception if the number of column headers, the number of row
        headers, or any column length is inconsistent.
        """
        if len(table) != len(columnHeaders):
            raise Exception("Got %d columnHeaders for table with %d columns for page %s, section %s" % (len(columnHeaders), len(table), page, section))
        expectedRows = len(table[0])
        for columnIndex, columnValues in enumerate(table):
            if len(columnValues) != expectedRows:
                raise Exception("Got non-square table, first column has %d rows, column %d has %d rows" % (expectedRows, columnIndex, len(columnValues)))
        if len(rowHeaders) != expectedRows:
            raise Exception("Got %d rowHeaders for table with %d rows" % (len(rowHeaders), expectedRows))

        self._columnHeaders = columnHeaders
        self._rowHeaders = rowHeaders
        self._table = table
        self._purpose = purpose
        self._page = page
        self._section = section

    def getPurpose(self):
        """Return the PlotPurpose tag of this table."""
        return self._purpose

    def getPage(self):
        """Return the page key this table belongs to."""
        return self._page

    def getSection(self):
        """Return the section key this table belongs to."""
        return self._section

    def ncolumns(self):
        """Number of columns."""
        return len(self._table)

    def nrows(self):
        """Number of rows."""
        return len(self._table[0])

    def columnHeaders(self):
        """Return the column header list."""
        return self._columnHeaders

    def rowHeaders(self):
        """Return the row header list."""
        return self._rowHeaders

    def tableAsColumnRow(self):
        """Return the data as stored: indexed [column][row]."""
        return self._table

    def tableAsRowColumn(self):
        """Return the transposed data, indexed [row][column]."""
        return map(list, zip(*self._table))
class PlotPurpose:
    # Enum-like marker classes used as tags to route plots and tables to the
    # right PageSet (see IndexSection._purposePageMap).
    class TrackingIteration: pass
    class TrackingSummary: pass
    class Vertexing: pass
    class MiniAOD: pass
    class Timing: pass
    class HLT: pass
class Page(object):
    """One HTML report page.

    Accumulates plot-file sets and Table objects per section, then renders the
    whole page to a file with write(). Section display names and ordering come
    from _sectionNameMapOrder; footnote legends from _sectionNameLegend().

    Fix vs. original: _orderSets removed matched sections from the *input*
    list but extended the result with the separately sorted copy, so every
    known section appeared twice in the output (and the caller's list was
    mutated to no effect).
    """
    def __init__(self, title, sampleName):
        # Start of the HTML document; sections are appended by write().
        self._content = [
            '<html>',
            ' <head>',
            ' <title>%s</title>' % title,
            ' </head>',
            ' <body>',
            ' '+sampleName,
            ' <br/>',
            ' <br/>',
        ]
        self._plotSets = {}   # section key -> list of plot file paths
        self._tables = {}     # section key -> Table

    def addPlotSet(self, section, plotSet):
        """Add plot files to a section, extending an existing set if present."""
        if section in self._plotSets:
            self._plotSets[section].extend(plotSet)
        else:
            self._plotSets[section] = plotSet

    def addTable(self, section, table):
        """Set the Table of a section (replaces any previous one)."""
        self._tables[section] = table

    def isEmpty(self):
        """Return True if the page has no plots and no tables."""
        for plotSet in self._plotSets.itervalues():
            if len(plotSet) > 0:
                return False
        if len(self._tables) > 0:
            return False
        return True

    def write(self, fileName):
        """Render the page and write it to fileName."""
        # Per-write rendering state (legend footnotes, shortened column headers)
        self._legends = []
        self._sectionLegendIndex = {}
        self._columnHeaders = []
        self._columnHeadersIndex = {}
        self._formatPlotSets()
        self._formatTables()
        self._formatLegend()
        self._content.extend([
            ' </body>',
            '</html>',
        ])
        #print "Writing HTML report page", fileName
        f = open(fileName, "w")
        for line in self._content:
            f.write(line)
            f.write("\n")
        f.close()

    def _appendLegend(self, section):
        """Return the superscript marker for a section's legend footnote,
        registering the legend text on first use."""
        leg = ""
        legends = _sectionNameLegend()
        if section in legends:
            if section in self._sectionLegendIndex:
                leg = self._sectionLegendIndex[section]
            else:
                legnum = len(self._legends)+1
                leg = "<sup>%d</sup>" % legnum
                leg2 = "<sup>%d)</sup>" % legnum
                self._legends.append("%s %s" % (leg2, legends[section]))
                self._sectionLegendIndex[section] = leg
        return leg

    def _formatPlotSets(self):
        """Emit the plot links as a table: one column per section, one row per
        plot file base name."""
        self._content.extend([
            # NOTE(review): no comma after ' <table>' — the two literals
            # concatenate into a single list entry; preserved as-is.
            ' <table>'
            ' <tr>',
        ])

        # fileTable rows: [basename, col0_path_or_None, col1_path_or_None, ...]
        fileTable = []

        sections = self._orderSets(self._plotSets.keys())
        for isec, section in enumerate(sections):
            leg = self._appendLegend(section)

            self._content.extend([
                ' <td>%s%s</td>' % (self._mapSectionName(section), leg),
            ])
            files = [(os.path.basename(f), f) for f in self._plotSets[section]]
            for row in fileTable:
                found = False
                for i, (bsf, f) in enumerate(files):
                    if bsf == row[0]:
                        row.append(f)
                        found = True
                        del files[i]
                        break
                if not found:
                    row.append(None)
            for bsf, f in files:
                # New base name: pad earlier columns with None
                fileTable.append( [bsf] + [None]*isec + [f] )

        self._content.extend([
            ' </tr>',
        ])
        for row in fileTable:
            self._content.append(' <tr>')
            bs = row[0]
            for elem in row[1:]:
                if elem is not None:
                    self._content.append(' <td><a href="%s">%s</a></td>' % (elem, bs))
                else:
                    self._content.append(' <td></td>')
            self._content.append(' </tr>')

        self._content.extend([
            ' </table>',
        ])

    def _appendColumnHeader(self, header):
        """Return a short letter alias (A, B, ...) for a long column header,
        registering the full text for the per-table footnote."""
        leg = ""
        if header in self._columnHeadersIndex:
            leg = self._columnHeadersIndex[header]
        else:
            leg = str(chr(ord('A')+len(self._columnHeaders)))
            self._columnHeaders.append("%s: %s" % (leg, header))
            self._columnHeadersIndex[header] = leg
        return leg

    def _formatTables(self):
        """Emit each section's Table as an HTML table, skipping all-None rows."""
        def _allNone(row):
            for item in row:
                if item is not None:
                    return False
            return True

        sections = self._orderSets(self._tables.keys())
        for isec, section in enumerate(sections):
            leg = self._appendLegend(section)

            table = self._tables[section]
            self._content.extend([
                ' <br/>',
                ' %s%s' % (self._mapSectionName(section), leg),
                ' <table border="1">'
            ])

            # table is stored in column-row, need to transpose
            data = table.tableAsRowColumn()

            self._content.extend([
                # NOTE(review): missing comma — the two literals concatenate;
                # preserved as-is.
                ' <tr>'
                ' <td></td>'
            ])
            heads = table.columnHeaders()
            # Replace long headers by letter aliases, explained below the table
            if max(map(lambda h: len(h), heads)) > 20:
                heads = [self._appendColumnHeader(h) for h in heads]
            for head in heads:
                self._content.append(' <td>%s</td>' % head)
            self._content.append(' </tr>')

            for irow, row in enumerate(data):
                # Skip row if all values are non-existent
                if _allNone(row):
                    continue

                self._content.extend([
                    # NOTE(review): missing comma — the literals concatenate
                    # before the % is applied; preserved as-is.
                    ' <tr>'
                    ' <td>%s</td>' % table.rowHeaders()[irow]
                ])
                # align the number columns to right
                for icol, item in enumerate(row):
                    formatted = str(item) if item is not None else ""
                    self._content.append(' <td align="right">%s</td>' % formatted)
                self._content.append(' </tr>')

            self._content.append(' </table>')

            for shortenedColumnHeader in self._columnHeaders:
                self._content.append(' %s<br/>' % shortenedColumnHeader)
            self._columnHeaders = []
            self._columnHeadersIndex = {}

    def _formatLegend(self):
        """Emit the accumulated legend footnotes at the bottom of the page."""
        if len(self._legends) > 0:
            self._content.extend([
                # NOTE(review): missing comma — literals concatenate;
                # preserved as-is.
                ' <br/>'
                ' Details:</br>',
            ])
            for leg in self._legends:
                self._content.append(' %s<br/>' % leg)

    def _mapSectionName(self, section):
        """Map a section key to its display name (fall back to the key)."""
        return _sectionNameMapOrder.get(section, section)

    def _orderSets(self, keys):
        """Order section keys: known sections first in _sectionNameMapOrder
        order, then the remaining ones alphabetically."""
        keys_sorted = sorted(keys)
        ret = []
        for section in _sectionNameMapOrder.keys():
            if section in keys_sorted:
                ret.append(section)
                # Fixed: was keys.remove(section), which left the section in
                # keys_sorted and therefore duplicated it in the output.
                keys_sorted.remove(section)
        ret.extend(keys_sorted)
        return ret
class PageSet(object):
    # A set of HTML pages for one sample; each output file name shares a prefix
    # encoding the sample properties (fast/full sim, scenario, pileup).
    def __init__(self, title, sampleName, sample, fastVsFull, pileupComparison, dqmSubFolderTranslatedToSectionName=None):
        # dqmSubFolderTranslatedToSectionName: optional callable mapping a
        # translated DQM subfolder to a section name (used e.g. for HLT pages).
        self._title = title
        self._sampleName = sampleName
        self._pages = collections.OrderedDict()
        self._dqmSubFolderTranslatedToSectionName = dqmSubFolderTranslatedToSectionName
        # Build the common file-name prefix from the sample properties.
        self._prefix = ""
        if sample.fastsim():
            self._prefix += "fast_"
            if fastVsFull:
                self._prefix += "full_"
        self._prefix += _sampleFileName.get(sample.label(), sample.label())+"_"
        if hasattr(sample, "hasScenario") and sample.hasScenario():
            self._prefix += sample.scenario()+"_"
        if hasattr(sample, "hasPileup"):
            if sample.hasPileup():
                self._prefix += "pu"+str(sample.pileupNumber())+"_"+sample.pileupType()+"_"
            else:
                self._prefix += "nopu_"
            if pileupComparison:
                self._prefix += "vspu_"
    def _getPage(self, key, pageClass):
        # Lazily create the page for `key` on first access.
        if key not in self._pages:
            page = pageClass(self._title, self._sampleName)
            self._pages[key] = page
        else:
            page = self._pages[key]
        return page
    def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
        # Derive page and section keys from the plotter folder (falling back to
        # the translated DQM subfolder or the folder name), then store the plots.
        pageKey = plotterFolder.getPage()
        if pageKey is None:
            if dqmSubFolder is not None:
                pageKey = dqmSubFolder.translated
            else:
                pageKey = plotterFolder.getName()
        page = self._getPage(pageKey, Page)
        sectionName = plotterFolder.getSection()
        if sectionName is None:
            if plotterFolder.getPage() is not None and dqmSubFolder is not None:
                if self._dqmSubFolderTranslatedToSectionName is not None:
                    sectionName = self._dqmSubFolderTranslatedToSectionName(dqmSubFolder.translated)
                else:
                    sectionName = dqmSubFolder.translated
            else:
                sectionName = ""
        page.addPlotSet(sectionName, plotFiles)
    def addTable(self, table):
        # Route a Table to its page; None tables are silently ignored.
        if table is None:
            return
        page = self._getPage(table.getPage(), Page)
        page.addTable(table.getSection(), table)
    def write(self, baseDir):
        # Write all non-empty pages under baseDir and return a list of
        # (display label, file name) pairs for the index page.
        #print "TrackingPageSet.write"
        ret = []
        keys = self._orderPages(self._pages.keys())
        for key in keys:
            page = self._pages[key]
            if page.isEmpty():
                continue
            fileName = "%s%s.html" % (self._prefix, key)
            page.write(os.path.join(baseDir, fileName))
            ret.append( (self._mapPagesName(key), fileName) )
        return ret
    def _mapPagesName(self, name):
        # Map a page key to its display name (fall back to the key).
        return _pageNameMap.get(name, name)
    def _orderPages(self, keys):
        # Default: keep insertion order; subclasses override.
        return keys
class TrackingIterPage(Page):
    """Page variant whose sections are track-quality keys: display names and
    ordering come from _trackQualityNameOrder."""

    def __init__(self, *args, **kwargs):
        super(TrackingIterPage, self).__init__(*args, **kwargs)

    def _mapSectionName(self, quality):
        """Map a quality key to its display name (fall back to the key)."""
        return _trackQualityNameOrder.get(quality, quality)

    def _orderSets(self, qualities):
        """Known qualities first, in _trackQualityNameOrder order, then the rest."""
        ordered = []
        for known in _trackQualityNameOrder.keys():
            if known in qualities:
                ordered.append(known)
                qualities.remove(known)
        ordered.extend(qualities)
        return ordered
class TrackingPageSet(PageSet):
    # PageSet specialized for tracking iterations: one page per algorithm,
    # sections keyed by track quality.
    def __init__(self, *args, **kwargs):
        super(TrackingPageSet, self).__init__(*args, **kwargs)
    def addPlotSet(self, plotterFolder, dqmSubFolder, plotFiles):
        # dqmSubFolder.translated is an (algo, quality) pair.
        (algo, quality) = dqmSubFolder.translated
        pageName = algo
        sectionName = quality
        # put all non-iterative stuff under OOTB
        #
        # it is bit of a hack to access trackingPlots.TrackingPlotFolder this way,
        # but it was simple and it works
        if algo != "ootb" and not plotterFolder._plotFolder.isAlgoIterative(algo):
            pageName = "ootb"
            sectionName = algo
        folderName = plotterFolder.getName()
        if folderName != "":
            sectionName = folderName+"_"+sectionName
        page = self._getPage(pageName, TrackingIterPage)
        page.addPlotSet(sectionName, plotFiles)
    def _mapPagesName(self, algo): # algo = pageName
        # Map an algorithm key to its display name (fall back to the key).
        return _trackAlgoName.get(algo, algo)
    def _orderPages(self, algos):
        # Known algorithms first, in _trackAlgoOrder order, then the rest.
        ret = []
        for algo in _trackAlgoOrder:
            if algo in algos:
                ret.append(algo)
                algos.remove(algo)
        ret.extend(algos)
        return ret
class IndexSection:
    # One sample's entry in the index page: builds the human-readable sample
    # description and owns one PageSet per plot purpose.
    def __init__(self, sample, title, fastVsFull, pileupComparison):
        self._sample = sample
        # Compose the sample description shown on the index page.
        self._sampleName = ""
        if sample.fastsim():
            self._sampleName += "FastSim "
            if fastVsFull:
                self._sampleName += "vs FullSim "
        pileup = ""
        if hasattr(sample, "hasPileup"):
            pileup = "with no pileup"
            if sample.hasPileup():
                pileup = "with %d pileup (%s)" % (sample.pileupNumber(), sample.pileupType())
            if pileupComparison is not None:
                pileup += " "+pileupComparison
        if hasattr(sample, "customPileupLabel"):
            pileup = sample.customPileupLabel()
        scenario = ""
        if hasattr(sample, "hasScenario") and sample.hasScenario():
            scenario = " (\"%s\")" % sample.scenario()
        self._sampleName += "%s sample%s %s" % (_sampleName.get(sample.name(), sample.name()), scenario, pileup)
        params = [title, self._sampleName, sample, fastVsFull, pileupComparison is not None]
        self._summaryPage = PageSet(*params)
        self._iterationPages = TrackingPageSet(*params)
        self._vertexPage = PageSet(*params)
        self._miniaodPage = PageSet(*params)
        self._timingPage = PageSet(*params)
        # HLT pages use the algorithm part of the (algo, quality) pair as section name.
        self._hltPages = PageSet(*params, dqmSubFolderTranslatedToSectionName=lambda algoQuality: algoQuality[0])
        self._otherPages = PageSet(*params)
        # Routes each PlotPurpose tag to its PageSet; anything unknown goes to
        # _otherPages.
        self._purposePageMap = {
            PlotPurpose.TrackingIteration: self._iterationPages,
            PlotPurpose.TrackingSummary: self._summaryPage,
            PlotPurpose.Vertexing: self._vertexPage,
            PlotPurpose.MiniAOD: self._miniaodPage,
            PlotPurpose.Timing: self._timingPage,
            PlotPurpose.HLT: self._hltPages,
        }
    def addPlots(self, plotterFolder, dqmSubFolder, plotFiles):
        # Dispatch the plots to the PageSet matching the folder's purpose.
        page = self._purposePageMap.get(plotterFolder.getPurpose(), self._otherPages)
        page.addPlotSet(plotterFolder, dqmSubFolder, plotFiles)
    def addTable(self, table):
        # Dispatch the table to the PageSet matching its purpose; None is ignored.
        if table is None:
            return
        page = self._purposePageMap.get(table.getPurpose(), self._otherPages)
        page.addTable(table)
    # NOTE(review): class-level attribute, apparently an unused leftover —
    # nothing in this file reads it; candidate for removal.
    params = []
    def write(self, baseDir):
        # Write all page sets and return this sample's HTML fragment (a list of
        # lines) for the index page.
        ret = [
            " "+self._sampleName,
            " <br/>",
            " <ul>",
        ]
        for pages in [self._summaryPage, self._iterationPages, self._vertexPage, self._miniaodPage, self._timingPage, self._hltPages, self._otherPages]:
            labelFiles = pages.write(baseDir)
            for label, fname in labelFiles:
                ret.append(' <li><a href="%s">%s</a></li>' % (fname, label))
        ret.extend([
            ' </ul>',
            ' <br/>',
        ])
        return ret
class HtmlReport:
    # Top-level report builder: one IndexSection per sample, rendered into
    # index.html plus the per-sample pages under newBaseDir.
    def __init__(self, validationName, newBaseDir):
        self._title = "Tracking validation "+validationName
        self._newBaseDir = newBaseDir
        self._index = [
            '<html>',
            ' <head>',
            ' <title>%s</title>' % self._title,
            ' </head>',
            ' <body>',
        ]
        # (sample digest, rightAfterRefSample) -> IndexSection
        self._sections = collections.OrderedDict()
    def addNote(self, note):
        # Append a free-form paragraph to the index page.
        self._index.append(' <p>%s</p>'%note)
    def beginSample(self, sample, fastVsFull=False, pileupComparison=None):
        # Select (or create) the IndexSection for this sample; subsequent
        # addPlots/addTable calls go to it.
        # Fast vs. Full becomes just after the corresponding Fast
        # Same for PU
        rightAfterRefSample = fastVsFull or (pileupComparison is not None)
        key = (sample.digest(), rightAfterRefSample)
        if key in self._sections:
            self._currentSection = self._sections[key]
        else:
            self._currentSection = IndexSection(sample, self._title, fastVsFull, pileupComparison)
            self._sections[key] = self._currentSection
    def addPlots(self, *args, **kwargs):
        # Forward to the current sample's IndexSection.
        self._currentSection.addPlots(*args, **kwargs)
    def addTable(self, *args, **kwargs):
        # Forward to the current sample's IndexSection.
        self._currentSection.addTable(*args, **kwargs)
    def write(self):
        # Render all sections and write index.html.
        # Reorder sections such that Fast vs. Full becomes just after the corresponding Fast
        keys = self._sections.iterkeys()
        newkeys = []
        for key in keys:
            if not key[1]:
                newkeys.append(key)
                continue
            # is fast vs full
            ind_fast = newkeys.index( (key[0], False) )
            newkeys.insert(ind_fast+1, key)
        for key in newkeys:
            section = self._sections[key]
            self._index.extend(section.write(self._newBaseDir))
        self._index.extend([
            " </body>",
            "</html>",
        ])
        f = open(os.path.join(self._newBaseDir, "index.html"), "w")
        for line in self._index:
            f.write(line)
            f.write("\n")
        f.close()
class HtmlReportDummy:
    """No-op stand-in exposing the same interface as HtmlReport; every call is
    silently ignored."""

    def __init__(self):
        pass

    def beginSample(self, *args, **kwargs):
        """Ignore the sample; nothing is recorded."""
        return None

    def addPlots(self, *args, **kwargs):
        """Ignore the plots; nothing is recorded."""
        return None

    def addTable(self, *args, **kwargs):
        """Ignore the table; nothing is recorded."""
        return None
| [
"matti.kortelainen@cern.ch"
] | matti.kortelainen@cern.ch |
186e04c580756ed5fcd2b7e91ca54ec476d908a3 | 017b95b21359aedb77b5a1df390ecb4130c2a9ea | /django_blog/myblog/models.py | 1dfdff15fced66ab2951b1a2b5374413de70c0a9 | [] | no_license | havidri/Django-Blog | 721880a1eddc7d62a9b75f34d8a039e5b404dee9 | db79e155bf326ede2b88ae120356d8def2a30d97 | refs/heads/main | 2023-07-04T12:44:59.372176 | 2021-08-12T06:35:00 | 2021-08-12T06:35:00 | 394,821,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import reserve as reserve
from django.contrib.auth.models import User
from django.db import models
from django.contrib.auth import get_user_model
from tinymce import HTMLField
from django.urls import reverse
User = get_user_model()
class Author(models.Model):
    # Blog author profile: extends the built-in User (one-to-one) with a
    # profile picture.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profile_pic = models.ImageField()
    def __str__(self):
        # Display the underlying username (admin lists, templates).
        return self.user.username
class Category(models.Model):
    # A post category; Post links to it via a many-to-many field.
    title = models.CharField(max_length=20)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # After creating/updating a category, redirect to the index page.
        return reverse('index')
class Post(models.Model):
    # A blog post with rich-text (TinyMCE) content, an author, categories and
    # an optional thumbnail.
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200)
    content = HTMLField()
    # NOTE(review): auto_now updates the timestamp on every save; if this is
    # meant to be a publish date, auto_now_add was probably intended — confirm.
    date = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    thumbnail = models.ImageField(null=True, blank=True)
    categories = models.ManyToManyField(Category)
    featured = models.BooleanField()
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Detail URL: the 'blog' URL pattern takes the post id as blog_id.
        return reverse('blog', kwargs={
            'blog_id': self.id
        })
    @property
    def get_comments(self):
        # NOTE(review): order_by() with no arguments clears any ordering; a
        # sort key (e.g. '-date') was presumably intended — confirm.
        return self.comments.all().order_by()
class Comment(models.Model):
    # A user comment on a Post; reachable from the post via post.comments.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True)
    post = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)
    def __str__(self):
        return self.user.username
"havidriyono@yahoo.com"
] | havidriyono@yahoo.com |
950cc3ec633927641e6bc1b3f51f4408ecff16e7 | 5f5ea1011786269376ec09f43c3b9bb246e9d98b | /login-robot/src/services/user_service.py | 4a581e0d11c439c7d0e7adfb1f74d0ea2493329a | [] | no_license | tholsti/hy-ohtu-syksy-2021-tehtavat | 30561b84e0da768f15b9f6787e34136f60bf6d00 | 5613e033fbddb5833f7e69b3c148204554c5dd3a | refs/heads/main | 2023-04-04T23:44:26.248025 | 2021-04-18T11:10:48 | 2021-04-18T11:10:48 | 334,141,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | from entities.user import User
import re
class UserInputError(Exception):
    """Raised when a required input (username or password) is missing."""
    pass
class AuthenticationError(Exception):
    """Raised when the username does not exist or the password does not match."""
    pass
class RegistrationError(Exception):
    """Raised when registration input fails validation (taken/invalid username,
    weak password)."""
    pass
class UserService:
    """Application-level service for authenticating and registering users."""

    def __init__(self, user_repository):
        # Repository must provide find_by_username(username) and create(user).
        self._user_repository = user_repository

    def check_credentials(self, username, password):
        """Return the user matching the credentials.

        Raises:
            UserInputError: if username or password is empty.
            AuthenticationError: if the user does not exist or the password differs.
        """
        if not username or not password:
            raise UserInputError("Username and password are required")

        user = self._user_repository.find_by_username(username)

        if not user or user.password != password:
            raise AuthenticationError("Invalid username or password")

        return user

    def create_user(self, username, password):
        """Validate the credentials and persist a new user; return the created user."""
        self.validate(username, password)

        user = self._user_repository.create(
            User(username, password)
        )

        return user

    def validate(self, username, password):
        """Validate registration input; raise on any violation.

        Rules: username is at least 3 characters of a-z and not yet taken;
        password is at least 8 non-whitespace characters and contains at least
        one character that is not a lowercase letter.
        """
        if not username or not password:
            raise UserInputError("Username and password are required")

        if self._user_repository.find_by_username(username):
            raise RegistrationError("Username already exists")

        if not re.match(r'^[a-z]{3,}$', username):
            raise RegistrationError("Username is invalid")

        if not re.match(r'^[\S]{8,}$', password):
            raise RegistrationError("Password is too short")

        # Bug fix: the original pattern '[^a-z]$' anchored the check to the
        # LAST character only, so e.g. "abcdefg1h" was wrongly rejected.
        # Search the whole password for any non-letter character instead.
        if not re.search(r'[^a-z]', password):
            raise RegistrationError("Password contains only letters")
| [
"tomi.holstila@gmail.com"
] | tomi.holstila@gmail.com |
c4b60be269cb804c222514ca84f971ba53fe0a2b | 7590d16f6db2c0b16982fc644b5d536ab1f98c7e | /src/webapp/apps/profiles/management/commands/followers_from_csv.py | 7da4fdec687f5479e19b365a51ac2a350a0c6591 | [] | no_license | GeoRemindMe/GeoRemindMe_Platform | 33444bd8e2fcbf1c8fc42a78140fa5848441ae84 | 30436fba4f16cd787903a667302a3b34a2b8a8e2 | refs/heads/master | 2016-09-05T22:02:51.526975 | 2012-07-12T20:08:26 | 2012-07-12T20:08:26 | 2,743,081 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # coding=utf-8
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from apps.timelines.models import Follower
import csv
import sys
class Command(BaseCommand):
    """Create follower relations from a '#'-delimited CSV file.

    Each data row is "follower_username#followee_username"; the first row is a
    header and is skipped. Rows naming unknown users are silently ignored, and
    existing follower relations are left untouched.
    """
    args = '.csv'

    def handle(self, *args, **options):
        # Use a context manager so the file is closed even on error
        # (the original opened it and never closed it).
        with open(args[0], 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter='#')
            next(reader, None)  # skip the header row (replaces the rownum counter)
            for row in reader:
                try:
                    follower = User.objects.get(username=row[0])
                    followee = User.objects.get(username=row[1])
                    if not Follower.objects.is_follower(follower, followee):
                        Follower.objects.toggle_follower(follower, followee)
                except User.DoesNotExist:
                    # Unknown username on either side: skip the row.
                    pass
        # NOTE(review): sys.exit inside a management command is unusual
        # (Django handles exit codes); preserved to keep behavior identical.
        return sys.exit(0)
"javier@georemindme.com"
] | javier@georemindme.com |
a9812104f466c0374fbccf71d0cd2b8edbf21fb8 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20200601/route_filter.py | 91eecb201ea5a51babd94a74b8238698682e23f2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RouteFilter resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if route_filter_name is not None:
pulumi.set(__self__, "route_filter_name", route_filter_name)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="routeFilterName")
def route_filter_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the route filter.
"""
return pulumi.get(self, "route_filter_name")
@route_filter_name.setter
def route_filter_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "route_filter_name", value)
@property
@pulumi.getter
def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@rules.setter
def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
pulumi.set(self, "rules", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] route_filter_name: The name of the route filter.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteFilterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Route Filter Resource.
:param str resource_name: The name of the resource.
:param RouteFilterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteFilterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteFilterArgs.__new__(RouteFilterArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_filter_name"] = route_filter_name
__props__.__dict__["rules"] = rules
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["ipv6_peerings"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilter"), 
pulumi.Alias(type_="azure-native:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210501:RouteFilter")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilter, __self__).__init__(
'azure-native:network/v20200601:RouteFilter',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
        """
        Get an existing RouteFilter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All output properties start as None; the Pulumi engine populates
        # them from the provider when the existing resource is read back.
        __props__ = RouteFilterArgs.__new__(RouteFilterArgs)

        __props__.__dict__["etag"] = None
        __props__.__dict__["ipv6_peerings"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["peerings"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["rules"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return RouteFilter(resource_name, opts=opts, __props__=__props__)
    # --- Auto-generated read-only accessors for provider output properties ---

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="ipv6Peerings")
    def ipv6_peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit ipv6 peerings.
        """
        return pulumi.get(self, "ipv6_peerings")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit peerings.
        """
        return pulumi.get(self, "peerings")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the route filter resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
e257d259dbfc021d53cf6ad1b76045bdfbe6eb01 | 1567a3af5e8bec0735cde692a2ed9e25614b3625 | /TestEnv.py | 788e1611dfe604cdb92aea8610742f919662db88 | [] | no_license | lroin/Py_Cralwer | bbae9022299ffa28d8ef3833af7d67585ffe6bf6 | 84ccab0ecdc260e59e149893ff12871b7ba9951b | refs/heads/master | 2023-03-21T02:43:08.184180 | 2016-12-19T09:03:18 | 2016-12-19T09:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | from MajorCrawler import *
# ==============================<<func>>==============================
def getBJ():  # Beijing; dict -> DB; the site may serve a CAPTCHA
    """Scrape Beijing court hearing announcements into dicts.

    NOTE(review): the page-listing logic is commented out and ``Links`` is
    pinned to a single detail URL, so this currently fetches only one page
    and breaks out of the pagination loop after the first iteration.
    """
    url_i = 'http://www.bjcourt.gov.cn/ktgg/index.htm?c=&court=&start=&end=&type=&p='
    header = ['省级行政区', '网址', '内容']
    flag = True
    for i in range(1, 10):
        # url=url_i+str(i)
        # buf=getContent(url)
        Links = []
        result = []
        """
        for ele in re.findall('<a href="(/ktgg/ktggDetailInfo.htm?[\s\S]+?)"',buf):
            Links.append('http://www.bjcourt.gov.cn'+ele)
        """
        Links = ['http://www.bjcourt.gov.cn/ktgg/ktggDetailInfo.htm?NId=58109&NAjbh=8755026']
        for fwd in Links:
            node = dict.fromkeys(header)
            page = getContent(fwd)
            soup = BeautifulSoup(page, 'html.parser')
            """
            if re.search('定于二〇一五年',page): # 如果本页有2015的资料, 则到此为止
                print('[北京] 2016 end.')
                flag=False
                break
            elif re.search('验证码',page):
                print('[北京] ',fwd)
                print('[北京] 验证码,等待90秒后重试.')
                time.sleep(90)
            """
            try:
                node['省级行政区'] = '北京市'
                node['网址'] = fwd
                # node['内容']=
                # BUGFIX: the original used a for/else here; the else clause of
                # a for loop runs whenever the loop finishes without break, so
                # 'class failed' printed even when matches were found.  Report
                # the failure only when no matching element exists.
                articles = soup.find_all(class_='article_con')
                if articles:
                    for x in articles:
                        writeText(x, 'test.txt')
                else:
                    print('class failed')
                result.append(node)
                print(node)
            except AttributeError:
                print(fwd)
                traceback.print_exc()
                writeText(traceback.format_exc(), '_ErrorLog.txt')
                continue
        if not flag:
            print('[北京] End at page ', i, '.')
            break
        else:
            # write_DB(result)
            print('[北京] Page ', i, ' saved.')
            break
    return
# ==============================<<Main>>==============================
getBJ()
"eyu.yang@gmail.com"
] | eyu.yang@gmail.com |
f4005d99185dc2e01e9b1daf8d65d901d29911ca | b7d155502d3494866becbfbd5237a45425054b5d | /DAY_9/Face detection using HAAR CLASSIFIERS/Face_Eye_Detection_in_OPENCV.py | 955641f2f9d7c0002059b15cac205a066bebffc3 | [] | no_license | IEEESFIT1/31DaysOfCode | 1b1f01fb73efde32ab68d170a4ecb1dc18824cff | 2eac7a720ad15734a7020dcb3aab31a2d6d55cc8 | refs/heads/main | 2023-08-06T09:20:49.980701 | 2021-10-01T14:03:09 | 2021-10-01T14:03:09 | 317,566,761 | 7 | 3 | null | 2021-10-01T14:03:10 | 2020-12-01T14:27:11 | Python | UTF-8 | Python | false | false | 870 | py | import cv2
from numpy.lib.type_check import imag  # NOTE(review): unused; kept because file-level imports stay untouched

# Haar cascade classifiers shipped with OpenCV for frontal faces and eyes.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# BUGFIX: the stock OpenCV data file is 'haarcascade_eye_tree_eyeglasses.xml';
# 'eye_tree_eye_glasses' does not exist, so the classifier loaded empty.
eye_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')

frame = cv2.VideoCapture(0)  # default webcam

while frame.isOpened():
    _, img = frame.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
        # Search for eyes only within each detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        # BUGFIX: was 'detectdetectMultiScale' (AttributeError at runtime).
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 5)
    cv2.imshow('img', img)
    # BUGFIX: was 'cv2.waitkey' (the function is 'waitKey'); quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

frame.release()
cv2.destroyAllWindows()  # also release the display window
"noreply@github.com"
] | IEEESFIT1.noreply@github.com |
24b6a392193af3ed499ed5481be0d574615aa635 | fa0f12a6d63be22b588133bfb9c130f1eeecab3d | /myvenv/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py | 1295e23141c110930d3bf02637af4990d0143b8e | [] | no_license | 8th-caulion/high-hat | 6b2c455be14b5e617bf993cfb67c68975df3aa65 | fc1f9793747892b7b58f066c45ab95d3f0269db9 | refs/heads/master | 2023-08-02T12:07:36.540488 | 2020-06-03T17:36:32 | 2020-06-03T17:36:32 | 267,542,957 | 0 | 6 | null | 2021-09-22T19:09:26 | 2020-05-28T09:04:29 | Python | UTF-8 | Python | false | false | 8,237 | py | """Logic that powers autocompletion installed by ``pip completion``.
"""
import optparse
import os
import sys
# Merge conflict resolved in favour of the newer (HEAD) implementation,
# which uses commands_dict / create_command instead of get_summaries.
from itertools import chain

from pip._internal.cli.main_parser import create_main_parser
from pip._internal.commands import commands_dict, create_command
from pip._internal.utils.misc import get_installed_distributions
from pip._internal.utils.typing import MYPY_CHECK_RUNNING

if MYPY_CHECK_RUNNING:
    from typing import Any, Iterable, List, Optional


def autocomplete():
    # type: () -> None
    """Entry Point for completion of main and subcommand options.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''

    parser = create_main_parser()
    subcommands = list(commands_dict)
    options = []

    # subcommand
    subcommand_name = None  # type: Optional[str]
    for word in cwords:
        if word in subcommands:
            subcommand_name = word
            break
    # subcommand options
    if subcommand_name is not None:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for show and uninstall
        should_list_installed = (
            subcommand_name in ['show', 'uninstall'] and
            not current.startswith('-')
        )
        if should_list_installed:
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)

        subcommand = create_command(subcommand_name)

        for opt in subcommand.parser.option_list_all:
            if opt.help != optparse.SUPPRESS_HELP:
                for opt_str in opt._long_opts + opt._short_opts:
                    options.append((opt_str, opt.nargs))

        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        # get completion type given cwords and available subcommand options
        completion_type = get_path_completion_type(
            cwords, cword, subcommand.parser.option_list_all,
        )
        # get completion files and directories if ``completion_type`` is
        # ``<file>``, ``<dir>`` or ``<path>``
        if completion_type:
            paths = auto_complete_paths(current, completion_type)
            options = [(path, 0) for path in paths]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1] and option[0][:2] == "--":
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        opts = [i.option_list for i in parser.option_groups]
        opts.append(parser.option_list)
        flattened_opts = chain.from_iterable(opts)
        if current.startswith('-'):
            for opt in flattened_opts:
                if opt.help != optparse.SUPPRESS_HELP:
                    subcommands += opt._long_opts + opt._short_opts
        else:
            # get completion type given cwords and all available options
            completion_type = get_path_completion_type(cwords, cword,
                                                       flattened_opts)
            if completion_type:
                subcommands = list(auto_complete_paths(current,
                                                       completion_type))

    print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def get_path_completion_type(cwords, cword, opts):
    # type: (List[str], int, Iterable[Any]) -> Optional[str]
    """Get the type of path completion (``file``, ``dir``, ``path`` or None)

    Merge conflict resolved in favour of the HEAD side (explicit
    ``return None`` and type comments).

    :param cwords: same as the environmental variable ``COMP_WORDS``
    :param cword: same as the environmental variable ``COMP_CWORD``
    :param opts: The available options to check
    :return: path completion type (``file``, ``dir``, ``path`` or None)
    """
    if cword < 2 or not cwords[cword - 2].startswith('-'):
        return None
    for opt in opts:
        if opt.help == optparse.SUPPRESS_HELP:
            continue
        for o in str(opt).split('/'):
            if cwords[cword - 2].split('=')[0] == o:
                if not opt.metavar or any(
                        x in ('path', 'file', 'dir')
                        for x in opt.metavar.split('/')):
                    return opt.metavar
    return None


def auto_complete_paths(current, completion_type):
    # type: (str, str) -> Iterable[str]
    """If ``completion_type`` is ``file`` or ``path``, list all regular files
    and directories starting with ``current``; otherwise only list directories
    starting with ``current``.

    :param current: The word to be completed
    :param completion_type: path completion type(`file`, `path` or `dir`)i
    :return: A generator of regular files and/or directories
    """
    directory, filename = os.path.split(current)
    current_path = os.path.abspath(directory)
    # Don't complete paths if they can't be accessed
    if not os.access(current_path, os.R_OK):
        return
    filename = os.path.normcase(filename)
    # list all files that start with ``filename``
    file_list = (x for x in os.listdir(current_path)
                 if os.path.normcase(x).startswith(filename))
    for f in file_list:
        opt = os.path.join(current_path, f)
        comp_file = os.path.normcase(os.path.join(directory, f))
        # complete regular files when there is not ``<dir>`` after option
        # complete directories when there is ``<file>``, ``<path>`` or
        # ``<dir>``after option
        if completion_type != 'dir' and os.path.isfile(opt):
            yield comp_file
        elif os.path.isdir(opt):
            yield os.path.join(comp_file, '')
| [
"rldnjs9347@gmail.com"
] | rldnjs9347@gmail.com |
06c79cf2ab054537d61dc9f297aec93bfa26b767 | 4f43cb4a2cbdafde4d9070aace0edca633cb6ab4 | /stats.py | bfb89069fc6f8e55e8d7ab98671bde3a69d70d0a | [] | no_license | trevorc/blackscholes | 72a05ec97b52e2c4d15b2bfd5db86991724ffda3 | 2b927a5f0c469ea52b7de6709572a3a8f2105ffc | refs/heads/master | 2016-09-05T19:20:58.816374 | 2010-08-09T00:53:14 | 2010-08-09T00:54:53 | 825,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import collections
import math
import scipy.optimize
Errors = collections.namedtuple('Errors', ['rss', 'r_squared', 'rmse'])
def lm(f, y, x, b0, **kwargs):
    """Fit model ``f(x, b)`` to observations ``y`` by nonlinear least squares.

    ``b0`` is the starting parameter estimate; any extra keyword arguments are
    forwarded to :func:`scipy.optimize.leastsq`, whose result is returned
    unchanged.
    """
    residuals = lambda b: y - f(x, b)
    return scipy.optimize.leastsq(residuals, b0, **kwargs)
def errors(f, y, x, b):
    """Return goodness-of-fit statistics for model ``f`` with parameters ``b``.

    ``y`` and ``x`` are numpy arrays of observations and predictors.  Returns
    an ``Errors(rss, r_squared, rmse)`` namedtuple: residual sum of squares,
    coefficient of determination and root-mean-square error.
    """
    Y = f(x, b)
    y_mean = sum(y, 0.0) / len(y)
    rss = sum((y - Y) ** 2)
    # NOTE(review): ss_tot is zero when y is constant, making r_squared divide
    # by zero — confirm callers never pass a constant series.
    ss_tot = sum((y - y_mean) ** 2)
    r_squared = 1 - rss / ss_tot
    rmse = math.sqrt(rss / len(y))
    # Fix: reuse the rmse computed above; the original recomputed
    # math.sqrt(rss / len(y)) a second time inside the return expression.
    return Errors(rss, r_squared, rmse)
| [
"trevor@caira.com"
] | trevor@caira.com |
b9132f16bfc5b5e0cc2704d85af65a089cffd7cb | eee647635af1583d9b1150b7cd3195336291e1d2 | /ABC133/c.py | eb49ffdc05d6db403c85c8227196668dd8d288ac | [] | no_license | lilium513/competition_programing | 42f69222290b09b491477b8a2b9c2d4513ebe301 | 45082bf542224b667e753ad357cf145f683fde54 | refs/heads/master | 2020-06-22T03:16:34.510906 | 2019-07-31T18:22:31 | 2019-07-31T18:22:31 | 197,619,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
def do():
L, R = list(map(int, input().split(" ")))
ans = 10 ** 15
if R - L < 5000: #差が小さい場合は全探索
for i in range(L,R + 1):
for j in range(i+1,R + 1):
if (i*j) % 2019 < ans:
ans = (i*j) % 2019
else:#そうでなければ確実に一つ2019の倍数がある
ans = 0
print(ans)
| [
"lim.intefx@gmail.com"
] | lim.intefx@gmail.com |
d6f9576e15f4246ceca27311ec1c907b2dde14b7 | e06a996c9f78bd8767bde431951e91859dc6ae8a | /experimentalComponents/gupta_paper_brian2.py | 8f6e78cb4d1b3abc295fc6460508c62b8b415f08 | [
"MIT"
] | permissive | Jbwasse2/snn-rl | 7dbe8bd5c23837cb76f492e7b911081dee7a4e4a | 29b040655f432bd390bc9d835b86cbfdf1a622e4 | refs/heads/master | 2020-08-07T10:28:16.533162 | 2019-10-07T15:21:02 | 2019-10-07T15:21:02 | 213,411,865 | 0 | 0 | NOASSERTION | 2019-10-07T14:53:54 | 2019-10-07T14:53:54 | null | UTF-8 | Python | false | false | 852 | py | #While reading fninf-08-00006.pdf I copied some useful snippets of code here
#Leaky integrate and fire with refractoriness
G = NeuronGroup ( number_of_neurons,
'dv/dt = -(v - v_0)/tau_m : volt # membrane potential',
threshold='v > v_th',
reset='v = v_0',
refractory='(t-lastspike) <= 2*ms');
#Random initial values for membrane potential
G.v = 'v_0+randn() *3*mV'
#Spike timming dependant plasticity
S = Synapses ( source_group , target_group,
'''w: siemens
dA_source/dt = -A_source/tau_source: siemens (event-driven)
dA_target/dt = -A_target/tau_target: siemens (event-driven)''',
pre='''g_post += w
A_source += deltaA_source
w = clip(w+A_target, 0*siemens, w_max)''',
post='''A_target += deltaA_target
w = clip(w+A_source, 0*siemens, w_max)''')
#Connectivity without self connections
S.connect('i != j')
| [
"tartavull@gmail.com"
] | tartavull@gmail.com |
8b3bfe75a888d9cb49b2f4c56c83b47f04bfaa01 | 7de174ec684fe60717b2757fe5e194cc597fee38 | /plugins/plugin_clone.py | 312d2f5ae7f840bba90736e8b0d4e3c654b8abc7 | [] | no_license | oma256/repo_scan | 41ad4972908859f947d7226dd80f09a6f582301c | 03971eea701a0079d9a824261f6ee0e21c1d2f79 | refs/heads/master | 2022-03-26T06:40:20.388127 | 2019-12-05T15:09:11 | 2019-12-05T15:09:11 | 222,365,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
import subprocess
import sys
from loguru import logger
from plugins.base import Command
from utils.parser import create_parser
class Clone(Command):
    """Plugin command that clones the repository named on the CLI into ./sandbox."""

    def execute(self) -> None:
        """Parse CLI args, ensure the sandbox directory exists, and git-clone."""
        parser = create_parser()
        args = parser.parse_args()
        if not args.repository:
            # Fix: reuse the parser built above instead of constructing a
            # second one just to print help.
            parser.print_help()
            sys.exit(0)
        # Fix: exist_ok avoids the check-then-create race of the original
        # os.path.exists()/os.makedirs() pair.
        os.makedirs('sandbox', exist_ok=True)
        logger.info("Downloading repository")
        # Run git in the sandbox, silencing stdout; communicate() waits for exit.
        subprocess.Popen(cwd='./sandbox',
                         args=['git', 'clone', args.repository],
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.DEVNULL).communicate()
        logger.info("Done")
| [
"oma.dulatov@gmail.com"
] | oma.dulatov@gmail.com |
c33583ccd6b33f5b384c1373a54f70a46388cc88 | 9888ef3bb4408a4cef8b2ad49d3b6eb873056694 | /multiclass_allH5_data/step2_write_all_H5_tfrecord.py | f34eb929cb07fbe2d0d0722c4d6dff8cd268ecc1 | [] | no_license | MeiliLiu-STEM/TFSeg_BraTS | bd3d52a8cbfeeea1b188fc268f0dfb74c0171efa | d6f482a57a859b59a5c507094efa928a21239198 | refs/heads/master | 2021-04-08T02:41:10.564845 | 2018-08-20T14:45:17 | 2018-08-20T14:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,577 | py | # -*- coding: utf-8 -*-
# @__ramraj__
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import sys
import numpy as np
import cv2
import os
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import config
import h5py
def _int64_feature(value):
    """Wrap a scalar int in a tf.train.Feature (int64_list) for TFRecord serialisation."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    """Wrap a bytes value in a tf.train.Feature (bytes_list) for TFRecord serialisation."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def write_record(imgs, lbls, IDs, tfrecord_name='./train.tfrecords', lbl='train'):
    """Serialise images, masks and ids into a TFRecord file.

    :param imgs: float32 image array of shape (N, H, W, C)
    :param lbls: integer mask array of shape (N, H, W)
    :param IDs: per-sample id strings
    :param tfrecord_name: output path of the TFRecord file
    :param lbl: split name used only in progress messages ('train'/'test')
    """
    writer = tf.python_io.TFRecordWriter(tfrecord_name)
    n_obs = imgs.shape[0]
    for i in range(n_obs):
        if not i % 100:
            print('{} data: {}/{}'.format(lbl, i, n_obs))
            sys.stdout.flush()

        # Load the image
        img = imgs[i, :, :, :]
        # BUGFIX: the original assigned this array to a variable named 'lbl',
        # shadowing the split-name parameter, so every progress message after
        # the first printed a whole label array instead of 'train'/'test'.
        mask = lbls[i, :, :]
        ID = IDs[i]

        # Create a feature
        feature = {
            'train/image': _bytes_feature(tf.compat.as_bytes(img.tostring())),
            'train/label': _bytes_feature(tf.compat.as_bytes(mask.tostring())),
            'train/id': _bytes_feature(tf.compat.as_bytes(ID))}
        example = tf.train.Example(features=tf.train.Features(feature=feature))

        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()
def binarize_targets(y_train, task='all'):
    """Binarise a BraTS segmentation mask for one segmentation task.

    :param y_train: integer label array (0 background, 1 necrotic, 2 edema,
        4 enhancing — the label codes tested below)
    :param task: 'all' (any tumour label > 0), 'necrotic', 'edema' or
        'enhance'.  Defaults to 'all', which matches the original
        hard-coded behaviour, so existing callers are unaffected.
    :return: 0/1 integer array of the same shape
    :raises ValueError: for an unknown task name
    """
    if task == 'all':
        y_train = (y_train > 0).astype(int)
    elif task == 'necrotic':
        y_train = (y_train == 1).astype(int)
    elif task == 'edema':
        y_train = (y_train == 2).astype(int)
    elif task == 'enhance':
        y_train = (y_train == 4).astype(int)
    else:
        # Raise instead of exit(): a library helper should not kill the process.
        raise ValueError("Unknown task %s" % task)
    return y_train
def load_data(path, do_test=False):
    """Load every per-slice HDF5 file under ``path``.

    Filenames are assumed to be underscore-separated with the patient number
    in field 2 and the slice index in field 3 — TODO confirm against the
    writer of these files.  ``do_test`` is currently unused.

    :param path: directory containing the per-slice .h5 files
    :param do_test: unused flag (kept for interface compatibility)
    :return: (images, labels, ids) — images is float32 (N, H, W, 4) after the
        transpose below, labels is int32 (N, H, W), ids holds the filenames
    """
    images = []
    labels = []
    ids = []

    print('Reading images')

    # Invert MODALITY_DICT so modality index -> modality name.
    # NOTE: .iteritems() is Python-2 only.
    mod_dict = dict((v, k) for k, v in config.MODALITY_DICT.iteritems())

    for i in os.listdir(path):
        tmp_list = i.split('_')
        patient_num = tmp_list[2]
        slice_ix = tmp_list[3]

        h5f = h5py.File(os.path.join(path, i), 'r')
        # +++++++++++++++++++++++++ IMAGE +++++++++++++++++++++++++
        # One dataset per MRI modality, stacked along the first axis.
        mod_images = []
        for mod in range(4):
            dataset_name = '{}_{}_{}'.format(mod_dict[mod],
                                             patient_num, slice_ix)
            img = h5f[dataset_name][:]
            mod_images.append(img)
        images.append(mod_images)
        # +++++++++++++++++++++++++ LABEL +++++++++++++++++++++++++
        lbl = h5f['gt_{}_{}'.format(patient_num, slice_ix)][:]
        lbl = binarize_targets(lbl)
        labels.append(lbl)
        h5f.close()
        # +++++++++++++++++++++++++++ ID ++++++++++++++++++++++++++
        ids.append(i)

    images = np.array(images, dtype=np.float32)
    # Move the modality axis last: (N, 4, H, W) -> (N, H, W, 4).
    images = images.transpose((0, 2, 3, 1))
    labels = np.array(labels, dtype=np.int32)
    ids = np.array(ids)
    print('images shape : ', images.shape)
    print('labels shape : ', labels.shape)

    return images, labels, ids
def creat_tf_records():
    """Load the H5 dataset, split train/test, shuffle, and write TFRecords.

    NOTE(review): the name keeps the original 'creat' spelling because it is
    part of the public interface (called from the __main__ guard below).
    """
    images_data, labels_data, ids_data = load_data(config.H5_SRC)
    print('Data Loaded.')
    print('          Data : ', images_data.shape, '\n')

    # Deterministic split (random_state=42) into train and held-out test sets.
    train_images, test_images, train_labels, test_labels, \
        train_ids, test_ids = train_test_split(images_data, labels_data, ids_data,
                                               test_size=config.TEST_SPLIT,
                                               random_state=42)
    print('Train data : ')
    print(train_images.shape)
    print(train_labels.shape)
    print(train_ids.shape)
    print(test_images.shape)
    print(test_labels.shape)
    print(test_ids.shape)
    print('++++++++++++++++++++++++++++++++')
    # ========================================
    # Shuffle
    train_images, train_labels, train_ids = shuffle(train_images, train_labels, train_ids)
    test_images, test_labels, test_ids = shuffle(test_images, test_labels, test_ids)

    TFRECORD_ROOT = './record/'
    if not os.path.exists(TFRECORD_ROOT):
        os.makedirs(TFRECORD_ROOT)

    # Write Train TFRecords
    write_record(train_images, train_labels, train_ids,
                 tfrecord_name=TFRECORD_ROOT + 'train.tfrecords', lbl='train')
    print('\n')
    # Write Test TFRecords
    write_record(test_images, test_labels, test_ids,
                 tfrecord_name=TFRECORD_ROOT + 'test.tfrecords', lbl='test')


if __name__ == '__main__':
    creat_tf_records()
| [
"cramraj8@gmail.com"
] | cramraj8@gmail.com |
06d8642d821b8be29fbef654e1e24ef1fe4d3a1e | f480589c6f8c1d33fccb0dad4380dada77340660 | /migrations/versions/ac3f2179013d_.py | 90ab319f620ac1efe4ff2779223877201e2f8817 | [] | no_license | carlosribas/backend-coding-challenge | 519cb35bfd57caf014dfcd4505a7a84da6ae9bda | 32b9fa7dd7940a27a10eff3af01f4ce2e93ccdbd | refs/heads/master | 2020-04-05T04:29:42.085218 | 2018-11-12T12:28:26 | 2018-11-12T12:28:26 | 156,553,812 | 0 | 0 | null | 2018-11-07T13:53:30 | 2018-11-07T13:53:30 | null | UTF-8 | Python | false | false | 926 | py | """empty message
Revision ID: ac3f2179013d
Revises:
Create Date: 2018-11-07 20:12:02.753303
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ac3f2179013d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``translator`` table (Alembic auto-generated migration)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('translator',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=255), nullable=False),
    sa.Column('text_translated', sa.String(length=255), nullable=True),
    sa.Column('uid', sa.String(length=50), nullable=True),
    sa.Column('status', sa.String(length=10), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    """Drop the ``translator`` table, reversing :func:`upgrade`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('translator')
    # ### end Alembic commands ###
| [
"caduribas@gmail.com"
] | caduribas@gmail.com |
58bb65a58ddad2e7ba4755e15c3698f3ff9b3301 | cb33113c4063867fa41cb74943d0a056a383b6a1 | /codexpert/Snake.py | bf0365b45c2712a8fdc2e057e76157dea480dae5 | [] | no_license | manuck/Algorithm | 9c6280095da6b88473460da52d07fb23ee6c3f9f | 4c15ff42f39224eb9b29728544c92dce9341fdfa | refs/heads/master | 2020-04-18T02:06:53.437576 | 2019-06-26T08:59:16 | 2019-06-26T08:59:16 | 167,148,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | import sys
sys.stdin = open("Snake_input.txt")
| [
"snc9000@naver.com"
] | snc9000@naver.com |
e798b57fa3a276c7acb65be428cc91e5a58aca43 | e3f2ab2999a851121897c02ee81bd85c2543bb96 | /ketan/codes/ee18btech11030/ee18btech11030_1.py | 7034225e0dcac1c1afe24ced57259387f4318dfb | [] | no_license | yashwanthguguloth24/control | ee38822c00d709ab63a35a9ebf7be886abae7eb7 | cff91230294686a4ee9432b04aea4333198512c1 | refs/heads/master | 2022-09-16T14:49:10.111030 | 2020-06-01T03:21:08 | 2020-06-01T03:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | ###################################################################
# This is python code for Bode plots.
# By Moparthi Varun Sankar
# April 28 , 2020
# Released under GNU GPL
###################################################################
from scipy import signal
import matplotlib.pyplot as plt
from pylab import*
#if using termux
import subprocess
import shlex
#end if
#Defining the transfer function
s1 = signal.lti([16200,21*16200,110*16200], [11, 18*11 ,99*11,162*11,0]) #G(s)
s2 = signal.lti([1,0.121], [754.223*1,754.223*0.0001604]) #Gc(s)
s3 = signal.lti([16200,342160.2,1823164.2,215622],[8296.2,149333,821522,1344116.2,215.6,0]) #G(s)*Gc(s)
#signal.bode takes transfer function as input and returns frequency,magnitude and phase arrays
w1,mag1,phase1 = signal.bode(s1,n=1000)
w2,mag2,phase2 = signal.bode(s2,n=1000)
w3,mag3,phase3 = signal.bode(s3,n=1000)
plt.figure()
plt.subplot(2,1,1)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Magnitude(db)')
plt.semilogx(w1, mag1,label='Uncompensated') # Magnitude plot for G(s)
plt.semilogx(w2, mag2,label='Compensator') # Magnitude plot for Gc(s)
plt.semilogx(w3, mag3,label='Compensated') # Magnitude plot for G(s)*Gc(s)
plt.plot(38.95,0,'o')
plt.text(38.95,0, '({}, {})'.format(38.95,0))
plt.plot(0.0001604,0,'o')
plt.text(0.0001604,0, '({}, {})'.format(0.0001604,0))
plt.plot(0.121,-57.55,'o')
plt.text(0.121,-57.55, '({}, {})'.format(0.121,-57.55))
plt.plot(1.21,0,'o')
plt.text(1.21,0, '({}, {})'.format(1.21,0))
plt.legend()
plt.subplot(2,1,2)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Phase(degree)')
plt.semilogx(w1, phase1,label='Uncompensated') # Phase plot for G(s)
plt.semilogx(w2, phase2,label='Compensator') # Phase plot for Gc(s)
plt.semilogx(w3, phase3,label='Compensated') # Phase plot for G(s)*Gc(s)
plt.annotate('', (1.21,-117), (1.21,-127), arrowprops=dict(facecolor='red',arrowstyle='<|-|>',mutation_scale=15))
plt.annotate("Lag in Phase",(1.21,-117))
plt.plot(38.95,-184,'o')
plt.text(38.95,-184, '({}, {})'.format(38.95,-184))
plt.legend()
#if using termux
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.pdf')
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.eps')
subprocess.run(shlex.split("termux-open ./figs/ee18btech11030/ee18btech11030_2.pdf"))
#else
#plt.show()
| [
"gadepall@gmail.com"
] | gadepall@gmail.com |
d871c5cfc9ab2fb5f9fd61aa0dca96c2093b5d22 | d15db6af7db42745262775a7402877bcee37e22b | /HaiZhiTestEngine.py | 86915fb8c4b7fb93d612043b3e5712e3833f47a9 | [] | no_license | NotTodayNotMe/HaiZhiInterface | 0838916245f56ae369a7de3a64d597cc40065b7d | 7ad1c555fbc9b3bf53a1235c523c24910d1cf71a | refs/heads/master | 2020-03-28T22:36:16.689070 | 2018-08-27T09:28:34 | 2018-08-27T09:28:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,519 | py | #coding:utf-8
import datetime
import functools
import json

from HistoryTrading import HistoryTrading
from RealTimeTrading import RealTimeTrading
from HaizhiData import HaizhiData
'''装饰器'''
def input_checker(func):
    """Decorator validating/normalising trade-call keyword arguments.

    * ``code``   -> zero-padded 6-character string (int inputs are converted)
    * ``volume`` -> string (int inputs are converted)
    * ``date``   -> '%Y-%m-%d' string; only checked for the backtest engine,
      defaulting to the engine's current simulated time when missing

    The wrapped function must be called with keyword arguments only, since the
    wrapper accepts ``**kwargs`` exclusively.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def _input_checker(self, **kwargs):
        # --- stock code check ---
        if isinstance(kwargs['code'], str):
            pass
        elif isinstance(kwargs['code'], int):
            kwargs['code'] = str(kwargs['code'])
            while len(kwargs['code']) < 6:
                kwargs['code'] = '0' + kwargs['code']
        else:
            # Fix: parenthesised raise works on both Python 2 and 3; the
            # original "raise TypeError, msg" form is Python-2 only.
            raise TypeError('code must be str or int')
        # --- trade volume check ---
        if isinstance(kwargs['volume'], str):
            pass
        elif isinstance(kwargs['volume'], int):
            kwargs['volume'] = str(kwargs['volume'])
        else:
            raise TypeError('volume must be str or int')
        # --- backtest date check ---
        if isinstance(self._core, HistoryTrading):
            if 'date' not in kwargs:
                kwargs['date'] = self._current_time.strftime('%Y-%m-%d')
            elif isinstance(kwargs['date'], datetime.datetime):
                kwargs['date'] = kwargs['date'].strftime('%Y-%m-%d')
            elif isinstance(kwargs['date'], str):
                pass
            else:
                raise TypeError('date must be str or datetime object')
        # --- call through with the normalised arguments ---
        # print kwargs
        res = func(self, **kwargs)
        return res
    return _input_checker
class HaiZhiTestEngine(object):
    def __init__(self,user_id='',password = '',type = 'RealTimeTrading'):
        '''
        Example order dict handled by the engines:
        {'buy_sell': 'sell', 'code': '000006', 'volume': '100', 'price': '1', 'price_type': 'now_price', 'effect_term': '2'}
        :param user_id: user id; for the backtest engine it is reused as the
            strategy name
        :param type: engine type ('RealTimeTrading', 'HistoryTrading' or
            'HaizhiData'); defaults to the real-time trading engine
        '''
        if type == 'RealTimeTrading':
            self._core = RealTimeTrading(userid=user_id, password=password)
        elif type == 'HistoryTrading':
            # Backtests start one day in the past and auto-create a strategy
            # named after the user id.
            stratagy_name = user_id
            self._current_time = datetime.datetime.today()-datetime.timedelta(days=1)
            self._core = HistoryTrading(userid=user_id,password=password,strategy_name = stratagy_name)
            self._core.create_strategy(stratagy_name)
        elif type == 'HaizhiData':
            self._core = HaizhiData(userid=user_id, password=password)
        else:
            # NOTE(review): Python-2 raise syntax, kept byte-identical.
            raise ValueError,'type must be "RealTimeTrading" or "HistoryTrading"'
    # Expose the currently active trading engine type.
    @property
    def core(self):
        '''
        Return the class of the active engine core.
        :return: class object of the wrapped engine
        '''
        return self._core.__class__
    # Current engine time (mainly meaningful for backtesting).
    @property
    def current_time(self):
        '''
        Return the engine's current time: wall-clock time for the real-time
        engine, the simulated date for the backtest engine.
        :return: formatted time string
        '''
        if isinstance(self._core,RealTimeTrading):
            return datetime.datetime.now().strftime('%Y-%m-%d,%H:%M:%S')
        elif isinstance(self._core,HistoryTrading):
            return self._current_time.strftime('%Y-%m-%d')
    @current_time.setter
    def current_time(self,date):
        '''
        Freely set the simulated engine time (backtest engine only).
        :param date: '%Y-%m-%d' string or datetime object
        :return:
        '''
        if isinstance(self._core,HistoryTrading):
            if isinstance(date,str):
                self._current_time = datetime.datetime.strptime(date,'%Y-%m-%d')
            elif isinstance(date,datetime.datetime):
                self._current_time = date
        else:
            # NOTE(review): Python-2 raise syntax; fires for non-backtest engines.
            raise TypeError, '%s can not operate on current_time' % (self._core.__class__)
    def shift_current_time(self,days):
        '''
        Advance the simulated time by ``days`` days (backtest engine only).
        :param days: number of days to shift (may be negative)
        :return: the new simulated date formatted as '%Y-%m-%d'
        '''
        if isinstance(self._core,RealTimeTrading):
            raise TypeError,'RealTimeTrading can not operate on current_time'
        elif isinstance(self._core,HistoryTrading):
            self._current_time += datetime.timedelta(days=days)
            return self._current_time.strftime('%Y-%m-%d')
    # Buy order: routed to the real-time broker or the backtest core.
    @input_checker
    def buy(self,code,volume,price_type='now_price',price=None,date=None,effect_term = 1):
        '''
        Place a buy order.
        :param code: 6-char stock code (ints are normalised by input_checker)
        :param volume: number of shares (str or int)
        :param price_type: pricing mode (real-time engine; backtests always
            use 'average_price')
        :param price: explicit price, used together with price_type
        :param date: backtest trade date; defaults to the engine's current time
        :param effect_term: order validity term (real-time engine only)
        :return: parsed JSON response from the engine
        '''
        if isinstance(self._core,RealTimeTrading):
            dic = {'code':code,
                   'volume':volume,
                   'price_type': price_type,
                   'price': price,
                   'effect_term':str(effect_term)}
            self._core.set_stock_dic(dic)
            res = self._core.buy()
            return json.loads(res)
        elif isinstance(self._core,HistoryTrading):
            if not date:
                date = self._current_time.strftime("%Y-%m-%d")
            dic = {'date':date,
                   'code': code,
                   'volume': volume,
                   'price_type': 'average_price',
                   }
            self._core.set_stock_dic(dic)
            res = self._core.bt_buy()
            return json.loads(res)
    # Sell order: mirrors buy() but calls sell()/bt_sell() on the core.
    @input_checker
    def sell(self,code,volume,price_type='now_price',price=None,date=None,effect_term = 1):
        '''
        Place a sell order; parameters are identical to :meth:`buy`.
        :return: parsed JSON response from the engine
        '''
        if isinstance(self._core,RealTimeTrading):
            dic = {'code':code,
                   'volume':volume,
                   'price_type': price_type,
                   'price': price,
                   'effect_term':str(effect_term)}
            self._core.set_stock_dic(dic)
            res = self._core.sell()
            return json.loads(res)
        elif isinstance(self._core,HistoryTrading):
            if not date:
                date = self._current_time.strftime("%Y-%m-%d")
            dic = {'date': date,
                   'code': code,
                   'volume': volume,
                   'price_type': 'average_price',
                   }
            self._core.set_stock_dic(dic)
            res = self._core.bt_sell()
            return json.loads(res)
#撤单
def cancel_order(self,pre_id):
if isinstance(self._core,RealTimeTrading):
return self._core.cancel_order(pre_id)
else:
raise TypeError
#资产和持仓情况
def query_profit(self):
if isinstance(self._core, RealTimeTrading):
return json.loads(self._core.query_profit())
elif isinstance(self._core,HistoryTrading):
pass
#委托查询
def query_records(self,start="2018-4-4", end="2018-04-05"):
if isinstance(self._core,RealTimeTrading):
return json.loads(self._core.query_records(start,end))
# Query historical settlement records.
def query_history_records(self, start='', end=''):
    """Return decoded historical settlement records between `start` and `end`."""
    if isinstance(self._core, RealTimeTrading):
        raw = self._core.query_history_records(start, end)
    elif isinstance(self._core, HistoryTrading):
        raw = self._core.bt_query_history_records(start, end)
    else:
        return None
    return json.loads(raw)
# Export historical settlement records to a CSV file.
def history_to_csv(self, path='history_record'):
    """Write historical settlement records to `path` (back-testing core only)."""
    if isinstance(self._core, HistoryTrading):
        return self._core.get_history_csv(path)
    if isinstance(self._core, RealTimeTrading):
        pass  # live core: CSV export not implemented yet
# List strategies.
def list_stratagy(self):
    '''
    Return the decoded list of strategies (back-testing core only).

    :raises AttributeError: when the underlying core is not HistoryTrading
    '''
    if isinstance(self._core, HistoryTrading):
        return json.loads(self._core.get_strategy())
    # Cross-version raise form; the original used the Python-2-only
    # statement syntax `raise AttributeError, msg`.
    raise AttributeError('%s has no attribute stratagy_name' % (self._core.__class__))
# Set the active strategy name.
def set_stratagy(self, stratagy_name):
    '''
    Select `stratagy_name` as the active strategy (back-testing core only).

    :raises AttributeError: when the underlying core is not HistoryTrading
    '''
    if isinstance(self._core, HistoryTrading):
        self._core.set_strategy_name(stratagy_name)
    else:
        # Cross-version raise form (was the Python-2-only raise statement).
        raise AttributeError('%s has no attribute stratagy_name' % (self._core.__class__))
# Create a strategy.
def create_stratagy(self, stratagy_name):
    """Create a new strategy named `stratagy_name` (back-testing core only)."""
    if not isinstance(self._core, HistoryTrading):
        raise AttributeError
    return self._core.create_strategy(stratagy_name)
# Delete a strategy.
def del_stratagy(self, stratagy_name):
    '''
    Delete the strategy named `stratagy_name` (back-testing core only).

    :raises AttributeError: when the underlying core is not HistoryTrading
    '''
    if isinstance(self._core, HistoryTrading):
        return self._core.del_strategy(stratagy_name)
    # Cross-version raise form (was the Python-2-only raise statement).
    raise AttributeError('%s has no attribute stratagy_name' % (self._core.__class__))
# Fetch selected attributes of a single stock over a period.
def get_stock_args(self, code, startday="", endday="", args=[]):
    """Return the attributes named in `args` for stock `code` between
    `startday` and `endday` (HaizhiData core only).

    NOTE(review): `args=[]` is a mutable default; it is only passed through
    here, but confirm the data core does not mutate it.
    """
    if not isinstance(self._core, HaizhiData):
        raise TypeError
    return self._core.get_stock_args(code, startday, endday, args)
# Fetch one attribute of every stock over a period.
def get_stocks_arg(self, startday="", endday="", arg=""):
    """Return attribute `arg` for all stocks between `startday` and `endday`
    (HaizhiData core only)."""
    if not isinstance(self._core, HaizhiData):
        raise TypeError
    return self._core.get_stocks_arg(startday, endday, arg)
# Fetch all stock codes on the Shanghai or Shenzhen exchange over a period.
def get_exchange_stocks(self, startday="", endday="", exchange="all"):
    """Return every stock code on `exchange` between `startday` and `endday`
    (HaizhiData core only)."""
    if not isinstance(self._core, HaizhiData):
        raise TypeError
    return self._core.get_exchange_stocks(startday, endday, exchange)
# Fetch all stock codes of a sector/plate over a period.
def get_plate_stocks(self, startday="", endday="", plate=""):
    """Return every stock code in `plate` between `startday` and `endday`
    (HaizhiData core only)."""
    if not isinstance(self._core, HaizhiData):
        raise TypeError
    return self._core.get_plate_stocks(startday, endday, plate)
| [
"787162506@qq.com"
] | 787162506@qq.com |
c9f81bef1f3181735e2d92ff5e734356f7d6e16f | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/SQLAlchemy/sqlalchemy/cimmutabledict.pyi | 1a1a3006afc360bf3f13c4a33677a997d14fb729 | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 737 | pyi | from _typeshed import SupportsKeysAndGetItem
from collections.abc import Iterable
from typing import Generic, TypeVar, overload
from typing_extensions import final
# Key/value type variables; the *2 variants type the "other" mapping in
# union/merge_with so the result type can widen both keys and values.
_KT = TypeVar("_KT")
_KT2 = TypeVar("_KT2")
_VT = TypeVar("_VT")
_VT2 = TypeVar("_VT2")
@final
class immutabledict(dict[_KT, _VT], Generic[_KT, _VT]):
    # Stub for SQLAlchemy's C-backed immutabledict. Presumably read-only at
    # runtime (name + @final suggest it) — TODO confirm against the
    # implementation; this stub only declares the merge helpers on top of
    # the inherited dict API.
    @overload
    def union(self, __dict: dict[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
    @overload
    def union(self, __dict: None = None, **kw: SupportsKeysAndGetItem[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
    # merge_with accepts any mix of mappings, (key, value) iterables, or None.
    def merge_with(
        self, *args: SupportsKeysAndGetItem[_KT | _KT2, _VT2] | Iterable[tuple[_KT2, _VT2]] | None
    ) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
6399e8279ba126093ebb5008d5f3db2f7f0e9f0f | e4c1cc89b0014ec932014eb25302c0f431800017 | /polls/models.py | 0264edf31be5314445db6416b6f03c6d2ca17f62 | [] | no_license | jungdoyoon/polls_example | f3b5f98adb68a0fc87d6b591fffcc1ada4baf63b | c05f5841867dbf08a4c21bf5ea1864a5f94c0484 | refs/heads/master | 2022-07-27T17:45:52.365080 | 2020-05-20T05:57:48 | 2020-05-20T05:57:48 | 265,464,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.db import models
# Create your models here.
class Question(models.Model):
    # A poll question; `pub_date` records when it was published.
    question_text = models.CharField(max_length=200)
    # NOTE(review): the Django tutorial uses DateTimeField('date published')
    # here — confirm a date-only field is intended.
    pub_date = models.DateField('date published')

    def __str__(self):
        # Shown in the admin and shell instead of the default repr.
        return self.question_text
class Choice(models.Model):
    # One selectable answer belonging to a Question; `votes` counts ballots.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Shown in the admin and shell instead of the default repr.
        return self.choice_text
"jungdo8016@naver.com"
] | jungdo8016@naver.com |
64b7d70b08eb9417de61cee04e088e8fa686e7e3 | 9f4705a8472dc42427c5509ccce94106c38bd9ee | /Q3.py | dc28bed1e17d458d14072f3e05e63b542a27a38d | [] | no_license | Aniket-Bhagat/Computing_tools_8 | 305382aaa6bb3d6020ccf00d10aa3e2783c5de67 | 109a21b6647fadd3c5e7e9503b517b3c010a0d8f | refs/heads/master | 2020-04-22T04:13:18.329327 | 2019-02-11T11:12:44 | 2019-02-11T11:12:44 | 170,115,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import numpy, matplotlib.pyplot as plt
from scipy.stats.kde import gaussian_kde
data = numpy.loadtxt('data.dat', dtype='float', delimiter='\t', unpack=True)
def probdist(colno):
    """Plot a Gaussian-KDE density estimate of row `colno` of the
    module-level `data` array into subplot (2, 2, colno + 1)."""
    global data  # declared global although only read — kept as written
    plt.subplot(2, 2, colno + 1)
    kde = gaussian_kde(data[colno])
    # Evaluate the density on 100 evenly spaced points across the data range.
    dist_space = numpy.linspace(min(data[colno]), max(data[colno]), 100)
    plt.plot(dist_space, kde(dist_space))
    plt.title('Distribution for column %d' % (colno + 1))
# Figure-wide title, then one KDE subplot per data column (file has 4 columns).
plt.suptitle('Probability Distribution of Data in file')
probdist(0)
probdist(1)
probdist(2)
probdist(3)
# Maximize the window before showing (backend-specific window-manager API).
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.show()
# import numpy as np
# import scipy.stats as stats
# import matplotlib.pyplot as plt
# data = np.loadtxt('data.dat', dtype='float', delimiter='\t', unpack=True)
# data = sorted(data[0])
# fit = stats.norm.pdf(data, np.mean(data), np.std(data))
# plt.plot(data,fit,'-')
# # plt.hist(data,normed=True)
# plt.show() | [
"noreply@github.com"
] | Aniket-Bhagat.noreply@github.com |
9023e00f19ae2dd73801a944716c50ce967f6522 | ff99e4847f91a288cec57124d7beb4b672db2f1e | /maestro/providers/aws/vpc_location.py | c79afa0de62d6c133d13b85bc2d0e8b0932e1234 | [
"Apache-2.0"
] | permissive | tunein/Maestro | e50afdbc9fe61340a2c4e82511bcec4d50957567 | 789205fdbe85242189c50e407445c57ca916e42c | refs/heads/development | 2021-09-29T23:21:14.327896 | 2021-09-21T21:06:31 | 2021-09-21T21:06:31 | 130,405,209 | 12 | 2 | Apache-2.0 | 2021-09-21T21:06:31 | 2018-04-20T19:26:32 | Python | UTF-8 | Python | false | false | 2,463 | py | #Import external libs
import boto3
import sys
import json
import os
from botocore.exceptions import ClientError
#This is only here for printing pretty colors
class color:
    # ANSI escape sequences for coloured/styled terminal output; END resets
    # the terminal back to its default style.
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
#Establish our boto resources
# Module-level boto3 handles shared by every helper below.
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
def get_vpc_id(vpc_name):
    '''
    Gets the unique ID of the given VPC by AWS and returns it
    ex: vpc-a1b2c4d4

    Returns False when no VPC with that Name tag can be found.  (The
    original fell through the loop and implicitly returned None in that
    case, which crashed the downstream subnet lookup.)

    args:
        vpc_name: name of the vpc
    '''
    filters = [{'Name': 'tag:Name', 'Values': [vpc_name]}]
    vpcs = list(ec2.vpcs.filter(Filters=filters))
    for vpc in vpcs:
        try:
            response = client.describe_vpcs(VpcIds=[vpc.id])
            vpc_id = response['Vpcs'][0]['VpcId']
            if vpc_id:  # truthiness instead of len(...) != 0
                return vpc_id
        except ClientError as error:
            # Report this VPC's failure but keep trying any remaining matches.
            print(color.RED + error.response['Error']['Message'] + color.END)
    # Nothing matched (or every lookup failed): report failure explicitly.
    print(color.RED + "Couldn't find the ID for your vpc, check the name and try again" + color.END)
    return False
def get_subnets(vpc_id):
    '''
    Takes the ID from "get_vpc_id" and gathers all private subnets
    then it puts them in a list and returns them for the lambda config

    args:
        vpc_id: the unique ID given to the VPC by aws
    '''
    vpc = ec2.Vpc(vpc_id)
    private_ids = {}
    for subnet in vpc.subnets.all():
        try:
            info = ec2.Subnet(subnet.id)
            # Iterate the tag dicts directly: the previous json.dumps /
            # json.loads round-trip rebuilt an identical structure for no gain.
            for tag in info.tags:
                value = tag['Value']
                if 'private' in value:
                    # Later subnets with the same tag value overwrite earlier
                    # ones, matching the original dict behaviour.
                    private_ids[value] = subnet.id
        except ClientError as error:
            print(color.RED + error.response['Error']['Message'] + color.END)
    return list(private_ids.values())
def main(vpc_name):
    '''
    Main entry point of this module, for simplicities sake

    Returns the list of private subnet ids, or False when the VPC cannot
    be found.

    args:
        vpc_name: taken from the config
    '''
    vpc_id = get_vpc_id(vpc_name)
    if not vpc_id:
        # get_vpc_id signals "not found" with False; don't query subnets
        # with a bogus id (that previously raised inside boto3).
        return False
    return get_subnets(vpc_id)
"mmoon@tunein.com"
] | mmoon@tunein.com |
20a079bd1af4db6c499e81e182bb3635f71069b9 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop256_pad20_jit15/pyr_3s/L8/step09_3side_L8.py | 492f375600b24d9111789d8a77bc4776a8444e6d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82,606 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
from tkinter import S  # NOTE(review): `S` is never used below — likely an accidental auto-import
code_exe_path = os.path.realpath(__file__)                           ### path of the step file currently being executed
code_exe_path_element = code_exe_path.split("\\")                    ### split the path to locate which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")              ### index of kong_model2 within the path elements
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### directory of kong_model2
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("      code_exe_path:", code_exe_path)
# print("      code_exe_path_element:", code_exe_path_element)
# print("      kong_layer:", kong_layer)
# print("      kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
# Shared generator op / train step: tight-crop to 256x256 with 20px padding.
# Only the training step jitters the crop (jit_scale=15); generation does not.
use_what_gen_op = I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale= 0) )
use_what_train_step = Train_step_I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15) )

import time
start_time = time.time()  # wall-clock marker for timing this config module
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
# 3
def _pyramid_conv_block_num(side1, side2, side3, total_layers=17):
    """Build one 17-element conv-block-count list for a pyramid setting.

    The list is symmetric. Counting inward from each end, the first `side3`
    positions get 3 conv blocks, positions up to `side2` get 2, positions up
    to `side1` get 1, and everything in the middle stays 0.  Requires
    side3 <= side2 <= side1 (overlapping halves simply agree at the center).
    """
    nums = [0] * total_layers
    for i in range(side1):
        if i < side3:
            v = 3
        elif i < side2:
            v = 2
        else:
            v = 1
        nums[i] = v
        nums[total_layers - 1 - i] = v
    return nums

# Generate every pyramid_1side_a__2side_b__3side_c table for
# 1 <= c <= b <= a <= 9.  This replaces ~165 hand-typed (typo-prone) list
# literals with identically-named module-level constants.
for _a in range(1, 10):
    for _b in range(1, _a + 1):
        for _c in range(1, _b + 1):
            globals()["pyramid_1side_%d__2side_%d__3side_%d" % (_a, _b, _c)] = \
                _pyramid_conv_block_num(_a, _b, _c)
#########################################################################################
ch032_limit_pyramid_1side_1__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_2__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_3__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# Bulk-define the ch032_limit_pyramid_* model builders covered by this section.
# The combinations form a nested pyramid: 1side (s1) runs 4..8, 2side (s2) runs
# 1..s1, 3side (s3) runs 1..s2 — and this section stops after (8, 6, 5); the
# remaining combinations are defined elsewhere in the file.
#
# Each iteration binds a module-level name of the form
#   ch032_limit_pyramid_1side_<s1>__2side_<s2>__3side_<s3>
# exactly as the previous hand-written assignments did (via globals()), so any
# downstream code that looks these names up is unaffected.  The matching
# pyramid_1side_<s1>__2side_<s2>__3side_<s3> config object (defined earlier in
# this file — TODO confirm they are module-level) is fetched the same way.
for _s1 in range(4, 9):
    for _s2 in range(1, _s1 + 1):
        for _s3 in range(1, _s2 + 1):
            # Combinations are generated in lexicographic order, so a simple
            # tuple comparison marks where this section of the file ends.
            if (_s1, _s2, _s3) > (8, 6, 5):
                break
            _suffix = "pyramid_1side_%d__2side_%d__3side_%d" % (_s1, _s2, _s3)
            globals()["ch032_limit_" + _suffix] = (
                KModel_builder()
                .set_model_name(MODEL_NAME.flow_unet2)
                .set_unet3(out_conv_block=True,
                           concat_before_down=True,
                           kernel_size=3,
                           padding="valid",
                           hid_ch=32,
                           depth_level=8,
                           out_ch=1,
                           unet_acti="sigmoid",
                           conv_block_num=globals()[_suffix],
                           ch_upper_bound=2 ** 9)
                .set_gen_op(use_what_gen_op)
                .set_train_step(use_what_train_step)
            )
# Keep the module namespace tidy: drop the loop temporaries.
del _s1, _s2, _s3, _suffix
ch032_limit_pyramid_1side_8__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_8__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_8, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_9__2side_9__3side_9 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9__3side_9, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
    # Smoke test: build one (the smallest) pyramid variant and run a single
    # forward pass through its generator on a dummy input.
    import numpy as np
    # `time` and `start_time` are presumably set at the top of this file — TODO confirm
    print("build_model cost time:", time.time() - start_time)
    # dummy batch: one 512x512 single-channel image
    data = np.zeros(shape=(1, 512, 512, 1))
    use_model = ch032_limit_pyramid_1side_1__2side_1__3side_1
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result.shape)
    # dump layer weights and the model summary for manual inspection
    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
    print(use_model.model_describe)
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
84b0c5269b2439b5b470b7c84eed3eb96c57cd6d | 28979d6e7873687ee5dd2ff3b838629d03baaa58 | /djangoTutorials/djangoTutorials/wsgi.py | 6aa24c0f51532e0f8900408e467b4426e5ffbffd | [] | no_license | NSNSingh/tryingDjango | 469768d92b5b2398042aef27016ae3d3078233d4 | 22acaa189c5ce2f22bb861a41a72f970a52706df | refs/heads/master | 2021-01-02T08:44:40.961863 | 2017-08-03T12:35:46 | 2017-08-03T12:35:46 | 99,059,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WSGI config for djangoTutorials project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module (only if not already set).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoTutorials.settings")

# Module-level WSGI callable that WSGI servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"sachinsngh64@gmail.com"
] | sachinsngh64@gmail.com |
f96decde8d13d57b8a17e6a18603082d48517ed8 | 091d605dcd15b61abb88b7e7c00fc2ccadc5c51a | /KIM_dipole.py | a27f6fed3ffb721d594d6ec9241146fad23d6f0b | [] | no_license | yqian1/OpenKIM | cb0abeee3f5e3d3b9d649118d8c85a5e283c8ab6 | 2f8d7469b4f67eac4c20265ef11815838307c04e | refs/heads/main | 2023-01-06T09:28:46.923355 | 2020-10-28T23:51:23 | 2020-10-28T23:51:23 | 308,100,239 | 0 | 0 | null | 2020-10-28T18:40:50 | 2020-10-28T18:01:31 | Python | UTF-8 | Python | false | false | 6,077 | py | import numpy as np
import itertools
'''
This script generates MD++ supercells for core energy calculations
The dislocation plane normal is along the x-direction
The dislocation line is along the z-direction
Nicolas Bertin, 08/31/2020
'''
def find_burgers_coord(theta, c2, n2, c3, n3):
    """Return the Burgers vector expressed in scaled (cell) coordinates.

    Solves a*d2 + b*d3 = (1/2, 1/2) for the in-plane coefficients (a, b),
    where d2 = n2*c2 and d3 = n3*c3 are the supercell repeat vectors, using
    a non-degenerate 2x2 minor of the system.

    Parameters:
        theta : character angle (degrees); a negative angle flips the sign
                of (a, b) so an atom is always removed, never inserted.
        c2, c3: length-3 repeat-vector directions (numpy arrays).
        n2, n3: scalar repeat counts.

    Returns a numpy array [0, a, b].
    """
    d2 = n2*c2
    d3 = n3*c3
    # 2x2 minor built from components 0 and 1
    det = d2[0]*d3[1]-d2[1]*d3[0]
    # BUGFIX: the non-degeneracy test used `> 1e20`, which is essentially
    # always false, so the fallback minor below was always taken and the
    # code divided by zero whenever that minor happened to be singular.
    # The intended check is against a small epsilon (det != 0).
    if np.abs(det) > 1e-20:
        a = 0.5*(d3[1]-d3[0])/det
        b = 0.5*(d2[0]-d2[1])/det
    else:
        # fall back to the minor built from components 2 and 1
        det = d2[2]*d3[1]-d2[1]*d3[2]
        a = 0.5*(d3[1]-d3[2])/det
        b = 0.5*(d2[2]-d2[1])/det
    # make sure always remove atom, not insert atom
    if theta < 0:
        a = -a
        b = -b
    return np.array([0., a, b])
def generate_script(celldata, theta):
    """Assemble the MD++ input script for the dipole cell whose character
    angle matches ``theta`` (degrees, 0.01-degree tolerance) in ``celldata``.

    Raises Exception when no stored cell matches the requested angle.
    Returns the script as a single newline-terminated string.
    """
    # Locate the matching cell
    matches = np.argwhere(np.abs(celldata['theta'] - theta) < 1e-2)
    if matches.size == 0:
        raise Exception('Cannot find character angle in the cell data')
    sel = matches[0]
    angle = celldata['theta'][sel]
    c1 = celldata['c1'][sel][0]
    n1 = celldata['n1'][sel]
    c2 = celldata['c2'][sel][0]
    n2 = celldata['n2'][sel]
    c3 = celldata['c3'][sel][0]
    n3 = celldata['n3'][sel]
    bs = celldata['bs'][sel][0]
    # Assemble the MD++ script one line at a time
    lines = [
        '# -*-shell-script-*-',
        '#MD++ script to compute core energies',
        'setnolog',
        'setoverwrite',
        'dirname = runs/KIM/dipole_%.2f_ref' % angle,
        '#------------------------------------------------------------',
        '#Read in EAM/MEAM potential',
        '#potfile = "~/Potentials/w_version3.eam" eamgrid = 5000 readeam',
        'potfile = ~/Documents/Codes/MD++/potentials/EAMDATA/eamdata.W.Marinica13 eamgrid = 80001 readeam',
        'NNM = 100',
        '#------------------------------------------------------------',
        'latticestructure = body-centered-cubic',
        'latticeconst = 3.14339 # (A) for W_cea',
        '',
        'makecnspec = [%4d %4d %4d %4d #(x) dipole direction' % (c1[0], c1[1], c1[2], n1),
        '              %4d %4d %4d %4d #(y)' % (c2[0], c2[1], c2[2], n2),
        '              %4d %4d %4d %4d ] #(z) dislocation line' % (c3[0], c3[1], c3[2], n3),
        '',
        'makecn finalcnfile = perf.cn writecn',
        '#-------------------------------------------------------------',
        '#Create Dislocation Dipole by using linear elastic solutions',
        '',
        'mkdipole = [ 3 1 #z(dislocation line), y(dipole direction)',
        '             %12.8f %12.8f %12.8f #(bx,by,bz)' % (bs[0], bs[1], bs[2]),
        '             -0.01 -0.2499 0.251 #(x0,y0,y1) #type (2)',
        '             0.278 -10 10 -10 10 1 ] #nu, number of images, shiftbox',
        '',
        'makedipole finalcnfile = makedp_%.2f.lammps writeLAMMPS' % angle,
        '#-------------------------------------------------------------',
        '#Conjugate-Gradient relaxation',
        'conj_ftol = 1e-7 conj_fevalmax = 3000',
        'conj_fixbox = 1 conj_dfpred = 1e-4',
        'relax',
        'eval',
        'finalcnfile = dipole_%.2f.lammps writeLAMMPS' % angle,
        'quit',
    ]
    # every original line ends with a newline, including the final 'quit'
    return '\n'.join(lines) + '\n'
# supercell size
ar = 1.5    # aspect ratio x/y
n2 = 10.0   # supercell size along the y-direction
n3 = 3.0    # supercell size along the z-direction
mult = 3.0  # multiplication factor
bv = np.array([1,1,1])   # Burgers vector direction
c1 = np.array([-1,1,0])  # dislocation plane index

# maximum Miller index of repeat vectors allowed to
# generate supercells of various character angles
nmax = 10

# generate in-plane discrete directions (all coprime (p0, p1) pairs)
p = np.array(list(itertools.permutations(range(1, nmax+1), 2)))
p = np.vstack(([1,0], [1,1], [0,1], p))
m = np.gcd(p[:,0], p[:,1])
# BUGFIX: reduce by the gcd with *integer* floor division (exact, since m
# divides every component). True division promoted p to float64, which made
# every array derived from it float and caused np.gcd below to raise
# TypeError (np.gcd only accepts integer dtypes).
p = p // m[:, np.newaxis]

# generate global supercell repeat vectors
if np.abs(np.dot(bv, c1)) > 1e-5:
    raise Exception('Burgers vector and dislocation plane must be orthogonal')
y0 = np.cross(c1, bv)
my = np.gcd(y0[0], np.gcd(y0[1], y0[2]))
y0 = y0 // my  # exact: my divides every component; keeps integer dtype
x = bv / np.linalg.norm(bv)
y = y0 / np.linalg.norm(y0)
# candidate line directions: integer combinations of bv and y0
c3plus = np.outer(p[:,0], bv) + np.outer(p[:,1], y0)
c3minus = np.outer(p[:,0], bv) - np.outer(p[:,1], y0)
c3 = np.unique(np.vstack((c3plus, c3minus)), axis=0)

# compute character angles (angle between line direction and Burgers vector)
c3n = np.linalg.norm(c3, axis=1)  # (currently unused) line-direction norms
c3x = np.dot(c3, x)
c3y = np.dot(c3, y)
angle = np.arctan2(c3y, c3x)*180.0/np.pi
ia = np.argsort(angle)
angle = angle[ia]
c3 = c3[ia]

# compute complementary supercell repeat vector
c2 = np.cross(c3, c1)

# determine supercell size (repeat counts along each cell vector)
m2 = np.gcd(c2[:,0], np.gcd(c2[:,1], c2[:,2]))
cm2 = c2 / m2[:, np.newaxis]
l2 = np.linalg.norm(cm2, axis=1)
n2 = np.ceil(mult*n2/l2)
m3 = np.gcd(c3[:,0], np.gcd(c3[:,1], c3[:,2]))
cm3 = c3 / m3[:, np.newaxis]
l3 = np.linalg.norm(cm3, axis=1)
n3 = np.ceil(mult*n3/l3)

# adjust aspect ratio
cm1 = np.tile(c1, (c3.shape[0], 1))
l1 = np.linalg.norm(cm1, axis=1)
n1 = np.round(ar*n2*l2/l1)

# select orientations with acceptable Miller indices
cmax = np.max(np.abs(np.hstack((cm1,cm2,cm3))), axis=1)
ind = (cmax<=nmax)
# Burgers vector in scaled coordinates, one row per candidate orientation
bs = np.zeros((angle.size,3))
for i in range(angle.size):
    bs[i] = find_burgers_coord(angle[i], cm2[i], n2[i], cm3[i], n3[i])
# all supercells data, restricted to orientations that passed the
# Miller-index filter `ind`
celldata = {
    "theta": angle[ind],  # character angles (degrees)
    "c1": cm1[ind],       # plane-normal repeat vectors
    "n1": n1[ind],        # repeat counts along c1
    "c2": cm2[ind],
    "n2": n2[ind],
    "c3": cm3[ind],       # dislocation-line repeat vectors
    "n3": n3[ind],
    "bs": bs[ind]         # Burgers vectors in scaled coordinates
}
# Generate MD++ script for a given character angle
theta = 90.0 # edge dislocation
#theta = 0.0 # screw dislocation
#theta = 70.53 # M111 dislocation
script = generate_script(celldata, theta)
print(script)
if 0:
    # Print MD++ script into file (disabled; flip to 1 to write the file)
    script_file = open('/Users/bertin1/Documents/Codes/MD++/scripts/KIM/W-dipole_test.script', 'w')
    script_file.write(script)
    script_file.close()
"noreply@github.com"
] | yqian1.noreply@github.com |
38a31c1facdab62182e07fb91d89b3bbd83243e6 | 15961f319555c38ebb80b4801edcc84a24f9415b | /loader.py | 13c80e259f87a77668787c28d9ab90ed9859d98e | [] | no_license | elifiner/crmz | f4a7897cbbcc6b7d0fac4fc8ad7705bc55e81c5f | cd15ff574a1e523aa1594f2929b62c3f3f75d153 | refs/heads/master | 2020-12-26T02:49:29.500188 | 2015-06-29T12:56:44 | 2015-06-29T12:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import sys
companies = []
for line in open(sys.argv[1]):
companies.append(eval(line.strip()))
print(companies) | [
"eli.finer@gmail.com"
] | eli.finer@gmail.com |
6e0e7be32af312f6e4e5c22864d619f58343b46b | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/qtconsole/usage.py | 9748f0e934f04e3c18259feed28ecd2d79a87874 | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4999bdffa49207a0fc3b0d1a32be17cab386bc93cb7e7f592a5154ee85dcc4e9
size 8349
| [
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] | seokinj@jangseog-in-ui-MacBook-Pro.local |
68d7bf5e288f8d694bff5efa73f76d5d085e833e | 24c8c76dee0cfbeaa3bc61666e63801aaf86afbc | /acmicpc.net/2798/test/script.py | 3b02d2dfb3a6b5edce5714ff49fb0809cb7d0a71 | [] | no_license | developers-algorithm-study/mjy9088 | e018f93f87394e106163259659a913b2ec2dd7f5 | 04f8fa0611a76aba45956ef82218b0e15152faa9 | refs/heads/master | 2022-09-16T17:55:21.387115 | 2022-08-18T14:32:35 | 2022-08-18T14:33:46 | 173,717,731 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import sys
from os import path, sep
from importlib import import_module
base_path = path.dirname(path.dirname(path.abspath(__file__)))
root_path = path.dirname(path.dirname(base_path))
sys.path.append(root_path + sep + 'test')
if import_module('pipe').test(
prelaunch_tasks=[['cargo', 'build']],
popen_params=sep.join([base_path, 'target', 'debug', 'acmicpc_2798']),
path_to_cases_json=sep.join([base_path, 'test', 'cases.json'])
):
print('Passed all cases')
else:
exit(1)
| [
"mjy9088@naver.com"
] | mjy9088@naver.com |
6daa8b703512b7e2e3c7130d9d9b62f52575be17 | 5d049b79b10480e0e03dbd699c496b5eaa050eb5 | /tests/test_preprocessing.py | 26b4df9beff58bd1556114ab91b4a684dcb652f5 | [] | no_license | isseychua/type-hints-demo | 96bd46fd3c54acd5fa2f14913a2e1c1522d109dd | 2e2ae03e9b2c1254807a7f01bc47869ecea6bb24 | refs/heads/master | 2023-08-02T16:36:36.479644 | 2021-10-10T11:26:19 | 2021-10-10T11:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py | import unittest
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from pandas.testing import assert_frame_equal, assert_series_equal
from src.preprocessing import (add_derived_title, add_is_alone_column,
categorize_column, impute_nans, train_model)
class TestProcessing(unittest.TestCase):
def test_add_derived_title(self):
df = pd.DataFrame({
'Name': ['Smith, Mr. Owen Harris ', 'Heikkinen, Miss. Laina ', 'Allen, Mlle. Maisie',
'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
# rare titles
'Smith, Lady. Owen Harris ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
'Smith, Col. Owen Harris ', 'Heikkinen, Don. Laina ', 'Allen, Dr. Maisie',
'Smith, Major. Owen Harris ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
'Smith, Jonkheer. Owen Bob ', 'Heikkinen, Dona. Laina '
],
})
expected = pd.DataFrame({
'Name': ['Smith, Mr. Owen Harris ', 'Heikkinen, Miss. Laina ', 'Allen, Mlle. Maisie',
'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
'Smith, Lady. Owen Harris ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
'Smith, Col. Owen Harris ', 'Heikkinen, Don. Laina ', 'Allen, Dr. Maisie',
'Smith, Major. Owen Harris ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
'Smith, Jonkheer. Owen Bob ', 'Heikkinen, Dona. Laina '
],
'Title': ['Mr', 'Miss', 'Miss',
'Miss', 'Mrs',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare', 'Rare',
'Rare', 'Rare']
})
assert_frame_equal(expected, add_derived_title(df))
def test_categorize_column_into_2_categories(self):
series = pd.Series([5, 20, 10, 25]) # bins: [ 4.98 15. 25. ]
assert_series_equal(
pd.Series([1, 2, 1, 2]), categorize_column(series, num_bins=2))
def test_categorize_column_into_5_categories(self):
# bins: [ -0.1, 20. , 40. , 60. , 80. , 100. ]
series = pd.Series([0, 30, 50, 80, 100])
assert_series_equal(
pd.Series([1, 2, 3, 4, 5]), categorize_column(series, num_bins=5))
def test_add_is_alone_column(self):
# df = df['SibSp'] + df['Parch']
df = pd.DataFrame({
'SibSp': [0, 1, 2, 0, 0],
'Parch': [0, 0, 5, 0, 1]
})
expected = pd.DataFrame({
'SibSp': [0, 1, 2, 0, 0],
'Parch': [0, 0, 5, 0, 1],
'IsAlone': [1, 0, 0, 1, 0]
})
assert_frame_equal(expected, add_is_alone_column(df))
def test_impute_nans_for_categorical_columns_replaces_na_with_most_frequent_mode(self):
df = pd.DataFrame({
'some_categorical_column': ['A', 'A', 'B', np.nan, 'A', np.nan]
})
expected = pd.DataFrame({
'some_categorical_column': ['A', 'A', 'B', 'A', 'A', 'A']
})
assert_frame_equal(expected, impute_nans(
df, categorical_columns=['some_categorical_column']))
def test_impute_nans_for_continuous_columns_replaces_na_with_median(self):
df = pd.DataFrame({
# median value: 20
'some_continuous_column': [10, 20, np.nan, np.nan, 30]
})
expected = pd.DataFrame({
'some_continuous_column': [10, 20, 20, 20, 30]
})
assert_frame_equal(expected, impute_nans(df, continuous_columns=[
'some_continuous_column']), check_dtype=False)
def test_train_model_should_return_instance_of_model_and_accuracy_score(self):
model, accuracy = train_model(DecisionTreeClassifier, [[1, 1, 1], [1, 1, 1]], [0, 1])
self.assertIsInstance(model, DecisionTreeClassifier)
self.assertIsInstance(accuracy, float)
| [
"davidtan@thoughtworks.com"
] | davidtan@thoughtworks.com |
31af98181d7dd6927e3d2117e9d1f89471704af2 | 23a8b03599b0e97157739c367168b481f046726f | /vocabchallenge/dictionaries/french_dict_edit.py | 2861d4fd9f9037fe02a7f792bde2f9ad0a532a0f | [] | no_license | lzbotha/VocabChallenge | 04be101d5a6bf49dcaf895417a9ecf095b69fd14 | 5abff66a397c66c9ce79aaf2fd27925289013db9 | refs/heads/master | 2021-01-19T18:11:07.222922 | 2014-03-17T07:59:59 | 2014-03-17T07:59:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py |
f = open('french_dictionary.tsv','r')
outpt = open('french_dictionary_v1.tsv','w')
def remove_formatting(text):
temp = ''
for c in text:
if not c =='[' and not c==']' and not c =='#':
temp = temp + c
return temp
# as it stands now this seperates all words with a direct translation
for line in f:
dictitem = line.split('\t')
if dictitem[2]!='Suffix' and dictitem[2]!='Prefix' and dictitem[2]!='Proper noun' and dictitem[2]!='Symbol' and dictitem[2]!='Proverb' and dictitem[2]!='Abbreviation' and dictitem[2]!='Initialism':
if not 'initialism' in dictitem[-1] and not 'Arabic spelling' in dictitem[-1] and 'initialism' not in line and 'abbreviation' not in line:
if '[[' in dictitem[-1] and not '{{' in dictitem[-1] and not '[[#English' in dictitem[-1]:
if len(dictitem[-1].split(' ')) == 2:
if not '|' in dictitem[-1].split(' ')[1]:
outpt.write(dictitem[1]+'\t'+remove_formatting(dictitem[-1]))
else:
outpt.write(dictitem[1]+'\t'+remove_formatting(dictitem[-1]))
f.close() | [
"leonardzbotha@gmail.com"
] | leonardzbotha@gmail.com |
96a1a69d636663d00ed646ff53f6c1fde2ee639b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9zsDKijmBffmnk9AP_4.py | f1bdc518936ccc7193328054c14e0aff9757174a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py |
def programmers(one, two, three):
return max(one, two, three)-min(one, two, three)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c23e2c7b72b4b8a49f79f3091cb3eb3ca241fb0f | a3123495438f8054f15711be138e10bef4b8c8ce | /parallax_eddie_robot/src/parallax_eddie_robot/msg/_Velocity.py | c57d36e43ccd2d56f604a02ce4b991f1402545b3 | [] | no_license | haikalpribadi/haikalpribadi-ros-pkg | 53aeb2335dee131bcc626afb13ceb39a56f00556 | a3f9c2580e82beabe83a463cdbe46c40dc70ea1b | refs/heads/master | 2021-01-10T20:32:15.132608 | 2020-05-20T13:41:45 | 2020-05-20T13:41:45 | 3,742,934 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py | """autogenerated by genmsg_py from Velocity.msg. Do not edit."""
import roslib.message
import struct
class Velocity(roslib.message.Message):
_md5sum = "9d5c2dcd348ac8f76ce2a4307bd63a13"
_type = "parallax_eddie_robot/Velocity"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 linear
float32 angular
"""
__slots__ = ['linear','angular']
_slot_types = ['float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
linear,angular
@param args: complete set of field values, in .msg order
@param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Velocity, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.linear is None:
self.linear = 0.
if self.angular is None:
self.angular = 0.
else:
self.linear = 0.
self.angular = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
@param buff: buffer
@type buff: StringIO
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.linear, _x.angular))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
@param str: byte array of serialized message
@type str: str
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.linear, _x.angular,) = _struct_2f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
@param buff: buffer
@type buff: StringIO
@param numpy: numpy python module
@type numpy module
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.linear, _x.angular))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
@param str: byte array of serialized message
@type str: str
@param numpy: numpy python module
@type numpy: module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.linear, _x.angular,) = _struct_2f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
_struct_I = roslib.message.struct_I
_struct_2f = struct.Struct("<2f")
| [
"haikal.pribadi@gmail.com"
] | haikal.pribadi@gmail.com |
7ab25735908dffad4ff145d77a16b3adf7334ef5 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/djcelery/tests/_compat.py | 4969b5c033405ba7bf924e2166b838b11922e304 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 113 | py | # coding: utf-8
try:
from unittest.mock import patch
except ImportError:
from mock import patch # noqa
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
4b2a96ef55a632d0fcb49ebd4f0e74caf63bdc4b | 6804c33960e66afa43c54f147e4ca86f32b2ee70 | /stands/recognizer/gender_detect.py | 76806f349f582ea4cc556999d2079cddb56533c8 | [] | no_license | ApiProdject/apiproject | 9cd6be2781f75338a41c2376e8303b910bbc4307 | 6e35244e0c003b6b5331def17c8c1d7ad671dab2 | refs/heads/master | 2022-07-10T16:44:50.954386 | 2020-05-17T05:37:54 | 2020-05-17T05:37:54 | 244,185,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | import os
import cv2
import numpy as np
from apiproject.settings import STATIC_DIR
from infoPoints.models import InfoPoint
from stands.recognizer.SSRNET_model import SSR_net_general
class GenderRecognizer:
__instance__ = None
@staticmethod
def get_instance():
if not GenderRecognizer.__instance__:
GenderRecognizer()
return GenderRecognizer.__instance__
def __init__(self):
if GenderRecognizer.__instance__ is None:
self.gender_net = SSR_net_general(64, [3, 3, 3], 1, 1)()
self.gender_net.load_weights(os.path.join(STATIC_DIR, 'models/ssrnet_gender_3_3_3_64_1.0_1.0.h5'))
GenderRecognizer.__instance__ = self
else:
raise Exception("This class is a singleton!")
def gender(self, face):
blob = cv2.resize(face, (64, 64))
blob = cv2.normalize(blob, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
gender = self.gender_net.predict(np.expand_dims(blob, axis=0))
return 1 if (gender >= 0.5) else 2
| [
"egorgavrilenko6@gmail.com"
] | egorgavrilenko6@gmail.com |
00158ec2eee07a649a6064356b537bc1f351953c | d292e8094cdfbd2dd6e35d775f6edfa8e209db47 | /src/tests/test_profiled_forward.py | 8d869d8d2f7ad953dbfc20f8331029c0c13c6f71 | [
"MIT"
] | permissive | SpookyWoogin/robot2018 | 81562d96ffd42aa3642f8f746a8126a2940827ec | a8ddf6a64b883904b15031e0ae13b2056faed4f5 | refs/heads/master | 2020-09-02T01:05:00.897963 | 2019-01-26T05:00:02 | 2019-01-26T05:00:02 | 219,100,518 | 0 | 0 | MIT | 2019-11-02T03:55:26 | 2019-11-02T03:55:26 | null | UTF-8 | Python | false | false | 2,129 | py | from unittest.mock import MagicMock
from data_logger import DataLogger
from robot import Rockslide
from commands.profiled_forward import ProfiledForward
def test_ProfiledForward1(Notifier):
robot = Rockslide()
robot.robotInit()
command = ProfiledForward(10)
command.initialize()
command.execute()
command.isFinished()
command.end()
log_trajectory = True
def test_ProfiledForward2(Notifier, sim_hooks):
global log_trajectory
robot = Rockslide()
robot.robotInit()
DT = robot.getPeriod()
robot.drivetrain.getLeftEncoder = getLeftEncoder = MagicMock()
robot.drivetrain.getRightEncoder = getRightEncoder = MagicMock()
getLeftEncoder.return_value = 0
getRightEncoder.return_value = 0
command = ProfiledForward(10)
command.initialize()
t = 0
pos_ft = 0
if log_trajectory:
logger = DataLogger("test_profiled_forward2.csv")
logger.log_while_disabled = True
logger.do_print = True
logger.add('t', lambda: t)
logger.add('pos', lambda: pos_ft)
logger.add('target_pos', lambda: command.dist_ft)
logger.add('v', lambda: command.profiler_l.current_target_v)
logger.add('max_v', lambda: command.max_v_encps)
logger.add('a', lambda: command.profiler_l.current_a)
logger.add('max_a', lambda: command.max_acceleration)
logger.add('voltage', lambda: command.drivetrain.getVoltage())
logger.add('vpl', lambda: command.drivetrain.motor_lb.get())
logger.add('adist', lambda: command.profiler_l.adist)
logger.add('err', lambda: command.profiler_l.err)
while t < 10:
if log_trajectory:
logger.log()
getLeftEncoder.return_value = pos_ft * command.drivetrain.ratio
getRightEncoder.return_value = -pos_ft * command.drivetrain.ratio
command.execute()
v = command.profiler_l.current_target_v
pos_ft += v * DT
t += DT
sim_hooks.time = t
if command.isFinished():
break
command.end()
if log_trajectory:
logger.log()
logger.close()
| [
"ellery-newcomer@utulsa.edu"
] | ellery-newcomer@utulsa.edu |
047660a9b15f645d34c790dbd31c938415f1e740 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/models/__init__.py | 82c172aa1eb5e798e13af3d8f39e6216f291614d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 12,525 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccountSasParameters
from ._models_py3 import ActiveDirectoryProperties
from ._models_py3 import AzureEntityResource
from ._models_py3 import AzureFilesIdentityBasedAuthentication
from ._models_py3 import BlobContainer
from ._models_py3 import BlobInventoryPolicy
from ._models_py3 import BlobInventoryPolicyDefinition
from ._models_py3 import BlobInventoryPolicyFilter
from ._models_py3 import BlobInventoryPolicyRule
from ._models_py3 import BlobInventoryPolicySchema
from ._models_py3 import BlobRestoreParameters
from ._models_py3 import BlobRestoreRange
from ._models_py3 import BlobRestoreStatus
from ._models_py3 import BlobServiceItems
from ._models_py3 import BlobServiceProperties
from ._models_py3 import ChangeFeed
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import CorsRule
from ._models_py3 import CorsRules
from ._models_py3 import CustomDomain
from ._models_py3 import DateAfterCreation
from ._models_py3 import DateAfterModification
from ._models_py3 import DeleteRetentionPolicy
from ._models_py3 import DeletedAccount
from ._models_py3 import DeletedAccountListResult
from ._models_py3 import DeletedShare
from ._models_py3 import Dimension
from ._models_py3 import Encryption
from ._models_py3 import EncryptionIdentity
from ._models_py3 import EncryptionScope
from ._models_py3 import EncryptionScopeKeyVaultProperties
from ._models_py3 import EncryptionScopeListResult
from ._models_py3 import EncryptionService
from ._models_py3 import EncryptionServices
from ._models_py3 import Endpoints
from ._models_py3 import ErrorResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import ExtendedLocation
from ._models_py3 import FileServiceItems
from ._models_py3 import FileServiceProperties
from ._models_py3 import FileShare
from ._models_py3 import FileShareItem
from ._models_py3 import FileShareItems
from ._models_py3 import GeoReplicationStats
from ._models_py3 import IPRule
from ._models_py3 import Identity
from ._models_py3 import ImmutabilityPolicy
from ._models_py3 import ImmutabilityPolicyProperties
from ._models_py3 import KeyCreationTime
from ._models_py3 import KeyPolicy
from ._models_py3 import KeyVaultProperties
from ._models_py3 import LastAccessTimeTrackingPolicy
from ._models_py3 import LeaseContainerRequest
from ._models_py3 import LeaseContainerResponse
from ._models_py3 import LegalHold
from ._models_py3 import LegalHoldProperties
from ._models_py3 import ListAccountSasResponse
from ._models_py3 import ListBlobInventoryPolicy
from ._models_py3 import ListContainerItem
from ._models_py3 import ListContainerItems
from ._models_py3 import ListQueue
from ._models_py3 import ListQueueResource
from ._models_py3 import ListQueueServices
from ._models_py3 import ListServiceSasResponse
from ._models_py3 import ListTableResource
from ._models_py3 import ListTableServices
from ._models_py3 import ManagementPolicy
from ._models_py3 import ManagementPolicyAction
from ._models_py3 import ManagementPolicyBaseBlob
from ._models_py3 import ManagementPolicyDefinition
from ._models_py3 import ManagementPolicyFilter
from ._models_py3 import ManagementPolicyRule
from ._models_py3 import ManagementPolicySchema
from ._models_py3 import ManagementPolicySnapShot
from ._models_py3 import ManagementPolicyVersion
from ._models_py3 import MetricSpecification
from ._models_py3 import Multichannel
from ._models_py3 import NetworkRuleSet
from ._models_py3 import ObjectReplicationPolicies
from ._models_py3 import ObjectReplicationPolicy
from ._models_py3 import ObjectReplicationPolicyFilter
from ._models_py3 import ObjectReplicationPolicyRule
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProtocolSettings
from ._models_py3 import ProxyResource
from ._models_py3 import QueueServiceProperties
from ._models_py3 import Resource
from ._models_py3 import ResourceAccessRule
from ._models_py3 import RestorePolicyProperties
from ._models_py3 import Restriction
from ._models_py3 import RoutingPreference
from ._models_py3 import SKUCapability
from ._models_py3 import SasPolicy
from ._models_py3 import ServiceSasParameters
from ._models_py3 import ServiceSpecification
from ._models_py3 import Sku
from ._models_py3 import SkuInformation
from ._models_py3 import SmbSetting
from ._models_py3 import StorageAccount
from ._models_py3 import StorageAccountCheckNameAvailabilityParameters
from ._models_py3 import StorageAccountCreateParameters
from ._models_py3 import StorageAccountInternetEndpoints
from ._models_py3 import StorageAccountKey
from ._models_py3 import StorageAccountListKeysResult
from ._models_py3 import StorageAccountListResult
from ._models_py3 import StorageAccountMicrosoftEndpoints
from ._models_py3 import StorageAccountRegenerateKeyParameters
from ._models_py3 import StorageAccountUpdateParameters
from ._models_py3 import StorageQueue
from ._models_py3 import StorageSkuListResult
from ._models_py3 import SystemData
from ._models_py3 import Table
from ._models_py3 import TableServiceProperties
from ._models_py3 import TagFilter
from ._models_py3 import TagProperty
from ._models_py3 import TrackedResource
from ._models_py3 import UpdateHistoryProperty
from ._models_py3 import Usage
from ._models_py3 import UsageListResult
from ._models_py3 import UsageName
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import VirtualNetworkRule
from ._storage_management_client_enums import (
AccessTier,
AccountStatus,
BlobInventoryPolicyName,
BlobRestoreProgressStatus,
Bypass,
CorsRuleAllowedMethodsItem,
CreatedByType,
DefaultAction,
DirectoryServiceOptions,
EnabledProtocols,
EncryptionScopeSource,
EncryptionScopeState,
ExpirationAction,
ExtendedLocationTypes,
GeoReplicationStatus,
HttpProtocol,
IdentityType,
ImmutabilityPolicyState,
ImmutabilityPolicyUpdateType,
InventoryRuleType,
KeyPermission,
KeySource,
KeyType,
Kind,
LargeFileSharesState,
LeaseContainerRequestAction,
LeaseDuration,
LeaseState,
LeaseStatus,
ListContainersInclude,
ListSharesExpand,
ManagementPolicyName,
MinimumTlsVersion,
Name,
Permissions,
PrivateEndpointConnectionProvisioningState,
PrivateEndpointServiceConnectionStatus,
ProvisioningState,
PublicAccess,
PutSharesExpand,
Reason,
ReasonCode,
RootSquashType,
RoutingChoice,
RuleType,
Services,
ShareAccessTier,
SignedResource,
SignedResourceTypes,
SkuName,
SkuTier,
State,
StorageAccountExpand,
UsageUnit,
)
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'AccountSasParameters',
'ActiveDirectoryProperties',
'AzureEntityResource',
'AzureFilesIdentityBasedAuthentication',
'BlobContainer',
'BlobInventoryPolicy',
'BlobInventoryPolicyDefinition',
'BlobInventoryPolicyFilter',
'BlobInventoryPolicyRule',
'BlobInventoryPolicySchema',
'BlobRestoreParameters',
'BlobRestoreRange',
'BlobRestoreStatus',
'BlobServiceItems',
'BlobServiceProperties',
'ChangeFeed',
'CheckNameAvailabilityResult',
'CloudErrorBody',
'CorsRule',
'CorsRules',
'CustomDomain',
'DateAfterCreation',
'DateAfterModification',
'DeleteRetentionPolicy',
'DeletedAccount',
'DeletedAccountListResult',
'DeletedShare',
'Dimension',
'Encryption',
'EncryptionIdentity',
'EncryptionScope',
'EncryptionScopeKeyVaultProperties',
'EncryptionScopeListResult',
'EncryptionService',
'EncryptionServices',
'Endpoints',
'ErrorResponse',
'ErrorResponseBody',
'ExtendedLocation',
'FileServiceItems',
'FileServiceProperties',
'FileShare',
'FileShareItem',
'FileShareItems',
'GeoReplicationStats',
'IPRule',
'Identity',
'ImmutabilityPolicy',
'ImmutabilityPolicyProperties',
'KeyCreationTime',
'KeyPolicy',
'KeyVaultProperties',
'LastAccessTimeTrackingPolicy',
'LeaseContainerRequest',
'LeaseContainerResponse',
'LegalHold',
'LegalHoldProperties',
'ListAccountSasResponse',
'ListBlobInventoryPolicy',
'ListContainerItem',
'ListContainerItems',
'ListQueue',
'ListQueueResource',
'ListQueueServices',
'ListServiceSasResponse',
'ListTableResource',
'ListTableServices',
'ManagementPolicy',
'ManagementPolicyAction',
'ManagementPolicyBaseBlob',
'ManagementPolicyDefinition',
'ManagementPolicyFilter',
'ManagementPolicyRule',
'ManagementPolicySchema',
'ManagementPolicySnapShot',
'ManagementPolicyVersion',
'MetricSpecification',
'Multichannel',
'NetworkRuleSet',
'ObjectReplicationPolicies',
'ObjectReplicationPolicy',
'ObjectReplicationPolicyFilter',
'ObjectReplicationPolicyRule',
'Operation',
'OperationDisplay',
'OperationListResult',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionListResult',
'PrivateLinkResource',
'PrivateLinkResourceListResult',
'PrivateLinkServiceConnectionState',
'ProtocolSettings',
'ProxyResource',
'QueueServiceProperties',
'Resource',
'ResourceAccessRule',
'RestorePolicyProperties',
'Restriction',
'RoutingPreference',
'SKUCapability',
'SasPolicy',
'ServiceSasParameters',
'ServiceSpecification',
'Sku',
'SkuInformation',
'SmbSetting',
'StorageAccount',
'StorageAccountCheckNameAvailabilityParameters',
'StorageAccountCreateParameters',
'StorageAccountInternetEndpoints',
'StorageAccountKey',
'StorageAccountListKeysResult',
'StorageAccountListResult',
'StorageAccountMicrosoftEndpoints',
'StorageAccountRegenerateKeyParameters',
'StorageAccountUpdateParameters',
'StorageQueue',
'StorageSkuListResult',
'SystemData',
'Table',
'TableServiceProperties',
'TagFilter',
'TagProperty',
'TrackedResource',
'UpdateHistoryProperty',
'Usage',
'UsageListResult',
'UsageName',
'UserAssignedIdentity',
'VirtualNetworkRule',
'AccessTier',
'AccountStatus',
'BlobInventoryPolicyName',
'BlobRestoreProgressStatus',
'Bypass',
'CorsRuleAllowedMethodsItem',
'CreatedByType',
'DefaultAction',
'DirectoryServiceOptions',
'EnabledProtocols',
'EncryptionScopeSource',
'EncryptionScopeState',
'ExpirationAction',
'ExtendedLocationTypes',
'GeoReplicationStatus',
'HttpProtocol',
'IdentityType',
'ImmutabilityPolicyState',
'ImmutabilityPolicyUpdateType',
'InventoryRuleType',
'KeyPermission',
'KeySource',
'KeyType',
'Kind',
'LargeFileSharesState',
'LeaseContainerRequestAction',
'LeaseDuration',
'LeaseState',
'LeaseStatus',
'ListContainersInclude',
'ListSharesExpand',
'ManagementPolicyName',
'MinimumTlsVersion',
'Name',
'Permissions',
'PrivateEndpointConnectionProvisioningState',
'PrivateEndpointServiceConnectionStatus',
'ProvisioningState',
'PublicAccess',
'PutSharesExpand',
'Reason',
'ReasonCode',
'RootSquashType',
'RoutingChoice',
'RuleType',
'Services',
'ShareAccessTier',
'SignedResource',
'SignedResourceTypes',
'SkuName',
'SkuTier',
'State',
'StorageAccountExpand',
'UsageUnit',
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk() | [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
8cb2376ed52ba4138dc95464f109798211500d6a | 4d9b71dc822dd62cade383629ea8ef469d2e83ae | /planning/SpCoNavi0.1.py | d05de2b52e4530add0ef3afd16f9a86a6519b889 | [
"MIT"
] | permissive | sunnySKYwhy/SpCoNavi | cb2eaded8de5c0d5ec254d415dcc3418783db7f1 | 88edac8b204ad58380a00685f7d5159d5d937271 | refs/heads/master | 2023-03-19T23:52:29.411030 | 2020-02-19T11:57:54 | 2020-02-19T11:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,383 | py | #coding:utf-8
###########################################################
# SpCoNavi: Spatial Concept-based Path-Planning Program (開発中)
# Akira Taniguchi 2018/12/13-2019/3/10-
###########################################################
##########---遂行タスク---##########
#テスト実行・デバッグ
#ムダの除去・さらなる高速化
##########---作業終了タスク---##########
##文字コードをsjisのままにした
##現状、Xtは2次元(x,y)として計算(角度(方向)θは考慮しない)
##配列はlistかnumpy.arrayかを注意
##地図が大きいとメモリを大量に消費する・処理が重くなる恐れがある
##状態遷移確率(動作モデル)は確定モデルで近似計算する
##range() -> xrange()
##numbaのjitで高速化(?)and並列化(?)
##PathはROSの座標系と2次元配列上のインデックスの両方を保存する
##ViterbiPathの計算でlogを使う:PathWeightMapは確率で計算・保存、Transitionはlogで計算・保存する
##事前計算できるものはできるだけファイル読み込みする形にもできるようにした
###(単語辞書生成、単語認識結果(N-best)、事前計算可能な確率値、Transition(T_horizonごとに保持)、・・・)
##Viterbiの計算処理をTransitionをそのまま使わないように変更した(ムダが多く、メモリ消費・処理時間がかかる要因)
##Viterbiのupdate関数を一部numpy化(高速化)
#sum_i_GaussMultiがnp.arrayになっていなかった(?)⇒np.array化したが計算上変わらないはず (2019/02/17)⇒np.arrayにすると、numbaがエラーを吐くため元に戻した.
###未確認・未使用
#pi_2_pi
#Prob_Triangular_distribution_pdf
#Motion_Model_Odometry
#Motion_Model_Odometry_No_theta
###確認済み
#ReadParameters
#ReadSpeech
#SpeechRecognition
#WordDictionaryUpdate2
#SavePath
#SaveProbMap
#ReadMap
#ReadCostMap
#PathPlanner
#ViterbiPath
##########---保留---##########
#状態遷移確率(動作モデル)を確率モデルで計算する実装
#状態数の削減のための近似手法の実装
#並列処理
#SendPath
#SendProbMap
#PathDistance
#PostProbXt
##############################################
import os
import sys
import glob
import time
import random
import numpy as np
import scipy as sp
#from numpy.random import multinomial #,uniform #,dirichlet
from scipy.stats import multivariate_normal,multinomial #,t,invwishart,rv_discrete
#from numpy.linalg import inv, cholesky
from math import pi as PI
from math import cos,sin,sqrt,exp,log,degrees,radians,atan2 #,gamma,lgamma,fabs,fsum
from __init__ import *
from JuliusNbest_dec import *
from submodules import *
from numba import jit, njit, prange
from scipy.io import mmwrite, mmread
from scipy.sparse import lil_matrix, csr_matrix
from itertools import izip
#マップを読み込む⇒確率値に変換⇒2次元配列に格納
def ReadMap(outputfile):
    """Load the occupancy grid map and return it as a 2D numpy array.

    Parameters:
        outputfile: folder path prefix (outputfolder + trialname + navigation_folder);
                    the file read is <outputfile>map.csv, comma-separated.

    Returns:
        2D numpy.ndarray of the grid map values.
    """
    map_path = outputfile + "map.csv"
    gridmap = np.loadtxt(map_path, delimiter=",")
    # print() call form behaves identically in Python 2 for a single argument
    # and keeps this function compatible with Python 3.
    print("Read map: " + map_path)
    return gridmap
#コストマップを読み込む⇒確率値に変換⇒2次元配列に格納
def ReadCostMap(outputfile):
    """Load the cost map and return it as a 2D numpy array.

    Parameters:
        outputfile: folder path prefix (outputfolder + trialname + navigation_folder);
                    the file read is <outputfile>costmap.csv, comma-separated.

    Returns:
        2D numpy.ndarray of the cost map values.
    """
    costmap_path = outputfile + "costmap.csv"
    costmap = np.loadtxt(costmap_path, delimiter=",")
    # Bug fix: the log message previously said "contmap.csv", which did not
    # match the file actually read (costmap.csv).
    print("Read costmap: " + costmap_path)
    return costmap
#場所概念の学習済みパラメータを読み込む
def ReadParameters(particle_num, filename):
#THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
r = particle_num
i = 0
for line in open(filename + 'index' + str(r) + '.csv', 'r'): ##読み込む
itemList = line[:-1].split(',')
#print itemList
if (i == 0):
L = len(itemList) -1
elif (i == 1):
K = len(itemList) -1
i += 1
print "L:",L,"K:",K
W_index = []
i = 0
#テキストファイルを読み込み
for line in open(filename + 'W_list' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
if(i == 0):
for j in xrange(len(itemList)):
if (itemList[j] != ""):
W_index = W_index + [itemList[j]]
i = i + 1
#####パラメータW、μ、Σ、φ、πを入力する#####
Mu = [ np.array([ 0.0, 0.0 ]) for i in xrange(K) ] #[ np.array([[ 0.0 ],[ 0.0 ]]) for i in xrange(K) ] #位置分布の平均(x,y)[K]
Sig = [ np.array([ [0.0, 0.0],[0.0, 0.0] ]) for i in xrange(K) ] #位置分布の共分散(2×2次元)[K]
W = [ [0.0 for j in xrange(len(W_index))] for c in xrange(L) ] #場所の名前(多項分布:W_index次元)[L]
#theta = [ [0.0 for j in xrange(DimImg)] for c in xrange(L) ]
Pi = [ 0.0 for c in xrange(L)] #場所概念のindexの多項分布(L次元)
Phi_l = [ [0.0 for i in xrange(K)] for c in xrange(L) ] #位置分布のindexの多項分布(K次元)[L]
i = 0
##Muの読み込み
for line in open(filename + 'mu' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
Mu[i] = np.array([ float(itemList[0]) , float(itemList[1]) ])
#Mu[i] = np.array([[ float(itemList[0]) ],[ float(itemList[1]) ]])
i = i + 1
i = 0
##Sigの読み込み
for line in open(filename + 'sig' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
Sig[i] = np.array([[ float(itemList[0]), float(itemList[1]) ], [ float(itemList[2]), float(itemList[3]) ]])
i = i + 1
##phiの読み込み
c = 0
#テキストファイルを読み込み
for line in open(filename + 'phi' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != "":
Phi_l[c][i] = float(itemList[i])
c = c + 1
##Piの読み込み
for line in open(filename + 'pi' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
Pi[i] = float(itemList[i])
##Wの読み込み
c = 0
#テキストファイルを読み込み
for line in open(filename + 'W' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
#print c,i,itemList[i]
W[c][i] = float(itemList[i])
c = c + 1
"""
##thetaの読み込み
c = 0
#テキストファイルを読み込み
for line in open(filename + 'theta' + str(r) + '.csv', 'r'):
itemList = line[:-1].split(',')
for i in xrange(len(itemList)):
if itemList[i] != '':
#print c,i,itemList[i]
theta[c][i] = float(itemList[i])
c = c + 1
"""
THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
return THETA
#音声ファイルを読み込み
def ReadSpeech(num):
# wavファイルを指定
files = glob.glob(speech_folder_go)
files.sort()
speech_file = files[num]
return speech_file
#音声データを受け取り、音声認識を行う⇒文字列配列を渡す・保存
def SpeechRecognition(speech_file, W_index, step, trialname, outputfile):
##学習した単語辞書を用いて音声認識し、BoWを得る
St = RecogNbest( speech_file, step, trialname )
#print St
Otb_B = [0 for i in xrange(len(W_index))] #[[] for j in xrange(len(St))]
for j in xrange(len(St)):
for i in xrange(5):
St[j] = St[j].replace("<s>", "")
St[j] = St[j].replace("</s>", "")
St[j] = St[j].replace(" <s> ", "")
St[j] = St[j].replace("<sp>", "")
St[j] = St[j].replace(" </s>", "")
St[j] = St[j].replace(" ", " ")
St[j] = St[j].replace("\n", "")
print j,St[j]
Otb = St[j].split(" ")
for j2 in xrange(len(Otb)):
#print n,j,len(Otb_Samp[r][n])
for i in xrange(len(W_index)):
#print W_index[i].decode('sjis'),Otb[j]
if (W_index[i].decode('sjis') == Otb[j2] ): #'utf8'
Otb_B[i] = Otb_B[i] + 1
#print W_index[i].decode('sjis'),Otb[j]
print Otb_B
# 認識結果をファイル保存
f = open( outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_St.csv" , "w") # , "sjis" )
for i in xrange(len(St)):
f.write(St[i].encode('sjis'))
f.write('\n')
f.close()
return Otb_B
#角度を[-π,π]に変換(参考:https://github.com/AtsushiSakai/PythonRobotics)
def pi_2_pi(angle):
return (angle + PI) % (2 * PI) - PI
#三角分布の確率密度関数
def Prob_Triangular_distribution_pdf(a,b):
prob = max( 0, ( 1 / (sqrt(6)*b) ) - ( abs(a) / (6*(b**2)) ) )
return prob
#確率分布の選択
def Motion_Model_Prob(a,b):
if (MotionModelDist == "Gauss"):
p = multivariate_normal.pdf(a, 0, b)
elif (MotionModelDist == "Triangular"):
p = Prob_Triangular_distribution_pdf(a, b)
return p
#オドメトリ動作モデル(確率ロボティクスp.122) #現状、不使用
def Motion_Model_Odometry(xt,ut,xt_1):
#ut = (xt_1_bar, xt_bar), xt_1_bar = (x_bar, y_bar, theta_bar), xt_bar = (x_dash_bar, y_dash_bar, theta_dash_bar)
x_dash, y_dash, theta_dash = xt
x, y, theta = xt_1
xt_1_bar, xt_bar = ut
x_dash_bar, y_dash_bar, theta_dash_bar = xt_bar
x_bar, y_bar, theta_bar = xt_1_bar
delta_rot1 = atan2(y_dash_bar - y_bar, x_dash_bar - x_bar) - theta_bar
delta_trans = sqrt( (x_dash_bar - x_bar)**2 + (y_dash_bar - y_bar)**2 )
delta_rot2 = theta_dash_bar - theta_bar - delta_rot1
delta_rot1_hat = atan2(y_dash - y, x_dash - x) - theta
delta_trans_hat = sqrt( (x_dash - x)**2 + (y_dash - y)**2 )
delta_rot2_hat = theta_dash - theta - delta_rot1_hat
p1 = Motion_Model_Prob(pi_2_pi(delta_rot1 - delta_rot1_hat), odom_alpha1*(delta_rot1_hat**2) + odom_alpha2*(delta_trans_hat**2))
p2 = Motion_Model_Prob(delta_trans - delta_trans_hat, odom_alpha3*(delta_trans_hat**2) + odom_alpha4*(delta_rot1_hat**2+delta_rot2_hat**2))
p3 = Motion_Model_Prob(pi_2_pi(delta_rot2 - delta_rot2_hat), odom_alpha1*(delta_rot2_hat**2) + odom_alpha2*(delta_trans_hat**2))
return p1*p2*p3
#オドメトリ動作モデル(簡略版) #角度は考慮せず、移動量に応じて確率が決まる(ドーナツ型分布)
def Motion_Model_Odometry_No_theta(xt,ut,xt_1):
#ut = (xt_1_bar, xt_bar), xt_1_bar = (x_bar, y_bar), xt_bar = (x_dash_bar, y_dash_bar)
#utは相対的な位置関係で良い
x_dash, y_dash = xt
x, y = xt_1
delta_trans = cmd_vel #sqrt( (x_dash_bar - x_bar)**2 + (y_dash_bar - y_bar)**2 )
delta_trans_hat = sqrt( (x_dash - x)**2 + (y_dash - y)**2 )
p2 = Motion_Model_Prob( delta_trans - delta_trans_hat, odom_alpha3*(delta_trans_hat**2) )
return p2 #p1*p2*p3
#動作モデル(独自) #角度は考慮せず、移動先位置に応じて確率が決まる(ガウス分布)
def Motion_Model_Original(xt,ut,xt_1):
xt = np.array(xt)
#ut = np.array(ut)
xt_1 = np.array(xt_1)
dist = np.sum((xt-xt_1)**2)
px = Motion_Model_Prob( xt[0] - (xt_1[0]+ut[0]), odom_alpha3*dist )
py = Motion_Model_Prob( xt[1] - (xt_1[1]+ut[1]), odom_alpha3*dist )
return px*py
#ROSの地図座標系をPython内の2次元配列のインデックス番号に対応付ける
def Map_coordinates_To_Array_index(X):
X = np.array(X)
Index = np.round( (X - origin) / resolution ).astype(int) #四捨五入してint型にする
return Index
#Python内の2次元配列のインデックス番号からROSの地図座標系への変換
def Array_index_To_Map_coordinates(Index):
Index = np.array(Index)
X = np.array( (Index * resolution) + origin )
return X
#gridmap and costmap から確率の形のCostMapProbを得ておく
@jit(parallel=True)
def CostMapProb_jit(gridmap, costmap):
CostMapProb = (100.0 - costmap) / 100.0 #コストマップを確率の形にする
#gridの数値が0(非占有)のところだけ数値を持つようにマスクする
GridMapProb = 1*(gridmap == 0) #gridmap * (gridmap != 100) * (gridmap != -1) #gridmap[][]が障害物(100)または未探索(-1)であれば確率0にする
return CostMapProb * GridMapProb
#@jit(nopython=True, parallel=True)
@jit(parallel=True)  # NOTE: does not actually parallelize; only 1 CPU observed
def PostProbMap_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
    # For every free cell, compute
    #   sum_c LookupTable_ProbCt[c] * sum_k N(x; Mu[k], Sig[k]) * Phi_l[c][k]
    # i.e. the position posterior used as the path weight, then mask by CostMapProb.
    PostProbMap = np.zeros((map_length,map_width))
    # Naive nested-loop implementation (np.vectorize/np.frompyfunc might be faster).
    for length in prange(map_length):
        for width in prange(map_width):
            if (CostMapProb[length][width] != 0.0):  # skip occupied/unknown cells
                X_temp = Array_index_To_Map_coordinates([width, length])  # check that map axes match (x=width, y=length)
                sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ]
                PostProbMap[length][width] = np.sum( LookupTable_ProbCt * sum_i_GaussMulti )
    return CostMapProb * PostProbMap
@jit(parallel=True)
def PostProb_ij(Index_temp,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
    # Posterior probability for a single cell Index_temp = [width, length].
    # NOTE: reads CostMapProb from module scope (not a parameter).
    if (CostMapProb[Index_temp[1]][Index_temp[0]] != 0.0):
        X_temp = Array_index_To_Map_coordinates(Index_temp)  # check that map axes match
        # Kept as a plain list: wrapping it in np.array() makes numba raise an error.
        sum_i_GaussMulti = [ np.sum([multivariate_normal.pdf(X_temp, mean=Mu[k], cov=Sig[k]) * Phi_l[c][k] for k in xrange(K)]) for c in xrange(L) ]
        PostProb = np.sum( LookupTable_ProbCt * sum_i_GaussMulti )
    else:
        PostProb = 0.0
    return PostProb
#@jit(parallel=True)  # no speedup observed (only 1 CPU used)
def PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K):
    """Build the full posterior map by evaluating PostProb_ij for every cell,
    then mask it with the traversability probabilities."""
    cell_probs = [ [ PostProb_ij([width, length],Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K) for width in xrange(map_width) ] for length in xrange(map_length) ]
    return CostMapProb * np.array(cell_probs)
#@jit(nopython=True, parallel=True)
#@jit #(parallel=True)  # raises an error for unknown reasons
def Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
    # Dense log-transition matrix over the reduced state space:
    # log(1)=0.0 for moves into the 8-neighbourhood (+ staying put),
    # approx_log_zero (module-level constant) everywhere else.
    # WARNING: O(state_num^2) memory.
    #Transition = np.ones((state_num,state_num)) * approx_log_zero
    Transition = [[approx_log_zero for j in range(state_num)] for i in range(state_num)]
    print "Memory OK"
    #print IndexMap_one_NOzero
    # Only transitions between a cell and its 8 neighbours need to be considered.
    for n in prange(state_num):
        MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n]  # to absolute coordinates
        MoveIndex_list_n_list = MoveIndex_list_n.tolist()
        for c in prange(len(MoveIndex_list_n_list)):
            if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
                m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # candidate c may not be a reachable state
                Transition[n][m] = 0.0  # indices are state-to-state (not map x,y)
    return Transition
def Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list):
    # Sparse 0/1 transition matrix over the reduced state space; entry (n, m)
    # is 1 iff state m is in the 8-neighbourhood of (or equal to) state n.
    # Built as LIL for cheap incremental writes, returned as CSR.
    Transition = lil_matrix((state_num,state_num))
    print "Memory OK"
    # Only transitions between a cell and its 8 neighbours need to be considered.
    for n in xrange(state_num):
        MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n]  # to absolute coordinates
        MoveIndex_list_n_list = MoveIndex_list_n.tolist()
        for c in xrange(len(MoveIndex_list_n_list)):
            if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
                m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # candidate c may not be a reachable state
                Transition[n,m] = 1  # state-to-state index (not map x,y)
    return Transition.tocsr()
#Global path estimation by dynamic programming (the SpCoNavi computation)
def PathPlanner(S_Nbest, X_init, THETA, CostMapProb):
    """Plan a path from X_init given the recognized command S_Nbest
    (bag-of-words counts) and the learned parameters THETA.

    Returns (path as array indices, path in ROS coordinates, PathWeightMap)."""
    print "[RUN] PathPlanner"
    # Unpack the learned parameters
    W, W_index, Mu, Sig, Pi, Phi_l, K, L = THETA
    # X_init is already given in array-index coordinates here.
    X_init_index = X_init ###TEST #Map_coordinates_To_Array_index(X_init)
    print "Initial Xt:",X_init_index
    # Map size in cells
    map_length = len(CostMapProb)
    map_width = len(CostMapProb[0])
    print "MAP[length][width]:",map_length,map_width
    # Precompute p(St|W_c) * p(Ct|Pi) per concept c
    LookupTable_ProbCt = np.array([multinomial.pmf(S_Nbest, sum(S_Nbest), W[c])*Pi[c] for c in xrange(L)])
    ###SaveLookupTable(LookupTable_ProbCt, outputfile)
    ###LookupTable_ProbCt = ReadLookupTable(outputfile)
    print "Please wait for PostProbMap"
    output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
    # Reuse the weight map file if it already exists (unless a refresh is forced).
    if (os.path.isfile(output) == False) or (UPDATE_PostProbMap == 1):
        PathWeightMap = PostProbMap_nparray_jit(CostMapProb,Mu,Sig,Phi_l,LookupTable_ProbCt,map_length,map_width,L,K)
        # [TEST] save the computed map right away
        SaveProbMap(PathWeightMap, outputfile)
    else:
        PathWeightMap = ReadProbMap(outputfile)
        #print "already exists:", output
    print "[Done] PathWeightMap."
    # [memory/time saving] crop the map to a ((2*T_horizon)+1)-square window
    # around the initial position.
    Bug_removal_savior = 0  # guards the coordinate re-offset after planning
    x_min = X_init_index[0] - T_horizon
    x_max = X_init_index[0] + T_horizon
    y_min = X_init_index[1] - T_horizon
    y_max = X_init_index[1] + T_horizon
    if (x_min>=0 and x_max<=map_width and y_min>=0 and y_max<=map_length):
        PathWeightMap = PathWeightMap[x_min:x_max+1, y_min:y_max+1]
        X_init_index = [T_horizon, T_horizon]
        # Re-measure the (cropped) map size
        map_length = len(PathWeightMap)
        map_width = len(PathWeightMap[0])
    else:
        print "[WARNING] The initial position (or init_pos +/- T_horizon) is outside the map."
        Bug_removal_savior = 1
        #print X_init, X_init_index
    # Reduce the state space: flatten to 1D and drop zero-probability cells.
    PathWeight_one_NOzero = PathWeightMap[PathWeightMap!=0.0]
    state_num = len(PathWeight_one_NOzero)
    print "PathWeight_one_NOzero state_num:", state_num
    # Keep the mapping between 2D indices and the reduced 1D state space
    # (kept as a list of [i, j] pairs, not np.array, for .index() lookups).
    IndexMap = np.array([[(i,j) for j in xrange(map_width)] for i in xrange(map_length)])
    IndexMap_one_NOzero = IndexMap[PathWeightMap!=0.0].tolist()
    print "IndexMap_one_NOzero"
    # Initial position in the reduced 1D space
    if (X_init_index in IndexMap_one_NOzero):
        X_init_index_one = IndexMap_one_NOzero.index(X_init_index)
    else:
        print "[ERROR] The initial position is not a movable position on the map."
        #print X_init, X_init_index
        X_init_index_one = 0
    print "Initial index", X_init_index_one
    # Candidate move offsets (relative coordinates)
    MoveIndex_list = MovePosition_2D([0,0])
    print "MoveIndex_list"
    """
    # Precomputed transition distribution (disabled: handled inside update_lite)
    print "Please wait for Transition"
    output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx"
    if (os.path.isfile(output_transition) == False):
        #Transition = Transition_log_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
        Transition = Transition_sparse_jit(state_num,IndexMap_one_NOzero,MoveIndex_list)
        #SaveTransition(Transition, outputfile)
        SaveTransition_sparse(Transition, outputfile)
    else:
        Transition = ReadTransition_sparse(state_num, outputfile)
    Transition_one_NOzero = Transition
    print "[Done] Transition distribution."
    """
    # Run the Viterbi algorithm (emissions in log space)
    Path_one = ViterbiPath(X_init_index_one, np.log(PathWeight_one_NOzero), state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init, Bug_removal_savior)
    # 1D state indices -> 2D array indices -> ROS coordinates
    Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
    if ( Bug_removal_savior == 0):
        # Undo the T_horizon crop offset
        Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
    else:
        Path_2D_index_original = Path_2D_index
    Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original)
    print "Init:", X_init
    print "Path:\n", Path_2D_index_original
    return Path_2D_index_original, Path_ROS, PathWeightMap
#Candidate next positions: the 8-neighbourhood plus the current cell
def MovePosition_2D(Xt):
    """Return the 9 candidate next cells (8 neighbours + staying), scaled by
    the per-step displacement cmd_vel (module-level) and offset from Xt."""
    offsets = np.array([ [-1,-1],[-1,0],[-1,1], [0,-1],[0,0],[0,1], [1,-1],[1,0],[1,1] ])
    return offsets * cmd_vel + np.array(Xt)
#Viterbi helper (cf. https://qiita.com/kkdd/items/6cbd949d03bc56e33e8e)
def update(cost, trans, emiss):
    """One Viterbi forward step for a single target state.

    cost: list of (best log-prob, backpointer) tuples for the previous step.
    trans: per-source log transition scores into this state.
    emiss: log emission score of this state.
    Returns (best total + emiss, index of the best predecessor)."""
    totals = [prev[0] + t for prev, t in zip(cost, trans)]
    best = max(totals)
    return best + emiss, totals.index(best)
#Unexpectedly slow: T barely advances (not adopted)
def update_sparse(cost, trans, emiss):
    # Viterbi step variant taking one sparse (1 x state_num) 0/1 transition row.
    COST = 0 #COST, INDEX = range(2)
    # Densify the row into log space: 0 entries -> approx_log_zero, 1 entries -> 0.0.
    trans_log = [(trans[0,i]==0)*approx_log_zero for i in xrange(trans.get_shape()[1])]
    arr = [c[COST]+t for c, t in zip(cost, trans_log)]
    max_arr = max(arr)
    return max_arr + emiss, arr.index(max_arr)
@jit  # NOTE: numba jit can raise errors depending on the code
def update_lite(cost, n, emiss, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition):
    # Viterbi step that rebuilds the log-transition row for state n in place.
    # `Transition` is a reused buffer deliberately mutated through the reference
    # (reset to approx_log_zero, then reachable states set to 0.0 = log 1).
    for i in prange(len(Transition)):
        Transition[i] = approx_log_zero
    # Only the current cell and its 8 neighbours are reachable.
    MoveIndex_list_n = MoveIndex_list + IndexMap_one_NOzero[n]  # to absolute coordinates
    MoveIndex_list_n_list = MoveIndex_list_n.tolist()
    count_t = 0
    for c in prange(len(MoveIndex_list_n_list)):  # prange is faster than xrange here
        if (MoveIndex_list_n_list[c] in IndexMap_one_NOzero):
            m = IndexMap_one_NOzero.index(MoveIndex_list_n_list[c])  # candidate c may not be a reachable state
            Transition[m] = 0.0  # state-to-state index (not map x,y)
            count_t += 1
    # Sanity checks on the number of reachable states.
    if (count_t == 0):  # all transitions impossible: the robot cannot move
        print "[ERROR] All transition is approx_log_zero."
    elif (count_t == 1):  # only a single reachable cell
        print "[WARNING] One transition is zero."
    arr = cost + Transition
    max_arr_index = np.argmax(arr)
    return arr[max_arr_index] + emiss, max_arr_index
#def transition(m, n):
# return [[1.0 for i in xrange(m)] for j in xrange(n)]
#def emission(n):
# return [random.random() for j in xrange(n)]
#Compute the Viterbi path and return it
#@jit(parallel=True)  # temporarily disabled: print-related(?) errors
def ViterbiPath(X_init, PathWeight, state_num,IndexMap_one_NOzero,MoveIndex_list, outputname, X_init_original, Bug_removal_savior):
    """Run the Viterbi algorithm over the reduced state space and return the
    maximum-probability state sequence (list of 1D state indices), saving
    intermediate paths/trellises every SAVE_T_temp steps."""
    print "Start Viterbi Algorithm"
    INDEX = 1  # position of the backpointer in each (cost, index) tuple
    INITIAL = (approx_log_zero, X_init)  # (cost, backpointer), seeded with the initial 1D index
    #print "Initial:",X_init
    cost = [INITIAL for i in prange(len(PathWeight))]
    cost[X_init] = (0.0, X_init)  # the initial position is certain: log(1.0)
    trellis = []
    e = PathWeight  # emission (log) scores per state
    m = [i for i in prange(len(PathWeight))]  # target-state ids for the per-state update
    Transition = np.array([approx_log_zero for j in prange(state_num)])  # reusable buffer, mutated by update_lite
    temp = 1
    #Forward
    print "Forward"
    for i in prange(T_horizon):  # advance one cell per step up to the planning horizon
        # do not reuse i as another indicator inside this loop
        print "T:",i+1
        # Optionally resume from a saved trellis at step T_restart.
        if (i+1 == T_restart):
            outputname_restart = outputfile + "T"+str(T_restart)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
            trellis = ReadTrellis(outputname_restart, i+1)
            cost = trellis[-1]
        if (i+1 >= T_restart):
            #cost = [update(cost, t, f) for t, f in zip(m, e)]
            #cost = [update_sparse(cost, Transition[t], f) for t, f in zip(m, e)]  # unexpectedly slow
            cost_np = np.array([cost[c][0] for c in prange(len(cost))])
            cost = [update_lite(cost_np, t, f, state_num,IndexMap_one_NOzero,MoveIndex_list,Transition) for t, f in izip(m, e)]  # izip saves memory over zip; same speed
            trellis.append(cost)
            #print "i", i, [(c[COST], c[INDEX]) for c in cost]  # backpointers record where each state came from
        if (SAVE_T_temp == temp):
            # Backward pass on the partial trellis: save an intermediate path.
            last = [trellis[-1][j][0] for j in xrange(len(trellis[-1]))]
            path_one = [last.index(max(last))]  # start from the best final state
            #print "last",last,"max",path
            for x in reversed(trellis):
                path_one = [x[path_one[0]][INDEX]] + path_one
            path_one = path_one[1:len(path_one)]  # drop the initial position
            SavePathTemp(X_init_original, path_one, i+1, outputname, IndexMap_one_NOzero, Bug_removal_savior)
            if (SAVE_Trellis == 1):
                SaveTrellis(trellis, outputname, i+1)
            temp = 0
        temp += 1
    # The final transition can be uniform.
    e_last = [0.0]
    m_last = [[0.0 for i in range(len(PathWeight))]]
    cost = [update(cost, t, f) for t, f in zip(m_last, e_last)]
    trellis.append(cost)
    #Backward
    print "Backward"
    path = [0]  # the artificial uniform final step leaves a single state
    for x in reversed(trellis):
        path = [x[path[0]][INDEX]] + path
    path = path[1:len(path)-1]  # drop the initial position and the artificial final step
    print 'Maximum prob path:', path
    return path
#推定されたパスを(トピックかサービスで)送る
#def SendPath(Path):
#パスをファイル保存する(形式未定)
def SavePath(X_init, Path, Path_ROS, outputname):
print "PathSave"
if (SAVE_X_init == 1):
# ロボット初期位置をファイル保存(index)
np.savetxt(outputname + "_X_init.csv", X_init, delimiter=",")
# ロボット初期位置をファイル保存(ROS)
np.savetxt(outputname + "_X_init_ROS.csv", Array_index_To_Map_coordinates(X_init), delimiter=",")
# 結果をファイル保存(index)
np.savetxt(outputname + "_Path.csv", Path, delimiter=",")
# 結果をファイル保存(ROS)
np.savetxt(outputname + "_Path_ROS.csv", Path_ROS, delimiter=",")
print "Save Path: " + outputname + "_Path.csv and _Path_ROS.csv"
#パスをファイル保存する(形式未定)
def SavePathTemp(X_init, Path_one, temp, outputname, IndexMap_one_NOzero, Bug_removal_savior):
print "PathSaveTemp"
#1次元配列のインデックスを2次元配列のインデックスへ⇒ROSの座標系にする
Path_2D_index = np.array([ IndexMap_one_NOzero[Path_one[i]] for i in xrange(len(Path_one)) ])
if ( Bug_removal_savior == 0):
Path_2D_index_original = Path_2D_index + np.array(X_init) - T_horizon
else:
Path_2D_index_original = Path_2D_index
Path_ROS = Array_index_To_Map_coordinates(Path_2D_index_original) #
#Path = Path_2D_index_original #Path_ROS #必要な方をPathとして返す
# 結果をファイル保存(index)
np.savetxt(outputname + "_Path" + str(temp) + ".csv", Path_2D_index_original, delimiter=",")
# 結果をファイル保存(ROS)
np.savetxt(outputname + "_Path_ROS" + str(temp) + ".csv", Path_ROS, delimiter=",")
print "Save Path: " + outputname + "_Path" + str(temp) + ".csv and _Path_ROS" + str(temp) + ".csv"
def SaveTrellis(trellis, outputname, temp):
print "SaveTrellis"
# 結果をファイル保存
np.save(outputname + "_trellis" + str(temp) + ".npy", trellis) #, delimiter=",")
print "Save trellis: " + outputname + "_trellis" + str(temp) + ".npy"
def ReadTrellis(outputname, temp):
print "ReadTrellis"
# 結果をファイル保存
trellis = np.load(outputname + "_trellis" + str(temp) + ".npy") #, delimiter=",")
print "Read trellis: " + outputname + "_trellis" + str(temp) + ".npy"
return trellis
#パス計算のために使用したLookupTable_ProbCtをファイル保存する
def SaveLookupTable(LookupTable_ProbCt, outputfile):
# 結果をファイル保存
output = outputfile + "LookupTable_ProbCt.csv"
np.savetxt( output, LookupTable_ProbCt, delimiter=",")
print "Save LookupTable_ProbCt: " + output
#パス計算のために使用したLookupTable_ProbCtをファイル読み込みする
def ReadLookupTable(outputfile):
# 結果をファイル読み込み
output = outputfile + "LookupTable_ProbCt.csv"
LookupTable_ProbCt = np.loadtxt(output, delimiter=",")
print "Read LookupTable_ProbCt: " + output
return LookupTable_ProbCt
#パス計算のために使用した確率値コストマップをファイル保存する
def SaveCostMapProb(CostMapProb, outputfile):
# 結果をファイル保存
output = outputfile + "CostMapProb.csv"
np.savetxt( output, CostMapProb, delimiter=",")
print "Save CostMapProb: " + output
#パス計算のために使用した確率値コストマップをファイル読み込みする
def ReadCostMapProb(outputfile):
# 結果をファイル読み込み
output = outputfile + "CostMapProb.csv"
CostMapProb = np.loadtxt(output, delimiter=",")
print "Read CostMapProb: " + output
return CostMapProb
#パス計算のために使用した確率値マップを(トピックかサービスで)送る
#def SendProbMap(PathWeightMap):
#パス計算のために使用した確率値マップをファイル保存する
def SaveProbMap(PathWeightMap, outputfile):
# 結果をファイル保存
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
np.savetxt( output, PathWeightMap, delimiter=",")
print "Save PathWeightMap: " + output
#パス計算のために使用した確率値マップをファイル読み込みする
def ReadProbMap(outputfile):
# 結果をファイル読み込み
output = outputfile + "N"+str(N_best)+"G"+str(speech_num) + "_PathWeightMap.csv"
PathWeightMap = np.loadtxt(output, delimiter=",")
print "Read PathWeightMap: " + output
return PathWeightMap
def SaveTransition(Transition, outputfile):
# 結果をファイル保存
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
#np.savetxt(outputfile + "_Transition_log.csv", Transition, delimiter=",")
f = open( output_transition , "w")
for i in xrange(len(Transition)):
for j in xrange(len(Transition[i])):
f.write(str(Transition[i][j]) + ",")
f.write('\n')
f.close()
print "Save Transition: " + output_transition
def ReadTransition(state_num, outputfile):
Transition = [[approx_log_zero for j in xrange(state_num)] for i in xrange(state_num)]
# 結果をファイル読み込み
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_log.csv"
#Transition = np.loadtxt(outputfile + "_Transition_log.csv", delimiter=",")
i = 0
#テキストファイルを読み込み
for line in open(output_transition, 'r'):
itemList = line[:-1].split(',')
for j in xrange(len(itemList)):
if itemList[j] != '':
Transition[i][j] = float(itemList[j])
i = i + 1
print "Read Transition: " + output_transition
return Transition
def SaveTransition_sparse(Transition, outputfile):
# 結果をファイル保存(.mtx形式)
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse"
mmwrite(output_transition, Transition)
print "Save Transition: " + output_transition
def ReadTransition_sparse(state_num, outputfile):
#Transition = [[0 for j in xrange(state_num)] for i in xrange(state_num)]
# 結果をファイル読み込み
output_transition = outputfile + "T"+str(T_horizon) + "_Transition_sparse.mtx"
Transition = mmread(output_transition).tocsr() #.todense()
print "Read Transition: " + output_transition
return Transition
##Read the base word dictionary and append the learned words
def WordDictionaryUpdate2(step, filename, W_list):
    # Build a Julius word dictionary (filename/WDnavi.htkdic): copy the base
    # dictionary header, then append each learned place name from W_list with
    # a phoneme sequence looked up greedily in the base dictionary entries.
    # All text handling is in shift-jis.
    LIST = []
    LIST_plus = []
    i_best = len(W_list)
    hatsuon = [ "" for i in xrange(i_best) ]  # phoneme (pronunciation) string per word
    TANGO = []  # [surface form, phoneme string] pairs from the base dictionary
    ## Read the base word dictionary
    for line in open('./lang_m/' + lang_init, 'r'):
        itemList = line[:-1].split(' ')
        LIST = LIST + [line]
        for j in xrange(len(itemList)):
            itemList[j] = itemList[j].replace("[", "")
            itemList[j] = itemList[j].replace("]", "")
        TANGO = TANGO + [[itemList[1],itemList[2]]]
    #print TANGO
    if (1):
        ## Process each word of W_list in turn
        for c in xrange(i_best):
            W_list_sj = unicode(W_list[c], encoding='shift_jis')
            if len(W_list_sj) != 1:  # single-character names are skipped
                moji = 0
                # Greedy longest-match pronunciation lookup: try a 3-char
                # "X_Y" pattern, then a 2-char pair, then a single char.
                while (moji < len(W_list_sj)):
                    flag_moji = 0
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj)-2 > moji) and (flag_moji == 0):
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+"_"+W_list_sj[moji+2]) and (W_list_sj[moji+1] == "_"):
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 3
                                flag_moji = 1
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj)-1 > moji) and (flag_moji == 0):
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]+W_list_sj[moji+1]):
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 2
                                flag_moji = 1
                    for j in xrange(len(TANGO)):
                        if (len(W_list_sj) > moji) and (flag_moji == 0):
                            if (unicode(TANGO[j][0], encoding='shift_jis') == W_list_sj[moji]):
                                hatsuon[c] = hatsuon[c] + TANGO[j][1]
                                moji = moji + 1
                                flag_moji = 1
                print W_list_sj,hatsuon[c]
            else:
                print W_list_sj, "(one name)"
    print JuliusVer,HMMtype
    if (JuliusVer == "v4.4" and HMMtype == "DNN"):
        # Convert triphone position markers for the DNN acoustic model:
        # first make every phoneme word-internal (*_I) ...
        for i in xrange(len(hatsuon)):
            hatsuon[i] = hatsuon[i].replace("_S","_I")
            hatsuon[i] = hatsuon[i].replace("_B","_I")
            hatsuon[i] = hatsuon[i].replace("_E","_I")
        # ... then mark the first phoneme *_B and the last *_E.
        for i in xrange(len(hatsuon)):
            hatsuon[i] = hatsuon[i].replace("_I","_B", 1)
            hatsuon[i] = hatsuon[i][0:-2] + "E "
            # Special-case phonemes N and q.
            hatsuon[i] = hatsuon[i].replace("q_S","q_I")
            hatsuon[i] = hatsuon[i].replace("q_B","q_I")
            hatsuon[i] = hatsuon[i].replace("N_S","N_I")
    ## Part-of-speech tag appended to each place name ("noun")
    meishi = u'名詞'
    meishi = meishi.encode('shift-jis')
    ## Write the dictionary file: keep the first 3 header lines of the base file
    fp = open( filename + '/WDnavi.htkdic', 'w')
    for list in xrange(len(LIST)):
        if (list < 3):
            fp.write(LIST[list])
    #if (UseLM == 1):
    if (1):
        ## Append the new words
        c = 0
        for mi in xrange(i_best):
            if hatsuon[mi] != "":
                if ((W_list[mi] in LIST_plus) == False):  # skip duplicate words
                    flag_tango = 0
                    # Skip words already present in the base dictionary.
                    for j in xrange(len(TANGO)):
                        if(W_list[mi] == TANGO[j][0]):
                            flag_tango = -1
                    if flag_tango == 0:
                        LIST_plus = LIST_plus + [W_list[mi]]
                        fp.write(LIST_plus[c] + "+" + meishi +" [" + LIST_plus[c] + "] " + hatsuon[mi])
                        fp.write('\n')
                        c = c+1
    fp.close()
########################################
if __name__ == '__main__':
    print "[START] SpCoNavi."
    # Command-line arguments:
    #   1: trialname — folder of the learned parameters
    trialname = sys.argv[1]
    #   2: particle number to read (overwritten below by the max-weight particle)
    particle_num = sys.argv[2]
    #   3: index of the candidate initial robot position
    init_position_num = sys.argv[3]
    #   4: index of the speech-command file
    speech_num = sys.argv[4]
    # Read the particle weight file; its first line holds the index of the
    # maximum-likelihood particle.
    i = 0
    for line in open(datafolder + trialname + '/'+ str(step) + '/weights.csv', 'r'):
        if (i == 0):
            MAX_Samp = int(line)
        i += 1
    particle_num = MAX_Samp
    if (SAVE_time == 1):
        # Wall-clock start time
        start_time = time.time()
    ## Full paths of the input/output folders
    filename = datafolder + trialname + "/" + str(step) +"/"
    print filename, particle_num
    outputfile = outputfolder + trialname + navigation_folder
    outputname = outputfile + "T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
    Makedir( outputfile )
    # Load the learned parameters: THETA = [W,W_index,Mu,Sig,Pi,Phi_l,K,L]
    THETA = ReadParameters(particle_num, filename)
    W_index = THETA[1]
    ## Register the word dictionary (skip if the file already exists)
    if (os.path.isfile(filename + '/WDnavi.htkdic') == False):
        WordDictionaryUpdate2(step, filename, W_index)
    else:
        print "Word dictionary already exists:", filename + '/WDnavi.htkdic'
    # Build (or load) the probability-form cost map.
    if (os.path.isfile(outputfile + "CostMapProb.csv") == False):
        ## Load the occupancy grid and cost map, convert, and save
        gridmap = ReadMap(outputfile)
        costmap = ReadCostMap(outputfile)
        CostMapProb = CostMapProb_jit(gridmap, costmap)
        SaveCostMapProb(CostMapProb, outputfile)
    else:
        CostMapProb = ReadCostMapProb(outputfile)
    ## Load the speech command file
    speech_file = ReadSpeech(int(speech_num))
    if (SAVE_time == 1):
        # Time spent on initialization/loading
        start_recog_time = time.time()
        time_init = start_recog_time - start_time
        fp = open( outputname + "_time_init.txt", 'w')
        fp.write(str(time_init)+"\n")
        fp.close()
    # Speech recognition
    S_Nbest = SpeechRecognition(speech_file, W_index, step, trialname, outputfile)
    if (SAVE_time == 1):
        # Time spent on speech recognition
        end_recog_time = time.time()
        time_recog = end_recog_time - start_recog_time
        fp = open( outputname + "_time_recog.txt", 'w')
        fp.write(str(time_recog)+"\n")
        fp.close()
    # Path planning
    Path, Path_ROS, PathWeightMap = PathPlanner(S_Nbest, X_candidates[int(init_position_num)], THETA, CostMapProb)
    if (SAVE_time == 1):
        # Time spent on path planning
        end_pp_time = time.time()
        time_pp = end_pp_time - end_recog_time
        fp = open( outputname + "_time_pp.txt", 'w')
        fp.write(str(time_pp)+"\n")
        fp.close()
    # Save the path (the probability map is saved inside PathPlanner)
    SavePath(X_candidates[int(init_position_num)], Path, Path_ROS, outputname)
    print "[END] SpCoNavi."
########################################
| [
"a.taniguchi@em.ci.ritsumei.ac.jp"
] | a.taniguchi@em.ci.ritsumei.ac.jp |
abc4485dd7fd0ee1e358442f4b46caf996041df3 | 7c9425e73f12622042bdc783b014976e8e8498dd | /django/pages/views.py | 38dcfa4cb9e002ace8e4c2f9a3a79d003575ad7b | [] | no_license | SameerKhan5669/python-WebFramworks | 7f6822780ac7d133e2f2f54d3ad53c816db37943 | 62d1d1d61d698d9dd4b4aba386b9e75aa14cf676 | refs/heads/main | 2023-08-31T18:17:27.791069 | 2021-10-04T17:22:16 | 2021-10-04T17:22:16 | 407,669,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from django.shortcuts import render
# Create your views here.
# pages/views.py
from django.http import HttpResponse
def homePageView(request):
    """Serve the home page as a plain 'Hello, World!' HTTP response."""
    body = 'Hello, World!'
    return HttpResponse(body)
"sameer.khan@freshbooks.com"
] | sameer.khan@freshbooks.com |
5f7755aabf8fbe67914c3bbf540ddcfad5fe2dca | 036fb4fc50bb1fab2cca125484bfe3a0726894bc | /note.py | 8f21429c0e16dfa22801f6af73dc70acbbde5a8c | [] | no_license | SWC-Painist/Backend_Api | 8df5ebc46bde9831dbd04e63be47bf4815868d16 | 34924745985780eb832edaf5d3c4f809c86b1d30 | refs/heads/main | 2023-04-14T04:17:34.969567 | 2021-03-29T10:58:22 | 2021-03-29T10:58:22 | 351,360,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,986 | py | NUM_NOTENAME_LIST = ['Not Piano Key' for i in range(0,109)]
TELVE_TONE_TEMPERAMENT = ['c_','c#_/db_,','d_','d#_/eb_','e_','f_','f#_/gb_','g_','g#_/ab_','a_','a#_/bb_','b_']
#Generate Notename List
for i in range(0,109):
if i < 24 :
continue
else:
octa,num = int((i-24)/12), ((i-24)%12)
NUM_NOTENAME_LIST[i] = TELVE_TONE_TEMPERAMENT[num].replace('_',str(octa+1))
NUM_NOTENAME_LIST[21] = 'c0'
NUM_NOTENAME_LIST[22] = 'a#0/bb0'
NUM_NOTENAME_LIST[23] = 'b0'
STR_TO_MIDI_MAP = {}
for i,s in enumerate(NUM_NOTENAME_LIST) :
if s == 'Not Piano Key':
continue
if s.find('/') != -1:
s = s.split('/')
STR_TO_MIDI_MAP.update({s[0]:i,s[1]:i})
else:
STR_TO_MIDI_MAP.update({s:i})
class pianoNote:
'''
Note class.
contains pitch time(length) velocity and name
'''
def __init__(self, __mNum : int, __start : int, __end, __velo : int):
'''
Args:
__mNum : midi node number
__start : time start
__end : time end
__velo : note velocity
constructor for midi event
'''
self.MidiNum = __mNum
self.start = __start
self.end = __end
self.velocity = __velo
self.TimeDiv = 0
self.chord = False
self.Modifier = ''
self.dot = 0
self.name = NUM_NOTENAME_LIST[self.MidiNum]
def find_modifier(self, note_str : str):
sharp = note_str.find('#')
flat = note_str.find('&')
if sharp != -1:
if note_str.find('##') != -1:
return '##'
else:
return '#'
elif flat != -1:
if note_str.find('&&') != -1:
return 'bb'
else:
return 'b'
elif note_str.find('n') != -1:
return 'n'
return ''
def fromStr(self, from_str : str):
back_index = from_str.__len__() - 1
while back_index >= 0 and from_str[back_index] == '.':
self.dot = self.dot + 1
back_index = back_index - 1
from_str = from_str[0:back_index+1].split('/')
self.TimeDiv = int(from_str[1])
note_len = 1000/self.TimeDiv
note_len = note_len * 1.5**self.dot
self.end = self.start + note_len
self.Modifier = self.find_modifier(from_str[0])
if self.Modifier == '##':
self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
fake_name = self.name[0]+self.name[-1]
self.MidiNum = STR_TO_MIDI_MAP.get(fake_name) + 2
elif self.Modifier == 'bb':
self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
fake_name = self.name[0]+self.name[-1]
self.MidiNum = STR_TO_MIDI_MAP.get(fake_name) - 2
elif self.Modifier == 'n':
self.name = from_str[0][0] + from_str[0][-1]
self.MidiNum = STR_TO_MIDI_MAP.get(self.name)
else :
self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
self.MidiNum = STR_TO_MIDI_MAP.get(self.name)
def setChord(self,flag : bool):
self.chord = flag
def setModifier(self,modifier : str):
if self.Modifier == modifier:
return
#remove old
if self.Modifier == '#' :
self.MidiNum = self.MidiNum - 1
elif self.Modifier == '##':
self.MidiNum = self.MidiNum - 2
elif self.Modifier == 'b' :
self.MidiNum = self.MidiNum + 1
elif self.Modifier == 'bb':
self.MidiNum = self.MidiNum + 2
#set new
if modifier == '#' :
self.MidiNum = self.MidiNum + 1
elif modifier == '##':
self.MidiNum = self.MidiNum + 2
elif modifier == 'b' :
self.MidiNum = self.MidiNum - 1
elif modifier == 'bb':
self.MidiNum = self.MidiNum - 2
self.Modifier = modifier
self.name = self.name[0] + modifier + self.name[-1]
def __eq__(self, __rhs) -> bool:
'''
operator=.
true only if this two notes has same pitch, same length, same velocity
'''
return self.MidiNum == __rhs.MidiNum and self.start == __rhs.start and self.end == __rhs.end and self.velocity == __rhs.length
def SamePitch(self, __cmp) -> bool:
'''
true if this two notes has same pitch
'''
return self.MidiNum == __cmp.MidiNum
def __str__(self) -> str:
return('Note: {}, Name: {}, Start: {}ms, End: {}ms, Velo: {}'.format(self.MidiNum,self.name,self.start,self.end,self.velocity))
if __name__ == '__main__':
print('not the main module only for temperament check')
for i in NUM_NOTENAME_LIST:
print(i,end=', ')
| [
"noreply@github.com"
] | SWC-Painist.noreply@github.com |
ce0c8512a2373bffac1635858e730b38b204d9dd | 37bc60b070be22a5e22321655c8490df2285b07c | /translate.py | 5f414fdbd164ef00cfcaa2c3eddd47a0378d4518 | [] | no_license | TheWover/DidierStevensSuite | 2ab56d33472a242a5d49359d643c4e669c7a7e04 | 17f08aee76b98f95fc94b4e9c6131786d62b4716 | refs/heads/master | 2020-07-30T01:00:00.497949 | 2019-09-17T18:46:00 | 2019-09-17T18:46:00 | 210,027,232 | 1 | 0 | null | 2019-09-21T17:32:54 | 2019-09-21T17:32:53 | null | UTF-8 | Python | false | false | 27,454 | py | #!/usr/bin/env python
__description__ = 'Translate bytes according to a Python expression'
__author__ = 'Didier Stevens'
__version__ = '2.5.6'
__date__ = '2019/02/26'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
No input validation (neither output) is performed by this program: it contains injection vulnerabilities
Developed with Python 2.7, tested with 2.7 and 3.3
History:
2007/08/20: start
2014/02/24: rewrite
2014/02/27: manual
2015/11/04: added option -f
2015/11/05: continue
2016/02/20: added option -r
2016/04/25: 2.3.0 added StdoutWriteChunked() and option -R
2016/09/07: 2.3.1 added option -e
2016/09/09: continue
2016/09/13: man
2017/02/10: 2.4.0 added input filename # support
2017/02/26: fixed Python 3 str vs bytes bug
2017/06/04: 2.5.0 added #e# support
2017/06/16: continued #e# support
2017/07/29: added -2 option
2017/08/09: 2.5.1 #e# chr can take a second argument
2017/09/09: added functions Sani1 and Sani2 to help with input/output sanitization
2018/01/29: 2.5.2 added functions GzipD and ZlibD; and fixed stdin/stdout for Python 3
2018/02/12: 2.5.3 when the Python expression returns None (in stead of a byte value), no byte is written to output.
2018/03/05: 2.5.4 updated #e# expressions
2018/04/27: added option literalfilenames
2019/02/20: 2.5.5 added ZlibRawD
2019/02/26: 2.5.6 updated help
Todo:
"""
import optparse
import sys
import os
import textwrap
import re
import math
import binascii
import random
import zlib
import gzip
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
def PrintManual():
manual = '''
Manual:
Translate.py is a Python script to perform bitwise operations on files (like XOR, ROL/ROR, ...). You specify the bitwise operation to perform as a Python expression, and pass it as a command-line argument.
translate.py malware -o malware.decoded "byte ^ 0x10"
This will read file malware, perform XOR 0x10 on each byte (this is, expressed in Python: byte ^ 0x10), and write the result to file malware.decoded.
byte is a variable containing the current byte from the input file. Your expression has to evaluate to the modified byte. When your expression evaluates to None, no byte will be written to output. This can be used to delete bytes from the input.
For complex manipulation, you can define your own functions in a script file and load this with translate.py, like this:
translate.py malware -o malware.decoded "Process(byte)" process.py
process.py must contain the definition of function Process. Function Process must return the modified byte.
Another variable is also available: position. This variable contains the position of the current byte in the input file, starting from 0.
If only part of the file has to be manipulated, while leaving the rest unchanged, you can do it like this:
def Process(byte):
if position >= 0x10 and position < 0x20:
return byte ^ 0x10
else:
return byte
This example will perform an XOR 0x10 operation from the 17th byte till the 32nd byte included. All other bytes remain unchanged.
Because Python has built-in shift operators (<< and >>) but no rotate operators, I've defined 2 rotate functions that operate on a byte: rol (rotate left) and ror (rotate right). They accept 2 arguments: the byte to rotate and the number of bit positions to rotate. For example, rol(0x01, 2) gives 0x04.
translate.py malware -o malware.decoded "rol(byte, 2)"
Another function I defined is IFF (the IF Function): IFF(expression, valueTrue, valueFalse). This function allows you to write conditional code without an if statement. When expression evaluates to True, IFF returns valueTrue, otherwise it returns valueFalse.
And yet 2 other functions I defined are Sani1 and Sani2. They can help you with input/output sanitization: Sani1 accepts a byte as input and returns the same byte, except if it is a control character. All control characters (except VT, LF and CR) are replaced by a space character (0x20). Sani2 is like Sani1, but sanitizes even more bytes: it sanitizes control characters like Sani1, and also all bytes equal to 0x80 and higher.
translate.py malware -o malware.decoded "IFF(position >= 0x10 and position < 0x20, byte ^ 0x10, byte)"
By default this program translates individual bytes via the provided Python expression. With option -f (fullread), translate.py reads the input file as one byte sequence and passes it to the function specified by the expression. This function needs to take one string as an argument and return one string (the translated file).
Option -r (regex) uses a regular expression to search through the file and then calls the provided function with a match argument for each matched string. The return value of the function (a string) is used to replace the matched string.
Option -R (filterregex) is similar to option -r (regex), except that it does not operate on the complete file, but on the file filtered for the regex.
Here are 2 examples with a regex. The input file (test-ah.txt) contains the following: 1234&H41&H42&H43&H444321
The first command will search for strings &Hxx and replace them with the character represented in ASCII by hexadecimal number xx:
translate.py -r "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: 1234ABCD4321
The second command is exactly the same as the first command, except that it uses option -R in stead or -r:
translate.py -R "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: ABCD
Option -e (execute) is used to execute Python commands before the command is executed. This can, for example, be used to import modules.
Here is an example to decompress a Flash file (.swf):
translate.py -f -e "import zlib" sample.swf "lambda b: zlib.decompress(b[8:])"
You can use build in function ZlibD too, and ZlibRawD for inflating without header, and GzipD for gzip decompression.
A second file can be used as input with option -2. The value of the current byte of the second input file is stored in variable byte2 (this too advances byte per byte together with the primary input file).
Example:
translate.py -2 #021230 #Scbpbt "byte + byte2 - 0x30"
Output:
Secret
In stead of using an input filename, the content can also be passed in the argument. To achieve this, prefix the text with character #.
If the text to pass via the argument contains control characters or non-printable characters, hexadecimal (#h#) or base64 (#b#) can be used.
Example:
translate.py #h#89B5B4AEFDB4AEFDBCFDAEB8BEAFB8A9FC "byte ^0xDD"
Output:
This is a secret!
File arguments that start with #e# are a notational convention to use expressions to generate data. An expression is a single function/string or the concatenation of several functions/strings (using character + as concatenation operator).
Strings can be characters enclosed by single quotes ('example') or hexadecimal strings prefixed by 0x (0xBEEF).
4 functions are available: random, loremipsum, repeat and chr.
Function random takes exactly one argument: an integer (with value 1 or more). Integers can be specified using decimal notation or hexadecimal notation (prefix 0x).
The random function generates a sequence of bytes with a random value (between 0 and 255), the argument specifies how many bytes need to be generated. Remark that the random number generator that is used is just the Python random number generator, not a cryptographic random number generator.
Example:
tool.py #e#random(100)
will make the tool process data consisting of a sequence of 100 random bytes.
Function loremipsum takes exactly one argument: an integer (with value 1 or more).
The loremipsum function generates "lorem ipsum" text (fake latin), the argument specifies the number of sentences to generate.
Example: #e#loremipsum(2) generates this text:
Ipsum commodo proin pulvinar hac vel nunc dignissim neque eget odio erat magna lorem urna cursus fusce facilisis porttitor congue eleifend taciti. Turpis duis suscipit facilisi tristique dictum praesent natoque sem mi egestas venenatis per dui sit sodales est condimentum habitasse ipsum phasellus non bibendum hendrerit.
Function chr takes one argument or two arguments.
chr with one argument takes an integer between 0 and 255, and generates a single byte with the value specified by the integer.
chr with two arguments takes two integers between 0 and 255, and generates a byte sequence with the values specified by the integers.
For example #e#chr(0x41,0x45) generates data ABCDE.
Function repeat takes two arguments: an integer (with value 1 or more) and a byte sequence. This byte sequence can be a quoted string of characters (single quotes), like 'ABCDE' or an hexadecimal string prefixed with 0x, like 0x4142434445.
The repeat function will create a sequence of bytes consisting of the provided byte sequence (the second argument) repeated as many times as specified by the first argument.
For example, #e#repeat(3, 'AB') generates byte sequence ABABAB.
When more than one function needs to be used, the byte sequences generated by the functions can be concatenated with the + operator.
For example, #e#repeat(10,0xFF)+random(100) will generate a byte sequence of 10 FF bytes followed by 100 random bytes.
To prevent the tool from processing file arguments with wildcard characters or special initial characters (@ and #) differently, but to process them as normal files, use option --literalfilenames.
'''
for line in manual.split('\n'):
print(textwrap.fill(line))
def rol(byte, count):
return (byte << count | byte >> (8- count)) & 0xFF
def ror(byte, count):
return (byte >> count | byte << (8- count)) & 0xFF
#Sanitize 1: Sanitize input: return space (0x20) for all control characters, except HT, LF and CR
def Sani1(byte):
if byte in [0x09, 0x0A, 0x0D]:
return byte
if byte < 0x20:
return 0x20
return byte
#Sanitize 2: Sanitize input: return space (0x20) for all bytes equal to 0x80 and higher, and all control characters, except HT, LF and CR
def Sani2(byte):
if byte in [0x09, 0x0A, 0x0D]:
return byte
if byte < 0x20:
return 0x20
if byte >= 0x80:
return 0x20
return byte
def GzipD(data):
return gzip.GzipFile('', 'r', fileobj=StringIO(data)).read()
def ZlibD(data):
return zlib.decompress(data)
def ZlibRawD(data):
return zlib.decompress(data, -8)
# CIC: Call If Callable
def CIC(expression):
if callable(expression):
return expression()
else:
return expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
if expression:
return CIC(valueTrue)
else:
return CIC(valueFalse)
#Convert String To Bytes If Python 3
def CS2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
def Output(fOut, data):
if fOut != sys.stdout:
fOut.write(data)
else:
StdoutWriteChunked(data)
def LoremIpsumSentence(minimum, maximum):
words = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipiscing', 'elit', 'etiam', 'tortor', 'metus', 'cursus', 'sed', 'sollicitudin', 'ac', 'sagittis', 'eget', 'massa', 'praesent', 'sem', 'fermentum', 'dignissim', 'in', 'vel', 'augue', 'scelerisque', 'auctor', 'libero', 'nam', 'a', 'gravida', 'odio', 'duis', 'vestibulum', 'vulputate', 'quam', 'nec', 'cras', 'nibh', 'feugiat', 'ut', 'vitae', 'ornare', 'justo', 'orci', 'varius', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes', 'nascetur', 'ridiculus', 'mus', 'curabitur', 'nisl', 'egestas', 'urna', 'iaculis', 'lectus', 'maecenas', 'ultrices', 'velit', 'eu', 'porta', 'hac', 'habitasse', 'platea', 'dictumst', 'integer', 'id', 'commodo', 'mauris', 'interdum', 'malesuada', 'fames', 'ante', 'primis', 'faucibus', 'accumsan', 'pharetra', 'aliquam', 'nunc', 'at', 'est', 'non', 'leo', 'nulla', 'sodales', 'porttitor', 'facilisis', 'aenean', 'condimentum', 'rutrum', 'facilisi', 'tincidunt', 'laoreet', 'ultricies', 'neque', 'diam', 'euismod', 'consequat', 'tempor', 'elementum', 'lobortis', 'erat', 'ligula', 'risus', 'donec', 'phasellus', 'quisque', 'vivamus', 'pellentesque', 'tristique', 'venenatis', 'purus', 'mi', 'dictum', 'posuere', 'fringilla', 'quis', 'magna', 'pretium', 'felis', 'pulvinar', 'lacinia', 'proin', 'viverra', 'lacus', 'suscipit', 'aliquet', 'dui', 'molestie', 'dapibus', 'mollis', 'suspendisse', 'sapien', 'blandit', 'morbi', 'tellus', 'enim', 'maximus', 'semper', 'arcu', 'bibendum', 'convallis', 'hendrerit', 'imperdiet', 'finibus', 'fusce', 'congue', 'ullamcorper', 'placerat', 'nullam', 'eros', 'habitant', 'senectus', 'netus', 'turpis', 'luctus', 'volutpat', 'rhoncus', 'mattis', 'nisi', 'ex', 'tempus', 'eleifend', 'vehicula', 'class', 'aptent', 'taciti', 'sociosqu', 'ad', 'litora', 'torquent', 'per', 'conubia', 'nostra', 'inceptos', 'himenaeos']
sample = random.sample(words, random.randint(minimum, maximum))
sample[0] = sample[0].capitalize()
return ' '.join(sample) + '.'
def LoremIpsum(sentences):
return ' '.join([LoremIpsumSentence(15, 30) for i in range(sentences)])
STATE_START = 0
STATE_IDENTIFIER = 1
STATE_STRING = 2
STATE_SPECIAL_CHAR = 3
STATE_ERROR = 4
FUNCTIONNAME_REPEAT = 'repeat'
FUNCTIONNAME_RANDOM = 'random'
FUNCTIONNAME_CHR = 'chr'
FUNCTIONNAME_LOREMIPSUM = 'loremipsum'
def Tokenize(expression):
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or char.lower() >= 'a' and char.lower() <= 'z':
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
else:
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
return result
def ParseFunction(tokens):
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_STRING or tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x'):
return [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
return None, tokens
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
return None, tokens
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
return None, tokens
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
return None, tokens
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
return None, tokens
return [[function, arguments], tokens]
def Parse(expression):
tokens = Tokenize(expression)
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
functioncall, tokens = ParseFunction(tokens)
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
def InterpretInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
try:
return int(token[1])
except:
return None
def Hex2Bytes(hexadecimal):
if len(hexadecimal) % 2 == 1:
hexadecimal = '0' + hexadecimal
try:
return binascii.a2b_hex(hexadecimal)
except:
return None
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
bytes = Hex2Bytes(token[1][2:])
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 0x100 + ord(byte)
return integer
def InterpretNumber(token):
number = InterpretInteger(token)
if number == None:
return InterpretHexInteger(token)
else:
return number
def InterpretBytes(token):
if token[0] == STATE_STRING:
return token[1]
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
return Hex2Bytes(token[1][2:])
def CheckFunction(functionname, arguments, countarguments, maxcountarguments=None):
if maxcountarguments == None:
if countarguments == 0 and len(arguments) != 0:
print('Error: function %s takes no arguments, %d are given' % (functionname, len(arguments)))
return True
if countarguments == 1 and len(arguments) != 1:
print('Error: function %s takes 1 argument, %d are given' % (functionname, len(arguments)))
return True
if countarguments != len(arguments):
print('Error: function %s takes %d arguments, %d are given' % (functionname, countarguments, len(arguments)))
return True
else:
if len(arguments) < countarguments or len(arguments) > maxcountarguments:
print('Error: function %s takes between %d and %d arguments, %d are given' % (functionname, countarguments, maxcountarguments, len(arguments)))
return True
return False
def CheckNumber(argument, minimum=None, maximum=None):
number = InterpretNumber(argument)
if number == None:
print('Error: argument should be a number: %s' % argument[1])
return None
if minimum != None and number < minimum:
print('Error: argument should be minimum %d: %d' % (minimum, number))
return None
if maximum != None and number > maximum:
print('Error: argument should be maximum %d: %d' % (maximum, number))
return None
return number
def Interpret(expression):
functioncalls = Parse(expression)
if functioncalls == None:
return None
decoded = ''
for functioncall in functioncalls:
functionname, arguments = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
return None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
return None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
return None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
return None
number = CheckNumber(arguments[0], minimum=1, maximum=255)
if number == None:
return None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=1, maximum=255)
if number2 == None:
return None
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
print('Error: unknown function: %s' % functionname)
return None
return decoded
def FilenameCheckHash(filename):
if filename.startswith('#h#'):
return Hex2Bytes(filename[3:])
elif filename.startswith('#b#'):
try:
return binascii.a2b_base64(filename[3:])
except:
return None
elif filename.startswith('#e#'):
return Interpret(filename[3:])
elif filename.startswith('#'):
return filename[1:]
else:
return ''
def Transform(fIn, fIn2, fOut, commandPython):
position = 0
while True:
inbyte = fIn.read(1)
if not inbyte:
break
byte = ord(inbyte)
if fIn2 != None:
inbyte2 = fIn2.read(1)
byte2 = ord(inbyte2)
outbyte = eval(commandPython)
if outbyte != None:
fOut.write(chr(outbyte))
position += 1
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
if sys.version_info[0] > 2:
sys.stdout.buffer.write(data)
else:
while data != '':
sys.stdout.write(data[0:10000])
try:
sys.stdout.flush()
except IOError:
return
data = data[10000:]
def Translate(filenameInput, commandPython, options):
if filenameInput == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
try:
fIn = sys.stdin.buffer
except:
fIn = sys.stdin
else:
decoded = FilenameCheckHash(filenameInput)
if options.literalfilenames or decoded == '':
fIn = open(filenameInput, 'rb')
elif decoded == None:
print('Error parsing filename: ' + filenameInput)
return
else:
fIn = StringIO(decoded)
if options.secondbytestream != '':
decoded = FilenameCheckHash(options.secondbytestream)
if options.literalfilenames or decoded == '':
fIn2 = open(options.secondbytestream, 'rb')
elif decoded == None:
print('Error parsing filename: ' + options.secondbytestream)
return
else:
fIn2 = StringIO(decoded)
else:
fIn2 = None
if options.output == '':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
fOut = sys.stdout
else:
fOut = open(options.output, 'wb')
if options.script != '':
execfile(options.script, globals())
if options.execute != '':
exec(options.execute, globals())
if options.fullread:
Output(fOut, eval(commandPython)(fIn.read()))
elif options.regex != '' or options.filterregex != '':
content = fIn.read()
if options.regex != '':
Output(fOut, re.sub(options.regex, eval(commandPython), content))
else:
Output(fOut, re.sub(options.filterregex, eval(commandPython), ''.join([x.group() for x in re.finditer(options.filterregex, content)])))
else:
Transform(fIn, fIn2, fOut, commandPython)
if fIn != sys.stdin:
fIn.close()
if fIn2 != None:
fIn2.close()
if fOut != sys.stdout:
fOut.close()
def Main():
moredesc = '''
Example: translate.py -o svchost.exe.dec svchost.exe 'byte ^ 0x10'
"byte" is the current byte in the file, 'byte ^ 0x10' does an X0R 0x10
Extra functions:
rol(byte, count)
ror(byte, count)
IFF(expression, valueTrue, valueFalse)
Sani1(byte)
Sani2(byte)
ZlibD(bytes)
ZlibRawD(bytes)
GzipD(bytes)
Variable "position" is an index into the input file, starting at 0
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
oParser = optparse.OptionParser(usage='usage: %prog [options] [file-in] [file-out] command [script]\n' + __description__ + moredesc, version='%prog ' + __version__)
oParser.add_option('-o', '--output', default='', help='Output file (default is stdout)')
oParser.add_option('-s', '--script', default='', help='Script with definitions to include')
oParser.add_option('-f', '--fullread', action='store_true', default=False, help='Full read of the file')
oParser.add_option('-r', '--regex', default='', help='Regex to search input file for and apply function to')
oParser.add_option('-R', '--filterregex', default='', help='Regex to filter input file for and apply function to')
oParser.add_option('-e', '--execute', default='', help='Commands to execute')
oParser.add_option('-2', '--secondbytestream', default='', help='Second bytestream')
oParser.add_option('-l', '--literalfilenames', action='store_true', default=False, help='Do not interpret filenames')
oParser.add_option('-m', '--man', action='store_true', default=False, help='print manual')
(options, args) = oParser.parse_args()
if options.man:
oParser.print_help()
PrintManual()
return
if len(args) == 0 or len(args) > 4:
oParser.print_help()
elif len(args) == 1:
Translate('', args[0], options)
elif len(args) == 2:
Translate(args[0], args[1], options)
elif len(args) == 3:
options.output = args[1]
Translate(args[0], args[2], options)
elif len(args) == 4:
options.output = args[1]
options.script = args[3]
Translate(args[0], args[2], options)
if __name__ == '__main__':
Main()
| [
"didier.stevens@gmail.com"
] | didier.stevens@gmail.com |
7ab15aaa9ab86f2169fe7e677124ec25d9b66c6e | 6071edc45eace43e4c9335650986c7588652a714 | /NER-AV.py | 7094e5d9ba46880ceadb3e9846542cde72daefe5 | [] | no_license | Chadni-Islam/cybersecproject1 | 47e685ab34e544f2229bac39895a6bac3275a9d8 | e3b5892295637f814924c66e9e9bfe37dea27bca | refs/heads/master | 2020-04-12T00:41:51.842388 | 2018-12-16T06:09:56 | 2018-12-16T06:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,170 | py | # Named Entity Recognintion on Alien Vault blog posts
# Scrapes info about malware/threats from page and attempts to extract threat data
import utils
import spacy
import pandas as pd
import csv
from bs4 import BeautifulSoup
import requests
import pprint
from OTXv2 import OTXv2
from OTXv2 import IndicatorTypes
from googleapiclient.discovery import build
# Initialise nlp corpus
nlp = spacy.load('en_core_web_sm')
# Google api key and search engine id
my_api_key = 'AIzaSyAVfCR6inp74mBkr7w12TVLH3l4vkWwsiw'
my_cse_id = '003774403560526411905:wsb8ncz3hw4'
# Performs google search using google custom search API
def google_search(search_term, api_key, cse_id, **kwargs):
service = build("customsearch", "v1", developerKey=api_key)
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
return res['items']
# initialise Open Threat Exchange API
otx = OTXv2("1bc976440bad33a81703fcec442f158153fe93976770874ea1af79680a84f0c7")
# open a list of countries
f = open("other/country.csv", "rb")
# Keywords to detect if attack vector or asset
# Add space in front of try so it isn't picked up as a part of a word
attackKeys = ["attacker", "trick", " try ", " tries ", "attempt", "launch"]
assetKeys = ["result", "ability", "grant", "installation", "corrupt", "poison", "after ", "information"]
# Get some blog pages
links = utils.getAlienVaultPage(3)
otxurl = "https://otx.alienvault.com/pulse/"
for link in links:
# Scrape blog page
text = utils.scrapeAlienVault(link)
# Information we can extract
title = [] # Extracted from TITLE
rawtext = [] # Raw paragraph text, so accuracy can be checked
names = [] # NER - ORG
country = [] # NER - GPE
date = [] # NER - DATE
attackvectors = [] # Attack Vectors (found with keywords)
assets = [] # Assets (found with keywords)
capeclink = [] # CAPEC Article found (to see accuraccy)
likelihood = [] # Obtain from CAPEC
severity = [] # Obtain from CAPEC
risk = [] # Obtain from likelihood and severity
maliciousness = [] # 1 - least, 5 - maximum
indicators = [] # For now extracted from OTX links
# Extracting csv name
temp = link.split('-')
csvName = temp[-5] + '-' + temp[-4] + '-' + temp[-3] + '-' + temp[-2] + '-' + temp[-1] + ".csv"
# Iterates through pparagraphs of blog post
for count, t in enumerate(text):
# Create a list of sentences abstraction
sents = t.split(". ")
# Ignore paragraphs with only one sentence
if len(sents) < 3:
continue
# TITLE
# Extracting title based on delimeters
temp = t.split('%')[0]
temp = temp.split('-')
# If no '-', then no relevant title/category (for now)
if len(temp) == 1:
continue
else:
title.append(temp[-1])
# Cut title, perform nlp
t = t.split('%')[1]
doc = nlp(t)
# Append Raw text
rawtext.append(t)
# NAMES COUNTRIES DATES
# Extracting names/countries/dates
tempN, tempC, tempD = "", "", ""
for X in doc.ents:
# Threat name
if X.label_ == 'ORG':
# Ignoring 'Open Threat Exchange'
if (X.text == "Open Threat Exchange"):
continue
tempN += X.text + ', '
# Country/Area
elif X.label_ == 'GPE':
# Check that entity is actually a country
isCountry = False
for row in f:
row = str(row)
if X.text.lower() in row.lower():
isCountry = True
tempC += row.split(",")[2] + ' '
# Else not a country, so assume ORG
if not isCountry:
tempN += X.text + ', '
# Date
elif X.label_ == 'DATE':
tempD += X.text + ', '
names.append(tempN)
country.append(tempC)
date.append(tempD)
# INDICATORS
# Extracting OTX links for indicators
if (otxurl in t):
pulseID = t.split(otxurl)[-1]
tempI = ""
# Get all indicators for a specific pulse
results = otx.get_pulse_indicators(pulseID)
for count, indicator in enumerate(results):
# Only get first 5 for now, some have too many
if count > 5:
break
tempI += indicator["indicator"] + " (" + indicator["type"] + ")\n"
indicators.append(tempI)
else:
indicators.append("")
# MALICIOUSNESS
# Identify maliciousness by keywords which follow mitre rules from: www.mitre.org/sites/default/files/pdf/10_2914.pdf
malic = 0
key2 = ["target", "data", "information", "access"]
key3 = ["backdoor", "install"]
key4 = ["military", "government", "nation", "defense", "defence"]
for k in key2:
if k in t:
malic = 2
break
for k in key3:
if k in t:
malic = 3
break
for k in key4:
if k in t:
malic += 1
break
# If still 0, couldn't identify
if malic == 0:
malic = '-'
maliciousness.append(malic)
# ATTACKVECTORS ASSETS LIKELIHOOD SEVERITY
asses = ""
attacks = ""
caplink = ""
likeli = ""
sev = ""
# iterate through sentences
for i in sents:
# apply nlp
doc = nlp(i)
# Iterate through attack keywords
for j in attackKeys:
# If keyword in sentence
if j in i.lower():
# Iterate through nlp tokens
for count, token in enumerate(doc):
# Only keep nouns and verbs
if token.pos_ == "NOUN" or token.pos_ == "VERB":
attacks += token.text + ' '
# Break after first keyword found
break
# Iterate through asset keywords
for j in assetKeys:
# if keyword in sentence
if j in i.lower():
short = "" # A shorter version of the sentence
# Iterate through nlp tokens
c = 0
for count, token in enumerate(doc):
# Only keep nouns and verbs
if token.pos_ == "NOUN" or token.pos_ == "VERB":
asses += token.text + ' '
c += 1
# Only take 3 for best search results
if c < 4:
short += token.text + ' '
# Search for a CAPEC resource
query = "capec.mitre.org " + short
res = google_search(query, my_api_key, my_cse_id, num=10)
# Get first relevant link
for r in res:
# Only take capec data definitions
if "capec.mitre.org/data/definition" in r['link']:
caplink = r['title']
# Get page
page = requests.get(r['link'])
soup = BeautifulSoup(page.text, 'html.parser')
# Take first two detail parameters
for count, rf in enumerate(soup.find_all(id="Detail")):
tex = rf.find('p')
if count == 0:
try:
likeli = tex.get_text()
except AttributeError:
pass
elif count == 1:
try:
sev = tex.get_text()
except AttributeError:
pass
else:
break
break
break
attackvectors.append(attacks)
assets.append(asses)
capeclink.append(caplink)
likelihood.append(likeli)
severity.append(sev)
# RISK
# Calculated from likelihood and severity
# Options Very Low, Low, Medium, High, Very High
# Risk Matrix taken from https://itsecurity.uiowa.edu/resources/everyone/determining-risk-levels
ris = ""
if ((sev == "Very Low") or (sev == "Low" and (likeli == "Medium" or likeli == "Low" or likeli == "Very Low")) or (sev == "Medium" and likeli == "Very Low")):
ris = "Low"
elif ((sev == "Low" and (likeli == "Very High" or likeli == "High")) or (sev == "Medium" and (likeli == "High" or likeli == "Medium" or likeli == "Low")) or (sev == "High" and (likeli == "Medium" or likeli == "Low" or likeli == "Very Low")) or (sev == "Very High" and (likeli == "Low" or likeli == "Very Low"))):
ris = "Medium"
elif ((sev == "Medium" and likeli == "Very High") or (sev == "High" and (likeli == "Very High" or likeli == "High")) or (sev == "Very High" and (likeli == "Very High" or likeli == "High" or likeli == "Medium"))):
ris = "High"
risk.append(ris)
# Combine data into a pandas dataframe
ThreatInfo = pd.DataFrame({
"Title": title,
"RawText": rawtext,
"Names": names,
"Country": country,
"Date": date,
"Attack Vectors": attackvectors,
"Assets": assets,
"Likelihood": likelihood,
"Severity": severity,
"Risk": risk,
"Maliciousness": maliciousness,
"Indicators": indicators
})
ThreatInfo.to_csv("output/3/" + csvName, encoding='utf-8', columns=["Title", "RawText", "Date", "Names", "Country", "Attack Vectors", "Assets", "Likelihood", "Severity", "Risk", "Maliciousness", "Indicators"])
| [
"a1706489@student.adelaide.edu.au"
] | a1706489@student.adelaide.edu.au |
c58f1c2970ecc1f52452603ec752fee605c737c0 | 053221e1d90b365f68701dbd5b6466f30d1f6fd7 | /Day2/vd9.py | fd7cce53fa7b1ae816f5b6dbeb603d15b41e478e | [] | no_license | pytutorial/py2011E | eceb4d563cc807294b08b818edadd521ed8da488 | 306437369b0bfe55a2fa827b098283856242e731 | refs/heads/main | 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # vd9.py
# Chương trình dự báo thời tiết
# Cho T(độ C), w (km/h), p(atm)
# In ra : Có mưa ?
T = float(input('Nhiệt độ (C):'))
w = float(input('Tốc độ gió (km/h):'))
p = float(input('Áp suất khí quyển(atm):'))
rain = False # default
if T >= 21:
if w >= 3 and p > 0.87:
rain = True
else:
if w >= 7 or p > 1.04:
rain = True
print(rain)
| [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
c830596b2f898d2ead4f94528ad2f3100de2be7b | 7786de317489fa258c7504b2fc96341e970e45db | /tests/unit/test_cf_storage_object.py | 40cecc402ed6e56b9c96465a85a7524220df10d6 | [
"MIT"
] | permissive | tvaught/pyrax | 7207158d832721ca6ccde2e9c328855155a60915 | 8a310435239c536921490e04a984ff8a82b18eb8 | refs/heads/master | 2020-12-25T10:10:54.714401 | 2013-05-30T19:56:21 | 2013-05-30T19:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.cf_wrapper.storage_object import StorageObject
import pyrax.exceptions as exc
from tests.unit.fakes import FakeContainer
from tests.unit.fakes import FakeIdentity
from tests.unit.fakes import FakeResponse
class CF_StorageObjectTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
reload(pyrax)
self.orig_connect_to_cloudservers = pyrax.connect_to_cloudservers
self.orig_connect_to_cloudfiles = pyrax.connect_to_cloudfiles
self.orig_connect_to_cloud_databases = pyrax.connect_to_cloud_databases
ctclb = pyrax.connect_to_cloud_loadbalancers
self.orig_connect_to_cloud_loadbalancers = ctclb
ctcbs = pyrax.connect_to_cloud_blockstorage
self.orig_connect_to_cloud_blockstorage = ctcbs
super(CF_StorageObjectTest, self).__init__(*args, **kwargs)
self.obj_name = "testobj"
self.container_name = "testcont"
pyrax.connect_to_cloudservers = Mock()
pyrax.connect_to_cloud_loadbalancers = Mock()
pyrax.connect_to_cloud_databases = Mock()
pyrax.connect_to_cloud_blockstorage = Mock()
@patch('pyrax.cf_wrapper.client.Container', new=FakeContainer)
def setUp(self):
pyrax.connect_to_cloudservers = Mock()
pyrax.connect_to_cloud_loadbalancers = Mock()
pyrax.connect_to_cloud_databases = Mock()
pyrax.connect_to_cloud_blockstorage = Mock()
pyrax.clear_credentials()
pyrax.identity = FakeIdentity()
pyrax.set_credentials("fakeuser", "fakeapikey")
pyrax.connect_to_cloudfiles()
self.client = pyrax.cloudfiles
self.container = FakeContainer(self.client, self.container_name, 0, 0)
self.container.name = self.container_name
self.client.get_container = Mock(return_value=self.container)
self.client.connection.get_container = Mock()
self.client.connection.head_object = Mock()
objs = [{"name": self.obj_name, "content_type": "test/test",
"bytes": 444, "hash": "abcdef0123456789"}]
self.client.connection.head_object.return_value = ({}, objs)
self.client.connection.get_container.return_value = ({}, objs)
self.storage_object = self.client.get_object(self.container, "testobj")
self.client._container_cache = {}
self.container.object_cache = {}
def tearDown(self):
self.client = None
self.container = None
self.storage_object = None
pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
octclb = self.orig_connect_to_cloud_loadbalancers
pyrax.connect_to_cloud_loadbalancers = octclb
octcbs = self.orig_connect_to_cloud_blockstorage
pyrax.connect_to_cloud_blockstorage = octcbs
def test_read_attdict(self):
tname = "something"
ttype = "foo/bar"
tbytes = 12345
tlastmodified = "2222-02-22T22:22:22.222222"
tetag = "123123123"
dct = {"name": tname, "content_type": ttype, "bytes": tbytes,
"last_modified": tlastmodified, "hash": tetag}
obj = self.storage_object
obj._read_attdict(dct)
self.assertEqual(obj.name, tname)
self.assertEqual(obj.content_type, ttype)
self.assertEqual(obj.total_bytes, tbytes)
self.assertEqual(obj.last_modified, tlastmodified)
self.assertEqual(obj.etag, tetag)
def test_subdir(self):
tname = "something"
dct = {"subdir": tname}
obj = self.storage_object
obj._read_attdict(dct)
self.assertEqual(obj.name, tname)
def test_get(self):
obj = self.storage_object
obj.client.connection.get_object = Mock()
meta = {"a": "b"}
data = "This is the contents of the file"
obj.client.connection.get_object.return_value = (meta, data)
ret = obj.get()
self.assertEqual(ret, data)
ret = obj.get(include_meta=True)
self.assertEqual(ret, (meta, data))
def test_delete(self):
obj = self.storage_object
obj.client.connection.delete_object = Mock()
obj.delete()
obj.client.connection.delete_object.assert_called_with(
obj.container.name, obj.name)
def test_purge(self):
obj = self.storage_object
cont = obj.container
cont.cdn_uri = None
self.assertRaises(exc.NotCDNEnabled, obj.purge)
cont.cdn_uri = "http://example.com"
obj.client.connection.cdn_request = Mock()
obj.purge()
obj.client.connection.cdn_request.assert_called_with("DELETE",
cont.name, obj.name, hdrs={})
def test_get_metadata(self):
obj = self.storage_object
obj.client.connection.head_object = Mock()
obj.client.connection.head_object.return_value = {
"X-Object-Meta-Foo": "yes",
"Some-Other-Key": "no"}
meta = obj.get_metadata()
self.assertEqual(meta, {"X-Object-Meta-Foo": "yes"})
def test_set_metadata(self):
obj = self.storage_object
obj.client.connection.post_object = Mock()
obj.client.connection.head_object = Mock(return_value={})
obj.set_metadata({"newkey": "newval"})
obj.client.connection.post_object.assert_called_with(obj.container.name,
obj.name, {"x-object-meta-newkey": "newval"})
def test_remove_metadata_key(self):
obj = self.storage_object
obj.client.connection.post_object = Mock()
obj.client.connection.head_object = Mock(return_value={})
obj.remove_metadata_key("newkey")
obj.client.connection.post_object.assert_called_with(obj.container.name,
obj.name, {})
def test_change_content_type(self):
obj = self.storage_object
obj.client.change_object_content_type = Mock()
obj.change_content_type("foo")
obj.client.change_object_content_type.assert_called_once_with(
obj.container, obj, new_ctype="foo", guess=False)
def test_get_temp_url(self):
obj = self.storage_object
obj.client.get_temp_url = Mock()
secs = random.randint(1, 1000)
obj.get_temp_url(seconds=secs)
obj.client.get_temp_url.assert_called_with(obj.container, obj,
seconds=secs, method="GET")
def test_repr(self):
obj = self.storage_object
rep = obj.__repr__()
self.assert_("<Object " in rep)
self.assert_(obj.name in rep)
self.assert_(obj.content_type in rep)
if __name__ == "__main__":
unittest.main()
| [
"ed@leafe.com"
] | ed@leafe.com |
d59306979796aafc1ce71802b7397335571e7929 | e4df66483ef535aa89e6348b72a7d683f472b034 | /products/migrations/0004_auto_20210703_1331.py | b7d13cbbb8a1cea2c4dcab80894d85b6efbcce80 | [] | no_license | Summersby95/james-boutique | 595b083c996dfd2f78f6912058b83118e77627a2 | ceeeddd796fe9a807d24d4ed222536762e565cf1 | refs/heads/master | 2023-06-20T09:27:54.979372 | 2021-07-14T21:52:42 | 2021-07-14T21:52:42 | 381,137,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.2.4 on 2021-07-03 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20210629_1032'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AddField(
model_name='product',
name='has_sizes',
field=models.BooleanField(blank=True, default=False, null=True),
),
]
| [
"47246572+BigbyWolf95@users.noreply.github.com"
] | 47246572+BigbyWolf95@users.noreply.github.com |
586bebcb89179bee8bb4bb171079ab83e1625aa4 | e0da81d30c5178cee999801c8d6673d782878bfa | /create_tables.py | b7b8e401daa01509b4388a3ae021767ff7398c3c | [] | no_license | as234545/Sparkify_ETL | 22bf7c233dcf7f53e1870c304fd564f5e16a0628 | ecc6758e44e79736f4ad8d3b0d5bfb86be9234e9 | refs/heads/master | 2022-10-17T08:50:40.238072 | 2020-06-11T18:16:31 | 2020-06-11T18:16:31 | 262,396,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
"""
- Creates and connects to the sparkifydb
- Returns the connection and cursor to sparkifydb
"""
# connect to default database
conn = psycopg2.connect("host=127.0.0.1 dbname=postgres user=[] password=[]")
conn.set_session(autocommit=True)
cur = conn.cursor()
# create sparkify database with UTF8 encoding
cur.execute("DROP DATABASE IF EXISTS sparkifydb")
cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
# close connection to default database
conn.close()
# connect to sparkify database
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=postgres password=postgres")
cur = conn.cursor()
return cur, conn
def drop_tables(cur, conn):
"""
Drops each table using the queries in `drop_table_queries` list.
"""
for query in drop_table_queries:
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Creates each table using the queries in `create_table_queries` list.
"""
for query in create_table_queries:
cur.execute(query)
conn.commit()
def main():
"""
- Drops (if exists) and Creates the sparkify database.
- Establishes connection with the sparkify database and gets
cursor to it.
- Drops all the tables.
- Creates all tables needed.
- Finally, closes the connection.
"""
cur, conn = create_database()
drop_tables(cur, conn)
create_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | as234545.noreply@github.com |
fd2de93e61b8ec7175144413c12aa76acc68faf3 | 16841aa873355de23833f4a78e77cf7440345f6d | /code/server/es_run_all.py | e6aba881549458302a29449b1c089f8ed433b300 | [] | no_license | ShyGuyPy/Shiny_Forecasting_Automation | bf42ad603fbcd3c82849c79e4667e0c4391d428d | 60f060bacf90ed593c92e2f17092c44480b2fd77 | refs/heads/master | 2023-05-04T22:05:06.680250 | 2023-04-18T19:50:03 | 2023-04-18T19:50:03 | 224,223,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | ##this is a workaronud to an issue were win32com module would not
#import properly whe run through r reticulate module
import win32com.client
import win32gui as wg
import win32con
import time
#
# if (__name__ == '__main__'):
def open_and_ID(prog_ID, win_ID):
program_handle = win32com.client.Dispatch(prog_ID)
app_ID = wg.FindWindow(None, win_ID)
print(app_ID)
# wait_time(2)
wg.ShowWindow(app_ID, win32con.SW_MAXIMIZE)
wg.SetActiveWindow(app_ID)
#wg.SetForegroundWindow(app_ID)
#print(program_handle)
return program_handle
def run_by_id(prog_ID, win_ID):
program_handle = win32com.client.Dispatch(prog_ID)
app_ID = wg.FindWindow(None, win_ID)
program_handle.Execute("""ExecuteMenuCommand(6000)""")
def set_and_run(prog_ID, win_ID, SetEndTime, SetStartTime, SetNumSim, SetNumStep):
program_handle = win32com.client.Dispatch(prog_ID)
app_ID = wg.FindWindow(None, win_ID)
# sets the setting parameters into a string that can be fed into the MODL execute
execute_input = """SetRunParameters({}, {}, {}, {})""".format(SetEndTime, SetStartTime, SetNumSim, SetNumStep)
program_handle.Execute(execute_input)
def wait_time(x):
time.sleep(x)
def test_click():
print("click works")
def run_all():
#open model
es_handle = open_and_ID("Extend.application", "ExtendSim")
wait_time(20)
#sets run parameters and then run the model
set_and_run("Extend.application", "ExtendSim", 1000, 0 , 1, 1)
#
wait_time(30)
#run open model
run_by_id("Extend.application", "ExtendSim")
run_all()
| [
"luke.vawter1@gmail.com"
] | luke.vawter1@gmail.com |
2b2292edfd105992c36aa4fca01ce951238696ab | 439add47001009e173418b30cfb820b0e92989ed | /apps/users/urls.py | 03905571d8e624a6de7bd52b2762f03ca522ec43 | [] | no_license | AngelMercado/primeTed | 31b410d7c64da1001f40bae824f7a700f46dcd40 | 7a05d2ea257334cb726d39e80e2209f9cdbf0578 | refs/heads/master | 2021-01-01T17:59:28.360220 | 2017-07-24T17:27:57 | 2017-07-24T17:27:57 | 98,215,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from django.conf.urls import patterns, include, url
from .views import PanelView,RegistrateView,LogOut,LoginView
from apps.home.views import HomeView
urlpatterns = patterns('',
url(r'^$',PanelView.as_view(),name='panel'),
url(r'^login$',LoginView.as_view(),name='login'),
url(r'^registrateGratis$',RegistrateView.as_view(),name='registrate'),
url(r'^inicio$',LogOut,name='logout'),
url(r'^home$',HomeView.as_view(),name='home'),
) | [
"myjava@outlook.es"
] | myjava@outlook.es |
9f17a97976b8031844c5b47af19eedcf16363869 | e03250b86ba042c55f05882998c6a19cd4f39c31 | /sicknote_app_v00_01.py | d3a1e941ef6455949b2f67cfab4e0366544cf4a2 | [] | no_license | nzwi/sicknote-flask-endpoint | a467d519a0fd31b5ff9d45b8dfd8306cb88eadc7 | cf7241c951b04292df9a4b8161446e30db8b4f84 | refs/heads/master | 2020-03-09T06:50:36.832533 | 2018-04-08T14:32:04 | 2018-04-08T14:32:04 | 128,649,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | ##
# Title: Python Flask endpoint within Amazon Virtual Private Cloud (VPC)
# to allow lambda to communicate with ethereum helper functions
# Version: v00_01
# Author: Nzwisisa Chidembo <nzwisisa@gmail.com>
##
from flask import Flask, jsonify, request
# Replace <helper function file> with your helper python file
import <helper function file> as sk
app = Flask(__name__)
@app.route('/', methods=["POST"])
def post():
if request.is_json:
data = request.get_json()
res = sk.lambda_handler(data,[])
return jsonify(res)
else:
return jsonify(state='Request was not JSON')
# Include the internal VPC ip address of your AWS EC2 instant
if __name__ == '__main__':
app.run(host='xxxxxxxxx',debug=True)
| [
"nzwisisa@gmail.com"
] | nzwisisa@gmail.com |
5088ff9a441d0a89a9acc0d64fff0a8dc6f8e028 | 9c0f298d56ef554b6bb004545dcd02988211df7d | /uebung07/uebung07-examples/tasks-show.py | fde8acdb8d610cd2ba32c22be0559d10cc9a70d5 | [] | no_license | n1tr0-5urf3r/InTech-2020 | 96d418360b47c17a7c2e4f00d32680fcb603a802 | 43d5659907586e6f5b55eb872cc8136c0b059678 | refs/heads/master | 2022-11-17T20:58:31.782540 | 2020-07-14T12:05:45 | 2020-07-14T12:05:45 | 259,252,184 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | #!/usr/bin/python3
# coding=utf-8
from tasks_lib import read_all_tasks, get_done_tasks, get_open_tasks
from tasks_lib import print_header, print_tasks, print_footer, print_form, print_navigation
import cgi
form = cgi.FieldStorage(encoding='utf8')
# Welchen Zustand sollen die angezeigten Tasks haben? Default-Wert: all
state = form.getfirst('state', 'all')
all_tasks = read_all_tasks()
# Filtere die Tasks nach dem entsprechenden Zustand
if state == "open":
tasks = get_open_tasks(all_tasks)
prefix = "offene"
elif state == "done":
tasks = get_done_tasks(all_tasks)
prefix = "erledigte"
else:
tasks = all_tasks
prefix = ""
# Ab hier:Ausgabe des HTML-Codes
print_header("{} {} Aufgaben".format(len(tasks), prefix))
print_navigation()
print_tasks(tasks)
print_form()
print_footer()
| [
"fabi@ihlecloud.de"
] | fabi@ihlecloud.de |
8be8b9d514ef8af40f16b0f5750beca00056be18 | 661ccc272af5d72a4aea6cecebd59879ab8458f5 | /test_scores.py | 8bbbb94312640aef4bdc7d5c8d9fba98e5442c39 | [] | no_license | Monitor-Wang/ERMDA | 9e03718292404f5a0a8cf0bb29974ef2ea981675 | cdafa1e3bba24b16f81c427c29009ecbbc716a88 | refs/heads/main | 2023-08-30T19:32:34.156473 | 2021-11-05T12:52:03 | 2021-11-05T12:52:03 | 424,944,508 | 0 | 0 | null | 2021-11-05T12:42:11 | 2021-11-05T12:42:11 | null | UTF-8 | Python | false | false | 1,979 | py | # -*- coding: utf-8 -*-
from sklearn.metrics import roc_auc_score
import numpy as np
def calculate_performace(num, y_pred, y_prob, y_test):
tp = 0
fp = 0
tn = 0
fn = 0
for index in range(num):
if y_test[index] ==1:
if y_test[index] == y_pred[index]:
tp = tp + 1
else:
fn = fn + 1
else:
if y_test[index] == y_pred[index]:
tn = tn + 1
else:
fp = fp + 1
acc = float(tp + tn)/num
try:
precision = float(tp)/(tp + fp)
recall = float(tp)/ (tp + fn)
f1_score = float((2*precision*recall)/(precision+recall))
#MCC = float(tp*tn-fp*fn)/(np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
except ZeroDivisionError:
print("You can't divide by 0.")
precision=recall=f1_score = 100
AUC = roc_auc_score(y_test, y_prob)
return tp, fp, tn, fn, acc, precision, recall, f1_score, AUC
def base_learners_results(metric_dict, fold_num, group_num, f):
for i in range(group_num):
ave_acc = 0
ave_prec = 0
ave_recall = 0
ave_f1_score = 0
ave_auc = 0
ave_sum = 0
bl_metric_list = []
for fold in range(fold_num):
temp_list = metric_dict[fold]
bl_metric_list.append(temp_list[i])
bl_metric_list = np.array(bl_metric_list)
ave_acc = np.mean(bl_metric_list[:,0])
ave_prec = np.mean(bl_metric_list[:,1])
ave_recall = np.mean(bl_metric_list[:,2])
ave_f1_score = np.mean(bl_metric_list[:,3])
ave_auc = np.mean(bl_metric_list[:,4])
ave_sum = np.mean(bl_metric_list[:,5])
f.write('the '+ str(i+1)+ ' base learner proformance: \tAcc\t'+ str(ave_acc)+'\tprec\t'+ str(ave_prec)+ '\trecall\t'+str(ave_recall)+'\tf1_score\t'+str(ave_f1_score)+'\tAUC\t'+ str(ave_auc)+'\tSum\t'+ str(ave_sum)+'\n') | [
"noreply@github.com"
] | Monitor-Wang.noreply@github.com |
449d5c2f3a0a020d0c74ca688990cf14ec87f350 | c99b89e8b4d5ebdae4aaaf26c33dd8075e61b5e4 | /AnchorDxLimsApp/RandDTaskAssignment.py | 852174993b8a463ae1c594f0e83a627c6404016d | [] | no_license | ranandrom/Lims | 1afa9f86829b5c09b10bc802501f745c489045c6 | 8a762cad72a334054f4211e46a4b36b403dc06c2 | refs/heads/master | 2020-03-12T00:14:45.192049 | 2018-04-23T09:44:45 | 2018-04-23T09:44:45 | 128,862,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,184 | py | # encoding: utf-8
from django.shortcuts import render
from AnchorDxLimsApp import models
from itertools import chain
# Create your views here.
#coding:utf-8
from django.shortcuts import render,HttpResponse
# 研发样本实验任务分配首页
def RandDExperimentalTaskAssignmentHomePage(request):
try:
username = request.session['username']
department = request.session['department']
except Exception:
return render(request, "index.html")
else:
print(r'首页,username: ', username, department)
temp = {"username": username, "department": department}
temp_myInfo = models.UserInfo.objects.filter(username=username) # 用户信息
# temp_SystemMessage = models.UserSystemMessage.objects.filter(Receiver=username) # 用户信息
temp_SystemMessage_Unread = models.UserSystemMessage.objects.filter(Receiver=username,
ReadingState='未读') # 用户信息
num_SystemMessage_Unread = len(temp_SystemMessage_Unread)
# 预处理任务列表
Pretreatment_not_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, sample_review='1',
TissueSampleSign=0) # 任务未分配信息
Pretreatment_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1, sample_review='1',
TissueSampleSign=0) # 任务已分配信息
# DNA提取任务列表
# DNA_not_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, TissueSampleSign=1) # 任务未分配信息
temp_not_Pretreatment = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, sample_review='1',
TissueSampleSign=1) # 任务未分配信息
temp_Pretreatment = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=0) # 任务未分配信息
DNA_not_audited = chain(temp_not_Pretreatment, temp_Pretreatment) # 合并所有数据表数据
# DNA_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1, TissueSampleSign=1) # 任务已分配信息
temp_not_Pretreatment_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1,
sample_review='1',
TissueSampleSign=1) # 任务已分配信息
temp_Pretreatment_audited = models.RandDSamplePretreatmentInfo.objects.filter(
Next_TaskProgress_Sign=1) # 任务已分配信息
DNA_audited = chain(temp_not_Pretreatment_audited, temp_Pretreatment_audited) # 合并所有数据表数据
# 预文库构建任务列表
temp_Fin_unaud = models.clinicalSampleInfo.objects.filter(contract_review=0) # 财务未审核信息
temp_Fin_NoPass = models.clinicalSampleInfo.objects.filter(contract_review=2) # 财务审核不通过信息
PreLibCon_not_audited = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=0) # 任务未分配信息
PreLibCon_audited = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=1) # 任务已分配信息
# 终文库构建任务列表
FinLibCon_not_audited = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=0) # 任务未分配信息
FinLibCon_audited = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=1) # 任务已分配信息
# 上机测序任务列表
ComputerSeq_not_audited = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=0) # 任务未分配信息
ComputerSeq_audited = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=1) # 任务已分配信息
# 其他信息列表
# 任务暂停信息
temp_Pretreatment = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=2, sample_review='1') # 预处理任务暂停信息
temp_DNAExtract = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=2) # DNA提取任务暂停信息
temp_PreLibCon = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=2) # 预文库构建任务暂停信息
temp_FinLibCon = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=2) # 终文库构建任务暂停信息
temp_SeqCom = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=2) # 上机测序任务暂停信息
temp_suspend = chain(temp_Pretreatment, temp_DNAExtract, temp_PreLibCon, temp_FinLibCon, temp_SeqCom) # 合并所有数据表数据
# 任务终止信息
# temp_stop = models.clinicalSampleInfo.objects.filter(Next_TaskProgress_Sign=3) # 任务终止信息
temp_Pretreatment_stop = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=3 , sample_review='1') # 预处理任务终止信息
temp_DNAExtract_stop = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=3) # DNA提取任务终止信息
temp_PreLibCon_stop = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=3) # 预文库构建任务终止信息
temp_FinLibCon_stop = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=3) # 终文库构建任务终止信息
temp_SeqCom_stop = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=3) # 上机测序任务终止信息
temp_stop = chain(temp_Pretreatment_stop, temp_DNAExtract_stop, temp_PreLibCon_stop, temp_FinLibCon_stop,
temp_SeqCom_stop) # 合并所有数据表数据
return render(request, "modelspage/RandDExperimentalTaskAssignment.html", {"userinfo": temp,
"Pretreatment_not_audited": Pretreatment_not_audited,
"Pretreatment_audited": Pretreatment_audited,
"DNA_not_audited": DNA_not_audited,
"DNA_audited": DNA_audited,
"PreLibCon_not_audited": PreLibCon_not_audited,
"PreLibCon_audited": PreLibCon_audited,
"FinLibCon_not_audited": FinLibCon_not_audited,
"FinLibCon_audited": FinLibCon_audited,
"ComputerSeq_not_audited": ComputerSeq_not_audited,
"ComputerSeq_audited": ComputerSeq_audited,
"Fin_unaud": temp_Fin_unaud,
"Fin_NoPass": temp_Fin_NoPass,
"suspend": temp_suspend,
"stop": temp_stop,
"myInfo": temp_myInfo,
"SystemMessage": temp_SystemMessage_Unread,
"num_SystemMessage_Unread": num_SystemMessage_Unread})
| [
"ramandrom@139.com"
] | ramandrom@139.com |
cf3ee11aac574e0f1e461602f57fd51ffa9135bb | 4fdc839b92bf50d342467d7f453093fa4233af9d | /templateLoader/help/source/conf.py | b2993fe10b57ab82126175183061902aef62b806 | [] | no_license | lpofredc/Qgis-plugin-templateLoader | f8d848192639018d655eb2ca6c8846d608ad2a4d | c3b46eecd5481693315e7d294cd82a513508bdc8 | refs/heads/master | 2020-03-27T14:13:54.580319 | 2017-05-10T13:51:48 | 2017-05-10T13:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,047 | py | # -*- coding: utf-8 -*-
#
# templateloader documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): 'sphinx.ext.pngmath' was deprecated in Sphinx 1.4 and removed
# in 1.8 in favour of 'sphinx.ext.imgmath' -- confirm which Sphinx version is
# in use before changing this list.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document (the root document of the toctree hierarchy).
master_doc = 'index'

# General information about the project.
# 'copyright' intentionally shadows the builtin of the same name: Sphinx
# requires this exact variable name in conf.py.
project = u'templateloader'
copyright = u'2013, PnC'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'templateclassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'templateloader.tex', u'templateloader Documentation',
u'PnC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'templateclass', u'templateloader Documentation',
[u'PnC'], 1)
]
| [
"amandine.sahl@gmail.com"
] | amandine.sahl@gmail.com |
a5a11cfef9f4349cd1bbbda6164070d5f154324b | ad682d2145f440c078a431a40d2153a204771026 | /method/DepBased/WM_OLPDM.py | 7889685fa719f8816d1f5051b2aece6f7cb45c2f | [] | no_license | barry800414/NewsCrawler | d81f1ee4b0e0c4a997dda1efd24d1430e222d318 | 18c10f10508558600f734d659e724d4e27f071a3 | refs/heads/master | 2021-05-03T13:11:29.696108 | 2015-07-01T16:38:05 | 2015-07-01T16:38:05 | 26,075,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,697 | py | #!/usr/bin/env python3
import sys
import json
import math
from collections import defaultdict
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.grid_search import ParameterGrid
import WordModelImproved as WM
import OneLayerPhraseDepModel as OLPDM
from PhraseDepTree import loadPhraseFile
from sentiDictSum import readSentiDict
from RunExperiments import *
import ErrorAnalysis as EA
from misc import *
import dataTool
import Parameter
'''
This code implements the baseline (tf, tf-idf) features
for training and testing (supervised document-level learning)
Author: Wei-Ming Chen
Date: 2015/02/16
'''
# Depricated
def mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=None, topicId=None):
oldms = dict()
for p in paramsIter:
# generate tfidf features
print('generating tfidf features...', file=sys.stderr)
(X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['columnSource'],
statCol=p['statementCol'], feature=p['feature'])
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
# saving model for speed up
if p['seedWordPOSType'] not in oldms:
allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
'tag', allowedFirstLayerWord, 'word', allowedRel)
oldms[p['seedWordPOSType']] = oldm
else:
oldm = oldms[p['seedWordPOSType']]
(X2, y2) = oldm.genXY()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
# merge (horozontally align) two matrix
X = DataTool.hstack(X1, X2)
print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)
if topicMap == None: #self train -> self test
prefix = "%d, %s, %s, %s" % (topicId, 'OLPDM+' + str(p['feature']),
toStr(p['columnSource']), p['statementCol'])
RunExp.selfTrainTest(X, y1, clfList, "MacroF1", testSize=0.2, prefix=prefix)
else: # all-train-and-test and leave-one-test
prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']),
toStr(p['columnSource']), p['statementCol'])
RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
# generate word model features and dependency model features, then merge them
def genXY(labelNewsList, olpdm, topicSet, sentiDict, params, volc):
# generate WM features
print('generating word features...', file=sys.stderr)
p = params['WM']['model settings']
allowedPOS = set(['VA', 'VV', 'NN', 'NR', 'AD', 'JJ', 'FW'])
wm = WM.WordModel(labelNewsList, newsCols=p['col'], statCol=p['stat'],
feature=p['feature'], allowedPOS=allowedPOS, volc=volc)
(X1, y1) = wm.genXY(p['minCnt'])
volc1 = WM.getVolc()
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
p = params['OLPDM']['model settings']
allowedSeedWord = initAllowedSet(topicSet, p['seedWordType'])
allowedFirstLayerWord = initAllowedSet(topicSet, p['firstLayerType'], sentiDict)
allowedRel = { t: None for t in topicSet }
olpdm.setModel(allowedSeedWord, p['seedWordType']['type'],
allowedFirstLayerWord, p['firstLayerType']['type'],
allowedRel, p['minCnt'])
(X2, y2) = olpdm.genXY()
volc2 = olpdm.getVolc()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
assert np.array_equal(y1, y2)
# merge (horozontally align) two matrix
X = DataTool.hstack(X1, X2)
volc3 = mergeVolc(volc1, volc2)
print('X: (%d, %d)' % (X.shape[0], X.shape[1]), file=sys.stderr)
return (X, y1, volc3)
if __name__ == '__main__':
if len(sys.argv) != 6:
print('Usage:', sys.argv[0], 'TagAndDepLabelNewsJson phraseJson sentiDict WMParamsJson OLPDMParamsJson', file=sys.stderr)
exit(-1)
# arguments
labelNewsJson = sys.argv[1]
phraseJson = sys.argv[2]
sentiDictFile = sys.argv[3]
WMParamsJson = sys.argv[4]
OLPDMParamsJson = sys.argv[5]
# load labels and news
with open(labelNewsJson, 'r') as f:
labelNewsList = json.load(f)
# ====== initialization ======
# load phrases
topicPhraseList = loadPhraseFile(phraseJson)
# load sentiment dictionary
sentiDict = readSentiDict(sentiDictFile)
# get the set of all possible topic
topicSet = set([labelNews['statement_id'] for labelNews in labelNewsList])
# contruct in the process of constructing phrase dependency tree
allowedFirstLayerWord = { topicId: set(sentiDict.keys()) for topicId in topicSet }
allowedRel = { topicId: None for topicId in topicSet }
topicMap = [ labelNewsList[i]['statement_id'] for i in range(0, len(labelNewsList)) ]
# ====== initalizing parameters ======
clfList = ['NaiveBayes', 'MaxEnt', 'SVM']
randSeedList = [1, 2, 3, 4, 5]
# print result of first Line
ResultPrinter.printFirstLine()
# ==================================================================== #
# Run experiments on given list of parameters #
# ==================================================================== #
# read best parameters of two model
WMParams = Parameter.loadFrameworkTopicParams(WMParamsJson)
OLPDMParams = Parameter.loadFrameworkTopicParams(OLPDMParamsJson)
# ============= Run for self-train-test ===============
print('Self-Train-Test...', file=sys.stderr)
labelNewsInTopic = dataTool.divideLabel(labelNewsList)
for t in topicSet:
bestR = None
olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsInTopic[t], topicPhraseList)
paramsIter = Parameter.getParamsIter(WMParams['SelfTrainTest'][t], 'WM',
OLPDMParams['SelfTrainTest'][t], 'OLPDM')
for p in paramsIter:
(X, y, volc) = genXY(labelNewsInTopic[t], olpdm, topicSet, sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'SelfTrainTest', p,
clfList, topicId=t, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs, 'MacroF1')
with open('WM_OLPDM_SelfTrainTest_topic%d.pickle' % t, 'w+b') as f:
pickle.dump(bestR, f)
olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList)
# ============= Run for all-train-test ================
print('All-Train-Test...', file=sys.stderr)
paramsIter = Parameter.getParamsIter(WMParams['AllTrainTest'], 'WM',
OLPDMParams['AllTrainTest'], 'OLPDM')
bestR = None
for p in paramsIter:
(X, y, volc) = genXY(labelNewsList, olpdm, topicSet,
sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'AllTrainTest', p, clfList,
topicMap=topicMap, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs, 'MacroF1')
with open('WM_OLPDM_AllTrainTest.pickle', 'w+b') as f:
pickle.dump(bestR, f)
# ============= Run for leave-one-test ================
print('Leave-One-Test...', file=sys.stderr)
for t in topicSet:
bestR = None
paramsIter = Parameter.getParamsIter(WMParams['LeaveOneTest'][t], 'tfidf',
OLPDMParams['LeaveOneTest'][t], 'OLPDM')
for p in paramsIter:
(X, y, volc) = genXY(labelNewsList, olpdm, topicSet, sentiDict, p)
rsList = RunExp.runTask(X, y, volc, 'LeaveOneTest', p, clfList,
topicMap=topicMap, topicId=t, randSeedList=randSeedList)
for rs in rsList:
if rs != None:
bestR = keepBestResult(bestR, rs[t], 'MacroF1')
with open('WM_OLPDM_LeaveOneTest_topic%d.pickle' % t, 'w+b') as f:
pickle.dump(bestR, f)
'''
# run all combination
params = { 'feature': ['0/1', 'tf', 'tfidf'],
'column': [['content'], ['title'], ['title', 'content']],
'statement': [False, True],
'seedWordPOSType': [('NP',), ('NP', 'NR'), ('NP', 'NN', 'NR')]
}
paramsIter = ParameterGrid(params)
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=topicMap, topicId=None)
topicLabelNewsList = dataTool.divideLabel(labelNewsList)
for topicId, labelNewsList in topicLabelNewsList.items():
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=None, topicId=topicId)
'''
'''
oldms = dict()
# all topic are mixed to train and predict/ leave-one-test
for p in paramsIter:
# generate tfidf features
print('generating tfidf features...', file=sys.stderr)
(X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['column'],
statementCol=p['statement'], feature=p['feature'])
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
# saving model for speed up
if p['seedWordPOSType'] not in oldms:
allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
print(allowedSeedWord)
oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
'tag', allowedFirstLayerWord, 'word', allowedRel)
oldms[p['seedWordPOSType']] = oldm
else:
oldm = oldms[p['seedWordPOSType']]
(X2, y2) = oldm.genXY()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
# merge (horozontally align) two matrix
X = DataTool.hstack(X1, X2)
print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)
# all train and test
prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']), list2Str(p['column']), p['statement'])
RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
# leave one test
RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
'''
| [
"barry800414@gmail.com"
] | barry800414@gmail.com |
047c84b87840d1b7ec7ee3291c29a71e590f7b89 | 8341678973612363868a36d89b1c464cbe0f4a79 | /app.py | c3b2815b752fcfd98f7940020234f803e7b9c6ed | [] | no_license | sveco86/magiogo-iptv-server | 45c80ce8e21d6488933e6cd4be9ed3873c013581 | 00610e6e73cfd0b79c136c48dcf4f87c08239040 | refs/heads/master | 2023-03-26T12:04:46.038199 | 2021-03-10T12:29:42 | 2021-03-10T12:29:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,348 | py | import atexit
import gzip
from pathlib import Path
import xmltv
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, redirect, render_template
from magiogo import *
from parse_season_number import parse_season_number
app = Flask(__name__, static_url_path="/", static_folder="public")
# Ensure public dir exists
Path("public").mkdir(exist_ok=True)
last_refresh = None
@app.route('/')
def index():
return render_template("index.html", last_refresh=last_refresh)
@app.route('/channel/<channel_id>')
def channel_redirect(channel_id):
stream_info = magio.channel_stream_info(channel_id)
return redirect(stream_info.url, code=303)
@app.errorhandler(404)
def page_not_found(e):
# Redirect all to index page
return redirect('/')
def gzip_file(file_path):
with open(file_path, 'rb') as src, gzip.open(f'{file_path}.gz', 'wb') as dst:
dst.writelines(src)
def generate_m3u8(channels):
magio_iptv_server_public_url = os.environ.get('MAGIO_SERVER_PUBLIC_URL', "http://127.0.0.1:5000")
with open("public/magioPlaylist.m3u8", "w", encoding="utf-8") as text_file:
text_file.write("#EXTM3U\n")
for channel in channels:
text_file.write(f'#EXTINF:-1 tvg-id="{channel.id}" tvg-logo="{channel.logo}",{channel.name}\n')
text_file.write(f"{magio_iptv_server_public_url}/channel/{channel.id}\n")
def generate_xmltv(channels):
date_from = datetime.datetime.now() - datetime.timedelta(days=0)
date_to = datetime.datetime.now() + datetime.timedelta(days=int(os.environ.get('MAGIO_GUIDE_DAYS', 7)))
channel_ids = list(map(lambda c: c.id, channels))
epg = magio.epg(channel_ids, date_from, date_to)
with open("public/magioGuide.xmltv", "wb") as guide_file:
writer = xmltv.Writer(
date=datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
generator_info_name="MagioGoIPTVServer",
generator_info_url="",
source_info_name="Magio GO Guide",
source_info_url="https://skgo.magio.tv/v2/television/epg")
# Write channels
for channel in channels:
channel_dict = {'display-name': [(channel.name, u'sk')],
'icon': [{'src': channel.logo}],
'id': channel.id}
writer.addChannel(channel_dict)
# Write programmes
for (channel_id, programmes) in epg.items():
for programme in programmes:
programme_dict = {
'category': [(genre, u'en') for genre in programme.genres],
'channel': channel_id,
'credits': {'producer': [producer for producer in programme.producers],
'actor': [actor for actor in programme.actors],
'writer': [writer for writer in programme.writers],
'director': [director for director in programme.directors]},
'date': str(programme.year),
'desc': [(programme.description,
u'')],
'icon': [{'src': programme.poster}, {'src': programme.thumbnail}],
'length': {'units': u'seconds', 'length': str(programme.duration)},
'start': programme.start_time.strftime("%Y%m%d%H%M%S"),
'stop': programme.end_time.strftime("%Y%m%d%H%M%S"),
'title': [(programme.title, u'')]}
# Define episode info only if provided
if programme.episodeNo is not None:
# Since seasonNo seems to be always null, try parsing the season from the title (e.g. Kosti X. = 10)
if programme.seasonNo is None:
(show_title_sans_season, programme.seasonNo) = parse_season_number(programme.title)
programme_dict['title'] = [(show_title_sans_season, u'')]
programme_dict['episode-num'] = [
(f'{(programme.seasonNo or 1) - 1} . {(programme.episodeNo or 1) - 1} . 0', u'xmltv_ns')]
writer.addProgramme(programme_dict)
writer.write(guide_file, True)
# Gzip the guide file
gzip_file("public/magioGuide.xmltv")
def refresh():
channels = magio.channels()
print("Generating .m3u8 playlist")
generate_m3u8(channels)
print("Generating XMLTV guide")
generate_xmltv(channels)
print("Refreshing finished!")
global last_refresh
last_refresh = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
# Quality config
qualityString = os.environ.get('MAGIO_QUALITY', "HIGH")
qualityMapping = {"LOW": MagioQuality.low, "MEDIUM": MagioQuality.medium, "HIGH": MagioQuality.high, "EXTRA": MagioQuality.extra}
quality = qualityMapping[qualityString]
print(f"Stream quality configured to: {qualityString} ({quality})")
# Initial playlist and xmltv load
print("Logging in to Magio Go TV")
magio = MagioGo(os.environ.get('MAGIO_USERNAME'), os.environ.get('MAGIO_PASSWORD'), quality)
refresh()
# Load new playlist and xmltv everyday
scheduler = BackgroundScheduler()
scheduler.add_job(refresh, 'interval', hours=int(os.environ.get('MAGIO_GUIDE_REFRESH_HOURS', 12)))
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
| [
"lukas.kusik@gmail.com"
] | lukas.kusik@gmail.com |
f5ec3f1b0f0acf25ad487555a7f33120f6d5522a | 63cb8173f398a99b69c6345e05943ec1c5bdccd6 | /main.py | 53e4564a5e1c358618aff9084bc49191c9e348c7 | [] | no_license | Blender3D/Deskboard | 596ff809ae1f7ad15bff0eca4f8e36e44ee8976f | 693361c010c1b1a7489480c406ec92354d8dc766 | refs/heads/master | 2021-01-22T05:24:36.441601 | 2012-09-17T03:07:52 | 2012-09-17T03:07:52 | 5,835,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | #!/usr/bin/env python2
import os, re, sys, json, datetime, time, glob, ConfigParser, subprocess
from functools import wraps
import psutil
import dbus
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from dbus.mainloop.qt import DBusQtMainLoop
from WebkitQObject import WebkitQObject
from desktop import DesktopLauncher, Desktop
from music import MusicBackend
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
DBusQtMainLoop(set_as_default=True)
def cached_property(function):
result = None
@wraps(function)
def wrapper(*args, **kwargs):
if result:
return result
result = function(*args, **kwargs)
return result
return wrapper
def debug(function):
@wraps(function)
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
print '{}() -> {}'.format(function.__name__, result)
return result
return wrapper
class WebkitQObject(QObject):
def __init__(self):
super(WebkitQObject, self).__init__()
self.__cache__ = []
def store(self, item):
self.__cache__.append(item)
return self.__cache__[-1]
class System(QObject):
def __init__(self):
super(System, self).__init__()
@pyqtProperty(QVariant)
@debug
def ram(self):
return dict(psutil.phymem_usage().__dict__)
@pyqtSlot(QVariant)
@debug
def cpu(self):
return {
'usage': psutil.cpu_percent(),
'cores': psutil.cpu_percent(percpu=True)
}
class Background(QWebView):
def __init__(self):
super(Background, self).__init__()
self.resize(QApplication.desktop().size())
geometry = self.frameGeometry()
geometry.moveCenter(QDesktopWidget().availableGeometry().center())
self.move(geometry.topLeft())
self.frame = self.page().mainFrame()
self.settings = QWebSettings.globalSettings()
self.settings.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
self.settings.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
self.settings.setAttribute(QWebSettings.LocalContentCanAccessFileUrls, True)
self.settings.setAttribute(QWebSettings.LocalStorageEnabled, True)
self.settings.setAttribute(QWebSettings.AutoLoadImages, True)
self.setAttribute(Qt.WA_X11NetWmWindowTypeDesktop)
system_info = System()
music_info = MusicBackend()
desktop_info = Desktop()
self.frame.addToJavaScriptWindowObject('system', system_info)
self.frame.addToJavaScriptWindowObject('desktop', desktop_info)
self.frame.addToJavaScriptWindowObject('music', music_info)
def load_theme(self, name):
path = os.path.abspath('themes/{name}/index.html'.format(name=name))
if not os.path.exists(path):
return False
self.load(QUrl.fromLocalFile(path))
self.load(QUrl('http://gridster.net/'))
return True
if __name__ == '__main__':
app = QApplication(sys.argv)
background = Background()
background.load_theme('text')
background.show()
sys.exit(app.exec_())
| [
"452469+Blender3D@users.noreply.github.com"
] | 452469+Blender3D@users.noreply.github.com |
c3269a9d2921b1dd7aedb9e987d48a9a1cb04198 | 99a5e59f1f6dccd580989e92fc148143bef9ae23 | /store/models/customer.py | b9b79996b4d8e12facf7e9df4adc870b19fb17d9 | [] | no_license | Sachin-Kahandal/eshop | 743ce2c48c913f6aa41c6388395478b3fc01c1aa | c58b7f959ff4294c069bba1f1bca8f78294a4483 | refs/heads/master | 2023-02-22T13:01:22.113343 | 2021-01-26T14:56:57 | 2021-01-26T14:56:57 | 331,246,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | from django.db import models
from django.contrib.auth.hashers import make_password, check_password
class Customer(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
phone = models.CharField(max_length=10)
address = models.CharField(max_length=100)
email = models.EmailField()
password = models.CharField(max_length=500)
def __str__(self):
return self.first_name + ' ' + self.last_name
def register(self):
self.save()
# checks if email exists
def emailExists(self):
if Customer.objects.filter(email = self.email):
return True
else:
return False
# checks if phone exists
def phoneExists(self):
if Customer.objects.filter(phone = self.phone):
return True
else:
return False
@staticmethod
def get_customer_email(email):
try:
customer = Customer.objects.get(email = email)
return customer
except:
return None
| [
"54132749+SachinKahandal@users.noreply.github.com"
] | 54132749+SachinKahandal@users.noreply.github.com |
8ea369755709ea09b07fed508e95099cc47b316a | 406d942b98d15f45393cb864b21ee3345eb9cc8f | /Coursera_Algorithms/max_mult.py | e81e650090f64723bc6c303c457c8ff250116381 | [] | no_license | msekhar12/Algorithms_Exercises | c3804d64f9cf43da92e20b151807952b41ac89c3 | c2454987060f8c0404d4fdb215c7b2eb6f8c677a | refs/heads/master | 2020-04-07T06:17:24.602842 | 2019-02-03T14:34:49 | 2019-02-03T14:34:49 | 158,129,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # python 3
# Max product of 2 numbers from an array of integers
# Input will be 2 lines.
# The first line will contain the number of elements in the array, and the second line will be space separated numbers:
def find_max_product(n, l):
if n <= 1:
return None
comps = 0
if l[0] > l[1]:
max_1 = l[0]
max_2 = l[1]
comps += 1
else:
max_2 = l[0]
max_1 = l[1]
comps += 1
for i in range(2, n):
if l[i] > max_1:
max_2 = max_1
max_1 = l[i]
comps += 1
elif l[i] > max_2 and l[i] <= max_1:
max_2 = l[i]
comps += 1
return max_1, max_2, max_1*max_2, comps
n = int(input())
l = [int(x) for x in input().split()]
print(find_max_product(n, l))
| [
"sekhar@Sekhars-MacBook-Pro.local"
] | sekhar@Sekhars-MacBook-Pro.local |
5f32e4fa86ea444a96fde64ff2b9e4259b98b9f7 | 5002037a61b129ade69f675137cd9e16966518a2 | /apps/gallery/migrations/0007_auto_20190801_1340.py | 1f0c84144319147a27c1d5b54a3f020d9da65176 | [
"Apache-2.0"
] | permissive | mrtaalebi/sitigo | e290f1e952a3c47b9fb356177e5c7ea708dcd708 | cce8b4f5299b58d7365789ead416d4568b443743 | refs/heads/master | 2022-12-11T00:09:07.196902 | 2020-11-19T20:34:58 | 2020-11-19T20:34:58 | 194,496,364 | 0 | 0 | Apache-2.0 | 2019-07-05T14:29:39 | 2019-06-30T09:06:47 | JavaScript | UTF-8 | Python | false | false | 489 | py | # Generated by Django 2.2.3 on 2019-08-01 09:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gallery', '0006_auto_20190801_1338'),
]
operations = [
migrations.AlterField(
model_name='image',
name='city',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gallery.City'),
),
]
| [
"the.doors.are.locked@gmail.com"
] | the.doors.are.locked@gmail.com |
b8b058c24e942784ccc2a2b2ef0ed358711175a1 | 400086979e153dea632339ff23e0a2cce3e40d77 | /starting_kit/code/model.py | eb9f26fa61b3eeba03160dc4ff64357707d068ca | [] | no_license | PhamAlexT/MOSQUITO | 99b1c7c3eb2490ec5c073bbf1da1d5697d4032bf | 6c93a49367c62b9159bfa3291b0dd0de9a4558e4 | refs/heads/master | 2020-12-30T05:19:39.235458 | 2020-05-09T12:37:11 | 2020-05-09T12:37:11 | 238,873,909 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | '''
Sample predictive model.
You must supply at least 4 methods:
- fit: trains the model.
- predict: uses the model to perform predictions.
- save: saves the model.
- load: reloads the model.
'''
import pickle
import numpy as np
from os.path import isfile
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
# Preprocessing de la bibliothèque
from prePro import prepro
# Preprocessing de la bib scikit learn
from sklearn.preprocessing import StandardScaler
class model (BaseEstimator):
def __init__(self, classifier = RandomForestClassifier(random_state=42, n_estimators = 100, max_depth=100)):
'''
Constructeur de notre classe "model"
param :
classifier = Un modèle de classification (Par défault : RandomForest)
'''
# Notre modèle
self.classifier = classifier
# Preprocessing de la Team prepro
self.preprocessing1 = prepro()
# Preprocessing de la bibliothèque Scikit Learn
self.preprocessing2 = StandardScaler()
def fit(self, X, y, sample_weights=None):
"""
Preprocess the training set and build a forest of trees from it
params:
X : training dataset
y : Labels of each data on the dataset
return :
Our model 'Trained'
"""
X = self.preprocessing1.fit_transform(X,y)
X = self.preprocessing2.fit_transform(X,y)
self.classifier.fit(X, y)
return self
def predict_proba(self, X):
"""
Predict class probabilities
param :
X : The input dataset
return :
The class probabilities of the input samples
"""
X = self.preprocessing1.transform(X)
X = self.preprocessing2.transform(X)
y_proba = self.classifier.predict_proba(X)
return y_proba
def predict(self, X):
"""
Predict the class of a given dataset
param :
X : The dataset
return
The predicted classes
"""
y_proba = self.predict_proba(X)
y_pred = np.argmax(y_proba, axis=1)
return y_pred
def save(self, path="./"):
pickle.dump(self, open(path + '_model.pickle', "wb"))
def load(self, path="./"):
modelfile = path + '_model.pickle'
if isfile(modelfile):
with open(modelfile, 'rb') as f:
self = pickle.load(f)
print("Model reloaded from: " + modelfile)
return self | [
"liliaizri99@gmail.com"
] | liliaizri99@gmail.com |
565584f78e17bf14cc57d09723b4166b4d3c8e6f | a0a7c9997676217387738f4c89e0665fd403b2c0 | /basic/perceptron.py | aa96363c3aeeb78fe704d83b5dcbfd81f0ff02d7 | [] | no_license | whoisalan/MACHINELEARNING | 86f51bd64fc46c68574b89846d6a11c88cf94272 | 3a658eae6e8818a8286045665b7e3e4d23c284db | refs/heads/master | 2020-04-09T18:15:47.313308 | 2018-12-05T14:23:02 | 2018-12-05T14:23:02 | 160,506,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | import random
import numpy as np
import matplotlib.pyplot as plt
def sign(v):
if v>=0:
return 1
else:
return -1
def train(train_num,train_datas,lr):
w=[0,0]
b=0
for i in range(train_num):
# 随机梯度下降
x=random.choice(train_datas)
x1,x2,y=x
if(y*sign((w[0]*x1+w[1]*x2+b))<=0):
w[0]+=lr*y*x1
w[1]+=lr*y*x2
b+=lr*y
return w,b
def plot_points(train_datas,w,b):
plt.figure()
x1 = np.linspace(0, 8, 100)
x2 = (-b-w[0]*x1)/w[1]
plt.plot(x1, x2, color='r', label='y1 data')
datas_len=len(train_datas)
for i in range(datas_len):
if(train_datas[i][-1]==1):
plt.scatter(train_datas[i][0],train_datas[i][1],s=50)
else:
plt.scatter(train_datas[i][0],train_datas[i][1],marker='x',s=50)
plt.show()
if __name__=='__main__':
train_data1 = [[1, 3, 1], [2, 2, 1], [3, 8, 1], [2, 6, 1]] # 正样本
train_data2 = [[2, 1, -1], [4, 1, -1], [6, 2, -1], [7, 3, -1]] # 负样本
train_datas = train_data1 + train_data2 # 样本集
w,b=train(train_num=50,train_datas=train_datas,lr=0.01)
plot_points(train_datas,w,b)
| [
"alanznala@163.com"
] | alanznala@163.com |
fed740e3a86c5c0992ca482c58875e9b14269012 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/learnedmdtinfo/learnedmdtinfo.py | b27f8bb6f94a4485f17effd4ef1a42a2e0f065ba | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 4,210 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedMdtInfo(Base):
    """The LearnedMdtInfo class encapsulates a system managed learnedMdtInfo node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the LearnedMdtInfo property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
    """

    # NOTE(review): this file follows the auto-generated ixnetwork_restpy wrapper
    # pattern; keep edits minimal so regeneration stays trivial.
    _SDM_NAME = 'learnedMdtInfo'  # server data-model node name for this resource

    def __init__(self, parent):
        super(LearnedMdtInfo, self).__init__(parent)

    @property
    def Age(self):
        """The amount of time (in seconds) remaining before this TLV times out.

        Returns:
            number
        """
        return self._get_attribute('age')

    @property
    def CeGroupAddress(self):
        """The CE group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceGroupAddress')

    @property
    def CeSourceAddress(self):
        """The CE source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceSourceAddress')

    @property
    def MdtGroupAddress(self):
        """The MDT (PE) group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtGroupAddress')

    @property
    def MdtSourceAddress(self):
        """The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtSourceAddress')

    def find(self, Age=None, CeGroupAddress=None, CeSourceAddress=None, MdtGroupAddress=None, MdtSourceAddress=None):
        """Finds and retrieves learnedMdtInfo data from the server.

        All named parameters support regex and can be used to selectively retrieve learnedMdtInfo data from the server.
        By default the find method takes no parameters and will retrieve all learnedMdtInfo data from the server.

        Args:
            Age (number): The amount of time (in seconds) remaining before this TLV times out.
            CeGroupAddress (str): The CE group address contained in this data MDT TLV.
            CeSourceAddress (str): The CE source address contained in this data MDT TLV.
            MdtGroupAddress (str): The MDT (PE) group address contained in this data MDT TLV.
            MdtSourceAddress (str): The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            self: This instance with matching learnedMdtInfo data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() deliberately captures all keyword arguments (plus self) for
        # the base-class query builder; do not refactor into an explicit dict.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of learnedMdtInfo data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the learnedMdtInfo data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
2a632ed951bfbd64396511d30034e84b2fb566c2 | 6a1174a7215cfd4c345cc8723cfb9a3bf053b108 | /conditional_image_manipulation/data/preprocess.py | 5b8fe0e726ad94bbe9b499668959e940bd8b6a00 | [
"MIT"
] | permissive | jlezama/disentangling-jacobian | e55e84bafe3c4f41d76e108bcb1dfb9cd6697b1e | c570945055c735a15b9adba093b7c688c7310aad | refs/heads/master | 2021-06-30T03:09:21.393511 | 2020-10-06T14:35:11 | 2020-10-06T14:35:11 | 169,459,503 | 26 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | # Code from FaderNetworks by Facebook
#!/usr/bin/env python
import os
import matplotlib.image as mpimg
import cv2
import numpy as np
import torch
N_IMAGES = 202599
IMG_SIZE = 256
IMG_PATH = 'data/images_%i_%i.pth' % (IMG_SIZE, IMG_SIZE)
ATTR_PATH = 'data/attributes.pth'
def preprocess_images():
    """Crop, resize and serialize the CelebA images.

    Reads img_align_celeba/000001.jpg .. %06i.jpg, crops 20 rows off the top
    and bottom (178x218 -> 178x178), resizes to IMG_SIZE, stacks everything
    into a (N_IMAGES, 3, IMG_SIZE, IMG_SIZE) uint8 tensor and saves it with
    torch.save.  Skipped entirely if IMG_PATH already exists.
    """
    if os.path.isfile(IMG_PATH):
        print("%s exists, nothing to do." % IMG_PATH)
        return

    print("Reading images from img_align_celeba/ ...")
    raw_images = []
    for index in range(1, N_IMAGES + 1):
        if index % 10000 == 0:
            print(index)
        # [20:-20] drops 20 pixel rows top and bottom -> square 178x178 crop.
        raw_images.append(mpimg.imread('img_align_celeba/%06i.jpg' % index)[20:-20])

    if len(raw_images) != N_IMAGES:
        raise Exception("Found %i images. Expected %i" % (len(raw_images), N_IMAGES))

    print("Resizing images ...")
    all_images = []
    for index, image in enumerate(raw_images):
        if index % 10000 == 0:
            print(index)
        assert image.shape == (178, 178, 3)
        if IMG_SIZE != 178:
            # Shrinking: area interpolation.  Enlarging: Lanczos interpolation.
            interp = cv2.INTER_AREA if IMG_SIZE < 178 else cv2.INTER_LANCZOS4
            image = cv2.resize(image, (IMG_SIZE, IMG_SIZE), interpolation=interp)
        assert image.shape == (IMG_SIZE, IMG_SIZE, 3)
        all_images.append(image)

    # HWC -> CHW per image, then stack along a new batch axis.
    stacked = np.stack([img.transpose((2, 0, 1)) for img in all_images], axis=0)
    data = torch.from_numpy(stacked)
    assert data.size() == (N_IMAGES, 3, IMG_SIZE, IMG_SIZE)

    print("Saving images to %s ..." % IMG_PATH)
    # Also save a 20k-image subset for quick experiments.
    torch.save(data[:20000].clone(), 'data/images_%i_%i_20000.pth' % (IMG_SIZE, IMG_SIZE))
    torch.save(data, IMG_PATH)
def preprocess_attributes():
    """Parse CelebA's list_attr_celeba.txt into per-attribute boolean arrays.

    The attribute file layout is: image count on line 1, attribute names on
    line 2, then one line per image of "<id>.jpg -1/1 x40".  The result, a
    dict {attribute_name: bool ndarray of length N_IMAGES}, is saved with
    torch.save to ATTR_PATH.  Skipped entirely if ATTR_PATH already exists.

    Raises:
        AssertionError: if the file does not match the expected CelebA layout.
    """
    if os.path.isfile(ATTR_PATH):
        print("%s exists, nothing to do." % ATTR_PATH)
        return
    attr_lines = [line.rstrip() for line in open('list_attr_celeba.txt', 'r')]
    # Header is two lines (count + attribute names) followed by one line per image.
    assert len(attr_lines) == N_IMAGES + 2
    attr_keys = attr_lines[1].split()
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24 -- the
    # builtin bool is the documented replacement and yields the same dtype.
    attributes = {k: np.zeros(N_IMAGES, dtype=bool) for k in attr_keys}
    for i, line in enumerate(attr_lines[2:]):
        image_id = i + 1
        split = line.split()
        # 1 filename + 40 attribute flags per row.
        assert len(split) == 41
        assert split[0] == ('%06i.jpg' % image_id)
        assert all(x in ['-1', '1'] for x in split[1:])
        for j, value in enumerate(split[1:]):
            attributes[attr_keys[j]][i] = value == '1'
    print("Saving attributes to %s ..." % ATTR_PATH)
    torch.save(attributes, ATTR_PATH)
# Run both preprocessing steps when this module is executed (or imported).
preprocess_images()
preprocess_attributes()
| [
"jlezama@gmail.com"
] | jlezama@gmail.com |
efdf6baebd3af374b832d92ff380d8150baa87ab | 5b41d2e551982784a1e53e49f12c7b058403bca8 | /venv/Scripts/easy_install-script.py | 1bbb1a2552e96f5fe5ac7d0cb9794956d25349a1 | [] | no_license | TsvetkovEvgenij/HelloWorld | 79b67793bc49e9f1fe717e3dc5ec98f2f9ea58cf | 1fa97bbb2015502c1289828fc36e598fb874d29a | refs/heads/master | 2020-09-23T21:33:43.203083 | 2019-12-03T10:27:07 | 2019-12-03T10:27:07 | 225,592,335 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 469 | py | #!C:\Users\Ковшикова\PycharmProjects\HelloWorld\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script shim: strip the
    # "-script.py"/".exe" suffix so argv[0] matches the command name, then
    # delegate to the 'easy_install' entry point of setuptools 40.8.0.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"tsvetkov.evgenij@gmail.com"
] | tsvetkov.evgenij@gmail.com |
35dcdc43f617001a03482a66d869671fe3c327ec | 867b776ad26475b4fffb28cb4a7dbbff167863dc | /src/CsvReader.py | 4f5a987fa9454f377b78579e649bdbeb273161f7 | [] | no_license | knp56/Calculator | 2e108bb144edc98523c5bf055e5daa3c39bad867 | 4e8ff3064ebbc6ecf511c6283b29f84c184c39a1 | refs/heads/main | 2023-06-22T12:04:52.687412 | 2021-07-01T21:02:11 | 2021-07-01T21:02:11 | 382,148,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import csv
#from pprint import pprint
def ClassFactory(class_name, dictionary):
    """Dynamically build a new class named *class_name* whose class
    attributes are taken from *dictionary* (keys become attribute names)."""
    bases = (object,)
    return type(class_name, bases, dictionary)
class CsvReader:
    """Read a CSV file (with a header row) into a list of row dicts.

    FIX: ``data`` used to be a *class-level* mutable list, so rows from every
    file ever read leaked into all CsvReader instances.  Both ``data`` and
    ``opdata`` are now per-instance.  The redundant ``close()`` inside the
    ``with`` block and the stray ``pass`` were also removed.
    """

    def __init__(self, filepath):
        """Load *filepath* eagerly; each row becomes a dict keyed by header."""
        self.opdata = []
        with open(filepath) as text_data:
            for row in csv.DictReader(text_data):
                self.opdata.append(row)
        # Kept for backward compatibility with callers that used ``.data``;
        # it now holds only this file's rows.
        self.data = list(self.opdata)

    def return_data_as_objects(self, class_name):
        """Wrap each row dict in a dynamically created class (via ClassFactory)
        and return the list of generated classes."""
        return [ClassFactory(class_name, row) for row in self.data]
"knp56@njit.edu"
] | knp56@njit.edu |
175fc58cdec9dfa2614265d9e8687f6653571759 | 9ddb76f8bac669e89e2ae0c5de68bef6e81b7dd6 | /GamebotsParser.py | 865a444d3a048ea5f293af25aac7c910f18928bf | [] | no_license | formica-multiuso/ugc | 51598755823c912c9ba0af8fc75fa1c0b26817db | 0aa35de26412bc3855acb2407abafde77a430137 | refs/heads/master | 2016-09-06T16:25:06.158155 | 2013-08-05T10:31:56 | 2013-08-05T10:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import sys
import socket
import threading
import select
class GamebotsParser(threading.Thread):
    # Background thread that reads Gamebots protocol messages from a TCP
    # socket and echoes the parsed tokens to stdout with ANSI colors.
    # NOTE(review): Python 2 code (print statements); run() loops forever.
    def __init__(self,socket,name):
        threading.Thread.__init__(self)
        self.socket = socket  # connected socket to the Gamebots server
        self.name = name      # label printed before each received batch

    def run(self):
        while 1:
            # Block until the socket is readable (write/error lists unused).
            rlist, wlist, elist = select.select( [self.socket], [], [] )
            self.messageBuffer = self.socket.recv(2048)
            # Messages are newline-separated: "TYPE {key value}{key value}...".
            messages = self.messageBuffer.split('\n')
            print "\n" + "\033[34m" + "[" + self.name + "] " + "\033[0m"
            for message in messages:
                # Split off the message type from its payload.
                pair = message.split(' ',1)
                if len(pair) > 1:
                    print "\033[33m" + pair[0] + "\033[0m"
                    payload = ''.join(pair[1])
                    # Strip the '{' braces, then split on '}' to isolate
                    # each "key value" token of the payload.
                    tokens = payload.split('{')
                    tokens = ''.join(tokens)
                    tokens = tokens.split('}')
                    # Here I need to return sensors (SEN) information to the IRobot class splitted in dictionary
                    for token in tokens:
                        print token

    def parser(self):
        # Placeholder for a structured parser (not implemented).
        pass
| [
"formica@member.fsf.org"
] | formica@member.fsf.org |
10235484ece4de8311785cbc4ef11abeb245c5aa | 577aa26c18c7ae5a2be3b1d4ef6cc4a88f0f455a | /bot.py | 379414f1d2d588c384aa3390f91f5be270390603 | [] | no_license | boringcactus/head-receiver-bot | add9f78ee771b597be8e342d914601a1270abcfe | e8012db3c2cae22771e1f68965eff7ff85a14014 | refs/heads/master | 2021-04-18T17:42:25.066518 | 2020-03-24T02:31:15 | 2020-03-24T02:31:15 | 249,567,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | import logging
import os
from io import BytesIO
import telegram
from telegram.ext import Updater, MessageHandler, Filters
from dotenv import load_dotenv, find_dotenv
from PIL import Image, ImageDraw, ImageFont
load_dotenv(find_dotenv())
font = ImageFont.truetype("SourceSansPro-Regular.ttf", 48)
def apply(name, photo):
    """Composite *photo* (raw image bytes) onto the 'orig.png' template with a
    perspective warp, stamp *name* on a grey label box, and return the result
    as PNG bytes.  Uses the module-level ``font``."""
    buffer = BytesIO()
    with Image.open('orig.png') as template:
        canvas = template.convert('RGBA')
        # Decode the avatar and warp it onto the screen area of the template.
        avatar = Image.open(BytesIO(photo)).convert('RGBA')
        coeffs = [1/0.39, 0, -430, 0.01807, 1/0.49, -365, 0, 0, 1]
        avatar = avatar.transform(template.size, Image.PERSPECTIVE, coeffs)
        canvas.alpha_composite(avatar)
        # Draw the name centered at (420, 100) over a grey background box.
        pen = ImageDraw.Draw(canvas)
        text_w, text_h = pen.textsize(name, font=font)
        left, top = 420 - text_w / 2, 100 - text_h / 2
        right, bottom = 420 + text_w / 2, 100 + text_h / 2
        pen.rectangle([(left, top), (right, bottom)], fill=(190, 190, 190, 255))
        pen.text((left, top), name, font=font, fill=(0, 0, 0, 255))
        canvas.save(buffer, 'PNG')
    return buffer.getvalue()
def process(update: telegram.Update, context):
    """Handle any incoming message: fetch the relevant user's profile photo,
    render it through apply(), and send the composed image back to the chat.

    For forwarded messages the *original* author's name and photo are used
    instead of the sender's.
    """
    target = update.effective_user
    if update.effective_message is not None and update.effective_message.forward_from is not None:
        # Forwarded message: act on behalf of the original author.
        target = update.effective_message.forward_from
    name = target.full_name
    photos = target.get_profile_photos(limit=1).photos
    if len(photos) == 0:
        # No profile picture available -> report and bail out.
        error = "Can't find profile picture for {}".format(name)
        context.bot.send_message(chat_id=update.effective_chat.id, text=error)
        return
    # Pick the widest rendition of the most recent profile photo.
    photo_all_sizes = target.get_profile_photos(limit=1).photos[0]
    photo_best_size = max(photo_all_sizes, key=lambda x: x.width)
    photo_file = photo_best_size.get_file()
    photo = photo_file.download_as_bytearray()
    result = apply(name, photo)
    context.bot.send_photo(chat_id=update.effective_chat.id, photo=BytesIO(result))
    log_message = 'Handled request for "{}"'.format(name)
    if target is not update.effective_user:
        log_message += ' on behalf of "{}"'.format(update.effective_user.full_name)
    logger.info(log_message)
if __name__ == "__main__":
# Set these variable to the appropriate values
TOKEN = os.environ.get('TG_BOT_TOKEN')
NAME = "head-receiver-bot"
# Port is given by Heroku
PORT = os.environ.get('PORT')
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Set up the Updater
updater = Updater(token=TOKEN, use_context=True)
dp = updater.dispatcher
# Add handlers
dp.add_handler(MessageHandler(Filters.all, process))
# Start the webhook
if PORT is None:
updater.start_polling()
else:
updater.start_webhook(listen="0.0.0.0",
port=int(PORT),
url_path=TOKEN)
updater.bot.setWebhook("https://{}.herokuapp.com/{}".format(NAME, TOKEN))
updater.idle()
| [
"melody@boringcactus.com"
] | melody@boringcactus.com |
66e2289930010858f17f6b47cd882273cfaacfe3 | 2d009dceeb7893d7441e42b1944b7ef317e561ab | /Bisection.py | 7ffb910041ff7d3efd04283482a133810206349c | [] | no_license | dweatherstone/calculusdrw | d22a3abf238aa29657ece79c8227411c38e82709 | 9eaf7f059ed219cdfc1f8968dadafdb2387ea059 | refs/heads/master | 2022-04-18T20:24:02.323455 | 2020-04-20T15:44:05 | 2020-04-20T15:44:05 | 257,258,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,793 | py | from Generalroots import RootStatus, Root
class Bisection(Root):
    """The simplest root finding algorithm is the bisection method. The
    algorithm applies to any continuous function on an interval where
    the value of the function changes sign. The idea is simple:
    divide the interval in two, a solution must exist within one
    subinterval, select the subinterval where the sign of f changes
    and repeat.
    """

    def __init__(self, func):
        """ Initialising an object to calculate the root of a function using
        the Bisection method.

        Parameters
        ----------
        func (function): The function for which we are trying to approximate a
            solution.
        """
        super().__init__(func)

    def find_root(self, start_interval, end_interval, num_iter = 100):
        """ Approximate solution of f(x) = 0 on interval [a, b] using the
        bisection method.

        Parameters
        ----------
        start_interval (number): The lower bound of the interval in which to
            search for a solution.
        end_interval (number): The upper bound of the interval in which to
            search for a solution.
        num_iter (positive integer): The number of iterations to implement.

        Returns
        -------
        xn (number): Result of Bisection method. The midpoint of the Nth interval
            computed by the bisection method. The intial interval
            [a_0,b_0] is given by [a,b]. If f(m_n) == 0 for some
            midpoint m_n = (a_n + b_n)/2, then the function returns this
            solution. If all signs of values f(a_n), f(b_n) and f(m_n)
            are the same at any iteration, the bisection methode fails
            and returns None.
        """
        assert num_iter > 0
        assert end_interval > start_interval
        # A sign change over [a, b] is required for bisection to apply.
        if self.f(start_interval)*self.f(end_interval) >= 0:
            self.status = RootStatus.method_fails
            return None
        a_n = start_interval
        b_n = end_interval
        for _ in range(1, num_iter+1):
            m_n = (a_n + b_n)/2
            f_m_n = self.f(m_n)
            if self.f(a_n)*f_m_n < 0:
                # Sign change in [a_n, m_n]: keep the left half.
                a_n = a_n
                b_n = m_n
            elif self.f(b_n)*f_m_n < 0:
                # Sign change in [m_n, b_n]: keep the right half.
                a_n = m_n
                b_n = b_n
            elif f_m_n == 0:
                # Exact root hit at the midpoint.
                self.status = RootStatus.root_found
                self.xn.append(m_n)
                return self.xn
            else:
                # No sign change anywhere: bisection cannot proceed.
                self.status = RootStatus.method_fails
                return None
        # Iteration budget exhausted: return the last midpoint estimate.
        m_n = (a_n + b_n)/2
        self.xn.append(m_n)
        self.status = RootStatus.exceeded_max_iter
        return self.xn
"davidweatherstone@gmail.com"
] | davidweatherstone@gmail.com |
27460d30d032a0d1bdc979b8ff2544520320a468 | 240f4b564a53ead9076276258e5f3749fc9efb99 | /yproblem/utils.py | 68982c510dd39d6f80232e5fd0460f3e155bc350 | [
"MIT"
] | permissive | dbojanjac/effective2D | fe7c5fd8d53684274b3ffa28f43723d5d4d8c276 | 8d124a103a5bd8e68d1bc23c4e10fe4d3cd27759 | refs/heads/master | 2023-02-17T12:06:08.483294 | 2020-09-10T17:23:21 | 2020-09-10T17:23:21 | 168,364,828 | 1 | 0 | MIT | 2021-01-13T16:35:32 | 2019-01-30T15:21:40 | GLSL | UTF-8 | Python | false | false | 386 | py | import dolfin as df
import matplotlib.pyplot as plt
def save_field_plots(output_folder, f1, f2):
    """Render each dolfin field with matplotlib and save it as a PDF under
    <output_folder>/plots (f1.pdf and f2.pdf)."""
    for field, stem in ((f1, "f1"), (f2, "f2")):
        df.plot(field)
        plt.savefig(output_folder + "/plots/" + stem + ".pdf")
def save_pvd(output_folder, f1, f2):
    """Write each dolfin field to a ParaView .pvd file under
    <output_folder>/PVD (f1.pvd and f2.pvd)."""
    for field, stem in ((f1, "f1"), (f2, "f2")):
        sink = df.File(output_folder + "/PVD/" + stem + ".pvd")
        sink << field
| [
"darko.janekovic@fer.hr"
] | darko.janekovic@fer.hr |
76ba16f3571ee2c45140eede623e32fb986c8881 | 25cab1a6c2d5370be53ba54236f5947e2eb9cb28 | /data/audio/__init__.py | bf7c93a9fb836f15bd0979177f3a783ab03b8c24 | [] | no_license | chenliming-1/tr_apiCode | b9aca368e656ae4651dc9e2a4bfae80ecb59ec95 | 36cd39a9d1dac4df5a4ecc57c1bb391a863c0035 | refs/heads/main | 2023-08-26T22:49:25.086745 | 2021-10-20T10:22:55 | 2021-10-20T10:22:55 | 402,780,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!Date:2019/02/21 17:02
# !@Author:龚远琪
from .uploadaudio import uploadaudio
__all__ = ['uploadaudio'] | [
"gongyq@histudy.com"
] | gongyq@histudy.com |
f66d8eca2d435b8587e7ca130d23d12400ed0211 | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_Young_Physicist_20210607193741.py | 863458084f547b6a9bf662840ab4c6ff7880d758 | [] | no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | n= int(input())
x=[]
for i in range(n):
p=map(int,input().split()))
x.append(p)
a=b=c=0
for i in x:
a+=i[0]
b+=i[1]
c+=i[2]
if a==b==c==0:
print("YES")
else:
print("NO")
| [
"tarunsivasai8@gmail.com"
] | tarunsivasai8@gmail.com |
20076d99682732c095519240df2c951bfe0aae37 | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/startup/tila_OP_SmartDelete.py | cc9ba649d4972b3487b5351419e9a875b4d2745a | [
"MIT"
] | permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 2,449 | py | import bpy
bl_info = {
"name": "Tila : Smart Delete",
"author": "Tilapiatsu",
"version": (1, 0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D",
"category": "Object",
}
class TILA_SmartDeleteOperator(bpy.types.Operator):
    # Context-sensitive delete: chooses the deletion behaviour that matches
    # the current editor (3D view / outliner / file browser), the current
    # mode, and -- in mesh edit mode -- the active select mode.
    bl_idname = "object.tila_smartdelete"
    bl_label = "TILA: Smart Delete"
    bl_options = {'REGISTER', 'UNDO'}

    # When True, open Blender's delete menu instead of deleting directly.
    menu: bpy.props.BoolProperty(name='call_menu', default=False)

    def execute(self, context):
        if context.space_data.type == 'VIEW_3D':
            if self.menu:
                # Menu mode: show the standard delete menu for the edit type.
                if context.mode == 'EDIT_MESH':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_mesh_delete')

                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_curve_delete')
            else:
                if context.mode == 'EDIT_MESH':
                    current_mesh_mode = context.tool_settings.mesh_select_mode[:]
                    # if vertex mode on: dissolve rather than delete so
                    # surrounding faces are preserved.
                    if current_mesh_mode[0]:
                        bpy.ops.mesh.dissolve_verts()
                    # if edge mode on
                    if current_mesh_mode[1]:
                        bpy.ops.mesh.dissolve_edges(use_verts=True)
                    # if face mode on: faces are deleted outright.
                    if current_mesh_mode[2]:
                        bpy.ops.mesh.delete(type='FACE')

                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.curve.delete(type='VERT')

                elif context.mode == 'EDIT_GPENCIL':
                    # Grease-pencil delete can raise depending on selection;
                    # deliberately best-effort.
                    try:
                        bpy.ops.gpencil.delete(type='POINTS')
                    except Exception as e:
                        print("Warning: %r" % e)

                elif context.mode == 'EDIT_METABALL':
                    bpy.ops.mball.delete_metaelems('EXEC_DEFAULT')

                elif context.mode == 'OBJECT':
                    bpy.ops.object.delete(use_global=False, confirm=False)

        elif context.space_data.type == 'OUTLINER':
            bpy.ops.outliner.delete()

        elif context.space_data.type == 'FILE_BROWSER':
            bpy.ops.file.delete()
        # elif context.space_data.type == 'IMAGE_EDITOR':
        #     layout.label("No Context! image editor")

        return {'FINISHED'}
# Keymap entries would be stored here on registration (currently unused).
addon_keymaps = []

classes = (TILA_SmartDeleteOperator,)

# Standard Blender add-on registration pair generated from the class tuple.
register, unregister = bpy.utils.register_classes_factory(classes)

if __name__ == "__main__":
    register()
| [
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
26f2074d53662e3b75784826d1b3d465efd230e4 | 91397c476203a77c597f80769b9b8ac850a2dedb | /mongodb app/main.py | 2501f63bdb9279a0539f7b97b9511c019bf851c3 | [] | no_license | firdaussalim/Perpustakaan-App | 29cdf2d0963065f6d89901a19a0f258ebc165ac6 | dcf579f725b8ef90a3f6412599b77ac47b7b50a4 | refs/heads/master | 2023-06-06T11:34:05.261274 | 2021-06-25T10:06:23 | 2021-06-25T10:06:23 | 380,193,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from fastapi import FastAPI
from books_route import router as books_router
# Application entry point: mount the books endpoints on the main app.
app = FastAPI()

app.include_router(books_router)


@app.get("/")
async def read_main():
    # Simple landing / health-check route.
    return {"message": "Hello Bigger Applications!"}
"firdaus.salim24@gmail.com"
] | firdaus.salim24@gmail.com |
3c3083f149d724f150c0f60864c4c9d6ed10495d | 27856ac3b3311728fe103911f7cbc0f20cbdfa8f | /bot/config.py | 549488d5ab4942dbe9d3762ea0d3e81b3afc860a | [] | no_license | 535521469/crawl_free_ip_proxy | 2c314f5037e45508071593bbcfa27e16751e4078 | 977c7fc422e8d49dd1d195cf8d7d1475da427e04 | refs/heads/master | 2016-09-06T13:25:25.738769 | 2013-05-01T07:28:25 | 2013-05-01T07:28:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # encoding=utf8
'''
Created on 2013-4-24
@author: corleone
'''
from bot.configutil import ConfigFile
import os
def read_config():
    """Load the crawler settings from fetchproxy.cfg in the current working
    directory and return the parsed data.

    FIX: the original built the path with os.sep.join([getcwd(), os.curdir,
    name]), which inserts a literal '.' path component; os.path.join is the
    correct API and resolves to the same file.
    """
    cfg_path = os.path.join(os.getcwd(), 'fetchproxy.cfg')
    configdata = ConfigFile.readconfig(cfg_path).data
    return configdata

# Module-level singleton: the configuration is parsed once at import time.
configdata = read_config()
| [
"535521469@qq.com"
] | 535521469@qq.com |
e7af462a10aa2b01aba8dc9f51eaeb8d8f8f1589 | d9df48207e020367a2195bc3381db61c4eee4d9a | /Python/05.plot/01_bar.py | e31d9ea8c39aa23c9988a0b5448584e37556d445 | [] | no_license | surkjin/kosmo41_surkjin | d1872c39784b9c34f3016bf9cc1f347414b61816 | 2a262c4ae44415690034e8ce04e858732aa12c70 | refs/heads/master | 2020-03-21T04:42:09.070599 | 2018-12-12T06:52:33 | 2018-12-12T06:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 11 14:38:07 2018
@author: kosmo30
"""
#!/usr/bin/env python3
import matplotlib.pyplot as plt
plt.style.use('ggplot')

# Sample data: one bar per customer.
customers = ['ABC','DEF','GHI','JKL','MNO']
customers_index = range(len(customers))
sale_amounts =[127, 90, 201, 111, 232]

fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
ax1.bar(customers_index, sale_amounts, align='center', color='green')
# Keep tick marks only on the bottom/left spines.
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
plt.xticks(customers_index, customers, rotation=0, fontsize='small')
plt.xlabel('Customer Name')
plt.ylabel('Sale Amount')
plt.title('Sale Amount per Customer')
# Save a high-resolution copy before displaying the figure.
plt.savefig('./output/01_bar_plot.png', dpi=400, bbox_inches='tight')
plt.show()
"surkjin@gmail.com"
] | surkjin@gmail.com |
2e0c29033010b2955a644b542267326c9713f927 | 0581b4564d1e9683b49d754565c9b6f21a75d387 | /shop/models.py | 428c095c6557bc850101eca14ab7d66adb31631d | [] | no_license | AndreyIvanyutin/Webshop | 0deffcf5094fb86e74c6fe8b2d06d4dd2cdda93d | 8839824a5513b2e8d1c0d7f73f442095474cc8aa | refs/heads/master | 2020-12-14T09:56:12.219190 | 2017-07-08T17:25:29 | 2017-07-08T17:25:29 | 95,473,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,618 | py | from django.contrib.auth.models import User
from django.db import models
# Модель категории
class Category(models.Model):
    # Top-level product category (see SubCategory for the second level).
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, db_index=True, unique=True)
    image = models.ImageField(upload_to='category', blank=True, null=True)

    class Meta:
        ordering = ['name']
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'

    def __str__(self):
        return self.name
class SubCategory(models.Model):
    # Second-level category; each subcategory belongs to exactly one Category.
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, db_index=True, unique=True)
    image = models.ImageField(upload_to='subcategory', blank=True, null=True)
    category = models.ForeignKey(Category)

    class Meta:
        ordering = ['name']
        verbose_name = 'Подкатегория'
        verbose_name_plural = 'Подкатегории'

    def __str__(self):
        return self.name
# Модель продукта
class Product(models.Model):
    # Shop item; linked to a SubCategory (nullable, so products may be
    # temporarily uncategorised).
    subcategory = models.ForeignKey(SubCategory, verbose_name="Категория", blank=True, null=True)
    name = models.CharField(max_length=200, db_index=True, verbose_name="Название")
    image = models.ImageField(upload_to='products', blank=True, null=True, verbose_name="Изображение товара")
    description = models.TextField(blank=True, verbose_name="Описание")
    price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Цена")
    stock = models.PositiveIntegerField(verbose_name="На складе")
    available = models.BooleanField(default=True, verbose_name="Доступен")
    created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
    updated = models.DateTimeField(auto_now=True, verbose_name="Обновлено")

    class Meta:
        ordering = ['name']
        # Composite index to speed up (id, name) lookups.
        index_together = [
            ['id', 'name']
        ]
        verbose_name = 'Продукт'
        verbose_name_plural = 'Продукты'

    def __str__(self):
        return self.name
class FeedBack(models.Model):
    # Minimal product feedback record (free-text comment per product).
    content = models.TextField()
    product = models.ForeignKey(Product)
    pass
class Customer(models.Model):
    # Shop profile attached one-to-one to Django's auth User.
    user = models.OneToOneField(User)
    user_name = models.CharField(max_length=200, default='', db_index=True, verbose_name="Name")
    #def __unicode__(self):
    #    return self.user
    # first_name = models.CharField(max_length=50, default=True, verbose_name='Имя')
    # last_name = models.CharField(max_length=50, default=True, verbose_name='Фамилия')
    # password = models.CharField(max_length=100, default=True)
    # phone = models.CharField(max_length=10, default=True, verbose_name='Телефон')
    # email = models.EmailField(default=True)
    # date_of_birth = models.DateField(default=True, verbose_name='Дата рождения')
    avatar = models.ImageField(upload_to='customer_avatar', blank=True, null=True, verbose_name="Avatar")
    created = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name="Создан")
    updated = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Обновлено")
    #orders =
    #reviews =
    #wishes =

    def __str__(self):
        return self.user_name

    class Meta:
        verbose_name = 'Профиль'
        verbose_name_plural = 'Профили'
#class Orders(models.Model):
#name = models.CharField(max_length=200, db_index=True, verbose_name="Заказы")
#quantity = models.PositiveIntegerField(verbose_name="Колличество")
#created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
#done = models.BooleanField(default=True, verbose_name="Выполнен")
#canceled = models.BooleanField(default=True, verbose_name="Отменен")
#orders = models.ForeignKey(Customer)
# def __str__(self):
# return self.name
#class Reviews(models.Model):
#name = models.CharField(max_length=200, db_index=True, verbose_name="Отзывы")
#product = models.ManyToManyField(Product)
#created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
#caption = models.CharField(max_length=200, db_index=True, verbose_name="Заголовок")
#text = models.TextField(blank=True, verbose_name="Текст отзыва")
# reviews = models.ForeignKey(Customer)
# ?? stars = models.CharField(max_length=5)
# def __str__(self):
# return self.name
| [
"andrey.ivanyutin@gmail.com"
] | andrey.ivanyutin@gmail.com |
964276026958767292e65a825344e9d65be28f17 | aedc785e2666674aa638e09b39f990956e01e546 | /src/testoob/running/processed_helper.py | ce9c99487e3afb02b805b3964c4a4035cd146d2f | [
"Apache-2.0"
] | permissive | callmewilko/testoob | 9be21f2b0d6b6a95cb6a0e14918c55e6e9c1593f | b71b53c15d1b0a736ab40dbad4255e0984968373 | refs/heads/master | 2020-04-04T08:00:35.098595 | 2018-11-05T16:41:10 | 2018-11-05T16:41:10 | 155,756,643 | 0 | 0 | null | 2018-11-01T18:20:24 | 2018-11-01T18:13:15 | Python | UTF-8 | Python | false | false | 1,616 | py | # Testoob, Python Testing Out Of (The) Box
# Copyright (C) 2005-2006 The Testoob Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Helper for processed running"
class ProcessedRunnerHelper:
"A helper class to make ProcessedRunner shorter and clearer."
def __init__(self, max_processes):
self._fixturesList = [[] for i in xrange(max_processes)]
self._load_balance_idx = 0
def register_fixture(self, fixture):
self._fixturesList[self._load_balance_idx].append(fixture)
self._load_balance_idx = (self._load_balance_idx + 1) % len(self._fixturesList)
def start(self, reporter):
from os import fork, pipe, fdopen, waitpid
from sys import exit
children = []
for processFixtures in self._fixturesList:
pid = fork()
if pid == 0:
self._run_fixtures(processFixtures, reporter)
exit()
children.append(pid)
for child in children:
waitpid(child, 0)
def _run_fixtures(self, fixtures, reporter):
[fixture(reporter) for fixture in fixtures]
| [
""
] | |
37c86035036c62d52190241df1ef24d041718a06 | 35212726c5c6d60eb48660068c962eeebea3353f | /utils/options/example.py | b1e24cce6a10d3ffa407ad7a1f94a33b54562f5a | [] | no_license | frankfralick/Charted | 5dfaa8c2e56f239c89d107ed5722d3d30cefa074 | 3ee6053549074da03f8c9881baf5b44d8d1c81ac | refs/heads/master | 2021-01-10T19:37:04.746769 | 2013-06-20T15:43:30 | 2013-06-20T15:43:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | """
Demonstration code used in, or while writing, the documentation.
"""
from options import Options, attrs, Unset
class ClassicShape(object):
    # Demonstrates the "classic" ways of layering option defaults without
    # the options library: class attributes, __init__ defaults, per-call
    # kwargs.  NOTE: Python 2 demo code (print statements).

    # Class-level fallbacks (only reachable via draw3's third `or` level).
    name = 'Shapes Rule!'
    color = 'purple'
    height = 50
    width = 50

    def __init__(self, name=None, color='white', height=10, width=10):
        self.name = name
        self.color = color
        self.height = height
        self.width = width

    def draw(self, **kwargs):
        # Per-call override via kwargs, falling back to instance values.
        name = kwargs.get('name', self.name)
        color = kwargs.get('color', self.color)
        height = kwargs.get('height', self.height)
        width = kwargs.get('width', self.width)
        print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)

    def draw2(self, name=None, color=None, height=None, width=None):
        # `or`-style fallback; note falsy values (0, '') also fall through.
        name = name or self.name
        color = color or self.color
        height = height or self.height
        width = width or self.width
        print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)

    def draw3(self, name=None, color=None, height=None, width=None):
        # Same as draw2 but with a third, class-level fallback layer.
        name = name or self.name or ClassicShape.name
        color = color or self.color or ClassicShape.color
        height = height or self.height or ClassicShape.height
        width = width or self.width or ClassicShape.width
        print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)
oldone = ClassicShape(name='one')
oldone.draw()
oldone.draw(color='red')
oldone.draw(color='green', width=22)
print "--"
oldone.draw2()
oldone.draw2(color='red')
oldone.draw2(color='green', width=22)
print "--"
oldone.draw3()
oldone.draw3(color='red')
oldone.draw3(color='green', width=22)
print '==='
def relative_meta(key):
    """Build a setter that treats string values as integer offsets relative
    to current[key]; non-string values pass through unchanged."""
    def setter(v, current):
        if isinstance(v, str):
            return int(v) + current[key]
        return v
    return setter
def relative(value, currently):
    """Interpret string values as integer offsets from *currently*; other
    values are returned as-is."""
    if not isinstance(value, str):
        return value
    return int(value) + currently
def relmath(value, currently):
    """Apply a relative-math spec to *currently*:

    '*n' multiplies, '/n' divides, any other string adds int(value);
    non-string values are returned unchanged.
    """
    if not isinstance(value, str):
        return value
    if value.startswith('*'):
        return currently * int(value[1:])
    if value.startswith('/'):
        return currently / int(value[1:])
    return currently + int(value)
class Shape(object):
    # Demo class for the options library: class-wide defaults that instances
    # and individual calls can layer over via options.push().
    options = Options(
        name = None,
        color = 'white',
        height = 10,
        width = 10,
    )
    # NOTE(review): magic() is called twice; the second call replaces the
    # first set of height/width setters with the relmath-based ones.
    options.magic(
        height = lambda v, cur: cur.height + int(v) if isinstance(v, str) else v,
        width = lambda v, cur: cur.height + int(v) + cur.width if isinstance(v, str) else v,
    )
    options.magic(
        height = lambda v, cur: relmath(v, cur.height),
        width = lambda v, cur: relmath(v, cur.width)
    )

    def __init__(self, **kwargs):
        # Per-instance option layer on top of the class defaults.
        self.options = Shape.options.push(kwargs)

    def _attrs(self, opts):
        # Render non-private option keys as "k=v" pairs.
        nicekeys = [ k for k in opts.keys() if not k.startswith('_') ]
        return ', '.join([ "{}={}".format(k, repr(opts[k])) for k in nicekeys ])

    def draw(self, **kwargs):
        # Per-call option layer; uses the library's attrs() formatter.
        opts = self.options.push(kwargs)
        print attrs(opts)

    def draw2(self, **kwargs):
        # Same as draw() but formatted with the local helper.
        opts = self.options.push(kwargs)
        print self._attrs(opts)

    def set(self, **kwargs):
        # Persistently update this instance's option layer.
        self.options.set(**kwargs)

    def is_tall(self, **kwargs):
        opts = self.options.push(kwargs)
        return opts.height > 100

    @options.magical('name')
    def capitalize_name(self, v, cur):
        # Magic setter: "some name" -> "Some Name" whenever name is assigned.
        return ' '.join(w.capitalize() for w in v.split())
one = Shape(name='one')
one.draw()
one.draw(color='red')
one.draw(color='green', width=22)
print '--'
Shape.options.set(color='blue')
one.draw()
one.draw(height=100)
one.draw(height=44, color='yellow')
print '---'
one.draw(width='+200')
one.draw()
print '----'
one.draw(width='*4', height='/2')
one.draw2(width='*4', height='/2')
print '-----'
one.set(width='*10', color='orange')
one.draw()
one.set(color=Unset)
one.draw()
print "------"
one.set(name='a shape')
one.draw() | [
"frankfralick@gmail.com"
] | frankfralick@gmail.com |
ff224afdc46082bd19994708a0dc8289239eb5e4 | 9bc0d33e1c3454393ea74d85b531801d6aa28a55 | /baselines/duet/test_ranking.py | 20ddb3c6a7f5158fc67751c3eb22e468eb15f604 | [
"MIT"
] | permissive | skallumadi/mnsrf_ranking_suggestion | 4c604ce5fc394c6d1d1efebb68af08bd2349c696 | 37cbf55d27e8595b990c0a66449e7bfe3027cc8c | refs/heads/master | 2021-01-25T14:03:23.465568 | 2017-10-09T06:40:10 | 2017-10-09T06:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | ###############################################################################
# Author: Wasi Ahmad
# Project: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/wwwfp0192-mitra.pdf
# Date Created: 7/23/2017
#
# File Description: This script evaluates test ranking performance.
###############################################################################
import torch, helper, util, data, os
from duet import DUET
from ranking_eval_functions import mean_average_precision, NDCG
args = util.get_args()
def compute_ranking_performance(model, test_batch, test_clicks, test_labels):
local_score = model.local_model(test_batch, test_clicks)
distributed_score = model.distributed_model(test_batch, test_clicks)
total_score = local_score + distributed_score
MAP = mean_average_precision(total_score, test_labels)
NDCG_at_1 = NDCG(total_score, test_labels, 1)
NDCG_at_3 = NDCG(total_score, test_labels, 3)
NDCG_at_10 = NDCG(total_score, test_labels, 5)
return MAP, NDCG_at_1, NDCG_at_3, NDCG_at_10
def test_ranking(model, test_batches):
num_batches = len(test_batches)
map, ndcg_1, ndcg_3, ndcg_10 = 0, 0, 0, 0
for batch_no in range(1, num_batches + 1):
test_queries, test_docs, test_labels = helper.batch_to_tensor(test_batches[batch_no - 1], model.dictionary,
model.config.max_query_length,
model.config.max_doc_length)
if model.config.cuda:
test_queries = test_queries.cuda()
test_docs = test_docs.cuda()
test_labels = test_labels.cuda()
ret_val = compute_ranking_performance(model, test_queries, test_docs, test_labels)
map += ret_val[0]
ndcg_1 += ret_val[1]
ndcg_3 += ret_val[2]
ndcg_10 += ret_val[3]
map = map / num_batches
ndcg_1 = ndcg_1 / num_batches
ndcg_3 = ndcg_3 / num_batches
ndcg_10 = ndcg_10 / num_batches
print('MAP - ', map)
print('NDCG@1 - ', ndcg_1)
print('NDCG@3 - ', ndcg_3)
print('NDCG@10 - ', ndcg_10)
if __name__ == "__main__":
dictionary = data.Dictionary(5)
dictionary.load_dictionary(args.save_path, 'vocab.csv', 5000)
model = DUET(dictionary, args)
if 'CUDA_VISIBLE_DEVICES' in os.environ:
cuda_visible_devices = [int(x) for x in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
if len(cuda_visible_devices) > 1:
model = torch.nn.DataParallel(model, device_ids=cuda_visible_devices)
if args.cuda:
model = model.cuda()
helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict')
print('Model and dictionary loaded.')
model.eval()
test_corpus = data.Corpus(args.data, 'session_test.txt', dictionary)
print('Test set size = ', len(test_corpus.data))
test_batches = helper.batchify(test_corpus.data, args.batch_size)
print('Number of test batches = ', len(test_batches))
test_ranking(model, test_batches)
| [
"wasiahmad@ucla.edu"
] | wasiahmad@ucla.edu |
5570cc0247c6ea3b0cdc3dc2629e40b676e7e7e7 | 02ec15c829f1755fb7981c561a40c8f4a968a028 | /corn/consumption.py | 5e852833107ff9c83d9df7d77a7a7a7763de6f50 | [
"MIT"
] | permissive | UGA-BSAIL/Corn-Segmentation | 0cf2b4beadf99c0f0ecf679264a50892188e6d14 | 79df856e3fc487508e24e9821e5ca49911064c73 | refs/heads/master | 2023-02-19T01:52:48.751927 | 2021-01-17T23:13:57 | 2021-01-17T23:13:57 | 250,403,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,312 | py | # -*- coding: utf-8 -*-
"""
@purpose: This file is used for batch detection of images using all models.
@input: Add all Model paths to "weights" list, and test image directory path "strDirectory".
@output: Masked images along with percent consumption will be saved under output/ directory. A matlab file will be created for all Prediction and Ground Truth values.
Created on Sun Dec 23 03:54:14 2018
@author: shrin
"""
import os
import sys
import numpy as np
import tensorflow as tf
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import save_image
#import 2 different classes
import corn_2class
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
CORN_DIR = os.path.join(ROOT_DIR, "datasets/corn")
config_2class= corn_2class.CornConfig()
CORN_DIR = os.path.join(ROOT_DIR, "datasets/corn")
class InferenceConfig(config_2class.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.8
config_2class = InferenceConfig()
config_2class.display()
dataset_2class = corn_2class.CornDataset()
dataset_2class.load_corn(CORN_DIR, "test")
dataset_2class.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_2class.image_ids), dataset_2class.class_names))
# Create model in inference mode
with tf.device("/gpu:0"):
model_2class = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config_2class)
def get_ax(rows=1, cols=1, size=16):
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
# Set path to corn weights file
#weights_path_2class = os.path.join(ROOT_DIR, "logs/appr1/2class_300im_600ep/mask_rcnn_corn_2class_0600.h5")
# Load weights
#print("Loading weights ", weights_path_3class)
#model_3class.load_weights(weights_path_3class, by_name=True)
#print("Loading weights ", weights_path_2class)
#model_2class.load_weights(weights_path_2class, by_name=True)
def get_cornList(r, n_classes, image) :
from collections import Counter
cornList = []
redCornList=[]
yellowCornList=[]
no_of_corns = no_of_red = no_of_yellow = 0
classes = r['class_ids']
masks = r['masks']
regions = r['rois']
cornMasks = []
redCornMasks = []
#print(regions)
#print(classes)
#print(masks.shape)
offset = round(((image.shape)[1])*0.075)
#print('Offset : ',offset)
class_detected = Counter(classes)
no_of_corns = class_detected[1]
if(n_classes == 2) :
no_of_red = class_detected[2]
elif(n_classes == 3) :
no_of_yellow = class_detected[2]
no_of_red = class_detected[3]
#print(no_of_corns, no_of_red, no_of_yellow)
for index, roi, class_id in zip(range(len(regions)), regions, classes):
mask = masks[:,:,index]
if(class_id == 1):
#print(mask.shape)
cornList.append({'cornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum()), 'redCorns' : [], 'yellowCorns' : []})
cornMasks.append(mask)
if(class_id == 2 and n_classes == 2) :
redCornList.append({'redCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
redCornMasks.append(mask)
elif(class_id == 2 and n_classes == 3) :
yellowCornList.append({'yellowCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
if(class_id == 3 and n_classes == 3) :
redCornList.append({'redCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
#redCornIdx = []
for corn in cornList:
corn_y1 = corn['cornRoi'][0] - offset
corn_x1 = corn['cornRoi'][1] - offset
corn_y2 = corn['cornRoi'][2] + offset
corn_x2 = corn['cornRoi'][3] + offset
corn_area = corn['mask_pixels']
eaten_area = 0
# print('RedCorns Before : ', corn['redCorns'])
for redCorn in redCornList:
if((corn_y1 <= redCorn['redCornRoi'][0]) and (corn_x1 <= redCorn['redCornRoi'][1])
and (corn_y2 >= redCorn['redCornRoi'][2]) and (corn_x2 >= redCorn['redCornRoi'][3])):
corn['redCorns'].append(redCorn)
eaten_area += redCorn['mask_pixels']
#redCornIdx.append(redCorn)
#redCornList.remove(redCorn)
percent_eaten = round((eaten_area / corn_area) * 100 , 3)
corn.update({'percent_eaten' : percent_eaten})
#print('RedCorns After : ', corn['redCorns'])
#redCornList = [e for e in redCornList if e not in redCornIdx]
# if len(redCornList) > 0 :
# print("There are ", len(redCornList) ," undetected corn cob present which are almost fully consumed.")
#print('RedCorns After : ', redCornList)
#print('Final CORNS : \n', cornList)
leftCorn={}
left_idx = len(cornList) - 1
if(len(cornList) > 1):
for corn_idx in range(len(cornList)):
corn = cornList[corn_idx]
corn_y1 = corn['cornRoi'][0]
corn_x1 = corn['cornRoi'][1]
corn_y2 = corn['cornRoi'][2]
corn_x2 = corn['cornRoi'][3]
height = corn_x2 - corn_x1
width = corn_y2 - corn_y1
replaceLeft = False
if(len(leftCorn) == 0):
replaceLeft = True
else :
if(height > width) :
if(corn_y1 < leftCorn['cornRoi'][0]) :
replaceLeft = True
elif(width > height) :
if(corn_x1 < leftCorn['cornRoi'][1]) :
replaceLeft = True
if replaceLeft:
leftCorn = corn
left_idx = corn_idx
cornList.pop(left_idx)
cornList.append(leftCorn)
if len(cornMasks) > 0:
ret_cornMasks = np.transpose(np.asarray(cornMasks),(1,2,0))
else:
ret_cornMasks = cornMasks
if len(redCornMasks) > 0:
ret_redCornMasks = np.transpose(np.asarray(redCornMasks),(1,2,0))
else:
ret_redCornMasks = redCornMasks
return cornList, ret_cornMasks, ret_redCornMasks
def compute_percent_est_accuracy(gt_percent_est, pred_percent_est, thresh):
if (gt_percent_est - thresh) <= pred_percent_est <= (gt_percent_est + thresh) :
error = 0
elif(gt_percent_est > pred_percent_est):
error = (gt_percent_est - thresh) - pred_percent_est
elif(gt_percent_est < pred_percent_est):
error = (gt_percent_est + thresh) - pred_percent_est
return (100 - math.fabs(error))
def compare_corns(cornList):
if cornList[0]['percent_eaten'] < cornList[1]['percent_eaten']:
return 1
else:
return 0
def compare_performance(gt_corns, pred_corns, left_eaten_count):
#make percent acc calculations
percent_est_accuracy = 0
for gt_corn, pred_corn in zip(gt_corns, pred_corns):
#make percent acc calculations
est_accuracy = compute_percent_est_accuracy(gt_corn['percent_eaten'], pred_corn['percent_eaten'], thresh=1.0)
pred_corn.update({'est_accuracy' : est_accuracy})
percent_est_accuracy += est_accuracy
percent_est_accuracy = percent_est_accuracy / len(pred_corns)
#make left vs right predictions
comparison_accuracy = 0
if(len(gt_corns) > 1 and len(pred_corns) > 1):
gt_left_eaten_more = compare_corns(gt_corns)
#print('gt_left_eaten_more : ' , gt_left_eaten_more)
pred_left_eaten_more = compare_corns(pred_corns)
#print('pred_left_eaten_more : ' , pred_left_eaten_more)
if(pred_left_eaten_more == 1) :
#print('Left corn has been eaten more than Right.')
left_eaten_count += 1
#else:
print('Right corn has been eaten more than Left.')
if(gt_left_eaten_more == pred_left_eaten_more):
comparison_accuracy = 1
else:
comparison_accuracy = 1
return percent_est_accuracy, left_eaten_count, comparison_accuracy
def compute_batch_ap(dataset, image_ids, verbose=1):
APs = []
mean_weight_iou = []
for image_id in image_ids:
try:
# Load image
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, config_2class,
image_id, use_mini_mask=False)
# Run object detection
results = model_2class.detect_molded(image[np.newaxis], image_meta[np.newaxis], verbose=0)
# Compute AP over range 0.5 to 0.95
r = results[0]
visualize.save_image(image, "test"+str(image_id), r['rois'], r['masks'],
r['class_ids'],r['scores'],['BG', 'Whole Corn','Bare Cob'],scores_thresh=0.8,mode=0, captions=None, show_mask=True)
gt_r = {"class_ids": gt_class_id,
"rois": gt_bbox,
"masks": gt_mask}
gt_corns, gt_corn_masks, gt_red_corn_masks = get_cornList(gt_r, 2, image)
# print('gt_mask size: ',gt_corn_masks.shape)
pred_corns, pred_cornMasks, pred_redCornMasks = get_cornList(r, 2, image)
#print(pred_corns)
print(image_id, "Image" , os.path.basename(dataset_2class.source_image_link(image_id)))
print(image_id, 'percent_eaten_gt', gt_corns[1]['percent_eaten'])
print(image_id, 'percent_eaten_pred', pred_corns[1]['percent_eaten'])
print(image_id, 'percent_eaten_gt', gt_corns[0]['percent_eaten'])
print(image_id, 'percent_eaten_pred', pred_corns[0]['percent_eaten'])
print("*****************************************************************")
images.append(os.path.basename(dataset_2class.source_image_link(image_id)))
gt_one.append(gt_corns[1]['percent_eaten'])
pred_one.append(pred_corns[1]['percent_eaten'])
gt_two.append(gt_corns[0]['percent_eaten'])
pred_two.append(pred_corns[0]['percent_eaten'])
except:
print("image Id :", image_id)
print(sys.exc_info())
ap = 0
APs.append(ap)
if verbose:
info = dataset.image_info[image_id]
meta = modellib.parse_image_meta(image_meta[np.newaxis,...])
print("{:3} {} AP: {:.2f}".format(
meta["image_id"][0], meta["original_image_shape"][0], ap))
pass
return APs
weights = []
#logs 50 image
weights.append("/home/ssa49593/Mask_RCNN/logs/50im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/50im_5/mask_rcnn_corn_2class_0600.h5")
#logs 100 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/100im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_5/mask_rcnn_corn_2class_0600.h5")
#logs 150 image
weights.append("/work/cylilab/Mask_RCNN/logs/150im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/150im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/150im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/150im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/150im_5/mask_rcnn_corn_2class_0600.h5")
#logs 200 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/200im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/200im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_5/mask_rcnn_corn_2class_0600.h5")
#logs 250 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_5/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/250im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/appr1/2class_250im_600ep/mask_rcnn_corn_2class_0600.h5")
#logs 300 logs
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/300im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/300im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/300im_4/mask_rcnn_corn_2class_0600.h5")
import scipy.io
import numpy as np
# Run on test set
for weights_path in weights:
# Load weights
#print("Loading weights ", weights_path_3class)
#model_3class.load_weights(weights_path_3class, by_name=True)
images = []
gt_one = []
pred_one = []
gt_two = []
pred_two = []
#weights_path = os.path.join(ROOT_DIR, "logs/appr1/2class_050im_600ep/50im_2/mask_rcnn_corn_2class_0600.h5")
print("Loading weights ", weights_path)
model_2class.load_weights(weights_path, by_name=True)
APs = compute_batch_ap(dataset_2class, dataset_2class.image_ids[5:6])
filename = weights_path[0:len(weights_path)-29] + "PRCurve.mat"
scipy.io.savemat(filename, mdict={'ImageIds': images, 'GT_Left': gt_one, 'Pred_Left': pred_one, 'GT_Right': gt_two, 'Pred_Right': pred_two})
break | [
"noreply@github.com"
] | UGA-BSAIL.noreply@github.com |
8dcc2947e1a739ffad867c6bf674d20d81008c49 | 0abd812a50ba3330734fcbb0088a74c5ad6735a2 | /python/utf8_for_emojis.py | 695f4f879e0986f5202ac4876ea2878fd0bf97aa | [] | no_license | scMarth/Learning | a914af6f6327454234e5f98dfc8cf95d6d4f8077 | ae696461c2c8edc9944879503cce01d525cf4ce0 | refs/heads/master | 2023-08-03T05:13:03.162533 | 2023-07-28T22:58:51 | 2023-07-28T22:58:51 | 120,689,926 | 2 | 0 | null | 2022-12-11T13:14:07 | 2018-02-08T00:33:42 | JavaScript | UTF-8 | Python | false | false | 3,984 | py | # convert json to csv
import arcpy, os, shutil, numpy, json, codecs
fields = {
'request' : [ \
'id', \
'master', \
'addDate', \
'addDateUnix', \
'lastAction', \
'lastActionUnix', \
'dept', \
'displayDate', \
'displayLastAction', \
'status', \
'streetId', \
'streetName', \
'streetNum', \
'crossStreetId', \
'crossStreetName', \
'cityId', \
'cityName', \
'district', \
'comments', \
'privateNotes', \
'submitter', \
'typeId', \
'typeName', \
'priorityValue', \
'latitude', \
'longitude', \
'aggregatorId', \
'aggregatorInfo', \
'origin', \
'priorityToDisplay' \
],
'activity' : [ \
'actDate', \
'actDateUnix', \
'attachments', \
'code', \
'codeDesc', \
'comments', \
'displayDate', \
'id', \
'notify', \
'requestId', \
'routeId', \
'user', \
'files', \
'isEditable' \
],
'attachment' : [ \
'createDateUnix', \
'createDate', \
'fileName', \
'id', \
'parent', \
'parentType', \
'size', \
'user' \
],
'submitter' : [ \
'id', \
'firstName', \
'lastName', \
'middleInitial', \
'address', \
'address2', \
'city', \
'state', \
'zip', \
'email', \
'phone', \
'phoneExt', \
'altPhone', \
'altPhoneExt', \
'password', \
'aggregatorId', \
'verified', \
'banned', \
'twitterId', \
'twitterScreenName', \
'notifyEmail', \
'notifyPhone', \
'notifyAltPhone', \
'notifyMail', \
'notifyPush', \
'notifyPhoneSms', \
'notifyAltPhoneSms' \
]
}
def escaped(inputStr):
# return inputStr
return inputStr.translate(str.maketrans({ \
# "]": r"\]", \
# "^": r"\^", \
# "$": r"\$", \
# "*": r"\*", \
# ".": r"\.", \
# "/": r"\/",\
# so far, I've seen carriage returns, line feeds, and double-quotes that can mess up records. '\'' is escaped just in case
"\r": r"\r", \
"\n": r"\n", \
"\\": r"\\", \
'\"': r'\"' \
}))
# reads a json file path then creates a fgdb for that json file in 'workspace'
# the json file contains json data that is returned from the requests/dump method
def write_json_file_to_csv(workspace, json_path):
with open(json_path) as json_file:
data = json.load(json_file)
for key in data:
if key == 'deleted':
continue
output_filepath = workspace + r'\\' + key.upper() + '.csv'
print('Writing' + output_filepath)
# delete file if it exists
if os.path.exists(output_filepath):
os.unlink(output_filepath)
with codecs.open(output_filepath, 'w', encoding='utf8') as file:
# write header
for i in range(len(fields[key]) - 1):
file.write(escaped(fields[key][i]) + ',')
file.write(escaped(fields[key][-1]) + '\n')
# write records
for i in range(len(data[key])):
record = data[key][i]
# print(record)
for j in range(len(fields[key]) - 1):
# print(j)
file.write('"' + escaped(str(record[fields[key][j]])) + '",')
file.write('"' + escaped(str(record[fields[key][-1]])) + '"\n')
print('{} records written.\n'.format(len(data[key])))
workspace = os.path.dirname(__file__) + r'\request_data'
write_json_file_to_csv(workspace, workspace + r'\response.json') | [
"vlantaca@gmail.com"
] | vlantaca@gmail.com |
2b8f12babeff6dcb5935f88b5bbc52db63205ad7 | 3b1a27c72024dc6ac932b39df28d2fb3a6e26a5b | /22_sum_floats/sum_floats.py | ac5da9e7fe7bc8d0012faf56b9f8f6f6c2894334 | [] | no_license | petermoyano/py_ds | 423f95dd5ae308343e52db9a7178936062c8ce36 | 3b1ff9880a8a1c08ee1061c2f239be31c167a0fc | refs/heads/master | 2023-07-18T05:43:07.387039 | 2021-09-07T23:21:58 | 2021-09-07T23:21:58 | 402,805,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | def sum_floats(nums):
"""Return sum of floating point numbers in nums.
>>> sum_floats([1.5, 2.4, 'awesome', [], 1])
3.9
>>> sum_floats([1, 2, 3])
0
"""
# hint: to find out if something is a float, you should use the
# "isinstance" function --- research how to use this to find out
# if something is a float!
sol = [num for num in nums if isinstance(num, float)]
sum = 0
for num in sol:
sum += num
return sum | [
"pedromoyano454@gmail.com"
] | pedromoyano454@gmail.com |
1c9e3b879141282edd5569d79e16594bb83d4f29 | f51ac19ce4d1df15eba02c4b3481533087d5ef9e | /day03/xiaohuar/start.py | 06058cbe787a1bb3530230ff1fa09be09169f548 | [] | no_license | disenQF/xpy903_scrapy | c9e0818f4ad08614f933ec800d680439e3f22ea6 | 7fd1f89f2cbf046b59774071c48801dfc3c5b54d | refs/heads/master | 2022-08-09T13:53:10.104037 | 2019-09-27T09:06:15 | 2019-09-27T09:06:15 | 210,261,888 | 1 | 0 | null | 2022-07-29T22:35:50 | 2019-09-23T04:05:10 | Python | UTF-8 | Python | false | false | 156 | py | #!/usr/bin/python3
# coding: utf-8
from scrapy import cmdline
if __name__ == '__main__':
cmdline.execute(['scrapy', 'crawl', 'hua', '-o', 'hua.json']) | [
"610039018@qq.com"
] | 610039018@qq.com |
95141bfe82d59cd91b74d094bbf932c628c5c5be | 04d76de80ac3d57c6b7428cfed3d86f85cef3ab5 | /Week-4-Good-Programming-Practices/Problem_Set_4/Problem 2 - Dealing with Hands.py | 89c250947d83b8c8b29ec2b5484cb6273e1737f9 | [] | no_license | ojwills/MIT-6.00.1x-Intro-to-CS-and-Python | 2d1e7bfff3af302ffb7377528e81c5e423dd36aa | e92a8421e8967d4f482868334bdb16e0aaf0fea8 | refs/heads/master | 2022-06-24T19:36:13.595243 | 2020-05-09T16:57:18 | 2020-05-09T16:57:18 | 139,490,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | #Problem 2 - Dealing with Hands
#10.0/10.0 points (graded)
#**Please read this problem entirely!!** The majority of this problem consists of learning how to read code, which is an incredibly useful and important skill. At the end, you will implement a short function. Be sure to take your time on this problem - it may seem easy, but reading someone else's code can be challenging and this is an important exercise.
#
#
#Representing hands
#A hand is the set of letters held by a player during the game. The player is initially dealt a set of random letters. For example, the player could start out with the following hand: a, q, l, m, u, i, l. In our program, a hand will be represented as a dictionary: the keys are (lowercase) letters and the values are the number of times the particular letter is repeated in that hand. For example, the above hand would be represented as:
#
#hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
#Notice how the repeated letter 'l' is represented. Remember that with a dictionary, the usual way to access a value is hand['a'], where 'a' is the key we want to find. However, this only works if the key is in the dictionary; otherwise, we get a KeyError. To avoid this, we can use the call hand.get('a',0). This is the "safe" way to access a value if we are not sure the key is in the dictionary. d.get(key,default) returns the value for key if key is in the dictionary d, else default. If default is not given, it returns None, so that this method never raises a KeyError. For example:
#
#>>> hand['e']
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#KeyError: 'e'
#>>> hand.get('e', 0)
#0
#Converting words into dictionary representation
#One useful function we've defined for you is getFrequencyDict, defined near the top of ps4a.py. When given a string of letters as an input, it returns a dictionary where the keys are letters and the values are the number of times that letter is represented in the input string. For example:
#
#>>> getFrequencyDict("hello")
#{'h': 1, 'e': 1, 'l': 2, 'o': 1}
#As you can see, this is the same kind of dictionary we use to represent hands.
#
#Displaying a hand
#Given a hand represented as a dictionary, we want to display it in a user-friendly way. We have provided the implementation for this in the displayHand function. Take a few minutes right now to read through this function carefully and understand what it does and how it works.
#
#Generating a random hand
#The hand a player is dealt is a set of letters chosen at random. We provide you with the implementation of a function that generates this random hand, dealHand. The function takes as input a positive integer n, and returns a new object, a hand containing n lowercase letters. Again, take a few minutes (right now!) to read through this function carefully and understand what it does and how it works.
#
#Removing letters from a hand (you implement this)
#The player starts with a hand, a set of letters. As the player spells out words, letters from this set are used up. For example, the player could start out with the following hand: a, q, l, m, u, i, l. The player could choose to spell the word quail . This would leave the following letters in the player's hand: l, m. Your task is to implement the function updateHand, which takes in two inputs - a hand and a word (string). updateHand uses letters from the hand to spell the word, and then returns a copy of the hand, containing only the letters remaining. For example:
#
#>>> hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
#>>> displayHand(hand) # Implemented for you
#a q l l m u i
#>>> hand = updateHand(hand, 'quail') # You implement this function!
#>>> hand
#{'a':0, 'q':0, 'l':1, 'm':1, 'u':0, 'i':0}
#>>> displayHand(hand)
#l m
#Implement the updateHand function. Make sure this function has no side effects: i.e., it must not mutate the hand passed in. Before pasting your function definition here, be sure you've passed the appropriate tests in test_ps4a.py.
def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
r = dict(hand)
for letter in word:
if letter in r.keys():
r[letter] -= 1
return r
#Correct | [
"noreply@github.com"
] | ojwills.noreply@github.com |
17e914aac8110ab19e8448f67594dcc2b1be380c | cee96536d5115a20bd271d7ff5626da496197ac6 | /test_coco.py | ce245527e8ec25e646dbf982ae9dda955ca58fb4 | [] | no_license | YaojwDefgun/new-YOLOv1_PyTorch | 0855a8b0dcf8960057ccf82dcf341f480069a789 | f81b1b033fe2ad9a62bd61ad0bab0f47a4463f42 | refs/heads/master | 2023-01-03T21:28:34.243705 | 2020-10-22T12:21:31 | 2020-10-22T12:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,138 | py | import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from data.cocodataset import *
from data import config, BaseTransform, VOCAnnotationTransform, VOCDetection, VOC_ROOT, VOC_CLASSES
import numpy as np
import cv2
import time
from decimal import *
parser = argparse.ArgumentParser(description='YOLO Detection')
parser.add_argument('-v', '--version', default='yolo',
help='yolo.')
parser.add_argument('-d', '--dataset', default='COCO_val',
help='we use VOC, COCO_val, COCO_test-dev, to test.')
parser.add_argument('-bk', '--backbone', type=str, default='r18',
help='r18, r50, d19')
parser.add_argument('--trained_model', default='weights/coco/',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--visual_threshold', default=0.3, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to test model')
parser.add_argument('--dataset_root', default='/home/k303/object-detection/dataset/COCO/',
help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str,
help="Dummy arg so we can load in Jupyter Notebooks")
parser.add_argument('--debug', action='store_true', default=False,
help='debug mode where only one image is trained')
args = parser.parse_args()
coco_class_labels = ('background',
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella',
'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
coco_class_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,
70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def test_net(net, device, testset, transform, thresh, mode='voc'):
class_color = [(np.random.randint(255),np.random.randint(255),np.random.randint(255)) for _ in range(80)]
num_images = len(testset)
for index in range(num_images):
print('Testing image {:d}/{:d}....'.format(index+1, num_images))
if args.dataset == 'COCO_val' or args.dataset == 'COCO-test' or args.dataset == 'COCO_test-dev':
img, _ = testset.pull_image(index)
elif args.dataset == 'VOC':
img = testset.pull_image(index)
# img_id, annotation = testset.pull_anno(i)
x = torch.from_numpy(transform(img)[0][:, :, (2, 1, 0)]).permute(2, 0, 1)
x = x.unsqueeze(0).to(device)
t0 = time.clock()
y = net(x) # forward pass
detections = y
print("detection time used ", Decimal(time.clock()) - Decimal(t0), "s")
# scale each detection back up to the image
scale = np.array([[img.shape[1], img.shape[0],
img.shape[1], img.shape[0]]])
bbox_pred, scores, cls_inds = detections
# map the boxes to origin image scale
bbox_pred *= scale
for i, box in enumerate(bbox_pred):
cls_indx = cls_inds[i]
xmin, ymin, xmax, ymax = box
if scores[i] > thresh:
box_w = int(xmax - xmin)
cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), class_color[int(cls_indx)], 2)
cv2.rectangle(img, (int(xmin), int(abs(ymin)-15)), (int(xmin+box_w*0.55), int(ymin)), class_color[int(cls_indx)], -1)
cls_id = coco_class_index[int(cls_indx)]
cls_name = coco_class_labels[cls_id]
mess = '%s: %.3f' % (cls_name, scores[i])
cv2.putText(img, mess, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 2)
cv2.imshow('detection', img)
cv2.waitKey(0)
# print('Saving the' + str(index) + '-th image ...')
# cv2.imwrite('test_images/' + args.dataset+ '3/' + str(index).zfill(6) +'.jpg', img)
def test():
    """Entry point: build the dataset and model selected by the CLI args,
    load the trained weights, and run visual evaluation via `test_net`.

    Raises:
        SystemExit: if args.dataset or args.version is not recognised.
    """
    # get device
    if args.cuda:
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    # load net
    num_classes = 80
    if args.dataset == 'COCO_val':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
                    data_dir=args.dataset_root,
                    json_file='instances_val2017.json',
                    name='val2017',
                    img_size=cfg['min_dim'][0],
                    debug=args.debug)
    elif args.dataset == 'COCO_test-dev':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
                    data_dir=args.dataset_root,
                    json_file='image_info_test-dev2017.json',
                    name='test2017',
                    img_size=cfg['min_dim'][0],
                    debug=args.debug)
    elif args.dataset == 'VOC':
        cfg = config.voc_af
        input_size = cfg['min_dim']
        testset = VOCDetection(VOC_ROOT, [('2007', 'test')], None, VOCAnnotationTransform())
    else:
        # BUGFIX: an unrecognised dataset previously fell through, leaving
        # `testset`/`input_size` unbound and crashing later with a confusing
        # NameError. Fail fast with a clear message instead.
        raise SystemExit('Unknown dataset: %s' % args.dataset)
    # build model
    if args.version == 'yolo':
        from models.yolo import myYOLO
        net = myYOLO(device, input_size=input_size, num_classes=num_classes, trainable=False)
        print('Let us test YOLO on the %s dataset ......' % (args.dataset))
    else:
        print('Unknown Version !!!')
        # raise SystemExit instead of exit(): exit() is provided by the site
        # module and is not guaranteed outside interactive sessions.
        raise SystemExit
    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')
    # evaluation
    test_net(net, device, testset,
             BaseTransform(net.input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
             thresh=args.visual_threshold)
# Script entry point: run detection visualisation when executed directly.
if __name__ == '__main__':
    test()
"1394571815@qq.com"
] | 1394571815@qq.com |
d5ccdd17e5f6a90bc2827ddfac825f3f325a9b19 | 11ef04d5323d2972429adc93ca3795f4c9b3ca35 | /blog/views.py | cbf3e15b60fcf326667bb452194741102230c997 | [] | no_license | ZveRuss/my-blog | b976895cc89f2896c39cfb45c2e4bcb13f4a3393 | 46913b368d13d6a724ae25278d3b43d3695c02ca | refs/heads/master | 2020-04-15T16:44:39.905856 | 2019-01-11T08:59:27 | 2019-01-11T08:59:27 | 164,848,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from django.shortcuts import render
from .models import Post
from django.utils import timezone
# Create your views here.
def post_list(request):
    """Render every post whose publish date has passed, oldest first."""
    published = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published})
votes = models.IntegerField(default=0)
| [
"jack8644@yandex.ru"
] | jack8644@yandex.ru |
544f012ed613c50b88a731844aa93e3c38e64a57 | 79047f578878605269c454b05a43e7fb085dbe48 | /fairseq/playaround.py | 76821ecd0a59bace39acbd2cc08c31c94be313c2 | [
"MIT"
] | permissive | PANhuihuihuihui/NLP | 463249d7a7e374cf157096785363becd5da850eb | 9b00d54ad3e64355f02feeb4f045cacf7fca0bc9 | refs/heads/main | 2023-06-06T03:55:36.571861 | 2021-06-29T06:12:48 | 2021-06-29T06:12:48 | 330,099,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | import pandas as pd
import numpy as np
df = pd.read_csv('../input/alldata.csv') | [
"phjhk@connect.hku.hk"
] | phjhk@connect.hku.hk |
954aed060f4c9eddef7723e818e80e360517b911 | 7d6cb605a02dff3031da0bc6a334fdf9f0579412 | /Project/nn trim and preprocessed/muti_layer_nn.py | ec1d62419cb8c0181e7fe1dd5b192f402c5f26f1 | [] | no_license | 18369766918/Matthew_Project | f98fc4aec156e8920521863b25abf805649cf5c5 | 938a5b20865f842ef8695b2a0413ece31766fe00 | refs/heads/master | 2020-12-30T16:58:47.804504 | 2017-05-12T21:16:00 | 2017-05-12T21:16:00 | 91,042,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,203 | py | import tensorflow as tf
import numpy as np
from math import exp
from LoadDataSet import load_and_process_training_Data,load_and_process_test_data
# Seed TF's graph-level RNG so random weight initialisation is reproducible.
tf.set_random_seed(0)
# Get all pre processed data.
# Load training and test data; the commented lines are the alternate
# target/non-target source files kept for reference.
#train_x,train_y= load_and_process_training_Data('targfeatures_train.txt','nontargetFeatures_train.txt')
#test_x,test_y = load_and_process_test_data('testfeatures.txt','testlabels.txt')
train_x,train_y= load_and_process_training_Data('trainfeatures.txt','trainlabels.txt')
test_x,test_y = load_and_process_test_data('testfeatures.txt','testlabels.txt')
# ---- Hyper-parameters for the NN model ----
# Checkpoint path where the trained network is saved and later restored.
save_path = "nn_saved_model/model_compress_samenode/model.ckpt"
# Two output classes: the NN decides whether a sample is target signal
# ([1,0]) or non-target signal ([0,1]).
n_classes = 2
# Width (number of units) of each of the three hidden layers.
n_nodes_hl1 = 100
n_nodes_hl2 = 100
n_nodes_hl3 = 100
# Number of full passes over the training data.
num_epochs = 100
# Mini-batch size; training feeds batch_size rows at a time to bound memory use.
batch_size = 100
# ---- Graph placeholders ----
# Input features: one row per example, len(train_x[0]) features each.
x = tf.placeholder(tf.float32, [None,len(train_x[0])])
# One-hot labels: [1,0] for target, [0,1] for non-target.
Y_C = tf.placeholder(tf.int8, [None, n_classes])
# Learning rate fed per epoch (decayed inside the training loop).
lr = tf.placeholder(tf.float32)
# neural network model
def neural_network_model(data):
    """Build the feed-forward graph: three ReLU hidden layers followed by a
    linear output layer. Returns the unscaled class logits for `data`.

    Variables are created in the same order as before (h1, h2, h3, output)
    so TF's auto-generated variable names — and saved checkpoints — line up.
    """
    num_features = len(train_x[0])
    # Hidden biases start at a small positive value (0.1) so ReLU units
    # begin in their active region instead of dead at zero.
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([num_features, n_nodes_hl1])),
                      'bias': tf.Variable(tf.ones([n_nodes_hl1]) / 10)}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'bias': tf.Variable(tf.ones([n_nodes_hl2]) / 10)}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'bias': tf.Variable(tf.ones([n_nodes_hl3]) / 10)}
    # The output layer is linear: no bias trickery and no activation here.
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'bias': tf.Variable(tf.zeros([n_classes]))}
    # Chain the hidden layers: affine transform followed by ReLU each time.
    activation = data
    for layer in (hidden_1_layer, hidden_2_layer, hidden_3_layer):
        activation = tf.nn.relu(tf.matmul(activation, layer['weights']) + layer['bias'])
    # Raw logits; softmax is applied by the loss during training.
    return tf.matmul(activation, output_layer['weights']) + output_layer['bias']
# Training driver: builds the graph, optimizes, evaluates, and checkpoints.
def train_neural_network(x):
    """Train the network with Adam + per-epoch exponential LR decay, then
    evaluate on the test set, dump predictions, and save a checkpoint.

    Args:
        x: the module-level input placeholder (the call site passes the
           same `x` tensor defined above; the parameter shadows it here).
    """
    # Logits produced by the feed-forward network for the fed batch.
    Ylogits = neural_network_model(x)
    # Mean softmax cross-entropy against the one-hot labels — the quantity minimized.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_C))
    # Adam with a placeholder learning rate so it can be decayed each epoch.
    # optimizer = tf.train.GradientDescentOptimizer(0.003)
    optimizer = tf.train.AdamOptimizer(lr)
    train_step = optimizer.minimize(cross_entropy)
    # start the session
    with tf.Session() as sess:
        # All variables must be initialized before any graph evaluation.
        sess.run(tf.global_variables_initializer())
        # One epoch = one full pass over the training data.
        for epoch in range(num_epochs):
            # Sum of batch losses this epoch; a declining value means progress.
            epoch_loss=0
            i=0
            # Exponential learning-rate decay from 0.003 towards 0.0001.
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            decay_speed = 150
            learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * exp(-epoch/decay_speed)
            # Walk the training set in mini-batches to bound memory use.
            while i < len(train_x):
                # Slice out the next batch of features and labels.
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                train_data = {x: batch_x, Y_C: batch_y, lr: learning_rate}
                # train
                # sess.run(train_step,feed_dict=train_data)
                # Run one optimization step and fetch the batch loss in one call.
                _, c = sess.run([train_step, cross_entropy], feed_dict=train_data)
                epoch_loss += c
                i+=batch_size
            print('Epoch', epoch, 'completed out of',num_epochs,'loss:',epoch_loss)
        # ---- Evaluation on the held-out test set ----
        test_data = {x:test_x, Y_C:test_y}
        # Accuracy = fraction of rows whose argmax matches the one-hot label.
        correct_prediction = tf.equal(tf.argmax(Ylogits, 1), tf.argmax(Y_C, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        print('Accuracy:',accuracy.eval(test_data))
        # Predicted class per test row (index of the maximum logit).
        result = (sess.run(tf.argmax(Ylogits.eval(feed_dict=test_data),1)))
        # Convert one-hot test labels back to class indices (0 = target, 1 = non-target).
        answer = []
        for i in range(len(test_y)):
            if test_y[i] == [0,1]:
                answer.append(1)
            elif test_y[i]==[1,0]:
                answer.append(0)
        answer = np.array(answer)
        printResultandCorrectMatrix(result,answer)
        np.savetxt('nn_prediction.txt', Ylogits.eval(feed_dict={x: test_x}), delimiter=',',newline="\r\n")
        # Persist all variables so the model can be restored later
        # (see test_loaded_neural_network).
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        print("Model saved in file: %s" % save_path)
# Restore the trained network from disk and evaluate it on the test set.
def test_loaded_neural_network():
    """Rebuild the graph, restore weights from `save_path`, dump raw logits
    to nn_prediction.txt, and print accuracy plus the result/answer report."""
    # Rebuilding the graph recreates variables in the same order, so the
    # checkpoint's variable names line up with these fresh variables.
    Ylogits = neural_network_model(x)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore all saved variables into the freshly built graph.
        saver.restore(sess, save_path)
        print("Loading variables from ‘%s’." % save_path)
        np.savetxt('nn_prediction.txt', Ylogits.eval(feed_dict={x: test_x}), delimiter=',',newline="\r\n")
        # Accuracy = fraction of rows whose argmax matches the one-hot label.
        correct_prediction = tf.equal(tf.argmax(Ylogits, 1), tf.argmax(Y_C, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        print('Accuracy:',accuracy.eval({x:test_x, Y_C:test_y}))
        # Predicted class per test row (index of the maximum logit).
        result = (sess.run(tf.argmax(Ylogits.eval(feed_dict={x:test_x}),1)))
        # Convert one-hot test labels back to class indices (0 = target, 1 = non-target).
        answer = []
        for i in range(len(test_y)):
            if test_y[i] == [0,1]:
                answer.append(1)
            elif test_y[i]==[1,0]:
                answer.append(0)
        answer = np.array(answer)
        printResultandCorrectMatrix(result,answer)
        print(Ylogits.eval(feed_dict={x: test_x}).shape)
def printResultandCorrectMatrix(result, answer):
    """Report the prediction vector, per-class counts, the ground-truth
    vector, and how many class-0 (target) predictions match the answer."""
    print("Result matrix: ")
    print(result)
    # Tally predictions per class: 0 = positive/target, 1 = negative/non-target.
    preds = np.asarray(result)
    n_positive = int(np.count_nonzero(preds == 0))
    n_negative = int(np.count_nonzero(preds == 1))
    print("Positive count ", n_positive)
    print("Negative count ", n_negative)
    print("Answer matrix: ")
    print(answer)
    # Count positions where both the label and the prediction are class 0.
    hits = sum(
        1
        for j in range(len(answer))
        if answer[j] == 0 and result[j] == 0
    )
    print("Correct match labels is ", hits)
''' plot result
def plotGraph(s,prediction):
import matplotlib.pyplot as plt
xx = [v[0] for v in test_x]
yy = [v[1] for v in test_y]
x_min, x_max = min(xx) - 0.5, max(xx) + 0.5
y_min, y_max = min(yy) - 0.5, max(yy) + 0.5
xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
pts = np.c_[xxx.ravel(), yyy.ravel()].tolist()
# ---> Important
z = s.run(tf.argmax(prediction, 1), feed_dict = {x: pts})
z = np.array(z).reshape(xxx.shape)
plt.pcolormesh(xxx, yyy, z)
plt.scatter(xx, yy, c=['r' if v[0] == 1 else 'b' for v in y_data], edgecolor='k', s=50)
plt.show()
'''
# Evaluate using the previously saved checkpoint; uncomment the first line
# (and comment the second) to retrain the model from scratch instead.
#train_neural_network(x)
test_loaded_neural_network()
| [
"matthew@desktop-jo4saar.algomau.auc.ca"
] | matthew@desktop-jo4saar.algomau.auc.ca |
93fe75d32ccb18339ef6ff1b37d1cfbe0b3c0c1e | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/dlymuh001/question2.py | 34d73fd549c0a400164a5301a2e7cc2b38ba5c3b | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | def cat():
    # Body of cat(): decide edibility based on whether the cat licked the
    # food and, if so, whether the cat is healthy.
    lick = input("Did the cat lick it? (yes/no)\n")
    if (lick == "yes"):
        # A healthy cat's lick is fine; an unhealthy one makes it a judgement call.
        healthy = input("Is your cat healthy? (yes/no)\n")
        if (healthy == "yes"):
            return "Eat it"
        elif (healthy == "no"):
            return "Your call"
    elif (lick == "no"):
        return "Eat it"
    # NOTE(review): any answer other than yes/no falls through and returns None.
# Interactive decision tree for the "30 second rule": a chain of yes/no
# questions that sets `decision` to "Eat it", "Your call", or "Don't eat it".
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
decision = ""
seen = input("Did anyone see you? (yes/no)\n")
# Branch 1: somebody saw the food drop.
if (seen == "yes"):
    person = input("Was it a boss/lover/parent? (yes/no)\n")
    if (person == "yes"):
        expensive = input("Was it expensive? (yes/no)\n")
        if (expensive == "yes"):
            cut_off = input("Can you cut off the part that touched the floor? (yes/no)\n")
            if (cut_off == "yes"):
                decision = "Eat it"
            elif (cut_off == "no"):
                decision = "Your call"
        elif (expensive == "no"):
            chocolate = input("Is it chocolate? (yes/no)\n")
            if (chocolate == "yes"):
                decision = "Eat it"
            elif (chocolate == "no"):
                decision = "Don\'t eat it"
    elif (person == "no"):
        decision = "Eat it"
# Branch 2: nobody saw it — decide by stickiness and what/who is involved.
elif (seen == "no"):
    sticky = input("Was it sticky? (yes/no)\n")
    if (sticky == "yes"):
        raw_steak = input("Is it a raw steak? (yes/no)\n")
        if (raw_steak == "yes"):
            puma = input("Are you a puma? (yes/no)\n")
            if (puma == "yes"):
                decision = "Eat it"
            elif (puma == "no"):
                decision = "Don\'t eat it"
        elif (raw_steak == "no"):
            # Defer to the cat sub-decision defined above.
            decision = cat()
    elif (sticky == "no"):
        emausaurus = input("Is it an Emausaurus? (yes/no)\n")
        if (emausaurus == "yes"):
            megalosaurus = input("Are you a Megalosaurus? (yes/no)\n")
            if (megalosaurus == "yes"):
                decision = "Eat it"
            elif (megalosaurus == "no"):
                decision = "Don\'t eat it"
        elif (emausaurus == "no"):
            # Defer to the cat sub-decision defined above.
            decision = cat()
# Output the final decision.
print ("Decision:", decision, sep = " ", end = ".")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.