repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/CIF_2016/non_moving_window/without_stl_decomposition/create_o12_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/CIF_2016/non_moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 12,
train_file_path='../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_12.txt',
validate_file_path='../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_12v.txt',
test_file_path='../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_test_12.txt',
binary_train_file_path=output_path + 'cif_12.tfrecords',
binary_validation_file_path=output_path + 'cif_12v.tfrecords',
binary_test_file_path=output_path + 'cif_test_12.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,143 | 51 | 125 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/CIF_2016/non_moving_window/without_stl_decomposition/create_o6_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/CIF_2016/non_moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 6,
train_file_path = '../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_6.txt',
validate_file_path = '../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_6v.txt',
test_file_path = '../../../../datasets/text_data/CIF_2016/non_moving_window/without_stl_decomposition/cif_test_6.txt',
binary_train_file_path = output_path + 'cif_6.tfrecords',
binary_validation_file_path = output_path + 'cif_6v.tfrecords',
binary_test_file_path = output_path + 'cif_test_6.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,148 | 51.227273 | 126 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/NN5/moving_window/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/NN5/moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
input_size = 9,
output_size = 56,
train_file_path = '../../../datasets/text_data/NN5/moving_window/nn5_stl_56i9.txt',
validate_file_path = '../../../datasets/text_data/NN5/moving_window/nn5_stl_56i9v.txt',
test_file_path = '../../../datasets/text_data/NN5/moving_window/nn5_test_56i9.txt',
binary_train_file_path = output_path + 'nn5_stl_56i9.tfrecords',
binary_validation_file_path = output_path + 'nn5_stl_56i9v.tfrecords',
binary_test_file_path = output_path + 'nn5_test_56i9.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,051 | 44.73913 | 95 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/NN5/moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/NN5/moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
input_size = 9,
output_size = 56,
train_file_path = '../../../../datasets/text_data/NN5/moving_window/without_stl_decomposition/nn5_56i9.txt',
validate_file_path = '../../../../datasets/text_data/NN5/moving_window/without_stl_decomposition/nn5_56i9v.txt',
test_file_path = '../../../../datasets/text_data/NN5/moving_window/without_stl_decomposition/nn5_test_56i9.txt',
binary_train_file_path = output_path + 'nn5_56i9.tfrecords',
binary_validation_file_path = output_path + 'nn5_56i9v.tfrecords',
binary_test_file_path = output_path + 'nn5_test_56i9.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,151 | 49.086957 | 120 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/NN5/non_moving_window/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/NN5/non_moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 56,
train_file_path = '../../../datasets/text_data/NN5/non_moving_window/nn5_stl_56.txt',
validate_file_path = '../../../datasets/text_data/NN5/non_moving_window/nn5_stl_56v.txt',
test_file_path = '../../../datasets/text_data/NN5/non_moving_window/nn5_test_56.txt',
binary_train_file_path = output_path + 'nn5_stl_56.tfrecords',
binary_validation_file_path = output_path + 'nn5_stl_56v.tfrecords',
binary_test_file_path = output_path + 'nn5_test_56.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,035 | 46.090909 | 97 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/NN5/non_moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/NN5/non_moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 56,
train_file_path = '../../../../datasets/text_data/NN5/non_moving_window/without_stl_decomposition/nn5_56.txt',
validate_file_path = '../../../../datasets/text_data/NN5/non_moving_window/without_stl_decomposition/nn5_56v.txt',
test_file_path = '../../../../datasets/text_data/NN5/non_moving_window/without_stl_decomposition/nn5_test_56.txt',
binary_train_file_path = output_path + 'nn5_56.tfrecords',
binary_validation_file_path = output_path + 'nn5_56v.tfrecords',
binary_test_file_path = output_path + 'nn5_test_56.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,135 | 50.636364 | 122 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/kaggle_web_traffic/moving_window/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/kaggle_web_traffic/moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
input_size = 9,
output_size = 59,
train_file_path = '../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_stl_59i9.txt',
validate_file_path = '../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_stl_59i9v.txt',
test_file_path = '../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_test_59i9.txt',
binary_train_file_path = output_path + 'kaggle_stl_59i9.tfrecords',
binary_validation_file_path = output_path + 'kaggle_stl_59i9v.tfrecords',
binary_test_file_path = output_path + 'kaggle_test_59i9.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
tfrecord_writer = TFRecordWriter(
input_size=74,
output_size=59,
train_file_path='../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_stl_59i74.txt',
validate_file_path='../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_stl_59i74v.txt',
test_file_path='../../../datasets/text_data/kaggle_web_traffic/moving_window/kaggle_test_59i74.txt',
binary_train_file_path=output_path + 'kaggle_stl_59i74.tfrecords',
binary_validation_file_path=output_path + 'kaggle_stl_59i74v.tfrecords',
binary_test_file_path=output_path + 'kaggle_test_59i74.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,996 | 50.205128 | 113 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/kaggle_web_traffic/moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/kaggle_web_traffic/moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
input_size = 9,
output_size = 59,
train_file_path = '../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_59i9.txt',
validate_file_path = '../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_59i9v.txt',
test_file_path = '../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_test_59i9.txt',
binary_train_file_path = output_path + 'kaggle_59i9.tfrecords',
binary_validation_file_path = output_path + 'kaggle_59i9v.tfrecords',
binary_test_file_path = output_path + 'kaggle_test_59i9.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
tfrecord_writer = TFRecordWriter(
input_size=74,
output_size=59,
train_file_path='../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_59i74.txt',
validate_file_path='../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_59i74v.txt',
test_file_path='../../../../datasets/text_data/kaggle_web_traffic/moving_window/without_stl_decomposition/kaggle_test_59i74.txt',
binary_train_file_path=output_path + 'kaggle_59i74.tfrecords',
binary_validation_file_path=output_path + 'kaggle_59i74v.tfrecords',
binary_test_file_path=output_path + 'kaggle_test_59i74.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 2,163 | 54.487179 | 138 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/kaggle_web_traffic/non_moving_window/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/kaggle_web_traffic/non_moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 59,
train_file_path = '../../../datasets/text_data/kaggle_web_traffic/non_moving_window/kaggle_stl_59.txt',
validate_file_path = '../../../datasets/text_data/kaggle_web_traffic/non_moving_window/kaggle_stl_59v.txt',
test_file_path = '../../../datasets/text_data/kaggle_web_traffic/non_moving_window/kaggle_test_59.txt',
binary_train_file_path = output_path + 'kaggle_stl_59.tfrecords',
binary_validation_file_path = output_path + 'kaggle_stl_59v.tfrecords',
binary_test_file_path = output_path + 'kaggle_test_59.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,113 | 49.636364 | 115 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/kaggle_web_traffic/non_moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../../datasets/binary_data/kaggle_web_traffic/non_moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
tfrecord_writer = TFRecordWriter(
output_size = 59,
train_file_path = '../../../../datasets/text_data/kaggle_web_traffic/non_moving_window/without_stl_decomposition/kaggle_59.txt',
validate_file_path = '../../../../datasets/text_data/kaggle_web_traffic/non_moving_window/without_stl_decomposition/kaggle_59v.txt',
test_file_path = '../../../../datasets/text_data/kaggle_web_traffic/non_moving_window/without_stl_decomposition/kaggle_test_59.txt',
binary_train_file_path = output_path + 'kaggle_59.tfrecords',
binary_validation_file_path = output_path + 'kaggle_59v.tfrecords',
binary_test_file_path = output_path + 'kaggle_test_59.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 1,217 | 54.363636 | 140 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M4/moving_window/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/M4/moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
# macro data
tfrecord_writer = TFRecordWriter(
input_size = 15,
output_size = 18,
train_file_path = '../../../datasets/text_data/M4/moving_window/m4_stl_monthly_macro_18i15.txt',
validate_file_path = '../../../datasets/text_data/M4/moving_window/m4_stl_monthly_macro_18i15v.txt',
test_file_path = '../../../datasets/text_data/M4/moving_window/m4_test_monthly_macro_18i15.txt',
binary_train_file_path = output_path + 'm4_stl_monthly_macro_18i15.tfrecords',
binary_validation_file_path = output_path + 'm4_stl_monthly_macro_18i15v.tfrecords',
binary_test_file_path = output_path + 'm4_test_monthly_macro_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# micro data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_micro_18i15.txt',
validate_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_micro_18i15v.txt',
test_file_path='../../../datasets/text_data/M4/moving_window/m4_test_monthly_micro_18i15.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_micro_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_micro_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_micro_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# industry data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_industry_18i15.txt',
validate_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_industry_18i15v.txt',
test_file_path='../../../datasets/text_data/M4/moving_window/m4_test_monthly_industry_18i15.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_industry_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_industry_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_industry_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# finance data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_finance_18i15.txt',
validate_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_finance_18i15v.txt',
test_file_path='../../../datasets/text_data/M4/moving_window/m4_test_monthly_finance_18i15.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_finance_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_finance_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_finance_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# other data
tfrecord_writer = TFRecordWriter(
input_size=5,
output_size=18,
train_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_other_18i5.txt',
validate_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_other_18i5v.txt',
test_file_path='../../../datasets/text_data/M4/moving_window/m4_test_monthly_other_18i5.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_other_18i5.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_other_18i5v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_other_18i5.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# demographic data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_demo_18i15.txt',
validate_file_path='../../../datasets/text_data/M4/moving_window/m4_stl_monthly_demo_18i15v.txt',
test_file_path='../../../datasets/text_data/M4/moving_window/m4_test_monthly_demo_18i15.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_demo_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_demo_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_demo_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 5,637 | 50.724771 | 109 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M4/moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "'../../../../datasets/binary_data/M4/moving_window/without_stl_decomposition/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
# macro data
tfrecord_writer = TFRecordWriter(
input_size = 15,
output_size = 18,
train_file_path = '../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_macro_18i15.txt',
validate_file_path = '../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_macro_18i15v.txt',
test_file_path = '../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_macro_18i15.txt',
binary_train_file_path = output_path + 'm4_monthly_macro_18i15.tfrecords',
binary_validation_file_path = output_path + 'm4_monthly_macro_18i15v.tfrecords',
binary_test_file_path = output_path + 'm4_test_monthly_macro_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# micro data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_micro_18i15.txt',
validate_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_micro_18i15v.txt',
test_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_micro_18i15.txt',
binary_train_file_path=output_path + 'm4_monthly_micro_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_monthly_micro_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_micro_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# industry data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_industry_18i15.txt',
validate_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_industry_18i15v.txt',
test_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_industry_18i15.txt',
binary_train_file_path=output_path + 'm4_monthly_industry_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_monthly_industry_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_industry_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# finance data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_finance_18i15.txt',
validate_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_finance_18i15v.txt',
test_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_finance_18i15.txt',
binary_train_file_path=output_path + 'm4_monthly_finance_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_monthly_finance_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_finance_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# other data
tfrecord_writer = TFRecordWriter(
input_size=5,
output_size=18,
train_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_other_18i5.txt',
validate_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_other_18i5v.txt',
test_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_other_18i5.txt',
binary_train_file_path=output_path + 'm4_monthly_other_18i5.tfrecords',
binary_validation_file_path=output_path + 'm4_monthly_other_18i5v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_other_18i5.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# demographic data
tfrecord_writer = TFRecordWriter(
input_size=15,
output_size=18,
train_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_demo_18i15.txt',
validate_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_monthly_demo_18i15v.txt',
test_file_path='../../../../datasets/text_data/M4/moving_window/without_stl_decomposition/m4_test_monthly_demo_18i15.txt',
binary_train_file_path=output_path + 'm4_monthly_demo_18i15.tfrecords',
binary_validation_file_path=output_path + 'm4_monthly_demo_18i15v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_demo_18i15.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 6,093 | 54.908257 | 134 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M4/non_moving_window/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os
output_path = "../../../datasets/binary_data/M4/non_moving_window/"
if not os.path.exists(output_path):
os.makedirs(output_path)
if __name__ == '__main__':
# macro data
tfrecord_writer = TFRecordWriter(
output_size = 18,
train_file_path = '../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_macro_18.txt',
validate_file_path = '../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_macro_18v.txt',
test_file_path = '../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_macro_18.txt',
binary_train_file_path = output_path + 'm4_stl_monthly_macro_18.tfrecords',
binary_validation_file_path = output_path + 'm4_stl_monthly_macro_18v.tfrecords',
binary_test_file_path = output_path + 'm4_test_monthly_macro_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# micro data
tfrecord_writer = TFRecordWriter(
output_size=18,
train_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_micro_18.txt',
validate_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_micro_18v.txt',
test_file_path='../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_micro_18.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_micro_18.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_micro_18v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_micro_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# industry data
tfrecord_writer = TFRecordWriter(
output_size=18,
train_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_industry_18.txt',
validate_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_industry_18v.txt',
test_file_path='../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_industry_18.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_industry_18.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_industry_18v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_industry_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# finance data
tfrecord_writer = TFRecordWriter(
output_size=18,
train_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_finance_18.txt',
validate_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_finance_18v.txt',
test_file_path='../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_finance_18.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_finance_18.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_finance_18v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_finance_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# other data
tfrecord_writer = TFRecordWriter(
output_size=18,
train_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_other_18.txt',
validate_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_other_18v.txt',
test_file_path='../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_other_18.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_other_18.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_other_18v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_other_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file()
# demographic data
tfrecord_writer = TFRecordWriter(
output_size=18,
train_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_demo_18.txt',
validate_file_path='../../../datasets/text_data/M4/non_moving_window/m4_stl_monthly_demo_18v.txt',
test_file_path='../../../datasets/text_data/M4/non_moving_window/m4_test_monthly_demo_18.txt',
binary_train_file_path=output_path + 'm4_stl_monthly_demo_18.tfrecords',
binary_validation_file_path=output_path + 'm4_stl_monthly_demo_18v.tfrecords',
binary_test_file_path=output_path + 'm4_test_monthly_demo_18.tfrecords'
)
tfrecord_writer.read_text_data()
tfrecord_writer.write_train_data_to_tfrecord_file()
tfrecord_writer.write_validation_data_to_tfrecord_file()
tfrecord_writer.write_test_data_to_tfrecord_file() | 5,476 | 52.174757 | 110 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M4/non_moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated TFRecord files.
output_path = "../../../../datasets/binary_data/M4/non_moving_window/without_stl_decomposition/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # The six M4 monthly category datasets differ only in the category token
    # embedded in their file names, so one loop replaces six copy-pasted
    # configuration sections.
    text_data_path = '../../../../datasets/text_data/M4/non_moving_window/without_stl_decomposition/'
    for category in ('macro', 'micro', 'industry', 'finance', 'other', 'demo'):
        tfrecord_writer = TFRecordWriter(
            output_size=18,
            train_file_path=text_data_path + 'm4_monthly_' + category + '_18.txt',
            validate_file_path=text_data_path + 'm4_monthly_' + category + '_18v.txt',
            test_file_path=text_data_path + 'm4_test_monthly_' + category + '_18.txt',
            binary_train_file_path=output_path + 'm4_monthly_' + category + '_18.tfrecords',
            binary_validation_file_path=output_path + 'm4_monthly_' + category + '_18v.tfrecords',
            binary_test_file_path=output_path + 'm4_test_monthly_' + category + '_18.tfrecords'
        )
        # Parse the text data once, then serialise all three splits.
        tfrecord_writer.read_text_data()
        tfrecord_writer.write_train_data_to_tfrecord_file()
        tfrecord_writer.write_validation_data_to_tfrecord_file()
        tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/Tourism/moving_window/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated Tourism TFRecord files.
output_path = "../../../datasets/binary_data/Tourism/moving_window/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # Moving-window, STL-decomposed Tourism data: window of 15 inputs,
    # forecasting horizon of 24.
    tfrecord_writer = TFRecordWriter(
        input_size=15,
        output_size=24,
        train_file_path='../../../datasets/text_data/Tourism/moving_window/tourism_stl_24i15.txt',
        validate_file_path='../../../datasets/text_data/Tourism/moving_window/tourism_stl_24i15v.txt',
        test_file_path='../../../datasets/text_data/Tourism/moving_window/tourism_test_24i15.txt',
        binary_train_file_path=output_path + 'tourism_stl_24i15.tfrecords',
        binary_validation_file_path=output_path + 'tourism_stl_24i15v.tfrecords',
        binary_test_file_path=output_path + 'tourism_test_24i15.tfrecords'
    )
    # Parse the text data once, then serialise all three splits.
    tfrecord_writer.read_text_data()
    tfrecord_writer.write_train_data_to_tfrecord_file()
    tfrecord_writer.write_validation_data_to_tfrecord_file()
    tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/Tourism/moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated Tourism TFRecord files.
output_path = "../../../../datasets/binary_data/Tourism/moving_window/without_stl_decomposition/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # Moving-window Tourism data without STL decomposition: window of 15
    # inputs, forecasting horizon of 24.
    tfrecord_writer = TFRecordWriter(
        input_size=15,
        output_size=24,
        train_file_path='../../../../datasets/text_data/Tourism/moving_window/without_stl_decomposition/tourism_24i15.txt',
        validate_file_path='../../../../datasets/text_data/Tourism/moving_window/without_stl_decomposition/tourism_24i15v.txt',
        test_file_path='../../../../datasets/text_data/Tourism/moving_window/without_stl_decomposition/tourism_test_24i15.txt',
        binary_train_file_path=output_path + 'tourism_24i15.tfrecords',
        binary_validation_file_path=output_path + 'tourism_24i15v.tfrecords',
        binary_test_file_path=output_path + 'tourism_test_24i15.tfrecords'
    )
    # Parse the text data once, then serialise all three splits.
    tfrecord_writer.read_text_data()
    tfrecord_writer.write_train_data_to_tfrecord_file()
    tfrecord_writer.write_validation_data_to_tfrecord_file()
    tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/Tourism/non_moving_window/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated Tourism TFRecord files.
output_path = "../../../datasets/binary_data/Tourism/non_moving_window/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # Non-moving-window, STL-decomposed Tourism data with a forecasting
    # horizon of 24.
    tfrecord_writer = TFRecordWriter(
        output_size=24,
        train_file_path='../../../datasets/text_data/Tourism/non_moving_window/tourism_stl_24.txt',
        validate_file_path='../../../datasets/text_data/Tourism/non_moving_window/tourism_stl_24v.txt',
        test_file_path='../../../datasets/text_data/Tourism/non_moving_window/tourism_test_24.txt',
        binary_train_file_path=output_path + 'tourism_stl_24.tfrecords',
        binary_validation_file_path=output_path + 'tourism_stl_24v.tfrecords',
        binary_test_file_path=output_path + 'tourism_test_24.tfrecords'
    )
    # Parse the text data once, then serialise all three splits.
    tfrecord_writer.read_text_data()
    tfrecord_writer.write_train_data_to_tfrecord_file()
    tfrecord_writer.write_validation_data_to_tfrecord_file()
    tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/Tourism/non_moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated Tourism TFRecord files.
output_path = "../../../../datasets/binary_data/Tourism/non_moving_window/without_stl_decomposition/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # Non-moving-window Tourism data without STL decomposition, forecasting
    # horizon of 24.
    tfrecord_writer = TFRecordWriter(
        output_size=24,
        train_file_path='../../../../datasets/text_data/Tourism/non_moving_window/without_stl_decomposition/tourism_24.txt',
        validate_file_path='../../../../datasets/text_data/Tourism/non_moving_window/without_stl_decomposition/tourism_24v.txt',
        test_file_path='../../../../datasets/text_data/Tourism/non_moving_window/without_stl_decomposition/tourism_test_24.txt',
        binary_train_file_path=output_path + 'tourism_24.tfrecords',
        binary_validation_file_path=output_path + 'tourism_24v.tfrecords',
        binary_test_file_path=output_path + 'tourism_test_24.tfrecords'
    )
    # Parse the text data once, then serialise all three splits.
    tfrecord_writer.read_text_data()
    tfrecord_writer.write_train_data_to_tfrecord_file()
    tfrecord_writer.write_validation_data_to_tfrecord_file()
    tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M3/train_test_data_splitter.py | import csv
# Input: the raw M3 competition data exported as CSV.
data_file = "../../datasets/text_data/M3/M3C.csv"
# Outputs: per-series in-sample (training) values and held-out test values.
train_data_file = "../../datasets/text_data/M3/Train_Dataset.csv"
results_file = "../../datasets/text_data/M3/Test_Dataset.csv"

# newline='' is required by the csv module so the writers do not emit
# spurious blank rows on platforms with \r\n line endings.
with open(data_file, "r", newline="") as original_file, \
        open(train_data_file, "w", newline="") as train_out, \
        open(results_file, "w", newline="") as results_out:
    data_reader = csv.reader(original_file, delimiter=",")
    train_data_writer = csv.writer(train_out, delimiter=",")
    # NOTE(review): the test file is semicolon-delimited while the training
    # file is comma-delimited — presumably what the downstream error scripts
    # expect; confirm before changing.
    results_writer = csv.writer(results_out, delimiter=";")

    next(data_reader, None)  # skip the header row

    for row in data_reader:
        # Column 3 holds the number of in-sample observations; the series
        # values themselves start at column 7. Convert once instead of on
        # every slice. (A leftover debug print of each test row was removed.)
        n_observations = int(row[3])
        # Each output row is [series id, category, values...].
        train_data = [row[0], row[4]] + row[7:7 + n_observations]
        results_data = [row[0], row[4]] + row[7 + n_observations:]
        train_data_writer.writerow(train_data)
        results_writer.writerow(results_data)
| 986 | 35.555556 | 124 | py |
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M3/moving_window/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated TFRecord files.
output_path = "../../../datasets/binary_data/M3/moving_window/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # All six M3 monthly categories follow the same file-name pattern and
    # differ only in the category token and the input window size (12 for
    # macro, 13 for the rest), so one loop replaces six copy-pasted sections.
    text_data_path = '../../../datasets/text_data/M3/moving_window/'
    for category, input_size in (('macro', 12), ('micro', 13), ('industry', 13),
                                 ('finance', 13), ('other', 13), ('demo', 13)):
        suffix = '_18i' + str(input_size)
        tfrecord_writer = TFRecordWriter(
            input_size=input_size,
            output_size=18,
            train_file_path=text_data_path + 'm3_stl_monthly_' + category + suffix + '.txt',
            validate_file_path=text_data_path + 'm3_stl_monthly_' + category + suffix + 'v.txt',
            test_file_path=text_data_path + 'm3_test_monthly_' + category + suffix + '.txt',
            binary_train_file_path=output_path + 'm3_stl_monthly_' + category + suffix + '.tfrecords',
            binary_validation_file_path=output_path + 'm3_stl_monthly_' + category + suffix + 'v.tfrecords',
            binary_test_file_path=output_path + 'm3_test_monthly_' + category + suffix + '.tfrecords'
        )
        # Parse the text data once, then serialise all three splits.
        tfrecord_writer.read_text_data()
        tfrecord_writer.write_train_data_to_tfrecord_file()
        tfrecord_writer.write_validation_data_to_tfrecord_file()
        tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M3/moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated TFRecord files.
output_path = "../../../../datasets/binary_data/M3/moving_window/without_stl_decomposition/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # All six M3 monthly categories follow the same file-name pattern and
    # differ only in the category token and the input window size (12 for
    # macro, 13 for the rest), so one loop replaces six copy-pasted sections.
    text_data_path = '../../../../datasets/text_data/M3/moving_window/without_stl_decomposition/'
    for category, input_size in (('macro', 12), ('micro', 13), ('industry', 13),
                                 ('finance', 13), ('other', 13), ('demo', 13)):
        suffix = '_18i' + str(input_size)
        tfrecord_writer = TFRecordWriter(
            input_size=input_size,
            output_size=18,
            train_file_path=text_data_path + 'm3_monthly_' + category + suffix + '.txt',
            validate_file_path=text_data_path + 'm3_monthly_' + category + suffix + 'v.txt',
            test_file_path=text_data_path + 'm3_test_monthly_' + category + suffix + '.txt',
            binary_train_file_path=output_path + 'm3_monthly_' + category + suffix + '.tfrecords',
            binary_validation_file_path=output_path + 'm3_monthly_' + category + suffix + 'v.tfrecords',
            binary_test_file_path=output_path + 'm3_test_monthly_' + category + suffix + '.tfrecords'
        )
        # Parse the text data once, then serialise all three splits.
        tfrecord_writer.read_text_data()
        tfrecord_writer.write_train_data_to_tfrecord_file()
        tfrecord_writer.write_validation_data_to_tfrecord_file()
        tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M3/non_moving_window/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated TFRecord files.
output_path = "../../../datasets/binary_data/M3/non_moving_window/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # The six M3 monthly category datasets differ only in the category token
    # embedded in their file names, so one loop replaces six copy-pasted
    # configuration sections.
    text_data_path = '../../../datasets/text_data/M3/non_moving_window/'
    for category in ('macro', 'micro', 'industry', 'finance', 'other', 'demo'):
        tfrecord_writer = TFRecordWriter(
            output_size=18,
            train_file_path=text_data_path + 'm3_stl_monthly_' + category + '_18.txt',
            validate_file_path=text_data_path + 'm3_stl_monthly_' + category + '_18v.txt',
            test_file_path=text_data_path + 'm3_test_monthly_' + category + '_18.txt',
            binary_train_file_path=output_path + 'm3_stl_monthly_' + category + '_18.tfrecords',
            binary_validation_file_path=output_path + 'm3_stl_monthly_' + category + '_18v.tfrecords',
            binary_test_file_path=output_path + 'm3_test_monthly_' + category + '_18.tfrecords'
        )
        # Parse the text data once, then serialise all three splits.
        tfrecord_writer.read_text_data()
        tfrecord_writer.write_train_data_to_tfrecord_file()
        tfrecord_writer.write_validation_data_to_tfrecord_file()
        tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/preprocess_scripts/M3/non_moving_window/without_stl_decomposition/create_tfrecords.py | from tfrecords_handler.non_moving_window.tfrecord_writer import TFRecordWriter
import os

# Destination directory for the generated TFRecord files.
output_path = "../../../../datasets/binary_data/M3/non_moving_window/without_stl_decomposition/"
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard.
os.makedirs(output_path, exist_ok=True)

if __name__ == '__main__':
    # The six M3 monthly category datasets differ only in the category token
    # embedded in their file names, so one loop replaces six copy-pasted
    # configuration sections.
    text_data_path = '../../../../datasets/text_data/M3/non_moving_window/without_stl_decomposition/'
    for category in ('macro', 'micro', 'industry', 'finance', 'other', 'demo'):
        tfrecord_writer = TFRecordWriter(
            output_size=18,
            train_file_path=text_data_path + 'm3_monthly_' + category + '_18.txt',
            validate_file_path=text_data_path + 'm3_monthly_' + category + '_18v.txt',
            test_file_path=text_data_path + 'm3_test_monthly_' + category + '_18.txt',
            binary_train_file_path=output_path + 'm3_monthly_' + category + '_18.tfrecords',
            binary_validation_file_path=output_path + 'm3_monthly_' + category + '_18v.tfrecords',
            binary_test_file_path=output_path + 'm3_test_monthly_' + category + '_18.tfrecords'
        )
        # Parse the text data once, then serialise all three splits.
        tfrecord_writer.read_text_data()
        tfrecord_writer.write_train_data_to_tfrecord_file()
        tfrecord_writer.write_validation_data_to_tfrecord_file()
        tfrecord_writer.write_test_data_to_tfrecord_file()
time-series-forecasting-release | time-series-forecasting-release/graph_plotter/training_curve_plotter.py | import tensorflow as tf
import numpy as np
class CurvePlotter:
    """Plots training (and optionally validation) loss curves to TensorBoard.

    TF1-style: one scalar Variable is fed a new value on every call and the
    merged summary op is evaluated through the supplied session.
    """

    def __init__(self, session, no_of_curves):
        # session: an active tf.Session used to evaluate the summary op
        # no_of_curves: 2 creates a separate validation writer next to training
        self.__session = session
        self.__writer_train = tf.summary.FileWriter('./logs/plot_train')
        if no_of_curves == 2:
            self.__writer_val = tf.summary.FileWriter('./logs/plot_val')
        self.__loss_var = tf.Variable(0.0)
        tf.summary.scalar("loss", self.__loss_var)
        # NOTE(review): merge_all() merges every summary in the default graph,
        # not just the scalar above -- confirm no other summaries exist.
        self.__write_op = tf.summary.merge_all()

    def plot_train(self, loss, epoch):
        """Record the mean of `loss` as the training-loss point for `epoch`."""
        summary = self.__session.run(self.__write_op, {self.__loss_var: np.mean(loss)})
        self.__writer_train.add_summary(summary, epoch)
        self.__writer_train.flush()

    def plot_val(self, loss, epoch):
        """Record the mean of `loss` as the validation-loss point for `epoch`.

        Only valid when the object was built with no_of_curves == 2.
        """
        summary = self.__session.run(self.__write_op, {self.__loss_var: np.mean(np.mean(loss))})
        self.__writer_val.add_summary(summary, epoch)
        self.__writer_val.flush()
time-series-forecasting-release | time-series-forecasting-release/configs/global_configs.py | # configs for the model training
class model_training_configs:
    """Constants used during model training."""
    VALIDATION_ERRORS_DIRECTORY = 'results/validation_errors/'
    # how often (in epochs) progress information is reported
    INFO_FREQ = 1


class model_testing_configs:
    """Output directories used during model testing."""
    RNN_FORECASTS_DIRECTORY = 'results/rnn_forecasts/'
    RNN_ERRORS_DIRECTORY = 'results/errors'
    # NOTE(review): leading '/' makes this path absolute, unlike the two
    # relative paths above -- confirm this is intended.
    PROCESSED_RNN_FORECASTS_DIRECTORY = '/results/processed_rnn_forecasts/'


class hyperparameter_tuning_configs:
    """SMAC3 hyperparameter-tuning configuration."""
    SMAC_RUNCOUNT_LIMIT = 50


class gpu_configs:
    """TensorFlow session/GPU options."""
    log_device_placement = False
| 544 | 29.277778 | 75 | py |
time-series-forecasting-release | time-series-forecasting-release/tfrecords_handler/moving_window/tfrecord_writer.py | import tensorflow as tf
import numpy as np
import pandas as pd
class TFRecordWriter:
    """Converts moving-window train/validation/test text files into
    ZLIB-compressed tfrecord files of tf.train.SequenceExample records.

    Expected text layout (space separated, no header): column 0 is the
    series id; the input window, output window and metadata follow at fixed
    column offsets derived from input_size/output_size.
    """

    def __init__(self, **kwargs):
        # window sizes used to slice the fixed-offset columns below
        self.__input_size = kwargs['input_size']
        self.__output_size = kwargs['output_size']
        # source text files
        self.__train_file_path = kwargs['train_file_path']
        self.__validate_file_path = kwargs['validate_file_path']
        self.__test_file_path = kwargs['test_file_path']
        # destination tfrecord files
        self.__binary_train_file_path = kwargs['binary_train_file_path']
        self.__binary_validation_file_path = kwargs['binary_validation_file_path']
        self.__binary_test_file_path = kwargs['binary_test_file_path']

    # read the text data from text files
    def read_text_data(self):
        """Populate per-series lists of float32 arrays from the text files."""
        self.__list_of_training_inputs = []
        self.__list_of_training_outputs = []
        self.__list_of_validation_inputs = []
        self.__list_of_validation_outputs =[]
        self.__list_of_validation_metadata = []
        self.__list_of_test_inputs = []
        self.__list_of_test_metadata = []

        # Reading the training dataset.
        # First pass (10 rows) detects float64 columns so the full read can be
        # down-cast to float32.
        # NOTE(review): this probe uses read_csv defaults (comma separator,
        # header inferred) while the real file is space separated with no
        # header, so the detected dtype map may be empty -- verify.
        train_df = pd.read_csv(self.__train_file_path, nrows=10)
        float_cols = [c for c in train_df if train_df[c].dtype == "float64"]
        float32_cols = {c: np.float32 for c in float_cols}
        train_df = pd.read_csv(self.__train_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)

        train_df = train_df.rename(columns={0: 'series'})

        # Returns unique number of time series in the dataset.
        series = pd.unique(train_df['series'])

        # Construct input and output training tuples for each time series.
        for ser in series:
            one_series_df = train_df[train_df['series'] == ser]
            # columns 1..input_size hold the input window; column
            # input_size+1 is skipped (presumably a separator token in the
            # text format -- TODO confirm), then output_size output columns
            inputs_df = one_series_df.iloc[:, range(1, (self.__input_size + 1))]
            outputs_df = one_series_df.iloc[:, range((self.__input_size + 2), (self.__input_size + self.__output_size + 2))]
            self.__list_of_training_inputs.append(np.ascontiguousarray(inputs_df, dtype=np.float32))
            self.__list_of_training_outputs.append(np.ascontiguousarray(outputs_df, dtype=np.float32))

        # Reading the validation dataset.
        val_df = pd.read_csv(self.__validate_file_path, nrows=10)
        float_cols = [c for c in val_df if val_df[c].dtype == "float64"]
        float32_cols = {c: np.float32 for c in float_cols}
        val_df = pd.read_csv(self.__validate_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)

        val_df = val_df.rename(columns={0: 'series'})
        series = pd.unique(val_df['series'])

        for ser in series:
            one_series_df = val_df[val_df['series'] == ser]
            inputs_df_test = one_series_df.iloc[:, range(1, (self.__input_size + 1))]
            # metadata starts one column past the output window's end
            # (another separator column is presumably skipped -- TODO confirm)
            metadata_df = one_series_df.iloc[:, range((self.__input_size + self.__output_size + 3), one_series_df.shape[1])]
            outputs_df_test = one_series_df.iloc[:, range((self.__input_size + 2), (self.__input_size + self.__output_size + 2))]
            self.__list_of_validation_inputs.append(np.ascontiguousarray(inputs_df_test, dtype=np.float32))
            self.__list_of_validation_outputs.append(np.ascontiguousarray(outputs_df_test, dtype=np.float32))
            self.__list_of_validation_metadata.append(np.ascontiguousarray(metadata_df, dtype=np.float32))

        # Reading the test file.
        test_df = pd.read_csv(self.__test_file_path, nrows=10)
        float_cols = [c for c in test_df if test_df[c].dtype == "float64"]
        float32_cols = {c: np.float32 for c in float_cols}
        test_df = pd.read_csv(self.__test_file_path, sep=" ", header=None, engine='c', dtype=float32_cols)

        test_df = test_df.rename(columns={0: 'series'})
        series = pd.unique(test_df['series'])

        for ser in series:
            test_series_df = test_df[test_df['series'] == ser]
            test_inputs_df = test_series_df.iloc[:, range(1, (self.__input_size + 1))]
            # test rows carry no output window: metadata directly follows input
            metadata_df = test_series_df.iloc[:, range((self.__input_size + 2), test_series_df.shape[1])]
            self.__list_of_test_inputs.append(np.ascontiguousarray(test_inputs_df, dtype=np.float32))
            self.__list_of_test_metadata.append(np.ascontiguousarray(metadata_df, dtype=np.float32))

    # write the training text data into a tfrecord file
    def write_train_data_to_tfrecord_file(self):
        """Serialize (input, output) windows per series into the train tfrecord."""
        writer = tf.python_io.TFRecordWriter(self.__binary_train_file_path, tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB))

        # one SequenceExample per series; sequence_length = number of windows
        for input, output in zip(self.__list_of_training_inputs, self.__list_of_training_outputs):
            sequence_length = input.shape[0]
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length" : tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists = tf.train.FeatureLists(feature_list={
                    "input" : tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=input_sequence)) for input_sequence in input
                    ]),
                    "output" : tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=output_sequence)) for output_sequence in output
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()

    # write the validation text data into a tfrecord file
    def write_validation_data_to_tfrecord_file(self):
        """Serialize (input, output, metadata) per series into the validation tfrecord."""
        writer = tf.python_io.TFRecordWriter(self.__binary_validation_file_path, tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB))

        for input, output, metadata in zip(self.__list_of_validation_inputs, self.__list_of_validation_outputs, self.__list_of_validation_metadata):
            sequence_length = input.shape[0]
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length": tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists=tf.train.FeatureLists(feature_list={
                    "input": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=input_sequence)) for input_sequence in input
                    ]),
                    "output": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=output_sequence)) for output_sequence
                        in output
                    ]),
                    "metadata": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=metadata_sequence)) for metadata_sequence in metadata
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()

    # write the test text data into tfrecord file
    def write_test_data_to_tfrecord_file(self):
        """Serialize (input, metadata) per series into the test tfrecord."""
        writer = tf.python_io.TFRecordWriter(self.__binary_test_file_path, tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB))

        for input, metadata in zip(self.__list_of_test_inputs, self.__list_of_test_metadata):
            sequence_length = input.shape[0]
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length" : tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists = tf.train.FeatureLists(feature_list={
                    "input" : tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=input_sequence)) for input_sequence in input
                    ]),
                    "metadata" : tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=metadata_sequence)) for metadata_sequence in metadata
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()
time-series-forecasting-release | time-series-forecasting-release/tfrecords_handler/moving_window/tfrecord_reader.py | import tensorflow as tf
class TFRecordReader:
    """Parsers for moving-window SequenceExample tfrecords.

    Each parser maps one serialized record to the tensors that were written
    by the matching TFRecordWriter: a scalar sequence length plus per-step
    float vectors of the configured widths.
    """

    def __init__(self, input_size, output_size, metadata_size):
        # per-timestep vector widths of the three sequence features
        self.__input_size = input_size
        self.__output_size = output_size
        self.__metadata_size = metadata_size

    def train_data_parser(self, serialized_example):
        """Parse one training record into (sequence_length, input, output)."""
        context_spec = {
            "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
        }
        sequence_spec = {
            "input": tf.FixedLenSequenceFeature([self.__input_size], dtype=tf.float32),
            "output": tf.FixedLenSequenceFeature([self.__output_size], dtype=tf.float32)
        }
        context, sequences = tf.parse_single_sequence_example(
            serialized_example,
            context_features=context_spec,
            sequence_features=sequence_spec
        )
        return context["sequence_length"], sequences["input"], sequences["output"]

    def validation_data_parser(self, serialized_example):
        """Parse one validation record into (sequence_length, input, output, metadata)."""
        context_spec = {
            "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
        }
        sequence_spec = {
            "input": tf.FixedLenSequenceFeature([self.__input_size], dtype=tf.float32),
            "output": tf.FixedLenSequenceFeature([self.__output_size], dtype=tf.float32),
            "metadata": tf.FixedLenSequenceFeature([self.__metadata_size], dtype=tf.float32)
        }
        context, sequences = tf.parse_single_sequence_example(
            serialized_example,
            context_features=context_spec,
            sequence_features=sequence_spec
        )
        return context["sequence_length"], sequences["input"], sequences["output"], sequences[
            "metadata"]

    def test_data_parser(self, serialized_example):
        """Parse one test record into (sequence_length, input, metadata)."""
        context_spec = {
            "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
        }
        sequence_spec = {
            "input": tf.FixedLenSequenceFeature([self.__input_size], dtype=tf.float32),
            "metadata": tf.FixedLenSequenceFeature([self.__metadata_size], dtype=tf.float32)
        }
        context, sequences = tf.parse_single_sequence_example(
            serialized_example,
            context_features=context_spec,
            sequence_features=sequence_spec
        )
        return context["sequence_length"], sequences["input"], sequences["metadata"]
time-series-forecasting-release | time-series-forecasting-release/tfrecords_handler/non_moving_window/tfrecord_writer.py | import tensorflow as tf
import numpy as np
import pandas as pd
import csv
class TFRecordWriter:
    """Converts non-moving-window (full-series) text files into
    ZLIB-compressed tfrecord files of tf.train.SequenceExample records.

    Text format (space separated): column 0 is the series id; validation
    rows use "|o" to mark the start of the output section and "|#" to mark
    the start of metadata; test rows only use "|#".
    """

    def __init__(self, **kwargs):
        # length of the forecast horizon (output section of a train row)
        self.__output_size = kwargs['output_size']
        # source text files
        self.__train_file_path = kwargs['train_file_path']
        self.__validate_file_path = kwargs['validate_file_path']
        self.__test_file_path = kwargs['test_file_path']
        # destination tfrecord files
        self.__binary_train_file_path = kwargs['binary_train_file_path']
        self.__binary_validation_file_path = kwargs['binary_validation_file_path']
        self.__binary_test_file_path = kwargs['binary_test_file_path']

    # read the text data from text files
    def read_text_data(self):
        """Populate per-series lists of float32 arrays from the text files."""
        self.__list_of_training_inputs = []
        self.__list_of_training_outputs = []
        self.__list_of_validation_inputs = []
        self.__list_of_validation_outputs = []
        self.__list_of_validation_metadata = []
        self.__list_of_test_inputs = []
        self.__list_of_test_metadata = []

        # Reading the training dataset.
        with open(self.__train_file_path) as train_file:
            train_data_reader = csv.reader(train_file, delimiter=" ")
            train_data_list = list(train_data_reader)
            for series in train_data_list:
                # the last output_size values are the outputs; one separator
                # token between input and output sections is skipped
                last_train_input_index = len(series) - self.__output_size - 2
                train_input_data = series[1: last_train_input_index + 1]
                train_output_data = series[(last_train_input_index + 2): len(series)]
                self.__list_of_training_inputs.append(np.ascontiguousarray(train_input_data, dtype=np.float32))
                self.__list_of_training_outputs.append(np.ascontiguousarray(train_output_data, dtype=np.float32))

        # Reading the validation dataset
        with open(self.__validate_file_path) as validate_file:
            validate_file_reader = csv.reader(validate_file, delimiter=" ")
            validate_data_list = list(validate_file_reader)
            for series in validate_data_list:
                # "|o" marks the output section, "|#" marks the metadata section
                meta_data_index = series.index("|#")
                output_index = series.index("|o")
                validate_input_data = series[1: output_index]
                validate_output_data = series[output_index + 1: meta_data_index]
                validate_meta_data = series[meta_data_index + 1:]
                self.__list_of_validation_inputs.append(np.ascontiguousarray(validate_input_data, dtype=np.float32))
                self.__list_of_validation_outputs.append(np.ascontiguousarray(validate_output_data, dtype=np.float32))
                self.__list_of_validation_metadata.append(np.ascontiguousarray(validate_meta_data, dtype=np.float32))

        # Reading the test file
        with open(self.__test_file_path) as test_file:
            test_file_reader = csv.reader(test_file, delimiter=" ")
            test_data_list = list(test_file_reader)
            for series in test_data_list:
                # test rows carry no output section, only input and metadata
                meta_data_index = series.index("|#")
                test_input_data = series[1: meta_data_index]
                test_meta_data = series[meta_data_index + 1:]
                self.__list_of_test_inputs.append(np.ascontiguousarray(test_input_data, dtype=np.float32))
                self.__list_of_test_metadata.append(np.ascontiguousarray(test_meta_data, dtype=np.float32))

    # write the training text data into a tfrecord file
    def write_train_data_to_tfrecord_file(self):
        """Serialize (input, output) per series into the train tfrecord.

        Each timestep holds a single value, wrapped as a 1-element FloatList.
        """
        writer = tf.python_io.TFRecordWriter(self.__binary_train_file_path,
                                             tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB))

        for input, output in zip(self.__list_of_training_inputs, self.__list_of_training_outputs):
            # NOTE(review): uses len(input) here but input.shape[0] in the
            # other two writers -- equivalent for 1-D ndarrays.
            sequence_length = len(input)
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length": tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists=tf.train.FeatureLists(feature_list={
                    "input": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[input_data_element])) for input_data_element
                        in input
                    ]),
                    "output": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[output_data_element])) for output_data_element
                        in output
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()

    # write the validation text data into a tfrecord file
    def write_validation_data_to_tfrecord_file(self):
        """Serialize (input, output, metadata) per series into the validation tfrecord."""
        writer = tf.python_io.TFRecordWriter(self.__binary_validation_file_path, tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB))

        for input, output, metadata in zip(self.__list_of_validation_inputs, self.__list_of_validation_outputs,
                                           self.__list_of_validation_metadata):
            sequence_length = input.shape[0]
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length": tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists=tf.train.FeatureLists(feature_list={
                    "input": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[input_data_element])) for input_data_element
                        in input
                    ]),
                    "output": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[output_data_element])) for output_data_element
                        in output
                    ]),
                    "metadata": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[metadata_element])) for metadata_element in
                        metadata
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()

    # write the test text data into tfrecord file
    def write_test_data_to_tfrecord_file(self):
        """Serialize (input, metadata) per series into the test tfrecord."""
        writer = tf.python_io.TFRecordWriter(self.__binary_test_file_path,
                                             tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB))

        for input, metadata in zip(self.__list_of_test_inputs, self.__list_of_test_metadata):
            sequence_length = input.shape[0]
            sequence_example = tf.train.SequenceExample(
                context=tf.train.Features(feature={
                    "sequence_length": tf.train.Feature(int64_list=tf.train.Int64List(value=[sequence_length]))
                }),
                feature_lists=tf.train.FeatureLists(feature_list={
                    "input": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[input_data_element])) for input_data_element
                        in input
                    ]),
                    "metadata": tf.train.FeatureList(feature=[
                        tf.train.Feature(float_list=tf.train.FloatList(value=[metadata_element])) for metadata_element in
                        metadata
                    ])
                })
            )
            writer.write(sequence_example.SerializeToString())
        writer.close()
| 7,776 | 50.164474 | 124 | py |
time-series-forecasting-release | time-series-forecasting-release/tfrecords_handler/non_moving_window/tfrecord_reader.py | import tensorflow as tf
class TFRecordReader:
    """Parsers for non-moving-window SequenceExample tfrecords.

    Every sequence feature is one scalar per timestep (width-1 FloatList),
    matching the non-moving-window TFRecordWriter output.
    """

    def train_data_parser(self, serialized_example):
        """Parse one training record into (sequence_length, input, output)."""
        context_parsed, sequence_parsed = tf.parse_single_sequence_example(
            serialized_example,
            context_features=({
                "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
            }),
            sequence_features=({
                "input": tf.FixedLenSequenceFeature([1], dtype=tf.float32),
                "output": tf.FixedLenSequenceFeature([1], dtype=tf.float32)
            })
        )
        return context_parsed["sequence_length"], sequence_parsed["input"], sequence_parsed["output"]

    def validation_data_parser(self, serialized_example):
        """Parse one validation record into (sequence_length, input, output, metadata)."""
        context_parsed, sequence_parsed = tf.parse_single_sequence_example(
            serialized_example,
            context_features=({
                "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
            }),
            sequence_features=({
                "input": tf.FixedLenSequenceFeature([1], dtype=tf.float32),
                "output": tf.FixedLenSequenceFeature([1], dtype=tf.float32),
                "metadata": tf.FixedLenSequenceFeature([1], dtype=tf.float32)
            })
        )
        return context_parsed["sequence_length"], sequence_parsed["input"], sequence_parsed["output"], sequence_parsed[
            "metadata"]

    def test_data_parser(self, serialized_example):
        """Parse one test record into (sequence_length, input, metadata)."""
        context_parsed, sequence_parsed = tf.parse_single_sequence_example(
            serialized_example,
            context_features=({
                "sequence_length": tf.FixedLenFeature([], dtype=tf.int64)
            }),
            sequence_features=({
                "input": tf.FixedLenSequenceFeature([1], dtype=tf.float32),
                "metadata": tf.FixedLenSequenceFeature([1], dtype=tf.float32)
            })
        )
        return context_parsed["sequence_length"], sequence_parsed["input"], sequence_parsed["metadata"]
def persist_results(results, file):
    """Write each key/value pair of `results` to `file` as 'key >>> value'.

    One pair per line, each followed by a blank line (the format expected by
    read_optimal_hyperparameter_values).

    Args:
        results: mapping of configuration names to values.
        file: path of the output text file (overwritten if it exists).
    """
    # 'with' guarantees the handle is closed even if a write raises;
    # the original left the file open on an exception.
    with open(file, mode='w') as file_object:
        for k, v in results.items():
            file_object.write(str(k) + ' >>> ' + str(v) + '\n\n')
| 198 | 23.875 | 61 | py |
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/time_series_length_calculator.py | import argparse
import csv
# Command-line tool: count the number of comma-separated observations on
# every line of --data_file and write one count per line to --output_file.
argument_parser = argparse.ArgumentParser("Calculate the time series length")
argument_parser.add_argument('--data_file', required=True, help='The full name of the data file')
argument_parser.add_argument('--output_file', required=True, help='The full name of the output file')

args = argument_parser.parse_args()
data_file = args.data_file
output_file = args.output_file

# a series with k commas has k + 1 values
with open(data_file) as input:
    lengths_list = [line.count(",") + 1 for line in input]

with open(output_file, "w") as output:
    for item in lengths_list:
        output.write("%s\n" % item)
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/invoke_r_final_evaluation.py | import subprocess
from configs.global_configs import model_testing_configs
def invoke_r_script(args, moving_window):
    """Run the R final-evaluation script on the RNN forecast results.

    Args:
        args: sequence of string arguments forwarded positionally to the R
            script; args[0] comes first, then the two directory constants
            from model_testing_configs, then the remaining args. Requires
            at least 12 elements when moving_window is true, 11 otherwise.
        moving_window: truthy selects the moving-window evaluation script,
            otherwise the non-moving-window one.
    """
    if moving_window:
        subprocess.call(["Rscript", "--vanilla", "error_calculator/moving_window/final_evaluation.R", args[0], model_testing_configs.RNN_ERRORS_DIRECTORY, model_testing_configs.PROCESSED_RNN_FORECASTS_DIRECTORY, args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11]])
    else:
        subprocess.call(["Rscript", "--vanilla", "error_calculator/non_moving_window/final_evaluation.R", args[0], model_testing_configs.RNN_ERRORS_DIRECTORY, model_testing_configs.PROCESSED_RNN_FORECASTS_DIRECTORY, args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10]])
| 772 | 84.888889 | 313 | py |
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/hyperparameter_scripts/hyperparameter_summary_generator.py | ## This file concatenates the optimal hyperparameter configs across all the models for the same dataset and writes to a csv file
import glob
import argparse
import pandas as pd
import re
from utility_scripts.hyperparameter_scripts.hyperparameter_config_reader import read_optimal_hyperparameter_values
# get the dataset name as an external argument
argument_parser = argparse.ArgumentParser("Create hyperparameter summaries")
argument_parser.add_argument('--dataset_name', required=True, help='Unique string for the name of the dataset')

# parse the user arguments
args = argument_parser.parse_args()
dataset_name = args.dataset_name

input_path = '../results/optimized_configurations/'
output_path = '../results/optimized_configurations/aggregate_hyperparameter_configs/'
output_file = output_path + dataset_name + ".csv"

# all optimal-config files of this dataset (one per model variant)
hyperparameter_files = [filename for filename in glob.iglob(input_path + dataset_name + "_*")]

hyperparameters_df = pd.DataFrame(
    columns=["Model_Name", "cell_dimension", "gaussian_noise_stdev", "l2_regularization", "max_epoch_size",
             "max_num_epochs", "minibatch_size", "num_hidden_layers", "random_normal_initializer_stdev",
             "rate_of_learning"])

# collect every model's optimal hyperparameters into one data frame
for config_file in sorted(hyperparameter_files):
    # file name format assumed: <dataset_name>_<model_name>_<suffix> -- TODO confirm
    file_name_part = re.split(pattern=dataset_name + "_", string=config_file, maxsplit=1)[1]
    model_name = file_name_part.rsplit('_', 1)[0]
    print(model_name)

    hyperparameter_values_dic = read_optimal_hyperparameter_values(config_file)

    # models without a tuned learning rate are shown with a "-" placeholder
    if "rate_of_learning" not in hyperparameter_values_dic.keys():
        hyperparameter_values_dic["rate_of_learning"] = "-"

    # insert at index -1, then shift all indices up by one to keep them unique
    hyperparameters_df.loc[-1] = [model_name, hyperparameter_values_dic["cell_dimension"],
                                  hyperparameter_values_dic["gaussian_noise_stdev"],
                                  hyperparameter_values_dic["l2_regularization"],
                                  hyperparameter_values_dic["max_epoch_size"],
                                  hyperparameter_values_dic["max_num_epochs"],
                                  hyperparameter_values_dic["minibatch_size"],
                                  hyperparameter_values_dic["num_hidden_layers"],
                                  hyperparameter_values_dic["random_normal_initializer_stdev"],
                                  hyperparameter_values_dic["rate_of_learning"]]
    hyperparameters_df.index = hyperparameters_df.index + 1

# write the aggregated hyperparameter configurations to a csv file
hyperparameters_df.to_csv(output_file, index=False)
| 2,639 | 46.142857 | 128 | py |
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/hyperparameter_scripts/hyperparameter_config_reader.py | import re
def read_optimal_hyperparameter_values(file_name):
    """Parse a persisted optimal-configuration file into a dict.

    Expected line format: ``<name> >>> <value>``. Blank lines and lines
    starting with '#' are ignored.

    Args:
        file_name: path of the configuration text file.

    Returns:
        dict mapping hyperparameter name (str) to its value (float).
    """
    hyperparameter_values_dic = {}
    # iterate the file lazily instead of readlines(); the redundant explicit
    # close() inside the original 'with' block was removed.
    with open(file_name) as configs_file:
        for config in configs_file:
            if not config.startswith('#') and config.strip():
                values = [value.strip() for value in re.split(">>>", config)]
                hyperparameter_values_dic[values[0]] = float(values[1])
    return hyperparameter_values_dic
def read_initial_hyperparameter_values(initial_hyperparameter_values_file):
    """Parse a file of initial hyperparameter ranges into a dict.

    Expected line format: ``<name> - <min>, <max>``. Blank lines and lines
    starting with '#' are ignored.

    Args:
        initial_hyperparameter_values_file: path of the ranges text file.

    Returns:
        dict mapping hyperparameter name (str) to [min, max] (floats).
    """
    hyperparameter_values_dic = {}
    # iterate the file lazily instead of readlines(); the redundant explicit
    # close() inside the original 'with' block was removed.
    with open(initial_hyperparameter_values_file) as configs_file:
        for config in configs_file:
            if not config.startswith('#') and config.strip():
                # split on the '-' after the name and the ',' between bounds;
                # note this would break on negative bounds
                values = [value.strip() for value in re.split("-|,", config)]
                hyperparameter_values_dic[values[0]] = [float(values[1]), float(values[2])]
    return hyperparameter_values_dic
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/error_summary_scripts/error_summary_generator.py | ## This file concatenates the results across all the models for the same dataset and writes the mean SMAPE, median SMAPE, ranked SMAPE,
# mean MASE, median MASE and ranked MASE across the time series to a csv file
import glob
import argparse
import pandas as pd
import re
import numpy as np
from collections import defaultdict
# get the different cluster names as external arguments
argument_parser = argparse.ArgumentParser("Create error summaries")
argument_parser.add_argument('--dataset_name', required=True, help='Unique string for the name of the dataset')
argument_parser.add_argument('--is_merged_cluster_result', required=True,
help='1/0 denoting whether the results are merged from multiple clusters or not')
# parse the user arguments
args = argument_parser.parse_args()
dataset_name = args.dataset_name
is_merged_cluster_result = args.is_merged_cluster_result
if int(is_merged_cluster_result):
input_path = '../results/ensemble_errors/merged_cluster_results/'
else:
input_path = '../results/ensemble_errors/'
output_path = '../results/ensemble_errors/aggregate_errors/'
output_file = output_path + dataset_name + ".csv"
# get the list of all the files matching the regex
all_SMAPE_files = [filename for filename in glob.iglob(input_path + "all_smape_errors_" + dataset_name + "_*")]
all_MASE_files = [filename for filename in glob.iglob(input_path + "all_mase_errors_" + dataset_name + "_*")]
average_smape_errors_df = pd.DataFrame()
average_mase_errors_df = pd.DataFrame()
all_errors_df = pd.DataFrame(columns=["Model_Name", "Mean_SMAPE", "Median_SMAPE", "Mean_MASE", "Median_MASE"])
all_seeds_smape_errors_dic = {}
all_seeds_mase_errors_dic = {}
# concat all the errors to data frames
for smape_errors_file, mase_errors_file in zip(sorted(all_SMAPE_files),
sorted(all_MASE_files)):
print(smape_errors_file)
smape_errors_file_object = open(smape_errors_file, "r")
mase_errors_file_object = open(mase_errors_file, "r")
file_name_part = re.split(pattern="all_smape_errors_" + dataset_name + "_", string=smape_errors_file, maxsplit=1)[1]
current_model_all_smape_errors = []
for num in smape_errors_file_object:
if num == "NA\n" or num == "NA":
current_model_all_smape_errors.append(np.nan)
else:
current_model_all_smape_errors.append(float(num))
current_model_all_mase_errors = []
for num in mase_errors_file_object:
if num == "NA\n" or num == "NA":
current_model_all_mase_errors.append(np.nan)
else:
current_model_all_mase_errors.append(float(num))
smape_errors = np.asarray(current_model_all_smape_errors)
mase_errors = np.asarray(current_model_all_mase_errors)
# store the errors to calculate ranked errors later
average_smape_errors_df[file_name_part] = smape_errors
average_mase_errors_df[file_name_part] = mase_errors
# calculate the mean, median
mean_SMAPE = np.mean(smape_errors)
median_SMAPE = np.median(smape_errors)
mean_MASE = np.mean(mase_errors)
median_MASE = np.median(mase_errors)
all_errors_df.loc[-1] = [file_name_part, mean_SMAPE, median_SMAPE, mean_MASE, median_MASE]
all_errors_df.index = all_errors_df.index + 1
# calculate the ranked errors
all_smape_ranks_df = np.mean(average_smape_errors_df.rank(axis=1), axis=0)
all_mase_ranks_df = np.mean(average_mase_errors_df.rank(axis=1), axis=0)
# add the ranked errors to all_errors_df
all_errors_df["Ranked_SMAPE"] = all_smape_ranks_df.tolist()
all_errors_df["Ranked_MASE"] = all_mase_ranks_df.tolist()
# write the errors to csv file
all_errors_df.to_csv(output_file, index=False)
| 3,726 | 38.648936 | 135 | py |
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/error_summary_scripts/ensembling_forecasts.py | ## This file concatenates the results across all the models for the same dataset, takes the median of errors for different seeds and writes the mean SMAPE, median SMAPE, ranked SMAPE,
# mean MASE, median MASE and ranked MASE to a csv file
import glob
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
# get the dataset name as an external argument
argument_parser = argparse.ArgumentParser("Ensembling forecasts")
argument_parser.add_argument('--dataset_name', required=True, help='Unique string for the name of the dataset')

# parse the user arguments
args = argument_parser.parse_args()
dataset_name = args.dataset_name

input_path = '../results/rnn_forecasts/'
output_path = '../results/ensemble_rnn_forecasts/'

# every per-seed forecast file of this dataset
all_forecast_files = [filename for filename in glob.iglob(input_path + dataset_name + "_*")]

# model name -> list of per-seed forecast DataFrames
all_models_all_seeds_forecasts_dic = defaultdict(list)

# group the forecast files by model (file name without the trailing seed suffix)
for file_name in sorted(all_forecast_files):
    file_name_part = file_name.rsplit('_', 1)[0]
    file_name_part = file_name_part.split(input_path, 1)[1]

    # read the forecasts from the current file
    current_model_forecasts_df = pd.read_csv(file_name, header=None, dtype=np.float64)

    # append the forecasts to the dictionary
    all_models_all_seeds_forecasts_dic[file_name_part].append(current_model_forecasts_df)

# iterate the dictionary
for (model, forecasts_df_list) in all_models_all_seeds_forecasts_dic.items():
    # convert the dataframe list to an array (seeds on axis 0)
    forecasts_array = np.stack(forecasts_df_list)

    # take the NaN-ignoring median of forecasts across seeds for ensembling
    ensembled_forecasts = np.nanmedian(forecasts_array, axis=0)

    # write the ensembled forecasts to a file
    output_file = output_path + model
    np.savetxt(output_file, ensembled_forecasts, delimiter = ',')
| 1,864 | 34.865385 | 183 | py |
time-series-forecasting-release | time-series-forecasting-release/utility_scripts/error_summary_scripts/clusters_results_merger.py | ## This file concatenates the error results of the different clusters for a given dataset
## The script requires that all the files(mean_median file, all_smape_errors and all_mase_errors files) are present for all the clusters subject to consideration
import glob
import numpy as np
import argparse
import re
from collections import defaultdict
import os
input_path = '../results/ensemble_errors/'
output_path = '../results/ensemble_errors/merged_cluster_results/'

# get the dataset name as an external argument
argument_parser = argparse.ArgumentParser("Concatenate different cluster results of the same dataset")
argument_parser.add_argument('--dataset_name', required=True, help='Unique string for the name of the dataset')

# parse the user arguments
args = argument_parser.parse_args()
dataset_name = args.dataset_name

# merged-output file name prefixes (the model name is appended per file below)
output_file_mean_median = output_path + "mean_median_" + dataset_name + "_"
output_file_all_smape_errors = output_path + "all_smape_errors_" + dataset_name + "_"
output_file_all_mase_errors = output_path + "all_mase_errors_" + dataset_name + "_"

# delete previously merged files so the append mode used below starts clean
existing_output_files = (glob.glob(output_file_mean_median + "*", recursive=True)
                         + glob.glob(output_file_all_smape_errors + "*", recursive=True)
                         + glob.glob(output_file_all_mase_errors + "*", recursive=True))
for file in existing_output_files:
    try:
        os.remove(file)
    except OSError:  # narrowed from a bare except; os.remove raises OSError
        print("Error while deleting file : ", file)


def _read_error_values(file_object):
    """Read a one-error-per-line file; keep 'NA' markers as strings, else float.

    BUG FIX: the last line of a file has no trailing newline, so the original
    check (num == "NA\\n" only) crashed on float("NA"); both forms are now
    matched, consistent with error_summary_generator.py.
    """
    values = []
    for num in file_object:
        if num == "NA\n" or num == "NA":
            values.append("NA")
        else:
            values.append(float(num))
    return values


# per-cluster error files of this dataset
# smape errors
all_smape_errors_files = [filename for filename in glob.iglob(input_path + "all_smape_errors_" + dataset_name + "_*")]
# mase errors
all_mase_errors_files = [filename for filename in glob.iglob(input_path + "all_mase_errors_" + dataset_name + "_*")]

# model name -> {category -> list of errors}
all_mase_errors_dic = defaultdict(dict)
all_smape_errors_dic = defaultdict(dict)

# fixed order in which the per-category errors are concatenated
category_order_dic = {
    "macro": 0,
    "micro": 1,
    "demo": 2,
    "industry": 3,
    "finance": 4,
    "other": 5
}

# read the files one by one and collect their content per model and category
for smape_errors_file, mase_errors_file in zip(sorted(all_smape_errors_files),
                                               sorted(all_mase_errors_files)):
    # model name = part after "all_smape_errors_<dataset>_<category>_"
    string_list = re.split(pattern="all_smape_errors_" + dataset_name + "_" + "[a-zA-Z0-9]+_",
                           string=smape_errors_file, maxsplit=1)
    filename_part = string_list[1]

    category = re.search(dataset_name + '_([a-zA-Z0-9]+)_', smape_errors_file).group(1)

    # 'with' closes the handles (the original leaked them)
    with open(smape_errors_file) as filename_smape_object:
        all_smape_errors = _read_error_values(filename_smape_object)
    with open(mase_errors_file) as filename_mase_object:
        all_mase_errors = _read_error_values(filename_mase_object)

    all_mase_errors_dic[filename_part][category] = all_mase_errors
    all_smape_errors_dic[filename_part][category] = all_smape_errors

# write one merged file per model, categories concatenated in the fixed order
# (both dicts are filled in the same loop, so their key orders match)
for (smape_key, smape_errors_dic), (mase_key, mase_errors_dic) in zip(all_smape_errors_dic.items(),
                                                                      all_mase_errors_dic.items()):
    with open(output_file_all_smape_errors + smape_key, "a") as smape_output_object, \
            open(output_file_all_mase_errors + mase_key, "a") as mase_output_object:
        # sort the categories according to the given order; a model missing a
        # category raises KeyError (same behavior as the original)
        for (category, index) in sorted(category_order_dic.items(), key=lambda item: item[1]):
            category_smape_errors = smape_errors_dic[category]
            category_mase_errors = mase_errors_dic[category]

            # write to files, one value per line
            smape_output_object.writelines('\n'.join(str(element) for element in category_smape_errors))
            smape_output_object.writelines('\n')
            mase_output_object.writelines('\n'.join(str(element) for element in category_mase_errors))
            mase_output_object.writelines('\n')
| 4,761 | 38.032787 | 161 | py |
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_decoder/non_moving_window/unaccumulated_error/seq2seq_model_trainer.py | import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from tfrecords_handler.non_moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import model_training_configs
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTrainer:
    """Train a sequence-to-sequence RNN forecaster and validate it with SMAPE.

    Builds a TF1 static graph: a multi-layered RNN encoder, a teacher-forced
    decoder for training (sampling_probability=0.0) and a self-feeding decoder
    for validation (sampling_probability=1.0). The model is optimized with an
    L1 loss plus L2 regularization of all trainable weights, and evaluated by
    the mean per-series SMAPE over the validation TFRecord file.
    """

    def __init__(self, **kwargs):
        """Store the model/data configuration.

        Required kwargs: use_bias, use_peepholes, output_size,
        binary_train_file_path, binary_validation_file_path,
        contain_zero_values, address_near_zero_instability,
        integer_conversion, seed, cell_type, without_stl_decomposition.
        """
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        self.__output_size = kwargs["output_size"]
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_validation_file_path = kwargs["binary_validation_file_path"]
        self.__contain_zero_values = kwargs["contain_zero_values"]
        self.__address_near_zero_instability = kwargs["address_near_zero_instability"]
        self.__integer_conversion = kwargs["integer_conversion"]
        self.__seed = kwargs["seed"]
        self.__cell_type = kwargs["cell_type"]
        self.__without_stl_decomposition = kwargs['without_stl_decomposition']
        # BUG FIX: train_model() pads the validation metadata with shape
        # [self.__meta_data_size, 1], but this attribute was never initialized,
        # raising AttributeError at run time. Define it exactly as the sibling
        # Seq2SeqModelTrainerWithDenseLayer does: without STL decomposition the
        # metadata carries only the level value, otherwise the level plus one
        # seasonality value per forecasted step.
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Mean absolute error between predictions ``z`` and targets ``t``."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    # Training the time series
    def train_model(self, **kwargs):
        """Build the graph, train for ``max_num_epochs`` and validate.

        Expected kwargs: num_hidden_layers, cell_dimension, minibatch_size,
        max_epoch_size, max_num_epochs, l2_regularization,
        gaussian_noise_stdev, random_normal_initializer_stdev, optimizer_fn.

        Returns:
            tuple(float, list): mean SMAPE over all validation series and the
            per-series SMAPE values accumulated during validation.
        """
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        cell_dimension = int(kwargs["cell_dimension"])
        minibatch_size = int(kwargs["minibatch_size"])
        max_epoch_size = int(kwargs["max_epoch_size"])
        max_num_epochs = int(kwargs["max_num_epochs"])
        l2_regularization = kwargs["l2_regularization"]
        gaussian_noise_stdev = kwargs["gaussian_noise_stdev"]
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs["optimizer_fn"]

        tf.reset_default_graph()
        tf.set_random_seed(self.__seed)

        # adding noise to the input (regularizes training; validation uses the
        # clean input)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise
        validation_input = input
        training_target = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])
        decoder_input = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])

        # placeholders for the sequence lengths
        input_sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])
        output_sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # create the model architecture

        # RNN cell factory selected by the configured cell type
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=input_sequence_length,
                                                                                 dtype=tf.float32)

        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_states = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                    inputs=validation_input,
                                                                                    sequence_length=input_sequence_length,
                                                                                    dtype=tf.float32)

        # the final projection layer to convert the output to the desired dimension
        dense_layer = Dense(units=1, use_bias=self.__use_bias, kernel_initializer=weight_initializer)

        # decoder cell of the decoder network
        multi_layered_decoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        # building the decoder network for training (teacher forcing:
        # sampling_probability=0.0 always feeds the ground truth)
        with tf.variable_scope('decoder_train_scope') as decoder_train_scope:
            training_helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(inputs=decoder_input,
                                                                               sequence_length=output_sequence_length,
                                                                               sampling_probability=0.0,
                                                                               name="training_helper")
            training_decoder = tf.contrib.seq2seq.BasicDecoder(cell=multi_layered_decoder_cell, helper=training_helper,
                                                               initial_state=training_encoder_state,
                                                               output_layer=dense_layer)

            # perform the decoding
            training_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=training_decoder)

        # building the decoder network for inference (sampling_probability=1.0
        # always feeds the model's own previous prediction)
        with tf.variable_scope(decoder_train_scope, reuse=tf.AUTO_REUSE) as decoder_inference_scope:
            inference_helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(inputs=decoder_input,
                                                                                sequence_length=output_sequence_length,
                                                                                sampling_probability=1.0,
                                                                                name="inference_helper")
            inference_decoder = tf.contrib.seq2seq.BasicDecoder(cell=multi_layered_decoder_cell, helper=inference_helper,
                                                                initial_state=inference_encoder_states,
                                                                output_layer=dense_layer)

            # perform the decoding
            inference_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=inference_decoder)

        # error that should be minimized in the training process
        error = self.__l1_loss(training_decoder_outputs[0], training_target)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)
        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer
        optimizer = optimizer_fn(total_loss)

        # create the training and validation datasets from the tfrecord files
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        validation_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_validation_file_path], compression_type="ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader()

        # define the expected shapes of data after padding
        train_padded_shapes = ([], [tf.Dimension(None), 1], [self.__output_size, 1])
        validation_padded_shapes = ([], [tf.Dimension(None), 1], [self.__output_size, 1], [self.__meta_data_size, 1])

        # preparing the training data
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                              count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.train_data_parser)

        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=train_padded_shapes)

        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the validation data
        validation_dataset = validation_dataset.map(tfrecord_reader.validation_data_parser)

        # create batches from the validation time series by padding the datasets to make the variable sequence lengths fixed
        padded_validation_dataset = validation_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                    padded_shapes=validation_padded_shapes)

        # get an iterator to the validation data
        validation_data_iterator = padded_validation_dataset.make_initializable_iterator()

        # access the validation data using the iterator
        next_validation_data_batch = validation_data_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            smape_final = 0.0
            smape_list = []
            for epoch in range(max_num_epochs):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch, feed_dict={shuffle_seed: epoch})
                        # decoder input = last encoder step followed by the
                        # target shifted right by one step
                        decoder_input_value = np.hstack((np.expand_dims(training_data_batch_value[1][:, -1, :], axis=1), training_data_batch_value[2][:, :-1, :]))
                        total_loss_value, _ = session.run([total_loss, optimizer],
                                                          feed_dict={input: training_data_batch_value[1],
                                                                     training_target: training_data_batch_value[2],
                                                                     decoder_input: decoder_input_value,
                                                                     input_sequence_length: training_data_batch_value[0],
                                                                     output_sequence_length: [self.__output_size] * np.shape(training_data_batch_value[1])[0]})
                    except tf.errors.OutOfRangeError:
                        break

                session.run(validation_data_iterator.initializer)
                while True:
                    try:
                        # get the batch of validation inputs
                        validation_data_batch_value = session.run(next_validation_data_batch)

                        # shape for the target data
                        decoder_input_shape = [np.shape(validation_data_batch_value[1])[0], self.__output_size, 1]

                        # get the output of the network for the validation input data batch
                        validation_output = session.run(inference_decoder_outputs[0],
                                                        feed_dict={input: validation_data_batch_value[1],
                                                                   decoder_input: np.zeros(decoder_input_shape),
                                                                   input_sequence_length: validation_data_batch_value[0],
                                                                   output_sequence_length: [self.__output_size] *
                                                                                           np.shape(validation_data_batch_value[1])[0]
                                                                   })

                        # calculate the smape for the validation data using vectorization

                        # convert the data to remove the preprocessing
                        true_seasonality_values = validation_data_batch_value[3][:, 1:, 0]
                        level_values = validation_data_batch_value[3][:, 0, 0]
                        actual_values = validation_data_batch_value[2]

                        if self.__without_stl_decomposition:
                            converted_validation_output = np.exp(np.squeeze(validation_output, axis=2))
                            converted_actual_values = np.exp(np.squeeze(actual_values, axis=2))
                        else:
                            converted_validation_output = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + np.squeeze(validation_output,
                                                                                                   axis=2))
                            converted_actual_values = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + np.squeeze(actual_values,
                                                                                                   axis=2))

                        if (self.__contain_zero_values):  # to compensate for 0 values in data
                            converted_validation_output = converted_validation_output - 1
                            converted_actual_values = converted_actual_values - 1

                        if self.__without_stl_decomposition:
                            converted_validation_output = converted_validation_output * level_values[:, np.newaxis]
                            converted_actual_values = converted_actual_values * level_values[:, np.newaxis]

                        if self.__integer_conversion:
                            converted_validation_output = np.round(converted_validation_output)
                            converted_actual_values = np.round(converted_actual_values)

                        # clamp negative forecasts/actuals to zero
                        converted_validation_output[converted_validation_output < 0] = 0
                        converted_actual_values[converted_actual_values < 0] = 0

                        if self.__address_near_zero_instability:
                            # calculate the smape with an epsilon-stabilized denominator
                            epsilon = 0.1
                            sum = np.maximum(
                                np.abs(converted_validation_output) + np.abs(converted_actual_values) + epsilon,
                                0.5 + epsilon)
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            sum) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)
                        else:
                            # calculate the smape
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            (np.abs(converted_validation_output) + np.abs(converted_actual_values))) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)

                    except tf.errors.OutOfRangeError:
                        break

            smape_final = np.mean(smape_list)
            print("SMAPE value: {}".format(smape_final))

        session.close()
        return float(smape_final), smape_list
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_decoder/non_moving_window/unaccumulated_error/seq2seq_model_tester.py | import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from tfrecords_handler.non_moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTester:
    """Retrain a sequence-to-sequence RNN with tuned hyperparameters and
    produce forecasts for the test TFRecord file.

    Mirrors Seq2SeqModelTrainer's graph (encoder + teacher-forced training
    decoder + self-feeding inference decoder) but runs inference on the test
    set instead of computing validation SMAPE.
    """

    def __init__(self, **kwargs):
        """Store the model/data configuration.

        Required kwargs: use_bias, use_peepholes, output_size,
        binary_train_file_path, binary_test_file_path, seed, cell_type.
        Optional: without_stl_decomposition (defaults to False so existing
        callers keep working).
        """
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        self.__output_size = kwargs["output_size"]
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_test_file_path = kwargs["binary_test_file_path"]
        self.__seed = kwargs["seed"]
        self.__cell_type = kwargs["cell_type"]
        # BUG FIX: test_model() pads the metadata with
        # [self.__meta_data_size, 1] but this attribute was never initialized,
        # raising AttributeError at run time. Derive it the same way as the
        # sibling Seq2SeqModelTesterWithDenseLayer: without STL decomposition
        # the metadata carries only the level value, otherwise the level plus
        # one seasonality value per forecasted step. kwargs.get keeps the old
        # call signature backward-compatible.
        self.__without_stl_decomposition = kwargs.get("without_stl_decomposition", False)
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Mean absolute error between predictions ``z`` and targets ``t``."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    # Training the time series
    def test_model(self, **kwargs):
        """Train on the full training file and forecast the test series.

        Expected kwargs (the optimized hyperparameters): num_hidden_layers,
        max_num_epochs, max_epoch_size, cell_dimension, l2_regularization,
        minibatch_size, gaussian_noise_stdev,
        random_normal_initializer_stdev, optimizer_fn.

        Returns:
            np.ndarray: forecasts of shape (num_series, output_size).
        """
        # optimized hyperparameters
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        max_num_epochs = int(kwargs['max_num_epochs'])
        max_epoch_size = int(kwargs['max_epoch_size'])
        cell_dimension = int(kwargs['cell_dimension'])
        l2_regularization = kwargs['l2_regularization']
        minibatch_size = int(kwargs['minibatch_size'])
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs['optimizer_fn']

        # reset the tensorflow graph
        tf.reset_default_graph()
        tf.set_random_seed(self.__seed)

        # declare the input and output placeholders

        # adding noise to the input (testing uses the clean input)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise
        testing_input = input
        training_target = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])
        decoder_input = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])

        # placeholders for the sequence lengths
        input_sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])
        output_sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # create the model architecture

        # RNN cell factory selected by the configured cell type
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=input_sequence_length,
                                                                                 dtype=tf.float32)

        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                   inputs=testing_input,
                                                                                   sequence_length=input_sequence_length,
                                                                                   dtype=tf.float32)

        # the final projection layer to convert the output to the desired dimension
        dense_layer = Dense(units=1, use_bias=self.__use_bias, kernel_initializer=weight_initializer)

        # decoder cell of the decoder network
        multi_layered_decoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        # building the decoder network for training (teacher forcing)
        with tf.variable_scope('decoder_train_scope') as decoder_train_scope:
            training_helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(inputs=decoder_input,
                                                                               sequence_length=output_sequence_length,
                                                                               sampling_probability=0.0,
                                                                               name="training_helper")
            training_decoder = tf.contrib.seq2seq.BasicDecoder(cell=multi_layered_decoder_cell, helper=training_helper,
                                                               initial_state=training_encoder_state,
                                                               output_layer=dense_layer)

            # perform the decoding
            training_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=training_decoder)

        # building the decoder network for inference (feeds back its own predictions)
        with tf.variable_scope(decoder_train_scope, reuse=tf.AUTO_REUSE) as decoder_inference_scope:
            inference_helper = tf.contrib.seq2seq.ScheduledOutputTrainingHelper(inputs=decoder_input,
                                                                                sequence_length=output_sequence_length,
                                                                                sampling_probability=1.0,
                                                                                name="inference_helper")
            inference_decoder = tf.contrib.seq2seq.BasicDecoder(cell=multi_layered_decoder_cell,
                                                                helper=inference_helper,
                                                                initial_state=inference_encoder_state,
                                                                output_layer=dense_layer)

            # perform the decoding
            inference_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=inference_decoder)

        # error that should be minimized in the training process
        error = self.__l1_loss(training_decoder_outputs[0], training_target)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)
        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer
        optimizer = optimizer_fn(total_loss)

        # create the Dataset objects for the training and test data
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type="ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader()

        # preparing the training data
        # randomly shuffle the time series within the dataset
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                            count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        # the training records include metadata here, hence the validation parser
        training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)

        # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=([], [tf.Dimension(None), 1], [self.__output_size, 1],
                                                                                    [self.__meta_data_size, 1]))

        # get an iterator to the batches
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()

        # access each batch using the iterator
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the test data
        test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)

        # create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
        padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size), padded_shapes=(
            [], [tf.Dimension(None), 1], [self.__meta_data_size, 1]))

        # get an iterator to the test input data batch
        test_input_iterator = padded_test_input_data.make_one_shot_iterator()

        # access the test input batch using the iterator
        test_input_data_batch = test_input_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})

                while True:
                    try:
                        next_training_batch_value = session.run(next_training_data_batch, feed_dict={shuffle_seed: epoch})
                        # decoder input = last encoder step followed by the
                        # target shifted right by one step
                        decoder_input_value = np.hstack((np.expand_dims(next_training_batch_value[1][:, -1, :], axis=1),
                                                         next_training_batch_value[2][:, :-1, :]))

                        # model training
                        loss, _ = session.run([total_loss, optimizer],
                                              feed_dict={input: next_training_batch_value[1],
                                                         training_target: next_training_batch_value[2],
                                                         decoder_input: decoder_input_value,
                                                         input_sequence_length: next_training_batch_value[0],
                                                         output_sequence_length: [self.__output_size] * np.shape(next_training_batch_value[1])[0]})
                    except tf.errors.OutOfRangeError:
                        break

            # applying the model to the test data
            list_of_forecasts = []
            while True:
                try:
                    # get the batch of test inputs
                    test_input_batch_value = session.run(test_input_data_batch)

                    # shape for the target data
                    decoder_input_shape = [np.shape(test_input_batch_value[1])[0], self.__output_size, 1]

                    # get the output of the network for the test input data batch
                    test_output = session.run(inference_decoder_outputs[0],
                                              feed_dict={input: test_input_batch_value[1],
                                                         decoder_input: np.zeros(decoder_input_shape),
                                                         input_sequence_length: test_input_batch_value[0],
                                                         output_sequence_length: [self.__output_size] * np.shape(test_input_batch_value[1])[0]})

                    forecasts = test_output
                    list_of_forecasts.extend(forecasts.tolist())

                except tf.errors.OutOfRangeError:
                    break

            return np.squeeze(list_of_forecasts, axis=2)  # the third dimension is squeezed since it is one
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_dense_layer/moving_window/unaccumulated_error/seq2seq_model_trainer.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import model_training_configs
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTrainerWithDenseLayer:
    """Trainer for a moving-window seq2seq model whose "decoder" is a dense
    projection applied to the encoder output at the last valid time step.

    A multi-layered RNN encodes each input window; the encoder output at the
    final (unpadded) step is projected with ``tf.layers.dense`` to
    ``output_size`` values. Trained with an L1 loss plus L2 weight
    regularization; validated with the mean per-series SMAPE.
    """
    def __init__(self, **kwargs):
        """Store the model/data configuration passed as keyword arguments."""
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        self.__input_size = kwargs["input_size"]
        self.__output_size = kwargs["output_size"]
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_validation_file_path = kwargs["binary_validation_file_path"]
        self.__contain_zero_values = kwargs["contain_zero_values"]
        self.__address_near_zero_instability = kwargs["address_near_zero_instability"]
        self.__integer_conversion = kwargs["integer_conversion"]
        self.__seed = kwargs["seed"]
        self.__cell_type = kwargs["cell_type"]
        self.__without_stl_decomposition = kwargs['without_stl_decomposition']
        # define the metadata size based on the usage of stl decomposition
        # (level value only, or level plus one seasonality value per output step)
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1
    def __l1_loss(self, z, t):
        """Mean absolute error between predictions ``z`` and targets ``t``."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss
    # Training the time series
    def train_model(self, **kwargs):
        """Build the TF1 graph, train for ``max_num_epochs`` and validate.

        Expected kwargs: num_hidden_layers, cell_dimension, minibatch_size,
        max_epoch_size, max_num_epochs, l2_regularization,
        gaussian_noise_stdev, random_normal_initializer_stdev, optimizer_fn.

        Returns:
            tuple(float, list): mean SMAPE over all validation series and the
            per-series SMAPE values accumulated during validation.
        """
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        cell_dimension = int(kwargs["cell_dimension"])
        minibatch_size = int(kwargs["minibatch_size"])
        max_epoch_size = int(kwargs["max_epoch_size"])
        max_num_epochs = int(kwargs["max_num_epochs"])
        l2_regularization = kwargs["l2_regularization"]
        gaussian_noise_stdev = kwargs["gaussian_noise_stdev"]
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs["optimizer_fn"]
        tf.reset_default_graph()
        tf.set_random_seed(self.__seed)
        # adding noise to the input (validation uses the clean input)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
        validation_input = input
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev,
                                 dtype=tf.float32)
        training_input = input + noise
        target = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
        # placeholder for the sequence lengths
        sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])
        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)
        # create a tensor array for the indices of the encoder outputs array and the target
        # (pairs [batch_index, last_valid_timestep] used with gather_nd)
        new_index_array = tf.range(start=0, limit=tf.shape(sequence_length)[0], delta=1)
        output_array_indices = tf.stack([new_index_array, sequence_length - 1], axis=-1)
        actual_targets = tf.gather_nd(params=target, indices=output_array_indices)
        actual_targets = tf.expand_dims(input=actual_targets, axis=1)
        # create the model architecture
        # RNN with the layer of cells
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell
        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])
        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=sequence_length,
                                                                                 dtype=tf.float32)
        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_states = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                    inputs=validation_input,
                                                                                    sequence_length=sequence_length,
                                                                                    dtype=tf.float32)
        # building the decoder network for training
        with tf.variable_scope('dense_layer_train_scope') as dense_layer_train_scope:
            train_final_timestep_predictions = tf.gather_nd(params=training_encoder_outputs,
                                                            indices=output_array_indices)
            # the final projection layer to convert the encoder_outputs to the desired dimension
            train_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=train_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            train_prediction_output = tf.expand_dims(input=train_prediction_output, axis=1)
        # building the decoder network for inference
        with tf.variable_scope(dense_layer_train_scope, reuse=tf.AUTO_REUSE) as dense_layer_inference_scope:
            inference_final_timestep_predictions = tf.gather_nd(params=inference_encoder_outputs,
                                                                indices=output_array_indices)
            # the final projection layer to convert the encoder_outputs to the desired dimension
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            inference_prediction_output = tf.expand_dims(input=inference_prediction_output, axis=1)
        # error that should be minimized in the training process
        error = self.__l1_loss(train_prediction_output, actual_targets)
        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)
        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))
        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
        # create the optimizer
        optimizer = optimizer_fn(total_loss)
        # create the training and validation datasets from the tfrecord files
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        validation_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_validation_file_path],
                                                     compression_type="ZLIB")
        # parse the records
        tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size, self.__meta_data_size)
        # define the expected shapes of data after padding
        train_padded_shapes = ([], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__output_size])
        validation_padded_shapes = (
            [], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__output_size],
            [tf.Dimension(None), self.__meta_data_size])
        # preparing the training data
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(
        #     tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                             count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.train_data_parser)
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=train_padded_shapes)
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
        next_training_data_batch = training_data_batch_iterator.get_next()
        # preparing the validation data
        validation_dataset = validation_dataset.map(tfrecord_reader.validation_data_parser)
        # create a single batch from all the validation time series by padding the datasets to make the variable sequence lengths fixed
        padded_validation_dataset = validation_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                    padded_shapes=validation_padded_shapes)
        # get an iterator to the validation data
        validation_data_iterator = padded_validation_dataset.make_initializable_iterator()
        # access the validation data using the iterator
        next_validation_data_batch = validation_data_iterator.get_next()
        # setup variable initialization
        init_op = tf.global_variables_initializer()
        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)
        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)
            smape_final = 0.0
            smape_list = []
            for epoch in range(max_num_epochs):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch,
                                                                feed_dict={shuffle_seed: epoch})
                        total_loss_value, _ = session.run([total_loss, optimizer],
                                                          feed_dict={input: training_data_batch_value[1],
                                                                     target: training_data_batch_value[2],
                                                                     sequence_length: training_data_batch_value[0]
                                                                     })
                    except tf.errors.OutOfRangeError:
                        break
                session.run(validation_data_iterator.initializer)
                while True:
                    try:
                        # get the batch of validation inputs
                        validation_data_batch_value = session.run(next_validation_data_batch)
                        # get the output of the network for the validation input data batch
                        validation_output = session.run(inference_prediction_output,
                                                        feed_dict={input: validation_data_batch_value[1],
                                                                   sequence_length:
                                                                       validation_data_batch_value[0]
                                                                   })
                        # calculate the smape for the validation data using vectorization
                        # pick the metadata/targets at the last valid window of each series
                        last_indices = validation_data_batch_value[0] - 1
                        array_first_dimension = np.array(range(0, validation_data_batch_value[0].shape[0]))
                        true_seasonality_values = validation_data_batch_value[3][array_first_dimension,
                                                  last_indices, 1:]
                        level_values = validation_data_batch_value[3][array_first_dimension, last_indices, 0]
                        actual_values = validation_data_batch_value[2][array_first_dimension, last_indices, :]
                        if self.__without_stl_decomposition:
                            converted_actual_values = np.exp(actual_values)
                            converted_validation_output = np.exp(validation_output)
                        else:
                            converted_actual_values = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + actual_values)
                            converted_validation_output = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + validation_output)
                        if (self.__contain_zero_values):  # to compensate for 0 values in data
                            converted_validation_output = converted_validation_output - 1
                            converted_actual_values = converted_actual_values - 1
                        if self.__without_stl_decomposition:
                            converted_actual_values = converted_actual_values * level_values[:, np.newaxis]
                            converted_validation_output = converted_validation_output * level_values[:, np.newaxis]
                        if self.__integer_conversion:
                            converted_validation_output = np.round(converted_validation_output)
                            converted_actual_values = np.round(converted_actual_values)
                        # clamp negative forecasts/actuals to zero
                        converted_validation_output[converted_validation_output < 0] = 0
                        converted_actual_values[converted_actual_values < 0] = 0
                        if self.__address_near_zero_instability:
                            # calculate the smape with an epsilon-stabilized denominator
                            epsilon = 0.1
                            sum = np.maximum(
                                np.abs(converted_validation_output) + np.abs(converted_actual_values) + epsilon,
                                0.5 + epsilon)
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            sum) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)
                        else:
                            # calculate the smape
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            (np.abs(converted_validation_output) + np.abs(converted_actual_values))) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)
                    except tf.errors.OutOfRangeError:
                        break
            smape_final = np.mean(smape_list)
            print("SMAPE value: {}".format(smape_final))
        session.close()
        return float(smape_final), smape_list
| 15,412 | 52.703833 | 135 | py |
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_dense_layer/moving_window/unaccumulated_error/seq2seq_model_tester.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTesterWithDenseLayer:
    """Tester for the seq2seq-style model (RNN encoder + dense projection)
    on moving-window data stored as ZLIB-compressed TFRecord files.

    test_model() rebuilds the graph from the given hyperparameters, trains it
    on the training TFRecords and returns the forecasts produced for the test
    TFRecords.
    """

    def __init__(self, **kwargs):
        # flags controlling the dense layer bias and LSTM peephole connections
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        # sizes of the input and output windows
        self.__input_size = kwargs["input_size"]
        self.__output_size = kwargs["output_size"]
        # paths of the binary (TFRecord) train / test files
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_test_file_path = kwargs["binary_test_file_path"]
        # seed for TensorFlow's graph-level randomness
        self.__seed = kwargs["seed"]
        # one of "LSTM", "GRU", "RNN"
        self.__cell_type = kwargs["cell_type"]
        self.__without_stl_decomposition = kwargs["without_stl_decomposition"]

        # define the metadata size based on the usage of stl decomposition:
        # without STL only the level value is stored, otherwise the level
        # plus one seasonality value per output step
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Return the mean absolute error between predictions z and targets t."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    # Training the time series
    def test_model(self, **kwargs):
        """Train the network with the given (optimized) hyperparameters and
        return the forecasts for the test set as a 2-D numpy array
        (series x output_size).
        """
        # optimized hyperparameters
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        max_num_epochs = int(kwargs['max_num_epochs'])
        max_epoch_size = int(kwargs['max_epoch_size'])
        cell_dimension = int(kwargs['cell_dimension'])
        l2_regularization = kwargs['l2_regularization']
        minibatch_size = int(kwargs['minibatch_size'])
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs['optimizer_fn']

        # reset the tensorflow graph
        tf.reset_default_graph()

        tf.set_random_seed(self.__seed)

        # declare the input and output placeholders

        # adding noise to the input (noise is applied only to the training path)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
        testing_input = input
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise

        target = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])

        # placeholder for the sequence lengths
        sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # create a tensor array for the indices of the encoder outputs array and the target:
        # for every series pick the output at its last valid timestep
        new_index_array = tf.range(start=0, limit=tf.shape(sequence_length)[0], delta=1)
        output_array_indices = tf.stack([new_index_array, sequence_length - 1], axis=-1)
        actual_targets = tf.gather_nd(params=target, indices=output_array_indices)
        actual_targets = tf.expand_dims(input=actual_targets, axis=1)

        # create the model architecture

        # RNN with the layer of cells
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=sequence_length,
                                                                                 dtype=tf.float32)

        # inference path reuses the training encoder variables
        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_states = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                    inputs=testing_input,
                                                                                    sequence_length=sequence_length,
                                                                                    dtype=tf.float32)

        # building the decoder network for training
        with tf.variable_scope('dense_layer_train_scope') as dense_layer_train_scope:
            train_final_timestep_predictions = tf.gather_nd(params=training_encoder_outputs, indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            train_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=train_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            train_prediction_output = tf.expand_dims(input=train_prediction_output, axis=1)

        # building the decoder network for inference (reuses the dense layer variables)
        with tf.variable_scope(dense_layer_train_scope, reuse=tf.AUTO_REUSE) as dense_layer_inference_scope:
            inference_final_timestep_predictions = tf.gather_nd(params=inference_encoder_outputs,
                                                                indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            inference_prediction_output = tf.expand_dims(input=inference_prediction_output, axis=1)

        # error that should be minimized in the training process
        error = self.__l1_loss(train_prediction_output, actual_targets)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)

        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer
        optimizer = optimizer_fn(total_loss)

        # create the Dataset objects for the training and test data
        training_dataset = tf.data.TFRecordDataset(filenames = [self.__binary_train_file_path], compression_type = "ZLIB")
        test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type = "ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size, self.__meta_data_size)

        # preparing the training data
        # randomly shuffle the time series within the dataset
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                                                                   count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)

        # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=([], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__output_size],
                                                                                    [tf.Dimension(None), self.__meta_data_size]))

        # get an iterator to the batches
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()

        # access each batch using the iterator
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the test data
        test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)

        # create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
        padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size), padded_shapes=(
            [], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__meta_data_size]))

        # get an iterator to the test input data batch
        test_input_iterator = padded_test_input_data.make_one_shot_iterator()

        # access the test input batch using the iterator
        test_input_data_batch = test_input_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                losses = []
                while True:
                    try:
                        next_training_batch_value = session.run(next_training_data_batch, feed_dict={shuffle_seed: epoch})

                        # model training
                        _, loss_val = session.run([optimizer, total_loss],
                                                  feed_dict={input: next_training_batch_value[1],
                                                             target: next_training_batch_value[2],
                                                             sequence_length: next_training_batch_value[0],
                                                             })
                        losses.append(loss_val)
                    except tf.errors.OutOfRangeError:
                        break

            # applying the model to the test data
            list_of_forecasts = []
            while True:
                try:
                    # get the batch of test inputs
                    test_input_batch_value = session.run(test_input_data_batch)

                    # get the output of the network for the test input data batch
                    test_output = session.run(inference_prediction_output,
                                              feed_dict={input: test_input_batch_value[1],
                                                         sequence_length: test_input_batch_value[0],
                                                         })

                    forecasts = test_output
                    list_of_forecasts.extend(forecasts.tolist())

                except tf.errors.OutOfRangeError:
                    break

        return np.squeeze(list_of_forecasts, axis = 1)  # the second dimension is squeezed since it is one
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_dense_layer/non_moving_window/unaccumulated_error/seq2seq_model_trainer.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.non_moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import model_training_configs
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTrainerWithDenseLayer:
    """Trainer for the seq2seq-style model (RNN encoder + dense projection)
    on non-moving-window data stored as ZLIB-compressed TFRecord files.

    train_model() builds the graph from the given hyperparameters, trains it
    on the training TFRecords and evaluates it on the validation TFRecords,
    returning the mean SMAPE together with the per-series SMAPE values.
    """

    def __init__(self, **kwargs):
        # flags controlling the dense layer bias and LSTM peephole connections
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        # length of the forecasting horizon
        self.__output_size = kwargs["output_size"]
        # paths of the binary (TFRecord) train / validation files
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_validation_file_path = kwargs["binary_validation_file_path"]
        # post-processing flags used when converting forecasts back to the original scale
        self.__contain_zero_values = kwargs["contain_zero_values"]
        self.__address_near_zero_instability = kwargs["address_near_zero_instability"]
        self.__integer_conversion = kwargs["integer_conversion"]
        self.__seed = kwargs["seed"]
        # one of "LSTM", "GRU", "RNN"
        self.__cell_type = kwargs["cell_type"]
        self.__without_stl_decomposition = kwargs['without_stl_decomposition']

        # BUG FIX: __meta_data_size was referenced inside train_model() (for the
        # validation padded shapes) but never defined, which raised an
        # AttributeError at runtime. Define it the same way as the sibling
        # models in this package: without STL decomposition the metadata holds
        # only the level value, otherwise the level plus one seasonality value
        # per output step.
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Return the mean absolute error between predictions z and targets t."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    # Training the time series
    def train_model(self, **kwargs):
        """Train the model and compute the validation SMAPE.

        Expected kwargs: num_hidden_layers, cell_dimension, minibatch_size,
        max_epoch_size, max_num_epochs, l2_regularization,
        gaussian_noise_stdev, random_normal_initializer_stdev, optimizer_fn.

        Returns:
            (mean SMAPE over all validation series, list of per-series SMAPEs)
        """
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        cell_dimension = int(kwargs["cell_dimension"])
        minibatch_size = int(kwargs["minibatch_size"])
        max_epoch_size = int(kwargs["max_epoch_size"])
        max_num_epochs = int(kwargs["max_num_epochs"])
        l2_regularization = kwargs["l2_regularization"]
        gaussian_noise_stdev = kwargs["gaussian_noise_stdev"]
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs["optimizer_fn"]

        tf.reset_default_graph()

        tf.set_random_seed(self.__seed)

        # adding noise to the input (noise is applied only to the training path)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])
        validation_input = input
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise

        target = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])

        # placeholder for the sequence lengths
        sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # create the model architecture

        # RNN with the layer of cells
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=sequence_length,
                                                                                 dtype=tf.float32)

        # inference path reuses the training encoder variables
        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_states = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                    inputs=validation_input,
                                                                                    sequence_length=sequence_length,
                                                                                    dtype=tf.float32)

        # create a tensor array for the indices of the encoder outputs array:
        # for every series pick the output at its last valid timestep
        new_index_array = tf.range(start=0, limit=tf.shape(sequence_length)[0], delta=1)
        output_array_indices = tf.stack([new_index_array, sequence_length - 1], axis=-1)

        # building the decoder network for training
        with tf.variable_scope('dense_layer_train_scope') as dense_layer_train_scope:
            train_final_timestep_predictions = tf.gather_nd(params=training_encoder_outputs,
                                                            indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            train_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=train_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            train_prediction_output = tf.expand_dims(input=train_prediction_output, axis=2)

        # building the decoder network for inference (reuses the dense layer variables)
        with tf.variable_scope(dense_layer_train_scope, reuse=tf.AUTO_REUSE) as dense_layer_inference_scope:
            inference_final_timestep_predictions = tf.gather_nd(params=inference_encoder_outputs,
                                                                indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            inference_prediction_output = tf.expand_dims(input=inference_prediction_output, axis=2)

        # error that should be minimized in the training process
        error = self.__l1_loss(train_prediction_output, target)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)

        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer
        optimizer = optimizer_fn(total_loss)

        # create the training and validation datasets from the tfrecord files
        training_dataset = tf.data.TFRecordDataset(filenames = [self.__binary_train_file_path], compression_type = "ZLIB")
        validation_dataset = tf.data.TFRecordDataset(filenames = [self.__binary_validation_file_path], compression_type = "ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader()

        # define the expected shapes of data after padding
        train_padded_shapes = ([], [tf.Dimension(None), 1], [self.__output_size, 1])
        validation_padded_shapes = ([], [tf.Dimension(None), 1], [self.__output_size, 1], [self.__meta_data_size, 1])

        # preparing the training data
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                                                                   count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.train_data_parser)

        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=train_padded_shapes)

        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the validation data
        validation_dataset = validation_dataset.map(tfrecord_reader.validation_data_parser)

        # create a single batch from all the validation time series by padding the datasets to make the variable sequence lengths fixed
        padded_validation_dataset = validation_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                    padded_shapes=validation_padded_shapes)

        # get an iterator to the validation data
        validation_data_iterator = padded_validation_dataset.make_initializable_iterator()

        # access the validation data using the iterator
        next_validation_data_batch = validation_data_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            smape_final = 0.0
            smape_list = []
            for epoch in range(max_num_epochs):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                losses = []
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch, feed_dict={shuffle_seed: epoch})

                        total_loss_value, _ = session.run([total_loss, optimizer],
                                                          feed_dict={input: training_data_batch_value[1],
                                                                     target: training_data_batch_value[2],
                                                                     sequence_length: training_data_batch_value[0]
                                                                     })
                        losses.append(total_loss_value)
                    except tf.errors.OutOfRangeError:
                        break

            # validate the model once after the full training run
            session.run(validation_data_iterator.initializer)

            while True:
                try:
                    # get the batch of validation inputs
                    validation_data_batch_value = session.run(next_validation_data_batch)

                    # shape for the target data
                    target_data_shape = [np.shape(validation_data_batch_value[1])[0], self.__output_size, 1]

                    # get the output of the network for the validation input data batch
                    validation_output = session.run(inference_prediction_output,
                                                    feed_dict={input: validation_data_batch_value[1],
                                                               target: np.zeros(target_data_shape),
                                                               sequence_length: validation_data_batch_value[0]
                                                               })

                    # calculate the smape for the validation data using vectorization

                    # convert the data to remove the preprocessing:
                    # metadata layout is [level, seasonality_1 .. seasonality_H]
                    true_seasonality_values = validation_data_batch_value[3][:, 1:, 0]
                    level_values = validation_data_batch_value[3][:, 0, 0]

                    actual_values = validation_data_batch_value[2]
                    if self.__without_stl_decomposition:
                        converted_validation_output = np.exp(np.squeeze(validation_output, axis=2))
                        converted_actual_values = np.exp(np.squeeze(actual_values, axis=2))
                    else:
                        converted_validation_output = np.exp(
                            true_seasonality_values + level_values[:, np.newaxis] + np.squeeze(validation_output,
                                                                                               axis=2))
                        converted_actual_values = np.exp(
                            true_seasonality_values + level_values[:, np.newaxis] + np.squeeze(actual_values,
                                                                                               axis=2))

                    if (self.__contain_zero_values):  # to compensate for 0 values in data
                        converted_validation_output = converted_validation_output - 1
                        converted_actual_values = converted_actual_values - 1

                    if self.__without_stl_decomposition:
                        converted_validation_output = converted_validation_output * level_values[:, np.newaxis]
                        converted_actual_values = converted_actual_values * level_values[:, np.newaxis]

                    if self.__integer_conversion:
                        converted_validation_output = np.round(converted_validation_output)
                        converted_actual_values = np.round(converted_actual_values)

                    # negative forecasts/actuals are clipped to zero
                    converted_validation_output[converted_validation_output < 0] = 0
                    converted_actual_values[converted_actual_values < 0] = 0

                    if self.__address_near_zero_instability:
                        # calculate the smape (variant that is stable near zero)
                        epsilon = 0.1
                        # renamed from "sum" to avoid shadowing the builtin
                        denominator = np.maximum(
                            np.abs(converted_validation_output) + np.abs(converted_actual_values) + epsilon,
                            0.5 + epsilon)
                        smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                        denominator) * 2
                        smape_values_per_series = np.mean(smape_values, axis=1)
                        smape_list.extend(smape_values_per_series)
                    else:
                        # calculate the smape
                        smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                        (np.abs(converted_validation_output) + np.abs(converted_actual_values))) * 2
                        smape_values_per_series = np.mean(smape_values, axis=1)
                        smape_list.extend(smape_values_per_series)

                except tf.errors.OutOfRangeError:
                    break

            smape_final = np.mean(smape_list)
            print("SMAPE value: {}".format(smape_final))
            session.close()

        return float(smape_final), smape_list
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/seq2seq_model/with_dense_layer/non_moving_window/unaccumulated_error/seq2seq_model_tester.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.non_moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class Seq2SeqModelTesterWithDenseLayer:
    """Tester for the seq2seq-style model (RNN encoder + dense projection)
    on non-moving-window data stored as ZLIB-compressed TFRecord files.

    test_model() rebuilds the graph from the given hyperparameters, trains it
    on the training TFRecords and returns the forecasts produced for the test
    TFRecords.
    """

    def __init__(self, **kwargs):
        # flags controlling the dense layer bias and LSTM peephole connections
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        # length of the forecasting horizon
        self.__output_size = kwargs["output_size"]
        # paths of the binary (TFRecord) train / test files
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_test_file_path = kwargs["binary_test_file_path"]
        # seed for TensorFlow's graph-level randomness
        self.__seed = kwargs["seed"]
        # one of "LSTM", "GRU", "RNN"
        self.__cell_type = kwargs["cell_type"]

        # BUG FIX: __meta_data_size was referenced in test_model() (for the
        # padded shapes) but never defined, which raised an AttributeError at
        # runtime. The flag is accepted optionally (defaulting to False, i.e.
        # STL-decomposed data with level + per-step seasonality metadata) so
        # existing callers keep working. NOTE(review): confirm the default
        # matches the metadata layout of the TFRecords used with this class.
        self.__without_stl_decomposition = kwargs.get("without_stl_decomposition", False)
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Return the mean absolute error between predictions z and targets t."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    # Training the time series
    def test_model(self, **kwargs):
        """Train the network with the given (optimized) hyperparameters and
        return the forecasts for the test set as a 2-D numpy array
        (series x output_size).
        """
        # optimized hyperparameters
        num_hidden_layers = int(kwargs['num_hidden_layers'])
        max_num_epochs = int(kwargs['max_num_epochs'])
        max_epoch_size = int(kwargs['max_epoch_size'])
        cell_dimension = int(kwargs['cell_dimension'])
        l2_regularization = kwargs['l2_regularization']
        minibatch_size = int(kwargs['minibatch_size'])
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        optimizer_fn = kwargs['optimizer_fn']

        # reset the tensorflow graph
        tf.reset_default_graph()

        tf.set_random_seed(self.__seed)

        # declare the input and output placeholders

        # adding noise to the input (noise is applied only to the training path)
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, 1])
        testing_input = input
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise

        target = tf.placeholder(dtype=tf.float32, shape=[None, self.__output_size, 1])

        # placeholder for the sequence lengths
        sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # create the model architecture

        # RNN with the layer of cells
        def cell():
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        # building the encoder network
        multi_layered_encoder_cell = tf.nn.rnn_cell.MultiRNNCell(
            cells=[cell() for _ in range(int(num_hidden_layers))])

        with tf.variable_scope('train_encoder_scope') as encoder_train_scope:
            training_encoder_outputs, training_encoder_state = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                 inputs=training_input,
                                                                                 sequence_length=sequence_length,
                                                                                 dtype=tf.float32)

        # inference path reuses the training encoder variables
        with tf.variable_scope(encoder_train_scope, reuse=tf.AUTO_REUSE) as encoder_inference_scope:
            inference_encoder_outputs, inference_encoder_states = tf.nn.dynamic_rnn(cell=multi_layered_encoder_cell,
                                                                                    inputs=testing_input,
                                                                                    sequence_length=sequence_length,
                                                                                    dtype=tf.float32)

        # create a tensor array for the indices of the encoder outputs array:
        # for every series pick the output at its last valid timestep
        new_index_array = tf.range(start=0, limit=tf.shape(sequence_length)[0], delta=1)
        output_array_indices = tf.stack([new_index_array, sequence_length - 1], axis=-1)

        # building the decoder network for training
        with tf.variable_scope('dense_layer_train_scope') as dense_layer_train_scope:
            train_final_timestep_predictions = tf.gather_nd(params=training_encoder_outputs,
                                                            indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            train_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=train_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            train_prediction_output = tf.expand_dims(input=train_prediction_output, axis=2)

        # building the decoder network for inference (reuses the dense layer variables)
        with tf.variable_scope(dense_layer_train_scope, reuse=tf.AUTO_REUSE) as dense_layer_inference_scope:
            inference_final_timestep_predictions = tf.gather_nd(params=inference_encoder_outputs,
                                                                indices=output_array_indices)

            # the final projection layer to convert the encoder_outputs to the desired dimension
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_final_timestep_predictions, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer)
            inference_prediction_output = tf.expand_dims(input=inference_prediction_output, axis=2)

        # error that should be minimized in the training process
        error = self.__l1_loss(train_prediction_output, target)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)

        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))

        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the optimizer
        optimizer = optimizer_fn(total_loss)

        # create the Dataset objects for the training and test data
        training_dataset = tf.data.TFRecordDataset(filenames = [self.__binary_train_file_path], compression_type = "ZLIB")
        test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type = "ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader()

        # preparing the training data
        # randomly shuffle the time series within the dataset
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                                                                   count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)

        # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=([], [tf.Dimension(None), 1], [self.__output_size, 1],
                                                                                    [self.__meta_data_size, 1]))

        # get an iterator to the batches
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()

        # access each batch using the iterator
        next_training_data_batch = training_data_batch_iterator.get_next()

        # preparing the test data
        test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)

        # create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
        padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size), padded_shapes=(
            [], [tf.Dimension(None), 1], [self.__meta_data_size, 1]))

        # get an iterator to the test input data batch
        test_input_iterator = padded_test_input_data.make_one_shot_iterator()

        # access the test input batch using the iterator
        test_input_data_batch = test_input_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                losses = []
                while True:
                    try:
                        next_training_batch_value = session.run(next_training_data_batch, feed_dict={shuffle_seed: epoch})

                        # model training
                        _, loss_val = session.run([optimizer, total_loss],
                                                  feed_dict={input: next_training_batch_value[1],
                                                             target: next_training_batch_value[2],
                                                             sequence_length: next_training_batch_value[0],
                                                             })
                        losses.append(loss_val)
                    except tf.errors.OutOfRangeError:
                        break

            # applying the model to the test data
            list_of_forecasts = []
            while True:
                try:
                    # get the batch of test inputs
                    test_input_batch_value = session.run(test_input_data_batch)

                    # shape for the target data
                    target_data_shape = [np.shape(test_input_batch_value[1])[0], self.__output_size, 1]

                    # get the output of the network for the test input data batch
                    test_output = session.run(inference_prediction_output,
                                              feed_dict={input: test_input_batch_value[1],
                                                         target: np.zeros(shape = target_data_shape),
                                                         sequence_length: test_input_batch_value[0],
                                                         })

                    forecasts = test_output
                    list_of_forecasts.extend(forecasts.tolist())

                except tf.errors.OutOfRangeError:
                    break

        return np.squeeze(list_of_forecasts, axis = 2)  # the third dimension is squeezed since it is one
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/stacking_model/stacking_model_tester.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs["use_bias"]
self.__use_peepholes = kwargs["use_peepholes"]
self.__input_size = kwargs["input_size"]
self.__output_size = kwargs["output_size"]
self.__binary_train_file_path = kwargs["binary_train_file_path"]
self.__binary_test_file_path = kwargs["binary_test_file_path"]
self.__seed = kwargs["seed"]
self.__cell_type = kwargs["cell_type"]
self.__without_stl_decomposition = kwargs["without_stl_decomposition"]
# define the metadata size based on the usage of stl decomposition
if self.__without_stl_decomposition:
self.__meta_data_size = 1
else:
self.__meta_data_size = self.__output_size + 1
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
# Training the time series
    def test_model(self, **kwargs):
        """Retrain the RNN on the full training file and forecast the test set.

        Builds a fresh TF1 graph (stacked RNN cells + a shared dense output
        layer), trains for ``max_num_epochs`` epochs over the training
        TFRecords, then runs the inference branch (no input noise) over the
        test TFRecords and collects one forecast per series, taken at the
        last unpadded time step of each sequence.

        Keyword Args:
            num_hidden_layers: number of stacked RNN cells.
            cell_dimension: hidden units per cell.
            minibatch_size: batch size for both training and test batching.
            max_epoch_size: how many times the training set is repeated per epoch.
            max_num_epochs: number of training epochs.
            l2_regularization: weight of the L2 penalty over all trainables.
            gaussian_noise_stdev: stddev of the additive input noise (training only).
            optimizer_fn: callable mapping a loss tensor to a training op.
            random_normal_initializer_stdev: stddev for weight initialization.

        Returns:
            list: one forecast (length ``output_size``) per test series.
        """
        # extract the parameters from the kwargs
        num_hidden_layers = kwargs['num_hidden_layers']
        cell_dimension = kwargs['cell_dimension']
        minibatch_size = kwargs['minibatch_size']
        max_epoch_size = kwargs['max_epoch_size']
        max_num_epochs = kwargs['max_num_epochs']
        l2_regularization = kwargs['l2_regularization']
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        optimizer_fn = kwargs['optimizer_fn']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
        # reset the tensorflow graph
        tf.reset_default_graph()
        tf.set_random_seed(self.__seed)
        # declare the input and output placeholders
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
        # additive Gaussian noise acts as a regularizer on the training branch only
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise
        testing_input = input
        # output format [batch_size, sequence_length, dimension]
        true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
        sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)
        # RNN with the layer of cells
        def cell():
            # build one recurrent cell of the configured type
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell
        multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])
        # training branch: noisy input, creates the variables
        with tf.variable_scope('train_scope') as train_scope:
            training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                          inputs=training_input,
                                                                          sequence_length=sequence_lengths,
                                                                          dtype=tf.float32)
            # connect the dense layer to the RNN
            training_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')
        # inference branch: clean input, reuses the training variables
        with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:
            inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                            inputs=testing_input,
                                                                            sequence_length=sequence_lengths,
                                                                            dtype=tf.float32)
            # connect the dense layer to the RNN
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)
        # error that should be minimized in the training process
        error = self.__l1_loss(training_prediction_output, true_output)
        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)
        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))
        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
        # create the adagrad optimizer
        optimizer = optimizer_fn(total_loss)
        # create the Dataset objects for the training and test data
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type="ZLIB")
        # parse the records
        tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size, self.__meta_data_size)
        # prepare the training data into batches
        # randomly shuffle the time series within the dataset
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(
        #     tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                             count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        # NOTE(review): the training file is parsed with validation_data_parser
        # (records carry metadata) — presumably because retraining uses the
        # train+validation file; confirm against the TFRecord writer.
        training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)
        # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=(
                                                                         [], [tf.Dimension(None), self.__input_size],
                                                                         [tf.Dimension(None), self.__output_size],
                                                                         [tf.Dimension(None), self.__meta_data_size]))
        # get an iterator to the batches
        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
        # access each batch using the iterator
        next_training_data_batch = training_data_batch_iterator.get_next()
        # preparing the test data
        test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)
        # create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
        padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),
                                                           padded_shapes=([], [tf.Dimension(None), self.__input_size],
                                                                          [tf.Dimension(None), self.__meta_data_size]))
        # get an iterator to the test input data batch
        test_input_iterator = padded_test_input_data.make_one_shot_iterator()
        # access the test input batch using the iterator
        test_input_data_batch = test_input_iterator.get_next()
        # setup variable initialization
        init_op = tf.global_variables_initializer()
        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)
        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)
            # run the training loop for the requested number of epochs
            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch,
                                                                feed_dict={shuffle_seed: epoch})
                        session.run(optimizer,
                                    feed_dict={input: training_data_batch_value[1],
                                               true_output: training_data_batch_value[2],
                                               sequence_lengths: training_data_batch_value[0]})
                    except tf.errors.OutOfRangeError:
                        break
            # applying the model to the test data
            list_of_forecasts = []
            while True:
                try:
                    # get the batch of test inputs
                    test_input_batch_value = session.run(test_input_data_batch)
                    # get the output of the network for the test input data batch
                    test_output = session.run(inference_prediction_output,
                                              feed_dict={input: test_input_batch_value[1],
                                                         sequence_lengths: test_input_batch_value[0]})
                    # keep only the prediction at the last unpadded time step of each series
                    last_output_index = test_input_batch_value[0] - 1
                    array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))
                    forecasts = test_output[array_first_dimension, last_output_index]
                    list_of_forecasts.extend(forecasts.tolist())
                except tf.errors.OutOfRangeError:
                    break
        session.close()
        return list_of_forecasts
| 10,712 | 49.533019 | 129 | py |
time-series-forecasting-release | time-series-forecasting-release/rnn_architectures/stacking_model/stacking_model_trainer.py | import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_reader import TFRecordReader
from configs.global_configs import model_training_configs
from configs.global_configs import training_data_configs
from configs.global_configs import gpu_configs
class StackingModelTrainer:
    """Trains a stacked RNN forecaster (TF1 graph mode) on TFRecord data.

    The network is a stack of recurrent cells (LSTM/GRU/RNN) followed by a
    shared dense projection to ``output_size``. ``train_model`` trains on the
    training TFRecords and evaluates SMAPE on the validation TFRecords,
    undoing the preprocessing (log transform, optional STL decomposition,
    zero-value offset, integer rounding) before scoring.
    """

    def __init__(self, **kwargs):
        """Store the model/data configuration passed as keyword arguments."""
        self.__use_bias = kwargs["use_bias"]
        self.__use_peepholes = kwargs["use_peepholes"]
        self.__input_size = kwargs["input_size"]
        self.__output_size = kwargs["output_size"]
        self.__binary_train_file_path = kwargs["binary_train_file_path"]
        self.__binary_validation_file_path = kwargs["binary_validation_file_path"]
        self.__contain_zero_values = kwargs["contain_zero_values"]
        self.__address_near_zero_instability = kwargs["address_near_zero_instability"]
        self.__integer_conversion = kwargs["integer_conversion"]
        self.__seed = kwargs["seed"]
        self.__cell_type = kwargs["cell_type"]
        self.__without_stl_decomposition = kwargs["without_stl_decomposition"]

        # define the metadata size based on the usage of stl decomposition:
        # without STL only the level value is stored; with STL the per-step
        # seasonality values (output_size) are stored alongside the level.
        if self.__without_stl_decomposition:
            self.__meta_data_size = 1
        else:
            self.__meta_data_size = self.__output_size + 1

    def __l1_loss(self, z, t):
        """Mean absolute error (L1) between targets ``t`` and predictions ``z``."""
        loss = tf.reduce_mean(tf.abs(t - z))
        return loss

    def __l2_loss(self, z, t):
        """Mean squared error (L2) between targets ``t`` and predictions ``z``."""
        loss = tf.losses.mean_squared_error(labels=t, predictions=z)
        return loss

    # Training the time series
    def train_model(self, **kwargs):
        """Train the network and return the validation SMAPE.

        Keyword Args:
            num_hidden_layers: number of stacked RNN cells.
            cell_dimension: hidden units per cell.
            minibatch_size: batch size for training and validation batching.
            max_epoch_size: how many times the training set is repeated per epoch.
            max_num_epochs: number of training epochs.
            l2_regularization: weight of the L2 penalty over all trainables.
            gaussian_noise_stdev: stddev of the additive input noise (training only).
            optimizer_fn: callable mapping a loss tensor to a training op.
            random_normal_initializer_stdev: stddev for weight initialization.

        Returns:
            tuple: (mean SMAPE as float, list of per-series SMAPE values).
        """
        # extract the parameters from the kwargs
        num_hidden_layers = kwargs['num_hidden_layers']
        cell_dimension = kwargs['cell_dimension']
        minibatch_size = kwargs['minibatch_size']
        max_epoch_size = kwargs['max_epoch_size']
        max_num_epochs = kwargs['max_num_epochs']
        l2_regularization = kwargs['l2_regularization']
        gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
        optimizer_fn = kwargs['optimizer_fn']
        random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']

        tf.reset_default_graph()
        tf.set_random_seed(self.__seed)

        # declare the input and output placeholders
        # input format [batch_size, sequence_length, dimension]
        input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
        # additive Gaussian noise regularizes the training branch only
        noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
        training_input = input + noise
        validation_input = input

        # output format [batch_size, sequence_length, dimension]
        true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
        sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
        weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)

        # RNN with the layer of cells
        def cell():
            # build one recurrent cell of the configured type
            if self.__cell_type == "LSTM":
                cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
                                               initializer=weight_initializer)
            elif self.__cell_type == "GRU":
                cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
            elif self.__cell_type == "RNN":
                cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
            return cell

        multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])

        # training branch: noisy input, creates the variables
        with tf.variable_scope('train_scope') as train_scope:
            training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                          inputs=training_input,
                                                                          sequence_length=sequence_lengths,
                                                                          dtype=tf.float32)
            # connect the dense layer to the RNN
            training_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')

        # validation branch: clean input, reuses the training variables
        with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:
            inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
                                                                            inputs=validation_input,
                                                                            sequence_length=sequence_lengths,
                                                                            dtype=tf.float32)
            # connect the dense layer to the RNN
            inference_prediction_output = tf.layers.dense(
                inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),
                units=self.__output_size,
                use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)

        error = self.__l1_loss(training_prediction_output, true_output)

        # l2 regularization of the trainable model parameters
        l2_loss = 0.0
        for var in tf.trainable_variables():
            l2_loss += tf.nn.l2_loss(var)
        l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))
        total_loss = tf.cast(error, dtype=tf.float64) + l2_loss

        # create the adagrad optimizer
        optimizer = optimizer_fn(total_loss)

        # create the training and validation datasets from the tfrecord files
        training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
        validation_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_validation_file_path],
                                                     compression_type="ZLIB")

        # parse the records
        tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size, self.__meta_data_size)

        # define the expected shapes of data after padding
        train_padded_shapes = ([], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__output_size])
        validation_padded_shapes = (
            [], [tf.Dimension(None), self.__input_size], [tf.Dimension(None), self.__output_size],
            [tf.Dimension(None), self.__meta_data_size])

        # prepare the training data into batches
        # randomly shuffle the time series within the dataset and repeat for the value of the epoch size
        shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
        # training_dataset = training_dataset.apply(
        #     tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
        #                                             count=int(max_epoch_size), seed=shuffle_seed))
        training_dataset = training_dataset.repeat(count=int(max_epoch_size))
        training_dataset = training_dataset.map(tfrecord_reader.train_data_parser)

        padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                     padded_shapes=train_padded_shapes)

        training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
        next_training_data_batch = training_data_batch_iterator.get_next()

        # prepare the validation data into batches
        validation_dataset = validation_dataset.map(tfrecord_reader.validation_data_parser)

        # create a single batch from all the validation time series by padding the datasets to make the variable sequence lengths fixed
        padded_validation_dataset = validation_dataset.padded_batch(batch_size=int(minibatch_size),
                                                                    padded_shapes=validation_padded_shapes)

        # get an iterator to the validation data
        validation_data_iterator = padded_validation_dataset.make_initializable_iterator()

        # access the validation data using the iterator
        next_validation_data_batch = validation_data_iterator.get_next()

        # setup variable initialization
        init_op = tf.global_variables_initializer()

        # define the GPU options
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(
                config=tf.ConfigProto(log_device_placement=gpu_configs.log_device_placement, allow_soft_placement=True,
                                      gpu_options=gpu_options)) as session:
            session.run(init_op)

            smape_final = 0.0
            # NOTE(review): smape_list is never reset between epochs, so the
            # returned mean aggregates validation SMAPE over ALL epochs, not
            # just the last one — confirm this is intentional.
            smape_list = []
            for epoch in range(int(max_num_epochs)):
                print("Epoch->", epoch)
                session.run(training_data_batch_iterator.initializer, feed_dict={
                    shuffle_seed: epoch})  # initialize the iterator to the beginning of the training dataset
                while True:
                    try:
                        training_data_batch_value = session.run(next_training_data_batch,
                                                                feed_dict={shuffle_seed: epoch})
                        # Fix: feed the `input` placeholder rather than the
                        # `training_input` tensor. Feeding `training_input`
                        # directly substitutes the fed value for that tensor
                        # and bypasses the Gaussian-noise addition, silently
                        # disabling the gaussian_noise_stdev hyperparameter.
                        _, total_loss_value = session.run([optimizer, total_loss],
                                                          feed_dict={input: training_data_batch_value[1],
                                                                     true_output: training_data_batch_value[2],
                                                                     sequence_lengths: training_data_batch_value[0]})
                    except tf.errors.OutOfRangeError:
                        break

                session.run(
                    validation_data_iterator.initializer)  # initialize the iterator to the beginning of the training dataset
                while True:
                    try:
                        # get the batch of validation inputs
                        validation_data_batch_value = session.run(next_validation_data_batch)

                        # get the output of the network for the validation input data batch
                        validation_output = session.run(inference_prediction_output,
                                                        feed_dict={input: validation_data_batch_value[1],
                                                                   sequence_lengths: validation_data_batch_value[0]
                                                                   })
                        # calculate the smape for the validation data using vectorization

                        # convert the data to remove the preprocessing
                        last_indices = validation_data_batch_value[0] - 1
                        array_first_dimension = np.array(range(0, validation_data_batch_value[0].shape[0]))

                        true_seasonality_values = validation_data_batch_value[3][array_first_dimension,
                                                  last_indices, 1:]
                        level_values = validation_data_batch_value[3][array_first_dimension, last_indices, 0]

                        last_validation_outputs = validation_output[array_first_dimension, last_indices]
                        actual_values = validation_data_batch_value[2][array_first_dimension, last_indices, :]

                        # invert the log transform (and the STL decomposition when used)
                        if self.__without_stl_decomposition:
                            converted_validation_output = np.exp(last_validation_outputs)
                            converted_actual_values = np.exp(actual_values)

                        else:
                            converted_validation_output = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + last_validation_outputs)
                            converted_actual_values = np.exp(
                                true_seasonality_values + level_values[:, np.newaxis] + actual_values)

                        if self.__contain_zero_values:  # to compensate for 0 values in data
                            converted_validation_output = converted_validation_output - 1
                            converted_actual_values = converted_actual_values - 1

                        if self.__without_stl_decomposition:
                            converted_validation_output = converted_validation_output * level_values[:, np.newaxis]
                            converted_actual_values = converted_actual_values * level_values[:, np.newaxis]

                        if self.__integer_conversion:
                            converted_validation_output = np.round(converted_validation_output)
                            converted_actual_values = np.round(converted_actual_values)

                        # clamp negatives produced by the inverse transform to zero
                        converted_validation_output[converted_validation_output < 0] = 0
                        converted_actual_values[converted_actual_values < 0] = 0

                        if self.__address_near_zero_instability:
                            # calculate the smape with an epsilon-stabilized denominator
                            epsilon = 0.1
                            sum = np.maximum(
                                np.abs(converted_validation_output) + np.abs(converted_actual_values) + epsilon,
                                0.5 + epsilon)
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            sum) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)
                        else:
                            # calculate the smape
                            smape_values = (np.abs(converted_validation_output - converted_actual_values) /
                                            (np.abs(converted_validation_output) + np.abs(
                                                converted_actual_values))) * 2
                            smape_values_per_series = np.mean(smape_values, axis=1)
                            smape_list.extend(smape_values_per_series)

                    except tf.errors.OutOfRangeError:
                        break
                smape_final = np.mean(smape_list)
                print("SMAPE value: {}".format(smape_final))

        session.close()
        return float(smape_final), smape_list
| 14,326 | 51.479853 | 136 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/models/unet_fastMRI.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
from torch import nn
from torch.nn import functional as F
class unet_fastMRI(nn.Module):
    """
    PyTorch implementation of a U-Net model.

    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """

    def __init__(
        self,
        in_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
        residual_connection: bool = True,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
                (The output has the same number of channels.)
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
            residual_connection: Network outputs the residual between input and output.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = in_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        self.residual_connection = residual_connection

        # encoder: first conv block, then blocks whose outputs are downsampled
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # bottleneck block at the lowest resolution
        self.conv = ConvBlock(ch, ch * 2, drop_prob)

        # decoder: transposed convs to upsample, conv blocks after skip concat
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2

        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        # final stage maps back to out_chans with a 1x1 conv
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )

    # Fix: this helper takes only `parser` and reads no instance state, so it
    # must be a @staticmethod — without the decorator, calling it on an
    # instance would bind `self` to `parser`.
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser and return it."""
        parser.add_argument("--bias", default=True, help="use residual bias")
        parser.add_argument("--residual", default=True, help="use residual connection")
        parser.add_argument("--in-chans", default=3, help="Either color (3) or grey (1)")
        parser.add_argument("--chans", default=32, help="Number of channels in outer most layer")
        parser.add_argument("--num-pool-layers", default=3, help="Number of layers per down- and up-sampling path.")
        parser.add_argument("--no-pooling", default=False, help="No downsampling. Use the no_unet_fastMRI module.")
        return parser

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        # NOTE: detach().clone() stops gradients from flowing back into `image`
        output = image.detach().clone()

        # apply down-sampling layers
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)

        output = self.conv(output)

        # apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)

            # reflect pad on the right/botton if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")

            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)

        if self.residual_connection:
            output = image - output
        return output
class ConvBlock(nn.Module):
    """Two 3x3 convolutions, each followed by instance normalization,
    LeakyReLU activation and 2-D dropout."""

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def _stage(n_in: int, n_out: int):
            # one conv -> instance-norm -> LeakyReLU -> dropout stage
            return [
                nn.Conv2d(n_in, n_out, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(n_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        self.layers = nn.Sequential(*_stage(in_chans, out_chans),
                                    *_stage(out_chans, out_chans))

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """One 2x2 stride-2 transposed convolution followed by instance
    normalization and LeakyReLU activation (doubles H and W)."""

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans

        stages = [
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        ]
        self.layers = nn.Sequential(*stages)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image)
| 7,138 | 34.517413 | 131 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/models/__init__.py | from .unet_fastMRI import *
| 29 | 9 | 27 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
from .meters import AverageMeter, RunningAverageMeter, TimeMeter
class ProgressBar:
    """Thin tqdm wrapper that renders per-epoch training statistics.

    When ``quiet`` is set, iteration is passed through untouched and ``log``
    becomes a no-op; otherwise the iterable is wrapped in a tqdm bar whose
    postfix shows the formatted stats.
    """

    def __init__(self, iterable, epoch=None, prefix=None, quiet=False):
        self.epoch = epoch
        self.quiet = quiet
        self.prefix = '' if prefix is None else prefix + ' | '
        if epoch is not None:
            self.prefix += f"epoch {epoch:02d}"
        if self.quiet:
            self.iterable = iterable
        else:
            self.iterable = tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.iterable)

    def log(self, stats, verbose=False):
        """Update the tqdm postfix with the formatted stats (no-op if quiet)."""
        if self.quiet:
            return
        self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)

    def format_stats(self, stats, verbose=False):
        """Return an OrderedDict mapping each stat name to a display string."""
        rendered = OrderedDict(stats)
        for name, raw in rendered.items():
            if isinstance(raw, Number):
                # fixed-point for "large" values, scientific notation for tiny ones
                rendered[name] = ("{:.3f}" if raw > 0.001 else "{:.1e}").format(raw)
            elif isinstance(raw, (AverageMeter, RunningAverageMeter)):
                if verbose:
                    rendered[name] = f"{raw.avg:.5f} ({raw.val:.5f})"
                else:
                    rendered[name] = f"{raw.avg:.5f}"
            elif isinstance(raw, TimeMeter):
                rendered[name] = f"{raw.elapsed_time:.1f}s"
            elif not isinstance(rendered[name], str):
                rendered[name] = str(raw)
        return rendered

    def print(self, stats, verbose=False):
        """Return a single ' | '-separated line of the prefix and the stats."""
        body = " | ".join(k + " " + v.strip() for k, v in self.format_stats(stats, verbose).items())
        head = self.prefix + ' | ' if self.epoch is not None else ''
        return f"{head}{body}"
| 1,789 | 37.913043 | 115 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
def modcrop(img, scale):
    """Crop trailing rows/columns so H and W become multiples of ``scale``.

    Accepts HW, CHW or BCHW arrays; the spatial dimensions are always the
    last two. Raises ValueError for any other rank.
    """
    if img.ndim == 2:
        height, width = img.shape
        img = img[:height - height % scale, :width - width % scale]
    elif img.ndim == 3:
        _, height, width = img.shape
        img = img[:, :height - height % scale, :width - width % scale]
    elif img.ndim == 4:
        _, _, height, width = img.shape
        img = img[:, :, :height - height % scale, :width - width % scale]
    else:
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    return img
def modcrop_pil(image, modulo):
    """Crop a PIL image from the top-left so width and height are multiples of ``modulo``."""
    cropped_w = (image.width // modulo) * modulo
    cropped_h = (image.height // modulo) * modulo
    return image.crop((0, 0, cropped_w, cropped_h))
def crop_center(pil_img, crop_width, crop_height):
    """Return the centered ``crop_width`` x ``crop_height`` crop of a PIL image."""
    full_w, full_h = pil_img.size
    left = (full_w - crop_width) // 2
    top = (full_h - crop_height) // 2
    right = (full_w + crop_width) // 2
    bottom = (full_h + crop_height) // 2
    return pil_img.crop((left, top, right, bottom))
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
# from https://github.com/cszn/KAIR/blob/master/utils/utils_image.py
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), matching MATLAB's imresize.

    Returns 1 at 0, decays to 0 at |x| = 1 and 2, and is zero for |x| > 2.
    """
    a = torch.abs(x)
    a2 = a * a
    a3 = a2 * a
    # piece for |x| <= 1
    inner = (1.5 * a3 - 2.5 * a2 + 1) * ((a <= 1).type_as(a))
    # piece for 1 < |x| <= 2
    outer = (-0.5 * a3 + 2.5 * a2 - 4 * a + 2) * (((a > 1) * (a <= 2)).type_as(a))
    return inner + outer
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    # Compute, for every output pixel, the input-pixel indices and cubic
    # interpolation weights used by imresize/imresize_np (MATLAB-style).
    # Returns (weights, indices, sym_len_s, sym_len_e): weights/indices have
    # shape [out_length, P]; sym_len_s/sym_len_e give how many border pixels
    # must be mirrored at the start/end so all indices are valid.
    # NOTE(review): the `kernel` argument is accepted but never read — only
    # the cubic kernel is supported.
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices

    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)

    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)

    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # shift indices to account for the mirrored border padding added by callers
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    # MATLAB-compatible bicubic resize of a tensor image in [0, 1].
    # Resizes H first, then W, mirroring borders symmetrically so the cubic
    # kernel can be applied at the edges.
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    # NOTE(review): for 2-D inputs, unsqueeze_ mutates the caller's tensor
    # in place (a channel dim is added and later squeezed back) — confirm
    # callers do not rely on the input being untouched.
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: mirror sym_len_Hs rows at the top and sym_len_He at the bottom
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    # weighted sum over the kernel window for every output row
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying: mirror sym_len_Ws columns on the left and sym_len_We on the right
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    # weighted sum over the kernel window for every output column
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()

    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize of a numpy image with values in [0, 1].

    Args:
        img: numpy array, HWC or HW, range [0, 1].
        scale: resize factor, applied identically to H and W.
        antialiasing: when True, the cubic kernel is widened for
            downscaling (scale < 1), matching MATLAB's default.

    Returns:
        numpy array, HWC or HW, range [0, 1], not rounded.

    Relies on the module-level helper ``calculate_weights_indices`` (defined
    elsewhere in this file) for the per-output-pixel kernel weights and the
    symmetric-padding lengths.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        # Promote HW -> HW1 so the single HWC code path below applies.
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: pad top/bottom with mirrored rows so border pixels
    # get full kernel support.
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    # Each output row is a weighted sum (matrix-vector product) of
    # kernel_width consecutive padded input rows.
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        # Undo the HW1 promotion for grayscale inputs.
        out_2.squeeze_()
    return out_2.numpy()
| 10,256 | 37.852273 | 99 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/train_utils.py | import argparse
import os
import logging
import numpy as np
import random
import sys
import torch
from datetime import datetime
from torch.serialization import default_restore_location
def add_logging_arguments(parser):
    """Register logging/checkpointing CLI options on *parser* and return it.

    Args:
        parser (argparse.ArgumentParser): parser to extend in place.

    Returns:
        argparse.ArgumentParser: the same parser, so calls can be chained.
    """
    parser.add_argument("--seed", default=0, type=int, help="random number generator seed")
    parser.add_argument("--output-dir", default="experiments", help="path to experiment directories")
    parser.add_argument("--experiment", default=None, help="experiment name to be used with Tensorboard")
    parser.add_argument("--resume-training", action="store_true", help="whether to resume training")
    # BUG FIX: the original help string used backslashes ('\path\to\checkpoint\dir'),
    # so the '\t' was rendered as a literal tab character in --help output.
    parser.add_argument("--restore-mode", default=None, help="Either 'best' 'last' or '/path/to/checkpoint/dir'")
    parser.add_argument("--restore-file", default=None, help="filename to load checkpoint")
    parser.add_argument("--test-mode", default=None, help="Evaluate on which test set.")
    parser.add_argument("--no-save", action="store_true", help="don't save models or checkpoints")
    parser.add_argument("--step-checkpoints", action="store_true", help="store all step checkpoints")
    parser.add_argument("--no-log", action="store_true", help="don't save logs to file or Tensorboard directory")
    parser.add_argument("--log-interval", type=int, default=100, help="log every N steps")
    parser.add_argument("--no-visual", action="store_true", help="don't use Tensorboard")
    parser.add_argument("--visual-interval", type=int, default=100, help="log every N steps")
    parser.add_argument("--no-progress", action="store_true", help="don't use progress bar")
    parser.add_argument("--draft", action="store_true", help="save experiment results to draft directory")
    parser.add_argument("--dry-run", action="store_true", help="no log, no save, no visualization")
    return parser
def init_logging(args):
    """Configure root logging: always to stderr, optionally to a file.

    The file handler is appended when logging is enabled and ``args.log_file``
    is set; it appends on resumed runs and truncates otherwise.
    """
    # Drop handlers left over from earlier configuration so basicConfig applies.
    for stale_handler in list(logging.root.handlers):
        logging.root.removeHandler(stale_handler)
    active_handlers = [logging.StreamHandler()]
    if not args.no_log and args.log_file is not None:
        file_mode = "a" if args.resume_training else "w"
        active_handlers.append(logging.FileHandler(args.log_file, mode=file_mode))
    logging.basicConfig(
        handlers=active_handlers,
        format="[%(asctime)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
    logging.info("Arguments: {}".format(vars(args)))
def setup_experiment(args):
    """Seed every RNG and (unless dry-run) build the experiment directory tree.

    Mutates ``args`` in place: derives the ``experiment`` name (model, noise
    level, timestamp, training-set size) and creates ``experiment_dir`` plus
    checkpoint/log subdirectories as requested.
    """
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.dry_run:
        # A dry run must leave the filesystem untouched: disable everything.
        args.no_save = args.no_log = args.no_visual = True
        return
    if not args.experiment:
        args.experiment = f"{args.model.replace('_', '-')}"  # unet
    args.experiment = "-".join([args.experiment, 'std' + str(args.noise_std)])
    if not args.resume_training:
        # Fresh runs get a timestamp plus the training-set size in the name.
        stamp = datetime.now().strftime("%b-%d-%H:%M:%S")
        args.experiment = "-".join([args.experiment, stamp, 'tr' + str(args.train_size)])
    args.experiment_dir = os.path.join(args.output_dir, args.experiment)
    # exist_ok=True: resuming into an already existing directory must not fail.
    os.makedirs(args.experiment_dir, exist_ok=True)
    if not args.no_save:
        args.checkpoint_dir = os.path.join(args.experiment_dir, "checkpoints")
        os.makedirs(args.checkpoint_dir, exist_ok=True)
    if not args.no_log:
        args.log_dir = os.path.join(args.experiment_dir, "logs")
        os.makedirs(args.log_dir, exist_ok=True)
        args.log_file = os.path.join(args.log_dir, "train.log")
def save_checkpoint(args, step, epoch, model, optimizer=None, scheduler=None, score=None, mode="min"):
    """Persist training state; track best/last progress via function attributes.

    Bookkeeping (``best_score``, ``best_step``, ``best_epoch``, ``last_step``)
    is stored as attributes on the function object itself, so it survives
    across calls within one run (callers reset these attributes explicitly).
    Writes ``checkpoint_best.pt`` when ``score`` improves, ``checkpoint_last.pt``
    when ``step`` advances, and per-step files when ``args.step_checkpoints``.

    Args:
        args: namespace with no_save, save_interval, checkpoint_dir, step_checkpoints.
        step (int): global training step.
        epoch (int): current epoch.
        model / optimizer / scheduler: single objects or lists thereof (or None).
        score (float): validation metric driving 'best' tracking.
        mode (str): 'min' if lower score is better, 'max' if higher is better.
    """
    assert mode == "min" or mode == "max"
    last_step = getattr(save_checkpoint, "last_step", -1) #-1 as default argument that is given if attribute does not exist
    save_checkpoint.last_step = max(last_step, step)
    default_score = float("inf") if mode == "min" else float("-inf")
    best_score = getattr(save_checkpoint, "best_score", default_score)
    # Record the new best before (possibly) serialising below.
    if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
        save_checkpoint.best_step = step
        save_checkpoint.best_epoch = epoch
        save_checkpoint.best_score = score
    if not args.no_save and step % args.save_interval == 0:
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        # Normalise to lists so single- and multi-model setups share one path.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        # NOTE(review): best_step/best_epoch are read unconditionally here and
        # would raise AttributeError if no call ever improved the score and the
        # caller did not pre-seed them — confirm callers always initialise them.
        state_dict = {
            "step": step,
            "epoch": epoch,
            "score": score,
            "last_step": save_checkpoint.last_step,
            "best_step": save_checkpoint.best_step,
            "best_epoch": save_checkpoint.best_epoch,
            "best_score": getattr(save_checkpoint, "best_score", None),
            "model": [m.state_dict() for m in model] if model is not None else None,
            "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
            "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
            "args": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),
        }
        if args.step_checkpoints:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint{}.pt".format(step)))
        # best_score still holds the pre-update value, so this condition fires
        # exactly when the bookkeeping update above fired.
        if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_best.pt"))
        if step > last_step:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_last.pt"))
def load_checkpoint(args, model=None, optimizer=None, scheduler=None):
    """Restore model/optimizer/scheduler state from ``args.restore_file``.

    Also re-seeds ``save_checkpoint``'s best/last bookkeeping attributes so a
    resumed run continues its checkpointing history.

    Returns:
        The loaded state dict, or None (implicitly) when ``args.restore_file``
        is unset or does not point at an existing file.
    """
    if args.restore_file is not None and os.path.isfile(args.restore_file):
        print('restoring model..')
        state_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
        # Normalise to lists so single- and multi-model setups share one path.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        if "best_score" in state_dict:
            save_checkpoint.best_score = state_dict["best_score"]
            save_checkpoint.best_step = state_dict["best_step"]
        if "last_step" in state_dict:
            save_checkpoint.last_step = state_dict["last_step"]
        if model is not None and state_dict.get("model", None) is not None:
            for m, state in zip(model, state_dict["model"]):
                m.load_state_dict(state)
        if optimizer is not None and state_dict.get("optimizer", None) is not None:
            for o, state in zip(optimizer, state_dict["optimizer"]):
                o.load_state_dict(state)
        if scheduler is not None and state_dict.get("scheduler", None) is not None:
            for s, state in zip(scheduler, state_dict["scheduler"]):
                # Keep the freshly constructed scheduler's milestones rather
                # than the stored (possibly stale) ones.
                # NOTE(review): assumes a scheduler exposing `.milestones`
                # (MultiStepLR-style); ReduceLROnPlateau has no such attribute
                # — confirm against the schedulers callers actually pass.
                milestones = s.milestones
                state['milestones'] = milestones
                s.load_state_dict(state)
                s.milestones = milestones
        logging.info("Loaded checkpoint {}".format(args.restore_file))
        return state_dict
| 7,573 | 52.716312 | 138 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/main_function_helpers.py | import torch
import argparse
import os
import yaml
import pathlib
import pickle
import logging
import sys
import time
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision
import glob
from torch.serialization import default_restore_location
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import utils
import models
from utils.data_helpers.load_datasets_helpers import *
from utils.meters import *
from utils.progress_bar import *
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from utils.test_metrics import *
def load_model(args):
    """Restore the trained U-Net denoiser for the single experiment found
    under ``args.output_dir`` and return it in eval mode on GPU if available.

    Raises:
        ValueError: unless exactly one 'unet*' experiment directory exists.
    """
    USE_CUDA = True
    use_gpu = torch.cuda.is_available() and USE_CUDA
    device = torch.device('cuda') if use_gpu else torch.device('cpu')
    candidates = glob.glob(args.output_dir + '/unet*')
    if len(candidates) != 1:
        raise ValueError("There is either no or more than one model to load")
    ckpt_path = pathlib.Path(candidates[0] + f"/checkpoints/checkpoint_{args.restore_mode}.pt")
    state_dict = torch.load(ckpt_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
    # Rebuild the network with the hyper-parameters stored in the checkpoint.
    args = argparse.Namespace(**{**vars(state_dict["args"]), "no_log": True})
    net = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans=args.chans,
        num_pool_layers=args.num_pool_layers,
        drop_prob=0.0,
        residual_connection=args.residual,
    ).to(device)
    net.load_state_dict(state_dict["model"][0])
    net.eval()
    return net
def cli_main_test(args):
    """Evaluate the restored model over a range of test noise levels and
    pickle the resulting metrics dictionary into the output directory.

    BUG FIX: the metrics file was opened via ``open(...)`` inside the
    ``pickle.dump`` call and never closed; it is now managed by ``with``.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    model = load_model(args)
    # evaluate test performance over following noise range (endpoints inclusive)
    noise_std_range = np.linspace(args.test_noise_std_min, args.test_noise_std_max,
                ((args.test_noise_std_max-args.test_noise_std_min)//args.test_noise_stepsize)+1,dtype=int)/255.
    metrics_path = os.path.join(args.output_dir, args.test_mode + '_' + str(args.test_noise_std_min)+'-'+str(args.test_noise_std_max)+f'_metrics_{args.restore_mode}.p')
    metrics_dict = metrics_avg_on_noise_range(model, args, noise_std_range, device = device)
    with open(metrics_path, "wb") as metrics_file:
        pickle.dump(metrics_dict, metrics_file)
def cli_main(args):
    """Full Noisier2Noise training loop.

    Phase 1 (optional lr annealing): the learning rate is multiplied by
    ``args.lr_beta`` after every improving validation epoch; after
    ``args.lr_patience_annealing`` non-improving epochs the best checkpoint is
    reloaded and a ReduceLROnPlateau decay phase starts.  Phase 2: plateau
    decay with early stopping once validation stops improving for two
    consecutive lr intervals, or once the minimal lr has been held for
    ``args.break_counter`` epochs.
    """
    available_models = glob.glob(f'{args.output_dir}/*')
    if not args.resume_training and available_models:
        raise ValueError('There exists already a trained model and resume_training is set False')
    if args.resume_training:
        f_restore_file(args)
    # reset the attributes of the function save_checkpoint
    mode = "max"
    default_score = float("inf") if mode == "min" else float("-inf")
    utils.save_checkpoint.best_score = default_score
    utils.save_checkpoint.best_step = -1
    utils.save_checkpoint.best_epoch = -1
    utils.save_checkpoint.last_step = -1
    utils.save_checkpoint.current_lr = args.lr
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Set the name of the directory for saving results
    utils.setup_experiment(args)
    utils.init_logging(args)
    # Build data loaders, a model and an optimizer
    model = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans = args.chans,
        num_pool_layers = args.num_pool_layers,
        drop_prob = 0.0,
        residual_connection = args.residual,
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
        threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
        min_lr=args.lr_min, eps=1e-08, verbose=True
    )
    logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
    trainset = ImagenetSubdataset(args.train_size,args.path_to_ImageNet_train,mode='train',patch_size=args.patch_size,val_crop=args.val_crop)
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    valset = ImagenetSubdataset(args.val_size,args.path_to_ImageNet_train,mode='val',patch_size=args.patch_size,val_crop=args.val_crop)
    val_loader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    print(optimizer.param_groups[0]["lr"])
    # start_decay=True means skip the lr-annealing warm-up and go straight to
    # the ReduceLROnPlateau phase.
    if args.resume_training:
        state_dict = utils.load_checkpoint(args, model, optimizer, scheduler)
        global_step = state_dict['last_step']
        start_epoch = int(state_dict['last_step']/(len(train_loader)))+1
        start_decay = True
    elif args.no_annealing:
        global_step = -1
        start_epoch = 0
        start_decay = True
    else:
        global_step = -1
        start_epoch = 0
        start_decay = False
    print(optimizer.param_groups[0]["lr"])
    args.log_interval = min(len(trainset), 100) # len(train_loader)=log once per epoch
    args.no_visual = False # True for not logging to tensorboard
    # Track moving average of loss values
    train_meters = {"train_loss":RunningAverageMeter(0.98)}
    valid_meters = {name: AverageMeter() for name in (["valid_psnr", "valid_ssim", "valid_psnr_self_supervised", "valid_ssim_self_supervised"])}
    # Create tensorflow event file
    writer = SummaryWriter(log_dir=args.experiment_dir) if not args.no_visual else None
    break_counter = 0
    # store the best val performance from lr-interval before the last lr decay
    best_val_last = 0
    # track the best val performance for the current lr-interval
    best_val_current = 0
    # count for how many lr intervals there was no improvement and break only if there was no improvement for 2
    lr_interval_counter = 0
    # if best_val_current at the end of the current lr interval is smaller than best_val_last we perform early stopping
    for epoch in range(start_epoch, args.num_epochs):
        start = time.process_time()
        train_bar = ProgressBar(train_loader, epoch)
        # At beginning of each epoch reset the train meters
        for meter in train_meters.values():
            meter.reset()
        for inputs, noise_seed in train_bar:
            model.train() #Sets the module in training mode.
            global_step += 1
            inputs = inputs.to(device)
            # Noisier2Noise: add noise to the clean image (the training target)
            # and then a second, independent noise realisation on top (input).
            noise = get_noise(inputs,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
            noisier_noise = get_noise(inputs,torch.mul(noise_seed,10),fix_noise = args.fix_noisier, noise_std = args.noise_std_noisier/255.)
            noisy_inputs = noise + inputs
            noisier_image = noisy_inputs + noisier_noise
            outputs = model(noisier_image)
            loss = F.mse_loss(outputs, noisy_inputs, reduction="sum") / torch.prod(torch.tensor(inputs.size())) #(inputs.size(0) * 2)
            model.zero_grad()
            loss.backward()
            optimizer.step()
            train_meters["train_loss"].update(loss.item())
            train_bar.log(dict(**train_meters, lr=optimizer.param_groups[0]["lr"]), verbose=True)
            # Add to tensorflow event file:
            if writer is not None:
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                writer.add_scalar("loss/train", train_meters["train_loss"].avg, global_step)
                sys.stdout.flush()
        if epoch % args.valid_interval == 0:
            model.eval()
            gen_val = torch.Generator()
            gen_val = gen_val.manual_seed(10)
            for meter in valid_meters.values():
                meter.reset()
            valid_bar = ProgressBar(val_loader)
            for sample, noise_seed in valid_bar:
                with torch.no_grad():
                    sample = sample.to(device)
                    # Self-supervised validation with fixed noise
                    noise_self_supervised = get_noise(sample,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
                    noisier_noise = get_noise(sample,torch.mul(noise_seed,10),fix_noise = args.fix_noisier, noise_std = args.noise_std_noisier/255.)
                    noisy_input_fixed = sample + noise_self_supervised
                    noisier_input = noisy_input_fixed + noisier_noise
                    model_output = model(noisier_input)
                    # Noisier2Noise inference-time correction: 2*f(z) - z.
                    prediction = 2*model_output - noisier_input
                    valid_psnr_self_supervised = psnr(model_output, noisy_input_fixed)
                    valid_ssim_self_supervised = ssim(model_output, noisy_input_fixed)
                    valid_meters["valid_psnr_self_supervised"].update(valid_psnr_self_supervised.item())
                    valid_meters["valid_ssim_self_supervised"].update(valid_ssim_self_supervised.item())
                    # Ground truth validation with fixed noise
                    # It uses the same input and output as in the self-supervised case since the noise seed is fixed
                    valid_psnr = psnr(prediction, sample)
                    valid_ssim = ssim(prediction, sample)
                    valid_meters["valid_psnr"].update(valid_psnr.item())
                    valid_meters["valid_ssim"].update(valid_ssim.item())
            if writer is not None:
                # Average is correct valid_meters['valid_psnr'].avg since .val would be just the psnr of last sample in val set.
                writer.add_scalar("psnr/valid", valid_meters['valid_psnr'].avg, global_step)
                writer.add_scalar("ssim/valid", valid_meters['valid_ssim'].avg, global_step)
                writer.add_scalar("psnr_selfsupervised/valid", valid_meters['valid_psnr_self_supervised'].avg, global_step)
                writer.add_scalar("ssim_selfsupervised/valid", valid_meters["valid_ssim_self_supervised"].avg, global_step)
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                sys.stdout.flush()
            if args.val_flag == 0: # if we do self-supervised validation
                val_loss = valid_meters["valid_psnr_self_supervised"].avg
            else: # if we do supervised validation
                val_loss = valid_meters["valid_psnr"].avg
            # Annealing phase: grow the lr while validation keeps improving.
            if utils.save_checkpoint.best_score < val_loss and not start_decay:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = utils.save_checkpoint.current_lr
                optimizer.param_groups[0]["lr"] = current_lr*args.lr_beta
                utils.save_checkpoint.current_lr = current_lr*args.lr_beta
                annealing_counter = 0
            elif not start_decay:
                annealing_counter += 1
                current_lr = utils.save_checkpoint.current_lr
                if annealing_counter == args.lr_patience_annealing:
                    # Annealing exhausted: reload the best checkpoint, back off
                    # the lr, and switch to the plateau-decay phase.
                    available_models = glob.glob(f'{args.output_dir}/*')
                    if not available_models:
                        raise ValueError('No file to restore')
                    elif len(available_models)>1:
                        raise ValueError('Too many files to restore from')
                    model_path = os.path.join(available_models[0], "checkpoints/checkpoint_best.pt")
                    state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                    model = [model] if model is not None and not isinstance(model, list) else model
                    for m, state in zip(model, state_dict["model"]):
                        m.load_state_dict(state)
                    model = model[0]
                    optimizer.param_groups[0]["lr"] = current_lr/(args.lr_beta*args.inital_decay_factor)
                    start_decay = True
                    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                        optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
                        threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
                        min_lr=args.lr_min, eps=1e-08, verbose=True
                    )
            else:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = optimizer.param_groups[0]["lr"]
            if val_loss > best_val_current:
                best_val_current = val_loss
            if writer is not None:
                writer.add_scalar("epoch", epoch, global_step)
                sys.stdout.flush()
            if start_decay:
                current_lr = optimizer.param_groups[0]["lr"]
                scheduler.step(val_loss)
                new_lr = optimizer.param_groups[0]["lr"]
                #At every lr decay check if the model did not improve during the current or the previous lr interval and break if it didn't.
                if new_lr < current_lr:
                    if best_val_current < best_val_last and lr_interval_counter==1:
                        logging.info('Break training due to convergence of val loss!')
                        break
                    elif best_val_current < best_val_last and lr_interval_counter==0:
                        lr_interval_counter += 1
                        logging.info('Do not yet break due to convergence of val loss!')
                    else:
                        best_val_last = best_val_current
                        best_val_current = 0
                        lr_interval_counter = 0
        end = time.process_time() - start
        # NOTE(review): current_lr is only assigned inside the validation
        # branch; with args.valid_interval > 1 this line would raise
        # UnboundLocalError on non-validation epochs — confirm valid_interval
        # is always 1 (its default).
        logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
        if optimizer.param_groups[0]["lr"] == args.lr_min and start_decay:
            break_counter += 1
            if break_counter == args.break_counter:
                print('Break training due to minimal learning rate constraint!')
                break
    logging.info(f"Done training! Best PSNR {utils.save_checkpoint.best_score:.3f} obtained after step {utils.save_checkpoint.best_step} (epoch {utils.save_checkpoint.best_epoch}).")
def get_args(hp,ee,rr):
    """Build the argument namespace for one experiment run.

    Args:
        hp (dict): hyper-parameter grid; each value is a sequence indexed by ``ee``.
        ee (int): experiment index selecting one value from every ``hp`` entry.
        rr (int): repetition index; shifts the RNG seed (seed = 42 + 10*rr).

    Returns:
        argparse.Namespace: parser defaults overridden by the ``hp[...][ee]`` values.
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    # Add data arguments
    parser.add_argument("--train-size", default=None, help="number of examples in training set")
    parser.add_argument("--val-size", default=40, help="number of examples in validation set")
    parser.add_argument("--test-size", default=100, help="number of examples in test set")
    # NOTE(review): type=bool is a known argparse pitfall (any non-empty string
    # parses truthy); harmless if these are only ever set via hp overrides —
    # confirm.
    parser.add_argument("--val-crop", default=True, type=bool, help="Crop validation images to train size.")
    parser.add_argument("--patch-size", default=128, help="size of the center cropped HR image")
    parser.add_argument("--batch-size", default=128, type=int, help="train batch size")
    # Add model arguments
    parser.add_argument("--model", default="unet", help="model architecture")
    # Add noise arguments
    parser.add_argument('--noise_std', default = 15, type = float,
                help = 'noise level')
    parser.add_argument('--test_noise_std_min', default = 15, type = float,
                help = 'minimal noise level for testing')
    parser.add_argument('--test_noise_std_max', default = 15, type = float,
                help = 'maximal noise level for testing')
    parser.add_argument('--test_noise_stepsize', default = 5, type = float,
                help = 'Stepsize between test_noise_std_min and test_noise_std_max')
    # Add optimization arguments
    parser.add_argument("--lr", default=1e-3, type=float, help="learning rate")
    parser.add_argument("--lr-gamma", default=0.5, type=float, help="factor by which to reduce learning rate")
    parser.add_argument("--lr-beta", default=2, type=float, help="factor by which to increase learning rate")
    parser.add_argument("--lr-patience", default=5, type=int, help="epochs without improvement before lr decay")
    parser.add_argument("--no_annealing", default=True, type=bool, help="Use lr annealing or not.")
    parser.add_argument("--lr-patience-annealing", default=3, type=int, help="epochs without improvement before lr annealing stops")
    parser.add_argument("--lr-min", default=1e-5, type=float, help="Once we reach this learning rate continue for break_counter many epochs then stop.")
    parser.add_argument("--lr-threshold", default=0.003, type=float, help="Improvements by less than this threshold are not counted for decay patience.")
    parser.add_argument("--break-counter", default=9, type=int, help="Once smallest learning rate is reached, continue for so many epochs before stopping.")
    parser.add_argument("--inital-decay-factor", default=2, type=int, help="After annealing found a lr for which val loss does not improve, go back initial_decay_factor many lrs")
    parser.add_argument("--num-epochs", default=100, type=int, help="force stop training at specified epoch")
    parser.add_argument("--valid-interval", default=1, type=int, help="evaluate every N epochs")
    parser.add_argument("--save-interval", default=1, type=int, help="save a checkpoint every N steps")
    # Add model arguments
    parser = models.unet_fastMRI.add_args(parser)
    parser = utils.add_logging_arguments(parser)
    #args = parser.parse_args()
    # parse_known_args: tolerate extra CLI flags (e.g. from notebook kernels).
    args, _ = parser.parse_known_args()
    # Set arguments specific for this experiment
    dargs = vars(args)
    for key in hp.keys():
        dargs[key] = hp[key][ee]
    args.seed = int(42 + 10*rr)
    return args
def f_restore_file(args):
    """Resolve which checkpoint to resume from and store it on ``args``.

    Depending on ``args.restore_mode``:
      * 'best' - pick the ``checkpoint_best.pt`` with the highest score across
        all experiment directories under ``args.output_dir``;
      * 'last' - pick the ``checkpoint_last.pt`` with the highest step count;
      * anything else - treated as an explicit path to a checkpoint file.

    Sets ``args.restore_file`` and ``args.experiment_dir``.

    Raises:
        ValueError: if no experiment directory or usable checkpoint exists,
            or if no restore mode was given.
    """
    #available_models = glob.glob(f'{args.output_dir}/{args.experiment}-*')
    available_models = glob.glob(f'{args.output_dir}/*')
    if not available_models:
        raise ValueError('No file to restore')
    if not args.restore_mode:
        # BUG FIX: the original message used backslashes ('\path\to\...')
        # whose '\t' rendered as a literal tab character.
        raise ValueError("Pick restore mode either 'best' 'last' or '/path/to/checkpoint/dir'")
    if args.restore_mode=='best':
        mode = "max"
        best_score = float("inf") if mode == "min" else float("-inf")
        best_model = None
        best_modelp = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_best.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                score = state_dict["best_score"]
                if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
                    best_score = score
                    best_model = model_path
                    best_modelp = modelp
                    best_step = state_dict["best_step"]
                    best_epoch = state_dict["best_epoch"]
        if best_model is None:
            # BUG FIX: previously fell through with an unbound 'best_modelp',
            # raising a confusing NameError instead of a clear error.
            raise ValueError('No file to restore')
        args.restore_file = best_model
        args.experiment_dir = best_modelp
        #logging.info(f"Prepare to restore best model {best_model} with PSNR {best_score} at step {best_step}, epoch {best_epoch}")
    elif args.restore_mode=='last':
        last_step = -1
        last_model = None
        last_modelp = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_last.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                step = state_dict["last_step"]
                if step > last_step:
                    last_step = step
                    last_model = model_path
                    last_modelp = modelp
                    score = state_dict["score"]
                    last_epoch = state_dict["epoch"]
        if last_model is None:
            # BUG FIX: same unbound-variable hazard as in the 'best' branch.
            raise ValueError('No file to restore')
        args.restore_file = last_model
        args.experiment_dir = last_modelp
        #logging.info(f"Prepare to restore last model {last_model} with PSNR {score} at step {last_step}, epoch {last_epoch}")
    else:
        # Explicit checkpoint path: experiment dir is everything before '/checkpoints'.
        args.restore_file = args.restore_mode
        args.experiment_dir = args.restore_mode[:args.restore_mode.find('/checkpoints')]
def infer_images(args):
    """Denoise the first three images of the selected test set and save
    clean / noisy / denoised versions as PNG and PDF under
    ``args.output_dir/test_images``.

    The noise generator is seeded per test set so the noisy inputs are
    reproducible across runs.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    net = load_model(args) # the denoiser
    # Fixed per-dataset seeds for reproducible noise realisations.
    seed_dict = {
        "val":10,
        "test":20,
        "cbsd68":30,
        "urban100":40,
        "mcmaster18":50,
        "kodak24":60,
        "CBSD68":70,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    # Load the test images
    load_path = '../training_set_lists/'
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
        #files_source.sort()
    else:
        files_source = torch.load(load_path+f'{args.test_mode}_filepaths.pt')
    if not os.path.isdir(args.output_dir+'/test_images'):
        os.mkdir(args.output_dir+'/test_images')
    counter = 0
    # NOTE(review): transforms / Image appear to come from the wildcard import
    # of utils.data_helpers.load_datasets_helpers (torchvision / PIL) — confirm.
    transformT = transforms.ToTensor()
    transformIm = transforms.ToPILImage()
    for f in files_source:
        counter = counter + 1
        # Only the first three images are exported.
        if counter > 3:
            break
        # Create noise
        ISource = torch.unsqueeze(transformT(Image.open(f).convert("RGB")),0).to(device)
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        INoisy = noise.to(device) + ISource
        out = torch.clamp(net(INoisy), 0., 1.).cpu()
        out = torch.squeeze(out,0) # Get rid of the 1 in dim 0.
        im = transformIm(out)
        INoisy = torch.clamp(torch.squeeze(INoisy,0), 0., 1.).cpu()
        #INoisy = torch.squeeze(INoisy,0).cpu()
        INoisy = transformIm(INoisy)
        clean_image = Image.open(f).convert("RGB")
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.png')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.png')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.png')
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.pdf')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.pdf')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.pdf')
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/util_calculate_psnr_ssim.py | import cv2
import numpy as np
import torch
# from https://github.com/JingyunLiang/SwinIR/blob/328dda0f4768772e6d8c5aa3d5aa8e24f1ad903b/utils/util_calculate_psnr_ssim.py#L80
def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Calculate PSNR (Peak Signal-to-Noise Ratio).

    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the PSNR calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: psnr result (``inf`` for identical images).
    """
    # BUG FIX: corrected 'differnet' -> 'different' in the assertion message.
    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse))
def _ssim(img1, img2):
    """Calculate SSIM (structural similarity) for one channel images.

    It is called by func:`calculate_ssim`.

    Args:
        img1 (ndarray): Images with range [0, 255] with order 'HWC'.
        img2 (ndarray): Images with range [0, 255] with order 'HWC'.

    Returns:
        float: ssim result.
    """
    # Stabilising constants from the SSIM paper, for an 8-bit dynamic range.
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())

    def local_mean(x):
        # Gaussian-filter and trim the 5-pixel border affected by padding.
        return cv2.filter2D(x, -1, window)[5:-5, 5:-5]

    mu_a = local_mean(a)
    mu_b = local_mean(b)
    mu_a_sq = mu_a ** 2
    mu_b_sq = mu_b ** 2
    mu_ab = mu_a * mu_b
    var_a = local_mean(a ** 2) - mu_a_sq
    var_b = local_mean(b ** 2) - mu_b_sq
    cov_ab = local_mean(a * b) - mu_ab
    numerator = (2 * mu_ab + c1) * (2 * cov_ab + c2)
    denominator = (mu_a_sq + mu_b_sq + c1) * (var_a + var_b + c2)
    return (numerator / denominator).mean()
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results are the same as that of the official released MATLAB code in
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.

    For three-channel images, SSIM is calculated for each channel and then
    averaged.

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the SSIM calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: ssim result.
    """
    # BUG FIX: corrected 'differnet' -> 'different' in the assertion message.
    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    ssims = []
    # Per-channel SSIM, averaged across channels.
    for i in range(img1.shape[2]):
        ssims.append(_ssim(img1[..., i], img2[..., i]))
    return np.array(ssims).mean()
def reorder_image(img, input_order='HWC'):
    """Return *img* in 'HWC' layout.

    A 2-D (h, w) input gains a trailing singleton channel axis; a 'CHW'
    input is transposed to (h, w, c); an 'HWC' input is returned unchanged.

    Args:
        img (ndarray): Input image.
        input_order (str): Layout of *img*, either 'HWC' or 'CHW'. Has no
            effect on 2-D inputs. Default: 'HWC'.

    Returns:
        ndarray: Image in (h, w, c) order.
    """
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'")
    out = img
    if len(out.shape) == 2:
        out = out[..., None]
    if input_order == 'CHW':
        out = out.transpose(1, 2, 0)
    return out
def to_y_channel(img):
    """Extract the YCbCr Y (luma) channel of a BGR image.

    Args:
        img (ndarray): Images with range [0, 255].

    Returns:
        (ndarray): Images with range [0, 255] (float type) without rounding;
            3-channel inputs are reduced to a single Y channel.
    """
    scaled = img.astype(np.float32) / 255.
    if scaled.ndim == 3 and scaled.shape[2] == 3:
        scaled = bgr2ycbcr(scaled, y_only=True)
        scaled = scaled[..., None]
    return scaled * 255.
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in colorspace
convertion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in colorspace convertion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr image.

    The bgr version of rgb2ycbcr. Implements the ITU-R BT.601 conversion for
    standard-definition television; see
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
    This differs from cv2.cvtColor's `BGR <-> YCrCb`, which implements the
    JPEG conversion (https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion).

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: The converted YCbCr image. The output image has the same
            type and range as the input image.
    """
    src_type = img.dtype
    normalized = _convert_input_type_range(img)
    if y_only:
        converted = np.dot(normalized, [24.966, 128.553, 65.481]) + 16.0
    else:
        converted = np.matmul(
            normalized, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
    return _convert_output_type_range(converted, src_type)
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/metrics.py | import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
def ssim(clean, noisy, normalized=True):
    """Mean SSIM over a batch via skimage's structural_similarity.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the range of tensors are [0., 1.] else [0, 255]
    Returns:
        Mean SSIM over the batch (float).
    """
    if len(clean.shape) != 4 or len(noisy.shape) != 4:
        raise ValueError("ssim expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        # Rescale to [0, 255] to match data_range below.
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    # (B, C, H, W) tensors -> (B, H, W, C) float32 numpy arrays.
    clean_np = np.moveaxis(clean.cpu().detach().numpy().astype(np.float32), 1, -1)
    noisy_np = np.moveaxis(noisy.cpu().detach().numpy().astype(np.float32), 1, -1)
    scores = [structural_similarity(c, n, data_range=255, multichannel=True)
              for c, n in zip(clean_np, noisy_np)]
    return np.array(scores).mean()
def psnr(clean, noisy, normalized=True):
    """Mean PSNR over a batch via skimage's peak_signal_noise_ratio.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the range of tensors are [0., 1.]
            else [0, 255]
    Returns:
        Mean PSNR over the batch (float).
    """
    if len(clean.shape) != 4 or len(noisy.shape) != 4:
        raise ValueError("psnr expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        # Rescale to [0, 255] to match data_range below.
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    clean_np = clean.cpu().detach().numpy().astype(np.float32)
    noisy_np = noisy.cpu().detach().numpy().astype(np.float32)
    scores = [peak_signal_noise_ratio(c, n, data_range=255) for c, n in zip(clean_np, noisy_np)]
    return np.array(scores).mean()
| 1,811 | 36.75 | 122 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/__init__.py | from .train_utils import *
from .main_function_helpers import * | 63 | 31 | 36 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/test_metrics.py | import torch
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
#import cv2
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from skimage import color
import PIL.Image as Image
import torchvision.transforms as transforms
from utils.utils_image import *
metrics_key = ['psnr_m', 'psnr_s', 'psnr_delta_m', 'psnr_delta_s', 'ssim_m', 'ssim_s', 'ssim_delta_m', 'ssim_delta_s'];
def tensor_to_image(torch_image, low=0.0, high=1.0, clamp=True):
    """Return element [0, 0] of a (B, C, H, W) tensor as a 2-D numpy array.

    Args:
        torch_image (Tensor): batch of images; only the first image's first
            channel is extracted.
        low, high (float): clamp bounds applied when *clamp* is True.
        clamp (bool): whether to clamp values into [low, high] first.
    """
    out = torch.clamp(torch_image, low, high) if clamp else torch_image
    return out[0, 0].cpu().data.numpy()
def normalize(data):
    """Scale pixel values from the [0, 255] range down to [0, 1]."""
    return data / 255.
def convert_dict_to_string(metrics):
    """Render a metrics dict as 'key: value ' pairs, values rounded to 3 dp.

    The returned string carries a trailing space after every pair.
    """
    return ''.join(f'{key}: {round(metrics[key], 3)} ' for key in metrics.keys())
def get_all_comparison_metrics(denoised, source, noisy = None, scale=None, return_title_string = False, clamp = True):
    """Compute PSNR/SSIM of *denoised* against *source*, plus deltas vs *noisy*.

    Args:
        denoised, source, noisy (Tensor): (B, C, H, W) image batches;
            *noisy* is optional and enables the delta metrics.
        scale: unused; kept for interface compatibility.
        return_title_string (bool): if True return a formatted string
            instead of the metrics dict.
        clamp (bool): clamp *denoised* into [0, 1] before scoring.
    """
    if clamp:
        denoised = torch.clamp(denoised, 0.0, 1.0)
    metrics = {}
    metrics['psnr'] = psnr(source, denoised)
    metrics['ssim'] = ssim(source, denoised)
    if noisy is not None:
        metrics['psnr_delta'] = metrics['psnr'] - psnr(source, noisy)
        metrics['ssim_delta'] = metrics['ssim'] - ssim(source, noisy)
    if return_title_string:
        return convert_dict_to_string(metrics)
    return metrics
def average_on_folder(args, net, noise_std, 
                      verbose=True, device = torch.device('cuda')):
    """Evaluate a Noisier2Noise model over the val/test file list and average metrics.

    Args:
        args: namespace with test_mode ('val'/'test'), test_size/val_size,
            path_to_ImageNet_train, noise_std, noise_std_noisier and alpha.
        net: denoising network applied to the doubly-noisy input.
        noise_std: NOTE(review) this parameter is never used in the body --
            the noise level actually comes from args.noise_std; confirm intent
            (callers such as metrics_avg_on_noise_range pass varying values here).
        verbose (bool): print per-file and average metrics.
        device: torch device for inference.

    Returns:
        dict: mean ('_m') and std ('_s') of psnr/psnr_delta/ssim/ssim_delta
        over all files.
    """
    #if verbose:
        #print('Loading data info ...\n')
    print(f'\n Dataset: {args.test_mode}, Restore mode: {args.restore_mode}')
    load_path = '../training_set_lists/'
    # Fixed seeds per split so the evaluation noise is reproducible.
    seed_dict = {
        "val":10,
        "test":20,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    gen2 = torch.Generator()
    gen2 = gen2.manual_seed(seed_dict[args.test_mode]*10)
    # File lists were pre-sampled and stored as torch-serialized path lists.
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
        #files_source.sort()
    avreage_metrics_key = ['psnr', 'psnr_delta', 'ssim', 'ssim_delta']
    avg_metrics = {};
    for x in avreage_metrics_key:
        avg_metrics[x] = [];
    psnr_list = []
    ssim_list = []
    for f in files_source:
        transformT = transforms.ToTensor()
        ISource = torch.unsqueeze(transformT(Image.open(args.path_to_ImageNet_train + f).convert("RGB")),0).to(device)
        if args.test_mode == 'val':
            # Derive a per-image seed from the digits in the file name so the
            # val noise matches training-time noise for the same image
            # (assumes ImageNet-style 'train/<class>/<class>_<idx>.JPEG' paths
            # -- TODO confirm against the stored path lists).
            noise_seed = int(f[f.find('train/')+17:-5].replace('_',''))
            gen = gen.manual_seed(noise_seed)
            gen2 = gen2.manual_seed(noise_seed*10)
        # First noise level: the "noisy" observation; second: the extra noise
        # added on top for the Noisier2Noise input. Both stds are in [0, 255] units.
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        noisier_noise = torch.randn(ISource.shape,generator = gen2) * args.noise_std_noisier/255.
        INoisy = noise.to(device) + ISource
        INoisier = INoisy + noisier_noise.to(device)
        # Noisier2Noise correction: recover the estimate of the clean image
        # from the network output on the doubly-noisy input (alpha is the
        # ratio of the extra noise std to the original noise std -- presumably;
        # verify against the training script).
        out = torch.clamp(((1 + args.alpha**2)*net(INoisier) - INoisier) / (args.alpha ** 2), 0., 1.)
        ind_metrics = get_all_comparison_metrics(out, ISource, INoisy, return_title_string = False);
        for x in avreage_metrics_key:
            avg_metrics[x].append(ind_metrics[x])
        if(verbose):
            print("%s %s" % (f, convert_dict_to_string(ind_metrics)))
    # Aggregate: mean ('_m') and standard deviation ('_s') per metric.
    metrics = {}
    for x in avreage_metrics_key:
        metrics[x+'_m'] = np.mean(avg_metrics[x])
        metrics[x+'_s'] = np.std(avg_metrics[x])
    if verbose:
        print("\n Average %s" % (convert_dict_to_string(metrics)))
    #if(not verbose):
    return metrics
def metrics_avg_on_noise_range(net, args, noise_std_array, device = torch.device('cuda')):
    """Run average_on_folder for every noise level and collect the results.

    Returns a dict mapping each name in the module-level `metrics_key` list
    to an array with one entry per noise level in *noise_std_array*.
    """
    array_metrics = {key: np.zeros(len(noise_std_array)) for key in metrics_key}
    for j, noise_std in enumerate(noise_std_array):
        metric_list = average_on_folder(args, net,
                                        noise_std=noise_std,
                                        verbose=False, device=device)
        for key in metrics_key:
            array_metrics[key][j] += metric_list[key]
            print('noise: ', int(noise_std*255), ' ', key, ': ', str(array_metrics[key][j]))
    return array_metrics
| 4,784 | 31.331081 | 119 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/noise_model.py | import torch
def get_noise(data, noise_seed, fix_noise, noise_std = float(25)/255.0):
    """Draw Gaussian noise with the same shape as *data*.

    With fix_noise=True, each batch element gets noise from its own seeded
    CUDA generator, so per-sample noise is reproducible; otherwise unseeded
    torch.randn_like noise is used.

    Args:
        data (Tensor): (B, ...) batch whose shape the noise matches.
        noise_seed (Tensor): per-sample integer seeds; only read when
            fix_noise is True.
        fix_noise (bool): deterministic per-sample noise when True
            (requires CUDA).
        noise_std (float): standard deviation of the noise. Default 25/255.
    """
    if not fix_noise:
        noise = torch.randn_like(data)
        noise.data = noise.data * noise_std
        return noise
    device = torch.device('cuda')
    gen = torch.Generator(device=device)
    sample_shape = list(data.size())[1:]
    chunks = []
    for i in range(data.size(dim=0)):
        # Re-seed per sample so each image always receives the same noise.
        gen = gen.manual_seed(noise_seed[i].item())
        sample = torch.randn(sample_shape, generator=gen, device=device) * noise_std
        chunks.append(torch.unsqueeze(sample, 0))
    return torch.cat(chunks, 0)
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/meters.py | import time
import torch
class AverageMeter(object):
    """Tracks a running sum and mean of a quantity.

    ``update(val, n)`` treats *val* as a sum over *n* observations: ``val``
    then holds the per-item value of the latest update, ``avg`` the overall
    mean, ``sum``/``count`` the accumulated totals.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, a sum over *n* items (tensors are unwrapped)."""
        value = val.item() if isinstance(val, torch.Tensor) else val
        self.val = value / n
        self.sum += value
        self.count += n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a scalar.

    ``avg`` is updated as ``momentum * avg + (1 - momentum) * val``; the
    first observation initializes it directly.
    """

    def __init__(self, momentum=0.98):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all observations."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold one observation into the moving average."""
        value = val.item() if isinstance(val, torch.Tensor) else val
        if self.val is None:
            self.avg = value
        else:
            self.avg = self.avg * self.momentum + value * (1 - self.momentum)
        self.val = value
class TimeMeter(object):
    """Counts events and measures wall-clock time elapsed since reset."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        """Restart the clock; *init* is an offset added to elapsed time."""
        self.init = init
        self.start = time.time()
        self.n = 0

    def update(self, val=1):
        """Record *val* more events."""
        self.n += val

    @property
    def avg(self):
        """Events per elapsed second."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds since the last reset, plus the initial offset."""
        return self.init + (time.time() - self.start)
| 1,321 | 20.322581 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noisier2Noise/utils/data_helpers/load_datasets_helpers.py | import os
import os.path
import numpy as np
import h5py
import torch
import torchvision.transforms as transforms
import PIL.Image as Image
from utils.utils_image import *
class ImagenetSubdataset(torch.utils.data.Dataset):
    """ImageNet-subset dataset backed by a pre-sampled list of file paths.

    The path list is loaded from ``../training_set_lists/`` and a per-image
    integer noise seed is derived from the digits of each file name, so the
    same image always gets the same noise seed.
    """
    def __init__(self, size, path_to_ImageNet_train, mode='train', patch_size='128', val_crop=True):
        # size: number of images in the stored list to load (selects the .pt file).
        # path_to_ImageNet_train: root the stored relative paths are joined to.
        # mode: 'train' or 'val'. NOTE(review): any other value leaves
        #   self.transform unset, so __getitem__ would raise AttributeError.
        # patch_size: center-crop size. NOTE(review): default is the string
        #   '128' -- confirm torchvision's CenterCrop accepts it.
        # val_crop: if False, validation images are only converted to tensors
        #   (no cropping).
        super().__init__()
        load_path = '../training_set_lists/'
        self.path_to_ImageNet_train = path_to_ImageNet_train
        if mode=='train':
            self.files = torch.load(load_path+f'trsize{size}_filepaths.pt')
            self.transform = transforms.Compose([
                transforms.CenterCrop(patch_size),
                transforms.ToTensor(),
            ])
        elif mode=='val':
            self.files = torch.load(load_path+f'ImageNetVal{size}_filepaths.pt')
            #print(self.files)
            if val_crop:
                self.transform = transforms.Compose([
                    transforms.CenterCrop(patch_size),
                    transforms.ToTensor(),
                ])
            else:
                self.transform = transforms.Compose([
                    transforms.ToTensor(),
                ])
        # Map each file (keyed by a slice of its path) to an integer seed built
        # from the digits of its name. The offsets assume ImageNet-style
        # 'train/<class>/<class>_<idx>.JPEG' paths -- TODO confirm against the
        # stored path lists.
        self.noise_seeds = {}
        for i, file in enumerate(self.files):
            key = file[file.find('train/')+16:-5]
            number = int(file[file.find('train/')+17:-5].replace('_',''))
            self.noise_seeds[key] = number
    def __len__(self):
        # Number of images in the subset.
        return len(self.files)
    def __getitem__(self, index):
        # Returns (transformed image tensor, per-image noise seed).
        file = self.files[index]
        key = file[file.find('train/')+16:-5]
        noise_seed = self.noise_seeds[key]
        image = Image.open(self.path_to_ImageNet_train + self.files[index]).convert("RGB") #ImageNet contains some grayscale images
        data = self.transform(image)
        return data, noise_seed
| 1,949 | 32.62069 | 131 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/models/unet_fastMRI.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
from torch import nn
from torch.nn import functional as F
class unet_fastMRI(nn.Module):
    """
    PyTorch implementation of a U-Net model.

    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """

    def __init__(
        self,
        in_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
        residual_connection: bool = True,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
                The output has the same number of channels.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
            residual_connection: If True the network output is the input minus
                the network's estimate (residual prediction).
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = in_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        self.residual_connection = residual_connection

        # Encoder: first conv block, then one block per additional pooling
        # level, doubling the channel count at each level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck block at the lowest resolution.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)

        # Decoder: transposed convs for upsampling plus conv blocks that fuse
        # the skip connections; the final stage maps back to out_chans.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2

        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser.

        Fix: declared as a staticmethod. The original had no ``self``
        parameter and no decorator, so it only worked when called on the
        class; calling it on an instance would have passed the instance as
        ``parser``. Class-level calls keep working unchanged.
        """
        parser.add_argument("--bias", default=True, help="use residual bias")
        parser.add_argument("--residual", default=True, help="use residual connection")
        parser.add_argument("--in-chans", default=3, help="Either color (3) or grey (1)")
        parser.add_argument("--chans", default=32, help="Number of channels in outer most layer")
        parser.add_argument("--num-pool-layers", default=3, help="Number of layers per down- and up-sampling path.")
        parser.add_argument("--no-pooling", default=False, help="No downsampling. Use the no_unet_fastMRI module.")
        return parser

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image.detach().clone()

        # apply down-sampling layers, keeping each pre-pool activation for
        # the skip connections
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)

        output = self.conv(output)

        # apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)

            # reflect pad on the right/bottom if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")

            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)

        if self.residual_connection:
            output = image - output
        return output
class ConvBlock(nn.Module):
    """Two 3x3 convolutions, each followed by instance normalization,
    LeakyReLU activation and 2D dropout."""

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def _stage(n_in, n_out):
            # One conv -> norm -> activation -> dropout stage.
            return [
                nn.Conv2d(n_in, n_out, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(n_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        self.layers = nn.Sequential(*_stage(in_chans, out_chans), *_stage(out_chans, out_chans))

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Map a `(N, in_chans, H, W)` tensor to `(N, out_chans, H, W)`."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """A 2x2 stride-2 transposed convolution followed by instance
    normalization and LeakyReLU; doubles the spatial resolution."""

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans

        self.layers = nn.Sequential(
            nn.ConvTranspose2d(
                in_chans, out_chans, kernel_size=2, stride=2, bias=False
            ),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Map a `(N, in_chans, H, W)` tensor to `(N, out_chans, H*2, W*2)`."""
        return self.layers(image)
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/models/__init__.py | from .unet_fastMRI import *
| 28 | 13.5 | 27 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
from .meters import AverageMeter, RunningAverageMeter, TimeMeter
class ProgressBar:
    """Thin wrapper around tqdm that formats meter/stat dictionaries.

    With quiet=True the iterable is passed through untouched and ``log`` is
    a no-op.
    """

    def __init__(self, iterable, epoch=None, prefix=None, quiet=False):
        self.epoch = epoch
        self.quiet = quiet
        self.prefix = prefix + ' | ' if prefix is not None else ''
        if epoch is not None:
            self.prefix += f"epoch {epoch:02d}"
        self.iterable = iterable if self.quiet else tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.iterable)

    def log(self, stats, verbose=False):
        """Attach formatted *stats* as the tqdm postfix (unless quiet)."""
        if not self.quiet:
            self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)

    def format_stats(self, stats, verbose=False):
        """Render each stat value to a short string, meter-aware."""
        formatted = OrderedDict(stats)
        for name, value in formatted.items():
            if isinstance(value, Number):
                # Values at or below 0.001 switch to scientific notation.
                formatted[name] = ("{:.3f}" if value > 0.001 else "{:.1e}").format(value)
            elif isinstance(value, AverageMeter) or isinstance(value, RunningAverageMeter):
                formatted[name] = f"{value.avg:.5f} ({value.val:.5f})" if verbose else f"{value.avg:.5f}"
            elif isinstance(value, TimeMeter):
                formatted[name] = f"{value.elapsed_time:.1f}s"
            elif not isinstance(formatted[name], str):
                formatted[name] = str(value)
        return formatted

    def print(self, stats, verbose=False):
        """Return a one-line ' | '-joined summary, prefixed with the epoch tag."""
        body = " | ".join(key + " " + value.strip() for key, value in self.format_stats(stats, verbose).items())
        return f"{self.prefix + ' | ' if self.epoch is not None else ''}{body}"
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
def modcrop(img, scale):
    """Crop trailing rows/columns so H and W are multiples of *scale*.

    Accepts HW, CHW or BCHW arrays; the crop applies to the last two axes.

    Raises:
        ValueError: if img is not 2-, 3- or 4-dimensional.
    """
    if img.ndim not in (2, 3, 4):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    H, W = img.shape[-2], img.shape[-1]
    H_r, W_r = H % scale, W % scale
    return img[..., :H - H_r, :W - W_r]
def modcrop_pil(image, modulo):
    """Crop a PIL image from the top-left so width and height are multiples
    of *modulo*."""
    new_w = image.width - image.width % modulo
    new_h = image.height - image.height % modulo
    return image.crop((0, 0, new_w, new_h))
def crop_center(pil_img, crop_width, crop_height):
    """Center-crop a PIL image to (crop_width, crop_height)."""
    full_w, full_h = pil_img.size
    left = (full_w - crop_width) // 2
    top = (full_h - crop_height) // 2
    right = (full_w + crop_width) // 2
    bottom = (full_h + crop_height) // 2
    return pil_img.crop((left, top, right, bottom))
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
# from https://github.com/cszn/KAIR/blob/master/utils/utils_image.py
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), as used by MATLAB's imresize.

    Nonzero on |x| <= 2: one polynomial branch for |x| <= 1, another for
    1 < |x| <= 2, zero elsewhere.
    """
    absx = torch.abs(x)
    absx2 = absx ** 2
    absx3 = absx ** 3
    near = (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx))
    far = (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * (absx <= 2)).type_as(absx))
    return near + far
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute per-output-pixel interpolation weights and source indices for one axis.

    Port of the weight/index computation inside MATLAB's imresize.

    Args:
        in_length (int): input size along the axis.
        out_length (int): output size along the axis.
        scale (float): resize factor (out/in).
        kernel (str): kernel name; callers pass 'cubic' (the `cubic` function
            is applied directly below).
        kernel_width (int): nominal support of the kernel (4 for cubic).
        antialiasing (bool): widen the kernel when downscaling (scale < 1).

    Returns:
        (weights, indices, sym_len_s, sym_len_e): weight matrix and input
        index matrix of shape (out_length, P), plus the number of symmetric
        padding pixels needed at the start and end of the axis.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel (scaled when antialiasing a downscale)
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # How far the indices reach past each border determines the symmetric
    # padding the caller must add; shift indices to be 0-based into the
    # padded array.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize for torch tensors in [0, 1].

    NOTE(review): mutates 2-D inputs in place via unsqueeze_ before resizing
    (and squeezes the result); callers should not rely on *img*'s shape
    afterwards.
    """
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: mirror-pad the top and bottom so kernel taps that
    # fall outside the image read reflected pixels
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    # each output row is a weighted combination (mv) of kernel_width input rows
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying (left/right borders)
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-style bicubic resize for numpy images in [0, 1] (HWC or HW).

    Numpy counterpart of `imresize`: wraps the array in a torch tensor,
    resizes height then width with symmetric border padding, and returns a
    numpy array.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: mirror-pad top and bottom borders
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    # each output row is a weighted combination (mv) of kernel_width input rows
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying (left/right borders)
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/train_utils.py | import argparse
import os
import logging
import numpy as np
import random
import sys
import torch
from datetime import datetime
from torch.serialization import default_restore_location
def add_logging_arguments(parser):
    """Register experiment logging/checkpointing CLI flags on *parser*.

    Returns the same parser for chaining.
    """
    add = parser.add_argument
    add("--seed", default=0, type=int, help="random number generator seed")
    add("--output-dir", default="experiments", help="path to experiment directories")
    add("--experiment", default=None, help="experiment name to be used with Tensorboard")
    add("--resume-training", action="store_true", help="whether to resume training")
    add("--restore-mode", default=None, help="Either 'best' 'last' or '\path\to\checkpoint\dir'")
    add("--restore-file", default=None, help="filename to load checkpoint")
    add("--test-mode", default=None, help="Evaluate on which test set.")
    add("--no-save", action="store_true", help="don't save models or checkpoints")
    add("--step-checkpoints", action="store_true", help="store all step checkpoints")
    add("--no-log", action="store_true", help="don't save logs to file or Tensorboard directory")
    add("--log-interval", type=int, default=100, help="log every N steps")
    add("--no-visual", action="store_true", help="don't use Tensorboard")
    add("--visual-interval", type=int, default=100, help="log every N steps")
    add("--no-progress", action="store_true", help="don't use progress bar")
    add("--draft", action="store_true", help="save experiment results to draft directory")
    add("--dry-run", action="store_true", help="no log, no save, no visualization")
    return parser
def init_logging(args):
    """Reset root logging, install a stream handler (plus an optional file
    handler) and log the run's arguments once."""
    # Drop handlers left over from a previous configuration so basicConfig
    # actually takes effect (it is a no-op when handlers already exist).
    for stale_handler in logging.root.handlers[:]:
        logging.root.removeHandler(stale_handler)
    active_handlers = [logging.StreamHandler()]
    if not args.no_log and args.log_file is not None:
        # Append when resuming so earlier training logs are preserved.
        file_mode = "a" if args.resume_training else "w"
        active_handlers.append(logging.FileHandler(args.log_file, mode=file_mode))
    logging.basicConfig(handlers=active_handlers, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
    logging.info("Arguments: {}".format(vars(args)))
def setup_experiment(args):
    """Seed all RNGs and prepare the experiment directory tree.

    Mutates *args* in place: builds ``args.experiment`` (name), and sets
    ``args.experiment_dir`` / ``args.checkpoint_dir`` / ``args.log_dir`` /
    ``args.log_file``, creating the directories on disk.  With ``--dry-run``
    only the no-save/no-log/no-visual flags are forced and nothing is created.
    """
    # NOTE(review): benchmark=True lets cuDNN pick algorithms non-deterministically,
    # which can undermine deterministic=True -- confirm this combination is intended.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.dry_run:
        args.no_save = args.no_log = args.no_visual = True
        return
    # Experiment name: <model>-std<noise>[-<timestamp>-tr<train_size>]; the
    # timestamp/train-size suffix is only added for fresh (non-resumed) runs.
    args.experiment = args.experiment or f"{args.model.replace('_', '-')}" #unet
    args.experiment = "-".join([args.experiment, 'std'+str(args.noise_std)])
    if not args.resume_training:
        args.experiment = "-".join([args.experiment, datetime.now().strftime("%b-%d-%H:%M:%S")])
        args.experiment = "-".join([args.experiment, 'tr'+str(args.train_size)])
    args.experiment_dir = os.path.join(args.output_dir, args.experiment)
    os.makedirs(args.experiment_dir, exist_ok=True) #dir is only created if it not already exists. If it already exists no error is raised
    if not args.no_save:
        args.checkpoint_dir = os.path.join(args.experiment_dir, "checkpoints")
        os.makedirs(args.checkpoint_dir, exist_ok=True)
    if not args.no_log:
        args.log_dir = os.path.join(args.experiment_dir, "logs")
        os.makedirs(args.log_dir, exist_ok=True)
        args.log_file = os.path.join(args.log_dir, "train.log")
def save_checkpoint(args, step, epoch, model, optimizer=None, scheduler=None, score=None, mode="min"):
    """Persist training state, tracking best/last progress in function attributes.

    Best-so-far bookkeeping lives on the function object itself
    (``save_checkpoint.best_score`` etc.); callers (see cli_main) reset these
    attributes before training.  Writes ``checkpoint_best.pt`` when *score*
    improves, ``checkpoint_last.pt`` when *step* advances, and optionally a
    per-step ``checkpoint<step>.pt`` when ``args.step_checkpoints`` is set.

    mode: "min" if lower *score* is better, "max" if higher is better.
    NOTE(review): state held on the function object is process-global --
    running two trainings in one process would share it.
    """
    assert mode == "min" or mode == "max"
    last_step = getattr(save_checkpoint, "last_step", -1) #-1 as default argument that is given if attribute does not exist
    save_checkpoint.last_step = max(last_step, step)
    default_score = float("inf") if mode == "min" else float("-inf")
    best_score = getattr(save_checkpoint, "best_score", default_score)
    if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
        save_checkpoint.best_step = step
        save_checkpoint.best_epoch = epoch
        save_checkpoint.best_score = score
    if not args.no_save and step % args.save_interval == 0:
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        # Normalize model/optimizer/scheduler to lists so one checkpoint format
        # covers both single- and multi-component setups.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        state_dict = {
            "step": step,
            "epoch": epoch,
            "score": score,
            "last_step": save_checkpoint.last_step,
            "best_step": save_checkpoint.best_step,
            "best_epoch": save_checkpoint.best_epoch,
            "best_score": getattr(save_checkpoint, "best_score", None),
            "model": [m.state_dict() for m in model] if model is not None else None,
            "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
            "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
            # Drop callables so the Namespace stays picklable by torch.save.
            "args": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),
        }
        if args.step_checkpoints:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint{}.pt".format(step)))
        if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_best.pt"))
        if step > last_step:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_last.pt"))
def load_checkpoint(args, model=None, optimizer=None, scheduler=None):
    """Restore model/optimizer/scheduler state from ``args.restore_file``.

    Also re-seeds save_checkpoint's best/last bookkeeping attributes from the
    checkpoint so resumed training continues its best-score tracking.
    Returns the loaded state dict, or None implicitly when ``args.restore_file``
    is unset or does not exist.
    """
    if args.restore_file is not None and os.path.isfile(args.restore_file):
        print('restoring model..')
        # Map all tensors to CPU first; the caller moves modules to its device.
        state_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        if "best_score" in state_dict:
            save_checkpoint.best_score = state_dict["best_score"]
            save_checkpoint.best_step = state_dict["best_step"]
        if "last_step" in state_dict:
            save_checkpoint.last_step = state_dict["last_step"]
        if model is not None and state_dict.get("model", None) is not None:
            for m, state in zip(model, state_dict["model"]):
                m.load_state_dict(state)
        if optimizer is not None and state_dict.get("optimizer", None) is not None:
            for o, state in zip(optimizer, state_dict["optimizer"]):
                o.load_state_dict(state)
        if scheduler is not None and state_dict.get("scheduler", None) is not None:
            for s, state in zip(scheduler, state_dict["scheduler"]):
                # Keep the *current* scheduler's milestones rather than the
                # checkpointed ones, before and after load_state_dict.
                milestones = s.milestones
                state['milestones'] = milestones
                s.load_state_dict(state)
                s.milestones = milestones
        logging.info("Loaded checkpoint {}".format(args.restore_file))
        return state_dict
| 7,573 | 52.716312 | 138 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/main_function_helpers.py | import torch
import argparse
import os
import yaml
import pathlib
import pickle
import logging
import sys
import time
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision
import glob
from torch.serialization import default_restore_location
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
import utils
import models
from utils.data_helpers.load_datasets_helpers import *
from utils.meters import *
from utils.progress_bar import *
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim,calculate_psnr_neighbor, calculate_upsnr
from utils.test_metrics import *
import torchvision.transforms.functional as FF
operation_seed_counter = 0
def space_to_depth(x, block_size):
    """Rearrange an (N, C, H, W) tensor into
    (N, C*block_size**2, H//block_size, W//block_size).

    Output channel ordering is (c, block_row, block_col), i.e. channel
    c*block_size**2 + i*block_size + j holds input pixel (h*bs+i, w*bs+j)
    of channel c -- identical to unfold with kernel=stride=block_size.
    """
    n, c, h, w = x.size()
    blocked = x.view(n, c, h // block_size, block_size, w // block_size, block_size)
    blocked = blocked.permute(0, 1, 3, 5, 2, 4).contiguous()
    return blocked.view(n, c * block_size ** 2, h // block_size, w // block_size)
def generate_subimages(img, mask):
    """Extract one H/2 x W/2 sub-image from *img* using a flat boolean *mask*.

    For every 2x2 cell, *mask* (as produced by generate_mask_pair or
    generate_val_mask; length N * H//2 * W//2 * 4, exactly one True per cell)
    picks which of the four pixels survives.

    img: (N, C, H, W) tensor; assumes H and W are even -- TODO confirm callers.
    Returns an (N, C, H//2, W//2) tensor with img's dtype/layout/device.
    """
    n, c, h, w = img.shape
    subimage = torch.zeros(n,
                           c,
                           h // 2,
                           w // 2,
                           dtype=img.dtype,
                           layout=img.layout,
                           device=img.device)
    # per channel
    for i in range(c):
        # Flatten each channel to (N*H//2*W//2*4,) in (batch, cell, position)
        # order so it lines up with the flat mask layout.
        img_per_channel = space_to_depth(img[:, i:i + 1, :, :], block_size=2)
        img_per_channel = img_per_channel.permute(0, 2, 3, 1).reshape(-1)
        subimage[:, i:i + 1, :, :] = img_per_channel[mask].reshape(
            n, h // 2, w // 2, 1).permute(0, 3, 1, 2)
    return subimage
def get_generator(seed = None):
    """Return a fresh CUDA ``torch.Generator``.

    When *seed* is None the module-level ``operation_seed_counter`` is
    incremented and used as the seed, so successive calls yield distinct but
    reproducible streams; otherwise the given *seed* is used directly.
    The counter is incremented on every call either way.
    """
    global operation_seed_counter
    operation_seed_counter += 1
    g_cuda_generator = torch.Generator(device="cuda")
    # Fix: compare to None with `is`, not `==` (PEP 8 E711); equality can be
    # hijacked by __eq__ overloads and is slower than the identity check.
    if seed is None:
        g_cuda_generator.manual_seed(operation_seed_counter)
    else:
        g_cuda_generator.manual_seed(seed)
    return g_cuda_generator
def generate_mask_pair(img,seed=None):
    """Sample a random pair of complementary sub-pixel masks for *img*.

    For every 2x2 cell of the (N, C, H, W) input, one of the 8 listed
    neighbouring position pairs is drawn uniformly; mask1/mask2 then select
    the first/second position of that pair.  Both returned masks are flat
    bool tensors of length N * H//2 * W//2 * 4, consumable by
    generate_subimages.

    NOTE(review): get_generator() builds a CUDA generator, so this presumably
    requires *img* to live on a CUDA device -- confirm before CPU use.
    """
    # prepare masks (N x C x H/2 x W/2)
    n, c, h, w = img.shape
    mask1 = torch.zeros(size=(n * h // 2 * w // 2 * 4, ),
                        dtype=torch.bool,
                        device=img.device)
    mask2 = torch.zeros(size=(n * h // 2 * w // 2 * 4, ),
                        dtype=torch.bool,
                        device=img.device)
    # prepare random mask pairs
    # Each row is an (index-in-cell, index-in-cell) pair of horizontally or
    # vertically adjacent positions within a 2x2 cell (0..3, row-major).
    idx_pair = torch.tensor(
        [[0, 1], [0, 2], [1, 3], [2, 3], [1, 0], [2, 0], [3, 1], [3, 2]],
        dtype=torch.int64,
        device=img.device)
    rd_idx = torch.zeros(size=(n * h // 2 * w // 2, ),
                         dtype=torch.int64,
                         device=img.device)
    if seed == None:
        torch.randint(low=0,
                      high=8,
                      size=(n * h // 2 * w // 2, ),
                      generator=get_generator(),
                      out=rd_idx)
    else:
        torch.randint(low=0,
                      high=8,
                      size=(n * h // 2 * w // 2, ),
                      generator=get_generator(seed),
                      out=rd_idx)
    rd_pair_idx = idx_pair[rd_idx]
    # Offset each cell's in-cell indices by its base position in the flat layout.
    rd_pair_idx += torch.arange(start=0,
                                end=n * h // 2 * w // 2 * 4,
                                step=4,
                                dtype=torch.int64,
                                device=img.device).reshape(-1, 1)
    # get masks
    mask1[rd_pair_idx[:, 0]] = 1
    mask2[rd_pair_idx[:, 1]] = 1
    return mask1, mask2
def generate_val_mask(img):
    """Build four deterministic, disjoint sub-pixel masks for validation.

    Mask k (k = 1..4) selects position k-1 of every 2x2 cell, so the four
    masks are complementary and together cover every pixel.  Each is a flat
    bool tensor of length N * H//2 * W//2 * 4 on img's device, matching the
    layout expected by generate_subimages.
    """
    n, c, h, w = img.shape
    flat_len = n * h // 2 * w // 2 * 4
    masks = []
    for offset in range(4):
        selector = torch.zeros(size=(flat_len, ), dtype=torch.bool, device=img.device)
        # Every 4th entry starting at `offset` == position `offset` of each cell.
        selector[offset::4] = True
        masks.append(selector)
    return masks[0], masks[1], masks[2], masks[3]
def load_model(args):
    """Rebuild the U-Net denoiser and load weights from the single ``unet*``
    experiment directory under ``args.output_dir``.

    Expects exactly one matching directory containing
    ``checkpoints/checkpoint_<args.restore_mode>.pt``; model hyperparameters
    are taken from the args stored in that checkpoint.  Returns the model in
    eval mode on GPU when available, else CPU.

    Raises:
        ValueError: if zero or multiple ``unet*`` directories match.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    checkpoint_path = glob.glob(args.output_dir +'/unet*')
    if len(checkpoint_path) != 1:
        raise ValueError("There is either no or more than one model to load")
    checkpoint_path = pathlib.Path(checkpoint_path[0] + f"/checkpoints/checkpoint_{args.restore_mode}.pt")
    # Load tensors to CPU first, then move the module to `device`.
    state_dict = torch.load(checkpoint_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
    # Rehydrate the training-time args (forcing no_log) to size the network.
    args = argparse.Namespace(**{ **vars(state_dict["args"]), "no_log": True})
    #model = models.build_model(args).to(device)
    model = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans = args.chans,
        num_pool_layers = args.num_pool_layers,
        drop_prob = 0.0,
        residual_connection = args.residual,
    ).to(device)
    model.load_state_dict(state_dict["model"][0])
    model.eval()
    return model
def cli_main_test(args):
    """Evaluate the saved model over a range of noise levels and pickle the metrics.

    The noise range is [test_noise_std_min, test_noise_std_max] in steps of
    test_noise_stepsize (values divided by 255 to match image scale); results
    from metrics_avg_on_noise_range are written to
    ``<output_dir>/<test_mode>_<min>-<max>_metrics_<restore_mode>.p``.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    model = load_model(args)
    # evaluate test performance over following noise range
    noise_std_range = np.linspace(args.test_noise_std_min, args.test_noise_std_max,
                                  ((args.test_noise_std_max-args.test_noise_std_min)//args.test_noise_stepsize)+1,dtype=int)/255.
    metrics_path = os.path.join(args.output_dir, args.test_mode + '_' + str(args.test_noise_std_min)+'-'+str(args.test_noise_std_max)+f'_metrics_{args.restore_mode}.p')
    metrics_dict = metrics_avg_on_noise_range(model, args, noise_std_range, device = device)
    pickle.dump( metrics_dict, open(metrics_path, "wb" ) )
def cli_main(args):
    """Train the U-Net denoiser with the Neighbor2Neighbor objective.

    Per training step: a random pair of sub-sampled noisy images is formed via
    generate_mask_pair/generate_subimages; the loss combines the pair
    reconstruction term with a regularizer whose weight ramps linearly with
    epoch (Lambda = epoch/n_epoch * increase_ratio).  Validation runs every
    ``valid_interval`` epochs with both a self-supervised and a ground-truth
    PSNR/SSIM; ``args.val_flag`` chooses which one drives checkpointing.

    Learning-rate schedule: unless ``args.no_annealing``, the lr is grown by
    ``lr_beta`` while validation improves; after ``lr_patience_annealing``
    non-improving epochs the best checkpoint is reloaded and a MultiStepLR
    decay phase starts.  Training stops early when validation stalls over two
    decay intervals or when the lr bottoms out at ``lr_min``.
    """
    available_models = glob.glob(f'{args.output_dir}/*')
    if not args.resume_training and available_models:
        raise ValueError('There exists already a trained model and resume_training is set False')
    if args.resume_training:
        f_restore_file(args)
    # reset the attributes of the function save_checkpoint
    mode = "max"
    default_score = float("inf") if mode == "min" else float("-inf")
    utils.save_checkpoint.best_score = default_score
    utils.save_checkpoint.best_step = -1
    utils.save_checkpoint.best_epoch = -1
    utils.save_checkpoint.last_step = -1
    utils.save_checkpoint.current_lr = args.lr
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Set the name of the directory for saving results
    utils.setup_experiment(args)
    utils.init_logging(args)
    # Build data loaders, a model and an optimizer
    model = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans = args.chans,
        num_pool_layers = args.num_pool_layers,
        drop_prob = 0.0,
        residual_connection = args.residual,
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    num_epoch = args.n_epoch
    ratio = num_epoch / 100
    # Decay at 20/40/60/80% of n_epoch.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[
                                                         int(20 * ratio) - 1,
                                                         int(40 * ratio) - 1,
                                                         int(60 * ratio) - 1,
                                                         int(80 * ratio) - 1
                                                     ],
                                                     gamma=args.lr_gamma)
    logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
    trainset = ImagenetSubdataset(args.train_size,args.path_to_ImageNet_train,mode='train',patch_size=args.patch_size,val_crop=args.val_crop)
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    valset = ImagenetSubdataset(args.val_size,args.path_to_ImageNet_train,mode='val',patch_size=args.patch_size,val_crop=args.val_crop)
    val_loader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    print(optimizer.param_groups[0]["lr"])
    # start_decay=True means we are already in the MultiStepLR decay phase;
    # False means the lr-annealing (growth) phase is still running.
    if args.resume_training:
        state_dict = utils.load_checkpoint(args, model, optimizer, scheduler)
        global_step = state_dict['last_step']
        start_epoch = int(state_dict['last_step']/(len(train_loader)))+1
        start_decay = True
    elif args.no_annealing:
        global_step = -1
        start_epoch = 0
        start_decay = True
    else:
        global_step = -1
        start_epoch = 0
        start_decay = False
    print(optimizer.param_groups[0]["lr"])
    args.log_interval = min(len(trainset), 100) # len(train_loader)=log once per epoch
    args.no_visual = False # True for not logging to tensorboard
    # Track moving average of loss values
    #train_meters = {name: RunningAverageMeter(0.98) for name in (["train_loss_running_avg"])}
    #train_meters = {name: AverageMeter() for name in (["train_loss"])}
    train_meters = {"train_loss": AverageMeter(), "train_loss_running_avg":RunningAverageMeter(0.98),"reconstruction_loss": AverageMeter(), "regularization_loss":AverageMeter()}
    valid_meters = {name: AverageMeter() for name in (["valid_psnr", "valid_ssim", "valid_psnr_self_supervised", "valid_ssim_self_supervised"])}
    # Create tensorflow event file
    writer = SummaryWriter(log_dir=args.experiment_dir) if not args.no_visual else None
    break_counter = 0
    # store the best val performance from lr-interval before the last lr decay
    best_val_last = 0
    # track the best val performance for the current lr-inerval
    best_val_current = 0
    # count for how many lr intervals there was no improvement and break only if there was no improvement for 2
    lr_interval_counter = 0
    # if best_val_current at the end of the current lr interval is smaller than best_val_last we perform early stopping
    for epoch in range(start_epoch, args.num_epochs):
        start = time.process_time()
        train_bar = ProgressBar(train_loader, epoch)
        # At beginning of each epoch reset the train meters
        for meter in train_meters.values():
            meter.reset()
        for inputs, noise_seed in train_bar:
            model.train() #Sets the module in training mode.
            global_step += 1
            inputs = inputs.to(device)
            noise = get_noise(inputs,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
            noisy_inputs = noise + inputs
            # Neighbor2Neighbor: two complementary sub-sampled views of the
            # same noisy image act as input/target.
            mask1, mask2 = generate_mask_pair(noisy_inputs)
            noisy_sub1 = generate_subimages(noisy_inputs, mask1)
            noisy_sub2 = generate_subimages(noisy_inputs, mask2)
            with torch.no_grad():
                noisy_denoised = model(noisy_inputs)
                noisy_sub1_denoised = generate_subimages(noisy_denoised, mask1)
                noisy_sub2_denoised = generate_subimages(noisy_denoised, mask2)
            noisy_output = model(noisy_sub1)
            noisy_target = noisy_sub2
            # Regularizer weight ramps linearly over training.
            Lambda = (epoch / args.n_epoch)* args.increase_ratio
            diff = noisy_output - noisy_target
            exp_diff = noisy_sub1_denoised - noisy_sub2_denoised
            loss1 = torch.mean(diff**2)
            reg = torch.mean((diff - exp_diff)**2)
            loss2 = Lambda * reg
            loss = args.Lambda1 * loss1 + args.Lambda2 * loss2
            model.zero_grad()
            loss.backward()
            optimizer.step()
            train_meters["train_loss"].update(loss.item())
            train_meters["reconstruction_loss"].update(loss1.item())
            train_meters["regularization_loss"].update(reg.item())
            train_meters["train_loss_running_avg"].update(loss.item())
            train_bar.log(dict(**train_meters, lr=optimizer.param_groups[0]["lr"]), verbose=True)
            # Add to tensorflow event file:
            if writer is not None:
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                writer.add_scalar("loss/train", train_meters["train_loss"].avg, global_step)
                writer.add_scalar("loss/train_running_avg", train_meters["train_loss_running_avg"].avg, global_step)
                writer.add_scalar("loss/rec",train_meters["reconstruction_loss"].avg,global_step)
                writer.add_scalar("loss/reg",train_meters["regularization_loss"].avg,global_step)
                #gradients = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], dim=0)
                #writer.add_histogram("gradients", gradients, global_step)
            sys.stdout.flush()
        if epoch % args.valid_interval == 0:
            model.eval()
            gen_val = torch.Generator()
            gen_val = gen_val.manual_seed(10)
            for meter in valid_meters.values():
                meter.reset()
            valid_bar = ProgressBar(val_loader)
            for sample, noise_seed in valid_bar:
                with torch.no_grad():
                    sample = sample.to(device)
                    sample_np = sample.cpu().detach().numpy()
                    shape = np.shape(sample_np)
                    # Drop the last column when width is odd so the 2x2
                    # sub-sampling masks line up.
                    if shape[3] % 2 == 1:
                        sample_np = sample_np[:,:,:,:-1]
                    sample = torch.from_numpy(sample_np)
                    sample = sample.to(device)
                    #print(sample.size())
                    # Self-supervised validation with fixed noise
                    noise_self_supervised = get_noise(sample,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
                    noisy_input_fixed = sample + noise_self_supervised
                    output_self_supervised = model(noisy_input_fixed)
                    #noisy_target = sample + noise_target_self_supervised
                    #if args.val_crop:
                    mask1, mask2, mask3, mask4 = generate_val_mask(noisy_input_fixed)
                    noisy_sub1 = generate_subimages(noisy_input_fixed, mask1)
                    noisy_sub2 = generate_subimages(noisy_input_fixed, mask2)
                    noisy_sub3 = generate_subimages(noisy_input_fixed, mask3)
                    noisy_sub4 = generate_subimages(noisy_input_fixed, mask4)
                    noisy_sub1_denoised = generate_subimages(output_self_supervised, mask1)
                    noisy_sub2_denoised = generate_subimages(output_self_supervised, mask2)
                    #noisy_sub1_denoised = generate_subimages(output_self_supervised, mask1)
                    noisy_output = model(noisy_sub1)
                    valid_psnr_self_supervised = calculate_psnr_neighbor(noisy_output,noisy_sub2,noisy_sub1_denoised,noisy_sub2_denoised,args.increase_ratio)
                    valid_ssim_self_supervised = ssim(noisy_output, noisy_sub2)
                    valid_meters["valid_psnr_self_supervised"].update(valid_psnr_self_supervised.item())
                    valid_meters["valid_ssim_self_supervised"].update(valid_ssim_self_supervised.item())
                    # Ground truth validation wit fixed noise
                    # It uses the same input and output as in the self-supervised case since the noise seed is fixed
                    valid_psnr = psnr(output_self_supervised, sample)
                    valid_ssim = ssim(output_self_supervised, sample)
                    valid_meters["valid_psnr"].update(valid_psnr.item())
                    valid_meters["valid_ssim"].update(valid_ssim.item())
            if writer is not None:
                # Average is correct valid_meters['valid_psnr'].avg since .val would be just the psnr of last sample in val set.
                writer.add_scalar("psnr/valid", valid_meters['valid_psnr'].avg, global_step)
                writer.add_scalar("ssim/valid", valid_meters['valid_ssim'].avg, global_step)
                writer.add_scalar("psnr_selfsupervised/valid", valid_meters['valid_psnr_self_supervised'].avg, global_step)
                writer.add_scalar("ssim_selfsupervised/valid", valid_meters["valid_ssim_self_supervised"].avg, global_step)
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                sys.stdout.flush()
            if args.val_flag == 0: # if we do self-supervised validation
                val_loss = valid_meters["valid_psnr_self_supervised"].avg
            else: # if we do supervised validation
                val_loss = valid_meters["valid_psnr"].avg
            # Annealing phase: grow lr while validation keeps improving.
            # NOTE(review): `annealing_counter` is first assigned in the improving
            # branch; if the very first validation did not improve this would
            # raise NameError -- in practice the first epoch always beats -inf.
            if utils.save_checkpoint.best_score < val_loss and not start_decay:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = utils.save_checkpoint.current_lr
                optimizer.param_groups[0]["lr"] = current_lr*args.lr_beta
                utils.save_checkpoint.current_lr = current_lr*args.lr_beta
                annealing_counter = 0
            elif not start_decay:
                annealing_counter += 1
                current_lr = utils.save_checkpoint.current_lr
                if annealing_counter == args.lr_patience_annealing:
                    # Annealing exhausted: reload the best checkpoint, back the
                    # lr off, and switch to the MultiStepLR decay phase.
                    available_models = glob.glob(f'{args.output_dir}/*')
                    if not available_models:
                        raise ValueError('No file to restore')
                    elif len(available_models)>1:
                        raise ValueError('Too many files to restore from')
                    model_path = os.path.join(available_models[0], "checkpoints/checkpoint_best.pt")
                    state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                    model = [model] if model is not None and not isinstance(model, list) else model
                    for m, state in zip(model, state_dict["model"]):
                        m.load_state_dict(state)
                    model = model[0]
                    optimizer.param_groups[0]["lr"] = current_lr/(args.lr_beta*args.inital_decay_factor)
                    start_decay = True
                    num_epoch = args.n_epoch
                    ratio = num_epoch / 100
                    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                                     milestones=[
                                                                         int(20 * ratio) - 1,
                                                                         int(40 * ratio) - 1,
                                                                         int(60 * ratio) - 1,
                                                                         int(80 * ratio) - 1
                                                                     ],
                                                                     gamma=args.lr_gamma)
            else:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = optimizer.param_groups[0]["lr"]
            if val_loss > best_val_current:
                best_val_current = val_loss
            if writer is not None:
                writer.add_scalar("epoch", epoch, global_step)
                sys.stdout.flush()
        if start_decay:
            current_lr = optimizer.param_groups[0]["lr"]
            scheduler.step()
            new_lr = optimizer.param_groups[0]["lr"]
            #At every lr decay check if the model did not improve during the current or the previous lr interval and break if it didn't.
            if new_lr < current_lr:
                if best_val_current < best_val_last and lr_interval_counter==1:
                    logging.info('Break training due to convergence of val loss!')
                    break
                elif best_val_current < best_val_last and lr_interval_counter==0:
                    lr_interval_counter += 1
                    logging.info('Do not yet break due to convergence of val loss!')
                else:
                    best_val_last = best_val_current
                    best_val_current = 0
                    lr_interval_counter = 0
        end = time.process_time() - start
        logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
        if optimizer.param_groups[0]["lr"] == args.lr_min and start_decay:
            break_counter += 1
        if break_counter == args.break_counter:
            print('Break training due to minimal learning rate constraint!')
            break
    logging.info(f"Done training! Best PSNR {utils.save_checkpoint.best_score:.3f} obtained after step {utils.save_checkpoint.best_step} (epoch {utils.save_checkpoint.best_epoch}).")
def get_args(hp,ee,rr):
    """Build the full argument Namespace for one experiment configuration.

    hp: dict mapping argument names to lists of per-experiment values.
    ee: index into those lists selecting this experiment's configuration.
    rr: repetition index; the seed is derived as 42 + 10*rr.
    Returns the parsed (known) args with the hp[...][ee] overrides applied.

    NOTE(review): `type=bool` on --val-crop / --no_annealing does not parse
    "False" from the command line as False (bool("False") is True); only the
    defaults and hp overrides behave as intended.
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    # Add data arguments
    parser.add_argument("--train-size", default=None, help="number of examples in training set")
    parser.add_argument("--val-size", default=40, help="number of examples in validation set")
    parser.add_argument("--test-size", default=100, help="number of examples in test set")
    parser.add_argument("--val-crop", default=True, type=bool, help="Crop validation images to train size.")
    parser.add_argument("--patch-size", default=128, help="size of the center cropped HR image")
    parser.add_argument("--batch-size", default=128, type=int, help="train batch size")
    # Add model arguments
    parser.add_argument("--model", default="unet", help="model architecture")
    # Add noise arguments
    parser.add_argument('--noise_std', default = 15, type = float,
                        help = 'noise level')
    parser.add_argument('--test_noise_std_min', default = 15, type = float,
                        help = 'minimal noise level for testing')
    parser.add_argument('--test_noise_std_max', default = 15, type = float,
                        help = 'maximal noise level for testing')
    parser.add_argument('--test_noise_stepsize', default = 5, type = float,
                        help = 'Stepsize between test_noise_std_min and test_noise_std_max')
    # Add optimization arguments
    parser.add_argument("--lr", default=1e-3, type=float, help="learning rate")
    parser.add_argument("--lr-gamma", default=0.5, type=float, help="factor by which to reduce learning rate")
    parser.add_argument("--lr-beta", default=2, type=float, help="factor by which to increase learning rate")
    parser.add_argument("--lr-patience", default=5, type=int, help="epochs without improvement before lr decay")
    parser.add_argument("--no_annealing", default=True, type=bool, help="Use lr annealing or not.")
    parser.add_argument("--lr-patience-annealing", default=3, type=int, help="epochs without improvement before lr annealing stops")
    parser.add_argument("--lr-min", default=1e-5, type=float, help="Once we reach this learning rate continue for break_counter many epochs then stop.")
    parser.add_argument("--lr-threshold", default=0.003, type=float, help="Improvements by less than this threshold are not counted for decay patience.")
    parser.add_argument("--break-counter", default=9, type=int, help="Once smallest learning rate is reached, continue for so many epochs before stopping.")
    parser.add_argument("--inital-decay-factor", default=2, type=int, help="After annealing found a lr for which val loss does not improve, go back initial_decay_factor many lrs")
    parser.add_argument("--num-epochs", default=100, type=int, help="force stop training at specified epoch")
    parser.add_argument("--valid-interval", default=1, type=int, help="evaluate every N epochs")
    parser.add_argument("--save-interval", default=1, type=int, help="save a checkpoint every N steps")
    # Add model arguments
    parser = models.unet_fastMRI.add_args(parser)
    parser = utils.add_logging_arguments(parser)
    #args = parser.parse_args()
    args, _ = parser.parse_known_args()
    # Set arguments specific for this experiment
    dargs = vars(args)
    for key in hp.keys():
        dargs[key] = hp[key][ee]
    args.seed = int(42 + 10*rr)
    return args
def f_restore_file(args):
    """Resolve which checkpoint to resume from and record it on *args*.

    ``args.restore_mode`` selects the strategy: 'best' picks the experiment
    directory under ``args.output_dir`` with the highest stored best_score;
    'last' picks the one with the largest last_step; anything else is treated
    as an explicit path to a checkpoint file.  Sets ``args.restore_file`` and
    ``args.experiment_dir``.

    Raises:
        ValueError: if no experiment directories exist or restore_mode is unset.
    """
    #available_models = glob.glob(f'{args.output_dir}/{args.experiment}-*')
    available_models = glob.glob(f'{args.output_dir}/*')
    if not available_models:
        raise ValueError('No file to restore')
    if not args.restore_mode:
        raise ValueError("Pick restore mode either 'best' 'last' or '\path\to\checkpoint\dir'")
    if args.restore_mode=='best':
        mode = "max"
        best_score = float("inf") if mode == "min" else float("-inf")
        best_model = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_best.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                score = state_dict["best_score"]
                if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
                    best_score = score
                    best_model = model_path
                    best_modelp = modelp
                    best_step = state_dict["best_step"]
                    best_epoch = state_dict["best_epoch"]
        args.restore_file = best_model
        args.experiment_dir = best_modelp
        #logging.info(f"Prepare to restore best model {best_model} with PSNR {best_score} at step {best_step}, epoch {best_epoch}")
    elif args.restore_mode=='last':
        last_step = -1
        last_model = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_last.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                step = state_dict["last_step"]
                if step > last_step:
                    last_step = step
                    last_model = model_path
                    last_modelp = modelp
                    score = state_dict["score"]
                    last_epoch = state_dict["epoch"]
        args.restore_file = last_model
        args.experiment_dir = last_modelp
        #logging.info(f"Prepare to restore last model {last_model} with PSNR {score} at step {last_step}, epoch {last_epoch}")
    else:
        # Explicit checkpoint path: derive the experiment dir by stripping
        # everything from '/checkpoints' onward.
        args.restore_file = args.restore_mode
        args.experiment_dir = args.restore_mode[:args.restore_mode.find('/checkpoints')]
def infer_images(args):
    """Denoise the first three val/test images and save qualitative results.

    Loads the trained denoiser, adds seeded Gaussian noise (std
    ``args.noise_std``/255) to each image, and writes clean / noisy / denoised
    versions as both PNG and PDF into ``<output_dir>/test_images``.
    The RNG seed depends on ``args.test_mode`` ('val' -> 10, 'test' -> 20).
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    net = load_model(args) # the denoiser
    seed_dict = {
        "val":10,
        "test":20,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    # Load the test images
    load_path = '../training_set_lists/'
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
        #files_source.sort()
    if not os.path.isdir(args.output_dir+'/test_images'):
        os.mkdir(args.output_dir+'/test_images')
    counter = 0
    transformT = transforms.ToTensor()
    transformIm = transforms.ToPILImage()
    for f in files_source:
        counter = counter + 1
        # Only the first 3 images are exported.
        if counter > 3:
            break
        # Create noise
        ISource = torch.unsqueeze(transformT(Image.open(f).convert("RGB")),0).to(device)
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        INoisy = noise.to(device) + ISource
        # Clamp outputs to the valid [0, 1] image range before PIL conversion.
        out = torch.clamp(net(INoisy), 0., 1.).cpu()
        out = torch.squeeze(out,0) # Get rid of the 1 in dim 0.
        im = transformIm(out)
        INoisy = torch.clamp(torch.squeeze(INoisy,0), 0., 1.).cpu()
        INoisy = transformIm(INoisy)
        clean_image = Image.open(f).convert("RGB")
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.png')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.png')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.png')
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.pdf')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.pdf')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.pdf')
| 29,827 | 45.244961 | 182 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/util_calculate_psnr_ssim.py | import cv2
import numpy as np
import torch
# from https://github.com/JingyunLiang/SwinIR/blob/328dda0f4768772e6d8c5aa3d5aa8e24f1ad903b/utils/util_calculate_psnr_ssim.py#L80
def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Peak Signal-to-Noise Ratio between two images in the [0, 255] range.

    Args:
        img1, img2 (ndarray): images of identical shape, range [0, 255].
        crop_border (int): pixels stripped from every edge before comparison.
        input_order (str): 'HWC' or 'CHW' layout of the inputs.
        test_y_channel (bool): compare on the YCbCr Y channel only.

    Returns:
        float: PSNR in dB (``inf`` for identical images).
    """
    assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    # Normalize both images to HWC float64 in one pass.
    pair = [reorder_image(im, input_order=input_order).astype(np.float64) for im in (img1, img2)]
    if crop_border != 0:
        pair = [im[crop_border:-crop_border, crop_border:-crop_border, ...] for im in pair]
    if test_y_channel:
        pair = [to_y_channel(im) for im in pair]
    ref, test = pair
    mse = np.mean((ref - test) ** 2)
    return float('inf') if mse == 0 else 20. * np.log10(255. / np.sqrt(mse))
def calculate_psnr_neighbor(img1, img2,noisy_sub1_denoised,noisy_sub2_denoised,increase_ratio, crop_border=0, input_order='HWC', test_y_channel=False):
    """Self-supervised validation PSNR between two sub-sampled torch tensors.

    Unlike calculate_psnr, the inputs here are torch tensors and the final
    formula uses a peak of 1.0, i.e. images are assumed to be in [0, 1].

    Args:
        img1, img2: torch tensors of identical shape (network output / noisy target).
        noisy_sub1_denoised, noisy_sub2_denoised: denoised sub-images used for
            the regularization term.
        increase_ratio: weight of the regularization term.
        crop_border (int): pixels stripped from every edge before comparison.
        input_order (str): 'HWC' or 'CHW' layout of the inputs.
        test_y_channel (bool): compare on the YCbCr Y channel only.

    Returns:
        np.float64: PSNR in dB (``inf`` for identical inputs).

    NOTE(review): `reg` (and hence `increase_ratio`) is computed but never
    used in the returned value -- confirm whether it was meant to enter the
    score, as in a regularized validation loss.
    """
    assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    noisy_sub1_denoised = reorder_image(noisy_sub1_denoised,input_order=input_order)
    noisy_sub2_denoised = reorder_image(noisy_sub2_denoised,input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    #img1 = img1.astype(np.float64)
    #img2 = img2.astype(np.float64)
    #noisy_sub1_denoised = noisy_sub1_denoised.astype(np.float64)
    #noisy_sub2_denoised = noisy_sub2_denoised.astype(np.float64)
    # Detach from the graph and move to CPU numpy for the metric computation.
    img1 = img1.cpu().detach().numpy()
    img2 = img2.cpu().detach().numpy()
    noisy_sub1_denoised = noisy_sub1_denoised.cpu().detach().numpy()
    noisy_sub2_denoised = noisy_sub2_denoised.cpu().detach().numpy()
    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    mse = np.mean((img1 - img2) ** 2)
    reg = increase_ratio * np.mean((img1 - img2 - (noisy_sub1_denoised - noisy_sub2_denoised)) ** 2)
    if mse == 0:
        return float('inf')
    return 20. * np.log10(1. / np.sqrt(mse))
def calculate_upsnr(img1,img2,img3,img4,noisy_sub1_denoised,noisy_sub2_denoised,increase_ratio, crop_border=0,input_order='HWC', test_y_channel=False):
    """Calculate a PSNR variant over a weighted combination of two image pairs.

    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    The "umse" combines the MSE of (img1, img2) with half the MSE of
    (img3, img4).  Inputs are torch tensors (moved to CPU / numpy inside);
    the peak value in the final formula is 1.0, so inputs are presumably in
    range [0, 1] — TODO confirm against callers.

    Args:
        img1, img2 (Tensor): Primary image pair.
        img3, img4 (Tensor): Secondary image pair, weighted by 0.5.
        noisy_sub1_denoised, noisy_sub2_denoised (Tensor): Denoised noisy
            sub-samples used only by the regularisation term.
        increase_ratio (float): Weight of ``reg``.  NOTE(review): ``reg`` is
            computed but not used by the returned value (the regularised
            return is commented out below), so this argument currently has
            no effect.
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the PSNR calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
    Returns:
        float: psnr result.
    """
    assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img3 = reorder_image(img3,input_order=input_order)
    img4 = reorder_image(img4,input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    noisy_sub1_denoised = reorder_image(noisy_sub1_denoised,input_order=input_order)
    noisy_sub2_denoised = reorder_image(noisy_sub2_denoised,input_order=input_order)
    #img1 = img1.astype(np.float64)
    #img2 = img2.astype(np.float64)
    # Tensors -> numpy arrays (also breaks the autograd graph).
    img1 = img1.cpu().detach().numpy()
    img2 = img2.cpu().detach().numpy()
    img3 = img3.cpu().detach().numpy()
    img4 = img4.cpu().detach().numpy()
    noisy_sub1_denoised = noisy_sub1_denoised.cpu().detach().numpy()
    noisy_sub2_denoised = noisy_sub2_denoised.cpu().detach().numpy()
    # NOTE(review): cropping / Y-channel extraction is applied only to the
    # primary pair (img1, img2), not to img3/img4.
    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    umse = np.mean((img1 - img2) ** 2) + 0.5 * np.mean((img3 - img4) ** 2)
    # NOTE(review): computed but unused — kept for reference.
    reg = increase_ratio * np.mean((img1 - img2 - (noisy_sub1_denoised - noisy_sub2_denoised)) ** 2)
    if umse == 0:
        return float('inf')
    #return 20. * np.log10(1. / np.sqrt(umse+reg))
    return 20. * np.log10(1. / np.sqrt(umse)) # E720 - 725
def _ssim(img1, img2):
    """Compute SSIM (structural similarity) for one channel.

    Helper for :func:`calculate_ssim`.

    Args:
        img1 (ndarray): Image with range [0, 255], order 'HWC'.
        img2 (ndarray): Image with range [0, 255], order 'HWC'.

    Returns:
        float: ssim result.
    """
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    # 11x11 Gaussian window (sigma = 1.5); the 5-pixel border produced by the
    # filtering is discarded on every side.
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())
    mu_a = cv2.filter2D(a, -1, window)[5:-5, 5:-5]
    mu_b = cv2.filter2D(b, -1, window)[5:-5, 5:-5]
    mu_a_sq = mu_a * mu_a
    mu_b_sq = mu_b * mu_b
    mu_ab = mu_a * mu_b
    var_a = cv2.filter2D(a * a, -1, window)[5:-5, 5:-5] - mu_a_sq
    var_b = cv2.filter2D(b * b, -1, window)[5:-5, 5:-5] - mu_b_sq
    cov_ab = cv2.filter2D(a * b, -1, window)[5:-5, 5:-5] - mu_ab
    ssim_map = ((2 * mu_ab + c1) * (2 * cov_ab + c2)) / ((mu_a_sq + mu_b_sq + c1) * (var_a + var_b + c2))
    return ssim_map.mean()
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results match the official released MATLAB code from
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.
    For three-channel images, SSIM is computed per channel and averaged.

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the SSIM calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: ssim result.
    """
    assert img1.shape == img2.shape, f'Image shapes are differnet: {img1.shape}, {img2.shape}.'
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"')
    img_a = reorder_image(img1, input_order=input_order).astype(np.float64)
    img_b = reorder_image(img2, input_order=input_order).astype(np.float64)
    if crop_border != 0:
        img_a = img_a[crop_border:-crop_border, crop_border:-crop_border, ...]
        img_b = img_b[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img_a = to_y_channel(img_a)
        img_b = to_y_channel(img_b)
    per_channel = [_ssim(img_a[..., c], img_b[..., c]) for c in range(img_a.shape[2])]
    return np.array(per_channel).mean()
def reorder_image(img, input_order='HWC'):
    """Reorder an image array to 'HWC' layout.

    A 2-D (h, w) input becomes (h, w, 1); a 'CHW' input is transposed to
    (h, w, c); an 'HWC' input is returned as-is.

    Args:
        img (ndarray): Input image.
        input_order (str): 'HWC' or 'CHW'.  Has no effect on 2-D inputs.
            Default: 'HWC'.

    Returns:
        ndarray: Image in 'HWC' order.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'")
    out = img[..., None] if len(img.shape) == 2 else img
    if input_order == 'CHW':
        out = out.transpose(1, 2, 0)
    return out
def to_y_channel(img):
    """Convert an image to the Y channel of YCbCr.

    Args:
        img (ndarray): Images with range [0, 255].

    Returns:
        ndarray: Images with range [0, 255] (float type) without rounding.
    """
    scaled = img.astype(np.float32) / 255.
    if scaled.ndim == 3 and scaled.shape[2] == 3:
        # 3-channel BGR input: keep only luma, with a trailing channel axis.
        scaled = bgr2ycbcr(scaled, y_only=True)[..., None]
    return scaled * 255.
def _convert_input_type_range(img):
    """Normalise an image to np.float32 with range [0, 1].

    Pre-processing counterpart of ``_convert_output_type_range`` for the
    colorspace conversion helpers (rgb2ycbcr / ycbcr2rgb).

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].

    Returns:
        ndarray: np.float32 image in [0, 1].

    Raises:
        TypeError: For any other input dtype.
    """
    src_type = img.dtype
    out = img.astype(np.float32)
    if src_type == np.uint8:
        out /= 255.
    elif src_type != np.float32:
        raise TypeError(f'The img type should be np.float32 or np.uint8, but got {src_type}')
    return out
def _convert_output_type_range(img, dst_type):
    """Convert a float32 image in [0, 255] to *dst_type*.

    np.uint8 targets are rounded and kept in [0, 255]; np.float32 targets
    are scaled down to [0, 1].  Post-processing counterpart of
    ``_convert_input_type_range`` for the colorspace conversion helpers.

    Args:
        img (ndarray): Image with np.float32 type and range [0, 255].
        dst_type (np.uint8 | np.float32): Desired output type.

    Returns:
        ndarray: The converted image with the desired type and range.
    """
    if dst_type not in (np.uint8, np.float32):
        raise TypeError(f'The dst_type should be np.float32 or np.uint8, but got {dst_type}')
    if dst_type == np.uint8:
        out = img.round()
    else:
        img /= 255.  # in-place division, mirroring the original behaviour
        out = img
    return out.astype(dst_type)
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr image.

    The bgr version of rgb2ycbcr.
    It implements the ITU-R BT.601 conversion for standard-definition
    television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
    It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: The converted YCbCr image. The output image has the same type
            and range as input image.
    """
    img_type = img.dtype
    # Work in float32 [0, 1]; the coefficients below are scaled for that range.
    img = _convert_input_type_range(img)
    if y_only:
        # Channel order is BGR: Y = 24.966*B + 128.553*G + 65.481*R + 16.
        out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        out_img = np.matmul(
            img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
    out_img = _convert_output_type_range(out_img, img_type)
    # NOTE(review): the trailing "| 13,571 | ..." tokens on the next line look
    # like dataset-export residue fused onto the source, not intentional code.
    return out_img | 13,571 | 40.631902 | 151 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/metrics.py | import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
def ssim(clean, noisy, normalized=True):
    """Average SSIM over a batch using skimage's structural_similarity.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the tensor range is [0., 1.]; otherwise
            it is assumed to already be [0, 255].

    Returns:
        float: SSIM averaged over the batch.
    """
    if len(clean.shape) != 4 or len(noisy.shape) != 4:
        raise ValueError("ssim expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    # (B, C, H, W) tensors -> (B, H, W, C) float32 numpy arrays.
    clean_np = np.moveaxis(clean.cpu().detach().numpy().astype(np.float32), 1, -1)
    noisy_np = np.moveaxis(noisy.cpu().detach().numpy().astype(np.float32), 1, -1)
    scores = [
        structural_similarity(c, n, data_range=255, multichannel=True)
        for c, n in zip(clean_np, noisy_np)
    ]
    return np.array(scores).mean()
def psnr(clean, noisy, normalized=True):
    """Average PSNR over a batch using skimage's peak_signal_noise_ratio.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the tensor range is [0., 1.]; otherwise
            it is assumed to already be [0, 255].

    Returns:
        float: PSNR averaged over the batch.
    """
    if len(clean.shape) != 4 or len(noisy.shape) != 4:
        raise ValueError("psnr expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    clean_np = clean.cpu().detach().numpy().astype(np.float32)
    noisy_np = noisy.cpu().detach().numpy().astype(np.float32)
    scores = [
        peak_signal_noise_ratio(c, n, data_range=255)
        for c, n in zip(clean_np, noisy_np)
    ]
    return np.array(scores).mean()
| 1,811 | 36.75 | 122 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/__init__.py | from .train_utils import *
from .main_function_helpers import * | 63 | 31 | 36 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/test_metrics.py | import torch
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
#import cv2
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from skimage import color
import PIL.Image as Image
import torchvision.transforms as transforms
from utils.utils_image import *
# Keys produced by average_on_folder(): mean ('_m') and std ('_s') of each metric.
metrics_key = ['psnr_m', 'psnr_s', 'psnr_delta_m', 'psnr_delta_s', 'ssim_m', 'ssim_s', 'ssim_delta_m', 'ssim_delta_s'];
def tensor_to_image(torch_image, low=0.0, high=1.0, clamp=True):
    """Convert a (B, C, H, W) tensor into a 2-D numpy array.

    Takes the first batch element and its first channel, optionally clamping
    values to [low, high] first.
    """
    out = torch.clamp(torch_image, low, high) if clamp else torch_image
    return out[0, 0].cpu().data.numpy()
def normalize(data):
    """Scale 8-bit pixel values from [0, 255] down to [0, 1]."""
    return data / 255.
def convert_dict_to_string(metrics):
    """Render a metrics dict as 'key: value ' pairs, values rounded to 3 decimals."""
    return ''.join(f'{key}: {round(value, 3)} ' for key, value in metrics.items())
def get_all_comparison_metrics(denoised, source, noisy=None, scale=None, return_title_string=False, clamp=True):
    """Compute PSNR/SSIM of *denoised* against *source*.

    Args:
        denoised (Tensor): (B, C, H, W) network output.
        source (Tensor): (B, C, H, W) ground truth.
        noisy (Tensor, optional): Noisy input; when given, the improvement
            over it is also reported as 'psnr_delta' / 'ssim_delta'.
        scale: Unused; kept for interface compatibility.
        return_title_string (bool): If True, return the metrics rendered via
            convert_dict_to_string instead of the dict.
        clamp (bool): Clamp the denoised image to [0, 1] before measuring.

    Returns:
        dict | str: Metrics dict, or its string rendering.
    """
    # The original pre-filled each entry with np.zeros(len(denoised)) and then
    # overwrote every one with a scalar — that dead initialisation is removed.
    if clamp:
        denoised = torch.clamp(denoised, 0.0, 1.0)
    metrics = {}
    metrics['psnr'] = psnr(source, denoised)
    metrics['ssim'] = ssim(source, denoised)
    if noisy is not None:
        metrics['psnr_delta'] = metrics['psnr'] - psnr(source, noisy)
        metrics['ssim_delta'] = metrics['ssim'] - ssim(source, noisy)
    if return_title_string:
        return convert_dict_to_string(metrics)
    return metrics
def average_on_folder(args, net, noise_std, 
                   verbose=True, device = torch.device('cuda')):
    """Run *net* over the configured val/test file list and average the metrics.

    Args:
        args: Namespace providing test_mode ('val' or 'test'), restore_mode,
            test_size / val_size, path_to_ImageNet_train and noise_std.
        net: Denoising network, called on each noisy image.
        noise_std: NOTE(review): not used in the body — the noise level is
            read from args.noise_std instead; confirm which one is intended.
        verbose (bool): Print per-file metrics.
        device: Torch device images and the network run on.

    Returns:
        dict: For each of psnr/ssim/psnr_delta/ssim_delta, its mean ('_m')
        and standard deviation ('_s') over the file list.
    """
    #if verbose:
        #print('Loading data info ...\n')
    print(f'\n Dataset: {args.test_mode}, Restore mode: {args.restore_mode}')
    load_path = '../training_set_lists/'
    # Fixed seeds so the noise realisations are reproducible per split.
    seed_dict = {
        "val":10,
        "test":20,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
    avreage_metrics_key = ['psnr', 'psnr_delta', 'ssim', 'ssim_delta']
    avg_metrics = {};
    for x in avreage_metrics_key:
        avg_metrics[x] = [];
    psnr_list = []  # NOTE(review): unused
    ssim_list = []  # NOTE(review): unused
    for f in files_source:
        transformT = transforms.ToTensor()
        ISource = torch.unsqueeze(transformT(Image.open(args.path_to_ImageNet_train + f).convert("RGB")),0).to(device)
        if args.test_mode == 'val':
            # In val mode the per-image seed is derived from the file name, so
            # every image always receives the same noise realisation.
            noise_seed = int(f[f.find('train/')+17:-5].replace('_',''))
            gen = gen.manual_seed(noise_seed)
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        INoisy = noise.to(device) + ISource
        out = torch.clamp(net(INoisy), 0., 1.)
        ind_metrics = get_all_comparison_metrics(out, ISource, INoisy, return_title_string = False)
        for x in avreage_metrics_key:
            avg_metrics[x].append(ind_metrics[x])
        if(verbose):
            print("%s %s" % (f, convert_dict_to_string(ind_metrics)))
    metrics = {}
    for x in avreage_metrics_key:
        metrics[x+'_m'] = np.mean(avg_metrics[x])
        metrics[x+'_s'] = np.std(avg_metrics[x])
    if verbose:
        print("\n Average %s" % (convert_dict_to_string(metrics)))
    #if(not verbose):
    return metrics
def metrics_avg_on_noise_range(net, args, noise_std_array, device = torch.device('cuda')):
    """Evaluate *net* at every noise level and collect the averaged metrics.

    Returns a dict mapping each name in ``metrics_key`` to an array with one
    entry per noise level in *noise_std_array*.
    """
    results = {key: np.zeros(len(noise_std_array)) for key in metrics_key}
    for idx, sigma in enumerate(noise_std_array):
        level_metrics = average_on_folder(args, net,
                                          noise_std=sigma,
                                          verbose=False, device=device)
        for key in metrics_key:
            results[key][idx] += level_metrics[key]
            print('noise: ', int(sigma*255), ' ', key, ': ', str(results[key][idx]))
    return results
| 4,404 | 29.804196 | 119 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/noise_model.py | import torch
def get_noise(data, noise_seed, fix_noise, noise_std = float(25)/255.0):
    """Draw Gaussian noise with standard deviation *noise_std*, shaped like *data*.

    When *fix_noise* is set, each batch element gets noise from a CUDA
    generator seeded with the corresponding entry of *noise_seed*, making the
    noise reproducible per image; otherwise plain torch.randn_like noise is
    drawn from the default generator.
    """
    if not fix_noise:
        noise = torch.randn_like(data)
        noise.data = noise.data * noise_std
        return noise
    device = torch.device('cuda')
    gen = torch.Generator(device=device)
    per_sample_shape = list(data.size())[1:]
    chunks = []
    for i in range(data.size(dim=0)):
        gen = gen.manual_seed(noise_seed[i].item())
        sample = torch.randn(per_sample_shape, generator=gen, device=device) * noise_std
        chunks.append(torch.unsqueeze(sample, 0))
    return torch.cat(chunks, 0)
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/meters.py | import time
import torch
class AverageMeter(object):
    """Keeps a running sum and count of a metric and exposes the mean.

    Attributes:
        val: per-sample value of the most recent update (val / n).
        sum: total of all raw values passed to update().
        count: total number of samples seen.
        avg: sum / count since the last reset().
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in a new measurement *val* taken over *n* samples."""
        scalar = val.item() if isinstance(val, torch.Tensor) else val
        self.val = scalar / n
        self.sum += scalar
        self.count += n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a scalar metric."""

    def __init__(self, momentum=0.98):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history; the next update seeds the average."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Blend *val* into the running average with weight (1 - momentum)."""
        scalar = val.item() if isinstance(val, torch.Tensor) else val
        if self.val is None:
            self.avg = scalar
        else:
            self.avg = self.avg * self.momentum + scalar * (1 - self.momentum)
        self.val = scalar
class TimeMeter(object):
    """Measures wall-clock time and a per-second rate of processed items."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        """Restart the clock, optionally carrying over *init* seconds."""
        self.init = init
        self.start = time.time()
        self.n = 0

    def update(self, val=1):
        """Record *val* more processed items."""
        self.n += val

    @property
    def elapsed_time(self):
        """Seconds since reset(), plus the initial offset."""
        return self.init + (time.time() - self.start)

    @property
    def avg(self):
        """Items processed per elapsed second."""
        return self.n / self.elapsed_time
| 1,321 | 20.322581 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Neighbor2Neighbor/utils/data_helpers/load_datasets_helpers.py | import os
import os.path
import numpy as np
import h5py
import torch
import torchvision.transforms as transforms
import PIL.Image as Image
from utils.utils_image import *
class ImagenetSubdataset(torch.utils.data.Dataset):
    # Dataset over a precomputed list of ImageNet-train file paths; yields
    # (image_tensor, per_image_noise_seed) pairs.
    def __init__(self, size, path_to_ImageNet_train, mode='train', patch_size='128', val_crop=True):
        """
        Args:
            size: Selects which precomputed file-path list to load.
            path_to_ImageNet_train: Root directory the stored relative paths
                are resolved against.
            mode: 'train' or 'val' — picks the file list and the transform.
            patch_size: Side length of the center crop.  NOTE(review): the
                default is the string '128'; confirm callers pass an int as
                torchvision's CenterCrop expects.
            val_crop: In 'val' mode, whether to center-crop or keep full size.
        """
        super().__init__()
        load_path = '../training_set_lists/'
        self.path_to_ImageNet_train = path_to_ImageNet_train
        if mode=='train':
            self.files = torch.load(load_path+f'trsize{size}_filepaths.pt')
            self.transform = transforms.Compose([
                transforms.CenterCrop(patch_size),
                transforms.ToTensor(),
                ])
        elif mode=='val':
            self.files = torch.load(load_path+f'ImageNetVal{size}_filepaths.pt')
            #print(self.files)
            if val_crop:
                self.transform = transforms.Compose([
                    transforms.CenterCrop(patch_size),
                    transforms.ToTensor(),
                    ])
            else:
                self.transform = transforms.Compose([
                    transforms.ToTensor(),
                    ])
        # Derive a deterministic integer noise seed from each file name, so a
        # given image always receives the same noise realisation.
        self.noise_seeds = {}
        for i, file in enumerate(self.files):
            key = file[file.find('train/')+16:-5]
            number = int(file[file.find('train/')+17:-5].replace('_',''))
            self.noise_seeds[key] = number
    def __len__(self):
        # Number of images in the selected file list.
        return len(self.files)
    def __getitem__(self, index):
        # Load one image and return it with its precomputed noise seed.
        file = self.files[index]
        key = file[file.find('train/')+16:-5]
        noise_seed = self.noise_seeds[key]
        image = Image.open(self.path_to_ImageNet_train + self.files[index]).convert("RGB") #ImageNet contains some grayscale images
        data = self.transform(image)
        return data, noise_seed
| 1,949 | 32.62069 | 131 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/models/unet_fastMRI.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import math
from torch import nn
from torch.nn import functional as F
class unet_fastMRI(nn.Module):
    """
    PyTorch implementation of a U-Net model.

    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical
    image computing and computer-assisted intervention, pages 234–241.
    Springer, 2015.
    """

    def __init__(
        self,
        in_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
        residual_connection: bool = True,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
                The output has the same number of channels.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
            residual_connection: If True the network output is subtracted from
                the input, i.e. the model predicts the residual.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = in_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        self.residual_connection = residual_connection

        # Encoder: first conv block, then one conv block per pooling level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
            ch *= 2
        # Bottleneck.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)

        # Decoder: a transposed conv (2x upsample) followed by a conv block
        # per level; the last level also maps back to out_chans with a 1x1 conv.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
            self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
            ch //= 2

        self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )
        )

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser and return it.

        Bug fix: declared as a staticmethod — the original unannotated version
        worked only when called on the class; calling it on an instance would
        have bound ``parser`` to ``self``.
        """
        parser.add_argument("--bias", default=True, help="use residual bias")
        parser.add_argument("--residual", default=True, help="use residual connection")
        parser.add_argument("--in-chans", default=3, help="Either color (3) or grey (1)")
        parser.add_argument("--chans", default=32, help="Number of channels in outer most layer")
        parser.add_argument("--num-pool-layers", default=3, help="Number of layers per down- and up-sampling path.")
        parser.add_argument("--no-pooling", default=False, help="No downsampling. Use the no_unet_fastMRI module.")
        return parser

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        stack = []
        output = image.detach().clone()

        # apply down-sampling layers, remembering each level for skip connections
        for layer in self.down_sample_layers:
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)

        output = self.conv(output)

        # apply up-sampling layers, concatenating the matching encoder output
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)

            # reflect pad on the right/bottom if needed to handle odd input dimensions
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # padding bottom
            if torch.sum(torch.tensor(padding)) != 0:
                output = F.pad(output, padding, "reflect")

            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)

        if self.residual_connection:
            output = image - output
        return output
class ConvBlock(nn.Module):
    """
    Two 3x3 convolutions, each followed by instance normalization,
    LeakyReLU activation and dropout.
    """

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def _stage(cin):
            # one conv -> norm -> activation -> dropout stage
            return [
                nn.Conv2d(cin, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        # Same flat nn.Sequential layout as before, so state_dict keys match.
        self.layers = nn.Sequential(*_stage(in_chans), *_stage(out_chans))

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Apply the block to a `(N, in_chans, H, W)` tensor, giving `(N, out_chans, H, W)`."""
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    One 2x upsampling transpose convolution followed by instance
    normalization and LeakyReLU activation.
    """

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Upsample a `(N, in_chans, H, W)` tensor to `(N, out_chans, H*2, W*2)`."""
        return self.layers(image)
| 7,138 | 34.517413 | 131 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/models/__init__.py | from .unet_fastMRI import * | 27 | 27 | 27 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
from .meters import AverageMeter, RunningAverageMeter, TimeMeter
class ProgressBar:
    """Thin wrapper around tqdm that can format meter objects for display."""
    def __init__(self, iterable, epoch=None, prefix=None, quiet=False):
        """Wrap *iterable*; with quiet=True no tqdm bar is created."""
        self.epoch = epoch
        self.quiet = quiet
        self.prefix = prefix + ' | ' if prefix is not None else ''
        if epoch is not None:
            self.prefix += f"epoch {epoch:02d}"
        self.iterable = iterable if self.quiet else tqdm(iterable, self.prefix, leave=False)
    def __iter__(self):
        return iter(self.iterable)
    def log(self, stats, verbose=False):
        """Show *stats* as the tqdm postfix (no-op when quiet)."""
        if not self.quiet:
            self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)
    def format_stats(self, stats, verbose=False):
        """Render numbers and meter objects in *stats* into display strings."""
        postfix = OrderedDict(stats)
        for key, value in postfix.items():
            if isinstance(value, Number):
                # small values switch to scientific notation
                fmt = "{:.3f}" if value > 0.001 else "{:.1e}"
                postfix[key] = fmt.format(value)
            elif isinstance(value, AverageMeter) or isinstance(value, RunningAverageMeter):
                if verbose:
                    postfix[key] = f"{value.avg:.5f} ({value.val:.5f})"
                else:
                    postfix[key] = f"{value.avg:.5f}"
            elif isinstance(value, TimeMeter):
                postfix[key] = f"{value.elapsed_time:.1f}s"
            elif not isinstance(postfix[key], str):
                postfix[key] = str(value)
        return postfix
    def print(self, stats, verbose=False):
        """Return a one-line 'prefix | key value | ...' summary string."""
        postfix = " | ".join(key + " " + value.strip() for key, value in self.format_stats(stats, verbose).items())
        return f"{self.prefix + ' | ' if self.epoch is not None else ''}{postfix}"
| 1,789 | 37.913043 | 115 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
def modcrop(img, scale):
    """Crop *img* so its spatial dimensions are multiples of *scale*.

    Accepts HW, CHW or BCHW arrays; (H, W) are always the last two axes.
    The input is sliced (a view), not copied.
    """
    if img.ndim == 2:
        h, w = img.shape
        return img[:h - h % scale, :w - w % scale]
    if img.ndim == 3:
        _, h, w = img.shape
        return img[:, :h - h % scale, :w - w % scale]
    if img.ndim == 4:
        _, _, h, w = img.shape
        return img[:, :, :h - h % scale, :w - w % scale]
    raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
def modcrop_pil(image, modulo):
    """Crop a PIL image from the top-left so width and height are multiples of *modulo*."""
    new_w = image.width - image.width % modulo
    new_h = image.height - image.height % modulo
    return image.crop((0, 0, new_w, new_h))
def crop_center(pil_img, crop_width, crop_height):
    """Center-crop a PIL image to (crop_width, crop_height)."""
    full_w, full_h = pil_img.size
    left = (full_w - crop_width) // 2
    top = (full_h - crop_height) // 2
    # left + crop_width == (full_w + crop_width) // 2 for integer crop sizes,
    # so this matches the original box exactly.
    return pil_img.crop((left, top, left + crop_width, top + crop_height))
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
# from https://github.com/cszn/KAIR/blob/master/utils/utils_image.py
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel evaluated elementwise on a tensor.

    Piecewise cubic that is nonzero for |x| < 2 and zero outside, matching
    MATLAB imresize's 'cubic' kernel.
    """
    t = torch.abs(x)
    t2 = t * t
    t3 = t2 * t
    inner = (1.5 * t3 - 2.5 * t2 + 1) * (t <= 1).type_as(t)
    outer = (-0.5 * t3 + 2.5 * t2 - 4 * t + 2) * ((t > 1) * (t <= 2)).type_as(t)
    return inner + outer
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Precompute, per output pixel, the contributing input indices and weights.

    Used by the imresize routines below.  Returns a tuple
    (weights, indices, sym_len_s, sym_len_e) where sym_len_s / sym_len_e are
    the number of symmetric padding pixels needed at the start / end of the
    axis.  NOTE(review): the ``kernel`` argument is not referenced in the
    body — the cubic kernel is hard-wired.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Map each output coordinate back into input space (MATLAB convention).
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = torch.floor(u - kernel_width / 2)
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Amount of symmetric padding required so all indices are in range, then
    # shift indices to address the padded axis.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    """Resize a CHW (or HW) float tensor in [0, 1] with MATLAB-style bicubic.

    The image is resampled first along H, then along W, using symmetric
    boundary padding and the per-pixel weights from
    calculate_weights_indices.  NOTE(review): for 2-D inputs, ``unsqueeze_``
    and ``squeeze_`` mutate the caller's tensor in place.
    """
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    # Weighted sum over the contributing rows for every output row.
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    # Weighted sum over the contributing columns for every output column.
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """Bicubic resize of a numpy image in [0, 1], MATLAB-imresize style.

    Args:
        img: numpy array, HWC or HW, values in [0, 1].
        scale: single scale factor applied to both H and W.
        antialiasing: widen the kernel when downscaling (passed through to
            ``calculate_weights_indices``, defined elsewhere in this module).

    Returns:
        numpy array, HWC or HW (matching the input rank), values in [0, 1],
        not rounded or clipped.

    The resize is separable: first along H, then along W.  Borders are
    symmetrically mirrored before filtering so the kernel never reads
    out of bounds.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        # Promote HW to HWC with a single channel so one code path handles both.
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: pad top/bottom with mirrored rows before filtering
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    # kernel_width may differ from 4 after calculate_weights_indices (antialiasing widens it)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            # Dot product of the mirrored row window with the per-row cubic weights.
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying: pad left/right with mirrored columns
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        # Drop the channel axis we added for HW inputs.
        out_2.squeeze_()
    return out_2.numpy()
| 9,403 | 36.466135 | 99 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/train_utils.py | import argparse
import os
import logging
import numpy as np
import random
import sys
import torch
from datetime import datetime
from torch.serialization import default_restore_location
def add_logging_arguments(parser):
    """Register logging/checkpointing/experiment-tracking CLI options on *parser*.

    Args:
        parser: an ``argparse.ArgumentParser`` (or compatible) instance.

    Returns:
        The same parser, for chaining.
    """
    parser.add_argument("--seed", default=0, type=int, help="random number generator seed")
    parser.add_argument("--output-dir", default="experiments", help="path to experiment directories")
    parser.add_argument("--experiment", default=None, help="experiment name to be used with Tensorboard")
    parser.add_argument("--resume-training", action="store_true", help="whether to resume training")
    # Bug fix: the help text was a regular string, so "\t" rendered as a literal
    # tab and "\p", "\c", "\d" were invalid escape sequences.  A raw string
    # preserves the intended backslash path example.
    parser.add_argument("--restore-mode", default=None, help=r"Either 'best' 'last' or '\path\to\checkpoint\dir'")
    parser.add_argument("--restore-file", default=None, help="filename to load checkpoint")
    parser.add_argument("--test-mode", default=None, help="Evaluate on which test set.")
    parser.add_argument("--no-save", action="store_true", help="don't save models or checkpoints")
    parser.add_argument("--step-checkpoints", action="store_true", help="store all step checkpoints")
    parser.add_argument("--no-log", action="store_true", help="don't save logs to file or Tensorboard directory")
    parser.add_argument("--log-interval", type=int, default=100, help="log every N steps")
    parser.add_argument("--no-visual", action="store_true", help="don't use Tensorboard")
    parser.add_argument("--visual-interval", type=int, default=100, help="log every N steps")
    parser.add_argument("--no-progress", action="store_true", help="don't use progress bar")
    parser.add_argument("--draft", action="store_true", help="save experiment results to draft directory")
    parser.add_argument("--dry-run", action="store_true", help="no log, no save, no visualization")
    return parser
def init_logging(args):
    """Configure root logging: always log to console, optionally to a file.

    When ``args.no_log`` is unset and ``args.log_file`` is given, a file
    handler is attached as well; resumed runs append instead of truncating.
    """
    # Remove any handlers left over from a previous basicConfig call so that
    # re-initialization starts from a clean slate.
    for stale_handler in list(logging.root.handlers):
        logging.root.removeHandler(stale_handler)
    handlers = [logging.StreamHandler()]
    if not args.no_log and args.log_file is not None:
        # Append when resuming so earlier training history is kept.
        file_mode = "a" if args.resume_training else "w"
        handlers.append(logging.FileHandler(args.log_file, mode=file_mode))
    logging.basicConfig(
        handlers=handlers,
        format="[%(asctime)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
    logging.info("Arguments: {}".format(vars(args)))
def setup_experiment(args):
    """Seed every RNG and prepare the experiment directory layout on *args*.

    Dry runs only set the seeds and disable saving/logging/visualization.
    Otherwise the experiment name is derived from the model, noise level,
    timestamp and training-set size, and the experiment/checkpoint/log
    directories are created (idempotently) under ``args.output_dir``.
    """
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.dry_run:
        # A dry run persists nothing: no checkpoints, no logs, no Tensorboard.
        args.no_save = args.no_log = args.no_visual = True
        return
    if not args.experiment:
        args.experiment = f"{args.model.replace('_', '-')}"  # unet
    args.experiment = "-".join([args.experiment, 'std' + str(args.noise_std)])
    if not args.resume_training:
        # Fresh runs get a timestamp and the training-set size in their name.
        timestamp = datetime.now().strftime("%b-%d-%H:%M:%S")
        args.experiment = "-".join([args.experiment, timestamp])
        args.experiment = "-".join([args.experiment, 'tr' + str(args.train_size)])
    args.experiment_dir = os.path.join(args.output_dir, args.experiment)
    # exist_ok=True: resuming into an existing directory must not raise.
    os.makedirs(args.experiment_dir, exist_ok=True)
    if not args.no_save:
        args.checkpoint_dir = os.path.join(args.experiment_dir, "checkpoints")
        os.makedirs(args.checkpoint_dir, exist_ok=True)
    if not args.no_log:
        args.log_dir = os.path.join(args.experiment_dir, "logs")
        os.makedirs(args.log_dir, exist_ok=True)
        args.log_file = os.path.join(args.log_dir, "train.log")
def save_checkpoint(args, step, epoch, model, optimizer=None, scheduler=None, score=None, mode="min"):
    """Save training state, tracking best/last checkpoints via function attributes.

    State is kept on the function object itself (``save_checkpoint.best_score``,
    ``best_step``, ``best_epoch``, ``last_step``); callers in this module reset
    these attributes before training starts.

    Args:
        args: namespace with ``no_save``, ``save_interval``, ``checkpoint_dir``,
            ``step_checkpoints`` among others.
        step / epoch: current global step and epoch.
        model / optimizer / scheduler: single objects or lists thereof.
        score: validation metric used to decide the "best" checkpoint.
        mode: "min" if lower score is better, "max" if higher is better.
    """
    assert mode == "min" or mode == "max"
    last_step = getattr(save_checkpoint, "last_step", -1) #-1 as default argument that is given if attribute does not exist
    save_checkpoint.last_step = max(last_step, step)
    default_score = float("inf") if mode == "min" else float("-inf")
    best_score = getattr(save_checkpoint, "best_score", default_score)
    if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
        save_checkpoint.best_step = step
        save_checkpoint.best_epoch = epoch
        save_checkpoint.best_score = score
    if not args.no_save and step % args.save_interval == 0:
        os.makedirs(args.checkpoint_dir, exist_ok=True)
        # Normalize to lists so single objects and ensembles share one code path.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        # NOTE(review): best_step/best_epoch are read without a getattr default;
        # this assumes the caller pre-initialized them (cli_main does) or an
        # improvement has already occurred — confirm before reusing elsewhere.
        state_dict = {
            "step": step,
            "epoch": epoch,
            "score": score,
            "last_step": save_checkpoint.last_step,
            "best_step": save_checkpoint.best_step,
            "best_epoch": save_checkpoint.best_epoch,
            "best_score": getattr(save_checkpoint, "best_score", None),
            "model": [m.state_dict() for m in model] if model is not None else None,
            "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
            "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
            # Drop non-picklable callables from the saved args namespace.
            "args": argparse.Namespace(**{k: v for k, v in vars(args).items() if not callable(v)}),
        }
        if args.step_checkpoints:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint{}.pt".format(step)))
        if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_best.pt"))
        if step > last_step:
            torch.save(state_dict, os.path.join(args.checkpoint_dir, "checkpoint_last.pt"))
def load_checkpoint(args, model=None, optimizer=None, scheduler=None):
    """Restore model/optimizer/scheduler state from ``args.restore_file``.

    Also re-seeds the bookkeeping attributes on ``save_checkpoint`` so that
    best/last tracking continues seamlessly across the resume.

    Returns:
        The raw checkpoint ``state_dict`` (only meaningful when the restore
        file exists; otherwise the function returns ``None`` implicitly).
    """
    if args.restore_file is not None and os.path.isfile(args.restore_file):
        print('restoring model..')
        # Load onto CPU first; parameters are moved to the right device by the caller.
        state_dict = torch.load(args.restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
        # Normalize to lists so single objects and ensembles share one code path.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        if "best_score" in state_dict:
            save_checkpoint.best_score = state_dict["best_score"]
            save_checkpoint.best_step = state_dict["best_step"]
        if "last_step" in state_dict:
            save_checkpoint.last_step = state_dict["last_step"]
        if model is not None and state_dict.get("model", None) is not None:
            for m, state in zip(model, state_dict["model"]):
                m.load_state_dict(state)
        if optimizer is not None and state_dict.get("optimizer", None) is not None:
            for o, state in zip(optimizer, state_dict["optimizer"]):
                o.load_state_dict(state)
        if scheduler is not None and state_dict.get("scheduler", None) is not None:
            for s, state in zip(scheduler, state_dict["scheduler"]):
                # NOTE(review): this overwrites the checkpointed milestones with the
                # scheduler's freshly-configured ones — apparently intended to keep
                # the current schedule rather than the saved one; confirm. It also
                # assumes the scheduler type exposes `.milestones` (e.g. MultiStepLR).
                milestones = s.milestones
                state['milestones'] = milestones
                s.load_state_dict(state)
                s.milestones = milestones
        logging.info("Loaded checkpoint {}".format(args.restore_file))
        return state_dict
| 7,573 | 52.716312 | 138 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/main_function_helpers.py | import torch
import argparse
import os
import yaml
import pathlib
import pickle
import logging
import sys
import time
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torchvision
import glob
from torch.serialization import default_restore_location
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
import utils
import models
from utils.data_helpers.load_datasets_helpers import *
from utils.meters import *
from utils.progress_bar import *
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from utils.test_metrics import *
def load_model(args):
    """Load the single trained UNet denoiser found under ``args.output_dir``.

    Expects exactly one ``unet*`` experiment directory; restores the checkpoint
    selected by ``args.restore_mode`` ('best' or 'last'), rebuilds the model
    from the args saved inside the checkpoint, and returns it in eval mode on
    GPU if available.

    Raises:
        ValueError: if zero or multiple candidate model directories exist.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    checkpoint_path = glob.glob(args.output_dir +'/unet*')
    if len(checkpoint_path) != 1:
        raise ValueError("There is either no or more than one model to load")
    checkpoint_path = pathlib.Path(checkpoint_path[0] + f"/checkpoints/checkpoint_{args.restore_mode}.pt")
    # Load onto CPU first, then move the rebuilt model to the target device.
    state_dict = torch.load(checkpoint_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
    # Rebuild args from the checkpoint so the architecture matches training time.
    args = argparse.Namespace(**{ **vars(state_dict["args"]), "no_log": True})
    #model = models.build_model(args).to(device)
    model = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans = args.chans,
        num_pool_layers = args.num_pool_layers,
        drop_prob = 0.0,
        residual_connection = args.residual,
    ).to(device)
    model.load_state_dict(state_dict["model"][0])
    model.eval()
    return model
def cli_main_test(args):
    """Evaluate the trained denoiser over a range of noise levels and pickle the metrics.

    Loads the checkpoint selected by ``args.restore_mode`` via ``load_model``,
    computes the metric dictionary with ``metrics_avg_on_noise_range`` and
    writes it to ``args.output_dir`` as a pickle file.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    model = load_model(args)
    # evaluate test performance over following noise range (values scaled to [0, 1])
    # Bug fix: np.linspace requires an integer sample count, but floor-division of
    # the float CLI arguments yields a float (modern NumPy raises TypeError).
    num_levels = int((args.test_noise_std_max - args.test_noise_std_min) // args.test_noise_stepsize) + 1
    noise_std_range = np.linspace(args.test_noise_std_min, args.test_noise_std_max,
                                  num_levels, dtype=int) / 255.
    metrics_path = os.path.join(args.output_dir, args.test_mode + '_' + str(args.test_noise_std_min)+'-'+str(args.test_noise_std_max)+f'_metrics_{args.restore_mode}.p')
    metrics_dict = metrics_avg_on_noise_range(model, args, noise_std_range, device=device)
    # Bug fix: use a context manager so the file handle is closed deterministically
    # (the previous bare open() leaked the handle).
    with open(metrics_path, "wb") as fh:
        pickle.dump(metrics_dict, fh)
def cli_main(args):
    """Train the UNet denoiser in Noise2Noise fashion with lr annealing, decay and early stopping.

    Phases: (1) optional learning-rate annealing — the lr is doubled
    (``lr_beta``) after every validation improvement until ``lr_patience_annealing``
    epochs pass without one, after which the best checkpoint is reloaded and
    (2) ReduceLROnPlateau decay takes over until the minimum lr or convergence
    triggers a stop.  Both the input and the target are independently noised
    versions of the same clean patch (self-supervised training).
    """
    available_models = glob.glob(f'{args.output_dir}/*')
    if not args.resume_training and available_models:
        raise ValueError('There exists already a trained model and resume_training is set False')
    if args.resume_training:
        f_restore_file(args)
    # reset the attributes of the function save_checkpoint
    mode = "max"
    default_score = float("inf") if mode == "min" else float("-inf")
    utils.save_checkpoint.best_score = default_score
    utils.save_checkpoint.best_step = -1
    utils.save_checkpoint.best_epoch = -1
    utils.save_checkpoint.last_step = -1
    utils.save_checkpoint.current_lr = args.lr
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Set the name of the directory for saving results
    utils.setup_experiment(args)
    utils.init_logging(args)
    # Build data loaders, a model and an optimizer
    model = models.unet_fastMRI(
        in_chans=args.in_chans,
        chans = args.chans,
        num_pool_layers = args.num_pool_layers,
        drop_prob = 0.0,
        residual_connection = args.residual,
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    #scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 60, 70, 80, 90, 100], gamma=0.5)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
        threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
        min_lr=args.lr_min, eps=1e-08, verbose=True
    )
    logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
    trainset = ImagenetSubdataset(args.train_size,args.path_to_ImageNet_train,mode='train',patch_size=args.patch_size,val_crop=args.val_crop)
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    valset = ImagenetSubdataset(args.val_size,args.path_to_ImageNet_train,mode='val',patch_size=args.patch_size,val_crop=args.val_crop)
    val_loader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True,generator=torch.Generator().manual_seed(args.seed))
    print(optimizer.param_groups[0]["lr"])
    # Decide the starting phase: resuming or no-annealing runs go straight to
    # plateau decay (start_decay=True); otherwise annealing runs first.
    if args.resume_training:
        state_dict = utils.load_checkpoint(args, model, optimizer, scheduler)
        global_step = state_dict['last_step']
        start_epoch = int(state_dict['last_step']/(len(train_loader)))+1
        start_decay = True
    elif args.no_annealing:
        global_step = -1
        start_epoch = 0
        start_decay = True
    else:
        global_step = -1
        start_epoch = 0
        start_decay = False
    print(optimizer.param_groups[0]["lr"])
    args.log_interval = min(len(trainset), 100) # len(train_loader)=log once per epoch
    args.no_visual = False # True for not logging to tensorboard
    # Track moving average of loss values
    train_meters = { "train_loss":RunningAverageMeter(0.98)}
    valid_meters = {name: AverageMeter() for name in (["valid_psnr", "valid_ssim", "valid_psnr_self_supervised", "valid_ssim_self_supervised"])}
    # Create tensorflow event file
    writer = SummaryWriter(log_dir=args.experiment_dir) if not args.no_visual else None
    break_counter = 0
    # store the best val performance from lr-interval before the last lr decay
    best_val_last = 0
    # track the best val performance for the current lr-inerval
    best_val_current = 0
    # count for how many lr intervals there was no improvement and break only if there was no improvement for 2
    lr_interval_counter = 0
    # if best_val_current at the end of the current lr interval is smaller than best_val_last we perform early stopping
    for epoch in range(start_epoch, args.num_epochs):
        start = time.process_time()
        train_bar = ProgressBar(train_loader, epoch)
        # At beginning of each epoch reset the train meters
        for meter in train_meters.values():
            meter.reset()
        for inputs, noise_seed in train_bar:
            model.train() #Sets the module in training mode.
            global_step += 1
            inputs = inputs.to(device)
            # Two independent noise realizations: one for the input, one for the
            # target (seed multiplied by 10) — Noise2Noise-style supervision.
            noise = get_noise(inputs,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
            noise_target = get_noise(inputs,torch.mul(noise_seed,10),fix_noise = args.fix_noise, noise_std = args.noise_std_target/255.)
            noisy_targets = noise_target + inputs
            noisy_inputs = noise + inputs
            outputs = model(noisy_inputs)
            # In loss function, I changed outputs to noisy_targets for self-supervision
            loss = F.mse_loss(outputs, noisy_targets, reduction="sum") / torch.prod(torch.tensor(inputs.size())) #(inputs.size(0) * 2)
            model.zero_grad()
            loss.backward()
            optimizer.step()
            train_meters["train_loss"].update(loss.item())
            train_bar.log(dict(**train_meters, lr=optimizer.param_groups[0]["lr"]), verbose=True)
            # Add to tensorflow event file:
            if writer is not None:
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                writer.add_scalar("loss/train", train_meters["train_loss"].avg, global_step)
                sys.stdout.flush()
        if epoch % args.valid_interval == 0:
            model.eval()
            gen_val = torch.Generator()
            gen_val = gen_val.manual_seed(10)
            for meter in valid_meters.values():
                meter.reset()
            valid_bar = ProgressBar(val_loader)
            for sample, noise_seed in valid_bar:
                with torch.no_grad():
                    sample = sample.to(device)
                    # Self-supervised validation with fixed noise
                    noise_self_supervised = get_noise(sample,noise_seed, fix_noise = args.fix_noise, noise_std = args.noise_std/255.)
                    noise_target_self_supervised = get_noise(sample,torch.mul(noise_seed,10),fix_noise = args.fix_noise, noise_std = args.noise_std_target/255.)
                    noisy_input_fixed = sample + noise_self_supervised
                    noisy_target = sample + noise_target_self_supervised
                    output_self_supervised = model(noisy_input_fixed)
                    valid_psnr_self_supervised = psnr(output_self_supervised, noisy_target)
                    valid_ssim_self_supervised = ssim(output_self_supervised, noisy_target)
                    valid_meters["valid_psnr_self_supervised"].update(valid_psnr_self_supervised.item())
                    valid_meters["valid_ssim_self_supervised"].update(valid_ssim_self_supervised.item())
                    # Ground truth validation wit fixed noise
                    # It uses the same input and output as in the self-supervised case since the noise seed is fixed
                    valid_psnr = psnr(output_self_supervised, sample)
                    valid_ssim = ssim(output_self_supervised, sample)
                    valid_meters["valid_psnr"].update(valid_psnr.item())
                    valid_meters["valid_ssim"].update(valid_ssim.item())
            if writer is not None:
                # Average is correct valid_meters['valid_psnr'].avg since .val would be just the psnr of last sample in val set.
                writer.add_scalar("psnr/valid", valid_meters['valid_psnr'].avg, global_step)
                writer.add_scalar("ssim/valid", valid_meters['valid_ssim'].avg, global_step)
                writer.add_scalar("psnr_selfsupervised/valid", valid_meters['valid_psnr_self_supervised'].avg, global_step)
                writer.add_scalar("ssim_selfsupervised/valid", valid_meters["valid_ssim_self_supervised"].avg, global_step)
                writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
                sys.stdout.flush()
            if args.val_flag == 0: # if we do self-supervised validation
                val_loss = valid_meters["valid_psnr_self_supervised"].avg
            else: # if we do supervised validation
                val_loss = valid_meters["valid_psnr"].avg
            # Annealing phase: on improvement save and double the lr; otherwise
            # count misses until patience runs out, then reload the best model
            # and switch to the plateau-decay phase.
            if utils.save_checkpoint.best_score < val_loss and not start_decay:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = utils.save_checkpoint.current_lr
                optimizer.param_groups[0]["lr"] = current_lr*args.lr_beta
                utils.save_checkpoint.current_lr = current_lr*args.lr_beta
                annealing_counter = 0
            elif not start_decay:
                annealing_counter += 1
                current_lr = utils.save_checkpoint.current_lr
                if annealing_counter == args.lr_patience_annealing:
                    available_models = glob.glob(f'{args.output_dir}/*')
                    if not available_models:
                        raise ValueError('No file to restore')
                    elif len(available_models)>1:
                        raise ValueError('Too many files to restore from')
                    model_path = os.path.join(available_models[0], "checkpoints/checkpoint_best.pt")
                    state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                    model = [model] if model is not None and not isinstance(model, list) else model
                    for m, state in zip(model, state_dict["model"]):
                        m.load_state_dict(state)
                    model = model[0]
                    optimizer.param_groups[0]["lr"] = current_lr/(args.lr_beta*args.inital_decay_factor)
                    start_decay = True
                    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                        optimizer, mode='max', factor=args.lr_gamma, patience=args.lr_patience,
                        threshold=args.lr_threshold, threshold_mode='abs', cooldown=0,
                        min_lr=args.lr_min, eps=1e-08, verbose=True
                    )
            else:
                utils.save_checkpoint(args, global_step, epoch, model, optimizer, score=val_loss, mode="max")
                current_lr = optimizer.param_groups[0]["lr"]
            if val_loss > best_val_current:
                best_val_current = val_loss
            if writer is not None:
                writer.add_scalar("epoch", epoch, global_step)
                sys.stdout.flush()
            if start_decay:
                current_lr = optimizer.param_groups[0]["lr"]
                scheduler.step(val_loss)
                new_lr = optimizer.param_groups[0]["lr"]
                #At every lr decay check if the model did not improve during the current or the previous lr interval and break if it didn't.
                if new_lr < current_lr:
                    if best_val_current < best_val_last and lr_interval_counter==1:
                        logging.info('Break training due to convergence of val loss!')
                        break
                    elif best_val_current < best_val_last and lr_interval_counter==0:
                        lr_interval_counter += 1
                        logging.info('Do not yet break due to convergence of val loss!')
                    else:
                        best_val_last = best_val_current
                        best_val_current = 0
                        lr_interval_counter = 0
        end = time.process_time() - start
        logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
        # Once the minimum lr is reached, allow a fixed number of extra epochs
        # (args.break_counter) before stopping for good.
        if optimizer.param_groups[0]["lr"] == args.lr_min and start_decay:
            break_counter += 1
        if break_counter == args.break_counter:
            print('Break training due to minimal learning rate constraint!')
            break
    logging.info(f"Done training! Best PSNR {utils.save_checkpoint.best_score:.3f} obtained after step {utils.save_checkpoint.best_step} (epoch {utils.save_checkpoint.best_epoch}).")
def get_args(hp, ee, rr):
    """Build the experiment argument namespace for one hyperparameter setting.

    Args:
        hp: dict mapping argument names to lists of candidate values.
        ee: index selecting which value of each hyperparameter list to use.
        rr: run/repetition index; determines the random seed (42 + 10*rr).

    Returns:
        argparse.Namespace with defaults overridden by hp[key][ee].
    """
    parser = argparse.ArgumentParser(allow_abbrev=False)
    # Add data arguments
    parser.add_argument("--train-size", default=None, help="number of examples in training set")
    parser.add_argument("--val-size", default=40, help="number of examples in validation set")
    parser.add_argument("--test-size", default=100, help="number of examples in test set")
    parser.add_argument("--val-crop", default=True, type=bool, help="Crop validation images to train size.")
    parser.add_argument("--patch-size", default=128, help="size of the center cropped HR image")
    parser.add_argument("--batch-size", default=128, type=int, help="train batch size")
    # Add model arguments
    parser.add_argument("--model", default="unet", help="model architecture")
    # Add noise arguments
    parser.add_argument('--noise_std', default = 15, type = float,
                help = 'noise level')
    parser.add_argument('--test_noise_std_min', default = 15, type = float,
                help = 'minimal noise level for testing')
    parser.add_argument('--test_noise_std_max', default = 15, type = float,
                help = 'maximal noise level for testing')
    parser.add_argument('--test_noise_stepsize', default = 5, type = float,
                help = 'Stepsize between test_noise_std_min and test_noise_std_max')
    # Add optimization arguments
    parser.add_argument("--lr", default=1e-3, type=float, help="learning rate")
    parser.add_argument("--lr-gamma", default=0.5, type=float, help="factor by which to reduce learning rate")
    parser.add_argument("--lr-beta", default=2, type=float, help="factor by which to increase learning rate")
    parser.add_argument("--lr-patience", default=5, type=int, help="epochs without improvement before lr decay")
    parser.add_argument("--no_annealing", default=True, type=bool, help="Use lr annealing or not.")
    parser.add_argument("--lr-patience-annealing", default=3, type=int, help="epochs without improvement before lr annealing stops")
    parser.add_argument("--lr-min", default=1e-5, type=float, help="Once we reach this learning rate continue for break_counter many epochs then stop.")
    parser.add_argument("--lr-threshold", default=0.003, type=float, help="Improvements by less than this threshold are not counted for decay patience.")
    parser.add_argument("--break-counter", default=9, type=int, help="Once smallest learning rate is reached, continue for so many epochs before stopping.")
    parser.add_argument("--inital-decay-factor", default=2, type=int, help="After annealing found a lr for which val loss does not improve, go back initial_decay_factor many lrs")
    parser.add_argument("--num-epochs", default=100, type=int, help="force stop training at specified epoch")
    parser.add_argument("--valid-interval", default=1, type=int, help="evaluate every N epochs")
    parser.add_argument("--save-interval", default=1, type=int, help="save a checkpoint every N steps")
    # Add model arguments
    parser = models.unet_fastMRI.add_args(parser)
    parser = utils.add_logging_arguments(parser)
    #args = parser.parse_args()
    # parse_known_args: tolerate unrelated CLI flags (e.g. from notebooks/runners)
    args, _ = parser.parse_known_args()
    # Set arguments specific for this experiment
    # Override parsed defaults with the ee-th value of every swept hyperparameter.
    dargs = vars(args)
    for key in hp.keys():
        dargs[key] = hp[key][ee]
    args.seed = int(42 + 10*rr)
    return args
def f_restore_file(args):
    """Resolve which checkpoint file a resumed run should restore from.

    Scans the experiment directories under ``args.output_dir`` and sets
    ``args.restore_file`` / ``args.experiment_dir`` in place according to
    ``args.restore_mode``:
      * 'best' — checkpoint_best.pt with the highest saved score,
      * 'last' — checkpoint_last.pt with the highest step count,
      * anything else — treated as an explicit path to a checkpoint file.

    Raises:
        ValueError: if no experiment directories exist or restore_mode is unset.
    """
    #available_models = glob.glob(f'{args.output_dir}/{args.experiment}-*')
    available_models = glob.glob(f'{args.output_dir}/*')
    if not available_models:
        raise ValueError('No file to restore')
    if not args.restore_mode:
        raise ValueError("Pick restore mode either 'best' 'last' or '\path\to\checkpoint\dir'")
    if args.restore_mode=='best':
        # Pick the candidate with the best recorded validation score (mode "max").
        mode = "max"
        best_score = float("inf") if mode == "min" else float("-inf")
        best_model = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_best.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                score = state_dict["best_score"]
                if (score < best_score and mode == "min") or (score > best_score and mode == "max"):
                    best_score = score
                    best_model = model_path
                    best_modelp = modelp
                    best_step = state_dict["best_step"]
                    best_epoch = state_dict["best_epoch"]
        args.restore_file = best_model
        args.experiment_dir = best_modelp
        #logging.info(f"Prepare to restore best model {best_model} with PSNR {best_score} at step {best_step}, epoch {best_epoch}")
    elif args.restore_mode=='last':
        # Pick the candidate that has trained for the most steps.
        last_step = -1
        last_model = None
        for modelp in available_models:
            model_path = os.path.join(modelp, "checkpoints/checkpoint_last.pt")
            if os.path.isfile(model_path):
                state_dict = torch.load(model_path, map_location=lambda s, l: default_restore_location(s, "cpu"))
                step = state_dict["last_step"]
                if step > last_step:
                    last_step = step
                    last_model = model_path
                    last_modelp = modelp
                    score = state_dict["score"]
                    last_epoch = state_dict["epoch"]
        args.restore_file = last_model
        args.experiment_dir = last_modelp
        #logging.info(f"Prepare to restore last model {last_model} with PSNR {score} at step {last_step}, epoch {last_epoch}")
    else:
        # Explicit checkpoint path: derive the experiment dir from it.
        args.restore_file = args.restore_mode
        args.experiment_dir = args.restore_mode[:args.restore_mode.find('/checkpoints')]
def infer_images(args):
    """Denoise a few sample images from the selected test set and save them to disk.

    Loads the trained denoiser, adds seeded Gaussian noise (std = args.noise_std/255)
    to the first three images of the chosen file list, and writes ground-truth,
    noisy and denoised versions (PNG and PDF) into ``args.output_dir/test_images``.
    """
    USE_CUDA = True
    device = torch.device('cuda') if (torch.cuda.is_available() and USE_CUDA) else torch.device('cpu')
    net = load_model(args) # the denoiser
    # Per-dataset noise seeds so each test set gets a fixed, distinct noise draw.
    seed_dict = {
        "val":10,
        "test":20,
        "cbsd68":30,
        "urban100":40,
        "mcmaster18":50,
        "kodak24":60,
        "CBSD68":70,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    # Load the test images
    load_path = '../training_set_lists/'
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
        #files_source.sort()
    else:
        files_source = torch.load(load_path+f'{args.test_mode}_filepaths.pt')
    if not os.path.isdir(args.output_dir+'/test_images'):
        os.mkdir(args.output_dir+'/test_images')
    counter = 0
    transformT = transforms.ToTensor()
    transformIm = transforms.ToPILImage()
    for f in files_source:
        counter = counter + 1
        # Only the first three images are exported.
        if counter > 3:
            break
        # Create noise
        ISource = torch.unsqueeze(transformT(Image.open(f).convert("RGB")),0).to(device)
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        INoisy = noise.to(device) + ISource
        # Clamp the network output to the valid image range before export.
        out = torch.clamp(net(INoisy), 0., 1.).cpu()
        out = torch.squeeze(out,0) # Get rid of the 1 in dim 0.
        im = transformIm(out)
        INoisy = torch.clamp(torch.squeeze(INoisy,0), 0., 1.).cpu()
        #INoisy = torch.squeeze(INoisy,0).cpu()
        INoisy = transformIm(INoisy)
        clean_image = Image.open(f).convert("RGB")
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.png')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.png')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.png')
        im.save(args.output_dir+f'/test_images/im{counter}_denoised_notclamped.pdf')
        clean_image.save(args.output_dir+f'/test_images/im{counter}_ground_truth_notclamped.pdf')
        INoisy.save(args.output_dir+f'/test_images/im{counter}_noisy_notclamped.pdf')
| 23,245 | 45.772636 | 182 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/util_calculate_psnr_ssim.py | import cv2
import numpy as np
import torch
# from https://github.com/JingyunLiang/SwinIR/blob/328dda0f4768772e6d8c5aa3d5aa8e24f1ad903b/utils/util_calculate_psnr_ssim.py#L80
def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Peak Signal-to-Noise Ratio between two images with range [0, 255].

    Args:
        img1 (ndarray): Image with range [0, 255].
        img2 (ndarray): Image with range [0, 255].
        crop_border (int): Pixels cropped from every edge before computing PSNR.
        input_order (str): 'HWC' or 'CHW'. Default: 'HWC'.
        test_y_channel (bool): Evaluate on the Y channel of YCbCr. Default: False.

    Returns:
        float: PSNR value (``inf`` for identical images).
    """
    assert img1.shape == img2.shape, (f'Image shapes are differnet: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"')
    # Normalize both images to HWC float64 before any cropping/conversion.
    img1 = reorder_image(img1, input_order=input_order).astype(np.float64)
    img2 = reorder_image(img2, input_order=input_order).astype(np.float64)
    if crop_border != 0:
        region = slice(crop_border, -crop_border)
        img1 = img1[region, region, ...]
        img2 = img2[region, region, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    mse = np.mean((img1 - img2) ** 2)
    # Identical images would divide by zero; report infinite PSNR instead.
    return float('inf') if mse == 0 else 20. * np.log10(255. / np.sqrt(mse))
def _ssim(img1, img2):
    """SSIM for a single-channel pair of [0, 255] images (helper for calculate_ssim).

    Uses an 11x11 Gaussian window (sigma 1.5); the 5-pixel border introduced by
    the filtering is discarded before averaging the SSIM map.
    """
    C1 = (0.01 * 255) ** 2
    C2 = (0.03 * 255) ** 2
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    gauss = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(gauss, gauss.transpose())
    valid = (slice(5, -5), slice(5, -5))
    # Local means, variances and covariance via Gaussian filtering.
    mu1 = cv2.filter2D(a, -1, window)[valid]
    mu2 = cv2.filter2D(b, -1, window)[valid]
    mu1_sq = mu1 ** 2
    mu2_sq = mu2 ** 2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(a ** 2, -1, window)[valid] - mu1_sq
    sigma2_sq = cv2.filter2D(b ** 2, -1, window)[valid] - mu2_sq
    sigma12 = cv2.filter2D(a * b, -1, window)[valid] - mu1_mu2
    numerator = (2 * mu1_mu2 + C1) * (2 * sigma12 + C2)
    denominator = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
    return (numerator / denominator).mean()
def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results are the same as that of the official released MATLAB code in
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.
    For three-channel images, SSIM is calculated for each channel and then
    averaged.

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the SSIM calculation.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.

    Returns:
        float: ssim result.
    """
    # Fix: error message previously read "differnet".
    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
    # Bring both images into HWC float64 layout before comparison.
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    if crop_border != 0:
        # Drop crop_border pixels from every edge; reconstruction artifacts at
        # the border would otherwise bias the score.
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    # Per-channel SSIM, averaged over channels.
    ssims = []
    for i in range(img1.shape[2]):
        ssims.append(_ssim(img1[..., i], img2[..., i]))
    return np.array(ssims).mean()
def reorder_image(img, input_order='HWC'):
    """Reorder an image array into 'HWC' layout.

    A 2-D (h, w) input gains a trailing channel axis; a 'CHW' input is
    transposed to (h, w, c); an 'HWC' input is returned unchanged.

    Args:
        img (ndarray): Input image.
        input_order (str): Layout of *img*, either 'HWC' or 'CHW'.
            Default: 'HWC'.

    Returns:
        ndarray: Image in 'HWC' order.

    Raises:
        ValueError: If *input_order* is neither 'HWC' nor 'CHW'.
    """
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'")
    if img.ndim == 2:
        img = img[..., None]
    if input_order == 'CHW':
        img = img.transpose(1, 2, 0)
    return img
def to_y_channel(img):
    """Extract the Y (luma) channel of a YCbCr conversion.

    Args:
        img (ndarray): Images with range [0, 255].

    Returns:
        (ndarray): Images with range [0, 255] (float type) without round.
    """
    # Work in [0, 1] float32, as expected by bgr2ycbcr.
    scaled = img.astype(np.float32) / 255.
    if scaled.ndim == 3 and scaled.shape[2] == 3:
        # Keep a trailing channel axis so the output stays HWC-shaped.
        scaled = bgr2ycbcr(scaled, y_only=True)[..., None]
    # Map back to the [0, 255] range (no rounding).
    return scaled * 255.
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in colorspace
convertion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in colorspace convertion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr image.

    The bgr version of rgb2ycbcr.
    It implements the ITU-R BT.601 conversion for standard-definition
    television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
    It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: The converted YCbCr image. The output image has the same type
            and range as input image.
    """
    img_type = img.dtype
    # Normalise to float32 in [0, 1]; the original dtype/range is restored below.
    img = _convert_input_type_range(img)
    if y_only:
        # BT.601 luma weights in BGR channel order: (B, G, R) = (24.966, 128.553, 65.481).
        out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        # Full Y/Cb/Cr matrix (columns are Y, Cb, Cr), plus the BT.601 offsets.
        out_img = np.matmul(
            img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
    out_img = _convert_output_type_range(out_img, img_type)
return out_img | 9,023 | 37.564103 | 129 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/metrics.py | import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
def ssim(clean, noisy, normalized=True):
    """Mean SSIM over a batch, via skimage.metrics.structural_similarity.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the range of tensors are [0., 1.] else [0, 255]
    Returns:
        float: SSIM averaged over the batch (a scalar, not one value per image).
    """
    if len(clean.shape)!=4 or len(noisy.shape)!=4:
        raise ValueError("ssim expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        # Rescale [0, 1] inputs to the [0, 255] range the metric is scored on.
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    clean = clean.cpu().detach().numpy().astype(np.float32)
    noisy = noisy.cpu().detach().numpy().astype(np.float32)
    # (B, C, H, W) -> (B, H, W, C): skimage expects channel-last images.
    clean = np.moveaxis(clean,1,-1)
    noisy = np.moveaxis(noisy,1,-1)
    # NOTE(review): `multichannel=` was deprecated/removed in newer
    # scikit-image in favour of `channel_axis=-1` — confirm the pinned version.
    return np.array([structural_similarity(c, n, data_range=255, multichannel=True) for c, n in zip(clean, noisy)]).mean()
def psnr(clean, noisy, normalized=True):
    """Mean PSNR over a batch, via skimage.metrics.peak_signal_noise_ratio.

    Args:
        clean (Tensor): (B, C, H, W)
        noisy (Tensor): (B, C, H, W)
        normalized (bool): If True, the range of tensors are [0., 1.]
            else [0, 255]
    Returns:
        float: PSNR averaged over the batch (a scalar, not one value per image).
    """
    if len(clean.shape)!=4 or len(noisy.shape)!=4:
        raise ValueError("psnr expects clean (Tensor): (B, C, H, W) noisy (Tensor): (B, C, H, W)")
    if normalized:
        # Rescale [0, 1] inputs to the [0, 255] range used by data_range=255.
        clean = clean.mul(255).clamp(0, 255)
        noisy = noisy.mul(255).clamp(0, 255)
    clean = clean.cpu().detach().numpy().astype(np.float32)
    noisy = noisy.cpu().detach().numpy().astype(np.float32)
    return np.array([peak_signal_noise_ratio(c, n, data_range=255) for c, n in zip(clean, noisy)]).mean()
| 1,811 | 36.75 | 122 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/__init__.py | from .train_utils import *
from .main_function_helpers import * | 63 | 31 | 36 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/test_metrics.py | import torch
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
#import cv2
from utils.noise_model import get_noise
from utils.metrics import ssim,psnr
from utils.util_calculate_psnr_ssim import calculate_psnr,calculate_ssim
from skimage import color
import PIL.Image as Image
import torchvision.transforms as transforms
from utils.utils_image import *
metrics_key = ['psnr_m', 'psnr_s', 'psnr_delta_m', 'psnr_delta_s', 'ssim_m', 'ssim_s', 'ssim_delta_m', 'ssim_delta_s'];
def tensor_to_image(torch_image, low=0.0, high=1.0, clamp=True):
    """Return the first channel of the first batch element as a NumPy array.

    Args:
        torch_image (Tensor): batch of images, indexed as [0, 0] for output.
        low, high (float): clamping bounds applied when *clamp* is True.
        clamp (bool): whether to clamp values into [low, high] first.
    """
    out = torch.clamp(torch_image, low, high) if clamp else torch_image
    return out[0, 0].cpu().data.numpy()
def normalize(data):
    """Rescale pixel values from the [0, 255] range to [0, 1]."""
    return data / 255.
def convert_dict_to_string(metrics):
    """Format a metrics dict as 'name: value ' pairs.

    Values are rounded to three decimals; note the result keeps a trailing
    space after the last pair (callers rely on this for plot titles).
    """
    pieces = [f'{name}: {round(value, 3)} ' for name, value in metrics.items()]
    return ''.join(pieces)
def get_all_comparison_metrics(denoised, source, noisy = None, scale=None, return_title_string = False, clamp = True):
    """Compute PSNR/SSIM of *denoised* against *source*, plus the improvement
    over *noisy* when given.

    Args:
        denoised, source, noisy (Tensor): batches of shape (B, C, H, W).
        scale: unused here; kept for call-site compatibility.
        return_title_string (bool): if True, return a formatted string
            (suitable for figure titles) instead of the metrics dict.
        clamp (bool): clamp *denoised* to [0, 1] before scoring.
    """
    metrics = {};
    # NOTE(review): these zero arrays are placeholders that are immediately
    # overwritten with scalars below — effectively dead initialisation.
    metrics['psnr'] = np.zeros(len(denoised))
    metrics['ssim'] = np.zeros(len(denoised))
    if noisy is not None:
        metrics['psnr_delta'] = np.zeros(len(denoised))
        metrics['ssim_delta'] = np.zeros(len(denoised))
    if clamp:
        denoised = torch.clamp(denoised, 0.0, 1.0)
    metrics['psnr'] = psnr(source, denoised);
    metrics['ssim'] = ssim(source, denoised);
    if noisy is not None:
        # Positive delta means the network improved over the noisy input.
        metrics['psnr_delta'] = metrics['psnr'] - psnr(source, noisy);
        metrics['ssim_delta'] = metrics['ssim'] - ssim(source, noisy);
    if return_title_string:
        return convert_dict_to_string(metrics)
    else:
        return metrics
def average_on_folder(args, net, noise_std,
                      verbose=True, device = torch.device('cuda')):
    """Evaluate *net* on the configured val/test file list and return the
    mean/std of PSNR, SSIM and their deltas over all images.

    Args:
        args: namespace providing test_mode ('val'/'test'), test_size/val_size,
            restore_mode, noise_std and path_to_ImageNet_train.
        net: denoising network, called on the noisy input.
        noise_std: NOTE(review) — this parameter is shadowed below; the noise
            level actually used is args.noise_std. Confirm which is intended.
        verbose: print per-image metrics when True.
        device: device used for inference.

    Returns:
        dict: keys '<metric>_m' (mean) and '<metric>_s' (std) for
        psnr, psnr_delta, ssim, ssim_delta.
    """
    #if verbose:
        #print('Loading data info ...\n')
    print(f'\n Dataset: {args.test_mode}, Restore mode: {args.restore_mode}')
    load_path = '../training_set_lists/'
    # Fixed RNG seed per split so the noise realisations are reproducible.
    seed_dict = {
        "val":10,
        "test":20,
    }
    gen = torch.Generator()
    gen = gen.manual_seed(seed_dict[args.test_mode])
    if args.test_mode == 'test':
        files_source = torch.load(load_path+f'ImageNetTest{args.test_size}_filepaths.pt')
        #files_source.sort()
    elif args.test_mode == 'val':
        files_source = torch.load(load_path+f'ImageNetVal{args.val_size}_filepaths.pt')
        #files_source.sort()
    avreage_metrics_key = ['psnr', 'psnr_delta', 'ssim', 'ssim_delta']
    avg_metrics = {};
    for x in avreage_metrics_key:
        avg_metrics[x] = [];
    psnr_list = []
    ssim_list = []
    #print(files_source)
    for f in files_source:
        transformT = transforms.ToTensor()
        ISource = torch.unsqueeze(transformT(Image.open(args.path_to_ImageNet_train + f).convert("RGB")),0).to(device)
        if args.test_mode == 'val':
            # Validation uses a per-image seed derived from the filename digits
            # so each image always receives the same noise realisation.
            noise_seed = int(f[f.find('train/')+17:-5].replace('_',''))
            gen = gen.manual_seed(noise_seed)
        noise = torch.randn(ISource.shape,generator = gen) * args.noise_std/255.
        INoisy = noise.to(device) + ISource
        out = torch.clamp(net(INoisy), 0., 1.)
        ind_metrics = get_all_comparison_metrics(out, ISource, INoisy, return_title_string = False)
        for x in avreage_metrics_key:
            avg_metrics[x].append(ind_metrics[x])
        if(verbose):
            print("%s %s" % (f, convert_dict_to_string(ind_metrics)))
    # Aggregate per-image scores into mean/std summary statistics.
    metrics = {}
    for x in avreage_metrics_key:
        metrics[x+'_m'] = np.mean(avg_metrics[x])
        metrics[x+'_s'] = np.std(avg_metrics[x])
    if verbose:
        print("\n Average %s" % (convert_dict_to_string(metrics)))
    #if(not verbose):
    return metrics
def metrics_avg_on_noise_range(net, args, noise_std_array, device = torch.device('cuda')):
    """Evaluate *net* at every noise level in *noise_std_array*.

    Returns a dict mapping each name in the module-level `metrics_key` list to
    an array with one summary value per noise level.
    """
    array_metrics = {}
    for x in metrics_key:
        array_metrics[x] = np.zeros(len(noise_std_array))
    for j, noise_std in enumerate(noise_std_array):
        metric_list = average_on_folder(args, net,
                                        noise_std = noise_std,
                                        verbose=False, device=device);
        for x in metrics_key:
            # Slots start at zero, so += stores the single summary value.
            array_metrics[x][j] += metric_list[x]
            print('noise: ', int(noise_std*255), ' ', x, ': ', str(array_metrics[x][j]))
    return array_metrics
| 4,464 | 29.582192 | 119 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/noise_model.py | import torch
def get_noise(data, noise_seed, fix_noise, noise_std = float(25)/255.0):
    """Draw Gaussian noise with standard deviation *noise_std*, shaped like *data*.

    Args:
        data (Tensor): reference batch of shape (B, ...) whose shape the noise copies.
        noise_seed: per-sample integer seed tensors (indexable, length >= B);
            only used when fix_noise is True.
        fix_noise (bool): if True, re-seed a CUDA generator per sample so the
            same seeds always reproduce the same noise; if False, draw from the
            global RNG.
        noise_std (float): noise standard deviation (default 25/255 for [0,1] images).
    """
    if fix_noise:
        # NOTE(review): hard-codes CUDA — fails on CPU-only machines; confirm intended.
        device = torch.device('cuda')
        gen = torch.Generator(device=device)
        batch_size = data.size(dim=0)
        tensor_dim = list(data.size())[1:]
        for i in range(0,batch_size):
            # Re-seed before every sample so each sample's noise depends only
            # on its own seed, not on batch order.
            gen = gen.manual_seed(noise_seed[i].item())
            noise = torch.randn(tensor_dim,generator = gen, device=device) * noise_std
            noise = torch.unsqueeze(noise,0)
            if i == 0:
                noise_tensor = noise
            else:
                noise_tensor = torch.cat((noise_tensor, noise),0)
        noise = noise_tensor
        #noise = torch.randn(data.shape,generator = gen, device=device) * noise_std
    else:
        noise = torch.randn_like(data)
        noise.data = noise.data * noise_std
return noise | 880 | 31.62963 | 87 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/meters.py | import time
import torch
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* (a total over *n* items); tensors are unwrapped first."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.count += n
        self.sum += val
        self.val = val / n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a metric with configurable momentum."""

    def __init__(self, momentum=0.98):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history; the next update seeds the average."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold *val* into the EMA; tensors are unwrapped to floats first."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        # The very first value initialises the average directly.
        self.avg = val if self.val is None else self.avg * self.momentum + val * (1 - self.momentum)
        self.val = val
class TimeMeter(object):
    """Counts events and reports the rate of events per elapsed second."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        """Restart the clock, optionally crediting *init* seconds upfront."""
        self.init = init
        self.start = time.time()
        self.n = 0

    def update(self, val=1):
        """Add *val* events to the counter."""
        self.n += val

    @property
    def avg(self):
        """Events per second since the last reset."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds since reset, plus the initial credit."""
        return self.init + (time.time() - self.start)
| 1,321 | 20.322581 | 75 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/Image_denoising_figure2/Noise2Noise/utils/data_helpers/load_datasets_helpers.py | import os
import os.path
import numpy as np
import h5py
import torch
import torchvision.transforms as transforms
import PIL.Image as Image
from utils.utils_image import *
class ImagenetSubdataset(torch.utils.data.Dataset):
    """Dataset over a fixed, precomputed list of ImageNet train files.

    mode='train' center-crops every image to patch_size; mode='val' crops only
    when val_crop is True. Each item also carries a deterministic per-image
    noise seed derived from the file name, so noise realisations are
    reproducible across runs.
    """
    def __init__(self, size, path_to_ImageNet_train, mode='train', patch_size='128', val_crop=True):
        super().__init__()
        # File lists are precomputed and stored relative to the script location.
        load_path = '../training_set_lists/'
        self.path_to_ImageNet_train = path_to_ImageNet_train
        if mode=='train':
            self.files = torch.load(load_path+f'trsize{size}_filepaths.pt')
            self.transform = transforms.Compose([
                transforms.CenterCrop(patch_size),
                transforms.ToTensor(),
            ])
        elif mode=='val':
            self.files = torch.load(load_path+f'ImageNetVal{size}_filepaths.pt')
            #print(self.files)
            if val_crop:
                self.transform = transforms.Compose([
                    transforms.CenterCrop(patch_size),
                    transforms.ToTensor(),
                ])
            else:
                self.transform = transforms.Compose([
                    transforms.ToTensor(),
                ])
        # Map file stem -> integer seed built from the digits of the filename.
        # assumes paths look like 'train/nXXXXXXXX/nXXXXXXXX_YYYY.JPEG' so the
        # fixed offsets +16/+17 land on the stem — TODO confirm for all entries.
        self.noise_seeds = {}
        for i, file in enumerate(self.files):
            key = file[file.find('train/')+16:-5]
            number = int(file[file.find('train/')+17:-5].replace('_',''))
            self.noise_seeds[key] = number

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        file = self.files[index]
        key = file[file.find('train/')+16:-5]
        noise_seed = self.noise_seeds[key]
        image = Image.open(self.path_to_ImageNet_train + self.files[index]).convert("RGB") #ImageNet contains some grayscale images
        data = self.transform(image)
        return data, noise_seed
| 1,949 | 32.62069 | 131 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/train_network_for_histogram.py | # %%
import torch
import h5py
import numpy as np
import os
import yaml
import logging
import glob
import random
import pickle
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
from torch.nn import MSELoss
import copy
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import sys
from tqdm import tqdm
import torchvision.transforms as transforms
import PIL.Image as Image
from skimage.transform import resize
from CS_natural_images_functions.unet import Unet
from CS_natural_images_functions.fftc import fft2c, ifft2c
from CS_natural_images_functions.losses import SSIMLoss
from CS_natural_images_functions.progress_bar import ProgressBar, init_logging, AverageMeter, TrackMeter, TrackMeter_testing
from CS_natural_images_functions.log_progress_helpers import save_figure, add_img_to_tensorboard, save_test_image_with_dc
from CS_natural_images_functions.load_save_model_helpers import setup_experiment_or_load_checkpoint, save_checkpoint
from CS_natural_images_functions.data_transforms import UnetDataTransform
from CS_natural_images_functions.data_transforms import compute_number_of_lines_in_input_target_kspace
# %%
class CropDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset of grayscale, center-cropped ImageNet images used as
    stand-ins for MR magnitude images.

    All images are loaded and moved to the device once in __init__; each item
    is then transformed twice, with the acceleration_total=3.0 and =3.5
    undersampling transforms, so both target masks are available per sample.
    """
    def __init__(
        self,
        dataset: List,
        path_to_ImageNet_train: str,
        transforms_list: List,
        experiment_path: str,
        img_size: int,
    ):
        """
        Args:
            dataset: List of ImageNet-relative file paths for this split.
            path_to_ImageNet_train: Path to the ImageNet train directory.
            transforms_list: Two UnetDataTransforms: index 0 with
                acceleration_total=3.0, index 1 with 3.5.
            experiment_path: Output directory of the current experiment.
            img_size: Side length of the center crop.
        """
        self.transform_30 = transforms_list[0]
        self.transform_35 = transforms_list[1]
        self.experiment_path = experiment_path
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # Store downsampled ground truth training images here
        self.examples = []
        load_transform = transforms.Compose([
            transforms.CenterCrop(img_size),
            transforms.ToTensor(),
        ])
        # Load mri magnitude images, downsample and store
        for datapath in dataset:
            # Grayscale conversion ("L") yields a single-channel magnitude image.
            image = Image.open(path_to_ImageNet_train+datapath).convert("L")
            # assumes the path prefix is 16 characters and the '.JPEG' suffix 5 —
            # TODO confirm this slicing for every dataset entry.
            filename = datapath[16:-5]
            self.examples.append((load_transform(image)[0].to(device),filename))

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i: int):
        # Determine input, target and ground truth
        x,filename = self.examples[i]
        y_input, x_input, y_target, x_target, x_gt, input_mask, target_mask, mean, std, fname = self.transform_30(x,filename,i)
        # Only the 3.5x target mask is needed from the second transform.
        _, _, _, _, _, _, target_mask_35, _, _, _ = self.transform_35(x,filename,i)
        return y_input, x_input, y_target, x_target, x_gt, input_mask, target_mask, mean, std, fname,target_mask_35
# %%
def read_args():
    """Parse CLI arguments, then launch one training run per entry in the
    hard-coded `hyperparameters` table (which overrides the CLI values).

    NOTE(review): --training/--testing combine `default=` with
    `action='store_false'`; as written, --testing defaults to False and the
    flag cannot enable it — confirm this is intended.
    """
    parser = ArgumentParser()
    parser.add_argument(
        '--path_to_ImageNet_train',
        type=str,
        help='Path to ImageNet train directory.',
        required=True
    )
    parser.add_argument(
        '--training',
        default=True,
        action='store_false',
        help='Add this flag to disable training.'
    )
    parser.add_argument(
        '--testing',
        default=False,
        action='store_false',
        help='Add this flag to disable testing.'
    )
    parser.add_argument(
        '--experiment_number',
        default='300',
        type=str,
        help='Set consecutive numbering for the experiments.'
    )
    parser.add_argument(
        '--gpu',
        choices=(0, 1, 2, 3),
        default=1,
        type=int,
        help='Pick one out of four gpus.'
    )
    parser.add_argument(
        '--seed',
        default=0,
        type=int,
        help='Set seed for network initialization.'
    )
    parser.add_argument(
        '--trainset_size',
        choices=(50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000, 250000, 500000, 1000000),
        default=50,
        type=int,
        help='Set training set size.'
    )
    parser.add_argument(
        '--img_size',
        default=100,
        type=int,
        help='Set img_size for downsampling.'
    )
    parser.add_argument(
        '--num_epochs',
        default=1000,
        type=int,
        help='Set number of training epochs.'
    )
    parser.add_argument(
        '--acceleration',
        default=4.0,
        type=float,
        help='Undersampling of training and test inputs.'
    )
    parser.add_argument(
        '--center_fraction',
        default=0.08,
        type=float,
        help='Fraction of lines that are always sample from the center (input and target). Set to 0.0 for sampling all lines randomly.'
    )
    parser.add_argument(
        '--fix_split',
        default=True,
        action='store_true',
        help='Add this flag to set fix_split=True for fixed input target split for self-supervised and fixed input for supervised training.'
    )
    args = parser.parse_args()
    # One entry per experiment; every hyperparameter list below must have the
    # same length as exp_nums.
    exp_nums = ['992']
    # validation every second epoch
    # lr decay on plateau
    hyperparameters = {
        #'acceleration_total' : [
        #3.0,
        #],
        'trainset_size' : [
        10000,
        ],
        'center_fraction' : [
        0.08,
        ],
        'seed' : [
        1,
        ],
        'fix_split' : [
        True,
        ],
        'num_epochs' : [
        2,
        ],
        'patience' : [ # as we validate every second epoch a patience of 10 actually means 20 epochs
        15,
        ]
    }
    # Sanity checks
    for key in hyperparameters.keys():
        if len(hyperparameters[key]) != len(exp_nums):
            print(key)
            raise ValueError("Specify hyperparameters for every experiment!")
    for i in range(len(exp_nums)):
        # Override the CLI values with this experiment's table entry.
        args.trainset_size = hyperparameters['trainset_size'][i]
        args.center_fraction = hyperparameters['center_fraction'][i]
        args.seed = hyperparameters['seed'][i]
        args.fix_split = hyperparameters['fix_split'][i]
        args.num_epochs = hyperparameters['num_epochs'][i]
        args.patience = hyperparameters['patience'][i]
        args.experiment_number = exp_nums[i]
        # Build a self-describing output directory name.
        experiment_name = f"N{args.experiment_number}_t{args.trainset_size}_"
        experiment_name+="sup_VS_ss3035_"
        if args.center_fraction==0.0:
            experiment_name+="RandCenter_"
        else:
            experiment_name+="FixCenter_"
        experiment_name+="grad_diff_"
        experiment_name += f"run{args.seed}"
        experiment_path = experiment_name+"/"
        #dataset_path = f"../datasets/train_{args.trainset_size}_selfsup_slice.yaml"
        # Pin the whole run to the selected GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
        if args.training:
            run_training(experiment_path=experiment_path,
                        acceleration=args.acceleration,
                        center_fraction=args.center_fraction,
                        seed=args.seed,
                        img_size=args.img_size,
                        fix_split=args.fix_split,
                        num_epochs=args.num_epochs,
                        patience=args.patience,
                        trainset_size=args.trainset_size,
                        path_to_ImageNet_train=args.path_to_ImageNet_train)
################################################################################################
def run_training(experiment_path,
                acceleration,
                center_fraction,
                seed,
                img_size,
                fix_split,
                num_epochs,
                patience,
                trainset_size,
                path_to_ImageNet_train):
    """Train a U-Net on undersampled natural images while periodically
    measuring how far stochastic supervised and self-supervised gradients
    deviate from a full-dataset 'true risk' gradient.

    Per epoch (at compute_gradients_interval): a frozen copy of the model is
    used to (1) accumulate the mean supervised gradient over the whole train
    set, then (2) compare per-sample supervised / self-supervised (3.0x and
    3.5x target masks) gradients against it, pickling the statistics; finally
    one ordinary supervised training epoch updates the live model.

    NOTE(review): the `patience` argument is accepted but not used in this
    function body — confirm whether early stopping was removed intentionally.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # Deterministic cuDNN so gradient statistics are reproducible.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set seed
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Create directory that holds train files
    if not os.path.isdir(experiment_path):
        os.mkdir(experiment_path)
    else:
        print(experiment_path)
        #raise ValueError("Experiment already exists!!")
        print("Warning: Experiment already exists!!")
    # Init train.log file
    init_logging(experiment_path)
    logging.info("Training...")
    # train loss function
    loss_fct = MSELoss(reduction='sum')
    #val_ssim_fct = SSIMLoss()
    # Init model
    model = Unet(
        in_chans=2,
        out_chans=2,
        chans=24,
        num_pool_layers=3,
        drop_prob=0.0,).to(device)
    # Init optimizer and scheduler
    optimizer = torch.optim.Adam( params=model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, amsgrad=False)
    # Load train set
    train_pool = torch.load('CS_natural_images_functions/training_set_lists/trsize1000000_filepaths.pt')
    # These files have zero norm after grayscale conversion and would produce
    # division-by-zero in the normalised loss, so they are excluded.
    zero_norm_files = ['train/n03729826/n03729826_6483.JPEG',
                        'train/n04515003/n04515003_24673.JPEG',
                        'train/n02111277/n02111277_12490.JPEG',
                        'train/n03888605/n03888605_9775.JPEG',
                        'train/n02992529/n02992529_3197.JPEG',
                        'train/n01930112/n01930112_18908.JPEG',
                        'train/n06874185/n06874185_3219.JPEG',
                        'train/n06785654/n06785654_17232.JPEG',
                        'train/n04033901/n04033901_29617.JPEG',
                        'train/n07920052/n07920052_14729.JPEG',
                        'train/n03729826/n03729826_40479.JPEG',
                        'train/n03729826/n03729826_10716.JPEG',
                        'train/n04286575/n04286575_74296.JPEG',
                        'train/n03937543/n03937543_10198.JPEG',
                        'train/n03063599/n03063599_3942.JPEG',
                        'train/n04152593/n04152593_13802.JPEG',
                        'train/n04522168/n04522168_24105.JPEG',
                        'train/n03532672/n03532672_78983.JPEG',
                        'train/n04404412/n04404412_12316.JPEG',
                        'train/n04330267/n04330267_18003.JPEG',
                        'train/n04118776/n04118776_37671.JPEG',
                        'train/n04591713/n04591713_3568.JPEG',
                        'train/n02437616/n02437616_12697.JPEG',
                        'train/n02799071/n02799071_54867.JPEG',
                        'train/n02883205/n02883205_26196.JPEG',
                        'train/n02667093/n02667093_2919.JPEG',
                        'train/n03196217/n03196217_1135.JPEG',
                        'train/n03196217/n03196217_3568.JPEG',
                        'train/n15075141/n15075141_19601.JPEG',
                        'train/n01943899/n01943899_24166.JPEG']
    for zero_norm_file in zero_norm_files:
        train_pool.remove(zero_norm_file)
    # Seeded subsampling so each run's train set is reproducible; the chosen
    # file list is saved alongside the experiment outputs.
    rng_dataset = np.random.default_rng(seed)
    train_set = rng_dataset.choice(train_pool, size=trainset_size, replace=False, p=None)
    torch.save(train_set,experiment_path+'train_set.pt')
    # Train loader
    data_transform_train_35 = UnetDataTransform(acceleration=acceleration,acceleration_total=3.5, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
    data_transform_train_30 = UnetDataTransform(acceleration=acceleration,acceleration_total=3.0, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
    trainset = CropDataset(dataset=train_set, path_to_ImageNet_train=path_to_ImageNet_train, transforms_list=[data_transform_train_30,data_transform_train_35], experiment_path=experiment_path, img_size=img_size)
    train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=1, num_workers=0, shuffle=True, generator=torch.Generator().manual_seed(0))
    # store training loss metrics
    train_meters = {'train_L2': AverageMeter()}
    # Per-sample gradient deviation statistics, re-collected at every
    # histogram epoch and pickled to disk.
    sup_diff_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
    ss_diff_30_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
    ss_diff_35_tracks = {'divide_by_norm_of_risk_grad': TrackMeter_testing(), 'take_mse': TrackMeter_testing()}
    # Init tensorboard
    #writer = SummaryWriter(log_dir=experiment_path)
    #log_image_interval_tb = 10
    # when to compute gradient histograms
    compute_gradients_interval = 1
    # Start training
    break_counter=0
    for epoch in range(num_epochs):
        # compute gradient histogrms
        if epoch % compute_gradients_interval == 0:
            # Work on a frozen copy so histogram passes do not perturb the
            # live model's parameters or optimizer state.
            model_copy = copy.deepcopy(model)
            train_bar_hist = ProgressBar(train_loader, epoch)
            model_copy.train()
            for meter in sup_diff_tracks.values():
                meter.reset()
            for meter in ss_diff_30_tracks.values():
                meter.reset()
            for meter in ss_diff_35_tracks.values():
                meter.reset()
            # estimate ground truth gradient based on whole dataset
            for id,sample in enumerate(train_bar_hist):
                y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
                # prediction
                x_output = torch.moveaxis(model_copy(torch.moveaxis( x_input , -1, 1 )), 1, -1)
                # unnormalize
                x_output = x_output * std + mean
                # move to kspace
                y_output_sup = fft2c(x_output)
                # apply target mask (all ones for supervised training)
                y_output_ss_30 = y_output_sup * target_mask_30 + 0.0
                y_output_ss_35 = y_output_sup * target_mask_35 + 0.0
                y_target_sup = fft2c(x)
                y_target_ss_30 = y_target_sup * target_mask_30 + 0.0
                y_target_ss_35 = y_target_sup * target_mask_35 + 0.0
                # Log k-space magnitude figures once per histogram pass.
                save_figure(torch.log(torch.abs(y_input[0,:,:,0].detach().cpu())+ 1e-9),"y_input_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_output_sup[0,:,:,0].detach().cpu())+ 1e-9),"y_output_sup_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_output_ss_30[0,:,:,0].detach().cpu())+ 1e-9),"y_output_ss_30_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_output_ss_35[0,:,:,0].detach().cpu())+ 1e-9),"y_output_ss_35_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_target_sup[0,:,:,0].detach().cpu())+ 1e-9),"y_target_sup_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_target_ss_30[0,:,:,0].detach().cpu())+ 1e-9),"y_target_ss_30_real",experiment_path) if id==0 else None
                save_figure(torch.log(torch.abs(y_target_ss_35[0,:,:,0].detach().cpu())+ 1e-9),"y_target_ss_35_real",experiment_path) if id==0 else None
                # compute loss
                train_loss_sup = loss_fct(y_output_sup,y_target_sup) / torch.sum(torch.abs(y_target_sup)**2)
                #train_loss_ss_30 = loss_fct(y_output_ss_30,y_target_ss_30) / torch.sum(torch.abs(y_target_ss_30)**2)
                #train_loss_ss_35 = loss_fct(y_output_ss_35,y_target_ss_35) / torch.sum(torch.abs(y_target_ss_35)**2)
                param = list(model_copy.parameters())
                model_copy.zero_grad()
                train_loss_sup.backward(retain_graph=True)
                # Accumulate the supervised gradient of every sample into
                # p.grad_true_risk; averaged below to estimate the risk gradient.
                if id == 0:
                    for p in param:
                        p.grad_true_risk = p.grad
                        p.grad = None
                else:
                    for p in param:
                        p.grad_true_risk += p.grad
                        p.grad = None
            for p in param:
                p.grad_true_risk = p.grad_true_risk/len(train_loader)
            # compute stochastic supervised and self-supervised gradients based on the same dataset
            train_bar_hist = ProgressBar(train_loader, epoch)
            for id,sample in enumerate(train_bar_hist):
                y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
                # prediction
                x_output = torch.moveaxis(model_copy(torch.moveaxis( x_input , -1, 1 )), 1, -1)
                # unnormalize
                x_output = x_output * std + mean
                # move to kspace
                y_output_sup = fft2c(x_output)
                # apply target mask (all ones for supervised training)
                y_output_ss_30 = y_output_sup * target_mask_30 + 0.0
                y_output_ss_35 = y_output_sup * target_mask_35 + 0.0
                y_target_sup = fft2c(x)
                y_target_ss_30 = y_target_sup * target_mask_30 + 0.0
                y_target_ss_35 = y_target_sup * target_mask_35 + 0.0
                # compute loss
                train_loss_sup = loss_fct(y_output_sup,y_target_sup) / torch.sum(torch.abs(y_target_sup)**2)
                train_loss_ss_30 = loss_fct(y_output_ss_30,y_target_ss_30) / torch.sum(torch.abs(y_target_ss_30)**2)
                train_loss_ss_35 = loss_fct(y_output_ss_35,y_target_ss_35) / torch.sum(torch.abs(y_target_ss_35)**2)
                param = list(model_copy.parameters())
                model_copy.zero_grad()
                # Three separate backward passes stash the per-sample gradients
                # for the supervised and both self-supervised losses.
                train_loss_sup.backward(retain_graph=True)
                for p in param:
                    p.grad_sup = p.grad
                    p.grad = None
                train_loss_ss_30.backward(retain_graph=True)
                for p in param:
                    p.grad_ss_30 = p.grad
                    p.grad = None
                train_loss_ss_35.backward(retain_graph=True)
                for p in param:
                    p.grad_ss_35 = p.grad
                    p.grad = None
                # Squared distance of each stochastic gradient to the estimated
                # risk gradient, plus the risk gradient's squared norm.
                diff_sup = torch.zeros(1).to(device)
                diff_ss_30 = torch.zeros(1).to(device)
                diff_ss_35 = torch.zeros(1).to(device)
                norm_grad_of_risk = torch.zeros(1).to(device)
                for p in param:
                    diff_sup += torch.sum(torch.square(torch.sub(p.grad_sup,p.grad_true_risk)))
                    diff_ss_30 += torch.sum(torch.square(torch.sub(p.grad_ss_30,p.grad_true_risk)))
                    diff_ss_35 += torch.sum(torch.square(torch.sub(p.grad_ss_35,p.grad_true_risk)))
                    norm_grad_of_risk += torch.sum(torch.square(p.grad_true_risk))
                sup_diff_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_sup,norm_grad_of_risk).item())
                sup_diff_tracks['take_mse'].update(torch.mean(diff_sup).item())
                ss_diff_30_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_ss_30,norm_grad_of_risk).item())
                ss_diff_30_tracks['take_mse'].update(torch.mean(diff_ss_30).item())
                ss_diff_35_tracks['divide_by_norm_of_risk_grad'].update(torch.div(diff_ss_35,norm_grad_of_risk).item())
                ss_diff_35_tracks['take_mse'].update(torch.mean(diff_ss_35).item())
            # Persist this epoch's gradient-deviation statistics.
            pickle.dump( sup_diff_tracks, open(experiment_path + f"sup_diff_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
            pickle.dump( ss_diff_30_tracks, open(experiment_path + f"ss_diff_30_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
            pickle.dump( ss_diff_35_tracks, open(experiment_path + f"ss_diff_35_tracks_ep{epoch}.pkl", "wb" ) , pickle.HIGHEST_PROTOCOL )
        # perform one training epoch
        train_bar = ProgressBar(train_loader, epoch)
        for meter in train_meters.values():
            meter.reset()
        for id,sample in enumerate(train_bar):
            model.train()
            y_input, x_input, y_target, x_target, x, input_mask, target_mask_30, mean, std, fname, target_mask_35 = sample
            # prediction
            x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
            # unnormalize
            x_output = x_output * std + mean
            # move to kspace
            y_output = fft2c(x_output)
            # apply target mask (all ones for supervised training)
            # DO SUPERVISED TRAINING HERE
            #y_output = y_output #* target_mask + 0.0
            y_target = fft2c(x)
            # compute loss
            train_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
            model.zero_grad()
            train_loss.backward()
            optimizer.step()
            # log train metrics
            train_meters['train_L2'].update(train_loss.item())
            train_bar.log(dict(**train_meters), verbose=True)
################################################################################################
if __name__ == '__main__':
    # Script entry point: parse arguments / hyperparameter table and launch training.
    read_args()
# %%
| 21,063 | 36.681574 | 211 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/run_CS_natural_images.py | # %%
import torch
import h5py
import numpy as np
import os
import yaml
import logging
import glob
import json
import random
import pickle
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
from torch.nn import MSELoss
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import sys
from CS_natural_images_functions.unet import Unet
from CS_natural_images_functions.fftc import fft2c, ifft2c
from CS_natural_images_functions.losses import SSIMLoss
from CS_natural_images_functions.progress_bar import ProgressBar, init_logging, AverageMeter, TrackMeter, TrackMeter_testing
from CS_natural_images_functions.log_progress_helpers import save_figure, add_img_to_tensorboard, save_test_image_with_dc
from CS_natural_images_functions.load_save_model_helpers import setup_experiment_or_load_checkpoint, save_checkpoint
from CS_natural_images_functions.data_transforms import UnetDataTransform, CropDataset
from CS_natural_images_functions.data_transforms import compute_number_of_lines_in_input_target_kspace
# %%
def read_args():
    """Parse the command line, merge in the JSON experiment config, and run
    training/testing once per seed.

    The config file (in ``experiment_configs/``) overrides the CLI values for
    acceleration_total, acceleration, trainset_size, val_epoch_interval and
    patience, and supplies the seed list.
    """
    parser = ArgumentParser()
    # Required arguments
    parser.add_argument(
        '--config_file',
        type=str,
        help='Name of a config file in the experiment_configs folder.',
        required=True
    )
    parser.add_argument(
        '--path_to_ImageNet_train',
        type=str,
        help='Path to ImageNet train directory.',
        required=True
    )
    parser.add_argument(
        '--experiment_number',
        type=str,
        help="Set a unique identifier for the folder containing the experimental results. Start number with '001'. ",
        required=True
    )
    parser.add_argument(
        '--run_which_seeds',
        type=str,
        choices=('run_best_seed','run_all_seeds'),
        help='Choose to run either only the best seed or all seeds shown in our results.',
        required=True
    )
    # Optional arguments
    parser.add_argument(
        '--training',
        default=True,
        action='store_false',
        help='Add this flag to disable training.'
    )
    parser.add_argument(
        '--testing',
        default=True,
        action='store_false',
        help='Add this flag to disable testing.'
    )
    parser.add_argument(
        '--gpu',
        choices=(0, 1, 2, 3),
        default=3,
        type=int,
        help='Pick one out of four gpus.'
    )
    parser.add_argument(
        '--trainset_size',
        choices=(50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100000, 250000, 500000, 1000000),
        default=50,
        type=int,
        help='Set training set size.'
    )
    parser.add_argument(
        '--img_size',
        default=100,
        type=int,
        help='Set img_size for downsampling.'
    )
    parser.add_argument(
        '--num_epochs',
        default=1000,
        type=int,
        help='Set number of training epochs.'
    )
    parser.add_argument(
        '--val_epoch_interval',
        default=2,
        type=int,
        help='Set how often the validation loss is computed.'
    )
    parser.add_argument(
        '--patience',
        default=10,
        type=int,
        help='Patience parameter for the learning rate scheduler.'
    )
    parser.add_argument(
        '--acceleration',
        default=4.0,
        type=float,
        help='Undersampling of training and test inputs.'
    )
    parser.add_argument(
        '--acceleration_total',
        default=1.0,
        type=float,
        help='Undersampling of data available for input target split. Set to 1 for supervised training.'
    )
    parser.add_argument(
        '--center_fraction',
        default=0.08,
        type=float,
        help='Fraction of lines that are always sample from the center (input and target). Set to 0.0 for sampling all lines randomly.'
    )
    # NOTE(review): with action='store_true' and default=True this flag can
    # never be set to False from the CLI — confirm whether that is intended.
    parser.add_argument(
        '--fix_split',
        default=True,
        action='store_true',
        help='Add this flag to set use_seed=True for fixed input target split for self-supervised and fixed input for supervised training.'
    )
    args = parser.parse_args()
    # Values from the JSON config override the corresponding CLI arguments.
    with open("experiment_configs/"+args.config_file) as handle:
        config_file = json.load(handle)
    args.acceleration_total = config_file['acceleration_total']
    args.acceleration = config_file['acceleration']
    args.trainset_size = config_file['trainset_size']
    args.val_epoch_interval = config_file['val_epoch_interval']
    args.patience = config_file['patience']
    if args.run_which_seeds == 'run_best_seed':
        seeds = [config_file['best_seed']]
    elif args.run_which_seeds == 'run_all_seeds':
        seeds = config_file['all_seeds']
    for seed in seeds:
        # The experiment folder name encodes trainset size, supervision mode,
        # input-split mode and seed, e.g. "N001_t50_sup_fixInput_run0/".
        experiment_name = f"N{args.experiment_number}_t{args.trainset_size}_"
        if args.acceleration_total==1.0:
            experiment_name+="sup_"
        else:
            experiment_name+="selfsup_"
        if args.fix_split:
            experiment_name+="fixInput_"
        else:
            experiment_name+="RandInput_"
        experiment_name += f"run{seed}"
        experiment_path = experiment_name+"/"
        # Restrict visible devices so torch.device('cuda') targets the chosen GPU.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
        if args.training:
            run_training(experiment_path=experiment_path,
                         acceleration=args.acceleration,
                         center_fraction=args.center_fraction,
                         acceleration_total=args.acceleration_total,
                         seed=seed,
                         img_size=args.img_size,
                         fix_split=args.fix_split,
                         val_epoch_interval=args.val_epoch_interval,
                         num_epochs=args.num_epochs,
                         patience=args.patience,
                         trainset_size=args.trainset_size,
                         path_to_ImageNet_train=args.path_to_ImageNet_train)
        if args.testing:
            run_testing(experiment_path=experiment_path,
                        acceleration=args.acceleration,
                        center_fraction=args.center_fraction,
                        acceleration_total=args.acceleration_total,
                        img_size=args.img_size,
                        path_to_ImageNet_train=args.path_to_ImageNet_train)
################################################################################################
def run_training(experiment_path,
                 acceleration,
                 center_fraction,
                 acceleration_total,
                 seed,
                 img_size,
                 fix_split,
                 val_epoch_interval,
                 num_epochs,
                 patience,
                 trainset_size,
                 path_to_ImageNet_train):
    """Train a U-Net to reconstruct undersampled natural-image "k-space" data.

    The loss is a normalized kspace MSE between the masked FFT of the network
    output and the (masked) target kspace; for supervised training the target
    mask is all ones. Validation computes kspace L2, image-domain L2, PSNR and
    SSIM every ``val_epoch_interval`` epochs; the best-PSNR model is saved as
    checkpoint_best.pt and metric histories are pickled at the end.

    Args:
        experiment_path: Directory all artifacts (logs, checkpoints, pickles)
            are written to; created if missing.
        acceleration: Undersampling factor of the network input.
        center_fraction: Fraction of always-sampled low-frequency lines.
        acceleration_total: Undersampling of the data available for the
            input/target split (1.0 means supervised training).
        seed: Seed for torch/numpy/random and the trainset subsampling.
        img_size: Side length of the square image crops.
        fix_split: Whether the input/target split is fixed across epochs.
        val_epoch_interval: Validate every this many epochs.
        num_epochs: Maximum number of training epochs.
        patience: Initial patience of the ReduceLROnPlateau scheduler.
        trainset_size: Number of training images drawn from the 1M-image pool.
        path_to_ImageNet_train: Root directory of the ImageNet train split.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set seed
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Create directory that holds train files
    if not os.path.isdir(experiment_path):
        os.mkdir(experiment_path)
    else:
        print(experiment_path)
        #raise ValueError("Experiment already exists!!")
        print("Warning: Experiment already exists!!")
    # Init train.log file
    init_logging(experiment_path)
    logging.info("Training...")
    # Log sanity checks on the number of lines in the input/target kspaces
    input_size, target_size, overlap_size_high, size_low, p, q, mu, nu, weight_on_random_lines = compute_number_of_lines_in_input_target_kspace(p=1/acceleration,mu=1/acceleration_total,nu=center_fraction, n=img_size)
    logging.info(f"mu: {mu}, p: {p}, q: {q}, nu: {nu}, weight_on_random_lines: {weight_on_random_lines}")
    logging.info(f"\n Lines in kspace: {img_size} \n Lines in input: {input_size} \n Lines in target: {target_size} \n Number of high freq overlapping lines: {overlap_size_high} \n Number of low freq lines: {size_low}")
    # train loss function
    loss_fct = MSELoss(reduction='sum')
    val_ssim_fct = SSIMLoss()
    # Init model (2 in/out channels: real and imaginary part)
    model = Unet(
        in_chans=2,
        out_chans=2,
        chans=24,
        num_pool_layers=3,
        drop_prob=0.0,).to(device)
    logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
    # Init optimizer and scheduler (maximize validation PSNR)
    optimizer = torch.optim.Adam( params=model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, amsgrad=False)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer,
        mode='max',
        factor=0.1,
        patience=patience,
        threshold=0.0001,
        threshold_mode='abs',
        cooldown=0,
        min_lr=1e-5,
        eps=1e-08,
        verbose=True
    )
    # If a checkpoint exists, it is automatically loaded
    setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=model, optimizer=optimizer, scheduler=scheduler)
    # Load train set
    train_pool = torch.load('CS_natural_images_functions/training_set_lists/trsize1000000_filepaths.pt')
    # Files excluded from the pool; presumably images whose crops would break
    # normalization (zero norm) — TODO confirm against the data pipeline.
    zero_norm_files = ['train/n03729826/n03729826_6483.JPEG',
    'train/n04515003/n04515003_24673.JPEG',
    'train/n02111277/n02111277_12490.JPEG',
    'train/n03888605/n03888605_9775.JPEG',
    'train/n02992529/n02992529_3197.JPEG',
    'train/n01930112/n01930112_18908.JPEG',
    'train/n06874185/n06874185_3219.JPEG',
    'train/n06785654/n06785654_17232.JPEG',
    'train/n04033901/n04033901_29617.JPEG',
    'train/n07920052/n07920052_14729.JPEG',
    'train/n03729826/n03729826_40479.JPEG',
    'train/n03729826/n03729826_10716.JPEG',
    'train/n04286575/n04286575_74296.JPEG',
    'train/n03937543/n03937543_10198.JPEG',
    'train/n03063599/n03063599_3942.JPEG',
    'train/n04152593/n04152593_13802.JPEG',
    'train/n04522168/n04522168_24105.JPEG',
    'train/n03532672/n03532672_78983.JPEG',
    'train/n04404412/n04404412_12316.JPEG',
    'train/n04330267/n04330267_18003.JPEG',
    'train/n04118776/n04118776_37671.JPEG',
    'train/n04591713/n04591713_3568.JPEG',
    'train/n02437616/n02437616_12697.JPEG',
    'train/n02799071/n02799071_54867.JPEG',
    'train/n02883205/n02883205_26196.JPEG',
    'train/n02667093/n02667093_2919.JPEG',
    'train/n03196217/n03196217_1135.JPEG',
    'train/n03196217/n03196217_3568.JPEG',
    'train/n15075141/n15075141_19601.JPEG',
    'train/n01943899/n01943899_24166.JPEG']
    for zero_norm_file in zero_norm_files:
        train_pool.remove(zero_norm_file)
    # Subsample the training set with a seed-dependent RNG (without replacement)
    rng_dataset = np.random.default_rng(seed)
    if trainset_size == 1000000:
        train_set = train_pool
    else:
        train_set = rng_dataset.choice(train_pool, size=trainset_size, replace=False, p=None)
    torch.save(train_set,experiment_path+'train_set.pt')
    validation_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetVal80_filepaths.pt')
    # Train loader
    data_transform_train = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=fix_split, experiment_path=experiment_path,center_fraction=center_fraction)
    trainset = CropDataset(dataset=train_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_train, experiment_path=experiment_path, img_size=img_size)
    train_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=1, num_workers=0, shuffle=True, generator=torch.Generator().manual_seed(0))
    # Val loader (always a fixed split so validation is comparable across runs)
    data_transform_val = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
    valset = CropDataset(dataset=validation_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_val, experiment_path=experiment_path, img_size=img_size)
    val_loader = torch.utils.data.DataLoader( dataset=valset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0))
    # store training loss metrics
    train_meters = {'train_L2': AverageMeter()}
    train_tracks = {'train_L2': TrackMeter('decaying')}
    # store validation metrics
    valid_meters = {'val_SSIM' : AverageMeter(), 'val_PSNR' : AverageMeter(), 'val_L2' : AverageMeter(), 'val_L2_kspace': AverageMeter()}
    valid_tracks = {'val_SSIM' : TrackMeter('increasing'), 'val_PSNR' : TrackMeter('increasing'), 'val_L2' : TrackMeter('decaying'), 'val_L2_kspace': TrackMeter('decaying')}
    # Init tensorboard
    writer = SummaryWriter(log_dir=experiment_path)
    log_image_interval_tb = 30
    break_counter=0
    # Start training
    for epoch in range(save_checkpoint.start_epoch, num_epochs):
        train_bar = ProgressBar(train_loader, epoch)
        for meter in train_meters.values():
            meter.reset()
        for id,sample in enumerate(train_bar):
            model.train()
            y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
            # sanity check on number of lines in input and target mask
            if epoch==0 and id==0:
                tm = target_mask.detach()
                # normalize the mask to {0,1} so its line count can be summed
                target_mask_no_zeros = torch.where(tm != 0., tm , torch.tensor(1, dtype=tm.dtype).to(device))
                target_mask_norm_to_one = tm / target_mask_no_zeros
                logging.info(f"\n Mask sanity check! Lines in kspace: {input_mask.shape[-2]} \n Lines in input: {torch.sum(input_mask)} \n Lines in target: {torch.sum(target_mask_norm_to_one)} \n Number of all overlapping lines: {torch.sum(input_mask*target_mask_norm_to_one)}")
            # prediction (network operates channels-first; data is channels-last)
            x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
            # unnormalize
            x_output = x_output * std + mean
            # move to kspace
            y_output = fft2c(x_output)
            # apply target mask (all ones for supervised training)
            y_output = y_output * target_mask + 0.0
            # compute loss (kspace MSE, normalized by the target energy)
            train_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
            model.zero_grad()
            train_loss.backward()
            optimizer.step()
            # log train metrics
            train_meters['train_L2'].update(train_loss.item())
            train_bar.log(dict(**train_meters), verbose=True)
            if id ==0: # log a random train image to tensorboard
                name = f"train_0_img"
                add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x_target.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
                name = f"train_0_ksp"
                add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
            if id ==1: # log a specific train image to tensorboard
                name = f"train_1_img"
                add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x_target.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
                name = f"train_1_ksp"
                add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
        train_tracks['train_L2'].update(train_meters['train_L2'].avg,epoch)
        current_lr = optimizer.param_groups[0]["lr"]
        #scheduler.step()
        ############################################################################################################################
        if epoch % val_epoch_interval == 0: # set this value such that it works with save_at_epochs and log_image_interval_tb
            model.eval()
            for meter in valid_meters.values():
                meter.reset()
            valid_bar = ProgressBar(val_loader, epoch)
            rand_id = random.randint(0, len(val_loader)) # draw id to log a random slice to tensorboard
            for id, sample in enumerate(valid_bar):
                with torch.no_grad():
                    y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
                    # prediction
                    x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
                    # unnormalize
                    x_output = x_output * std + mean
                    # move to kspace
                    y_output = fft2c(x_output)
                    if id ==0: # log one fixed and one random validation image to tensorboard
                        name = f"val_0_img"
                        add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
                        name = f"val_0_ksp"
                        add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
                    elif id==rand_id: # log one fixed and one random validation image to tensorboard
                        name = f"val_1_img"
                        add_img_to_tensorboard(writer, epoch, name, x_input.detach(),x_output.detach(),x.detach(),ksp=False) if epoch % log_image_interval_tb == 0 else None
                        name = f"val_1_ksp"
                        add_img_to_tensorboard(writer, epoch, name, y_input.detach(),y_output.detach(),y_target.detach(),ksp=True) if epoch % log_image_interval_tb == 0 else None
                    # apply target mask (all ones for supervised training)
                    y_output = y_output * target_mask + 0.0
                    # val loss in kspace (L2)
                    val_loss = loss_fct(y_output,y_target) / torch.sum(torch.abs(y_target)**2)
                    valid_meters['val_L2_kspace'].update(val_loss)
                    # L2 in image domain between complex output and target image
                    val_loss = loss_fct(x_output,x) / torch.sum(torch.abs(x)**2)
                    valid_meters['val_L2'].update(val_loss)
                    output_magnitude = (x_output ** 2).sum(dim=-1).sqrt()
                    x_magnitude = (x ** 2).sum(dim=-1).sqrt() # since x is real, this operation is identity
                    x_magnitude = x_magnitude.unsqueeze(1)
                    output_magnitude = output_magnitude.unsqueeze(1)
                    # psnr
                    max_value = x.max().unsqueeze(0)
                    mse = torch.mean(torch.abs(output_magnitude-x_magnitude)**2)
                    psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
                    valid_meters["val_PSNR"].update(psnr.item())
                    # ssim
                    ssim_loss = 1-val_ssim_fct(output_magnitude, x_magnitude, data_range=max_value)
                    valid_meters["val_SSIM"].update(ssim_loss.item())
            # log progress
            valid_tracks['val_L2_kspace'].update(valid_meters['val_L2_kspace'].avg,epoch)
            valid_tracks['val_L2'].update(valid_meters['val_L2'].avg,epoch)
            valid_tracks['val_PSNR'].update(valid_meters['val_PSNR'].avg,epoch)
            valid_tracks['val_SSIM'].update(valid_meters['val_SSIM'].avg,epoch)
            valid_bar.log(dict(**valid_meters), verbose=True)
            scheduler.step(valid_meters['val_PSNR'].avg)
            # if the scheduler just reduced the LR, halve its patience
            if current_lr > optimizer.param_groups[0]["lr"]:
                scheduler.patience= scheduler.patience//2
            # stop after the LR has sat at its minimum for 3 validation rounds
            if current_lr == scheduler.min_lrs[0]:
                break_counter+=1
                if break_counter == 3:
                    break
            if save_checkpoint.best_score < valid_meters['val_PSNR'].avg:
                if scheduler.num_bad_epochs == 0:
                    logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, New='Highscore', Scheduler_patience='reset')))
                else:
                    logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, New='Highscore')))
            else:
                if scheduler.num_bad_epochs == 0:
                    logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, Scheduler_patience='reset')))
                else:
                    logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr)))
            writer.add_scalar("lr", current_lr, epoch)
            writer.add_scalar("epoch", epoch, epoch)
            writer.add_scalar("train_L2", train_meters["train_L2"].avg, epoch)
            for val_loss_name in valid_meters.keys():
                writer.add_scalar(val_loss_name, valid_meters[val_loss_name].avg, epoch)
            sys.stdout.flush()
            # Save checkpoint (checkpoint_best.pt whenever val PSNR improves)
            save_checkpoint(experiment_path, epoch, model, optimizer=optimizer, scheduler=scheduler, score=valid_meters['val_PSNR'].avg, save_at_epochs=[])
        else:
            logging.info(train_bar.print(dict(**train_meters, lr=current_lr)))
            writer.add_scalar("lr", current_lr, epoch)
            writer.add_scalar("epoch", epoch, epoch)
            writer.add_scalar("train_L2", train_meters["train_L2"].avg, epoch)
            sys.stdout.flush()
    logging.info(f"Done training! Best Val score {valid_tracks['val_PSNR'].best_val:.5f} obtained after epoch {valid_tracks['val_PSNR'].best_count}.")
    # NOTE(review): the file handles passed to pickle.dump below are never
    # closed; consider `with open(...)` here.
    pickle.dump( valid_tracks, open(experiment_path + 'valid_tracks_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
    pickle.dump( train_tracks, open(experiment_path + 'train_tracks_metrics.pkl', "wb" ) , pickle.HIGHEST_PROTOCOL )
################################################################################################
def run_testing(experiment_path,
                acceleration,
                center_fraction,
                acceleration_total,
                img_size,
                path_to_ImageNet_train):
    """Evaluate the best checkpoint on the validation and test splits.

    Loads checkpoint_best.pt from ``experiment_path``, runs inference on both
    loaders, and records L2, PSNR and SSIM for the raw network output and for
    the data-consistent output (measured input lines re-inserted in kspace).
    Per-sample metrics are pickled and summary statistics are logged.

    Args:
        experiment_path: Directory containing checkpoint_best.pt; pickled
            metrics and example images are written here.
        acceleration: Undersampling factor of the network input.
        center_fraction: Fraction of always-sampled low-frequency lines.
        acceleration_total: Undersampling of the data available for the
            input/target split (1.0 means supervised).
        img_size: Side length of the square image crops.
        path_to_ImageNet_train: Root directory of the ImageNet train split.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Init train.log file
    init_logging(experiment_path)
    # Log sanity checks on the number of lines in the input/target kspaces
    logging.info("Testing...")
    input_size, target_size, overlap_size_high, size_low, p, q, mu, nu, weight_on_random_lines = compute_number_of_lines_in_input_target_kspace(p=1/acceleration,mu=1/acceleration_total,nu=center_fraction, n=img_size)
    logging.info(f"mu: {mu}, p: {p}, q: {q}, nu: {nu}, weight_on_random_lines: {weight_on_random_lines}")
    logging.info(f"\n Lines in kspace: {img_size} \n Lines in input: {input_size} \n Lines in target: {target_size} \n Number of high freq overlapping lines: {overlap_size_high} \n Number of low freq lines: {size_low}")
    # loss function / metrics
    loss_fct = MSELoss(reduction='sum')
    val_ssim_fct = SSIMLoss()
    # Init model (must match the training architecture)
    model = Unet(
        in_chans=2,
        out_chans=2,
        chans=24,
        num_pool_layers=3,
        drop_prob=0.0,).to(device)
    validation_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetVal80_filepaths.pt')
    test_set = torch.load('CS_natural_images_functions/training_set_lists/ImageNetTest300_filepaths.pt')
    # test loader (fixed split so results are reproducible)
    data_transform_test = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
    testset = CropDataset(dataset=test_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_test, experiment_path=experiment_path, img_size=img_size)
    test_loader = torch.utils.data.DataLoader( dataset=testset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0), )
    # Val loader
    data_transform_val = UnetDataTransform(acceleration=acceleration,acceleration_total=acceleration_total, fix_split=True, experiment_path=experiment_path,center_fraction=center_fraction)
    valset = CropDataset(dataset=validation_set, path_to_ImageNet_train=path_to_ImageNet_train, transform=data_transform_val, experiment_path=experiment_path, img_size=img_size)
    val_loader = torch.utils.data.DataLoader( dataset=valset, batch_size=1, num_workers=0, shuffle=False, generator=torch.Generator().manual_seed(0), )
    # Load the best checkpoint into the model
    setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=model, optimizer=None, scheduler=None)
    test_validationSet_tracks = {'SSIM' : TrackMeter_testing(), 'PSNR' : TrackMeter_testing(), 'L2' : TrackMeter_testing(),'SSIM_dc' : TrackMeter_testing(), 'PSNR_dc' : TrackMeter_testing(), 'L2_dc' : TrackMeter_testing()}
    test_testSet_tracks = {'SSIM' : TrackMeter_testing(), 'PSNR' : TrackMeter_testing(), 'L2' : TrackMeter_testing(),'SSIM_dc' : TrackMeter_testing(), 'PSNR_dc' : TrackMeter_testing(), 'L2_dc' : TrackMeter_testing()}
    model.eval()
    tmp=0
    for data_loader, track_meter in zip([val_loader, test_loader],[test_validationSet_tracks, test_testSet_tracks]):
        tmp+=1
        test_bar = ProgressBar(data_loader, epoch=0)
        for id, sample in enumerate(test_bar):
            y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname = sample
            # BUG FIX: evaluation previously ran without no_grad(), so every
            # forward pass built an autograd graph and wasted memory.
            with torch.no_grad():
                # prediction
                x_output = torch.moveaxis(model(torch.moveaxis( x_input , -1, 1 )), 1, -1)
                # unnormalize
                x_output = x_output * std + mean
                # Apply data consistency: keep measured input lines, fill the
                # remaining kspace with the prediction
                y_output = fft2c(x_output)
                y_output_dc = y_output * (1-input_mask) + y_input
                x_output_dc = ifft2c(y_output_dc)
                # L2 in image domain between complex output and target image
                val_loss = loss_fct(x_output,x) / torch.sum(torch.abs(x)**2)
                track_meter['L2'].update(val_loss)
                val_loss_dc = loss_fct(x_output_dc,x) / torch.sum(torch.abs(x)**2)
                track_meter['L2_dc'].update(val_loss_dc)
                output_magnitude = (x_output ** 2).sum(dim=-1).sqrt()
                output_dc_magnitude = (x_output_dc ** 2).sum(dim=-1).sqrt()
                x_magnitude = (x ** 2).sum(dim=-1).sqrt() # since x is real, this operation is identity
                x_magnitude = x_magnitude.unsqueeze(1)
                output_magnitude = output_magnitude.unsqueeze(1)
                output_dc_magnitude = output_dc_magnitude.unsqueeze(1)
                # psnr
                max_value = x.max().unsqueeze(0)
                mse = torch.mean(torch.abs(output_magnitude-x_magnitude)**2)
                psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
                track_meter["PSNR"].update(psnr.item())
                mse_dc = torch.mean(torch.abs(output_dc_magnitude-x_magnitude)**2)
                psnr_dc = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse_dc)
                track_meter["PSNR_dc"].update(psnr_dc.item())
                # ssim
                ssim_loss = 1-val_ssim_fct(output_magnitude, x_magnitude, data_range=max_value)
                track_meter["SSIM"].update(ssim_loss.item())
                ssim_loss_dc = 1-val_ssim_fct(output_dc_magnitude, x_magnitude, data_range=max_value)
                track_meter["SSIM_dc"].update(ssim_loss_dc.item())
                # Save two example reconstructions from the validation set
                if (tmp==1 and id==1) or (tmp==1 and id==22):
                    x_input_abs = (x_input ** 2).sum(dim=-1).sqrt()
                    x_input_abs = x_input_abs.unsqueeze(1)
                    save_test_image_with_dc(experiment_path, ground_truth_image=x_magnitude, input_img=x_input_abs, output=output_magnitude, output_image_dc=output_dc_magnitude, fname=fname, track_meter=track_meter)
    # BUG FIX: use context managers so the pickle file handles are closed
    # (the original passed open(...) directly to pickle.dump and leaked them).
    with open(experiment_path + 'test_validationSet_metrics.pkl', "wb") as handle:
        pickle.dump(test_validationSet_tracks, handle, pickle.HIGHEST_PROTOCOL)
    with open(experiment_path + 'test_testSet_metrics.pkl', "wb") as handle:
        pickle.dump(test_testSet_tracks, handle, pickle.HIGHEST_PROTOCOL)
    logging.info(f"\nEvaluate validationset of length {len(val_loader)}:")
    for metric in test_validationSet_tracks.keys():
        logging.info(f"{metric}: avg {test_validationSet_tracks[metric].avg:.6f}, std {test_validationSet_tracks[metric].std:.6f}")
    logging.info(f"\nEvaluate testset of length {len(test_loader)}:")
    for metric in test_testSet_tracks.keys():
        logging.info(f"{metric}: avg {test_testSet_tracks[metric].avg:.6f}, std {test_testSet_tracks[metric].std:.6f}")
# Script entry point: parse CLI/config and dispatch training + testing runs.
if __name__ == '__main__':
    read_args()
# %%
| 29,149 | 44.404984 | 278 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/progress_bar.py | from collections import OrderedDict
from numbers import Number
from tqdm import tqdm
import torch
import logging
import os
import numpy as np
def init_logging(experiment_path):
    """Route root logging to the console and to <experiment_path>train.log.

    An existing log file is appended to; a fresh run starts a new file.
    """
    # Clear whatever handlers a previous configuration left on the root logger,
    # otherwise basicConfig would silently do nothing.
    while logging.root.handlers:
        logging.root.removeHandler(logging.root.handlers[0])
    log_file = experiment_path + "train.log"
    file_mode = "a" if os.path.exists(log_file) else "w"
    logging.basicConfig(
        handlers=[logging.StreamHandler(), logging.FileHandler(log_file, mode=file_mode)],
        format="[%(asctime)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
class AverageMeter(object):
    """Maintains the running average of a scalar metric.

    ``update(val, n)`` treats ``val`` as a sum over ``n`` samples, so ``avg``
    is the per-sample mean and ``val`` the per-sample value of the last call.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # start from a clean slate
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        # unwrap tensors so only plain floats are stored
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.count += n
        self.sum += val
        self.val = val / n
        self.avg = self.sum / self.count
class TrackMeter(object):
    """Records a metric once per epoch and tracks its best value.

    Args:
        inc_or_dec: 'decaying' if smaller values are better,
            'increasing' if larger values are better.
    """
    def __init__(self, inc_or_dec='decaying'):
        self.inc_or_dec = inc_or_dec
        self.reset()
    def reset(self):
        """Clear the history and the best-value bookkeeping."""
        self.val = []       # recorded values, in update order
        self.epochs = []    # epoch passed with each value
        self.count = 0      # number of updates so far
        self.best_val = float("inf") if self.inc_or_dec=='decaying' else float("-inf")
        self.best_count = 0
        self.best_epoch = 0
    def update(self, val, epoch):
        """Append one value and refresh the best-value bookkeeping."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.val.append(val)
        self.epochs.append(epoch)
        if (self.inc_or_dec=='decaying' and val < self.best_val) or (self.inc_or_dec=='increasing' and val > self.best_val):
            self.best_val = val
            # BUG FIX: the original set best_count twice (first to self.count,
            # then to epoch) and never updated best_epoch. best_epoch now holds
            # the epoch of the best value; best_count keeps the original
            # (epoch) value for backward compatibility with existing callers.
            self.best_count = epoch
            self.best_epoch = epoch
        self.count += 1
class TrackMeter_testing(object):
    """Collects per-sample metric values and keeps their mean and std."""

    def __init__(self,):
        self.reset()

    def reset(self):
        # raw values plus derived statistics
        self.val = []
        self.avg = 0
        self.std = 0

    def update(self, val,):
        # store plain floats only
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.val.append(val)
        samples = np.asarray(self.val)
        self.avg = samples.mean()
        self.std = samples.std()
class ProgressBar:
    """Thin tqdm wrapper that also formats metric dicts for console logging."""

    def __init__(self, iterable, epoch, quiet=False):
        self.epoch = epoch
        self.quiet = quiet
        self.prefix = f"epoch {epoch:02d}"
        # only wrap in tqdm when the bar should actually be displayed
        if self.quiet:
            self.iterable = iterable
        else:
            self.iterable = tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.iterable)

    def log(self, stats, verbose=False):
        # tqdm's postfix is only refreshed when the bar is shown
        if not self.quiet:
            self.iterable.set_postfix(self.format_stats(stats, verbose), refresh=True)

    def format_stats(self, stats, verbose=False):
        # set_postfix requires an OrderedDict; convert every value to a string
        postfix = OrderedDict(stats)
        for key in postfix:
            value = postfix[key]
            if isinstance(value, Number):
                template = "{:.6f}" if value > 0.001 else "{:.3e}"
                postfix[key] = template.format(value)
            elif isinstance(value, AverageMeter):
                if verbose:
                    postfix[key] = f"{value.avg:.6f} ({value.val:.6f})"
                else:
                    postfix[key] = f"{value.avg:.6f}"
            elif not isinstance(postfix[key], str):
                postfix[key] = str(value)
        return postfix

    def print(self, stats, verbose=False):
        pieces = [key + " " + value.strip() for key, value in self.format_stats(stats, verbose).items()]
        header = self.prefix + " | " if self.epoch is not None else ""
        return header + " | ".join(pieces)
| 3,573 | 30.910714 | 127 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/losses.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
    """
    SSIM loss module: ``forward`` returns ``1 - mean(SSIM map)`` between two
    single-channel image batches.
    """

    def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
        """
        Args:
            win_size: Window size for SSIM calculation.
            k1: k1 parameter for SSIM calculation.
            k2: k2 parameter for SSIM calculation.
        """
        super().__init__()
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.win_size = win_size
        self.k1 = torch.tensor(k1).to(device)
        self.k2 = torch.tensor(k2).to(device)
        # uniform averaging kernel; registered as a buffer so it follows .to()
        self.register_buffer("w", torch.ones(1, 1, win_size, win_size).to(device) / win_size ** 2)
        n_pix = win_size ** 2
        # unbiased-covariance correction factor N/(N-1)
        self.cov_norm = torch.tensor(n_pix / (n_pix - 1)).to(device)

    def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
        assert isinstance(self.w, torch.Tensor)
        data_range = data_range[:, None, None, None]
        C1 = (self.k1 * data_range) ** 2
        C2 = (self.k2 * data_range) ** 2
        # local means via convolution with the uniform window
        ux = F.conv2d(X, self.w)
        uy = F.conv2d(Y, self.w)
        # local (co)variances via E[x*y] - E[x]E[y], with the N/(N-1) correction
        vx = self.cov_norm * (F.conv2d(X * X, self.w) - ux * ux)
        vy = self.cov_norm * (F.conv2d(Y * Y, self.w) - uy * uy)
        vxy = self.cov_norm * (F.conv2d(X * Y, self.w) - ux * uy)
        # standard SSIM: (A1*A2)/(B1*B2)
        numerator = (2 * ux * uy + C1) * (2 * vxy + C2)
        denominator = (ux ** 2 + uy ** 2 + C1) * (vx + vy + C2)
        S = numerator / denominator
        return 1 - S.mean()
| 1,849 | 31.45614 | 98 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/load_save_model_helpers.py | import glob
import torch
import os
from torch.serialization import default_restore_location
import logging
def setup_experiment_or_load_checkpoint(experiment_path, resume_from='best', model=None, optimizer=None, scheduler=None):
    '''
    Restore training state from a saved checkpoint if one exists; otherwise
    initialise the run-progress attributes on the `save_checkpoint` function
    object for a fresh run.

    Args:
        - experiment_path: Directory that holds (or will hold) checkpoint_*.pt files.
        - resume_from: Either 'best' or 'some_number' where some_number could by any epoch at which a checkpoint was saved
        - model/optimizer/scheduler: A single object or a list of objects whose
          state_dicts are loaded in place; None skips that component.
    '''
    # Look for checkpoints to load from. If available, always load.
    available_models = glob.glob(experiment_path + '*.pt')
    if available_models:
        restore_file = experiment_path + f"checkpoint_{resume_from}.pt"
        print('restoring model..')
        # load all tensors onto CPU first; moving to the device happens later
        state_dict = torch.load(restore_file, map_location=lambda s, l: default_restore_location(s, "cpu"))
        # Run-progress bookkeeping lives as attributes on the save_checkpoint
        # function object; resuming 'from best' rewinds to the best epoch.
        save_checkpoint.last_epoch = state_dict["best_epoch"] if resume_from=='best' else state_dict["last_epoch"]
        save_checkpoint.start_epoch = state_dict["best_epoch"]+1 if resume_from=='best' else state_dict["last_epoch"]+1
        save_checkpoint.best_score = state_dict["best_score"]
        save_checkpoint.best_epoch = state_dict["best_epoch"]
        # Normalize single objects to lists so one code path handles both.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        if model is not None and state_dict.get("model", None) is not None:
            for m, state in zip(model, state_dict["model"]):
                m.load_state_dict(state)
        if optimizer is not None and state_dict.get("optimizer", None) is not None:
            for o, state in zip(optimizer, state_dict["optimizer"]):
                o.load_state_dict(state)
        if scheduler is not None and state_dict.get("scheduler", None) is not None:
            for s, state in zip(scheduler, state_dict["scheduler"]):
                s.load_state_dict(state)
        logging.info("Loaded checkpoint {} with best_epoch {} last_epoch {}".format(restore_file, save_checkpoint.best_epoch, save_checkpoint.last_epoch))
    else:
        # Fresh run: initialise the bookkeeping attributes.
        print("No checkpoint to load. Start training from scratch.")
        save_checkpoint.best_epoch = -1
        save_checkpoint.last_epoch = 0
        save_checkpoint.start_epoch = 0
        save_checkpoint.best_score = float("-inf")
def save_checkpoint(experiment_path, epoch, model, optimizer=None, scheduler=None, score=None, save_at_epochs=None):
    '''
    Persist training state.

    Writes checkpoint_best.pt whenever `score` improves on the best score
    recorded so far, and additionally checkpoint<epoch>.pt for every epoch
    listed in `save_at_epochs`. Run-progress bookkeeping (last/best epoch,
    best score) is stored as attributes on this function object; they are
    initialised by setup_experiment_or_load_checkpoint.

    Args:
        - experiment_path: Directory the checkpoint files are written to.
        - epoch: Current epoch; recorded as last_epoch.
        - model/optimizer/scheduler: A single object or a list of objects
          whose state_dicts are saved; None skips that component.
        - score: Validation score used for the "best" comparison (higher is better).
        - save_at_epochs: Optional list of epochs at which to always save a snapshot.
    '''
    save_checkpoint.last_epoch = epoch
    # Snapshot the best score recorded before this call; both comparisons
    # below use this pre-update value.
    best_score = save_checkpoint.best_score
    if score > best_score:
        save_checkpoint.best_epoch = epoch
        save_checkpoint.best_score = score
    if score > best_score:
        # Normalize single objects to lists so one code path handles both.
        model = [model] if model is not None and not isinstance(model, list) else model
        optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
        scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
        state_dict = {
            "last_epoch": save_checkpoint.last_epoch,
            "best_epoch": save_checkpoint.best_epoch,
            "best_score": save_checkpoint.best_score,
            "model": [m.state_dict() for m in model] if model is not None else None,
            "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
            "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
        }
        # NOTE(review): os.path.join with a single argument is a no-op; the
        # '+' concatenation already forms the full path.
        torch.save(state_dict, os.path.join(experiment_path + "checkpoint_best.pt"))
    if save_at_epochs:
        if epoch in save_at_epochs:
            model = [model] if model is not None and not isinstance(model, list) else model
            optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
            scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
            state_dict = {
                "last_epoch": save_checkpoint.last_epoch, #set
                "best_epoch": save_checkpoint.best_epoch, #set
                "best_score": getattr(save_checkpoint, "best_score", None), #set
                "model": [m.state_dict() for m in model] if model is not None else None,
                "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
                "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
            }
            torch.save(state_dict, os.path.join(experiment_path + f"checkpoint{epoch}.pt"))
| 4,690 | 45.445545 | 154 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class Unet(nn.Module):
    """
    PyTorch implementation of a U-Net model.

    O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. MICCAI 2015. A convolutional
    encoder/decoder with skip connections between matching resolutions.
    """

    def __init__(
        self,
        in_chans: int,
        out_chans: int,
        chans: int = 32,
        num_pool_layers: int = 4,
        drop_prob: float = 0.0,
    ):
        """
        Args:
            in_chans: Number of channels in the input to the U-Net model.
            out_chans: Number of channels in the output to the U-Net model.
            chans: Number of output channels of the first convolution layer.
            num_pool_layers: Number of down-sampling and up-sampling layers.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob

        # Encoder: channel count doubles at every down-sampling stage.
        downs = [ConvBlock(in_chans, chans, drop_prob)]
        width = chans
        for _ in range(num_pool_layers - 1):
            downs.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2
        self.down_sample_layers = nn.ModuleList(downs)

        # Bottleneck at the coarsest resolution.
        self.conv = ConvBlock(width, width * 2, drop_prob)

        # Decoder: each stage upsamples, then convolves the concatenation
        # with the matching encoder activation (so input channels are 2x).
        ups = []
        transposes = []
        for _ in range(num_pool_layers - 1):
            transposes.append(TransposeConvBlock(width * 2, width))
            ups.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2
        transposes.append(TransposeConvBlock(width * 2, width))
        ups.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                # the only convolution in the network that carries a bias
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            )
        )
        self.up_conv = nn.ModuleList(ups)
        self.up_transpose_conv = nn.ModuleList(transposes)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        skips = []
        out = image

        # Encoder path: remember every pre-pooling activation for the skips.
        for down in self.down_sample_layers:
            out = down(out)
            skips.append(out)
            out = F.avg_pool2d(out, kernel_size=2, stride=2, padding=0)

        out = self.conv(out)

        # Decoder path: upsample, re-attach the matching skip, convolve.
        for up_transpose, up in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            out = up_transpose(out)

            # Odd input sizes leave the upsampled map one pixel short on the
            # right/bottom; reflect-pad so it matches the skip connection.
            pad = [0, 0, 0, 0]
            if out.shape[-1] != skip.shape[-1]:
                pad[1] = 1  # padding right
            if out.shape[-2] != skip.shape[-2]:
                pad[3] = 1  # padding bottom
            if any(pad):
                out = F.pad(out, pad, "reflect")

            out = torch.cat([out, skip], dim=1)
            out = up(out)

        return out
class ConvBlock(nn.Module):
    """
    A Convolutional Block: two 3x3 convolutions, each followed by instance
    normalization, LeakyReLU activation and 2D dropout.
    """

    def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
            drop_prob: Dropout probability.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def conv_stage(n_in: int, n_out: int):
            # One conv -> norm -> activation -> dropout unit.
            return [
                nn.Conv2d(n_in, n_out, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(n_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        self.layers = nn.Sequential(
            *conv_stage(in_chans, out_chans), *conv_stage(out_chans, out_chans)
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H, W)`.
        """
        return self.layers(image)
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block: one 2x stride-2 transpose convolution
    followed by instance normalization and LeakyReLU activation.
    """

    def __init__(self, in_chans: int, out_chans: int):
        """
        Args:
            in_chans: Number of channels in the input.
            out_chans: Number of channels in the output.
        """
        super().__init__()

        self.in_chans = in_chans
        self.out_chans = out_chans

        upsample = nn.ConvTranspose2d(
            in_chans, out_chans, kernel_size=2, stride=2, bias=False
        )
        self.layers = nn.Sequential(
            upsample,
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """
        Args:
            image: Input 4D tensor of shape `(N, in_chans, H, W)`.

        Returns:
            Output tensor of shape `(N, out_chans, H*2, W*2)`.
        """
        return self.layers(image)
| 6,021 | 31.907104 | 113 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
import torch.fft # type: ignore
def fft2c(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2 dimensional Fast Fourier Transform.

    Args:
        data: Complex valued input data whose last dimension has size 2
            (real/imaginary); dimensions -3 & -2 are the spatial dimensions
            and all earlier dimensions are treated as batch dimensions.

    Returns:
        The orthonormally scaled FFT of the input, same shape as `data`.

    Raises:
        ValueError: If the last dimension does not have size 2.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")

    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.fftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
def ifft2c(data: torch.Tensor) -> torch.Tensor:
    """
    Apply centered 2-dimensional Inverse Fast Fourier Transform.

    Args:
        data: Complex valued input data whose last dimension has size 2
            (real/imaginary); dimensions -3 & -2 are the spatial dimensions
            and all earlier dimensions are treated as batch dimensions.

    Returns:
        The orthonormally scaled IFFT of the input, same shape as `data`.

    Raises:
        ValueError: If the last dimension does not have size 2.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")

    shifted = ifftshift(data, dim=[-3, -2])
    transformed = torch.fft.ifftn(  # type: ignore
        torch.view_as_complex(shifted), dim=(-2, -1), norm="ortho"
    )
    return fftshift(torch.view_as_real(transformed), dim=[-3, -2])
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
    """
    Similar to roll but for only one dim.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll.
        dim: Which dimension to roll.

    Returns:
        Rolled version of x.
    """
    size = x.size(dim)
    shift %= size
    if shift == 0:
        return x

    # Move the trailing `shift` elements in front of the rest.
    head = x.narrow(dim, size - shift, shift)
    tail = x.narrow(dim, 0, size - shift)
    return torch.cat((head, tail), dim=dim)
def roll(
    x: torch.Tensor,
    shift: List[int],
    dim: List[int],
) -> torch.Tensor:
    """
    Similar to np.roll but applies to PyTorch Tensors.

    Args:
        x: A PyTorch tensor.
        shift: Per-dimension roll amounts, paired with `dim`.
        dim: Which dimensions to roll.

    Returns:
        Rolled version of x.

    Raises:
        ValueError: If `shift` and `dim` differ in length.
    """
    if len(shift) != len(dim):
        raise ValueError("len(shift) must match len(dim)")

    rolled = x
    for amount, axis in zip(shift, dim):
        rolled = roll_one_dim(rolled, amount, axis)
    return rolled
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.fftshift but applies to PyTorch Tensors.

    Shifts the zero-frequency component to the center of each given
    dimension by rolling it by floor(size / 2).

    Args:
        x: A PyTorch tensor.
        dim: Which dimensions to fftshift; all dimensions if None.

    Returns:
        fftshifted version of x.
    """
    if dim is None:
        # explicit loop instead of list(range(...)): necessary for
        # torch.jit.script typing
        dim = [0] * (x.dim())
        for i in range(1, x.dim()):
            dim[i] = i

    # also necessary for torch.jit.script
    shift = [0] * len(dim)
    for i, dim_num in enumerate(dim):
        # floor division; the counterpart ifftshift uses ceil so the two
        # are exact inverses for odd-sized dimensions
        shift[i] = x.shape[dim_num] // 2

    return roll(x, shift, dim)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Similar to np.fft.ifftshift but applies to PyTorch Tensors.

    Inverse of fftshift: rolls each given dimension by ceil(size / 2),
    which undoes fftshift even for odd-sized dimensions.

    Args:
        x: A PyTorch tensor.
        dim: Which dimensions to ifftshift; all dimensions if None.

    Returns:
        ifftshifted version of x.
    """
    if dim is None:
        # explicit loop instead of list(range(...)): necessary for
        # torch.jit.script typing
        dim = [0] * (x.dim())
        for i in range(1, x.dim()):
            dim[i] = i

    # also necessary for torch.jit.script
    shift = [0] * len(dim)
    for i, dim_num in enumerate(dim):
        # ceil division (cf. floor in fftshift)
        shift[i] = (x.shape[dim_num] + 1) // 2

    return roll(x, shift, dim)
| 4,108 | 23.753012 | 80 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/data_transforms.py | import numpy as np
import torch
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import torchvision.transforms as transforms
import PIL.Image as Image
from CS_natural_images_functions.log_progress_helpers import save_figure
from CS_natural_images_functions.fftc import fft2c, ifft2c
class CropDataset(torch.utils.data.Dataset):
    """
    A PyTorch Dataset that provides access to cropped images from ImageNet.

    All images are loaded eagerly in the constructor: each is converted to
    grayscale, center-cropped to `img_size`, and kept in memory on the
    available device (GPU if present).
    """
    def __init__(
        self,
        dataset: List,
        path_to_ImageNet_train: str,
        transform: Callable,
        experiment_path: str,
        img_size: int,
    ):
        """
        Args:
            dataset: Relative paths of the images (appended to
                `path_to_ImageNet_train`).
            path_to_ImageNet_train: Root directory of the ImageNet train split.
            transform: Callable applied per item as transform(x, filename, i).
            experiment_path: Experiment directory (kept for reference).
            img_size: Side length of the center crop.
        """
        self.transform = transform
        self.experiment_path = experiment_path
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # list of (image_tensor, filename) pairs, preloaded onto `device`
        self.examples = []
        load_transform = transforms.Compose([
                transforms.CenterCrop(img_size),
                transforms.ToTensor(),
            ])
        for datapath in dataset:
            # "L" = single-channel grayscale
            image = Image.open(path_to_ImageNet_train+datapath).convert("L")
            # strip a fixed-length prefix and the file extension to get an id;
            # assumes paths shaped like '<16-char prefix><name>.<4-char ext>'
            # — TODO confirm against the dataset listing
            filename = datapath[16:-5]
            # ToTensor yields (1, H, W); [0] drops the channel dimension
            self.examples.append((load_transform(image)[0].to(device),filename))
    def __len__(self):
        # Number of preloaded examples.
        return len(self.examples)
    def __getitem__(self, i: int):
        # Determine input, target and ground truth via the per-item transform.
        x,filename = self.examples[i]
        sample = self.transform(x,filename,i)
        return sample
class UnetDataTransform:
    """
    Builds a (self-)supervised compressed-sensing training sample from one
    grayscale image: simulates k-space via FFT, samples an undersampled
    input mask and a (possibly weighted) target mask, and returns the
    masked k-spaces together with their zero-filled image reconstructions.

    NOTE(review): the order of the rng.choice calls below determines which
    lines are sampled for a given filename seed; do not reorder them.
    """
    def __init__(
        self,
        acceleration,
        acceleration_total,
        fix_split,
        experiment_path,
        center_fraction,
    ):
        """
        Args:
            acceleration: Acceleration factor of the network input (p = 1/acceleration).
            acceleration_total: Total acceleration of input+target lines
                (mu = 1/acceleration_total).
            fix_split: If True, the input/target split is deterministic per
                filename; if False, the split is re-sampled on every call.
            experiment_path: Experiment directory (used by the commented-out
                debug figure dumps).
            center_fraction: Fraction nu of always-sampled center lines.
        """
        self.acceleration = acceleration
        self.acceleration_total = acceleration_total
        self.fix_split = fix_split
        self.experiment_path = experiment_path
        self.center_fraction = center_fraction
    def __call__(
        self,
        x: torch.Tensor,
        fname: str,
        id: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str]:
        """
        Args:
            x: Ground truth grayscale image of shape (H, W) on the target device.
            fname: Filename; its characters seed the per-image mask RNG.
            id: Index of the sample (only used by the commented-out debug dumps).

        Returns:
            tuple containing:
                y_input: undersampled input kspace
                x_input: zero-filled coarse reconstruction in image domain,
                    normalized per-image to zero mean / unit std
                y_target: the (weighted) target kspace
                x_target: the training target in image domain
                x: the ground truth image with trailing real/imag channels
                input_mask: undersampled input mask
                target_mask: target mask (weighted random lines); in the case
                    of supervised training all ones
                mean, std: normalization statistics of x_input
                fname: the filename, passed through
        """
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        n = x.shape[-1]
        # transform x to a tensor with real channel and complex channel. Right now the complex channel is all zeros.
        #x = np.stack((x, np.zeros_like(x)), axis=-1)
        x = torch.stack((x, torch.zeros_like(x)), axis=-1)
        #x = torch.from_numpy(x)
        # obtain kspace
        y = fft2c(x)
        #save_figure(y[:,:,0],"y_real",self.experiment_path) if id==0 else None
        #save_figure(y[:,:,1],"y_imag",self.experiment_path) if id==0 else None
        #######################################
        # sample input mask
        # nu: center fraction, p: input fraction, mu: total sampled fraction,
        # q: implied target fraction (derived so the expected budget works out)
        nu = self.center_fraction
        p = 1/self.acceleration
        mu = 1/self.acceleration_total
        q = (mu-p+nu-mu*nu)/(1-p)
        # 1. Determine the set S_low consisting of the indices of the nu*n many center frequencies which are always sampled
        size_low = int(round(n*nu))
        pad = (n - size_low + 1) // 2
        # set of indices of all lines in kspace
        S_all = np.arange(n)
        S_low = S_all[pad : pad + size_low]
        # 1.1 Determine S_mu_high, i.e., S_mu without S_low, so only the random high frequencies
        # set of indices of all high frequencies
        S_high = np.hstack((S_all[: pad],S_all[pad + size_low :]))
        S_mu_size_high = int(round((mu-nu)*n))
        S_p_size_high = int(round((p-nu)*n))
        #### Depending on whether the input/target split is fixed or re-sampled, the order of sampling needs to be adapted
        # This is so that validation during training samples the same input mask as during testing
        # Recall that during testing selfsup=False, hence S_mu_high is not sampled.
        seed = tuple(map(ord, fname))
        rng = np.random.default_rng(seed)
        if self.fix_split:
            # If split is fixed, first sample S_p_high and then additional lines for S_mu_high
            # such that the set S_p_high is the same as if we would sample for selfsup=False
            S_p_high = rng.choice(S_high, size=S_p_size_high, replace=False, p=None)
            S_mu_size_high_remainding = S_mu_size_high - S_p_size_high
            S_high_remainding = np.array(list(set(S_high)-set(S_p_high)))
            S_q_high = rng.choice(S_high_remainding, size=S_mu_size_high_remainding, replace=False, p=None)
        else:
            # If split is random, first sample S_mu_high such that this set is always fixed.
            S_mu_high = rng.choice(S_high, size=S_mu_size_high, replace=False, p=None)
            # 2. From S_mu_high sample the set S_p_high of size (p-nu)n
            # (uses the global numpy RNG, so this split differs per epoch)
            S_p_high = np.random.choice(S_mu_high, size=S_p_size_high, replace=False, p=None)
            # 3. All other indices in S_mu_high add to the set S_q_high
            S_q_high = np.array(list(set(S_mu_high)-set(S_p_high)))
        # 4. Determine the size of the overlap between S_p_high and S_q_high, sample this many indices from S_p_high and add them to S_q_high
        overlap_size_high = int(round(( (p-nu) / (1-nu) ) * ( (q-nu) / (1-nu) ) *(n-n*nu)))
        S_overlap = S_p_high[0:overlap_size_high]
        S_q_high = np.concatenate([S_q_high,S_overlap])
        # 5. Define the final input and target masks by setting entries to zero or to one for S_p=S_low+S_p_high and S_q=S_low+S_q_high
        input_mask = np.zeros(n)
        input_mask[S_low] = 1.0
        input_mask[S_p_high] = 1.0
        input_mask = torch.from_numpy(input_mask.astype(np.float32)).unsqueeze(0).unsqueeze(-1).to(device)
        # 6. Create a target mask where the random entries are weighted
        weight_on_random_lines = np.sqrt((1-nu)/(q-nu))
        target_mask = np.zeros(n)
        target_mask[S_low] = 1.0
        target_mask[S_q_high] = weight_on_random_lines
        target_mask = torch.from_numpy(target_mask.astype(np.float32)).unsqueeze(0).unsqueeze(-1).to(device)
        #######################################
        # apply mask to kspace (broadcasts over rows and real/imag channels)
        y_input = y * input_mask + 0.0
        #save_figure(y_input[:,:,0],"y_input_real",self.experiment_path) if id==0 else None
        #save_figure(y_input[:,:,1],"y_input_imag",self.experiment_path) if id==0 else None
        # compute zero-filled coarse reconstruction as input
        x_input = ifft2c(y_input)
        #save_figure(x_input[:,:,0],"x_input_real",self.experiment_path) if id==0 else None
        #save_figure(x_input[:,:,1],"x_input_imag",self.experiment_path) if id==0 else None
        mean = x_input.mean(dim=[0,1],keepdim=True)
        std = x_input.std(dim=[0,1],keepdim=True)
        x_input = (x_input - mean) / (std + 1e-11)
        # training target. target_mask is all ones if supervised training
        y_target = y * target_mask + 0.0
        # training target in image domain
        x_target = ifft2c(y_target)
        return y_input, x_input, y_target, x_target, x, input_mask, target_mask, mean, std, fname
def compute_number_of_lines_in_input_target_kspace(p,mu,nu,n=160):
    """
    Count the k-space lines that end up in the input and target masks.

    Mirrors the sampling arithmetic of UnetDataTransform: nu*n center lines
    are always kept, the input additionally gets (p-nu)*n random high
    frequencies, and the target gets the remaining high frequencies of the
    total budget mu*n plus a proportional overlap with the input.

    Args:
        p: Fraction of lines in the network input.
        mu: Total fraction of sampled lines (input plus target).
        nu: Fraction of always-sampled center lines.
        n: Number of k-space lines.

    Returns:
        Tuple (input_size, target_size, overlap_size_high, size_low, p, q,
        mu, nu, weight_on_random_lines), where q is the implied target
        fraction and the last entry is the loss re-weighting applied to the
        random target lines.
    """
    # implied fraction of lines in the target mask
    q = (mu - p + nu - mu * nu) / (1 - p)

    n_center = int(round(n * nu))
    n_input_high = int(round((p - nu) * n))
    n_total_high = int(round((mu - nu) * n))
    n_target_only_high = n_total_high - n_input_high

    # expected number of random lines shared by input and target
    high_budget = n - n * nu
    n_overlap_high = int(round(((p - nu) / (1 - nu)) * ((q - nu) / (1 - nu)) * high_budget))

    input_size = n_center + n_input_high
    target_size = n_center + n_target_only_high + n_overlap_high
    weight_on_random_lines = np.sqrt((1 - nu) / (q - nu))

    return (
        input_size,
        target_size,
        n_overlap_high,
        n_center,
        p,
        q,
        mu,
        nu,
        weight_on_random_lines,
    )
| 8,047 | 36.432558 | 141 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_natural_images_figure4/CS_natural_images_functions/log_progress_helpers.py | import numpy as np
import matplotlib.pyplot as plt
from typing import Dict, Optional, Sequence, Tuple, Union, List
import os
import torchvision
import io
import torch
from CS_natural_images_functions.losses import SSIMLoss
def complex_abs(data: torch.Tensor) -> torch.Tensor:
    """
    Compute the absolute value of a complex valued input tensor.

    Args:
        data: A complex valued tensor, where the size of the final dimension
            should be 2 (real, imaginary).

    Returns:
        Absolute value of data, with the final dimension removed.

    Raises:
        ValueError: If the final dimension does not have size 2.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")

    real, imag = data[..., 0], data[..., 1]
    return (real * real + imag * imag).sqrt()
def plot_to_image(figure):
    """Convert the matplotlib plot specified by 'figure' to a PNG image tensor.

    The supplied figure is closed and inaccessible after this call.
    """
    # Rasterize the current figure into an in-memory PNG buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    raw = torch.tensor(np.frombuffer(buf.getvalue(), dtype=np.uint8), device='cpu')
    return torchvision.io.decode_png(raw)
def get_figure(image,figsize,title):
    """Return a matplotlib figure showing `image` (3D array) with `title`.

    Raises:
        ValueError: If `image` is not a 3D array.
    """
    if len(image.shape) != 3:
        raise ValueError("Image dimensions not suitable for logging to tensorboard.")
    # Convert channel-first (1 or 3 channels) to channel-last for imshow.
    if image.shape[0] in (1, 3):
        image = np.rollaxis(image, 0, 3)

    figure = plt.figure(figsize=figsize) if figsize else plt.figure()

    plt.subplot(1, 1, 1, title=title)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(image, cmap='gray')
    figure.tight_layout()
    return figure
def plot_figure(
    x: np.array,):
    """Display a 2D array (height, width) as a grayscale image without axes."""
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.imshow(x, 'gray')
    axes.axis('off')
    fig.tight_layout()
    plt.show()
def save_figure(
    x: np.array,
    figname: str,
    experiment_path: str,
    save: Optional[bool]=True,):
    """Save a 2D array (height, width) as a grayscale PNG.

    The file is written to `<experiment_path>/train_figures/<figname>.png`;
    the subdirectory is created on demand.

    Args:
        x: Image array of shape (height, width).
        figname: File name without extension.
        experiment_path: Experiment directory.
        save: If False, do nothing.
    """
    if not save:
        return

    save_path = experiment_path + 'train_figures/'
    if not os.path.isdir(save_path):
        os.mkdir(save_path)

    fig = plt.figure(figsize=(7,7))
    axes = fig.add_subplot(111)
    axes.imshow(x, 'gray')
    axes.axis('off')
    fig.tight_layout()
    plt.savefig(save_path + figname + ".png")
    plt.close(fig)
def save_test_image_with_dc(experiment_path, ground_truth_image, input_img, output, output_image_dc, fname, track_meter):
    """
    Save a test-set comparison grid (ground truth, input, output with and
    without data consistency, and both error maps) to
    `<experiment_path>/test_figures/<fname[0]>.png`, annotated with the most
    recent SSIM/PSNR values from `track_meter`.

    NOTE(review): assumes each track_meter entry has a `.val` list whose last
    element is the score of the current sample — confirm against the meter class.
    """
    save_path = experiment_path + 'test_figures/'
    if not os.path.isdir(save_path):
        os.mkdir(save_path)
    # error maps are computed before the images are rescaled for display
    error = torch.abs(ground_truth_image - output)
    error_dc = torch.abs(ground_truth_image - output_image_dc)
    # rescale each image independently to [0, 1] for display
    output = output - output.min()
    output = output / output.max()
    output_image_dc = output_image_dc - output_image_dc.min()
    output_image_dc = output_image_dc / output_image_dc.max()
    ground_truth_image = ground_truth_image - ground_truth_image.min()
    ground_truth_image = ground_truth_image / ground_truth_image.max()
    input_img = input_img - input_img.min()
    input_img = input_img / input_img.max()
    # the two error maps share one scale so they remain visually comparable
    error = error - error.min()
    error_dc = error_dc - error_dc.min()
    max_norm = torch.stack([error,error_dc]).max()
    error = error / max_norm
    error_dc = error_dc / max_norm
    # 3x2 grid: (gt, input) / (output, output_dc) / (error, error_dc)
    image = torch.cat([ground_truth_image, input_img, output, output_image_dc, error, error_dc], dim=0)
    image = torchvision.utils.make_grid(image, nrow=2, normalize=False, value_range=(0,1), pad_value=1)
    # most recent metric values for this sample
    ssim_score = track_meter["SSIM"].val[-1]
    ssim_score_dc = track_meter["SSIM_dc"].val[-1]
    psnr_score = track_meter["PSNR"].val[-1]
    psnr_score_dc = track_meter["PSNR_dc"].val[-1]
    figure = get_figure(image.cpu().numpy(),figsize=(8,12),title=f"ssim={ssim_score:.4f}, dc={ssim_score_dc:.4f}, psnr={psnr_score:.3f}, dc={psnr_score_dc:.3f}")
    plt.savefig(experiment_path + 'test_figures/' + f"{fname[0]}.png", dpi='figure')
    plt.close()
def add_img_to_tensorboard(writer, epoch, name, input_img_comp, output_comp, targetcomp, ksp):
    """
    Log comparison panels (input / target / output / error) to tensorboard.

    Three images are written under `name + "_abs"`, `name + "_re"` and
    `name + "_im"`: the magnitude, real and imaginary channels of the given
    complex tensors. Each panel is annotated with the SSIM between output
    and target of that channel. (Refactored from three near-identical
    copy-pasted sections into a single loop over the channel modes.)

    Args:
        writer: Tensorboard SummaryWriter.
        epoch: Global step used for logging.
        input_img_comp: Network input, batch tensor with trailing
            real/imaginary dimension of size 2; only element 0 is shown.
        output_comp: Network output, same layout.
        targetcomp: Target, same layout.
        ksp: If True the tensors are k-space data and are log-scaled
            for display.
    """

    def _channel(data, mode):
        # Extract one displayable 2D channel of the first batch element.
        if mode == "abs":
            chan = complex_abs(data)[0]
            # complex_abs is non-negative, so no torch.abs before the log
            return torch.log(chan + 1e-9) if ksp else chan
        chan = data[0, :, :, 0] if mode == "re" else data[0, :, :, 1]
        return torch.log(torch.abs(chan) + 1e-9) if ksp else chan

    def _to_unit_range(img):
        # Shift/scale into [0, 1] for display.
        img = img - img.min()
        return img / img.max()

    for mode in ("abs", "re", "im"):
        input_img = _channel(input_img_comp, mode)
        output = _channel(output_comp, mode)
        target = _channel(targetcomp, mode)

        # SSIM is computed on the (possibly log-scaled) channels before the
        # display normalization, with the target max as data range.
        val_ssim_fct = SSIMLoss()
        max_value = target.max().unsqueeze(0)
        ssim_loss = 1 - val_ssim_fct(
            output.unsqueeze(0).unsqueeze(0),
            target.unsqueeze(0).unsqueeze(0),
            data_range=max_value,
        )
        error = torch.abs(target - output)

        # Vertical strip: input / target / output / error.
        panels = [_to_unit_range(t) for t in (input_img, target, output, error)]
        image = torchvision.utils.make_grid(torch.cat(panels, dim=0), nrow=1, normalize=False)
        figure = get_figure(image.cpu().numpy(), figsize=(3, 12), title=f"ssim={ssim_loss.item():.6f}")
        writer.add_image(name + "_" + mode, plot_to_image(figure), epoch)
    plt.close()
| 8,395 | 35.663755 | 162 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/run_MRI.py | # %%
##################################
# Import python packages
import numpy as np
import os
import traceback
# Import main()
from functions.main import main_train, main_test
#from functions.train_utils import get_event_details
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# %%
##################################
# Customize hyperparameters
# Specify an ID for each experiment, e.g. 001,002,...
exp_nums = ['001']
## Available datasets for MRI experiments:
#train_49827_selfsup_slice
#train_24900_selfsup_slice
#train_9977_selfsup_slice
#train_4943_selfsup_slice
#train_2466_selfsup_slice
#train_970_selfsup_slice
#train_491_selfsup_slice
#train_223_selfsup_slice
#train_96_selfsup_slice
#train_48_selfsup_slice
#test_4713_selfsup_slice
#val_313_selfsup_slice
# Every value is a list with one entry per experiment in exp_nums; a
# single-entry list is broadcast to all experiments by the sanity checks below.
hp_all_exps = {
    #############################################################
    ### SET FLAGS AND PICK NUMBER OF RUNS ###
    # Specify the number of runs with different random initialization per experiment, e.g. [0,1,2] means three runs
    'num_runs' : [[0]],
    # Start training or continue training if there exists already a checkpoint.
    'training' : [True],
    # Evaluate best and last checkpoint on validation and test set.
    'testing' : [True],
    # Load model from last or best checkpoint. "last" is for continuing an interrupted training, "best" is automatically set for test mode. Right now "best" is not supported during training.
    'resume_from_which_checkpoint' : ["last"],
    #############################################################
    ### CHOOSE DATASETS ###
    # Path to data directories: '../../../../srv/multicoil_brain_subset/' (zion) or '../../../../media/ssd1/fastMRIdata/brain/' (bigsur, yosemite)
    'data_path' : ['../../../media/ssd1/fastMRIdata/brain/'],
    # Path to val data directories. Only specify if different from data_path.
    'val_path' : [None],
    # Path to test data directories. Only specify if different from data_path.
    'test_path' : [None],
    # Path to sensitivity maps directories.
    'smaps_path' : ['../../../media/ssd1/fastMRIdata/brain_sensmaps_train_and_val/'],
    # If true sens maps are loaded in the train, validation and test loader.
    'provide_senmaps' : [True],
    # Choose a training set, e.g. train_491_selfsup_slice
    'train_set' : ['./datasets/train_48_selfsup_slice.yaml'],
    # Specify the size of the training set.
    'train_size' : [50],
    # Choose a validation set.
    'val_set' : ['./datasets/val_313_selfsup_slice.yaml'],
    # Choose test sets:
    'test_sets' : [['./datasets/val_313_selfsup_slice.yaml','./datasets/test_4713_selfsup_slice.yaml']],
    # Which challenge to preprocess for.
    'challenge' : ["multicoil"],
    #############################################################
    ### DEFINE SETTINGS FOR MASK FUNCTIONS ###
    # Type of k-space mask. Use n2n.
    'mask_type' : ["n2n"],
    # Number of center lines to use in mask. 0.08 for acceleration 4.
    'center_fraction' : [0.08],
    # Acceleration rates to use for masks, e.g. 4 or 8. This acceleration factor is always applied to create network inputs
    'acceleration' : [4.0],
    # If True: the same input and target masks for both supervised and self-supervised training are used over all epochs.
    # Note that masks still differ over slices in a volume.
    # If False: Supervised training draws a new input mask in every epoch for every training slice
    # Self-supervised still always draws the same set of frequencies defined by 'acceleration_total', but the split into input and target is re-sampled in every epoch.
    'use_mask_seed_for_training' : [False],
    #############################################################
    ### ADDITIONAL FLAGS FOR SELF-SUPERVISED TRAINING ###
    # If True the training loss is only computed on the masked kspace fraction selfsup_acceleration (plus accelerations if selfsup_compute_loss_on_input_freq is True)
    'selfsup' : [True],
    # Determines the number of frequencies given during self-supervised training. Those are then split into input and target frequencies
    # such that the input has acceleration factor 'acceleration' and the target contains the remaining freqs plus some overlap.
    # Must be larger than 'acceleration'. If selfsup=False this value does not matter.
    'acceleration_total' : [3.0],
    #############################################################
    ### DEFINE THE MODEL ARCHITECTURE ###
    # Number of channels in the first layer.
    'chans' : [64],
    # Number of downsampling/upsampling layers in the UNet (0 or 1 still has one downsampling/upsampling).
    'num_pool_layers' : [4],
    # Use separate channels for real and imaginary part. Must be True.
    'two_channel_imag_real' : [True],
    #############################################################
    ### OPTIMIZER, LOSS FUNCTION, NUMBER OF EPOCHS, BATCH SIZE, INITIAL LEARNING RATE ###
    # Currently available RMSprop or Adam
    'optimizer' : ['RMSprop'],
    # List of loss functions for training. Currently available L1, L2 or SSIM loss or L2_kspace.
    'loss_functions' : [['L2_kspace']],
    # If True, then the network input is not cropped.
    'compute_sup_loss_in_kspace' : [True],
    # Maximal number of epochs.
    'num_epochs' : [1000],
    # Initial learning rate.
    'lr' : [1e-3],
    # Batch size for mini batch training.
    'batch_size' : [1],
    #############################################################
    ### LEARNING RATE SCHEME FOR DECAY ###
    # Currently available MultiStepLR or ReduceLROnPlateau
    'lr_scheduler' : ['ReduceLROnPlateau'],
    # If true, lr_convergence_break_counter and lr_min_break_counter are applied
    # (note: key name intentionally kept with its historical typo "deacy")
    'early_stop_lr_deacy' : [True],
    # Terminate training once this lr is reached.
    'lr_min' : [1e-6],
    # Once lr_min is reached training continues for this many epochs before terminated.
    'lr_min_break_counter' : [10],
    # Once for this many consecutive learning rate decays no improvement in val_loss is observed the training is early stopped.
    'lr_convergence_break_counter' : [2],
    # Decay lr by this factor.
    'lr_decay_factor' : [0.1],
    # If scheduler is MultiStepLR then decay lr after these number of epochs.
    'lr_milestones' : [[50,60]],
    # If scheduler is ReduceLROnPlateau, decay lr after these many epochs without improving val loss.
    'lr_patience' : [10],
    # If scheduler is ReduceLROnPlateau, improving val loss by less than this does not count as improvement.
    'lr_threshold' : [0.0001],
    # Metric used to determine a Plateau
    'decay_metric' : ['L2_kspace'],
    #############################################################
    ### CHOOSE OPTIONS FOR ACCELERATING TRAINING AND CHECKPOINTING ###
    # Number of workers for dataloader.
    'num_workers' : [8],
    # Enable logging to Tensorboard
    'tb_logging' : [True],
    # List of validation sample indices to be logged to Tensorboard. Put empty dictionary to not log images
    'log_val_images' : [{'file_brain_AXT2_210_2100189':3, 'file_brain_AXT2_200_2000396': 7, 'file_brain_AXT2_200_6002509': 12}],
    'log_train_images' : [{'file_brain_AXT2_207_2070586':8, 'file_brain_AXT2_210_6001651': 6}],
    # Log images every log_image_interval epochs
    'log_image_interval' : [8],
    # Optional: A list of epochs at which the model is saved as "checkpoint{}.pt".format(epoch)
    'epoch_checkpoints' : [None],
    # Interval for validation
    'val_interval' : [1],
    # Save images from the testset to the log directory. Dictionary containing filenames and slice numbers. Put empty dictionary to not log images
    'save_test_images': [{'file_brain_AXT2_207_2070041': 7, 'file_brain_AXT2_201_2010484': 12, 'file_brain_AXT2_203_2030348': 3,'file_brain_AXT2_209_6001477':9, 'file_brain_AXT2_210_6001600': 6, 'file_brain_AXT2_205_2050106': 7}],
}
# Sanity checks: every hyperparameter list must have one entry per experiment.
for key, values in hp_all_exps.items():
    # Expand lists with constant settings to the length of the number of
    # experiments (a single entry is treated as a broadcast setting).
    if len(values) == 1 and len(exp_nums) != 1:
        values = [values[0] for _ in range(len(exp_nums))]
        hp_all_exps[key] = values
    if len(values) != len(exp_nums):
        # Name the offending key in the exception instead of print-then-raise.
        raise ValueError(
            f"Specify hyperparameters for every experiment! "
            f"'{key}' has {len(values)} entries but there are {len(exp_nums)} experiments."
        )
# %%
# Print out the names of the new experiments
printouts = []
print('')
for ee in range(len(exp_nums)):
    for rr in hp_all_exps['num_runs'][ee]:
        # Experiment name encodes train size, architecture, batch size and lr.
        exp_name = (
            f"E{exp_nums[ee]}"
            f"_t{hp_all_exps['train_size'][ee]}"
            f"_l{hp_all_exps['num_pool_layers'][ee]}"
            f"c{hp_all_exps['chans'][ee]}"
            f"_bs{hp_all_exps['batch_size'][ee]}"
            f"_lr{str(np.round(hp_all_exps['lr'][ee], 6))[2:]}"
        )
        if rr > 0:
            exp_name = exp_name + f"_run{rr + 1}"
        printouts.append(exp_name)
print(printouts)
# %%
# Run every experiment: set up its directory and hyperparameters, then train/test.
for ee in range(len(exp_nums)):
    # Slice this experiment's settings out of the per-experiment lists.
    hp_exp = {}
    for k in hp_all_exps.keys():
        hp_exp[k] = hp_all_exps[k][ee]
    for rr in hp_exp['num_runs']:
        exp_name = 'E' + str(exp_nums[ee]) + \
            '_t' + str(hp_all_exps['train_size'][ee]) + \
            '_l' + str(hp_all_exps['num_pool_layers'][ee]) + \
            'c' + str(hp_all_exps['chans'][ee]) + \
            '_bs' + str(hp_all_exps['batch_size'][ee]) +\
            '_lr' + str(np.round(hp_all_exps['lr'][ee],6))[2:]
        if rr>0:
            exp_name = exp_name + '_run{}'.format(rr+1)
        if not os.path.isdir('./'+exp_name):
            os.mkdir('./'+exp_name)

        # Each run gets its own deterministic seed.
        hp_exp['seed'] = int(42 + 10*rr)
        hp_exp['exp_name'] = exp_name
        if not hp_exp['train_set']:
            hp_exp['train_set'] = f"./datasets/train_{hp_exp['train_size']}_filenames_slice.yaml"

        ########
        # Training
        ########
        try:
            if hp_exp['training']:
                print('\n{} - Training\n'.format(exp_name))
                hp_exp['mode'] = 'train'
                # Perform training
                train_meters, val_metric_dict = main_train(hp_exp)
                print('\n{} - Training finished\n'.format(exp_name))
        # Log failures of one run and continue with the next. This was a bare
        # `except:` before, which also swallowed KeyboardInterrupt/SystemExit
        # and so logged Ctrl-C as an experiment error instead of stopping.
        except Exception:
            with open("./"+exp_name+"/errors_train.txt", "a+") as text_file:
                error_str = traceback.format_exc()
                print(error_str, file=text_file)
            print(error_str)

        ########
        # Testing
        ########
        try:
            if hp_exp['testing']:
                for test_set in hp_exp['test_sets']:
                    print('\n{} - Testing {}\n'.format(exp_name, test_set))
                    # Prepare args for testing
                    hp_exp['test_set'] = test_set
                    hp_exp['mode'] = 'test'
                    hp_exp['resume_from_which_checkpoint'] = 'best'
                    # Create a directory reconstructions with all reconstructions
                    main_test(hp_exp)
                    print('\n{} - Testing finished\n'.format(exp_name))
        except Exception:
            with open("./"+exp_name+"/errors_test.txt", "a+") as text_file:
                error_str = traceback.format_exc()
                print(error_str, file=text_file)
            print(error_str)
# %%
| 11,447 | 44.070866 | 230 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/main.py |
#################
# Import python packages
import torch
import logging
import time
from torch.utils.tensorboard import SummaryWriter
import sys
import os
from torch.serialization import default_restore_location
from collections import defaultdict
import numpy as np
import torchvision
import pickle
import matplotlib.pyplot as plt
from packaging import version
from torch.nn import L1Loss, MSELoss
from functions.math import complex_abs, complex_mul, complex_conj
from functions.data.transforms import center_crop_to_smallest, normalize_to_given_mean_std
if version.parse(torch.__version__) >= version.parse("1.7.0"):
from functions.fftc import fft2c_new as fft2c
from functions.fftc import ifft2c_new as ifft2c
else:
from functions.fftc import fft2c_old as fft2c
from functions.fftc import ifft2c_old as ifft2c
# Implementation of SSIMLoss
from functions.training.losses import SSIMLoss
from functions.training.debug_helper import print_tensor_stats, save_figure
# Set seeds, create directories, set path to checkpoints if available
from functions.train_utils import setup_experiment
from functions.train_utils import load_checkpoint,save_checkpoint, init_logging
# Function that returns a MaskFunc object
from functions.data.subsample import create_mask_for_mask_type
from functions.data.transforms import UnetDataTransform
from functions.data.mri_dataset import SliceDataset
from functions.models.unet import Unet
# Create scheduler and optimizer objects
from functions.training.training_functions import configure_optimizers, Compute_batch_train_loss
# Class that allows to track the average of some quantity over an epoch
from functions.training.meters import AverageMeter
# Gives a customized tqdm object that can be used as iterable instead of train_loader
from functions.training.progress_bar import ProgressBar
# Functions to log images with a header to tensorboard
from functions.log_save_image_utils import plot_to_image, get_figure
def add_img_to_tensorboard(writer, epoch, name, input_img, target, output, val_ssim_fct, max_value, crop):
    """Log a vertical image strip [input | target | output | error] to tensorboard.

    Input and output are center-cropped to the target's spatial size, each
    panel is min-max normalized to [0, 1] individually, and the figure title
    shows the SSIM between output and target (computed before normalization).
    """
    output, _ = center_crop_to_smallest(output, target)
    input_img, _ = center_crop_to_smallest(input_img, target)

    # Normalize output to mean and std of target
    #target, output = normalize_to_given_mean_std(target, output)
    ssim_loss = 1 - val_ssim_fct(output, target, data_range=max_value)
    error = torch.abs(target - output)

    def _minmax(img):
        # Rescale a tensor to the [0, 1] range.
        img = img - img.min()
        return img / img.max()

    panels = [_minmax(img) for img in (input_img, target, output, error)]
    grid = torchvision.utils.make_grid(torch.cat(panels, dim=0), nrow=1, normalize=False)

    figsize = (3, 12) if crop else (3, 20)
    figure = get_figure(grid.cpu().numpy(), figsize=figsize, title=f"ssim={ssim_loss.item():.6f}")
    writer.add_image(name, plot_to_image(figure), epoch)
def main_train(hp_exp):
    """Train a U-Net MRI reconstruction model described by the dict `hp_exp`.

    Builds data loaders, model, optimizer/scheduler and loss meters from the
    hyper-parameter dict, then runs the epoch loop with periodic validation,
    tensorboard logging, checkpointing, learning-rate decay and two early
    stopping criteria (convergence over lr intervals, minimal lr).

    Returns:
        (train_meters, val_metric_dict): training meters of the last epoch and
        the validation metrics computed at the last validation epoch.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # ------------
    # setup:
    # Set seeds, create directories, set path to checkpoints if available
    # ------------
    hp_exp = setup_experiment(hp_exp)
    init_logging(hp_exp)
    writer = SummaryWriter(log_dir=hp_exp['log_path']) if hp_exp['tb_logging'] else None
    # Get list of filenames logged to tensorboard during validation (from the validation set)
    val_log_filenames_list = []
    for k in hp_exp['log_val_images'].keys():
        val_log_filenames_list.append(k)
    train_log_filenames_list = []
    for k in hp_exp['log_train_images'].keys():
        train_log_filenames_list.append(k)
    # Whether a given metric is better when larger ('max') or smaller ('min');
    # used to reset the early-stopping tracker in the right direction.
    mode_lookup = {
        'SSIM' : 'max',
        'PSNR' : 'max',
        'L1' : 'min',
        'L2' : 'min',
        'MSE' : 'min',
        'L2_kspace' : 'min',
        'L1_kspace' : 'min',
    }
    # ------------
    # data
    # ------------
    mask_func = create_mask_for_mask_type(
        hp_exp['mask_type'], hp_exp['selfsup'], hp_exp['center_fraction'], hp_exp['acceleration'], hp_exp['acceleration_total']
    )
    data_transform_train = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=hp_exp['use_mask_seed_for_training'], hp_exp=hp_exp,mode="train")
    data_transform_val = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=True, hp_exp=hp_exp,mode="val")
    def _init_fn(worker_id):
        # Per-worker numpy seed (NOTE(review): defined but not passed to the
        # DataLoaders below — confirm whether this is intentional).
        np.random.seed(12 + worker_id)
    trainset = SliceDataset(
        dataset=hp_exp['train_set'],
        path_to_dataset=hp_exp['data_path'],
        path_to_sensmaps=hp_exp['smaps_path'],
        provide_senmaps=hp_exp['provide_senmaps'],
        challenge=hp_exp['challenge'],
        transform=data_transform_train,
        use_dataset_cache=True,
    )
    train_loader = torch.utils.data.DataLoader(
        dataset=trainset,
        batch_size=hp_exp['batch_size'],
        num_workers=hp_exp['num_workers'],
        shuffle=True,
        generator=torch.Generator().manual_seed(hp_exp['seed']),
        pin_memory =True,
    )
    valset = SliceDataset(
        dataset=hp_exp['val_set'],
        path_to_dataset=hp_exp['data_path'],
        path_to_sensmaps=hp_exp['smaps_path'],
        provide_senmaps=hp_exp['provide_senmaps'],
        challenge=hp_exp['challenge'],
        transform=data_transform_val,
        use_dataset_cache=True,
    )
    val_loader = torch.utils.data.DataLoader(
        dataset=valset,
        batch_size=1,
        num_workers=hp_exp['num_workers'],
        shuffle=False,
        generator=torch.Generator().manual_seed(hp_exp['seed']),
    )
    # ------------
    # model
    # ------------
    # Two input/output channels when the net works on stacked real/imaginary
    # parts, one channel for magnitude images.
    if hp_exp['two_channel_imag_real']:
        in_chans = 2
    else:
        in_chans = 1
    model = Unet(
        in_chans=in_chans,
        out_chans=in_chans,
        chans=hp_exp['chans'],
        num_pool_layers=hp_exp['num_pool_layers'],
        drop_prob=0.0,
    ).to(device)
    logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")#
    # ------------
    # trainer
    # ------------
    optimizer, scheduler = configure_optimizers(hp_exp, model.parameters())
    compute_batch_train_loss = Compute_batch_train_loss()
    train_meters = {'train_' + name: AverageMeter() for name in (hp_exp['loss_functions'])}
    if len(hp_exp['loss_functions']) > 1:
        train_meters['cumulated_loss'] = AverageMeter()
    train_meters['train_L2_gt_abs'] = AverageMeter()
    valid_meters = {'val_SSIM' : AverageMeter(), 'val_PSNR' : AverageMeter(), 'val_L1' : AverageMeter(), 'val_L2' : AverageMeter(),
                    'val_L2_kspace': AverageMeter(), 'val_L2_gt_abs': AverageMeter()}
    if hp_exp['two_channel_imag_real']:
        train_meters['train_L2_gt_comp'] = AverageMeter()
        valid_meters['val_L2_gt_comp'] = AverageMeter()
    val_ssim_fct = SSIMLoss()
    val_l1_fct = L1Loss(reduction='sum')
    val_mse_fct = MSELoss()
    val_mse_reduceSum_fct = MSELoss(reduction='sum')
    # ------------
    # load a stored model if available
    # ------------
    if hp_exp['restore_file']:
        load_checkpoint(hp_exp, model, optimizer, scheduler)
    #########
    # Training
    save_train_figures = True
    save_val_figures = False
    mask_dict = {}
    # `save_checkpoint` carries training state (start_epoch, global_step,
    # best_score, lr bookkeeping) as function attributes across epochs.
    for epoch in range(save_checkpoint.start_epoch, hp_exp['num_epochs']):
        start = time.process_time()
        train_bar = ProgressBar(train_loader, epoch)
        for meter in train_meters.values():
            meter.reset()
        for batch_id, batch in enumerate(train_bar):
            hp_exp['mode'] = 'train'
            model.train()
            save_checkpoint.global_step +=1
            binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = batch
            input_image=input_image.to(device)
            target_image=target_image.to(device)
            target_kspace=target_kspace.to(device)
            input_kspace=input_kspace.to(device)
            input_mask=input_mask.to(device)
            target_mask=target_mask.to(device)
            target_mask_weighted=target_mask_weighted.to(device)
            ground_truth_image=ground_truth_image.to(device)
            sens_maps=sens_maps.to(device)
            mean=mean.to(device)
            std=std.to(device)
            binary_background_mask=binary_background_mask.to(device)
            output = model(input_image)
            # Undo the input normalization applied by the data transform.
            output = output * std + mean
            output_tensorboard = output.detach().clone()
            ################################
            # Compute the training loss
            ################################
            if hp_exp['selfsup'] or hp_exp['compute_sup_loss_in_kspace']:
                # move complex dim to end
                output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
                output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
                # Transform coil images to kspace
                output_kspace = fft2c(output_per_coil_imgs)
                # Loss is only evaluated on the target-mask k-space locations.
                output_kspace = output_kspace * target_mask_weighted + 0.0
                target_kspace = target_kspace * target_mask_weighted + 0.0
                output_train_loss = output_kspace
                target_train_loss = target_kspace
            else:
                output_train_loss = output
                target_train_loss = target_image
            # Use max value per ground truth slice instead of per volume to compute ssim and psnr in image domain
            max_value = ground_truth_image.max().unsqueeze(0)
            train_loss = compute_batch_train_loss.get_batch_train_loss(hp_exp, output_train_loss, target_train_loss, max_value, train_meters)
            model.zero_grad()
            train_loss.backward()
            optimizer.step()
            ################################
            # Compute train metrics that can be compared over all different setups
            ################################
            # Apply center cropping to outputs if necessary
            output_train_metrics, _ = center_crop_to_smallest(output_tensorboard, ground_truth_image)
            target_image_train_metrics, _ = center_crop_to_smallest(target_image, ground_truth_image)
            # Apply binary masking to outputs if binary masks are given
            binary_background_mask, _ = center_crop_to_smallest(binary_background_mask, ground_truth_image)
            output_train_metrics = output_train_metrics * binary_background_mask
            if hp_exp['two_channel_imag_real']:
                # target_image is already masked (if possible)
                loss = val_mse_reduceSum_fct(output_train_metrics, target_image_train_metrics) / torch.sum(torch.abs(target_image_train_metrics)**2)
                train_meters["train_L2_gt_comp"].update(loss.item())
                # prepare for train_L2_gt_abs
                output_train_metrics = complex_abs(torch.moveaxis(output_train_metrics , 1, -1 )).unsqueeze(1)
            loss = val_mse_reduceSum_fct(output_train_metrics, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
            train_meters["train_L2_gt_abs"].update(loss.item())
            train_bar.log(dict(**train_meters), verbose=True)
            ################################
            # Log some training images to tensorboard
            ################################
            if hp_exp['tb_logging'] and fname[0] in train_log_filenames_list and epoch % hp_exp['log_image_interval'] == 0:
                if slice_num.item() == hp_exp['log_train_images'][fname[0]]:
                    with torch.no_grad():
                        if hp_exp['two_channel_imag_real']:
                            crop = False
                            # Log real channel, imaginary channel and magnitude separately.
                            name = f"train_{fname[0]}_s{slice_num.item()}_ch1/"+hp_exp['exp_name']
                            inp = input_image[:,0,:,:].unsqueeze(1)
                            tar = target_image[:,0,:,:].unsqueeze(1)
                            out = output_tensorboard[:,0,:,:].unsqueeze(1)
                            # If we want to look at center crop then crop target
                            if crop:
                                tar, _ = center_crop_to_smallest(tar, target_image)
                            add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                            name = f"train_{fname[0]}_s{slice_num.item()}_ch2/"+hp_exp['exp_name']
                            inp = input_image[:,1,:,:].unsqueeze(1)
                            tar = target_image[:,1,:,:].unsqueeze(1)
                            out = output_tensorboard[:,1,:,:].unsqueeze(1)
                            # If we want to look at center crop then crop target
                            if crop:
                                tar, _ = center_crop_to_smallest(tar, target_image)
                            add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                            name = f"train_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
                            inp = complex_abs(torch.moveaxis(input_image , 1, -1 )).unsqueeze(1)
                            tar = complex_abs(torch.moveaxis(target_image , 1, -1 )).unsqueeze(1)
                            out = complex_abs(torch.moveaxis(output_tensorboard , 1, -1 )).unsqueeze(1)
                            # If we want to look at center crop then crop target
                            if crop:
                                tar, _ = center_crop_to_smallest(tar, target_image)
                            add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                        else:
                            name = f"train_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
                            add_img_to_tensorboard(writer, epoch, name, input_image, target_image, output_tensorboard, val_ssim_fct, max_value, crop=True)
        if epoch % hp_exp['val_interval'] == 0:
            # ---------------- validation epoch ----------------
            hp_exp['mode'] = 'val'
            model.eval()
            for meter in valid_meters.values():
                meter.reset()
            valid_bar = ProgressBar(val_loader, epoch)
            for sample_id, sample in enumerate(valid_bar):
                with torch.no_grad():
                    binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = sample
                    input_image=input_image.to(device)
                    input_kspace=input_kspace.to(device)
                    input_mask=input_mask.to(device)
                    target_image=target_image.to(device)
                    target_kspace=target_kspace.to(device)
                    target_mask=target_mask.to(device)
                    target_mask_weighted=target_mask_weighted.to(device)
                    ground_truth_image=ground_truth_image.to(device)
                    sens_maps=sens_maps.to(device)
                    mean=mean.to(device)
                    std=std.to(device)
                    binary_background_mask=binary_background_mask.to(device)
                    output = model(input_image)
                    output = output * std + mean
                    ###############################################################
                    # Validation L1 and L2 are computed as during training
                    if hp_exp['selfsup'] or hp_exp['compute_sup_loss_in_kspace']:
                        # move complex dim to end
                        output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
                        output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
                        # Transform coil images to kspace
                        output_kspace = fft2c(output_per_coil_imgs)
                        output_kspace_fully_sampled = output_kspace.clone()
                        output_kspace = output_kspace * target_mask_weighted + 0.0
                        target_kspace = target_kspace * target_mask_weighted + 0.0
                        L2kspace = val_mse_reduceSum_fct(output_kspace, target_kspace) / torch.sum(torch.abs(target_kspace)**2)
                        valid_meters["val_L2_kspace"].update(L2kspace.item())
                        # L1 and L2 validation are computed on the full images without cropping and complex absolute value and without masking or dc
                        # L1 validation loss
                        loss = val_l1_fct(output, target_image) / torch.sum(torch.abs(target_image))
                        valid_meters["val_L1"].update(loss.item())
                        # L2 validation loss
                        loss = val_mse_reduceSum_fct(output, target_image) / torch.sum(torch.abs(target_image)**2)
                        valid_meters["val_L2"].update(loss.item())
                    else:
                        if hp_exp['two_channel_imag_real']:
                            # To enable data consistency later on
                            # move complex dim to end
                            output_per_coil_imgs = torch.moveaxis(output , 1, -1 )
                            output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
                            # Transform coil images to kspace
                            output_kspace = fft2c(output_per_coil_imgs)
                            #save_figure(torch.log(complex_abs(output_kspace[0,0,:,:]) + 1e-9).detach().cpu(),'output_kspace_val',hp_exp,save=save_val_figures)
                            output_kspace_fully_sampled = output_kspace.clone()
                            L2kspace = val_mse_reduceSum_fct(output_kspace, target_kspace) / torch.sum(torch.abs(target_kspace)**2)
                            valid_meters["val_L2_kspace"].update(L2kspace.item())
                        # L1 and L2 validation are computed on the full images without cropping and complex absolute value and without masking or dc
                        # L1 validation loss
                        loss = val_l1_fct(output, target_image) / torch.sum(torch.abs(target_image))
                        valid_meters["val_L1"].update(loss.item())
                        # L2 validation loss
                        loss = val_mse_reduceSum_fct(output, target_image) / torch.sum(torch.abs(target_image)**2)
                        valid_meters["val_L2"].update(loss.item())
                    ###############################################################
                    ###############################################################
                    # Validation PSNR and SSIM are computed on masked, cropped and real images
                    # Apply masking before computing scores in the image domain in order to eliminate artifacts in the background
                    output = output * binary_background_mask
                    output_tensorboard = output.clone()
                    # PSNR and SSIM are computed on the center cropped magnitude reconstruction
                    output, _ = center_crop_to_smallest(output, ground_truth_image)
                    if hp_exp['two_channel_imag_real']:
                        target_image_train_metrics, _ = center_crop_to_smallest(target_image, ground_truth_image)
                        loss = val_mse_reduceSum_fct(output, target_image_train_metrics) / torch.sum(torch.abs(target_image_train_metrics)**2)
                        valid_meters["val_L2_gt_comp"].update(loss.item())
                        # Move complex dim to end, apply complex abs, insert channel dimension
                        output = complex_abs(torch.moveaxis(output , 1, -1 ))
                        output = output.unsqueeze(1)
                    # Use max value per ground truth slice instead of per volume
                    max_value = ground_truth_image.max().unsqueeze(0)
                    # SSIM
                    ssim_loss = 1-val_ssim_fct(output, ground_truth_image, data_range=max_value)
                    valid_meters["val_SSIM"].update(ssim_loss.item())
                    loss = val_mse_reduceSum_fct(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
                    valid_meters["val_L2_gt_abs"].update(loss.item())
                    # MSE for PSNR
                    loss = val_mse_fct(output, ground_truth_image) # reduce with mean
                    # PSNR
                    psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(loss)
                    valid_meters["val_PSNR"].update(psnr.item())
                    valid_bar.log(dict(**valid_meters), verbose=True)
                    if hp_exp['tb_logging'] and fname[0] in val_log_filenames_list and epoch % hp_exp['log_image_interval'] == 0:
                        if slice_num.item() == hp_exp['log_val_images'][fname[0]]:
                            if hp_exp['two_channel_imag_real']:
                                crop = False
                                name = f"val_{fname[0]}_s{slice_num.item()}_ch1/"+hp_exp['exp_name']
                                inp = input_image[:,0,:,:].unsqueeze(1)
                                tar = target_image[:,0,:,:].unsqueeze(1)
                                out = output_tensorboard[:,0,:,:].unsqueeze(1)
                                # If we want to look at center crop then crop target
                                if crop:
                                    tar, _ = center_crop_to_smallest(tar, ground_truth_image)
                                add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                                name = f"val_{fname[0]}_s{slice_num.item()}_ch2/"+hp_exp['exp_name']
                                inp = input_image[:,1,:,:].unsqueeze(1)
                                tar = target_image[:,1,:,:].unsqueeze(1)
                                out = output_tensorboard[:,1,:,:].unsqueeze(1)
                                # If we want to look at center crop then crop target
                                if crop:
                                    tar, _ = center_crop_to_smallest(tar, ground_truth_image)
                                add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                                name = f"val_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
                                inp = complex_abs(torch.moveaxis(input_image , 1, -1 )).unsqueeze(1)
                                tar = complex_abs(torch.moveaxis(target_image , 1, -1 )).unsqueeze(1)
                                out = complex_abs(torch.moveaxis(output_tensorboard , 1, -1 )).unsqueeze(1)
                                # If we want to look at center crop then crop target
                                if crop:
                                    tar, _ = center_crop_to_smallest(tar, ground_truth_image)
                                add_img_to_tensorboard(writer, epoch, name, inp, tar, out, val_ssim_fct, max_value, crop)
                            else:
                                name = f"val_{fname[0]}_s{slice_num.item()}_abs/"+hp_exp['exp_name']
                                add_img_to_tensorboard(writer, epoch, name, input_image, target_image, output_tensorboard, val_ssim_fct, max_value, crop=True)
            if hp_exp['two_channel_imag_real']:
                val_metric_dict = { #keys should have the same name as the keys used to pick a training loss in hp_exp['loss_functions']
                    'SSIM' : valid_meters['val_SSIM'].avg,
                    'L1' : valid_meters['val_L1'].avg,
                    'L2' : valid_meters['val_L2'].avg,
                    'PSNR' : valid_meters['val_PSNR'].avg,
                    'L2_kspace' : valid_meters['val_L2_kspace'].avg,
                    'L2_gt_abs' : valid_meters["val_L2_gt_abs"].avg,
                    'L2_gt_comp' : valid_meters["val_L2_gt_comp"].avg,
                }
            else:
                val_metric_dict = { #keys should have the same name as the keys used to pick a training loss in hp_exp['loss_functions']
                    'SSIM' : valid_meters['val_SSIM'].avg,
                    'L1' : valid_meters['val_L1'].avg,
                    'L2' : valid_meters['val_L2'].avg,
                    'PSNR' : valid_meters['val_PSNR'].avg,
                    'L2_kspace' : valid_meters['val_L2_kspace'].avg,
                    'L2_gt_abs' : valid_meters["val_L2_gt_abs"].avg,
                }
            current_lr = save_checkpoint.current_lr
            current_best_score = save_checkpoint.best_score
            # Logging to tensorboard
            if hp_exp['tb_logging']:
                writer.add_scalar("lr", current_lr, epoch)
                writer.add_scalar("epoch", epoch, epoch)
                for tr_loss_name in train_meters.keys():
                    writer.add_scalar(tr_loss_name, train_meters[tr_loss_name].avg, epoch)
                for val_loss_name in val_metric_dict.keys():
                    writer.add_scalar('val_'+val_loss_name, val_metric_dict[val_loss_name], epoch)
            sys.stdout.flush()
            #### Learning rate decay
            score = val_metric_dict[hp_exp['decay_metric']]
            if hp_exp['lr_scheduler'] == 'MultiStepLR':
                scheduler.step()
            elif hp_exp['lr_scheduler'] == 'ReduceLROnPlateau':
                scheduler.step(score)
            else:
                raise ValueError('Scheduler is not defined')
            save_checkpoint(hp_exp, epoch, model, optimizer, scheduler, score=score) # This potentially updates save_checkpoint.best_score
            end = time.process_time() - start
            # Logging to train.log
            if (val_metric_dict[hp_exp['decay_metric']] < current_best_score and save_checkpoint.mode == "min") or (val_metric_dict[hp_exp['decay_metric']] > current_best_score and save_checkpoint.mode == "max"):
                logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3), New='Highscore')))
            else:
                logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=current_lr, time=np.round(end/60,3))))
            new_lr = optimizer.param_groups[0]["lr"]
            save_checkpoint.current_lr = new_lr # current lr during next epoch
            if hp_exp['early_stop_lr_deacy']:
                if (score < current_best_score and save_checkpoint.mode == "min") or (score > current_best_score and save_checkpoint.mode == "max"):
                    save_checkpoint.best_val_current_lr_interval = score
                #At every lr decay check if the model did not improve during the lr_convergence_break_counter many last lr intervals and break if it didn't.
                if new_lr < current_lr:
                    if save_checkpoint.best_val_current_lr_interval != save_checkpoint.best_score:
                        save_checkpoint.lr_interval_counter += 1
                        if save_checkpoint.lr_interval_counter == hp_exp['lr_convergence_break_counter']:
                            logging.info(f'lr decayed to {new_lr}. Break training due to convergence of val loss!')
                            break
                        else:
                            logging.info(f'lr decayed to {new_lr}. lr_interval_counter increased but do not yet break due to convergence of val loss!')
                    else:
                        save_checkpoint.best_val_current_lr_interval = float("inf") if mode_lookup[hp_exp['decay_metric']] == "min" else float("-inf")
                        save_checkpoint.lr_interval_counter = 0
                        logging.info(f'lr decayed to {new_lr}. No convergence detected. Reset lr_interval_counter.')
            if np.round(current_lr,10) <= hp_exp['lr_min']:
                if hp_exp['lr_min_break_counter'] == save_checkpoint.break_counter:
                    logging.info('Break training due to minimal learning rate constraint!')
                    break
                else:
                    save_checkpoint.break_counter += 1
        else:
            # ---------------- epoch without validation ----------------
            current_lr = save_checkpoint.current_lr
            if hp_exp['lr_scheduler'] == 'MultiStepLR':
                scheduler.step()
            else:
                raise ValueError('Scheduler is not defined')
            if hp_exp['tb_logging']:
                writer.add_scalar("lr", current_lr, epoch)
                writer.add_scalar("epoch", epoch, epoch)
                for tr_loss_name in train_meters.keys():
                    writer.add_scalar(tr_loss_name, train_meters[tr_loss_name].avg, epoch)
            new_lr = optimizer.param_groups[0]["lr"]
            save_checkpoint.current_lr = new_lr # current lr during next epoch
            end = time.process_time() - start
            logging.info(train_bar.print(dict(**train_meters, lr=current_lr, time=np.round(end/60,3))))
    logging.info(f"Done training! Best PSNR {save_checkpoint.best_score:.5f} obtained after epoch {save_checkpoint.best_epoch}.")
    # return names of metrics logged to tensorboard
    # NOTE(review): val_metric_dict is unbound if the epoch loop never runs
    # (num_epochs <= start_epoch) — confirm callers never hit that case.
    return train_meters, val_metric_dict
def main_test(hp_exp):
    """Evaluate a trained model on the test set given by `hp_exp['test_set']`.

    Loads the checkpoint selected by `hp_exp['resume_from_which_checkpoint']`,
    runs inference over every test slice and computes SSIM/PSNR/L1/L2 twice:
    once on the binary-masked network output and once after k-space data
    consistency. Mean/std of both metric sets are printed, logged and pickled
    next to the experiment's log directory; selected reconstructions are saved
    as PNG figures.
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    # ------------
    # setup:
    # Set seeds, create directories, set path to checkpoints if available
    # ------------
    hp_exp = setup_experiment(hp_exp)
    init_logging(hp_exp)
    # Get list of filenames saved during testing (from the test set)
    test_log_filenames_list = []
    for k in hp_exp['save_test_images'].keys():
        test_log_filenames_list.append(k)
    # ------------
    # data
    # ------------
    # For testing we want the target mask to be all ones.
    # This can be achieved either by setting self_sup=False or acceleration_total=1.0
    mask_func = create_mask_for_mask_type(
        hp_exp['mask_type'], self_sup=False, center_fraction=hp_exp['center_fraction'], acceleration=hp_exp['acceleration'], acceleration_total=1.0
    )
    data_transform = UnetDataTransform(hp_exp['challenge'],mask_func=mask_func, use_seed=True, hp_exp=hp_exp, mode="test")
    testset = SliceDataset(
        dataset=hp_exp['test_set'],
        path_to_dataset=hp_exp['data_path'],
        path_to_sensmaps=hp_exp['smaps_path'],
        provide_senmaps=hp_exp['provide_senmaps'],
        challenge=hp_exp['challenge'],
        transform=data_transform,
        use_dataset_cache=True,
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=testset,
        batch_size=1,
        num_workers=hp_exp['num_workers'],
        shuffle=False,
        generator=torch.Generator().manual_seed(hp_exp['seed']),
    )
    # ------------
    # model
    # ------------
    # Two channels for stacked real/imaginary input, one for magnitude.
    if hp_exp['two_channel_imag_real']:
        in_chans = 2
    else:
        in_chans = 1
    model = Unet(
        in_chans=in_chans,
        out_chans=in_chans,
        chans=hp_exp['chans'],
        num_pool_layers=hp_exp['num_pool_layers'],
        drop_prob=0.0,
    ).to(device)
    # ------------
    # load a stored model if available
    # ------------
    if hp_exp['restore_file']:
        load_checkpoint(hp_exp, model, None, None)
    test_ssim_fct = SSIMLoss()
    test_l1_fct_sum = L1Loss(reduction='sum')
    test_mse_fct_sum = MSELoss(reduction='sum')
    test_mse_fct_mean = MSELoss(reduction='mean')
    model.eval()
    test_bar = ProgressBar(test_loader, epoch=0)
    # Collect scores
    ssim_vals = []
    L1_vals = []
    psnr_vals = []
    L2_vals = []
    # Always compute both, scores after binary masking and after data consistency.
    ssim_vals_dc = []
    L1_vals_dc = []
    psnr_vals_dc = []
    L2_vals_dc = []
    for sample_id, sample in enumerate(test_bar):
        with torch.no_grad():
            binary_background_mask, input_image, input_kspace, input_mask, target_image, target_kspace, target_mask, target_mask_weighted, ground_truth_image, sens_maps, mean, std, fname, slice_num = sample
            input_image=input_image.to(device)
            input_kspace=input_kspace.to(device)
            input_mask=input_mask.to(device)
            target_kspace=target_kspace.to(device)
            target_mask=target_mask.to(device)
            ground_truth_image=ground_truth_image.to(device)
            sens_maps=sens_maps.to(device)
            mean=mean.to(device)
            std=std.to(device)
            binary_background_mask=binary_background_mask.to(device)
            output = model(input_image)
            # Undo the input normalization applied by the data transform.
            output = output * std + mean
            #####################
            sens_maps_conj = complex_conj(sens_maps)
            output_per_coil_imgs = torch.moveaxis(output.clone() , 1, -1 )
            output_per_coil_imgs = complex_mul(output_per_coil_imgs, sens_maps)
            # Transform coil images to kspace
            output_kspace = fft2c(output_per_coil_imgs)
            ################
            ################
            # Get scores in image domain after data consistency
            # Keep measured k-space samples, fill the rest with the network prediction.
            output_image_data_consistency = ifft2c(output_kspace* (1-input_mask) + input_kspace)
            # Coil-combine with the conjugate sensitivity maps.
            output_image_data_consistency = complex_mul(output_image_data_consistency, sens_maps_conj)
            output_image_data_consistency = output_image_data_consistency.sum(dim=1, keepdim=False)
            output_image_data_consistency = torch.moveaxis(output_image_data_consistency , -1, 1)
            output_image_data_consistency, _ = center_crop_to_smallest(output_image_data_consistency, ground_truth_image)
            output_image_data_consistency = complex_abs(torch.moveaxis(output_image_data_consistency , 1, -1 ))
            output_image_dc = output_image_data_consistency.unsqueeze(1)
            # L1
            loss = test_l1_fct_sum(output_image_dc, ground_truth_image) / torch.sum(torch.abs(ground_truth_image))
            L1_vals_dc.append(loss.item())
            # L2
            loss = test_mse_fct_sum(output_image_dc, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
            L2_vals_dc.append(loss.item())
            max_value = ground_truth_image.max().unsqueeze(0)
            # MSE for PSNR
            mse = test_mse_fct_mean(output_image_dc, ground_truth_image)
            # PSNR
            psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
            psnr_vals_dc.append(psnr.item())
            # SSIM
            ssim_loss = 1-test_ssim_fct(output_image_dc, ground_truth_image, data_range=max_value)
            ssim_vals_dc.append(ssim_loss.item())
            output = output * binary_background_mask
            ######################
            ######################
            # Get scores after binary masking without data consistency
            # at test time L1, L2, PSNR and SSIM are all computed on center cropped magnitude values
            output, _ = center_crop_to_smallest(output, ground_truth_image)
            if hp_exp['two_channel_imag_real']:
                # Move complex dim to end, apply complex abs, insert channel dimension
                output = complex_abs(torch.moveaxis(output , 1, -1 ))
                output = output.unsqueeze(1)
            # L1
            loss = test_l1_fct_sum(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image))
            L1_vals.append(loss.item())
            # L2
            loss = test_mse_fct_sum(output, ground_truth_image) / torch.sum(torch.abs(ground_truth_image)**2)
            L2_vals.append(loss.item())
            # Normalize output to mean and std of target
            #target, output = normalize_to_given_mean_std(target, output)
            # Use max value per ground truth slice instead of per volume
            max_value = ground_truth_image.max().unsqueeze(0)
            # MSE for PSNR
            mse = test_mse_fct_mean(output, ground_truth_image)
            # PSNR
            psnr = 20 * torch.log10(torch.tensor(max_value.item()))- 10 * torch.log10(mse)
            psnr_vals.append(psnr.item())
            # SSIM
            ssim_loss = 1-test_ssim_fct(output, ground_truth_image, data_range=max_value)
            ssim_vals.append(ssim_loss.item())
            ######################
            # Save some test images
            if fname[0] in test_log_filenames_list:
                if slice_num.item() == hp_exp['save_test_images'][fname[0]]:
                    error = torch.abs(ground_truth_image - output)
                    error_dc = torch.abs(ground_truth_image - output_image_dc)
                    # Min-max normalize each panel; the two error maps share a
                    # common scale so they are visually comparable.
                    output = output - output.min()
                    output = output / output.max()
                    output_image_dc = output_image_dc - output_image_dc.min()
                    output_image_dc = output_image_dc / output_image_dc.max()
                    ground_truth_image = ground_truth_image - ground_truth_image.min()
                    ground_truth_image = ground_truth_image / ground_truth_image.max()
                    error = error - error.min()
                    error_dc = error_dc - error_dc.min()
                    max_norm = torch.stack([error,error_dc]).max()
                    error = error / max_norm
                    error_dc = error_dc / max_norm
                    image = torch.cat([ground_truth_image, ground_truth_image, output, output_image_dc, error, error_dc], dim=0)
                    image = torchvision.utils.make_grid(image, nrow=2, normalize=False, value_range=(0,1), pad_value=1)
                    figure = get_figure(image.cpu().numpy(),figsize=(8,12),title=f"ssim={ssim_loss.item():.6f}, ssim_dc={ssim_vals_dc[-1]:.6f}")
                    if not os.path.isdir(hp_exp['log_path'] + 'test_imgs/'):
                        os.mkdir(hp_exp['log_path'] + 'test_imgs/')
                    plt.savefig(hp_exp['log_path'] + f"test_imgs/{fname[0]}_s{slice_num.item()}.png", dpi='figure')
                    plt.close()
    # Aggregate per-slice scores into mean/std over the whole test set.
    test_metric_dict = {
        'ssim_m' : np.mean(np.array(ssim_vals)),
        'ssim_s' : np.std(np.array(ssim_vals)),
        'L1_m' : np.mean(np.array(L1_vals)),
        'L1_s' : np.std(np.array(L1_vals)),
        'psnr_m' : np.mean(np.array(psnr_vals)),
        'psnr_s' : np.std(np.array(psnr_vals)),
        'L2_m' : np.mean(np.array(L2_vals)),
        'L2_s' : np.std(np.array(L2_vals)),
    }
    print(test_metric_dict)
    test_metric_dict_dc = {
        'ssim_m' : np.mean(np.array(ssim_vals_dc)),
        'ssim_s' : np.std(np.array(ssim_vals_dc)),
        'L1_m' : np.mean(np.array(L1_vals_dc)),
        'L1_s' : np.std(np.array(L1_vals_dc)),
        'psnr_m' : np.mean(np.array(psnr_vals_dc)),
        'psnr_s' : np.std(np.array(psnr_vals_dc)),
        'L2_m' : np.mean(np.array(L2_vals_dc)),
        'L2_s' : np.std(np.array(L2_vals_dc)),
    }
    print(test_metric_dict_dc)
    # Derive a result-file prefix from the test set yaml path.
    testset_name = hp_exp['log_path'] + hp_exp['test_set'][hp_exp['test_set'].find('datasets/')+9 : hp_exp['test_set'].find('.yaml')]
    pickle.dump( test_metric_dict, open(testset_name + '_metrics_' + hp_exp['resume_from_which_checkpoint'] + '.p', "wb" ) )
    pickle.dump( test_metric_dict_dc, open(testset_name + '_metrics_DC_' + hp_exp['resume_from_which_checkpoint'] + '.p', "wb" ) )
    logging.info("Evaluate testset : {}".format(testset_name))
    for test_metric in test_metric_dict.keys():
        logging.info("{}: {}".format(test_metric, test_metric_dict[test_metric]))
    logging.info("Evaluate testset : {} with data consistency".format(testset_name))
    for test_metric in test_metric_dict_dc.keys():
        logging.info("{}: {}".format(test_metric, test_metric_dict_dc[test_metric]))
| 40,715 | 45.961938 | 214 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/coil_combine.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from functions.math import complex_abs_sq
def rss(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Compute the Root Sum of Squares (RSS).

    RSS is computed assuming that dim is the coil dimension.

    Args:
        data: The input tensor
        dim: The dimensions along which to apply the RSS transform

    Returns:
        The RSS value.
    """
    squared = data ** 2
    return squared.sum(dim).sqrt()
def rss_complex(data: torch.Tensor, dim: int = 0) -> torch.Tensor:
    """
    Root-sum-of-squares (RSS) combination for complex-valued input.

    The input stores complex values as a trailing real/imaginary pair;
    the squared magnitude of each entry is summed along ``dim`` (assumed
    to be the coil dimension) before taking the square root.

    Args:
        data: The input tensor with a trailing complex dimension of size 2.
        dim: The dimension along which to apply the RSS reduction.

    Returns:
        The RSS value (``dim`` is reduced away).
    """
    magnitude_sq = complex_abs_sq(data)
    return magnitude_sq.sum(dim).sqrt()
| 1,015 | 22.627907 | 66 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/math.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def complex_mul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """
    Multiply two complex tensors stored as real arrays whose last
    dimension (size 2) holds the real and imaginary parts.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.
        y: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.

    Raises:
        ValueError: If either input lacks a trailing complex dimension.
    """
    if x.shape[-1] != 2 or y.shape[-1] != 2:
        raise ValueError("Tensors do not have separate complex dim.")
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    # (a+bi)(c+di) = (ac - bd) + (ad + bc)i
    real = xr * yr - xi * yi
    imag = xr * yi + xi * yr
    return torch.stack((real, imag), dim=-1)
def complex_conj(x: torch.Tensor) -> torch.Tensor:
    """
    Complex conjugate of a tensor whose last dimension (size 2) holds
    the real and imaginary parts: the imaginary part is negated.

    Args:
        x: A PyTorch tensor with the last dimension of size 2.

    Returns:
        A PyTorch tensor with the last dimension of size 2.

    Raises:
        ValueError: If the input lacks a trailing complex dimension.
    """
    if x.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    real, imag = x[..., 0], x[..., 1]
    return torch.stack((real, -imag), dim=-1)
def complex_abs(data: torch.Tensor) -> torch.Tensor:
    """
    Absolute value (magnitude) of a complex tensor stored as a real
    array whose final dimension of size 2 holds real/imaginary parts.

    Args:
        data: A complex-valued tensor; size of the final dimension is 2.

    Returns:
        The element-wise magnitude (final dimension reduced away).

    Raises:
        ValueError: If the input lacks a trailing complex dimension.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    return torch.sqrt((data ** 2).sum(dim=-1))
def complex_abs_sq(data: torch.Tensor) -> torch.Tensor:
    """
    Squared magnitude of a complex tensor stored as a real array whose
    final dimension of size 2 holds real/imaginary parts.

    Args:
        data: A complex-valued tensor; size of the final dimension is 2.

    Returns:
        The element-wise squared magnitude (final dimension reduced away).

    Raises:
        ValueError: If the input lacks a trailing complex dimension.
    """
    if data.shape[-1] != 2:
        raise ValueError("Tensor does not have separate complex dim.")
    return data.pow(2).sum(dim=-1)
def tensor_to_complex_np(data: torch.Tensor) -> np.ndarray:
    """
    Convert a torch tensor with a trailing real/imaginary pair into a
    genuinely complex-valued numpy array.

    Args:
        data: Input tensor whose last dimension (size 2) holds the real
            and imaginary parts.

    Returns:
        Complex numpy version of data (trailing dimension folded away).
    """
    arr = data.numpy()
    real, imag = arr[..., 0], arr[..., 1]
    return real + 1j * imag
| 2,728 | 25.754902 | 77 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/log_save_image_utils.py | import matplotlib.pyplot as plt
import torchvision
import io
import torch
import numpy as np
def plot_to_image(figure):
    """Render a matplotlib figure to a PNG image tensor.

    The figure is serialized to an in-memory PNG buffer, closed (so it
    is not also displayed, and is inaccessible after this call), and the
    PNG bytes are decoded back into a uint8 image tensor.
    """
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    # Closing prevents the figure from being rendered a second time.
    plt.close(figure)
    png_buffer.seek(0)
    raw = np.frombuffer(png_buffer.getvalue(), dtype=np.uint8)
    frame_tensor = torch.tensor(raw, device='cpu')
    image = torchvision.io.decode_png(frame_tensor)
    return image
def get_figure(image, figsize, title):
    """Return a matplotlib figure displaying a given image.

    Args:
        image: 3-D array, either channels-first (C, H, W) with C in
            {1, 3}, or already channels-last (H, W, C).
        figsize: Optional (width, height) in inches; falsy values fall
            back to the matplotlib default.
        title: Title string for the single subplot.

    Returns:
        A matplotlib figure containing the image with ticks and grid
        removed.

    Raises:
        ValueError: If ``image`` is not 3-dimensional.
    """
    if len(image.shape) != 3:
        raise ValueError("Image dimensions not suitable for logging to tensorboard.")
    if image.shape[0] == 1:
        # Single channels-first channel: drop the channel axis, because
        # plt.imshow rejects (H, W, 1) arrays (valid shapes are (M, N),
        # (M, N, 3) and (M, N, 4)). The previous rollaxis produced
        # exactly that invalid (H, W, 1) shape.
        image = image[0]
    elif image.shape[0] == 3:
        # Channels-first RGB: imshow expects channels last.
        image = np.rollaxis(image, 0, 3)
    # Create a figure to contain the plot.
    if figsize:
        figure = plt.figure(figsize=figsize)
    else:
        figure = plt.figure()
    # Start next subplot.
    plt.subplot(1, 1, 1, title=title)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(image, cmap='gray')
    figure.tight_layout()
    return figure
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/train_utils.py |
import torch
import numpy as np
import random
import os
import glob
import logging
from torch.serialization import default_restore_location
from tensorboard.backend.event_processing import event_accumulator
import time
import matplotlib.pyplot as plt
def setup_experiment(hp_exp):
    '''
    Prepare an experiment run from the hyperparameter dict `hp_exp`.

    - Seeds torch (CPU + CUDA), numpy and random, and makes cuDNN
      deterministic, for reproducibility.
    - Creates the log/checkpoint directory and records 'log_path' and
      'log_file' into `hp_exp`.
    - Sets hp_exp['restore_file'] to the last/best checkpoint path when
      one exists and resuming is requested, else None.
    - Initializes the bookkeeping attributes on the save_checkpoint
      function that track validation progress across epochs.

    Returns the (mutated) `hp_exp` dict.
    '''
    # Reproducibility: deterministic cuDNN kernels and fixed seeds everywhere.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.manual_seed(hp_exp['seed'])
    torch.cuda.manual_seed(hp_exp['seed'])
    np.random.seed(hp_exp['seed'])
    random.seed(hp_exp['seed'])
    # All logs and checkpoints live under ./<exp_name>/log_files/.
    hp_exp['log_path'] = './'+ hp_exp['exp_name'] + '/log_files/'
    os.makedirs(hp_exp['log_path'] + 'checkpoints/', exist_ok=True)
    hp_exp['log_file'] = os.path.join(hp_exp['log_path'], "train.log")
    # Look for checkpoints to load from
    available_models = glob.glob(hp_exp['log_path'] + 'checkpoints/*.pt')
    if available_models and hp_exp['resume_from_which_checkpoint']=='last':
        hp_exp['restore_file'] = hp_exp['log_path'] + 'checkpoints/checkpoint_last.pt'
    elif available_models and hp_exp['resume_from_which_checkpoint']=='best':
        hp_exp['restore_file'] = hp_exp['log_path'] + 'checkpoints/checkpoint_best.pt'
    else:
        hp_exp['restore_file'] = None
    # Set attributes of the function save_checkpoint. They will be used to track the validation score and trigger saving a checkpoint
    # Maps the chosen decay metric to whether higher or lower is better.
    mode_lookup = {
        'SSIM' : 'max',
        'PSNR' : 'max',
        'L1' : 'min',
        'L2' : 'min',
        'MSE' : 'min',
        'L2_kspace' : 'min',
        'L1_kspace' : 'min',
    }
    save_checkpoint.best_epoch = -1
    save_checkpoint.last_epoch = 0
    save_checkpoint.start_epoch = 0
    save_checkpoint.global_step = 0
    save_checkpoint.current_lr = hp_exp['lr']
    # Counts epochs spent at the minimal learning rate (early-stop trigger).
    save_checkpoint.break_counter = 0
    # Best validation score within the current learning-rate interval;
    # initialized to the worst possible value for the metric's direction.
    save_checkpoint.best_val_current_lr_interval = float("inf") if mode_lookup[hp_exp['decay_metric']] == "min" else float("-inf")
    save_checkpoint.lr_interval_counter = 0
    save_checkpoint.mode = mode_lookup[hp_exp['decay_metric']]
    save_checkpoint.best_score = float("inf") if save_checkpoint.mode == "min" else float("-inf")
    return hp_exp
def init_logging(hp_exp):
    """Configure root logging to both the console and the experiment log file.

    Existing root handlers are removed first so repeated calls do not
    duplicate output. The log file is opened in append mode when resuming
    from a checkpoint, otherwise it is truncated. In training mode the
    full hyperparameter dict is logged once.
    """
    root_logger = logging.root
    for existing in root_logger.handlers[:]:
        root_logger.removeHandler(existing)
    file_mode = "a" if hp_exp['restore_file'] else "w"
    all_handlers = [
        logging.StreamHandler(),
        logging.FileHandler(hp_exp['log_file'], mode=file_mode),
    ]
    logging.basicConfig(
        handlers=all_handlers,
        format="[%(asctime)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
    if hp_exp['mode'] == 'train':
        logging.info("Arguments: {}".format(hp_exp))
def save_checkpoint(hp_exp, epoch, model, optimizer=None, scheduler=None, score=None):
    '''
    Save training state so a run can be interrupted and resumed.

    The function carries its own bookkeeping as attributes (initialized
    in setup_experiment, restored in load_checkpoint):
    - best_score / best_epoch: best validation score so far and when it
      was achieved
    - last_epoch: the current epoch
    - break_counter: number of epochs spent at the minimal lr
    - best_val_current_lr_interval: best val score in the current
      lr interval
    - lr_interval_counter: lr intervals without improvement
    - mode: "min" or "max", direction of the validation metric

    Always writes checkpoint_last.pt; additionally writes
    checkpoint<epoch>.pt at configured epochs and checkpoint_best.pt
    when `score` improves on the previous best.
    '''
    save_checkpoint.last_epoch = epoch
    # Snapshot the previous best BEFORE updating it, so the
    # checkpoint_best write below triggers exactly when this epoch improved.
    best_score = save_checkpoint.best_score
    if (score < best_score and save_checkpoint.mode == "min") or (score > best_score and save_checkpoint.mode == "max"):
        save_checkpoint.best_epoch = epoch
        save_checkpoint.best_score = score
    # Normalize to lists so multi-model/-optimizer setups share one code path.
    model = [model] if model is not None and not isinstance(model, list) else model
    optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
    scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
    state_dict = {
        "last_step": save_checkpoint.global_step, #set
        "last_score": score, #set
        "break_counter": save_checkpoint.break_counter,
        "best_val_current_lr_interval": save_checkpoint.best_val_current_lr_interval,
        "lr_interval_counter": save_checkpoint.lr_interval_counter,
        "last_epoch": save_checkpoint.last_epoch, #set
        "best_epoch": save_checkpoint.best_epoch, #set
        "current_lr":save_checkpoint.current_lr, #set
        "mode": save_checkpoint.mode,
        "best_score": getattr(save_checkpoint, "best_score", None), #set
        "model": [m.state_dict() for m in model] if model is not None else None,
        "optimizer": [o.state_dict() for o in optimizer] if optimizer is not None else None,
        "scheduler": [s.state_dict() for s in scheduler] if scheduler is not None else None,
        "args": hp_exp,
    }
    torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint_last.pt"))
    # Optional per-epoch snapshots at user-configured epochs.
    if hp_exp['epoch_checkpoints']:
        if epoch in hp_exp['epoch_checkpoints']:
            torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint{}.pt".format(epoch)))
    # Compare against the pre-update snapshot: save "best" only on improvement.
    if (score < best_score and save_checkpoint.mode == "min") or (score > best_score and save_checkpoint.mode == "max"):
        torch.save(state_dict, os.path.join(hp_exp['log_path'] + 'checkpoints/', "checkpoint_best.pt"))
def load_checkpoint(hp_exp, model=None, optimizer=None, scheduler=None):
    """
    Restore training state written by save_checkpoint.

    Loads hp_exp['restore_file'] (mapped to CPU), repopulates the
    bookkeeping attributes on save_checkpoint, and loads the state dicts
    of the given model(s), optimizer(s) and scheduler(s) in place.
    start_epoch is set to last_epoch + 1 so training resumes at the
    next epoch.
    """
    print('restoring model..')
    # Map all storages to CPU so a GPU checkpoint loads on any machine.
    state_dict = torch.load(hp_exp['restore_file'], map_location=lambda s, l: default_restore_location(s, "cpu"))
    save_checkpoint.last_epoch = state_dict["last_epoch"]
    save_checkpoint.start_epoch = state_dict["last_epoch"]+1
    save_checkpoint.global_step = state_dict["last_step"]
    save_checkpoint.best_score = state_dict["best_score"]
    save_checkpoint.best_epoch = state_dict["best_epoch"]
    save_checkpoint.break_counter = state_dict["break_counter"]
    save_checkpoint.best_val_current_lr_interval = state_dict["best_val_current_lr_interval"]
    save_checkpoint.lr_interval_counter = state_dict["lr_interval_counter"]
    save_checkpoint.current_lr = state_dict["current_lr"]
    save_checkpoint.mode = state_dict["mode"]
    # Normalize to lists, mirroring save_checkpoint's multi-object handling.
    model = [model] if model is not None and not isinstance(model, list) else model
    optimizer = [optimizer] if optimizer is not None and not isinstance(optimizer, list) else optimizer
    scheduler = [scheduler] if scheduler is not None and not isinstance(scheduler, list) else scheduler
    if model is not None and state_dict.get("model", None) is not None:
        for m, state in zip(model, state_dict["model"]):
            m.load_state_dict(state)
    if optimizer is not None and state_dict.get("optimizer", None) is not None:
        for o, state in zip(optimizer, state_dict["optimizer"]):
            o.load_state_dict(state)
    if scheduler is not None and state_dict.get("scheduler", None) is not None:
        for s, state in zip(scheduler, state_dict["scheduler"]):
            #milestones = s.milestones
            #state['milestones'] = milestones
            s.load_state_dict(state)
            #s.milestones = milestones
    logging.info("Loaded checkpoint {} from best_epoch {} last_epoch {}".format(hp_exp['restore_file'], save_checkpoint.best_epoch, save_checkpoint.last_epoch))
| 7,597 | 44.771084 | 160 | py |
sample_complexity_ss_recon | sample_complexity_ss_recon-main/CS_accelerated_MRI_figure5/functions/fftc.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import List, Optional
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse("1.7.0"):
import torch.fft # type: ignore
def fft2c_old(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The FFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.fft(data, 2, normalized=True)
data = fftshift(data, dim=[-3, -2])
return data
def ifft2c_old(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.ifft(data, 2, normalized=True)
data = fftshift(data, dim=[-3, -2])
return data
def fft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The FFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.fftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data
# Helper functions
def roll_one_dim(x: torch.Tensor, shift: int, dim: int) -> torch.Tensor:
    """
    Roll a tensor along a single dimension.

    Args:
        x: A PyTorch tensor.
        shift: Amount to roll (any integer; reduced modulo the size).
        dim: The dimension to roll along.

    Returns:
        Rolled version of x.
    """
    size = x.size(dim)
    shift %= size
    if not shift:
        return x
    # Split off the trailing `shift` elements and move them to the front.
    head = x.narrow(dim, 0, size - shift)
    tail = x.narrow(dim, size - shift, shift)
    return torch.cat((tail, head), dim=dim)
def roll(
    x: torch.Tensor,
    shift: List[int],
    dim: List[int],
) -> torch.Tensor:
    """
    Roll a tensor along several dimensions, like np.roll.

    Args:
        x: A PyTorch tensor.
        shift: Amounts to roll, one entry per dimension in `dim`.
        dim: Dimensions to roll.

    Returns:
        Rolled version of x.

    Raises:
        ValueError: If `shift` and `dim` differ in length.
    """
    if len(shift) != len(dim):
        raise ValueError("len(shift) must match len(dim)")
    rolled = x
    for amount, axis in zip(shift, dim):
        rolled = roll_one_dim(rolled, amount, axis)
    return rolled
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Like np.fft.fftshift but for PyTorch tensors.

    Args:
        x: A PyTorch tensor.
        dim: Dimensions to shift; all dimensions when None.

    Returns:
        fftshifted version of x.
    """
    # Explicit loops (rather than comprehensions) are kept so the
    # function remains torch.jit.script-compatible.
    if dim is None:
        dim = [0] * (x.dim())
        for axis in range(1, x.dim()):
            dim[axis] = axis
    shift = [0] * len(dim)
    for idx, axis in enumerate(dim):
        shift[idx] = x.shape[axis] // 2
    return roll(x, shift, dim)
def ifftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
    """
    Like np.fft.ifftshift but for PyTorch tensors.

    Args:
        x: A PyTorch tensor.
        dim: Dimensions to shift; all dimensions when None.

    Returns:
        ifftshifted version of x.
    """
    # Explicit loops (rather than comprehensions) are kept so the
    # function remains torch.jit.script-compatible.
    if dim is None:
        dim = [0] * (x.dim())
        for axis in range(1, x.dim()):
            dim[axis] = axis
    shift = [0] * len(dim)
    for idx, axis in enumerate(dim):
        # Ceil of half the size undoes fftshift's floor for odd sizes.
        shift[idx] = (x.shape[axis] + 1) // 2
    return roll(x, shift, dim)
| 5,535 | 25.236967 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.