    log.info(f"Instantiating trainer <{cfg.trainer._target_}>")
    trainer: Trainer = hydra.utils.instantiate(cfg.trainer, logger=logger)

    object_dict = {
        "cfg": cfg,
        "datamodule": datamodule,
        "model": model,
        "logger": logger,
        "trainer": trainer,
    }

    if logger:
        log.info("Logging hyperparameters!")
        utils.log_hyperparameters(object_dict)

    log.info("Starting testing!")

    if datamodule.hparams.dataset in ["nyu", "sunrgbd"]:
        # NYU / SUN RGB-D: repeat the test pass with different seeds and report
        # mean ± std of every metric.
        results = {}
        if datamodule.hparams.dataset == "nyu":
            num_runs = 10
        else:
            num_runs = 1
        for i in range(num_runs):
            pl.seed_everything(cfg.seed + i)
            trainer.test(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)
            metric_dict = trainer.callback_metrics
            for k in metric_dict.keys():
                # drop the "test/" prefix from the logged metric name
                if k[5:] in results:
                    results[k[5:]].append(metric_dict[k].item())
                else:
                    results[k[5:]] = [metric_dict[k].item()]

        import csv

        import numpy as np

        # Append the aggregated metrics as a final "Result" row of the model's CSV log.
        with open(model.val_csv, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=model.fieldnames)
            output = {}
            output["epoch"] = "Result"
            for k in results.keys():
                output[k] = f"{np.mean(results[k]):.4f}±{np.std(results[k]):.4f}"
                print(f"{k:5s}: {output[k]}")
            writer.writerow(output)
    else:
        # trainer.test(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)
        trainer.validate(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)  # for KITTI test

    metric_dict = trainer.callback_metrics

    return metric_dict, object_dict


@hydra.main(version_base="1.3", config_path="configs", config_name="eval.yaml")
def main(cfg: DictConfig) -> None:
    evaluate(cfg)


if __name__ == "__main__":
    main()
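
# Example invocation (a sketch, not taken from the repository: the script name and
# checkpoint path below are placeholders). Hydra composes configs/eval.yaml, and any
# key in the resulting config can be overridden from the command line:
#   python eval.py ckpt_path=/path/to/model.ckpt
#   python eval.py ckpt_path=/path/to/model.ckpt seed=42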
# <FILESEP>
import sys
import os
import math
from functools import partial

import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

sys.path.append('.')

from extractor.high_frequency_feature_extraction import HighDctFrequencyExtractor
from extractor.low_frequency_feature_extraction import LowDctFrequencyExtractor
from IMDLBenCo.registry import MODELS


class ConvNeXt(timm.models.convnext.ConvNeXt):
    """ConvNeXt-Tiny backbone adapted to 6-channel input and used as a feature extractor."""

    def __init__(self, conv_pretrain=False):
        super().__init__(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768))
        if conv_pretrain:
            print("Load Convnext pretrain.")
            model = timm.create_model('convnext_tiny', pretrained=True)
            self.load_state_dict(model.state_dict())

        # Replace the stem convolution so the network accepts 6 input channels:
        # the first 3 channels copy the original stem weights, the extra 3 are
        # re-initialised with Kaiming normal.
        original_first_layer = self.stem[0]
        new_first_layer = nn.Conv2d(6, original_first_layer.out_channels,
                                    kernel_size=original_first_layer.kernel_size,
                                    stride=original_first_layer.stride,
                                    padding=original_first_layer.padding, bias=False)
        new_first_layer.weight.data[:, :3, :, :] = original_first_layer.weight.data.clone()[:, :3, :, :]
        new_first_layer.weight.data[:, 3:, :, :] = torch.nn.init.kaiming_normal_(new_first_layer.weight[:, 3:, :, :])
        self.stem[0] = new_first_layer

        # Keep only the first three stages and drop the classification head.
        self.stages = self.stages[:-1]
        del self.head

    def forward_features(self, x):
        # Return the feature map produced by every remaining stage.
        x = self.stem(x)
        out = []
        for stage in self.stages:
            x = stage(x)
            out.append(x)
        return out
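
# Usage sketch (not part of the original file; it assumes a 224x224 input where the
# 6 channels are an RGB image concatenated with a 3-channel auxiliary map, matching
# the modified stem). With the last stage removed, forward_features yields one
# feature map per remaining stage of convnext_tiny:
#   backbone = ConvNeXt(conv_pretrain=False)
#   feats = backbone.forward_features(torch.randn(1, 6, 224, 224))
#   [f.shape for f in feats]
#   # -> [torch.Size([1, 96, 56, 56]), torch.Size([1, 192, 28, 28]), torch.Size([1, 384, 14, 14])]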