    # Merge entries from the config file into args; keys already present on args
    # are treated as command-line-only and may not be overridden by the config.
    cfg_dict = cfg._cfg_dict.to_dict()
    args_vars = vars(args)
    for k, v in cfg_dict.items():
        if k not in args_vars:
            setattr(args, k, v)
        else:
            raise ValueError("Key {} can only be used by args".format(k))

    # update some new args temporarily
    if not getattr(args, "use_ema", None):
        args.use_ema = False
    if not getattr(args, "debug", None):
        args.debug = False

    # setup logger
    os.makedirs(args.output_dir, exist_ok=True)
    logger = setup_logger(
        output=os.path.join(args.output_dir, "info.txt"), distributed_rank=args.rank, color=False, name="detr"
    )
    logger.info("git:\n {}\n".format(utils.get_sha()))
    logger.info("Command: " + " ".join(sys.argv))
    if args.rank == 0:
        save_json_path = os.path.join(args.output_dir, "config_args_all.json")
        with open(save_json_path, "w") as f:
            json.dump(vars(args), f, indent=2)
        logger.info("Full config saved to {}".format(save_json_path))
    logger.info("world size: {}".format(args.world_size))
    logger.info("rank: {}".format(args.rank))
    logger.info("local_rank: {}".format(args.local_rank))
    logger.info("args: " + str(args) + "\n")

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    seed = args.seed + utils.get_rank()
    # torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
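    # Note (suggestion, not in the original script): torch.manual_seed is left
    # commented out because this is a MindSpore port; if fully deterministic
    # behaviour is needed, mindspore.set_seed(seed) could be called here as well.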

    device_id = int(os.getenv("DEVICE_ID", "0"))
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", device_id=device_id)

    # build the model, load the checkpoint, and switch to inference mode
    focus_detr = build_focus_detr(args)
    param_dict = mindspore.load_checkpoint(args.resume)
    param_not_load = mindspore.load_param_into_net(focus_detr, param_dict)
    print(f"Parameters not loaded from checkpoint: {param_not_load}")
    focus_detr.set_train(False)
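    # Optional sanity check (a suggestion, not in the original script): assuming
    # load_param_into_net returns the list of parameter names it could not find in
    # the checkpoint, one could fail fast instead of evaluating a partly random net:
    # if param_not_load:
    #     raise RuntimeError(f"Checkpoint is missing parameters: {param_not_load}")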

    # build the evaluation dataset and the COCO bbox evaluator
    ds_param = dataset_param()
    dataset, base_ds, length_dataset = build_dataset(ds_param)
    log_freq = length_dataset // 5
    data_loader = dataset.create_dict_iterator()
    coco_evaluator = CocoEvaluator(base_ds, ["bbox"])

    # run inference on the validation set and feed the predictions to the evaluator
    cnt = 0
    test_cnt = 100  # cap the quick evaluation at this many images
    for i, sample in enumerate(data_loader):
        images = sample["image"]
        mask_ms = sample["mask"]
        input_data = {"data": images, "mask": mask_ms}
        outputs = focus_detr(input_data)
        outputs = {"pred_logits": outputs["pred_logits"], "pred_boxes": outputs["pred_boxes"]}
        # rescale predictions to the original image sizes and index them by image id
        orig_target_sizes = sample["orig_sizes"].asnumpy()
        results = post_process(outputs, orig_target_sizes)
        res = {img_id: output for img_id, output in zip(sample["img_id"].asnumpy(), results)}
        coco_evaluator.update(res)
        cnt += 1
        if cnt % 10 == 0:
            print(f"Processed {cnt} images")
        if cnt > test_cnt:
            break

    # aggregate and report the COCO metrics
    coco_evaluator.synchronize_between_processes()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
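

# The post_process helper used in the evaluation loop above is defined elsewhere in
# this repository. Purely as an illustrative sketch (assuming sigmoid-activated class
# logits, normalized cxcywh boxes, (height, width) original sizes, and per-image dicts
# with "scores"/"labels"/"boxes" keys, which is what CocoEvaluator.update consumes in
# DETR-style code bases), such a step could look roughly like this:
def _post_process_reference(outputs, orig_target_sizes, num_select=100):
    """Illustrative only; not the post_process function actually used by this script."""
    logits = outputs["pred_logits"].asnumpy()   # (batch, num_queries, num_classes)
    boxes = outputs["pred_boxes"].asnumpy()     # (batch, num_queries, 4), normalized cxcywh
    scores_all = 1.0 / (1.0 + np.exp(-logits))  # sigmoid over class logits

    results = []
    for scores, box, (img_h, img_w) in zip(scores_all, boxes, orig_target_sizes):
        # keep the top-scoring (query, class) pairs
        flat = scores.reshape(-1)
        topk = np.argsort(-flat)[:num_select]
        topk_scores = flat[topk]
        query_idx = topk // scores.shape[1]
        labels = topk % scores.shape[1]
        # convert cxcywh -> xyxy and scale to the original image size
        cx, cy, w, h = box[query_idx].T
        xyxy = np.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], axis=1)
        xyxy = xyxy * np.array([img_w, img_h, img_w, img_h])
        results.append({"scores": topk_scores, "labels": labels, "boxes": xyxy})
    return results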


if __name__ == "__main__":
    parser = argparse.ArgumentParser("DETR training and evaluation script", parents=[get_args_parser()])
    args = parser.parse_args()
    if args.output_dir:
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    main(args)

# <FILESEP>
# Convolutional Networks with Oriented 1D Kernels (https://arxiv.org/abs/2309.15812)
# Licensed under The MIT License [see LICENSE for details]
# Based on the ConvNeXt code base: https://github.com/facebookresearch/ConvNeXt
# --------------------------------------------------------
import argparse
import datetime
import numpy as np
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import json
import os