for batch_idx, sample in enumerate(TrainImgLoader):
    # Each sample is (image, depth); both are (b, c, d, h, w), where d is
    # the temporal (sequence) dimension.
    image, depth = sample[0], sample[1]
    image = image.cuda()
    depth = depth.cuda()

    optimizer.zero_grad()
    global_step = len(TrainImgLoader) * epoch + batch_idx

    gt_depth = depth
    pred_depth = model(image)  # (b, c, d, h, w)

    # Spatial loss: computed per frame, then summed over the sequence.
    spatial_losses = []
    for seq_idx in range(image.size(2)):
        spatial_loss = cal_spatial_loss(pred_depth[:, :, seq_idx, :, :],
                                        gt_depth[:, :, seq_idx, :, :])
        spatial_losses.append(spatial_loss)
    spatial_loss = sum(spatial_losses)

    # Temporal loss: compare the discriminator's responses to the
    # predicted and ground-truth depth sequences.
    pred_cls = disc(pred_depth)
    gt_cls = disc(gt_depth)
    temporal_loss = cal_temporal_loss(pred_cls, gt_cls)

    loss = spatial_loss + 0.1 * temporal_loss
    losses.update(loss.item(), image.size(0))

    loss.backward()
    optimizer.step()

    batch_time.update(time.time() - end)
    end = time.time()

    print('Epoch: [{0}][{1}/{2}]\t'
          'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})'
          .format(epoch, batch_idx, len(TrainImgLoader),
                  batch_time=batch_time, loss=losses))

# (epoch + 1) % 1 is always 0, so a checkpoint is saved after every epoch.
if (epoch + 1) % 1 == 0:
    save_checkpoint(model.state_dict(),
                    filename=args.checkpoint_dir + "ResNet18_checkpoints_small_"
                    + str(epoch + 1) + ".pth.tar")
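
# The loop above relies on names defined elsewhere in the project: model,
# disc, optimizer, TrainImgLoader, the loss helpers (cal_spatial_loss,
# cal_temporal_loss), the meters (losses, batch_time), and save_checkpoint.
# The sketch below is a minimal, hypothetical version of those helpers,
# assuming L1 losses for both terms and a plain torch.save for checkpoints;
# it is not the original project's code.
import time

import torch
import torch.nn.functional as F


class AverageMeter:
    """Track the latest value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def cal_spatial_loss(pred, gt):
    # Per-frame depth error; L1 is a common choice for depth regression.
    return F.l1_loss(pred, gt)


def cal_temporal_loss(pred_cls, gt_cls):
    # Match the discriminator's response to the predicted sequence
    # against its response to the ground-truth sequence.
    return F.l1_loss(pred_cls, gt_cls)


def save_checkpoint(state, filename="checkpoint.pth.tar"):
    torch.save(state, filename)

# In train(), the meters and timer would be set up once before the batch
# loop, e.g.:
#   losses, batch_time = AverageMeter(), AverageMeter()
#   end = time.time()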
if __name__ == '__main__':
    train()
# <FILESEP>
# Second script: derive VEX guest register offsets and sizes from the
# headers bundled with pyvex.
import pkg_resources

# Raw header text per architecture, so each file is read at most once.
filecache = {}
# Byte sizes of the VEX integer types used in the guest-state structs.
type_sizes = {"UChar": 1, "UShort": 2, "UInt": 4, "ULong": 8, "ULONG": 8, "U128": 16, "U256": 32}
# Result table: {arch_name: {field_name: (offset, size_in_bytes)}}.
arch_data = {}
# Symbolic array lengths that appear in the headers instead of literals.
defines = {"VEX_GUEST_PPC32_REDIR_STACK_SIZE": 16 * 2, "VEX_GUEST_PPC64_REDIR_STACK_SIZE": 16 * 2}


def to_int(ival):
    # Resolve known macro names, otherwise parse as an integer literal;
    # base 0 accepts decimal, hex (0x...), octal, and binary prefixes.
    if ival in defines:
        return defines[ival]
    return int(ival, 0)
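
# Illustrative examples (hypothetical inputs, just showing the parsing rules):
#   to_int("24")                               -> 24
#   to_int("0x10")                             -> 16 (base-0 parsing)
#   to_int("VEX_GUEST_PPC32_REDIR_STACK_SIZE") -> 32 (resolved via defines)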

def load_arch(archh):
    # Return the guest-state header text for an architecture, caching it
    # and making sure the arch has an entry in the result table.
    if archh not in arch_data:
        arch_data[archh] = {}
    if archh not in filecache:
        filecache[archh] = open(pkg_resources.resource_filename("pyvex", "include/libvex_guest_%s.h" % archh)).read()
    return filecache[archh]

# Every line of libvex_guest_offsets.h has the form:
#   #define OFFSET_<arch>_<FIELD> <offset>
a = open(pkg_resources.resource_filename("pyvex", "include/libvex_guest_offsets.h")).read().splitlines()
for line in a:
    _, defname, offstr = line.split()
    offset = int(offstr, 0)
    _, archname, fieldname = defname.split("_", 2)
    fieldname = fieldname.lower()
    arch_defs = load_arch(archname)

    # Scan the tokens of the guest-state struct for the declaration of
    # guest_<fieldname>; the token before it is the type name, and a
    # trailing "[N]" gives the array length.
    arraylen = None
    typename = None
    lst = arch_defs.split()
    for i, k in enumerate(lst):
        if k.lower().split("[")[0].strip(";") == "guest_%s" % fieldname:
            typename = lst[i - 1].strip("/*")
            if typename not in type_sizes:
                continue  # false match (e.g. in a comment); keep scanning
            if "[" in k:
                arraylen = to_int(k.split("[")[1].split("]")[0])
            break
    else:
        raise Exception(f"Could not find field in arch {archname} for {fieldname}")

    fieldsize = type_sizes[typename] * (1 if arraylen is None else arraylen)
    arch_data[archname][fieldname] = (offset, fieldsize)
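
# Illustrative use of the resulting table. The exact architecture and
# field names depend on the headers shipped with the installed pyvex;
# "amd64"/"rsp" below is an assumption for demonstration only.
if __name__ == "__main__":
    for archname in sorted(arch_data):
        print("%s: %d fields" % (archname, len(arch_data[archname])))
    # Look up the offset and size of a single guest register:
    offset, size = arch_data["amd64"]["rsp"]
    print("amd64 rsp: offset=%d, size=%d bytes" % (offset, size))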