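# Imports used by the methods below (pickle is assumed, matching the .pkl save):
import os
import pickle
from datetime import datetime

import torch
from torch.nn.functional import cross_entropy
from tqdm import tqdm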
        # one (position, token id) pair is collected per candidate
        self.temp_sample_list.append((pos, pos_index))
        pos_list, pos_index_list = zip(*self.temp_sample_list)
        pos_tensor = torch.tensor(pos_list, dtype=self.temp_input_ids.dtype, device=self.temp_input_ids.device)
        pos_tensor += self.input_slice.start  # positions are relative to the optimized input slice
        pos_index_tensor = torch.tensor(pos_index_list, dtype=self.temp_input_ids.dtype, device=self.temp_input_ids.device)
        # build one candidate sequence per row, each with a single token substituted
        sample_ids = self.temp_input_ids.repeat(self.batch_size, 1)
        sample_ids[range(self.batch_size), pos_tensor] = pos_index_tensor
        self.temp_sample_ids = sample_ids
    def forward(self):
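        """Score all sampled candidates in mini-batches and keep the lowest-loss one."""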
        loss = torch.empty(0, device=self.device)
        with tqdm(total=self.batch_size) as pbar:
            pbar.set_description('Processing')
            for mini_batch in range(self.mini_batches):
                start = mini_batch * self.mini_batch_size
                end = min((mini_batch + 1) * self.mini_batch_size, self.batch_size)
                targets = self.temp_input_ids[self.target_slice].repeat(end - start, 1)
                logits = self.model(self.temp_sample_ids[start:end]).logits
                # cross_entropy expects (batch, classes, seq), so move the vocab dim forward
                logits = logits.permute(0, 2, 1)
                # the logits at position i predict token i + 1, hence the shift by one
                mini_batch_loss = cross_entropy(
                    logits[:, :, self.target_slice.start - 1:self.target_slice.stop - 1],
                    targets, reduction='none'
                ).mean(dim=-1)
                loss = torch.cat([loss, mini_batch_loss.detach()])
                torch.cuda.empty_cache()
                pbar.update(end - start)
        # keep the candidate with the lowest loss and decode it back to text
        min_loss, min_index = loss.min(dim=-1)
        self.temp_loss = min_loss.item()
        self.loss_list.append(self.temp_loss)
        self.temp_input_ids = self.temp_sample_ids[min_index]
        self.temp_input = self.tokenizer.decode(
            self.temp_input_ids[self.input_slice],
            skip_special_tokens=True,
        )
        if self.model_name == 'internlm':
            # InternLM's decoder may prepend an extra space, so strip it
            self.temp_input = self.temp_input.lstrip()
    def update(self):
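        """Decide whether the current candidate replaces the best route found so far."""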
        update_strategy = self.kwargs.get('update_strategy', 'strict')
        is_update = False
        if update_strategy == 'strict':
            # accept only strict improvements
            if self.temp_loss < self.route_loss:
                is_update = True
        elif update_strategy == 'gaussian':
            # accept small regressions with a probability that shrinks as the
            # relative loss gap (per step since the last update) grows
            gap_step = min(self.temp_step - self.route_step_list[-1], 20)
            if (self.temp_loss / self.route_loss - 1) * 100 / gap_step <= torch.randn(1)[0].abs():
                is_update = True
        print(f'Temp Loss: {self.temp_loss}\t'
              f'Route Loss: {self.route_loss}\n'
              f'Update: {is_update}\n')
        if is_update:
            self.route_step_list.append(self.temp_step)
            self.route_input = self.temp_input
            self.route_input_list.append(self.route_input)
            self.route_loss = self.temp_loss
            self.route_loss_list.append(self.route_loss)
            self.route_output_list.append(self.temp_output)
    def pre(self):
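        """Run an initial test and seed the route-tracking lists before step 1."""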
        self.test()
        print('=' * 128, '\n')
        self.route_step_list.append(self.temp_step)
        self.route_input_list.append(self.temp_input)
        self.route_output_list.append(self.temp_output)
        self.route_loss_list.append(self.route_loss)
        self.temp_step += 1
    def save(self):
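        """Dump the run configuration and recorded trajectories to a pickle file."""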
        save_dir = self.kwargs.get('save_dir', './results')
        os.makedirs(save_dir, exist_ok=True)
        save_dict = {
            'model_name': self.model_name,
            'init_input': self.init_input,
            'target': self.target,
            'steps': self.steps,
            'topk': self.topk,
            'batch_size': self.batch_size,
            'mini_batch_size': self.mini_batch_size,
            'kwargs': self.kwargs,
            'input_list': self.input_list,
            'output_list': self.output_list,
            'loss_list': self.loss_list,
            'route_step_list': self.route_step_list,
            'route_input_list': self.route_input_list,
            'route_output_list': self.route_output_list,
            'route_loss_list': self.route_loss_list,
        }
        pkl_name = self.model_name + datetime.now().strftime("_%y%m%d%H%M%S.pkl")
        with open(os.path.join(save_dir, pkl_name), mode='wb') as f:
            pickle.dump(save_dict, f)  # (assumed) pickle serialization, matching the .pkl extension
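    # A possible driver loop (a sketch under assumptions, not part of this excerpt):
    # the methods above suggest per-step sampling, scoring, and route updates, e.g.
    #
    #     attack.pre()
    #     for _ in range(attack.steps):
    #         attack.sample()   # fills temp_sample_list / temp_sample_ids (truncated above)
    #         attack.forward()  # scores candidates, keeps the lowest-loss one
    #         attack.update()   # accepts or rejects it per update_strategy
    #     attack.save()
    #
    # (`attack` and the `sample` method name are hypothetical.)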