self.model.train()
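# Decode only the newly generated tokens, i.e. everything after the original prompt.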
self.temp_output = self.tokenizer.decode(
generate_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
)
print(f'Step : {self.temp_step}/{self.steps}\n'
f'Input : {self.temp_input}\n'
f'Output: {self.temp_output}')
self.input_list.append(self.temp_input)
self.output_list.append(self.temp_output)
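# slice(): locate, in token space, where route_input and the target string sit
# inside the assembled prompt; the resulting input_slice / target_slice are
# consumed by grad() below.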
def slice(self):
prefix = self.model_config.get('prefix', '')
prompt = self.model_config.get('prompt', '')
suffix = self.model_config.get('suffix', '')
temp_str = prefix + prompt
temp_tokens = self.tokenizer(temp_str).input_ids
len1 = len(temp_tokens)
temp_str += self.route_input
temp_tokens = self.tokenizer(temp_str).input_ids
self.input_slice = slice(len1, len(temp_tokens))
# Tokenizers may merge the boundary token of route_input with the preceding
# text, so verify the slice round-trips and widen it by one token if needed.
if self.tokenizer.decode(temp_tokens[self.input_slice]) != self.route_input:
    self.input_slice = slice(self.input_slice.start - 1, self.input_slice.stop)
    decoded = self.tokenizer.decode(temp_tokens[self.input_slice])
    if decoded != self.route_input and decoded.lstrip() != self.route_input:
        # TODO: handle tokenizers where the widened slice still does not
        # recover route_input exactly
        raise NotImplementedError
temp_str += suffix
temp_tokens = self.tokenizer(temp_str).input_ids
len2 = len(temp_tokens)
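# If the suffix ends with ':', add a separating space so the first target
# token does not fuse with the suffix during tokenization.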
if suffix.endswith(':'):
temp_str += ' '
temp_str += self.target
temp_tokens = self.tokenizer(temp_str).input_ids
self.target_slice = slice(len2, len(temp_tokens))
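# grad(): compute token-level gradients for the optimized slice. The tokens in
# input_slice are re-expressed as a one-hot matrix and pushed through the
# embedding table, so the cross-entropy on the target span becomes
# differentiable with respect to each candidate vocabulary entry (the one-hot
# trick used by GCG-style prompt optimizers).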
def grad(self):
model_embed = extract_model_embedding(self.model)
embed_weights = model_embed.weight
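# Rebuild the full input string and append the target the same way slice()
# did, so that input_slice / target_slice index the same token positions here.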
input_str = complete_input(self.model_config, self.route_input)
if input_str.endswith(':'):
input_str += ' '
input_str += self.target
input_ids = self.tokenizer(
input_str, truncation=True, return_tensors='pt'
).input_ids[0].to(self.device)
self.temp_input_ids = input_ids.detach()
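# Build a one-hot matrix over the optimized positions: one row per token in
# input_slice, one column per vocabulary entry (e.g. 5 optimized tokens and a
# 32k vocabulary give a 5 x 32000 matrix with a single 1 per row). Marking it
# requires_grad makes the loss differentiable w.r.t. every candidate token.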
compute_one_hot = torch.zeros(
self.input_slice.stop-self.input_slice.start,
embed_weights.shape[0],
dtype=embed_weights.dtype, device=self.device
)
compute_one_hot.scatter_(
1, input_ids[self.input_slice].unsqueeze(1),
torch.ones(
compute_one_hot.shape[0], 1, device=self.device, dtype=embed_weights.dtype
)
)
compute_one_hot.requires_grad_()
compute_embeds = (compute_one_hot @ embed_weights).unsqueeze(0)
raw_embeds = model_embed(input_ids.unsqueeze(0)).detach()
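# Splice the differentiable embeddings of the optimized slice back into the
# detached embeddings of the surrounding prompt and target.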
concat_embeds = torch.cat([
raw_embeds[:, :self.input_slice.start, :],
compute_embeds,
raw_embeds[:, self.input_slice.stop:, :]
], dim=1)
try:
logits = self.model(inputs_embeds=concat_embeds).logits[0]
except AttributeError:
logits = self.model(input_ids=input_ids.unsqueeze(0), inputs_embeds=concat_embeds)[0]
if logits.dim() > 2:
logits = logits.squeeze()
# If tokenizer truncation shortened the sequence, clamp the target slice to
# the tokens that survived.
if input_ids.shape[0] < self.target_slice.stop:
    self.target_slice = slice(self.target_slice.start, input_ids.shape[0])
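# Logits at position t-1 predict the token at position t, so shift the window
# back by one before scoring the target tokens.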
compute_logits = logits[self.target_slice.start-1 : self.target_slice.stop-1]
target = input_ids[self.target_slice]
loss = cross_entropy(compute_logits, target)
loss.backward()
self.temp_grad = compute_one_hot.grad.detach()
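# sample(): propose batch_size single-token substitutions. Each draw picks a
# position inside input_slice and one of that position's top-k candidate
# tokens according to the gradient stored by grad().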
def sample(self):
self.temp_sample_list = []
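# Top-k candidate token ids for every optimized position, ranked by the
# stored one-hot gradient.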
values, indices = torch.topk(self.temp_grad, k=self.topk, dim=1)
sample_indices = torch.randperm(self.topk * self.temp_grad.shape[0])[:self.batch_size].tolist()
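# Map each flat sample index back to a (position in input_slice, rank within
# that position's top-k) pair.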
for i in range(self.batch_size):
pos = sample_indices[i] // self.topk
pos_index = indices[pos][sample_indices[i] % self.topk].item()