def remove_input_from_outputs(predictions, prompt, verbose=False):
    # Strip the echoed input prompt from each generated prediction and
    # truncate anything generated past the next function definition.
    trimmed_predictions = []
    print(f"Loaded {len(predictions)} Original Predictions")
    for pred in predictions:
        if prompt in pred:
            s = pred.index(prompt)
            e = s + len(prompt)
            # print(f"Original Pred: {pred}")
            trimmed_pred = pred[:s] + pred[e:]
            if "\n\ndef" in trimmed_pred:
                trimmed_pred = trimmed_pred[:trimmed_pred.index("\n\ndef")]
            if trimmed_pred.startswith("return "):
                trimmed_pred = trimmed_pred[len("return "):]
            if verbose:
                print(f"Trimmed Pred: \n{trimmed_pred}")
        else:
            trimmed_pred = pred
        trimmed_predictions.append(trimmed_pred)
    print(f"Collected {len(trimmed_predictions)} (trimmed) predictions")
    return trimmed_predictions


def evaluate(model, dataloader, tokenizer, args):
    model.eval()
    if hasattr(model, "module"):
        model = model.module
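    # Generation settings: beam search via num_beams; temperature and top_p
    # only take effect when sampling (do_sample=True) is enabled.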
    gen_kwargs = {
        "max_length": args.max_length,
        "num_beams": args.num_beams,
        "num_return_sequences": args.num_return_sequences,
        "temperature": args.temperature,
        "top_p": args.top_p,
    }
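    # Each distributed rank appends to its own JSONL shard, named by rank;
    # shards can be merged once every rank has passed the barrier below.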
    total = 0
    write_path = Path(args.output_dir) / f"{args.language}-{args.model_size}-{args.model_data}-predictions"
    fw = open(write_path / f"{args.global_rank}.jsonl", "a")
    print(f"Create sub-file: {write_path / f'{args.global_rank}.jsonl'}")
    with torch.no_grad():
        for i, batch_inputs in enumerate(dataloader):
            batch_prompts = batch_inputs["prompt"]
            batch_inputs = {
                k: v.to(model.device) for k, v in batch_inputs.items()
                if k != "prompt"
            }
            outputs = model.generate(**batch_inputs, **gen_kwargs)
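            # generate() returns num_return_sequences rows per input, flattened
            # along dim 0, so slice out each example's block of candidates.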
            s, e = 0, gen_kwargs["num_return_sequences"]
            batch_size = batch_inputs["input_ids"].size(0)
            for j in range(batch_size):
                j_preds = tokenizer.batch_decode(
                    outputs[s:e],
                    skip_special_tokens=True,
                    truncate_before_pattern=TRUC_PATTERN_LIST,
                )
                j_prompt = batch_prompts[j]
                j_preds = remove_input_from_outputs(j_preds, j_prompt, args.verbose)
                j_dict = {"predictions": j_preds}
                fw.write(json.dumps(j_dict) + "\n")
                s += gen_kwargs["num_return_sequences"]
                e += gen_kwargs["num_return_sequences"]
                total += 1
            if (i + 1) % args.eval_print_freq == 0:
                log = f"Process rank: {args.global_rank}, {i + 1} / {len(dataloader)}"
                logger.warning(log)
    logger.warning(f"Process rank: {args.global_rank}, total {total}")
    fw.close()
    if args.is_distributed:
        torch.distributed.barrier()


def main():
    torch.cuda.empty_cache()
    gc.collect()
    model_kwargs = {}
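    # With multiple GPUs, "balanced_low_0" (an accelerate device map) shards
    # the model across devices but keeps GPU 0 lightly loaded, leaving it
    # headroom to gather generate() outputs.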
    if args.world_size > 1:
        model_kwargs["device_map"] = "balanced_low_0"
    if args.dtype is not None:
        if args.dtype == "int8":
            model_kwargs["load_in_8bit"] = True
        else:
            model_kwargs["torch_dtype"] = torch.bfloat16  # use torch.float16 on GPUs without bf16 support
    print(f"[Model Kwargs] {model_kwargs}")
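    # Decoder-only models need left padding so batched generation continues
    # from the true end of each prompt rather than from pad tokens.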
    tokenizer = AutoTokenizer.from_pretrained(args.model_name, padding_side="left")
    tokenizer.pad_token = tokenizer.eos_token  # reuse EOS (id 50256 in the GPT-style vocab) as the pad token
    eval_examples = src.data.load_data(