import unittest

from transformers import CodeGenConfig, is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import backend_manual_seed, require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, CodeGenForCausalLM, CodeGenModel


class CodeGenModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=256,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def get_large_model_config(self):
        return CodeGenConfig.from_pretrained("Salesforce/codegen-2B-mono")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CodeGenConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            n_positions=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

    def create_and_check_codegen_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_codegen_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

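        # first forward pass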
        outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
        outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past = outputs.to_tuple()

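        # create a hypothetical next token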
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)

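        # append it to input_ids and token_type_ids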
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)

        output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, token_type_ids=next_token_types, past_key_values=past)[
            "last_hidden_state"
        ]

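        # select a random slice of the hidden dimension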
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

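        # test that the outputs are equal for the slice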
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_codegen_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

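        # create an attention mask that ignores the second half of the input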
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

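        # first forward pass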
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

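        # create a hypothetical next token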
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

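        # change a random token inside the masked part of input_ids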
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

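        # append to next input_ids and attn_mask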
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

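        # get two outputs: one computed from scratch, one using the cached past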
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

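        # select a random slice of the hidden dimension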
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

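        # test that the outputs are equal for the slice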
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_codegen_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = CodeGenModel(config=config)
        model.to(torch_device)
        model.eval()

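        # first forward pass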
        outputs = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask, use_cache=True)

        output, past = outputs.to_tuple()

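        # create hypothetical next tokens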
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

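        # append them to input_ids, token_type_ids and the attention mask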
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, token_type_ids=next_token_type_ids, attention_mask=next_attention_mask
        )["last_hidden_state"]
        output_from_past = model(
            next_tokens, token_type_ids=next_token_types, attention_mask=next_attention_mask, past_key_values=past
        )["last_hidden_state"]
        self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])

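        # select a random slice of the hidden dimension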
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

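        # test that the outputs are equal for the slice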
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CodeGenForCausalLM(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = CodeGenForCausalLM(config)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        model.to(torch_device)

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict


@require_torch
class CodeGenModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CodeGenModel, CodeGenForCausalLM) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CodeGenModel, "text-generation": CodeGenForCausalLM} if is_torch_available() else {}
    )
    fx_compatible = False
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

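    # pass-through override: CodeGen needs no model-specific adjustments to the common inputs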
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        return inputs_dict

    def setUp(self):
        self.model_tester = CodeGenModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CodeGenConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_codegen_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model(*config_and_inputs)

    def test_codegen_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_past(*config_and_inputs)

    def test_codegen_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_attention_mask_past(*config_and_inputs)

    def test_codegen_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_codegen_model_past_large_inputs(*config_and_inputs)

    def test_codegen_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_codegen_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    @slow
    def test_batch_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
        model.to(torch_device)

        tokenizer.padding_side = "left"

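        # the tokenizer has no pad token by default, so reuse the EOS token for padding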
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

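        # prompts of different lengths exercise batching; "hellow_world" is kept as-is since the expected outputs below use the same spelling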
        sentences = ["def hellow_world():", "def greet(name):"]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
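        # build dummy token_type_ids where only the final position differs; generating with them should change the output (asserted below)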
        token_type_ids = torch.cat(
            [
                input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0),
                input_ids.new_full((input_ids.shape[0], 1), 500),
            ],
            dim=-1,
        )

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        outputs_tt = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
            token_type_ids=token_type_ids,
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'def hellow_world():\n    print("Hello World")\n\nhellow_world()',
            'def greet(name):\n    print(f"Hello {name}")\n\ng',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertTrue(batch_out_sentence_tt != batch_out_sentence)  # token_type_ids should change output
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        model_name = "Salesforce/codegen-350M-nl"
        model = CodeGenModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


@require_torch
class CodeGenModelLanguageGenerationTest(unittest.TestCase):
    @cached_property
    def cached_tokenizer(self):
        return AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

    @cached_property
    def cached_model(self):
        return CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")

    @slow
    def test_lm_generate_codegen(self):
        tokenizer = self.cached_tokenizer
        for checkpointing in [True, False]:
            model = self.cached_model

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("def hello_world():", return_tensors="pt").to(torch_device)
            expected_output = 'def hello_world():\n    print("Hello World")\n\nhello_world()\n\n'

            output_ids = model.generate(**inputs, do_sample=False)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)

    @slow
    def test_codegen_sample(self):
        tokenizer = self.cached_tokenizer
        model = self.cached_model
        model.to(torch_device)

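        # seed both the CPU and accelerator RNGs so sampling is reproducible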
        torch.manual_seed(0)
        backend_manual_seed(torch_device, 0)

        tokenized = tokenizer("def hello_world():", return_tensors="pt", return_token_type_ids=True)
        input_ids = tokenized.input_ids.to(torch_device)
        output_ids = model.generate(input_ids, do_sample=True)
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        token_type_ids = tokenized.token_type_ids.to(torch_device)
        output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5)
        output_seq_tt = model.generate(
            input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5
        )
        output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True)
        output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True)

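        # CPU and CUDA draw different random streams, so the expected sample is device-dependent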
        if torch_device == "cuda":
            EXPECTED_OUTPUT_STR = 'def hello_world():\n    print("Hello World")\n    return True\n\nresult ='
        else:
            EXPECTED_OUTPUT_STR = "def hello_world():\r\n    print('Hello, World.')\r\n\r\n\r"

        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
        self.assertTrue(
            all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs)))
        )  # token_type_ids should change output