import json
import os
import re
import unittest
from functools import lru_cache

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible

@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "Salesforce/codegen-350M-mono"
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Build a tiny BPE vocab and merges file on disk so tokenizers can be instantiated locally.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        cls.special_tokens_map = {"unk_token": "<unk>"}

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(cls.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return CodeGenTokenizer.from_pretrained(pretrained_name, **kwargs)

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_rust_tokenizer(cls, pretrained_name=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        pretrained_name = pretrained_name or cls.tmpdirname
        return CodeGenTokenizerFast.from_pretrained(pretrained_name, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

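    # With the toy merges above, " low" + "er" is the best segmentation of "lower", while "newer"
    # has no merge path to the full word and falls back to " ", "n", "e", "w" plus the learned "er" merge.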
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

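    # The slow tokenizer takes add_prefix_space per call, while the fast tokenizer is configured with it
    # at init time; both paths should produce identical tokens and ids.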
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @unittest.skip
    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs do not combine well with byte-level BPE here: a prefix space would have to
        # be added in front of every word, which the slow and fast tokenizers handle differently.
        pass

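    # The pretrained CodeGen tokenizers ship without a pad token, so any request to pad must raise.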
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)

                # Simple and pair inputs
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input: encode
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input: encode_plus
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input: batch_encode_plus
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input: encode
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input: encode_plus
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input: batch_encode_plus
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

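    # Once a pad token is supplied at load time, both max_length and longest-in-batch padding should work,
    # and only the shorter element of each batch should receive pad ids and zeroed attention-mask entries.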
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple and pair inputs, with a longer first element so the second one has to be padded
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncation=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncation=True, return_tensors="np")

        # s: single string padded to max_length
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2: batch padded to the longest element
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # longer sequence has no padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # shorter sequence is padded
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p: single pair padded to max_length
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2: batch of pairs padded to the longest element
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # longer pair has no padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # shorter pair is padded
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

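    # With add_bos_token=True, every encoded sequence should start with the configured BOS id, and the
    # BOS text should survive decoding.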
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertTrue(decode_s.startswith(bos_token))
        self.assertTrue(all(d.startswith(bos_token) for d in decode_s2))

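    # CodeGen's decode() accepts truncate_before_pattern: decoding stops at the earliest regex match,
    # which is how generated completions get cut off before trailing comments, docstring openers, or EOS.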
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n result = a\nelse:\n result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    @unittest.skip(reason="tokenizer has no padding token")
    def test_padding_different_model_input_name(self):
        pass

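    # Round-trip integration check against hard-coded ids produced by the released
    # Salesforce/codegen-350M-mono vocabulary.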
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over 32+ pretrained "
            "models in 100+ languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        # Default behaviour: no token_type_ids are returned.
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("Salesforce/codegen-350M-mono")

            encoding = tokenizer(sequences)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {'input_ids': [[41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13], [13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13], [464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
            # fmt: on

            encoding_data = encoding.data
            self.assertDictEqual(encoding_data, expected_encoding)

            for expected, decoded in zip(sequences, decoded_sequences):
                self.assertEqual(expected, decoded)

        # Same check with return_token_type_ids=True set at init time: token_type_ids are now returned as well.
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("Salesforce/codegen-350M-mono", return_token_type_ids=True)

            encoding = tokenizer(sequences)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {'input_ids': [[41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13], [13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13], [464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
            # fmt: on

            encoding_data = encoding.data
            self.assertDictEqual(encoding_data, expected_encoding)

            for expected, decoded in zip(sequences, decoded_sequences):
                self.assertEqual(expected, decoded)